commit (stringlengths 40-40) | old_file (stringlengths 4-118) | new_file (stringlengths 4-118) | old_contents (stringlengths 0-2.94k) | new_contents (stringlengths 1-4.43k) | subject (stringlengths 15-444) | message (stringlengths 16-3.45k) | lang (stringclasses, 1 value) | license (stringclasses, 13 values) | repos (stringlengths 5-43.2k)
|---|---|---|---|---|---|---|---|---|---|
39aa4000a0625e9589803c9a823733a63d68d8ef
|
corehq/apps/reports/management/commands/update_custom_recipient_type.py
|
corehq/apps/reports/management/commands/update_custom_recipient_type.py
|
from django.core.management import BaseCommand
from corehq.messaging.scheduling.scheduling_partitioned.models import (
CaseTimedScheduleInstance,
TimedScheduleInstance,
CaseAlertScheduleInstance,
AlertScheduleInstance,
)
from corehq.sql_db.util import get_db_aliases_for_partitioned_query
from corehq.apps.data_interfaces.models import CreateScheduleInstanceActionDefinition
from django.db.models import Q
class Command(BaseCommand):
help = 'Updates a custom recipient value'
def add_arguments(self, parser):
parser.add_argument(
'--current-type',
help='The current custom type which is to be updated.',
)
parser.add_argument(
'--new-type',
help='The new custom type which the current type is to be updated to.',
)
def handle(self, **options):
current_type = options.get('current_type')
new_type = options.get('new_type')
if not current_type or not new_type:
raise Exception('current-type or new-type value missing')
for db in get_db_aliases_for_partitioned_query():
CaseTimedScheduleInstance.objects.using(db).filter(recipient_id=current_type).update(
recipient_id=new_type
)
TimedScheduleInstance.objects.using(db).filter(recipient_id=current_type).update(
recipient_id=new_type
)
CaseAlertScheduleInstance.objects.using(db).filter(recipient_id=current_type).update(
recipient_id=new_type
)
AlertScheduleInstance.objects.using(db).filter(recipient_id=current_type).update(
recipient_id=new_type
)
# Filter those not equal to [] just to be safe
definitions = CreateScheduleInstanceActionDefinition.objects.filter(~Q(recipients=[]))
for definition in definitions:
recipients = definition.recipients
has_changed = False
for recipient in recipients:
if recipient[0] == "CustomRecipient" and recipient[1] == current_type:
recipient[1] = new_type
has_changed = True
if has_changed:
definition.save()
|
Add management command to update custom recipient type
|
Add management command to update custom recipient type
|
Python
|
bsd-3-clause
|
dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq
|
|
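A minimal sketch of invoking the command above through Django's management API; the two custom-type values are hypothetical placeholders, and a configured CommCare HQ settings module is assumed:

from django.core.management import call_command

# --current-type / --new-type map to these keyword arguments
call_command(
    'update_custom_recipient_type',
    current_type='OLD_CUSTOM_TYPE',   # hypothetical existing recipient type
    new_type='NEW_CUSTOM_TYPE',       # hypothetical replacement
)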
dfd7ee9d60c1ea0b2ecd5b09eb9a5468f6045852
|
gen_training_data_1.py
|
gen_training_data_1.py
|
import sys
import os
import json
import numpy as np
MAX_ACTIONS = 20
PLAYER_RANGE = (4, 7)
def init_vec(num_players):
input_vec = [0]*8
input_vec[num_players - PLAYER_RANGE[0]] = 1
return input_vec
def gen_training_data(hand):
num_players = hand['num_players']
if PLAYER_RANGE[0] <= num_players <= PLAYER_RANGE[1]:
players = []
for action in hand['actions'][:num_players]:
players.append(action[0])
i = 0
j = 0
inputs = [init_vec(num_players)]
outputs = []
while i < len(hand['actions']):
action = hand['actions'][i]
if action == 'NEXT':
break
input_vec = init_vec(num_players)
output_vec = [0] * 4
if action[0] == players[j]:
if action[1] == 0:
input_vec[4] = 1
output_vec[0] = 1
elif action[1] in [1, 2]:
input_vec[5] = 1
output_vec[1] = 1
else:
input_vec[6] = 1
output_vec[2] = 1
i += 1
else:
input_vec[7] = 1
output_vec[3] = 1
inputs.append(input_vec)
outputs.append(output_vec)
j = (j + 1) % num_players
inputs = inputs[:-1]
if len(inputs) < MAX_ACTIONS:
for _ in range(MAX_ACTIONS - len(inputs)):
inputs.append([0]*8)
outputs.append([0, 0, 0, 1])
elif len(inputs) > MAX_ACTIONS:
inputs = inputs[:MAX_ACTIONS]
outputs = outputs[:MAX_ACTIONS]
return inputs, outputs
return False
if __name__ == '__main__':
input_dir = sys.argv[1]
output_file = sys.argv[2]
inputs = []
outputs = []
for filename in os.listdir(input_dir)[:20]:
filename = os.path.join(input_dir, filename)
print "Filename:", filename
f = open(filename, 'r')
counter = 0
for hand in json.loads(f.read()):
res = gen_training_data(hand)
if res:
inp, out = res
inputs.append(inp)
outputs.append(out)
counter += 1
print "Num Hands: ", counter
f.close()
input_arr = np.asarray(inputs)
output_arr = np.asarray(outputs)
np.savez_compressed(output_file, input=input_arr, output=output_arr)
|
Add first training data generator.
|
Add first training data generator.
|
Python
|
apache-2.0
|
session-id/poker-predictor
|
|
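The generator above is Python 2 (bare print statements) and writes a compressed NumPy archive; assuming it was run as python gen_training_data_1.py hands/ train.npz (hypothetical paths), the result can be read back like this:

import numpy as np

data = np.load('train.npz')
inputs, outputs = data['input'], data['output']
# Each hand is padded/truncated to MAX_ACTIONS rows, so the shapes are
# (num_hands, 20, 8) for the one-hot inputs and (num_hands, 20, 4) for labels.
print(inputs.shape, outputs.shape)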
864ab5e404ecf87e226a8cdb74e65cc343a8b377
|
JoiningData/join2_mapper.py
|
JoiningData/join2_mapper.py
|
#!/usr/bin/env python
import sys
shows = []
for line in sys.stdin:
line = line.strip()
key_value = line.split(',')
if key_value[1] == 'ABC':
if key_value[1] not in shows:
shows.append(key_value[0])
if key_value[1].isdigit() and (key_value[0] in shows):
print('{0}\t{1}'.format(key_value[0], key_value[1]) )
|
Add custom implementation from stack overflow.
|
Add custom implementation from stack overflow.
|
Python
|
mit
|
dimitardanailov/learn-hadoop
|
|
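Hadoop Streaming only contracts on stdin/stdout, so the mapper can be smoke-tested locally without a cluster; the show name and view count below are hypothetical sample records:

import subprocess

sample = "Almost_Games,ABC\nAlmost_Games,1004\n"
result = subprocess.run(
    ['python', 'join2_mapper.py'], input=sample,
    capture_output=True, text=True,
)
print(result.stdout)  # expected: "Almost_Games\t1004" (digit lines for shows seen on ABC)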
bc721bb9e31c54a2eaab9ec84743f635f807abf8
|
benchexec/tools/libkluzzer.py
|
benchexec/tools/libkluzzer.py
|
"""
BenchExec is a framework for reliable benchmarking.
This file is part of BenchExec.
Copyright (C) 2007-2019 Dirk Beyer
All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import benchexec.util as util
import benchexec.tools.template
class Tool(benchexec.tools.template.BaseTool):
"""
Tool info for LibKluzzer (http://unihb.eu/kluzzer).
"""
REQUIRED_PATHS = ["bin", "lib"]
def program_files(self, executable):
return self._program_files_from_executable(
executable, self.REQUIRED_PATHS, parent_dir=True
)
def executable(self):
return util.find_executable("LibKluzzer", "bin/LibKluzzer")
def version(self, executable):
return self._version_from_tool(executable)
def name(self):
return "LibKluzzer"
|
Create Tool-info module for LibKluzzer
|
Create Tool-info module for LibKluzzer
|
Python
|
apache-2.0
|
dbeyer/benchexec,dbeyer/benchexec,ultimate-pa/benchexec,sosy-lab/benchexec,sosy-lab/benchexec,ultimate-pa/benchexec,sosy-lab/benchexec,ultimate-pa/benchexec,sosy-lab/benchexec,ultimate-pa/benchexec,sosy-lab/benchexec,dbeyer/benchexec,ultimate-pa/benchexec,ultimate-pa/benchexec,sosy-lab/benchexec,dbeyer/benchexec
|
|
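A rough sketch of exercising the tool-info module on its own; it assumes BenchExec is installed, that the module is importable as benchexec.tools.libkluzzer, and that a LibKluzzer distribution is on the search path:

from benchexec.tools.libkluzzer import Tool

tool = Tool()
exe = tool.executable()  # searches PATH, falling back to bin/LibKluzzer
print(tool.name(), tool.version(exe))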
caf1325a0ba81dba20492cd3013fb4f1f7f11b99
|
repeatable_fetcher.py
|
repeatable_fetcher.py
|
import asyncio
import aiohttp
def fetch_url(client, url):
resp = yield from client.get(url)
try:
text = yield from resp.text()
return text
finally:
yield from resp.release()
def fetch(client, url, retries=5, timeout=30):
for i in range(retries):
ret = yield from asyncio.wait_for(fetch_url(client, url),
timeout)
return ret
loop = asyncio.get_event_loop()
client = aiohttp.ClientSession()
try:
txt = loop.run_until_complete(fetch(client, 'http://httpbin.org/get'))
print(txt)
finally:
client.close()
|
Add example for repeatable url fetcher implemented on top of streams
|
Add example for repeatable url fetcher implemented on top of streams
|
Python
|
apache-2.0
|
ifosch/europython2015,hguemar/europython2015,asvetlov/europython2015
|
|
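The fetcher above uses the pre-3.5 generator-coroutine style (yield from); for comparison, a sketch of the same flow in the async/await syntax of later Python and aiohttp releases (like the original, it returns on the first successful attempt):

import asyncio
import aiohttp

async def fetch(url, retries=5, timeout=30):
    async with aiohttp.ClientSession() as client:
        for _ in range(retries):
            try:
                async with client.get(url, timeout=aiohttp.ClientTimeout(total=timeout)) as resp:
                    return await resp.text()
            except asyncio.TimeoutError:
                continue  # retry on timeout

print(asyncio.run(fetch('http://httpbin.org/get')))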
288e0b7b5847fde184769cb67dc5ef40f9de3c30
|
IoTPy/tests/shared_variables_test.py
|
IoTPy/tests/shared_variables_test.py
|
import sys
import os
sys.path.append(os.path.abspath("../helper_functions"))
sys.path.append(os.path.abspath("../core"))
sys.path.append(os.path.abspath("../agent_types"))
from agent import Agent
from stream import Stream, StreamArray
from stream import _no_value, _multivalue
from check_agent_parameter_types import *
from recent_values import recent_values
from op import *
def sort(lst):
def flip(I, L):
i = I[0]
if lst[i] > lst[i+1]:
lst[i], lst[i+1] = lst[i+1], lst[i]
return (1)
else:
return (_no_value)
x = Stream('x')
for i in range(len(lst) - 1):
signal_element(func=flip, in_stream=x, out_stream=x, name=i, I=[i], L=lst)
scheduler = Stream.scheduler
x.append(1)
scheduler.step()
def shortest_path(D):
def triangle_inequality(triple, D):
i, j, k = triple
if D[i][j] + D[j][k] < D[i][k]:
D[i][k] = D[i][j] + D[j][k]
D[k][i] = D[i][k]
return(1)
else:
return (_no_value)
x = Stream('x')
size = len(D)
for i in range(size):
for j in range(i):
for k in range(size):
signal_element(func=triangle_inequality,
in_stream=x, out_stream=x,
name=str(i)+"_"+str(j)+"_"+str(k),
triple=[i, j, k], D=D)
scheduler = Stream.scheduler
x.append(1)
scheduler.step()
return D
def test_shared_variables():
lst = [10, 6, 8, 3, 20, 2, 23, 35]
sort(lst)
assert lst == [2, 3, 6, 8, 10, 20, 23, 35]
D = [[0, 20, 40, 60], [20, 0, 10, 1], [40, 10, 0, 100],
[60, 1, 100, 0]]
shortest_path(D)
assert D == [[0, 20, 30, 21], [20, 0, 10, 1],
[30, 10, 0, 11], [21, 1, 11, 0]]
print 'TEST OF SHARED VARIABLES IS SUCCESSFUL!'
if __name__ == '__main__':
test_shared_variables()
|
Test of shared variables. Used in UNITY operations.
|
Test of shared variables. Used in UNITY operations.
|
Python
|
bsd-3-clause
|
AssembleSoftware/IoTPy
|
|
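The shortest_path agents converge to the same fixed point as the classical sequential relaxation; a plain-Python Floyd-Warshall reference, useful for checking the streamed result:

def floyd_warshall(D):
    n = len(D)
    for k in range(n):
        for i in range(n):
            for j in range(n):
                if D[i][k] + D[k][j] < D[i][j]:
                    D[i][j] = D[i][k] + D[k][j]
    return D

# e.g. floyd_warshall([[0, 20, 40, 60], [20, 0, 10, 1], [40, 10, 0, 100], [60, 1, 100, 0]])
# yields the matrix asserted in test_shared_variables().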
512ebb46c4f1bd851cc91afc67a2ce444fe077dd
|
tests/cupy_tests/cuda_tests/test_cusolver.py
|
tests/cupy_tests/cuda_tests/test_cusolver.py
|
import unittest
from cupy import cuda
class TestCusolver(unittest.TestCase):
def test_cusolver_enabled(self):
self.assertEqual(cuda.runtime.runtimeGetVersion() >= 8000,
cuda.cusolver_enabled)
|
Add test for cuda.cusolver_enabled flag
|
Add test for cuda.cusolver_enabled flag
|
Python
|
mit
|
cupy/cupy,cupy/cupy,cupy/cupy,cupy/cupy
|
|
51056fd6e51752b44ba6d10f70c40111db6f9e07
|
tests/mapper/test_parameterization.py
|
tests/mapper/test_parameterization.py
|
from tests.mapper import get_symbols, SOURCE_PAIR, SOURCE_FULL
from thinglang.lexer.values.identifier import Identifier
from thinglang.symbols.symbol import Symbol
def test_simple_parameterization():
pair = get_symbols(SOURCE_PAIR)[Identifier('Pair')]
pair_number = pair.parameterize({
Identifier('T'): Identifier('number')
})
lhs_desc = pair_number[Identifier('lhs')]
assert lhs_desc.kind is Symbol.MEMBER
assert lhs_desc.type == Identifier('number')
assert lhs_desc.visibility is Symbol.PUBLIC
assert not lhs_desc.static
assert lhs_desc.index == 0
set_values = pair_number[Identifier('set_values')]
assert set_values.kind is Symbol.METHOD
assert set_values.type == Identifier('number')
assert set_values.visibility is Symbol.PUBLIC
assert not set_values.static
assert set_values.index == 1
assert set_values.arguments == [Identifier('number')] * 2
def test_parameterization_propagation():
symbols = get_symbols(SOURCE_FULL)
generic_type = symbols[Identifier('Person')][Identifier('favorite_numbers')].type
parametrized_type = symbols[generic_type]
assert parametrized_type.name == Identifier('Pair:<{T: number}>')
|
Add test for generic parametrization
|
Add test for generic parametrization
|
Python
|
mit
|
ytanay/thinglang,ytanay/thinglang,ytanay/thinglang,ytanay/thinglang
|
|
1e957f371f36c4a5b92ba7b53af6e99075c13096
|
alembic/versions/188c85878d36_add_webhook_column.py
|
alembic/versions/188c85878d36_add_webhook_column.py
|
"""add webhook column
Revision ID: 188c85878d36
Revises: a9ecd1c767
Create Date: 2014-11-06 11:06:28.337421
"""
# revision identifiers, used by Alembic.
revision = '188c85878d36'
down_revision = 'a9ecd1c767'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column('app', sa.Column('webhook', sa.Text))
def downgrade():
op.drop_column('app', 'webhook')
|
Add webhook column to project.
|
Add webhook column to project.
|
Python
|
agpl-3.0
|
inteligencia-coletiva-lsd/pybossa,geotagx/pybossa,jean/pybossa,jean/pybossa,OpenNewsLabs/pybossa,geotagx/pybossa,PyBossa/pybossa,PyBossa/pybossa,OpenNewsLabs/pybossa,stefanhahmann/pybossa,stefanhahmann/pybossa,Scifabric/pybossa,Scifabric/pybossa,inteligencia-coletiva-lsd/pybossa
|
|
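A sketch of applying the migration programmatically via Alembic's command API (equivalent to running alembic upgrade 188c85878d36 on the command line); the alembic.ini path is an assumption about the project layout:

from alembic import command
from alembic.config import Config

command.upgrade(Config('alembic.ini'), '188c85878d36')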
7358f2197f3faef3003ffa29188737616833ae42
|
project-euler/062.py
|
project-euler/062.py
|
# coding=utf-8
'''
Problem 62
30 January 2004
The cube, 41063625 (345^(3)), can be permuted to produce two other cubes: 56623104 (384^(3)) and 66430125 (405^(3)). In fact, 41063625 is the smallest cube which has exactly three permutations of its digits which are also cube.
Find the smallest cube for which exactly five permutations of its digits are cube.
'''
cubes = {}
solution, i = 0, 0
while solution == 0:
i += 1
cube = "".join(sorted(list(str(i**3))))
if cube not in cubes:
cubes[cube] = [i]
else:
cubes[cube] += [i]
if len(cubes[cube]) == 5:
solution = min(cubes[cube])**3
print solution
|
Add solution for Problem 62.
|
Add solution for Problem 62.
|
Python
|
mit
|
robertdimarco/puzzles,robertdimarco/puzzles,robertdimarco/puzzles,robertdimarco/puzzles,robertdimarco/puzzles,robertdimarco/puzzles,robertdimarco/puzzles,robertdimarco/puzzles,robertdimarco/puzzles,robertdimarco/puzzles
|
|
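The dictionary key is a digit signature: cubes that are permutations of one another sort to the same string, which is what makes the grouping above work. The three cubes quoted in the problem statement illustrate it:

def sig(n):
    return "".join(sorted(str(n)))

# 345**3, 384**3 and 405**3 are digit permutations of each other
assert sig(345**3) == sig(384**3) == sig(405**3) == '01234566'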
f6b0fbc208012610e7658c02318226d0085ed7a0
|
py/valid-square.py
|
py/valid-square.py
|
from operator import itemgetter
class Solution(object):
def validSquare(self, p1, p2, p3, p4):
"""
:type p1: List[int]
:type p2: List[int]
:type p3: List[int]
:type p4: List[int]
:rtype: bool
"""
mid = [sum(map(itemgetter(0), [p1, p2, p3, p4])), sum(map(itemgetter(1), [p1, p2, p3, p4]))]
v1 = [p1[0] * 4 - mid[0], p1[1] * 4 - mid[1]]
for p in [p2, p3, p4]:
if p1 == p:
return False
v = [p[0] * 4 - mid[0], p[1] * 4 - mid[1]]
# perpendicular or parallel
if v[0] * v1[0] + v[1] * v1[1] != 0 and v[0] * v1[1] != v[1] * v1[0]:
return False
# same length
if v[0] * v[0] + v[1] * v[1] != v1[0] * v1[0] + v1[1] * v1[1]:
return False
return True
|
Add py solution for 593. Valid Square
|
Add py solution for 593. Valid Square
593. Valid Square: https://leetcode.com/problems/valid-square/
|
Python
|
apache-2.0
|
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
|
|
02e58eb884046d04c988f3b8dde76e047d7c591f
|
python/example1.py
|
python/example1.py
|
# ATCA Rapid Response Service
# Jamie.Stevens@csiro.au
# example1.py
# This example script shows how to make a schedule and request time for it from
# the web service.
# The modules we'll need.
import atca_rapid_response_api as arrApi
import cabb_scheduler as cabb
# Example 1.
# The situation is the same as in example 1 of the CABB scheduling library.
# Suppose an event trigger has been received for a flaring magnetar at
# coordinates RA = 01:00:43.1, Dec = -72:11:33.8.
# Make a new schedule.
schedule = cabb.schedule()
# Add a scan to look at the magnetar's coordinates.
# This is also where we set our project code; in this example we'll use
# the code C007 (we have a test authorisation token for this project).
# We'll also set it to be 20 minutes long, with Dwell mode.
scan1 = schedule.addScan(
{ 'source': "magnetar", 'rightAscension': "01:00:43.1", 'declination': "-72:11:33.8",
'freq1': 5500, 'freq2': 9000, 'project': "C007",
'scanLength': "00:20:00", 'scanType': "Dwell" }
)
# Request a list of nearby calibrators from the ATCA calibrator database.
calList = scan1.findCalibrator()
# Ask the library to choose the best one for the current array. We first need to
# get the current array from MoniCA.
currentArray = cabb.monica_information.getArray()
# And pass this as the argument to the calibrator selector.
bestCal = calList.getBestCalibrator(currentArray)
# This should choose 2353-686.
print "Calibrator chosen: %s, %.1f degrees away" % (bestCal['calibrator'].getName(),
bestCal['distance'])
# We add this calibrator to the schedule, attaching it to the scan it
# will be the calibrator for. We'll ask to observe the calibrator for 2
# minutes.
calScan = schedule.addCalibrator(bestCal['calibrator'], scan1, { 'scanLength': "00:02:00" })
# We want the schedule to run for about an hour, so we want another two copies
# of these two scans. Remembering that the library will take care of
# associating a calibrator to each source, we only need to copy the source
# scan.
for i in xrange(0, 2):
schedule.copyScans([ scan1.getId() ])
# Tell the library that we won't be looping, so there will be a calibrator scan at the
# end of the schedule.
schedule.setLooping(False)
# We need to turn this schedule into a string.
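Before serialization, a hedged back-of-envelope on the timing; the exact interleaving of calibrator and source scans is the library's decision, so the ordering below is only an assumption:
# Three 20-minute source scans bracketed by 2-minute calibrator scans,
# plus the trailing calibrator scan implied by looping being disabled.
scans = ["cal"] + ["magnetar", "cal"] * 3  # hypothetical ordering
minutes = sum(20 if s == "magnetar" else 2 for s in scans)
print minutes  # 68 minutes, roughly the hour targeted above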
|
Add an example script to illustrate how to use the API library.
|
Add an example script to illustrate how to use the API library.
|
Python
|
mit
|
ste616/atca-rapid-response-api
|
Add an example script to illustrate how to use the API library.
|
# ATCA Rapid Response Service
# Jamie.Stevens@csiro.au
# example1.py
# This example script shows how to make a schedule and request time for it from
# the web service.
# The modules we'll need.
import atca_rapid_response_api as arrApi
import cabb_scheduler as cabb
# Example 1.
# The situation is the same as in example 1 of the CABB scheduling library.
# Suppose an event trigger has been received for a flaring magnetar at
# coordinates RA = 01:00:43.1, Dec = -72:11:33.8.
# Make a new schedule.
schedule = cabb.schedule()
# Add a scan to look at the magnetar's coordinates.
# This is also where we set our project code; in this example we'll use
# the code C007 (we have a test authorisation token for this project).
# We'll also set it to be 20 minutes long, with Dwell mode.
scan1 = schedule.addScan(
{ 'source': "magnetar", 'rightAscension': "01:00:43.1", 'declination': "-72:11:33.8",
'freq1': 5500, 'freq2': 9000, 'project': "C007",
'scanLength': "00:20:00", 'scanType': "Dwell" }
)
# Request a list of nearby calibrators from the ATCA calibrator database.
calList = scan1.findCalibrator()
# Ask the library to choose the best one for the current array. We first need to
# get the current array from MoniCA.
currentArray = cabb.monica_information.getArray()
# And pass this as the argument to the calibrator selector.
bestCal = calList.getBestCalibrator(currentArray)
# This should choose 2353-686.
print "Calibrator chosen: %s, %.1f degrees away" % (bestCal['calibrator'].getName(),
bestCal['distance'])
# We add this calibrator to the schedule, attaching it to the scan it
# will be the calibrator for. We'll ask to observe the calibrator for 2
# minutes.
calScan = schedule.addCalibrator(bestCal['calibrator'], scan1, { 'scanLength': "00:02:00" })
# We want the schedule to run for about an hour, so we want another two copies
# of these two scans. Remembering that the library will take care of
# associating a calibrator to each source, we only need to copy the source
# scan.
for i in xrange(0, 2):
schedule.copyScans([ scan1.getId() ])
# Tell the library that we won't be looping, so there will be a calibrator scan at the
# end of the schedule.
schedule.setLooping(False)
# We need to turn this schedule into a string.
|
<commit_before><commit_msg>Add an example script to illustrate how to use the API library.<commit_after>
|
# ATCA Rapid Response Service
# Jamie.Stevens@csiro.au
# example1.py
# This example script shows how to make a schedule and request time for it from
# the web service.
# The modules we'll need.
import atca_rapid_response_api as arrApi
import cabb_scheduler as cabb
# Example 1.
# The situation is the same as in example 1 of the CABB scheduling library.
# Suppose an event trigger has been received for a flaring magnetar at
# coordinates RA = 01:00:43.1, Dec = -72:11:33.8.
# Make a new schedule.
schedule = cabb.schedule()
# Add a scan to look at the magnetar's coordinates.
# This is also where we set our project code; in this example we'll use
# the code C007 (we have a test authorisation token for this project).
# We'll also set it to be 20 minutes long, with Dwell mode.
scan1 = schedule.addScan(
{ 'source': "magnetar", 'rightAscension': "01:00:43.1", 'declination': "-72:11:33.8",
'freq1': 5500, 'freq2': 9000, 'project': "C007",
'scanLength': "00:20:00", 'scanType': "Dwell" }
)
# Request a list of nearby calibrators from the ATCA calibrator database.
calList = scan1.findCalibrator()
# Ask the library to choose the best one for the current array. We first need to
# get the current array from MoniCA.
currentArray = cabb.monica_information.getArray()
# And pass this as the argument to the calibrator selector.
bestCal = calList.getBestCalibrator(currentArray)
# This should choose 2353-686.
print "Calibrator chosen: %s, %.1f degrees away" % (bestCal['calibrator'].getName(),
bestCal['distance'])
# We add this calibrator to the schedule, attaching it to the scan it
# will be the calibrator for. We'll ask to observe the calibrator for 2
# minutes.
calScan = schedule.addCalibrator(bestCal['calibrator'], scan1, { 'scanLength': "00:02:00" })
# We want the schedule to run for about an hour, so we want another two copies
# of these two scans. Remembering that the library will take care of
# associating a calibrator to each source, we only need to copy the source
# scan.
for i in xrange(0, 2):
schedule.copyScans([ scan1.getId() ])
# Tell the library that we won't be looping, so there will be a calibrator scan at the
# end of the schedule.
schedule.setLooping(False)
# We need to turn this schedule into a string.
|
Add an example script to illustrate how to use the API library.# ATCA Rapid Response Service
# Jamie.Stevens@csiro.au
# example1.py
# This example script shows how to make a schedule and request time for it from
# the web service.
# The modules we'll need.
import atca_rapid_response_api as arrApi
import cabb_scheduler as cabb
# Example 1.
# The situation is the same as in example 1 of the CABB scheduling library.
# Suppose an event trigger has been received for a flaring magnetar at
# coordinates RA = 01:00:43.1, Dec = -72:11:33.8.
# Make a new schedule.
schedule = cabb.schedule()
# Add a scan to look at the magnetar's coordinates.
# This is also where we set our project code; in this example we'll use
# the code C007 (we have a test authorisation token for this project).
# We'll also set it to be 20 minutes long, with Dwell mode.
scan1 = schedule.addScan(
{ 'source': "magnetar", 'rightAscension': "01:00:43.1", 'declination': "-72:11:33.8",
'freq1': 5500, 'freq2': 9000, 'project': "C007",
'scanLength': "00:20:00", 'scanType': "Dwell" }
)
# Request a list of nearby calibrators from the ATCA calibrator database.
calList = scan1.findCalibrator()
# Ask the library to choose the best one for the current array. We first need to
# get the current array from MoniCA.
currentArray = cabb.monica_information.getArray()
# And pass this as the argument to the calibrator selector.
bestCal = calList.getBestCalibrator(currentArray)
# This should choose 2353-686.
print "Calibrator chosen: %s, %.1f degrees away" % (bestCal['calibrator'].getName(),
bestCal['distance'])
# We add this calibrator to the schedule, attaching it to the scan it
# will be the calibrator for. We'll ask to observe the calibrator for 2
# minutes.
calScan = schedule.addCalibrator(bestCal['calibrator'], scan1, { 'scanLength': "00:02:00" })
# We want the schedule to run for about an hour, so we want another two copies
# of these two scans. Remembering that the library will take care of
# associating a calibrator to each source, we only need to copy the source
# scan.
for i in xrange(0, 2):
schedule.copyScans([ scan1.getId() ])
# Tell the library that we won't be looping, so there will be a calibrator scan at the
# end of the schedule.
schedule.setLooping(False)
# We need to turn this schedule into a string.
|
<commit_before><commit_msg>Add an example script to illustrate how to use the API library.<commit_after># ATCA Rapid Response Service
# Jamie.Stevens@csiro.au
# example1.py
# This example script shows how to make a schedule and request time for it from
# the web service.
# The modules we'll need.
import atca_rapid_response_api as arrApi
import cabb_scheduler as cabb
# Example 1.
# The situation is the same as in example 1 of the CABB scheduling library.
# Suppose an event trigger has been received for a flaring magnetar at
# coordinates RA = 01:00:43.1, Dec = -72:11:33.8.
# Make a new schedule.
schedule = cabb.schedule()
# Add a scan to look at the magnetar's coordinates.
# This is also where we set our project code; in this example we'll use
# the code C007 (we have a test authorisation token for this project).
# We'll also set it to be 20 minutes long, with Dwell mode.
scan1 = schedule.addScan(
{ 'source': "magnetar", 'rightAscension': "01:00:43.1", 'declination': "-72:11:33.8",
'freq1': 5500, 'freq2': 9000, 'project': "C007",
'scanLength': "00:20:00", 'scanType': "Dwell" }
)
# Request a list of nearby calibrators from the ATCA calibrator database.
calList = scan1.findCalibrator()
# Ask the library to choose the best one for the current array. We first need to
# get the current array from MoniCA.
currentArray = cabb.monica_information.getArray()
# And pass this as the argument to the calibrator selector.
bestCal = calList.getBestCalibrator(currentArray)
# This should choose 2353-686.
print "Calibrator chosen: %s, %.1f degrees away" % (bestCal['calibrator'].getName(),
bestCal['distance'])
# We add this calibrator to the schedule, attaching it to the scan it
# will be the calibrator for. We'll ask to observe the calibrator for 2
# minutes.
calScan = schedule.addCalibrator(bestCal['calibrator'], scan1, { 'scanLength': "00:02:00" })
# We want the schedule to run for about an hour, so we want another two copies
# of these two scans. Remembering that the library will take care of
# associating a calibrator to each source, we only need to copy the source
# scan.
for i in xrange(0, 2):
schedule.copyScans([ scan1.getId() ])
# Tell the library that we won't be looping, so there will be a calibrator scan at the
# end of the schedule.
schedule.setLooping(False)
# We need to turn this schedule into a string.
|
|
45a68b285306ed3030cba1fdcc388c4ea1df3855
|
folia2text.py
|
folia2text.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Script to extract text from a folia xml file.
"""
import os
import argparse
import codecs
from lxml import etree
from bs4 import BeautifulSoup
from emotools.bs4_helpers import act, sentence, note
def act2text(act_xml):
"""Extract text from act.
    Returns a list of text strings that can be joined and written to file.
"""
text = []
print 'act:', act_xml.find('div', 'act').attrs.get('xml:id')
subacts = act_xml.find_all(act)
# act_xml should contain exactly one act; if it contains more acts, these
# acts are sub acts, that will be processed later
if len(subacts) == 1:
for elem in act_xml.descendants:
if sentence(elem) and not note(elem.parent):
# some t elements appear to be empty (this is not allowed, but
# it happens). So, check whether there is a string to add
# before adding it.
if elem.t:
if elem.t.string:
text.append(elem.t.string)
return text
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('input_dir', help='the name of the directory '
'containing the FoLiA XML files that should be '
'processed')
parser.add_argument('output_dir', help='the directory where the '
'generated text files should be saved')
args = parser.parse_args()
input_dir = args.input_dir
output_dir = args.output_dir
if not os.path.exists(output_dir):
os.makedirs(output_dir)
act_tag = '{http://ilk.uvt.nl/folia}div'
os.chdir(input_dir)
counter = 0
for file_name in os.listdir(input_dir):
counter += 1
print '({}) {}'.format(counter, file_name)
# load document
context = etree.iterparse(file_name,
events=('start', 'end'),
tag=act_tag,
huge_tree=True)
text = []
for event, elem in context:
            if event == 'end' and elem.tag == act_tag and elem.get('class') == 'act':
# load act into memory
act_xml = BeautifulSoup(etree.tostring(elem), 'xml')
text += act2text(act_xml)
del context
# write text to file
out_file = os.path.join(output_dir, '{}.txt'.format(file_name[0:13]))
print 'Writing file: {}'.format(out_file)
with codecs.open(out_file, 'wb', encoding='utf-8') as f:
f.write('\n'.join(text))
print ''
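The loop above now filters to 'end' events, so each act is serialized once, after its children are parsed. A further hedged refinement (not part of the original script): on very large FoLiA files, clearing each processed element keeps lxml's memory use flat.
for event, elem in context:
    if event == 'end' and elem.tag == act_tag and elem.get('class') == 'act':
        text += act2text(BeautifulSoup(etree.tostring(elem), 'xml'))
        elem.clear()  # drop the act's subtree
        while elem.getprevious() is not None:
            del elem.getparent()[0]  # drop already-processed siblings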
|
Add script to print all text in folia xml file
|
Add script to print all text in folia xml file
Extract text in acts from folia xml files. Processes all folia xml files
in the input directory and puts output texts in the output directory.
|
Python
|
apache-2.0
|
NLeSC/embodied-emotions-scripts,NLeSC/embodied-emotions-scripts
|
Add script to print all text in folia xml file
Extract text in acts from folia xml files. Processes all folia xml files
in the input directory and puts output texts in the output directory.
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Script to extract text from a folia xml file.
"""
import os
import argparse
import codecs
from lxml import etree
from bs4 import BeautifulSoup
from emotools.bs4_helpers import act, sentence, note
def act2text(act_xml):
"""Extract text from act.
    Returns a list of text strings that can be joined and written to file.
"""
text = []
print 'act:', act_xml.find('div', 'act').attrs.get('xml:id')
subacts = act_xml.find_all(act)
# act_xml should contain exactly one act; if it contains more acts, these
# acts are sub acts, that will be processed later
if len(subacts) == 1:
for elem in act_xml.descendants:
if sentence(elem) and not note(elem.parent):
# some t elements appear to be empty (this is not allowed, but
# it happens). So, check whether there is a string to add
# before adding it.
if elem.t:
if elem.t.string:
text.append(elem.t.string)
return text
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('input_dir', help='the name of the directory '
'containing the FoLiA XML files that should be '
'processed')
parser.add_argument('output_dir', help='the directory where the '
'generated text files should be saved')
args = parser.parse_args()
input_dir = args.input_dir
output_dir = args.output_dir
if not os.path.exists(output_dir):
os.makedirs(output_dir)
act_tag = '{http://ilk.uvt.nl/folia}div'
os.chdir(input_dir)
counter = 0
for file_name in os.listdir(input_dir):
counter += 1
print '({}) {}'.format(counter, file_name)
# load document
context = etree.iterparse(file_name,
events=('start', 'end'),
tag=act_tag,
huge_tree=True)
text = []
for event, elem in context:
            if event == 'end' and elem.tag == act_tag and elem.get('class') == 'act':
# load act into memory
act_xml = BeautifulSoup(etree.tostring(elem), 'xml')
text += act2text(act_xml)
del context
# write text to file
out_file = os.path.join(output_dir, '{}.txt'.format(file_name[0:13]))
print 'Writing file: {}'.format(out_file)
with codecs.open(out_file, 'wb', encoding='utf-8') as f:
f.write('\n'.join(text))
print ''
|
<commit_before><commit_msg>Add script to print all text in folia xml file
Extract text in acts from folia xml files. Processes all folia xml files
in the input directory and puts output texts in the output directory.<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Script to extract text from a folia xml file.
"""
import os
import argparse
import codecs
from lxml import etree
from bs4 import BeautifulSoup
from emotools.bs4_helpers import act, sentence, note
def act2text(act_xml):
"""Extract text from act.
    Returns a list of text strings that can be joined and written to file.
"""
text = []
print 'act:', act_xml.find('div', 'act').attrs.get('xml:id')
subacts = act_xml.find_all(act)
# act_xml should contain exactly one act; if it contains more acts, these
# acts are sub acts, that will be processed later
if len(subacts) == 1:
for elem in act_xml.descendants:
if sentence(elem) and not note(elem.parent):
# some t elements appear to be empty (this is not allowed, but
# it happens). So, check whether there is a string to add
# before adding it.
if elem.t:
if elem.t.string:
text.append(elem.t.string)
return text
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('input_dir', help='the name of the directory '
'containing the FoLiA XML files that should be '
'processed')
parser.add_argument('output_dir', help='the directory where the '
'generated text files should be saved')
args = parser.parse_args()
input_dir = args.input_dir
output_dir = args.output_dir
if not os.path.exists(output_dir):
os.makedirs(output_dir)
act_tag = '{http://ilk.uvt.nl/folia}div'
os.chdir(input_dir)
counter = 0
for file_name in os.listdir(input_dir):
counter += 1
print '({}) {}'.format(counter, file_name)
# load document
context = etree.iterparse(file_name,
events=('start', 'end'),
tag=act_tag,
huge_tree=True)
text = []
for event, elem in context:
            if event == 'end' and elem.tag == act_tag and elem.get('class') == 'act':
# load act into memory
act_xml = BeautifulSoup(etree.tostring(elem), 'xml')
text += act2text(act_xml)
del context
# write text to file
out_file = os.path.join(output_dir, '{}.txt'.format(file_name[0:13]))
print 'Writing file: {}'.format(out_file)
with codecs.open(out_file, 'wb', encoding='utf-8') as f:
f.write('\n'.join(text))
print ''
|
Add script to print all text in folia xml file
Extract text in acts from folia xml files. Processes all folia xml files
in the input directory and puts output texts in the output directory.#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Script to extract text from a folia xml file.
"""
import os
import argparse
import codecs
from lxml import etree
from bs4 import BeautifulSoup
from emotools.bs4_helpers import act, sentence, note
def act2text(act_xml):
"""Extract text from act.
    Returns a list of text strings that can be joined and written to file.
"""
text = []
print 'act:', act_xml.find('div', 'act').attrs.get('xml:id')
subacts = act_xml.find_all(act)
# act_xml should contain exactly one act; if it contains more acts, these
# acts are sub acts, that will be processed later
if len(subacts) == 1:
for elem in act_xml.descendants:
if sentence(elem) and not note(elem.parent):
# some t elements appear to be empty (this is not allowed, but
# it happens). So, check whether there is a string to add
# before adding it.
if elem.t:
if elem.t.string:
text.append(elem.t.string)
return text
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('input_dir', help='the name of the directory '
'containing the FoLiA XML files that should be '
'processed')
parser.add_argument('output_dir', help='the directory where the '
'generated text files should be saved')
args = parser.parse_args()
input_dir = args.input_dir
output_dir = args.output_dir
if not os.path.exists(output_dir):
os.makedirs(output_dir)
act_tag = '{http://ilk.uvt.nl/folia}div'
os.chdir(input_dir)
counter = 0
for file_name in os.listdir(input_dir):
counter += 1
print '({}) {}'.format(counter, file_name)
# load document
context = etree.iterparse(file_name,
events=('start', 'end'),
tag=act_tag,
huge_tree=True)
text = []
for event, elem in context:
            if event == 'end' and elem.tag == act_tag and elem.get('class') == 'act':
# load act into memory
act_xml = BeautifulSoup(etree.tostring(elem), 'xml')
text += act2text(act_xml)
del context
# write text to file
out_file = os.path.join(output_dir, '{}.txt'.format(file_name[0:13]))
print 'Writing file: {}'.format(out_file)
with codecs.open(out_file, 'wb', encoding='utf-8') as f:
f.write('\n'.join(text))
print ''
|
<commit_before><commit_msg>Add script to print all text in folia xml file
Extract text in acts from folia xml files. Processes all folia xml files
in the input directory and puts output texts in the output directory.<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Script to extract text from a folia xml file.
"""
import os
import argparse
import codecs
from lxml import etree
from bs4 import BeautifulSoup
from emotools.bs4_helpers import act, sentence, note
def act2text(act_xml):
"""Extract text from act.
    Returns a list of text strings that can be joined and written to file.
"""
text = []
print 'act:', act_xml.find('div', 'act').attrs.get('xml:id')
subacts = act_xml.find_all(act)
# act_xml should contain exactly one act; if it contains more acts, these
# acts are sub acts, that will be processed later
if len(subacts) == 1:
for elem in act_xml.descendants:
if sentence(elem) and not note(elem.parent):
# some t elements appear to be empty (this is not allowed, but
# it happens). So, check whether there is a string to add
# before adding it.
if elem.t:
if elem.t.string:
text.append(elem.t.string)
return text
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('input_dir', help='the name of the directory '
'containing the FoLiA XML files that should be '
'processed')
parser.add_argument('output_dir', help='the directory where the '
'generated text files should be saved')
args = parser.parse_args()
input_dir = args.input_dir
output_dir = args.output_dir
if not os.path.exists(output_dir):
os.makedirs(output_dir)
act_tag = '{http://ilk.uvt.nl/folia}div'
os.chdir(input_dir)
counter = 0
for file_name in os.listdir(input_dir):
counter += 1
print '({}) {}'.format(counter, file_name)
# load document
context = etree.iterparse(file_name,
events=('start', 'end'),
tag=act_tag,
huge_tree=True)
text = []
for event, elem in context:
            if event == 'end' and elem.tag == act_tag and elem.get('class') == 'act':
# load act into memory
act_xml = BeautifulSoup(etree.tostring(elem), 'xml')
text += act2text(act_xml)
del context
# write text to file
out_file = os.path.join(output_dir, '{}.txt'.format(file_name[0:13]))
print 'Writing file: {}'.format(out_file)
with codecs.open(out_file, 'wb', encoding='utf-8') as f:
f.write('\n'.join(text))
print ''
|
|
fb5612d641296a022a869bd0a4b9a0aed9255e51
|
_pytest/test_formatting.py
|
_pytest/test_formatting.py
|
import pytest
import wee_slack
@pytest.mark.parametrize("text", [
"""
* an item
* another item
""",
"* Run this command: `find . -name '*.exe'`",
])
def test_does_not_format(text):
assert wee_slack.render_formatting(text) == text
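A hedged companion case (it assumes wee-slack renders `*bold*` markup, and deliberately avoids asserting the exact color codes):
def test_formats_bold():
    assert wee_slack.render_formatting("*hello*") != "*hello*"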
|
Add tests for no formatting cases
|
Add tests for no formatting cases
|
Python
|
mit
|
wee-slack/wee-slack,rawdigits/wee-slack,trygveaa/wee-slack
|
Add tests for no formatting cases
|
import pytest
import wee_slack
@pytest.mark.parametrize("text", [
"""
* an item
* another item
""",
"* Run this command: `find . -name '*.exe'`",
])
def test_does_not_format(text):
assert wee_slack.render_formatting(text) == text
|
<commit_before><commit_msg>Add tests for no formatting cases<commit_after>
|
import pytest
import wee_slack
@pytest.mark.parametrize("text", [
"""
* an item
* another item
""",
"* Run this command: `find . -name '*.exe'`",
])
def test_does_not_format(text):
assert wee_slack.render_formatting(text) == text
|
Add tests for no formatting casesimport pytest
import wee_slack
@pytest.mark.parametrize("text", [
"""
* an item
* another item
""",
"* Run this command: `find . -name '*.exe'`",
])
def test_does_not_format(text):
assert wee_slack.render_formatting(text) == text
|
<commit_before><commit_msg>Add tests for no formatting cases<commit_after>import pytest
import wee_slack
@pytest.mark.parametrize("text", [
"""
* an item
* another item
""",
"* Run this command: `find . -name '*.exe'`",
])
def test_does_not_format(text):
assert wee_slack.render_formatting(text) == text
|
|
45a9c2b3b8e0033c6dca5b87d52f515d23a9356c
|
leetcode/077-Combinations/Combinations_002.py
|
leetcode/077-Combinations/Combinations_002.py
|
class Solution(object):
def combine(self, n, k):
"""
:type n: int
:type k: int
:rtype: List[List[int]]
"""
if n < 1:
return []
if k == 1:
return [[i] for i in range(1, n + 1)]
res = self.combine(n - 1, k)
tmp = self.combine(n - 1, k - 1)
for t in tmp:
res.append(t + [n])
return res
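The recursion mirrors Pascal's rule, C(n, k) = C(n-1, k) + C(n-1, k-1): the second term supplies every combination that contains n. A quick hedged check with values chosen here:
print Solution().combine(4, 2)
# [[1, 2], [1, 3], [2, 3], [1, 4], [2, 4], [3, 4]]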
|
Create Combination like 001 for Leetcode
|
Create Combination like 001 for Leetcode
|
Python
|
mit
|
Chasego/codirit,Chasego/codi,Chasego/cod,cc13ny/Allin,cc13ny/algo,Chasego/codirit,Chasego/codirit,Chasego/cod,Chasego/codi,Chasego/codirit,Chasego/cod,Chasego/codi,cc13ny/algo,Chasego/codi,cc13ny/Allin,Chasego/codirit,cc13ny/algo,cc13ny/Allin,cc13ny/Allin,cc13ny/algo,cc13ny/algo,cc13ny/Allin,Chasego/codi,Chasego/cod,Chasego/cod
|
Create Combination like 001 for Leetcode
|
class Solution(object):
def combine(self, n, k):
"""
:type n: int
:type k: int
:rtype: List[List[int]]
"""
if n < 1:
return []
if k == 1:
return [[i] for i in range(1, n + 1)]
res = self.combine(n - 1, k)
tmp = self.combine(n - 1, k - 1)
for t in tmp:
res.append(t + [n])
return res
|
<commit_before><commit_msg>Create Combination like 001 for Leetcode<commit_after>
|
class Solution(object):
def combine(self, n, k):
"""
:type n: int
:type k: int
:rtype: List[List[int]]
"""
if n < 1:
return []
if k == 1:
return [[i] for i in range(1, n + 1)]
res = self.combine(n - 1, k)
tmp = self.combine(n - 1, k - 1)
for t in tmp:
res.append(t + [n])
return res
|
Create Combination like 001 for Leetcodeclass Solution(object):
def combine(self, n, k):
"""
:type n: int
:type k: int
:rtype: List[List[int]]
"""
if n < 1:
return []
if k == 1:
return [[i] for i in range(1, n + 1)]
res = self.combine(n - 1, k)
tmp = self.combine(n - 1, k - 1)
for t in tmp:
res.append(t + [n])
return res
|
<commit_before><commit_msg>Create Combination like 001 for Leetcode<commit_after>class Solution(object):
def combine(self, n, k):
"""
:type n: int
:type k: int
:rtype: List[List[int]]
"""
if n < 1:
return []
if k == 1:
return [[i] for i in range(1, n + 1)]
res = self.combine(n - 1, k)
tmp = self.combine(n - 1, k - 1)
for t in tmp:
res.append(t + [n])
return res
|
|
e4994b15cd40ec8b70f0dee0b8311cd238a18e66
|
common/djangoapps/student/management/commands/anonymized_id_mapping.py
|
common/djangoapps/student/management/commands/anonymized_id_mapping.py
|
import csv
import sys
from django.contrib.auth.models import User
from django.core.management.base import BaseCommand, CommandError
from student.models import unique_id_for_user
class Command(BaseCommand):
# It appears that with the way Rake invokes these commands, we can't
# have more than one arg passed through...annoying.
args = ("course_id", )
help = """
    Exports a CSV document mapping from a user ID to the anonymized,
    unique user ID for every user in the specified course.
"""
def handle(self, *args, **options):
if len(args) != 1:
raise CommandError("Usage: unique_id_mapping %s" %
" ".join(("<%s>" % arg for arg in Command.args)))
course_id = args[0]
# Generate the output filename from the course ID.
# Change slashes to dashes first, and then append .csv extension.
output_filename = course_id.replace('/', '-') + ".csv"
# Figure out which students are enrolled in the course
students = User.objects.filter(courseenrollment__course_id=course_id)
if len(students) == 0:
self.stdout.write("No students enrolled in %s" % course_id)
return
# Write mapping to output file in CSV format with a simple header
try:
with open(output_filename, 'wb') as output_file:
csv_writer = csv.writer(output_file)
csv_writer.writerow(("User ID", "Anonymized user ID"))
for student in students:
csv_writer.writerow((student.id, unique_id_for_user(student)))
except IOError:
raise CommandError("Error writing to file: %s" % output_filename)
|
Add mgmt cmd to generate anonymized ID mapping
|
Add mgmt cmd to generate anonymized ID mapping
So that instructors have easy access to the mapping from anonymized
IDs (a simple MD5 hash of the user ID that's used when integrating
with third-party services like Qualtrics) to user IDs, we have a
simple Django management command to generate a CSV mapping. To run,
use the following:
rake django-admin[anonymized_id_mapping,<system>,<env>,<course_id>]
And, of course, substitute the appropriate values for <system>, <env>,
and <course_id>. (E.g., lms, dev, and MITx/6.002x/Circuits)
|
Python
|
agpl-3.0
|
chand3040/cloud_that,jolyonb/edx-platform,BehavioralInsightsTeam/edx-platform,kxliugang/edx-platform,ZLLab-Mooc/edx-platform,bigdatauniversity/edx-platform,appsembler/edx-platform,shubhdev/openedx,TsinghuaX/edx-platform,shubhdev/openedx,ZLLab-Mooc/edx-platform,mitocw/edx-platform,ZLLab-Mooc/edx-platform,msegado/edx-platform,syjeon/new_edx,BehavioralInsightsTeam/edx-platform,Edraak/edraak-platform,apigee/edx-platform,DefyVentures/edx-platform,RPI-OPENEDX/edx-platform,UOMx/edx-platform,zadgroup/edx-platform,tanmaykm/edx-platform,antonve/s4-project-mooc,hamzehd/edx-platform,yokose-ks/edx-platform,jruiperezv/ANALYSE,pepeportela/edx-platform,CredoReference/edx-platform,mbareta/edx-platform-ft,CredoReference/edx-platform,Lektorium-LLC/edx-platform,y12uc231/edx-platform,pdehaye/theming-edx-platform,hkawasaki/kawasaki-aio8-1,Edraak/edx-platform,LearnEra/LearnEraPlaftform,AkA84/edx-platform,doismellburning/edx-platform,JioEducation/edx-platform,kxliugang/edx-platform,RPI-OPENEDX/edx-platform,edry/edx-platform,procangroup/edx-platform,shurihell/testasia,SivilTaram/edx-platform,mbareta/edx-platform-ft,morenopc/edx-platform,simbs/edx-platform,ak2703/edx-platform,arbrandes/edx-platform,inares/edx-platform,nikolas/edx-platform,Semi-global/edx-platform,a-parhom/edx-platform,kmoocdev/edx-platform,Shrhawk/edx-platform,procangroup/edx-platform,xingyepei/edx-platform,abdoosh00/edx-rtl-final,10clouds/edx-platform,EDUlib/edx-platform,defance/edx-platform,shubhdev/edxOnBaadal,nanolearning/edx-platform,rhndg/openedx,DefyVentures/edx-platform,abdoosh00/edraak,jamiefolsom/edx-platform,atsolakid/edx-platform,kmoocdev/edx-platform,angelapper/edx-platform,Edraak/circleci-edx-platform,msegado/edx-platform,vasyarv/edx-platform,jbzdak/edx-platform,lduarte1991/edx-platform,jamesblunt/edx-platform,doganov/edx-platform,chudaol/edx-platform,devs1991/test_edx_docmode,jazkarta/edx-platform-for-isc,xuxiao19910803/edx-platform,halvertoluke/edx-platform,JCBarahona/edX,solashirai/edx-platform,doganov/edx-platform,LICEF/edx-platform,dkarakats/edx-platform,mjg2203/edx-platform-seas,stvstnfrd/edx-platform,ahmadiga/min_edx,B-MOOC/edx-platform,jazkarta/edx-platform-for-isc,jbassen/edx-platform,leansoft/edx-platform,rhndg/openedx,simbs/edx-platform,hkawasaki/kawasaki-aio8-0,pelikanchik/edx-platform,solashirai/edx-platform,BehavioralInsightsTeam/edx-platform,Edraak/circleci-edx-platform,arifsetiawan/edx-platform,caesar2164/edx-platform,zofuthan/edx-platform,IndonesiaX/edx-platform,analyseuc3m/ANALYSE-v1,morpheby/levelup-by,Livit/Livit.Learn.EdX,arbrandes/edx-platform,JCBarahona/edX,dsajkl/reqiop,zofuthan/edx-platform,naresh21/synergetics-edx-platform,pku9104038/edx-platform,IndonesiaX/edx-platform,jonathan-beard/edx-platform,Edraak/circleci-edx-platform,arifsetiawan/edx-platform,carsongee/edx-platform,pabloborrego93/edx-platform,xuxiao19910803/edx,JCBarahona/edX,SravanthiSinha/edx-platform,auferack08/edx-platform,Ayub-Khan/edx-platform,fly19890211/edx-platform,WatanabeYasumasa/edx-platform,pepeportela/edx-platform,louyihua/edx-platform,Edraak/circleci-edx-platform,WatanabeYasumasa/edx-platform,hkawasaki/kawasaki-aio8-1,AkA84/edx-platform,jswope00/GAI,hastexo/edx-platform,cyanna/edx-platform,hamzehd/edx-platform,nttks/edx-platform,eemirtekin/edx-platform,Softmotions/edx-platform,romain-li/edx-platform,Ayub-Khan/edx-platform,MakeHer/edx-platform,SravanthiSinha/edx-platform,longmen21/edx-platform,beni55/edx-platform,jamiefolsom/edx-platform,IONISx/edx-platform,vikas1885/test1,4eek/edx-platform,nttks/edx-platform,sudheerchintala/LearnEraPlatForm,benpatterson/edx-platform,adoosii/edx-platform,xinjiguaike/edx-platform,rue89-tech/edx-platform,zadgroup/edx-platform,MSOpenTech/edx-platform,EduPepperPD/pepper2013,hkawasaki/kawasaki-aio8-1,andyzsf/edx,jamiefolsom/edx-platform,TeachAtTUM/edx-platform,EDUlib/edx-platform,andyzsf/edx,alexthered/kienhoc-platform,eestay/edx-platform,ahmadio/edx-platform,edx/edx-platform,halvertoluke/edx-platform,nagyistoce/edx-platform,motion2015/a3,waheedahmed/edx-platform,Unow/edx-platform,hkawasaki/kawasaki-aio8-2,procangroup/edx-platform,RPI-OPENEDX/edx-platform,jbassen/edx-platform,don-github/edx-platform,olexiim/edx-platform,CredoReference/edx-platform,EduPepperPD/pepper2013,CourseTalk/edx-platform,valtech-mooc/edx-platform,proversity-org/edx-platform,alexthered/kienhoc-platform,defance/edx-platform,xuxiao19910803/edx,cselis86/edx-platform,mushtaqak/edx-platform,kursitet/edx-platform,xinjiguaike/edx-platform,jamiefolsom/edx-platform,MSOpenTech/edx-platform,martynovp/edx-platform,unicri/edx-platform,Semi-global/edx-platform,SivilTaram/edx-platform,DNFcode/edx-platform,jelugbo/tundex,miptliot/edx-platform,4eek/edx-platform,peterm-itr/edx-platform,SravanthiSinha/edx-platform,amir-qayyum-khan/edx-platform,longmen21/edx-platform,kmoocdev2/edx-platform,prarthitm/edxplatform,nttks/jenkins-test,cpennington/edx-platform,Shrhawk/edx-platform,ahmedaljazzar/edx-platform,rue89-tech/edx-platform,openfun/edx-platform,eduNEXT/edunext-platform,zofuthan/edx-platform,bitifirefly/edx-platform,mbareta/edx-platform-ft,polimediaupv/edx-platform,wwj718/ANALYSE,jruiperezv/ANALYSE,pelikanchik/edx-platform,adoosii/edx-platform,vikas1885/test1,mjg2203/edx-platform-seas,apigee/edx-platform,jelugbo/tundex,BehavioralInsightsTeam/edx-platform,UOMx/edx-platform,torchingloom/edx-platform,bdero/edx-platform,andyzsf/edx,jswope00/GAI,tiagochiavericosta/edx-platform,synergeticsedx/deployment-wipro,dcosentino/edx-platform,AkA84/edx-platform,simbs/edx-platform,tanmaykm/edx-platform,jamesblunt/edx-platform,Semi-global/edx-platform,leansoft/edx-platform,zhenzhai/edx-platform,vismartltd/edx-platform,xuxiao19910803/edx,polimediaupv/edx-platform,sameetb-cuelogic/edx-platform-test,simbs/edx-platform,mtlchun/edx,amir-qayyum-khan/edx-platform,JCBarahona/edX,jazztpt/edx-platform,nanolearningllc/edx-platform-cypress,PepperPD/edx-pepper-platform,ahmedaljazzar/edx-platform,valtech-mooc/edx-platform,fly19890211/edx-platform,Unow/edx-platform,edry/edx-platform,shubhdev/edxOnBaadal,beni55/edx-platform,bigdatauniversity/edx-platform,mushtaqak/edx-platform,mushtaqak/edx-platform,louyihua/edx-platform,Kalyzee/edx-platform,jonathan-beard/edx-platform,halvertoluke/edx-platform,Edraak/edx-platform,cselis86/edx-platform,Lektorium-LLC/edx-platform,sameetb-cuelogic/edx-platform-test,appliedx/edx-platform,Edraak/edx-platform,ampax/edx-platform-backup,motion2015/a3,beacloudgenius/edx-platform,romain-li/edx-platform,xuxiao19910803/edx-platform,inares/edx-platform,appliedx/edx-platform,romain-li/edx-platform,pomegranited/edx-platform,zhenzhai/edx-platform,sameetb-cuelogic/edx-platform-test,openfun/edx-platform,chauhanhardik/populo_2,miptliot/edx-platform,appsembler/edx-platform,jbassen/edx-platform,ubc/edx-platform,cecep-edu/edx-platform,alu042/edx-platform,mcgachey/edx-platform,OmarIthawi/edx-platform,peterm-itr/edx-platform,Shrhawk/edx-platform,TsinghuaX/edx-platform,praveen-pal/edx-platform,jamesblunt/edx-platform,jelugbo/tundex,eduNEXT/edx-platform,antonve/s4-project-mooc,shashank971/edx-platform,vismartltd/edx-platform,carsongee/edx-platform,mitocw/edx-platform,ZLLab-Mooc/edx-platform,cselis86/edx-platform,olexiim/edx-platform,PepperPD/edx-pepper-platform,openfun/edx-platform,dcosentino/edx-platform,jonathan-beard/edx-platform,miptliot/edx-platform,alu042/edx-platform,utecuy/edx-platform,stvstnfrd/edx-platform,ampax/edx-platform,mahendra-r/edx-platform,bitifirefly/edx-platform,devs1991/test_edx_docmode,marcore/edx-platform,rhndg/openedx,nagyistoce/edx-platform,jazkarta/edx-platform,EDUlib/edx-platform,eduNEXT/edunext-platform,mbareta/edx-platform-ft,vasyarv/edx-platform,EduPepperPD/pepper2013,jazztpt/edx-platform,pdehaye/theming-edx-platform,SivilTaram/edx-platform,CourseTalk/edx-platform,zerobatu/edx-platform,mjirayu/sit_academy,B-MOOC/edx-platform,eestay/edx-platform,procangroup/edx-platform,etzhou/edx-platform,benpatterson/edx-platform,jswope00/GAI,olexiim/edx-platform,solashirai/edx-platform,shashank971/edx-platform,RPI-OPENEDX/edx-platform,gsehub/edx-platform,teltek/edx-platform,torchingloom/edx-platform,ovnicraft/edx-platform,mcgachey/edx-platform,DNFcode/edx-platform,eduNEXT/edx-platform,hastexo/edx-platform,deepsrijit1105/edx-platform,J861449197/edx-platform,playm2mboy/edx-platform,hastexo/edx-platform,jswope00/griffinx,mahendra-r/edx-platform,shurihell/testasia,cognitiveclass/edx-platform,cyanna/edx-platform,4eek/edx-platform,vismartltd/edx-platform,kursitet/edx-platform,motion2015/edx-platform,chand3040/cloud_that,franosincic/edx-platform,EduPepperPDTesting/pepper2013-testing,rismalrv/edx-platform,kamalx/edx-platform,ampax/edx-platform,etzhou/edx-platform,Edraak/edx-platform,synergeticsedx/deployment-wipro,solashirai/edx-platform,itsjeyd/edx-platform,chrisndodge/edx-platform,chauhanhardik/populo_2,ahmadiga/min_edx,teltek/edx-platform,arifsetiawan/edx-platform,waheedahmed/edx-platform,msegado/edx-platform,openfun/edx-platform,arbrandes/edx-platform,kmoocdev2/edx-platform,motion2015/a3,sameetb-cuelogic/edx-platform-test,shashank971/edx-platform,nttks/edx-platform,knehez/edx-platform,kmoocdev/edx-platform,longmen21/edx-platform,inares/edx-platform,cselis86/edx-platform,10clouds/edx-platform,eduNEXT/edunext-platform,nttks/jenkins-test,nanolearningllc/edx-platform-cypress,eemirtekin/edx-platform,xingyepei/edx-platform,nanolearningllc/edx-platform-cypress-2,zubair-arbi/edx-platform,deepsrijit1105/edx-platform,analyseuc3m/ANALYSE-v1,cpennington/edx-platform,polimediaupv/edx-platform,morenopc/edx-platform,jbassen/edx-platform,pepeportela/edx-platform,apigee/edx-platform,chauhanhardik/populo_2,antoviaque/edx-platform,chand3040/cloud_that,ovnicraft/edx-platform,cpennington/edx-platform,OmarIthawi/edx-platform,gymnasium/edx-platform,chauhanhardik/populo,playm2mboy/edx-platform,appliedx/edx-platform,edry/edx-platform,DefyVentures/edx-platform,jazkarta/edx-platform-for-isc,OmarIthawi/edx-platform,dsajkl/reqiop,appliedx/edx-platform,adoosii/edx-platform,kamalx/edx-platform,ovnicraft/edx-platform,jazztpt/edx-platform,nagyistoce/edx-platform,doismellburning/edx-platform,chauhanhardik/populo_2,J861449197/edx-platform,teltek/edx-platform,unicri/edx-platform,don-github/edx-platform,syjeon/new_edx,bdero/edx-platform,atsolakid/edx-platform,LICEF/edx-platform,jbzdak/edx-platform,EduPepperPDTesting/pepper2013-testing,franosincic/edx-platform,IONISx/edx-platform,JioEducation/edx-platform,doismellburning/edx-platform,JCBarahona/edX,nikolas/edx-platform,stvstnfrd/edx-platform,synergeticsedx/deployment-wipro,bigdatauniversity/edx-platform,shubhdev/openedx,teltek/edx-platform,pku9104038/edx-platform,apigee/edx-platform,DefyVentures/edx-platform,vikas1885/test1,fintech-circle/edx-platform,kmoocdev/edx-platform,edry/edx-platform,vasyarv/edx-platform,TeachAtTUM/edx-platform,longmen21/edx-platform,mahendra-r/edx-platform,jswope00/griffinx,jzoldak/edx-platform,ferabra/edx-platform,rhndg/openedx,ferabra/edx-platform,dkarakats/edx-platform,raccoongang/edx-platform,beacloudgenius/edx-platform,don-github/edx-platform,doganov/edx-platform,torchingloom/edx-platform,ak2703/edx-platform,louyihua/edx-platform,MakeHer/edx-platform,leansoft/edx-platform,lduarte1991/edx-platform,motion2015/edx-platform,tanmaykm/edx-platform,dsajkl/123,jelugbo/tundex,MakeHer/edx-platform,y12uc231/edx-platform,doismellburning/edx-platform,a-parhom/edx-platform,kxliugang/edx-platform,PepperPD/edx-pepper-platform,Ayub-Khan/edx-platform,zofuthan/edx-platform,fintech-circle/edx-platform,torchingloom/edx-platform,IndonesiaX/edx-platform,abdoosh00/edraak,cecep-edu/edx-platform,xuxiao19910803/edx-platform,mahendra-r/edx-platform,Lektorium-LLC/edx-platform,hkawasaki/kawasaki-aio8-2,Softmotions/edx-platform,shubhdev/edx-platform,chrisndodge/edx-platform,cyanna/edx-platform,zerobatu/edx-platform,etzhou/edx-platform,dkarakats/edx-platform,bitifirefly/edx-platform,praveen-pal/edx-platform,pabloborrego93/edx-platform,bitifirefly/edx-platform,shabab12/edx-platform,longmen21/edx-platform,adoosii/edx-platform,morpheby/levelup-by,LICEF/edx-platform,jazkarta/edx-platform,pdehaye/theming-edx-platform,B-MOOC/edx-platform,kxliugang/edx-platform,IndonesiaX/edx-platform,dsajkl/123,syjeon/new_edx,chauhanhardik/populo_2,motion2015/a3,analyseuc3m/ANALYSE-v1,kxliugang/edx-platform,beacloudgenius/edx-platform,jamiefolsom/edx-platform,MakeHer/edx-platform,abdoosh00/edraak,rhndg/openedx,vikas1885/test1,nttks/edx-platform,msegado/edx-platform,Ayub-Khan/edx-platform,jolyonb/edx-platform,Unow/edx-platform,naresh21/synergetics-edx-platform,IONISx/edx-platform,shubhdev/edxOnBaadal,ubc/edx-platform,appsembler/edx-platform,xinjiguaike/edx-platform,valtech-mooc/edx-platform,IndonesiaX/edx-platform,peterm-itr/edx-platform,shubhdev/edx-platform,hkawasaki/kawasaki-aio8-2,morpheby/levelup-by,appsembler/edx-platform,unicri/edx-platform,eestay/edx-platform,EduPepperPDTesting/pepper2013-testing,jolyonb/edx-platform,hastexo/edx-platform,franosincic/edx-platform,leansoft/edx-platform,caesar2164/edx-platform,caesar2164/edx-platform,a-parhom/edx-platform,bigdatauniversity/edx-platform,iivic/BoiseStateX,jolyonb/edx-platform,morpheby/levelup-by,motion2015/edx-platform,MakeHer/edx-platform,cpennington/edx-platform,nttks/jenkins-test,kursitet/edx-platform,EduPepperPDTesting/pepper2013-testing,angelapper/edx-platform,morenopc/edx-platform,sudheerchintala/LearnEraPlatForm,IONISx/edx-platform,waheedahmed/edx-platform,nttks/jenkins-test,nttks/edx-platform,EduPepperPDTesting/pepper2013-testing,chand3040/cloud_that,xingyepei/edx-platform,stvstnfrd/edx-platform,antonve/s4-project-mooc,J861449197/edx-platform,ahmadio/edx-platform,olexiim/edx-platform,eduNEXT/edx-platform,cselis86/edx-platform,JioEducation/edx-platform,ovnicraft/edx-platform,benpatterson/edx-platform,Shrhawk/edx-platform,fintech-circle/edx-platform,xinjiguaike/edx-platform,edry/edx-platform,Edraak/edraak-platform,rismalrv/edx-platform,jjmiranda/edx-platform,Livit/Livit.Learn.EdX,prarthitm/edxplatform,wwj718/edx-platform,msegado/edx-platform,LICEF/edx-platform,shubhdev/edxOnBaadal,playm2mboy/edx-platform,philanthropy-u/edx-platform,arifsetiawan/edx-platform,10clouds/edx-platform,zadgroup/edx-platform,IONISx/edx-platform,jruiperezv/ANALYSE,dcosentino/edx-platform,louyihua/edx-platform,itsjeyd/edx-platform,chrisndodge/edx-platform,yokose-ks/edx-platform,philanthropy-u/edx-platform,nanolearningllc/edx-platform-cypress,playm2mboy/edx-platform,mtlchun/edx,utecuy/edx-platform,Stanford-Online/edx-platform,Softmotions/edx-platform,chudaol/edx-platform,peterm-itr/edx-platform,jswope00/GAI,Endika/edx-platform,miptliot/edx-platform,naresh21/synergetics-edx-platform,mjg2203/edx-platform-seas,TsinghuaX/edx-platform,zhenzhai/edx-platform,shabab12/edx-platform,y12uc231/edx-platform,xuxiao19910803/edx-platform,simbs/edx-platform,pdehaye/theming-edx-platform,hamzehd/edx-platform,tiagochiavericosta/edx-platform,don-github/edx-platform,hkawasaki/kawasaki-aio8-2,chudaol/edx-platform,eemirtekin/edx-platform,halvertoluke/edx-platform,knehez/edx-platform,antoviaque/edx-platform,jbassen/edx-platform,beni55/edx-platform,rismalrv/edx-platform,cyanna/edx-platform,4eek/edx-platform,kmoocdev2/edx-platform,martynovp/edx-platform,Unow/edx-platform,eduNEXT/edx-platform,shashank971/edx-platform,alexthered/kienhoc-platform,auferack08/edx-platform,chauhanhardik/populo,iivic/BoiseStateX,LearnEra/LearnEraPlaftform,ubc/edx-platform,iivic/BoiseStateX,marcore/edx-platform,cognitiveclass/edx-platform,zadgroup/edx-platform,dkarakats/edx-platform,philanthropy-u/edx-platform,edx-solutions/edx-platform,ampax/edx-platform-backup,zerobatu/edx-platform,Livit/Livit.Learn.EdX,chrisndodge/edx-platform,raccoongang/edx-platform,nagyistoce/edx-platform,doganov/edx-platform,pomegranited/edx-platform,jruiperezv/ANALYSE,pelikanchik/edx-platform,Edraak/edraak-platform,fly19890211/edx-platform,eemirtekin/edx-platform,synergeticsedx/deployment-wipro,shurihell/testasia,carsongee/edx-platform,dsajkl/reqiop,ubc/edx-platform,edx/edx-platform,nanolearning/edx-platform,pabloborrego93/edx-platform,jonathan-beard/edx-platform,nanolearning/edx-platform,doganov/edx-platform,knehez/edx-platform,ferabra/edx-platform,chudaol/edx-platform
|
Add mgmt cmd to generate anonymized ID mapping
So that instructors have easy access to the mapping from anonymized
IDs (a simple MD5 hash of the user ID that's used when integrating
with third-party services like Qualtrics) to user IDs, we have a
simple Django management command to generate a CSV mapping. To run,
use the following:
rake django-admin[anonymized_id_mapping,<system>,<env>,<course_id>]
And, of course, substitute the appropriate values for <system>, <env>,
and <course_id>. (E.g., lms, dev, and MITx/6.002x/Circuits)
|
import csv
import sys
from django.contrib.auth.models import User
from django.core.management.base import BaseCommand, CommandError
from student.models import unique_id_for_user
class Command(BaseCommand):
# It appears that with the way Rake invokes these commands, we can't
# have more than one arg passed through...annoying.
args = ("course_id", )
help = """
    Exports a CSV document mapping from a user ID to the anonymized,
    unique user ID for every user in the specified course.
"""
def handle(self, *args, **options):
if len(args) != 1:
raise CommandError("Usage: unique_id_mapping %s" %
" ".join(("<%s>" % arg for arg in Command.args)))
course_id = args[0]
# Generate the output filename from the course ID.
# Change slashes to dashes first, and then append .csv extension.
output_filename = course_id.replace('/', '-') + ".csv"
# Figure out which students are enrolled in the course
students = User.objects.filter(courseenrollment__course_id=course_id)
if len(students) == 0:
self.stdout.write("No students enrolled in %s" % course_id)
return
# Write mapping to output file in CSV format with a simple header
try:
with open(output_filename, 'wb') as output_file:
csv_writer = csv.writer(output_file)
csv_writer.writerow(("User ID", "Anonymized user ID"))
for student in students:
csv_writer.writerow((student.id, unique_id_for_user(student)))
except IOError:
raise CommandError("Error writing to file: %s" % output_filename)
|
<commit_before><commit_msg>Add mgmt cmd to generate anonymized ID mapping
So that instructors have easy access to the mapping from anonymized
IDs (a simple MD5 hash of the user ID that's used when integrating
with third-party services like Qualtrics) to user IDs, we have a
simple Django management command to generate a CSV mapping. To run,
use the following:
rake django-admin[anonymized_id_mapping,<system>,<env>,<course_id>]
And, of course, substitute the appropriate values for <system>, <env>,
and <course_id>. (E.g., lms, dev, and MITx/6.002x/Circuits)<commit_after>
|
import csv
import sys
from django.contrib.auth.models import User
from django.core.management.base import BaseCommand, CommandError
from student.models import unique_id_for_user
class Command(BaseCommand):
# It appears that with the way Rake invokes these commands, we can't
# have more than one arg passed through...annoying.
args = ("course_id", )
help = """
    Exports a CSV document mapping from a user ID to the anonymized,
unique user ID for every user in the specified course.
"""
def handle(self, *args, **options):
if len(args) != 1:
raise CommandError("Usage: unique_id_mapping %s" %
" ".join(("<%s>" % arg for arg in Command.args)))
course_id = args[0]
# Generate the output filename from the course ID.
# Change slashes to dashes first, and then append .csv extension.
output_filename = course_id.replace('/', '-') + ".csv"
# Figure out which students are enrolled in the course
students = User.objects.filter(courseenrollment__course_id=course_id)
if len(students) == 0:
self.stdout.write("No students enrolled in %s" % course_id)
return
# Write mapping to output file in CSV format with a simple header
try:
with open(output_filename, 'wb') as output_file:
csv_writer = csv.writer(output_file)
csv_writer.writerow(("User ID", "Anonymized user ID"))
for student in students:
csv_writer.writerow((student.id, unique_id_for_user(student)))
except IOError:
raise CommandError("Error writing to file: %s" % output_filename)
|
Add mgmt cmd to generate anonymized ID mapping
So that instructors have easy access to the mapping from anonymized
IDs (a simple MD5 hash of the user ID that's used when integrating
with third-party services like Qualtrics) to user IDs, we have a
simple Django management command to generate a CSV mapping. To run,
use the following:
rake django-admin[anonymized_id_mapping,<system>,<env>,<course_id>]
And, of course, substitute the appropriate values for <system>, <env>,
and <course_id>. (E.g., lms, dev, and MITx/6.002x/Circuits)import csv
import sys
from django.contrib.auth.models import User
from django.core.management.base import BaseCommand, CommandError
from student.models import unique_id_for_user
class Command(BaseCommand):
# It appears that with the way Rake invokes these commands, we can't
# have more than one arg passed through...annoying.
args = ("course_id", )
help = """
    Exports a CSV document mapping from a user ID to the anonymized,
unique user ID for every user in the specified course.
"""
def handle(self, *args, **options):
if len(args) != 1:
raise CommandError("Usage: unique_id_mapping %s" %
" ".join(("<%s>" % arg for arg in Command.args)))
course_id = args[0]
# Generate the output filename from the course ID.
# Change slashes to dashes first, and then append .csv extension.
output_filename = course_id.replace('/', '-') + ".csv"
# Figure out which students are enrolled in the course
students = User.objects.filter(courseenrollment__course_id=course_id)
if len(students) == 0:
self.stdout.write("No students enrolled in %s" % course_id)
return
# Write mapping to output file in CSV format with a simple header
try:
with open(output_filename, 'wb') as output_file:
csv_writer = csv.writer(output_file)
csv_writer.writerow(("User ID", "Anonymized user ID"))
for student in students:
csv_writer.writerow((student.id, unique_id_for_user(student)))
except IOError:
raise CommandError("Error writing to file: %s" % output_filename)
|
<commit_before><commit_msg>Add mgmt cmd to generate anonymized ID mapping
So that instructors have easy access to the mapping from anonymized
IDs (a simple MD5 hash of the user ID that's used when integrating
with third-party services like Qualtrics) to user IDs, we have a
simple Django management command to generate a CSV mapping. To run,
use the following:
rake django-admin[anonymized_id_mapping,<system>,<env>,<course_id>]
And, of course, substitute the appropriate values for <system>, <env>,
and <course_id>. (E.g., lms, dev, and MITx/6.002x/Circuits)<commit_after>import csv
import sys
from django.contrib.auth.models import User
from django.core.management.base import BaseCommand, CommandError
from student.models import unique_id_for_user
class Command(BaseCommand):
# It appears that with the way Rake invokes these commands, we can't
# have more than one arg passed through...annoying.
args = ("course_id", )
help = """
    Exports a CSV document mapping from a user ID to the anonymized,
unique user ID for every user in the specified course.
"""
def handle(self, *args, **options):
if len(args) != 1:
raise CommandError("Usage: unique_id_mapping %s" %
" ".join(("<%s>" % arg for arg in Command.args)))
course_id = args[0]
# Generate the output filename from the course ID.
# Change slashes to dashes first, and then append .csv extension.
output_filename = course_id.replace('/', '-') + ".csv"
# Figure out which students are enrolled in the course
students = User.objects.filter(courseenrollment__course_id=course_id)
if len(students) == 0:
self.stdout.write("No students enrolled in %s" % course_id)
return
# Write mapping to output file in CSV format with a simple header
try:
with open(output_filename, 'wb') as output_file:
csv_writer = csv.writer(output_file)
csv_writer.writerow(("User ID", "Anonymized user ID"))
for student in students:
csv_writer.writerow((student.id, unique_id_for_user(student)))
except IOError:
raise CommandError("Error writing to file: %s" % output_filename)
|
|
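The commit above describes the anonymized ID as "a simple MD5 hash of the user ID". The helper below is a hypothetical sketch of what unique_id_for_user might compute; the real edx-platform implementation is not shown in this record and may differ (for example, it may salt the hash with a server-side secret).

import hashlib

def unique_id_for_user(user):
    # Hypothetical: hash the numeric user ID so third-party services
    # like Qualtrics never see the raw ID. Real implementation may differ.
    return hashlib.md5(str(user.id).encode('utf-8')).hexdigest()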
8d193a6eb59028eb2c21203bc8543d34603f86bd
|
bamova/plot_sampled_phis.py
|
bamova/plot_sampled_phis.py
|
import sys
import numpy as np
from sklearn.neighbors.kde import KernelDensity
import matplotlib
from matplotlib import pyplot as plt
def estimate_distribution(matrix, locus, h=0.2, n_points=100):
kde = KernelDensity(bandwidth=h)
    kde.fit(matrix[:, locus].reshape(-1, 1))
    xs = np.linspace(-1.0, 1.0, n_points)
    ys = np.exp(kde.score_samples(xs.reshape(-1, 1)))
return xs, ys
def plot_phis(plot_flname, subset):
plt.hold(True)
for loci in xrange(subset.shape[1]):
xs, ys = estimate_distribution(subset, loci)
plt.plot(xs, ys)
plt.xlim([-1.0, 1.0])
plt.xlabel("\phi", fontsize=16)
plt.ylabel("Frequency", fontsize=16)
    plt.savefig(plot_flname, dpi=200)
if __name__ == "__main__":
npy_flname = sys.argv[1]
start = int(sys.argv[2])
end = int(sys.argv[3])
plot_flname = sys.argv[4]
phi_values = np.load(npy_flname)
    subset = phi_values[:, start:end]
    plot_phis(plot_flname, subset)
|
Add bamova phi plotting script
|
Add bamova phi plotting script
|
Python
|
apache-2.0
|
rnowling/pop-gen-models
|
Add bamova phi plotting script
|
import sys
import numpy as np
from sklearn.neighbors.kde import KernelDensity
import matplotlib
from matplotlib import pyplot as plt
def estimate_distribution(matrix, locus, h=0.2, n_points=100):
kde = KernelDensity(bandwidth=h)
    kde.fit(matrix[:, locus].reshape(-1, 1))
    xs = np.linspace(-1.0, 1.0, n_points)
    ys = np.exp(kde.score_samples(xs.reshape(-1, 1)))
return xs, ys
def plot_phis(plot_flname, subset):
plt.hold(True)
for loci in xrange(subset.shape[1]):
xs, ys = estimate_distribution(subset, loci)
plt.plot(xs, ys)
plt.xlim([-1.0, 1.0])
plt.xlabel("\phi", fontsize=16)
plt.ylabel("Frequency", fontsize=16)
    plt.savefig(plot_flname, dpi=200)
if __name__ == "__main__":
npy_flname = sys.argv[1]
start = int(sys.argv[2])
end = int(sys.argv[3])
plot_flname = sys.argv[4]
phi_values = np.load(npy_flname)
    subset = phi_values[:, start:end]
    plot_phis(plot_flname, subset)
|
<commit_before><commit_msg>Add bamova phi plotting script<commit_after>
|
import sys
import numpy as np
from sklearn.neighbors.kde import KernelDensity
import matplotlib
from matplotlib import pyplot as plt
def estimate_distribution(matrix, locus, h=0.2, n_points=100):
kde = KernelDensity(bandwidth=h)
    kde.fit(matrix[:, locus].reshape(-1, 1))
    xs = np.linspace(-1.0, 1.0, n_points)
    ys = np.exp(kde.score_samples(xs.reshape(-1, 1)))
return xs, ys
def plot_phis(plot_flname, subset):
plt.hold(True)
for loci in xrange(subset.shape[1]):
xs, ys = estimate_distribution(subset, loci)
plt.plot(xs, ys)
plt.xlim([-1.0, 1.0])
plt.xlabel("\phi", fontsize=16)
plt.ylabel("Frequency", fontsize=16)
    plt.savefig(plot_flname, dpi=200)
if __name__ == "__main__":
npy_flname = sys.argv[1]
start = int(sys.argv[2])
end = int(sys.argv[3])
plot_flname = sys.argv[4]
phi_values = np.load(npy_flname)
    subset = phi_values[:, start:end]
    plot_phis(plot_flname, subset)
|
Add bamova phi plotting scriptimport sys
import numpy as np
from sklearn.neighbors.kde import KernelDensity
import matplotlib
from matplotlib import pyplot as plt
def estimate_distribution(matrix, locus, h=0.2, n_points=100):
kde = KernelDensity(bandwidth=h)
    kde.fit(matrix[:, locus].reshape(-1, 1))
    xs = np.linspace(-1.0, 1.0, n_points)
    ys = np.exp(kde.score_samples(xs.reshape(-1, 1)))
return xs, ys
def plot_phis(plot_flname, subset):
plt.hold(True)
for loci in xrange(subset.shape[1]):
xs, ys = estimate_distribution(subset, loci)
plt.plot(xs, ys)
plt.xlim([-1.0, 1.0])
plt.xlabel("\phi", fontsize=16)
plt.ylabel("Frequency", fontsize=16)
    plt.savefig(plot_flname, dpi=200)
if __name__ == "__main__":
npy_flname = sys.argv[1]
start = int(sys.argv[2])
end = int(sys.argv[3])
plot_flname = sys.argv[4]
phi_values = np.load(npy_flname)
    subset = phi_values[:, start:end]
    plot_phis(plot_flname, subset)
|
<commit_before><commit_msg>Add bamova phi plotting script<commit_after>import sys
import numpy as np
from sklearn.neighbors.kde import KernelDensity
import matplotlib
from matplotlib import pyplot as plt
def estimate_distribution(matrix, locus, h=0.2, n_points=100):
kde = KernelDensity(bandwidth=h)
    kde.fit(matrix[:, locus].reshape(-1, 1))
    xs = np.linspace(-1.0, 1.0, n_points)
    ys = np.exp(kde.score_samples(xs.reshape(-1, 1)))
return xs, ys
def plot_phis(plot_flname, subset):
plt.hold(True)
for loci in xrange(subset.shape[1]):
xs, ys = estimate_distribution(subset, loci)
plt.plot(xs, ys)
plt.xlim([-1.0, 1.0])
plt.xlabel("\phi", fontsize=16)
plt.ylabel("Frequency", fontsize=16)
    plt.savefig(plot_flname, dpi=200)
if __name__ == "__main__":
npy_flname = sys.argv[1]
start = int(sys.argv[2])
end = int(sys.argv[3])
plot_flname = sys.argv[4]
phi_values = np.load(npy_flname)
    subset = phi_values[:, start:end]
    plot_phis(plot_flname, subset)
|
|
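As a quick check of the KDE call pattern used in the script above: scikit-learn's KernelDensity expects a 2-D array of shape (n_samples, 1) for both fit and score_samples, and score_samples returns log-density values. A minimal self-contained sketch (modern import path shown; the script above uses the older sklearn.neighbors.kde path):

import numpy as np
from sklearn.neighbors import KernelDensity

samples = np.array([-0.2, 0.0, 0.1, 0.3]).reshape(-1, 1)
kde = KernelDensity(bandwidth=0.2).fit(samples)
grid = np.linspace(-1.0, 1.0, 5).reshape(-1, 1)
density = np.exp(kde.score_samples(grid))  # convert log-density to density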
6be818822d7bd6ecf87837f82187c8e84442d6ba
|
cnxarchive/sql/migrations/20160423022147_add_python_sql_debug_functions.py
|
cnxarchive/sql/migrations/20160423022147_add_python_sql_debug_functions.py
|
# -*- coding: utf-8 -*-
"""\
- Add SQL function `pypath`
- Add SQL function `pyimport`
"""
def up(cursor):
# Add SQL function `pypath`
cursor.execute("""\
-- Returns the Python `sys.path`
-- Example usage, `SELECT unnest(pypath())`
CREATE OR REPLACE FUNCTION pypath()
RETURNS TEXT[]
AS $$
import sys
return sys.path
$$ LANGUAGE plpythonu;
""")
# Add SQL function `pyimport`
cursor.execute("""\
-- Returns module location for the given module
-- Example usage, `SELECT * FROM pyimport('cnxarchive.database');`
CREATE TYPE pyimport_value AS (
import TEXT,
directory TEXT,
file_path TEXT
);
CREATE OR REPLACE FUNCTION pyimport(pymodule TEXT)
RETURNS SETOF pyimport_value
AS $$
import os
import importlib
try:
module = importlib.import_module(pymodule)
except ImportError:
return [] #{'import': None, 'directory': None, 'file_path': None}
file_path = os.path.abspath(module.__file__)
directory = os.path.dirname(file_path)
info = {
'import': pymodule,
'directory': directory,
'file_path': file_path,
}
return [info]
$$ LANGUAGE plpythonu;
""")
def down(cursor):
# Remove SQL function `pypath`
cursor.execute("DROP FUNCTION IF EXISTS pypath();")
# Remove SQL function `pyimport`
cursor.execute("DROP TYPE IF EXISTS pyimport_value CASCADE;")
|
Add migration for python debug SQL functions
|
:sparkles: Add migration for python debug SQL functions
|
Python
|
agpl-3.0
|
Connexions/cnx-archive,Connexions/cnx-archive
|
:sparkles: Add migration for python debug SQL functions
|
# -*- coding: utf-8 -*-
"""\
- Add SQL function `pypath`
- Add SQL function `pyimport`
"""
def up(cursor):
# Add SQL function `pypath`
cursor.execute("""\
-- Returns the Python `sys.path`
-- Example usage, `SELECT unnest(pypath())`
CREATE OR REPLACE FUNCTION pypath()
RETURNS TEXT[]
AS $$
import sys
return sys.path
$$ LANGUAGE plpythonu;
""")
# Add SQL function `pyimport`
cursor.execute("""\
-- Returns module location for the given module
-- Example usage, `SELECT * FROM pyimport('cnxarchive.database');`
CREATE TYPE pyimport_value AS (
import TEXT,
directory TEXT,
file_path TEXT
);
CREATE OR REPLACE FUNCTION pyimport(pymodule TEXT)
RETURNS SETOF pyimport_value
AS $$
import os
import importlib
try:
module = importlib.import_module(pymodule)
except ImportError:
return [] #{'import': None, 'directory': None, 'file_path': None}
file_path = os.path.abspath(module.__file__)
directory = os.path.dirname(file_path)
info = {
'import': pymodule,
'directory': directory,
'file_path': file_path,
}
return [info]
$$ LANGUAGE plpythonu;
""")
def down(cursor):
# Remove SQL function `pypath`
cursor.execute("DROP FUNCTION IF EXISTS pypath();")
# Remove SQL function `pyimport`
cursor.execute("DROP TYPE IF EXISTS pyimport_value CASCADE;")
|
<commit_before><commit_msg>:sparkles: Add migration for python debug SQL functions<commit_after>
|
# -*- coding: utf-8 -*-
"""\
- Add SQL function `pypath`
- Add SQL function `pyimport`
"""
def up(cursor):
# Add SQL function `pypath`
cursor.execute("""\
-- Returns the Python `sys.path`
-- Example usage, `SELECT unnest(pypath())`
CREATE OR REPLACE FUNCTION pypath()
RETURNS TEXT[]
AS $$
import sys
return sys.path
$$ LANGUAGE plpythonu;
""")
# Add SQL function `pyimport`
cursor.execute("""\
-- Returns module location for the given module
-- Example usage, `SELECT * FROM pyimport('cnxarchive.database');`
CREATE TYPE pyimport_value AS (
import TEXT,
directory TEXT,
file_path TEXT
);
CREATE OR REPLACE FUNCTION pyimport(pymodule TEXT)
RETURNS SETOF pyimport_value
AS $$
import os
import importlib
try:
module = importlib.import_module(pymodule)
except ImportError:
return [] #{'import': None, 'directory': None, 'file_path': None}
file_path = os.path.abspath(module.__file__)
directory = os.path.dirname(file_path)
info = {
'import': pymodule,
'directory': directory,
'file_path': file_path,
}
return [info]
$$ LANGUAGE plpythonu;
""")
def down(cursor):
# Remove SQL function `pypath`
cursor.execute("DROP FUNCTION IF EXISTS pypath();")
# Remove SQL function `pyimport`
cursor.execute("DROP TYPE IF EXISTS pyimport_value CASCADE;")
|
:sparkles: Add migration for python debug SQL functions# -*- coding: utf-8 -*-
"""\
- Add SQL function `pypath`
- Add SQL function `pyimport`
"""
def up(cursor):
# Add SQL function `pypath`
cursor.execute("""\
-- Returns the Python `sys.path`
-- Example usage, `SELECT unnest(pypath())`
CREATE OR REPLACE FUNCTION pypath()
RETURNS TEXT[]
AS $$
import sys
return sys.path
$$ LANGUAGE plpythonu;
""")
# Add SQL function `pyimport`
cursor.execute("""\
-- Returns module location for the given module
-- Example usage, `SELECT * FROM pyimport('cnxarchive.database');`
CREATE TYPE pyimport_value AS (
import TEXT,
directory TEXT,
file_path TEXT
);
CREATE OR REPLACE FUNCTION pyimport(pymodule TEXT)
RETURNS SETOF pyimport_value
AS $$
import os
import importlib
try:
module = importlib.import_module(pymodule)
except ImportError:
return [] #{'import': None, 'directory': None, 'file_path': None}
file_path = os.path.abspath(module.__file__)
directory = os.path.dirname(file_path)
info = {
'import': pymodule,
'directory': directory,
'file_path': file_path,
}
return [info]
$$ LANGUAGE plpythonu;
""")
def down(cursor):
# Remove SQL function `pypath`
cursor.execute("DROP FUNCTION IF EXISTS pypath();")
# Remove SQL function `pyimport`
cursor.execute("DROP TYPE IF EXISTS pyimport_value CASCADE;")
|
<commit_before><commit_msg>:sparkles: Add migration for python debug SQL functions<commit_after># -*- coding: utf-8 -*-
"""\
- Add SQL function `pypath`
- Add SQL function `pyimport`
"""
def up(cursor):
# Add SQL function `pypath`
cursor.execute("""\
-- Returns the Python `sys.path`
-- Example usage, `SELECT unnest(pypath())`
CREATE OR REPLACE FUNCTION pypath()
RETURNS TEXT[]
AS $$
import sys
return sys.path
$$ LANGUAGE plpythonu;
""")
# Add SQL function `pyimport`
cursor.execute("""\
-- Returns module location for the given module
-- Example usage, `SELECT * FROM pyimport('cnxarchive.database');`
CREATE TYPE pyimport_value AS (
import TEXT,
directory TEXT,
file_path TEXT
);
CREATE OR REPLACE FUNCTION pyimport(pymodule TEXT)
RETURNS SETOF pyimport_value
AS $$
import os
import importlib
try:
module = importlib.import_module(pymodule)
except ImportError:
return [] #{'import': None, 'directory': None, 'file_path': None}
file_path = os.path.abspath(module.__file__)
directory = os.path.dirname(file_path)
info = {
'import': pymodule,
'directory': directory,
'file_path': file_path,
}
return [info]
$$ LANGUAGE plpythonu;
""")
def down(cursor):
# Remove SQL function `pypath`
cursor.execute("DROP FUNCTION IF EXISTS pypath();")
# Remove SQL function `pyimport`
cursor.execute("DROP TYPE IF EXISTS pyimport_value CASCADE;")
|
|
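Assuming a plpythonu-enabled Postgres database, the up/down hooks in the migration above can be exercised by hand with psycopg2 roughly as follows. The DSN is made up, and `up` is assumed to be imported from the migration module:

import psycopg2

conn = psycopg2.connect('dbname=cnxarchive')  # hypothetical DSN
with conn.cursor() as cursor:
    up(cursor)  # the up() defined in the migration above
    cursor.execute("SELECT * FROM pyimport('cnxarchive.database')")
    print(cursor.fetchall())
conn.commit()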
a81826e4667d4ec195a5524ae05f5b7f972e8ce0
|
parse_results.py
|
parse_results.py
|
#!/usr/bin/python
# coding: utf-8
import fileinput
import sys
buffer_size = 0
threshold = 0
elapsed_time = 0.0
for line in fileinput.input():
l = line.split()
if l:
if l[0] == 'IMPORTANT:':
if l[1] == 'Maximum':
pass
elif l[1] == 'Buffer':
buffer_size = int(l[3][:-1])
threshold = int(l[5])
elif l[0] == 'Elapsed':
elapsed_time = float(l[2])
print '{}, {}, {}'.format(buffer_size, threshold, elapsed_time)
sys.stdout.flush()
|
Add script to parse the results
|
Add script to parse the results
|
Python
|
bsd-2-clause
|
mrjimenez/arduino_serial
|
Add script to parse the results
|
#!/usr/bin/python
# coding: utf-8
import fileinput
import sys
buffer_size = 0
threshold = 0
elapsed_time = 0.0
for line in fileinput.input():
l = line.split()
if l:
if l[0] == 'IMPORTANT:':
if l[1] == 'Maximum':
pass
elif l[1] == 'Buffer':
buffer_size = int(l[3][:-1])
threshold = int(l[5])
elif l[0] == 'Elapsed':
elapsed_time = float(l[2])
print '{}, {}, {}'.format(buffer_size, threshold, elapsed_time)
sys.stdout.flush()
|
<commit_before><commit_msg>Add script to parse the results<commit_after>
|
#!/usr/bin/python
# coding: utf-8
import fileinput
import sys
buffer_size = 0
threshold = 0
elapsed_time = 0.0
for line in fileinput.input():
l = line.split()
if l:
if l[0] == 'IMPORTANT:':
if l[1] == 'Maximum':
pass
elif l[1] == 'Buffer':
buffer_size = int(l[3][:-1])
threshold = int(l[5])
elif l[0] == 'Elapsed':
elapsed_time = float(l[2])
print '{}, {}, {}'.format(buffer_size, threshold, elapsed_time)
sys.stdout.flush()
|
Add script to parse the results#!/usr/bin/python
# coding: utf-8
import fileinput
import sys
buffer_size = 0
threshold = 0
elapsed_time = 0.0
for line in fileinput.input():
l = line.split()
if l:
if l[0] == 'IMPORTANT:':
if l[1] == 'Maximum':
pass
elif l[1] == 'Buffer':
buffer_size = int(l[3][:-1])
threshold = int(l[5])
elif l[0] == 'Elapsed':
elapsed_time = float(l[2])
print '{}, {}, {}'.format(buffer_size, threshold, elapsed_time)
sys.stdout.flush()
|
<commit_before><commit_msg>Add script to parse the results<commit_after>#!/usr/bin/python
# coding: utf-8
import fileinput
import sys
buffer_size = 0
threshold = 0
elapsed_time = 0.0
for line in fileinput.input():
l = line.split()
if l:
if l[0] == 'IMPORTANT:':
if l[1] == 'Maximum':
pass
elif l[1] == 'Buffer':
buffer_size = int(l[3][:-1])
threshold = int(l[5])
elif l[0] == 'Elapsed':
elapsed_time = float(l[2])
print '{}, {}, {}'.format(buffer_size, threshold, elapsed_time)
sys.stdout.flush()
|
|
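A small self-contained check of the field indexing the parser above relies on. On a line shaped like the one below (format guessed from the parser; the real benchmark log may differ), l[3][:-1] strips the trailing comma from the buffer size:

line = 'IMPORTANT: Buffer size: 64, threshold 32'  # hypothetical log line
l = line.split()
assert int(l[3][:-1]) == 64   # '64,' -> 64
assert int(l[5]) == 32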
f72f48a0bc7ab92439e3af04538b8b46e6673d62
|
robot/robot/src/component/drive.py
|
robot/robot/src/component/drive.py
|
'''
Driving component
'''
class Drive(object):
def __init__(self, robotDrive):
self.robotDrive = robotDrive
#
# Verb functions
#
def move(self, x, y, z):
self.x = x
self.y = y
self.z = z
#
# Actually does stuff
#
def doit(self):
self.robotDrive.MecanumDrive_Cartesian(self.x, self.y, self.z)
# reset things to defaults
self.x = 0
self.y = 0
self.z = 0
|
Add driving component as an example
|
Add driving component as an example
|
Python
|
bsd-3-clause
|
frc1418/2014
|
Add driving component as an example
|
'''
Driving component
'''
class Drive(object):
def __init__(self, robotDrive):
self.robotDrive = robotDrive
#
# Verb functions
#
def move(self, x, y, z):
self.x = x
self.y = y
self.z = z
#
# Actually does stuff
#
def doit(self):
self.robotDrive.MecanumDrive_Cartesian(self.x, self.y, self.z)
# reset things to defaults
self.x = 0
self.y = 0
self.z = 0
|
<commit_before><commit_msg>Add driving component as an example<commit_after>
|
'''
Driving component
'''
class Drive(object):
def __init__(self, robotDrive):
self.robotDrive = robotDrive
#
# Verb functions
#
def move(self, x, y, z):
self.x = x
self.y = y
self.z = z
#
# Actually does stuff
#
def doit(self):
self.robotDrive.MecanumDrive_Cartesian(self.x, self.y, self.z)
# reset things to defaults
self.x = 0
self.y = 0
self.z = 0
|
Add driving component as an example'''
Driving component
'''
class Drive(object):
def __init__(self, robotDrive):
self.robotDrive = robotDrive
#
# Verb functions
#
def move(self, x, y, z):
self.x = x
self.y = y
self.z = z
#
# Actually does stuff
#
def doit(self):
self.robotDrive.MecanumDrive_Cartesian(self.x, self.y, self.z)
# reset things to defaults
self.x = 0
self.y = 0
self.z = 0
|
<commit_before><commit_msg>Add driving component as an example<commit_after>'''
Driving component
'''
class Drive(object):
def __init__(self, robotDrive):
self.robotDrive = robotDrive
#
# Verb functions
#
def move(self, x, y, z):
self.x = x
self.y = y
self.z = z
#
# Actually does stuff
#
def doit(self):
self.robotDrive.MecanumDrive_Cartesian(self.x, self.y, self.z)
# reset things to defaults
self.x = 0
self.y = 0
self.z = 0
|
|
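A minimal sketch of driving the component above from a robot loop, with a stand-in for wpilib's RobotDrive (the real robot wiring is not shown in this record):

class FakeRobotDrive(object):
    def MecanumDrive_Cartesian(self, x, y, z):
        print('drive x=%s y=%s z=%s' % (x, y, z))

drive = Drive(FakeRobotDrive())   # Drive as defined in the file above
drive.move(0.5, 0.0, 0.25)        # strafe right with a slow rotation
drive.doit()                      # applies the values, then zeroes them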
08f6fb805938b89ede5cb69eed9929d32104278b
|
content/test/gpu/gpu_tests/webgl_robustness.py
|
content/test/gpu/gpu_tests/webgl_robustness.py
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry import test
from telemetry.page import page_set
from webgl_conformance import WebglConformanceValidator
from webgl_conformance import conformance_harness_script
from webgl_conformance import conformance_path
robustness_harness_script = conformance_harness_script + r"""
var robustnessTestHarness = {};
robustnessTestHarness._contextLost = false;
robustnessTestHarness.initialize = function() {
var canvas = document.getElementById('example');
canvas.addEventListener('webglcontextlost', function() {
robustnessTestHarness._contextLost = true;
});
}
robustnessTestHarness.runTestLoop = function() {
// Run the test in a loop until the context is lost.
main();
if (!robustnessTestHarness._contextLost)
window.requestAnimationFrame(robustnessTestHarness.runTestLoop);
else
robustnessTestHarness.notifyFinished();
}
robustnessTestHarness.notifyFinished = function() {
// The test may fail in unpredictable ways depending on when the context is
// lost. We ignore such errors and only require that the browser doesn't
// crash.
webglTestHarness._allTestSucceeded = true;
// Notify test completion after a delay to make sure the browser is able to
// recover from the lost context.
setTimeout(webglTestHarness.notifyFinished, 3000);
}
window.confirm = function() {
robustnessTestHarness.initialize();
robustnessTestHarness.runTestLoop();
return false;
}
window.webglRobustnessTestHarness = robustnessTestHarness;
"""
class WebglRobustness(test.Test):
enabled = False
test = WebglConformanceValidator
def CreatePageSet(self, options):
page_set_dict = {
'description': 'Test cases for WebGL robustness',
'user_agent_type': 'desktop',
'serving_dirs': [''],
'pages': [
{
'url': 'file:///extra/lots-of-polys-example.html',
'script_to_evaluate_on_commit': robustness_harness_script,
'wait_for_javascript_expression': 'webglTestHarness._finished'
}
]
}
return page_set.PageSet.FromDict(page_set_dict, conformance_path)
|
Add WebGL robustness telemetry test
|
gpu: Add WebGL robustness telemetry test
This patch adds a telemetry-based WebGL robustness test. It executes an
optional test from the WebGL conformance test suite to trigger a GPU
context loss and verifies this doesn't crash the browser.
To run the test for example on Android, execute:
$ content/test/gpu/run_gpu_test --browser=android-chrome webgl_robustness
BUG=232449
Review URL: https://chromiumcodereview.appspot.com/21150015
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@215929 0039d316-1c4b-4281-b951-d872f2087c98
|
Python
|
bsd-3-clause
|
anirudhSK/chromium,ChromiumWebApps/chromium,Fireblend/chromium-crosswalk,Jonekee/chromium.src,bright-sparks/chromium-spacewalk,M4sse/chromium.src,Fireblend/chromium-crosswalk,fujunwei/chromium-crosswalk,hgl888/chromium-crosswalk-efl,PeterWangIntel/chromium-crosswalk,fujunwei/chromium-crosswalk,patrickm/chromium.src,ondra-novak/chromium.src,bright-sparks/chromium-spacewalk,dednal/chromium.src,dednal/chromium.src,Pluto-tv/chromium-crosswalk,krieger-od/nwjs_chromium.src,chuan9/chromium-crosswalk,Pluto-tv/chromium-crosswalk,mogoweb/chromium-crosswalk,markYoungH/chromium.src,axinging/chromium-crosswalk,ChromiumWebApps/chromium,hgl888/chromium-crosswalk-efl,TheTypoMaster/chromium-crosswalk,anirudhSK/chromium,anirudhSK/chromium,ltilve/chromium,TheTypoMaster/chromium-crosswalk,Fireblend/chromium-crosswalk,markYoungH/chromium.src,TheTypoMaster/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,ltilve/chromium,TheTypoMaster/chromium-crosswalk,ltilve/chromium,hgl888/chromium-crosswalk,bright-sparks/chromium-spacewalk,patrickm/chromium.src,fujunwei/chromium-crosswalk,dednal/chromium.src,ChromiumWebApps/chromium,mogoweb/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,hgl888/chromium-crosswalk,dednal/chromium.src,mogoweb/chromium-crosswalk,hgl888/chromium-crosswalk,Chilledheart/chromium,ChromiumWebApps/chromium,littlstar/chromium.src,jaruba/chromium.src,ltilve/chromium,Fireblend/chromium-crosswalk,littlstar/chromium.src,hgl888/chromium-crosswalk,dednal/chromium.src,chuan9/chromium-crosswalk,Fireblend/chromium-crosswalk,littlstar/chromium.src,dushu1203/chromium.src,mogoweb/chromium-crosswalk,Chilledheart/chromium,axinging/chromium-crosswalk,patrickm/chromium.src,M4sse/chromium.src,Jonekee/chromium.src,mohamed--abdel-maksoud/chromium.src,Chilledheart/chromium,Just-D/chromium-1,Jonekee/chromium.src,axinging/chromium-crosswalk,jaruba/chromium.src,mohamed--abdel-maksoud/chromium.src,M4sse/chromium.src,chuan9/chromium-crosswalk,chuan9/chromium-crosswalk,fujunwei/chromium-crosswalk,fujunwei/chromium-crosswalk,ChromiumWebApps/chromium,krieger-od/nwjs_chromium.src,Jonekee/chromium.src,krieger-od/nwjs_chromium.src,ltilve/chromium,Chilledheart/chromium,Chilledheart/chromium,markYoungH/chromium.src,PeterWangIntel/chromium-crosswalk,mogoweb/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,dednal/chromium.src,mohamed--abdel-maksoud/chromium.src,Just-D/chromium-1,jaruba/chromium.src,mohamed--abdel-maksoud/chromium.src,M4sse/chromium.src,Just-D/chromium-1,Just-D/chromium-1,crosswalk-project/chromium-crosswalk-efl,Chilledheart/chromium,PeterWangIntel/chromium-crosswalk,littlstar/chromium.src,Just-D/chromium-1,crosswalk-project/chromium-crosswalk-efl,chuan9/chromium-crosswalk,axinging/chromium-crosswalk,mogoweb/chromium-crosswalk,axinging/chromium-crosswalk,Chilledheart/chromium,anirudhSK/chromium,M4sse/chromium.src,jaruba/chromium.src,Pluto-tv/chromium-crosswalk,krieger-od/nwjs_chromium.src,hgl888/chromium-crosswalk-efl,markYoungH/chromium.src,ondra-novak/chromium.src,jaruba/chromium.src,crosswalk-project/chromium-crosswalk-efl,hgl888/chromium-crosswalk-efl,patrickm/chromium.src,bright-sparks/chromium-spacewalk,hgl888/chromium-crosswalk-efl,fujunwei/chromium-crosswalk,dednal/chromium.src,krieger-od/nwjs_chromium.src,dushu1203/chromium.src,M4sse/chromium.src,mohamed--abdel-maksoud/chromium.src,hgl888/chromium-crosswalk-efl,Pluto-tv/chromium-crosswalk,krieger-od/nwjs_chromium.src,dushu1203/chromium.src,TheTypoMaster/chromium-crosswalk,bright-sparks/chromium-spacewalk,hgl888/chromium-crosswalk-efl,PeterWangIntel/chromium-crosswalk,jaruba/chromium.src,bright-sparks/chromium-spacewalk,hgl888/chromium-crosswalk,anirudhSK/chromium,PeterWangIntel/chromium-crosswalk,dushu1203/chromium.src,PeterWangIntel/chromium-crosswalk,M4sse/chromium.src,dushu1203/chromium.src,axinging/chromium-crosswalk,littlstar/chromium.src,dushu1203/chromium.src,Jonekee/chromium.src,Jonekee/chromium.src,markYoungH/chromium.src,PeterWangIntel/chromium-crosswalk,ChromiumWebApps/chromium,Just-D/chromium-1,anirudhSK/chromium,M4sse/chromium.src,ChromiumWebApps/chromium,axinging/chromium-crosswalk,Pluto-tv/chromium-crosswalk,ltilve/chromium,ltilve/chromium,mohamed--abdel-maksoud/chromium.src,Fireblend/chromium-crosswalk,jaruba/chromium.src,axinging/chromium-crosswalk,Jonekee/chromium.src,bright-sparks/chromium-spacewalk,littlstar/chromium.src,Jonekee/chromium.src,mogoweb/chromium-crosswalk,markYoungH/chromium.src,hgl888/chromium-crosswalk,hgl888/chromium-crosswalk-efl,markYoungH/chromium.src,Jonekee/chromium.src,crosswalk-project/chromium-crosswalk-efl,anirudhSK/chromium,Just-D/chromium-1,ondra-novak/chromium.src,M4sse/chromium.src,ondra-novak/chromium.src,Pluto-tv/chromium-crosswalk,bright-sparks/chromium-spacewalk,ChromiumWebApps/chromium,axinging/chromium-crosswalk,dushu1203/chromium.src,ondra-novak/chromium.src,anirudhSK/chromium,axinging/chromium-crosswalk,M4sse/chromium.src,patrickm/chromium.src,chuan9/chromium-crosswalk,bright-sparks/chromium-spacewalk,mohamed--abdel-maksoud/chromium.src,hgl888/chromium-crosswalk,Pluto-tv/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,patrickm/chromium.src,krieger-od/nwjs_chromium.src,dednal/chromium.src,mohamed--abdel-maksoud/chromium.src,mohamed--abdel-maksoud/chromium.src,ChromiumWebApps/chromium,ChromiumWebApps/chromium,Fireblend/chromium-crosswalk,jaruba/chromium.src,chuan9/chromium-crosswalk,littlstar/chromium.src,jaruba/chromium.src,anirudhSK/chromium,patrickm/chromium.src,Pluto-tv/chromium-crosswalk,ltilve/chromium,krieger-od/nwjs_chromium.src,Just-D/chromium-1,krieger-od/nwjs_chromium.src,markYoungH/chromium.src,dushu1203/chromium.src,markYoungH/chromium.src,ondra-novak/chromium.src,TheTypoMaster/chromium-crosswalk,Just-D/chromium-1,dednal/chromium.src,Chilledheart/chromium,Jonekee/chromium.src,axinging/chromium-crosswalk,hgl888/chromium-crosswalk,dushu1203/chromium.src,anirudhSK/chromium,patrickm/chromium.src,markYoungH/chromium.src,chuan9/chromium-crosswalk,mogoweb/chromium-crosswalk,Pluto-tv/chromium-crosswalk,anirudhSK/chromium,Fireblend/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,Chilledheart/chromium,crosswalk-project/chromium-crosswalk-efl,anirudhSK/chromium,Fireblend/chromium-crosswalk,hgl888/chromium-crosswalk,ChromiumWebApps/chromium,Jonekee/chromium.src,dushu1203/chromium.src,ChromiumWebApps/chromium,jaruba/chromium.src,mogoweb/chromium-crosswalk,TheTypoMaster/chromium-crosswalk
|
gpu: Add WebGL robustness telemetry test
This patch adds a telemetry-based WebGL robustness test. It executes an
optional test from the WebGL conformance test suite to trigger a GPU
context loss and verifies this doesn't crash the browser.
To run the test for example on Android, execute:
$ content/test/gpu/run_gpu_test --browser=android-chrome webgl_robustness
BUG=232449
Review URL: https://chromiumcodereview.appspot.com/21150015
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@215929 0039d316-1c4b-4281-b951-d872f2087c98
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry import test
from telemetry.page import page_set
from webgl_conformance import WebglConformanceValidator
from webgl_conformance import conformance_harness_script
from webgl_conformance import conformance_path
robustness_harness_script = conformance_harness_script + r"""
var robustnessTestHarness = {};
robustnessTestHarness._contextLost = false;
robustnessTestHarness.initialize = function() {
var canvas = document.getElementById('example');
canvas.addEventListener('webglcontextlost', function() {
robustnessTestHarness._contextLost = true;
});
}
robustnessTestHarness.runTestLoop = function() {
// Run the test in a loop until the context is lost.
main();
if (!robustnessTestHarness._contextLost)
window.requestAnimationFrame(robustnessTestHarness.runTestLoop);
else
robustnessTestHarness.notifyFinished();
}
robustnessTestHarness.notifyFinished = function() {
// The test may fail in unpredictable ways depending on when the context is
// lost. We ignore such errors and only require that the browser doesn't
// crash.
webglTestHarness._allTestSucceeded = true;
// Notify test completion after a delay to make sure the browser is able to
// recover from the lost context.
setTimeout(webglTestHarness.notifyFinished, 3000);
}
window.confirm = function() {
robustnessTestHarness.initialize();
robustnessTestHarness.runTestLoop();
return false;
}
window.webglRobustnessTestHarness = robustnessTestHarness;
"""
class WebglRobustness(test.Test):
enabled = False
test = WebglConformanceValidator
def CreatePageSet(self, options):
page_set_dict = {
'description': 'Test cases for WebGL robustness',
'user_agent_type': 'desktop',
'serving_dirs': [''],
'pages': [
{
'url': 'file:///extra/lots-of-polys-example.html',
'script_to_evaluate_on_commit': robustness_harness_script,
'wait_for_javascript_expression': 'webglTestHarness._finished'
}
]
}
return page_set.PageSet.FromDict(page_set_dict, conformance_path)
|
<commit_before><commit_msg>gpu: Add WebGL robustness telemetry test
This patch adds a telemetry-based WebGL robustness test. It executes an
optional test from the WebGL conformance test suite to trigger a GPU
context loss and verifies this doesn't crash the browser.
To run the test for example on Android, execute:
$ content/test/gpu/run_gpu_test --browser=android-chrome webgl_robustness
BUG=232449
Review URL: https://chromiumcodereview.appspot.com/21150015
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@215929 0039d316-1c4b-4281-b951-d872f2087c98<commit_after>
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry import test
from telemetry.page import page_set
from webgl_conformance import WebglConformanceValidator
from webgl_conformance import conformance_harness_script
from webgl_conformance import conformance_path
robustness_harness_script = conformance_harness_script + r"""
var robustnessTestHarness = {};
robustnessTestHarness._contextLost = false;
robustnessTestHarness.initialize = function() {
var canvas = document.getElementById('example');
canvas.addEventListener('webglcontextlost', function() {
robustnessTestHarness._contextLost = true;
});
}
robustnessTestHarness.runTestLoop = function() {
// Run the test in a loop until the context is lost.
main();
if (!robustnessTestHarness._contextLost)
window.requestAnimationFrame(robustnessTestHarness.runTestLoop);
else
robustnessTestHarness.notifyFinished();
}
robustnessTestHarness.notifyFinished = function() {
// The test may fail in unpredictable ways depending on when the context is
// lost. We ignore such errors and only require that the browser doesn't
// crash.
webglTestHarness._allTestSucceeded = true;
// Notify test completion after a delay to make sure the browser is able to
// recover from the lost context.
setTimeout(webglTestHarness.notifyFinished, 3000);
}
window.confirm = function() {
robustnessTestHarness.initialize();
robustnessTestHarness.runTestLoop();
return false;
}
window.webglRobustnessTestHarness = robustnessTestHarness;
"""
class WebglRobustness(test.Test):
enabled = False
test = WebglConformanceValidator
def CreatePageSet(self, options):
page_set_dict = {
'description': 'Test cases for WebGL robustness',
'user_agent_type': 'desktop',
'serving_dirs': [''],
'pages': [
{
'url': 'file:///extra/lots-of-polys-example.html',
'script_to_evaluate_on_commit': robustness_harness_script,
'wait_for_javascript_expression': 'webglTestHarness._finished'
}
]
}
return page_set.PageSet.FromDict(page_set_dict, conformance_path)
|
gpu: Add WebGL robustness telemetry test
This patch adds a telemetry-based WebGL robustness test. It executes an
optional test from the WebGL conformance test suite to trigger a GPU
context loss and verifies this doesn't crash the browser.
To run the test for example on Android, execute:
$ content/test/gpu/run_gpu_test --browser=android-chrome webgl_robustness
BUG=232449
Review URL: https://chromiumcodereview.appspot.com/21150015
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@215929 0039d316-1c4b-4281-b951-d872f2087c98# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry import test
from telemetry.page import page_set
from webgl_conformance import WebglConformanceValidator
from webgl_conformance import conformance_harness_script
from webgl_conformance import conformance_path
robustness_harness_script = conformance_harness_script + r"""
var robustnessTestHarness = {};
robustnessTestHarness._contextLost = false;
robustnessTestHarness.initialize = function() {
var canvas = document.getElementById('example');
canvas.addEventListener('webglcontextlost', function() {
robustnessTestHarness._contextLost = true;
});
}
robustnessTestHarness.runTestLoop = function() {
// Run the test in a loop until the context is lost.
main();
if (!robustnessTestHarness._contextLost)
window.requestAnimationFrame(robustnessTestHarness.runTestLoop);
else
robustnessTestHarness.notifyFinished();
}
robustnessTestHarness.notifyFinished = function() {
// The test may fail in unpredictable ways depending on when the context is
// lost. We ignore such errors and only require that the browser doesn't
// crash.
webglTestHarness._allTestSucceeded = true;
// Notify test completion after a delay to make sure the browser is able to
// recover from the lost context.
setTimeout(webglTestHarness.notifyFinished, 3000);
}
window.confirm = function() {
robustnessTestHarness.initialize();
robustnessTestHarness.runTestLoop();
return false;
}
window.webglRobustnessTestHarness = robustnessTestHarness;
"""
class WebglRobustness(test.Test):
enabled = False
test = WebglConformanceValidator
def CreatePageSet(self, options):
page_set_dict = {
'description': 'Test cases for WebGL robustness',
'user_agent_type': 'desktop',
'serving_dirs': [''],
'pages': [
{
'url': 'file:///extra/lots-of-polys-example.html',
'script_to_evaluate_on_commit': robustness_harness_script,
'wait_for_javascript_expression': 'webglTestHarness._finished'
}
]
}
return page_set.PageSet.FromDict(page_set_dict, conformance_path)
|
<commit_before><commit_msg>gpu: Add WebGL robustness telemetry test
This patch adds a telemetry-based WebGL robustness test. It executes an
optional test from the WebGL conformance test suite to trigger a GPU
context loss and verifies this doesn't crash the browser.
To run the test for example on Android, execute:
$ content/test/gpu/run_gpu_test --browser=android-chrome webgl_robustness
BUG=232449
Review URL: https://chromiumcodereview.appspot.com/21150015
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@215929 0039d316-1c4b-4281-b951-d872f2087c98<commit_after># Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry import test
from telemetry.page import page_set
from webgl_conformance import WebglConformanceValidator
from webgl_conformance import conformance_harness_script
from webgl_conformance import conformance_path
robustness_harness_script = conformance_harness_script + r"""
var robustnessTestHarness = {};
robustnessTestHarness._contextLost = false;
robustnessTestHarness.initialize = function() {
var canvas = document.getElementById('example');
canvas.addEventListener('webglcontextlost', function() {
robustnessTestHarness._contextLost = true;
});
}
robustnessTestHarness.runTestLoop = function() {
// Run the test in a loop until the context is lost.
main();
if (!robustnessTestHarness._contextLost)
window.requestAnimationFrame(robustnessTestHarness.runTestLoop);
else
robustnessTestHarness.notifyFinished();
}
robustnessTestHarness.notifyFinished = function() {
// The test may fail in unpredictable ways depending on when the context is
// lost. We ignore such errors and only require that the browser doesn't
// crash.
webglTestHarness._allTestSucceeded = true;
// Notify test completion after a delay to make sure the browser is able to
// recover from the lost context.
setTimeout(webglTestHarness.notifyFinished, 3000);
}
window.confirm = function() {
robustnessTestHarness.initialize();
robustnessTestHarness.runTestLoop();
return false;
}
window.webglRobustnessTestHarness = robustnessTestHarness;
"""
class WebglRobustness(test.Test):
enabled = False
test = WebglConformanceValidator
def CreatePageSet(self, options):
page_set_dict = {
'description': 'Test cases for WebGL robustness',
'user_agent_type': 'desktop',
'serving_dirs': [''],
'pages': [
{
'url': 'file:///extra/lots-of-polys-example.html',
'script_to_evaluate_on_commit': robustness_harness_script,
'wait_for_javascript_expression': 'webglTestHarness._finished'
}
]
}
return page_set.PageSet.FromDict(page_set_dict, conformance_path)
|
|
fc245d78542b9468174d0abb282a35272374a626
|
project/teams/migrations/0008_auto_20150907_1914.py
|
project/teams/migrations/0008_auto_20150907_1914.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import project.teams.models
import django.core.validators
class Migration(migrations.Migration):
dependencies = [
('teams', '0007_auto_20150907_1808'),
]
operations = [
migrations.AlterField(
model_name='team',
name='name',
field=models.CharField(help_text=b"Your team's project name!", max_length=50, validators=[django.core.validators.RegexValidator(regex=b'[a-zA-Z0-9_\\-.: ]+', message=b'Names can contain letters, numbers, dashes, periods, colons, and whitespace.'), project.teams.models.slug_validator]),
),
]
|
Add migration for modified RegexValidator
|
Add migration for modified RegexValidator
|
Python
|
mit
|
compsci-hfh/app,jonsimington/app,jonsimington/app,compsci-hfh/app
|
Add migration for modified RegexValidator
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import project.teams.models
import django.core.validators
class Migration(migrations.Migration):
dependencies = [
('teams', '0007_auto_20150907_1808'),
]
operations = [
migrations.AlterField(
model_name='team',
name='name',
field=models.CharField(help_text=b"Your team's project name!", max_length=50, validators=[django.core.validators.RegexValidator(regex=b'[a-zA-Z0-9_\\-.: ]+', message=b'Names can contain letters, numbers, dashes, periods, colons, and whitespace.'), project.teams.models.slug_validator]),
),
]
|
<commit_before><commit_msg>Add migration for modified RegexValidator<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import project.teams.models
import django.core.validators
class Migration(migrations.Migration):
dependencies = [
('teams', '0007_auto_20150907_1808'),
]
operations = [
migrations.AlterField(
model_name='team',
name='name',
field=models.CharField(help_text=b"Your team's project name!", max_length=50, validators=[django.core.validators.RegexValidator(regex=b'[a-zA-Z0-9_\\-.: ]+', message=b'Names can contain letters, numbers, dashes, periods, colons, and whitespace.'), project.teams.models.slug_validator]),
),
]
|
Add migration for modified RegexValidator# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import project.teams.models
import django.core.validators
class Migration(migrations.Migration):
dependencies = [
('teams', '0007_auto_20150907_1808'),
]
operations = [
migrations.AlterField(
model_name='team',
name='name',
field=models.CharField(help_text=b"Your team's project name!", max_length=50, validators=[django.core.validators.RegexValidator(regex=b'[a-zA-Z0-9_\\-.: ]+', message=b'Names can contain letters, numbers, dashes, periods, colons, and whitespace.'), project.teams.models.slug_validator]),
),
]
|
<commit_before><commit_msg>Add migration for modified RegexValidator<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import project.teams.models
import django.core.validators
class Migration(migrations.Migration):
dependencies = [
('teams', '0007_auto_20150907_1808'),
]
operations = [
migrations.AlterField(
model_name='team',
name='name',
field=models.CharField(help_text=b"Your team's project name!", max_length=50, validators=[django.core.validators.RegexValidator(regex=b'[a-zA-Z0-9_\\-.: ]+', message=b'Names can contain letters, numbers, dashes, periods, colons, and whitespace.'), project.teams.models.slug_validator]),
),
]
|
|
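A standalone illustration of the validator defined in the migration above. Note the regex is unanchored, so it only requires at least one allowed character somewhere in the name; this sketch assumes Django is importable:

from django.core.validators import RegexValidator

v = RegexValidator(regex=r'[a-zA-Z0-9_\-.: ]+',
                   message='Names can contain letters, numbers, dashes, periods, colons, and whitespace.')
v('My Team: Alpha-1')  # passes silently
# v('%%%') would raise ValidationError: no allowed character present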
153d7c34d89ec4b9af525abebddad9fa83377594
|
server.py
|
server.py
|
import BaseHTTPServer
import sys
import time
import json
from irc_hooky.github.main import handler as gh_handler
HOST_NAME = sys.argv[1]
PORT_NUMBER = int(sys.argv[2])
def handle_github_hook(payload, headers):
event = {
"X-Hub-Signature": headers.get("X-Hub-Signature"),
"X-Github-Event": headers.get("X-Github-Event"),
"gh-payload": payload
}
gh_handler(event, {})
class LocalIRCHooky(BaseHTTPServer.BaseHTTPRequestHandler):
server_version = "LocalIRCHooky/0.1"
def do_GET(self):
self.send_response(200)
self.end_headers()
def do_POST(self):
length = int(self.headers['Content-Length'])
post_data = self.rfile.read(length)
payload = json.loads(post_data)
if (self.path == "/github"):
handle_github_hook(payload, self.headers)
self.send_response(200)
self.end_headers()
if __name__ == '__main__':
server_class = BaseHTTPServer.HTTPServer
httpd = server_class((HOST_NAME, PORT_NUMBER), LocalIRCHooky)
print(time.asctime(), "Server Starts - %s:%s" % (HOST_NAME, PORT_NUMBER))
try:
httpd.serve_forever()
except KeyboardInterrupt:
pass
httpd.server_close()
print(time.asctime(), "Server Stops - %s:%s" % (HOST_NAME, PORT_NUMBER))
|
Add the ability to do basic testing locally
|
Add the ability to do basic testing locally
|
Python
|
mit
|
marvinpinto/irc-hooky,marvinpinto/irc-hooky
|
Add the ability to do basic testing locally
|
import BaseHTTPServer
import sys
import time
import json
from irc_hooky.github.main import handler as gh_handler
HOST_NAME = sys.argv[1]
PORT_NUMBER = int(sys.argv[2])
def handle_github_hook(payload, headers):
event = {
"X-Hub-Signature": headers.get("X-Hub-Signature"),
"X-Github-Event": headers.get("X-Github-Event"),
"gh-payload": payload
}
gh_handler(event, {})
class LocalIRCHooky(BaseHTTPServer.BaseHTTPRequestHandler):
server_version = "LocalIRCHooky/0.1"
def do_GET(self):
self.send_response(200)
self.end_headers()
def do_POST(self):
length = int(self.headers['Content-Length'])
post_data = self.rfile.read(length)
payload = json.loads(post_data)
if (self.path == "/github"):
handle_github_hook(payload, self.headers)
self.send_response(200)
self.end_headers()
if __name__ == '__main__':
server_class = BaseHTTPServer.HTTPServer
httpd = server_class((HOST_NAME, PORT_NUMBER), LocalIRCHooky)
print(time.asctime(), "Server Starts - %s:%s" % (HOST_NAME, PORT_NUMBER))
try:
httpd.serve_forever()
except KeyboardInterrupt:
pass
httpd.server_close()
print(time.asctime(), "Server Stops - %s:%s" % (HOST_NAME, PORT_NUMBER))
|
<commit_before><commit_msg>Add the ability to do basic testing locally<commit_after>
|
import BaseHTTPServer
import sys
import time
import json
from irc_hooky.github.main import handler as gh_handler
HOST_NAME = sys.argv[1]
PORT_NUMBER = int(sys.argv[2])
def handle_github_hook(payload, headers):
event = {
"X-Hub-Signature": headers.get("X-Hub-Signature"),
"X-Github-Event": headers.get("X-Github-Event"),
"gh-payload": payload
}
gh_handler(event, {})
class LocalIRCHooky(BaseHTTPServer.BaseHTTPRequestHandler):
server_version = "LocalIRCHooky/0.1"
def do_GET(self):
self.send_response(200)
self.end_headers()
def do_POST(self):
length = int(self.headers['Content-Length'])
post_data = self.rfile.read(length)
payload = json.loads(post_data)
if (self.path == "/github"):
handle_github_hook(payload, self.headers)
self.send_response(200)
self.end_headers()
if __name__ == '__main__':
server_class = BaseHTTPServer.HTTPServer
httpd = server_class((HOST_NAME, PORT_NUMBER), LocalIRCHooky)
print(time.asctime(), "Server Starts - %s:%s" % (HOST_NAME, PORT_NUMBER))
try:
httpd.serve_forever()
except KeyboardInterrupt:
pass
httpd.server_close()
print(time.asctime(), "Server Stops - %s:%s" % (HOST_NAME, PORT_NUMBER))
|
Add the ability to do basic testing locallyimport BaseHTTPServer
import sys
import time
import json
from irc_hooky.github.main import handler as gh_handler
HOST_NAME = sys.argv[1]
PORT_NUMBER = int(sys.argv[2])
def handle_github_hook(payload, headers):
event = {
"X-Hub-Signature": headers.get("X-Hub-Signature"),
"X-Github-Event": headers.get("X-Github-Event"),
"gh-payload": payload
}
gh_handler(event, {})
class LocalIRCHooky(BaseHTTPServer.BaseHTTPRequestHandler):
server_version = "LocalIRCHooky/0.1"
def do_GET(self):
self.send_response(200)
self.end_headers()
def do_POST(self):
length = int(self.headers['Content-Length'])
post_data = self.rfile.read(length)
payload = json.loads(post_data)
if (self.path == "/github"):
handle_github_hook(payload, self.headers)
self.send_response(200)
self.end_headers()
if __name__ == '__main__':
server_class = BaseHTTPServer.HTTPServer
httpd = server_class((HOST_NAME, PORT_NUMBER), LocalIRCHooky)
print(time.asctime(), "Server Starts - %s:%s" % (HOST_NAME, PORT_NUMBER))
try:
httpd.serve_forever()
except KeyboardInterrupt:
pass
httpd.server_close()
print(time.asctime(), "Server Stops - %s:%s" % (HOST_NAME, PORT_NUMBER))
|
<commit_before><commit_msg>Add the ability to do basic testing locally<commit_after>import BaseHTTPServer
import sys
import time
import json
from irc_hooky.github.main import handler as gh_handler
HOST_NAME = sys.argv[1]
PORT_NUMBER = int(sys.argv[2])
def handle_github_hook(payload, headers):
event = {
"X-Hub-Signature": headers.get("X-Hub-Signature"),
"X-Github-Event": headers.get("X-Github-Event"),
"gh-payload": payload
}
gh_handler(event, {})
class LocalIRCHooky(BaseHTTPServer.BaseHTTPRequestHandler):
server_version = "LocalIRCHooky/0.1"
def do_GET(self):
self.send_response(200)
self.end_headers()
def do_POST(self):
length = int(self.headers['Content-Length'])
post_data = self.rfile.read(length)
payload = json.loads(post_data)
if (self.path == "/github"):
handle_github_hook(payload, self.headers)
self.send_response(200)
self.end_headers()
if __name__ == '__main__':
server_class = BaseHTTPServer.HTTPServer
httpd = server_class((HOST_NAME, PORT_NUMBER), LocalIRCHooky)
print(time.asctime(), "Server Starts - %s:%s" % (HOST_NAME, PORT_NUMBER))
try:
httpd.serve_forever()
except KeyboardInterrupt:
pass
httpd.server_close()
print(time.asctime(), "Server Stops - %s:%s" % (HOST_NAME, PORT_NUMBER))
|
|
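To poke the local server above (started with e.g. python server.py localhost 8000), a minimal Python 2 client might look like this. The payload is made up and only needs to be valid JSON:

import json
import urllib2

req = urllib2.Request(
    'http://localhost:8000/github',
    data=json.dumps({'action': 'opened'}),
    headers={'X-Github-Event': 'issues', 'X-Hub-Signature': 'sha1=dummy'},
)
print(urllib2.urlopen(req).getcode())  # expect 200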
d08b0a11176225302b01657e9d396066923ae7ce
|
polyfilter.py
|
polyfilter.py
|
#!/usr/bin/python
# Polyfilter - filters polygon files for very small polygons
import sys
f = open(sys.argv[1])
def PolygonArea(corners):
    n = len(corners)  # number of corners
area = 0.0
for i in range(n):
j = (i + 1) % n
area += corners[i][0] * corners[j][1]
area -= corners[j][0] * corners[i][1]
area = abs(area) / 2.0
return area
allAreas = []
maxArea = 0
while True:
line = f.readline().strip()
if line=="": break
points = line.split(" ")
corners = []
for p in points:
ip = map(float,p.split(","))
corners.append(ip)
area = PolygonArea(corners)
if (area > 500):
print " ".join(points)
|
Add filter program to remove small polygons
|
Add filter program to remove small polygons
|
Python
|
mit
|
jmacarthur/ld34,jmacarthur/ld34,jmacarthur/ld34
|
Add filter program to remove small polygons
|
#!/usr/bin/python
# Polyfilter - filters polygon files for very small polygons
import sys
f = open(sys.argv[1])
def PolygonArea(corners):
    n = len(corners)  # number of corners
area = 0.0
for i in range(n):
j = (i + 1) % n
area += corners[i][0] * corners[j][1]
area -= corners[j][0] * corners[i][1]
area = abs(area) / 2.0
return area
allAreas = []
maxArea = 0
while True:
line = f.readline().strip()
if line=="": break
points = line.split(" ")
corners = []
for p in points:
ip = map(float,p.split(","))
corners.append(ip)
area = PolygonArea(corners)
if (area > 500):
print " ".join(points)
|
<commit_before><commit_msg>Add filter program to remove small polygons<commit_after>
|
#!/usr/bin/python
# Polyfilter - filters polygon files for very small polygons
import sys
f = open(sys.argv[1])
def PolygonArea(corners):
    n = len(corners)  # number of corners
area = 0.0
for i in range(n):
j = (i + 1) % n
area += corners[i][0] * corners[j][1]
area -= corners[j][0] * corners[i][1]
area = abs(area) / 2.0
return area
allAreas = []
maxArea = 0
while True:
line = f.readline().strip()
if line=="": break
points = line.split(" ")
corners = []
for p in points:
ip = map(float,p.split(","))
corners.append(ip)
area = PolygonArea(corners)
if (area > 500):
print " ".join(points)
|
Add filter program to remove small polygons#!/usr/bin/python
# Polyfilter - filters polygon files for very small polygons
import sys
f = open(sys.argv[1])
def PolygonArea(corners):
    n = len(corners)  # number of corners
area = 0.0
for i in range(n):
j = (i + 1) % n
area += corners[i][0] * corners[j][1]
area -= corners[j][0] * corners[i][1]
area = abs(area) / 2.0
return area
allAreas = []
maxArea = 0
while True:
line = f.readline().strip()
if line=="": break
points = line.split(" ")
corners = []
for p in points:
ip = map(float,p.split(","))
corners.append(ip)
area = PolygonArea(corners)
if (area > 500):
print " ".join(points)
|
<commit_before><commit_msg>Add filter program to remove small polygons<commit_after>#!/usr/bin/python
# Polyfilter - filters polygon files for very small polygons
import sys
f = open(sys.argv[1])
def PolygonArea(corners):
    n = len(corners)  # number of corners
area = 0.0
for i in range(n):
j = (i + 1) % n
area += corners[i][0] * corners[j][1]
area -= corners[j][0] * corners[i][1]
area = abs(area) / 2.0
return area
allAreas = []
maxArea = 0
while True:
line = f.readline().strip()
if line=="": break
points = line.split(" ")
corners = []
for p in points:
ip = map(float,p.split(","))
corners.append(ip)
area = PolygonArea(corners)
if (area > 500):
print " ".join(points)
|
|
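PolygonArea above is the shoelace formula. A quick self-contained sanity check (mirroring the function rather than importing it), tied to the script's area threshold of 500:

def polygon_area(corners):
    # Shoelace formula, same computation as PolygonArea above.
    n = len(corners)
    area = 0.0
    for i in range(n):
        j = (i + 1) % n
        area += corners[i][0] * corners[j][1] - corners[j][0] * corners[i][1]
    return abs(area) / 2.0

assert polygon_area([(0, 0), (40, 0), (40, 30), (0, 30)]) == 1200.0  # kept (> 500)
assert polygon_area([(0, 0), (10, 0), (10, 10), (0, 10)]) == 100.0   # filtered out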
b877cbafc3bf3a1109b168a2a9a7c0bb1da476da
|
problem_33.py
|
problem_33.py
|
from time import time
import fractions
def digit_cancel():
digit_cancelling_fractions = []
for numerator in range(10, 100):
for denominator in range(numerator+1, 100):
if not (numerator % 10 or denominator % 10):
continue
frac = fractions.Fraction(numerator, denominator)
for digit in str(numerator):
if digit in str(denominator):
num = int(str(numerator).replace(digit, '', 1))
denom = int(str(denominator).replace(digit, '', 1))
try:
if frac == fractions.Fraction(num, denom):
digit_cancelling_fractions.append((numerator, denominator, frac))
except ZeroDivisionError:
break
return digit_cancelling_fractions
t = time()
digit_cancelling_fractions = digit_cancel()
print 'Fractions:'
product = fractions.Fraction(1, 1)
for fr in digit_cancelling_fractions:
product *= fr[2]
print '\t{}/{} = {}'.format(fr[0], fr[1], fr[2])
print 'Product:', product
print 'Time:', time() - t
|
Add problem 33, digit cancelling fractions
|
Add problem 33, digit cancelling fractions
|
Python
|
mit
|
dimkarakostas/project-euler
|
Add problem 33, digit cancelling fractions
|
from time import time
import fractions
def digit_cancel():
digit_cancelling_fractions = []
for numerator in range(10, 100):
for denominator in range(numerator+1, 100):
if not (numerator % 10 or denominator % 10):
continue
frac = fractions.Fraction(numerator, denominator)
for digit in str(numerator):
if digit in str(denominator):
num = int(str(numerator).replace(digit, '', 1))
denom = int(str(denominator).replace(digit, '', 1))
try:
if frac == fractions.Fraction(num, denom):
digit_cancelling_fractions.append((numerator, denominator, frac))
except ZeroDivisionError:
break
return digit_cancelling_fractions
t = time()
digit_cancelling_fractions = digit_cancel()
print 'Fractions:'
product = fractions.Fraction(1, 1)
for fr in digit_cancelling_fractions:
product *= fr[2]
print '\t{}/{} = {}'.format(fr[0], fr[1], fr[2])
print 'Product:', product
print 'Time:', time() - t
|
<commit_before><commit_msg>Add problem 33, digit cancelling fractions<commit_after>
|
from time import time
import fractions
def digit_cancel():
digit_cancelling_fractions = []
for numerator in range(10, 100):
for denominator in range(numerator+1, 100):
if not (numerator % 10 or denominator % 10):
continue
frac = fractions.Fraction(numerator, denominator)
for digit in str(numerator):
if digit in str(denominator):
num = int(str(numerator).replace(digit, '', 1))
denom = int(str(denominator).replace(digit, '', 1))
try:
if frac == fractions.Fraction(num, denom):
digit_cancelling_fractions.append((numerator, denominator, frac))
except ZeroDivisionError:
break
return digit_cancelling_fractions
t = time()
digit_cancelling_fractions = digit_cancel()
print 'Fractions:'
product = fractions.Fraction(1, 1)
for fr in digit_cancelling_fractions:
product *= fr[2]
print '\t{}/{} = {}'.format(fr[0], fr[1], fr[2])
print 'Product:', product
print 'Time:', time() - t
|
Add problem 33, digit cancelling fractionsfrom time import time
import fractions
def digit_cancel():
digit_cancelling_fractions = []
for numerator in range(10, 100):
for denominator in range(numerator+1, 100):
if not (numerator % 10 or denominator % 10):
continue
frac = fractions.Fraction(numerator, denominator)
for digit in str(numerator):
if digit in str(denominator):
num = int(str(numerator).replace(digit, '', 1))
denom = int(str(denominator).replace(digit, '', 1))
try:
if frac == fractions.Fraction(num, denom):
digit_cancelling_fractions.append((numerator, denominator, frac))
except ZeroDivisionError:
break
return digit_cancelling_fractions
t = time()
digit_cancelling_fractions = digit_cancel()
print 'Fractions:'
product = fractions.Fraction(1, 1)
for fr in digit_cancelling_fractions:
product *= fr[2]
print '\t{}/{} = {}'.format(fr[0], fr[1], fr[2])
print 'Product:', product
print 'Time:', time() - t
|
<commit_before><commit_msg>Add problem 33, digit cancelling fractions<commit_after>from time import time
import fractions
def digit_cancel():
digit_cancelling_fractions = []
for numerator in range(10, 100):
for denominator in range(numerator+1, 100):
if not (numerator % 10 or denominator % 10):
continue
frac = fractions.Fraction(numerator, denominator)
for digit in str(numerator):
if digit in str(denominator):
num = int(str(numerator).replace(digit, '', 1))
denom = int(str(denominator).replace(digit, '', 1))
try:
if frac == fractions.Fraction(num, denom):
digit_cancelling_fractions.append((numerator, denominator, frac))
except ZeroDivisionError:
break
return digit_cancelling_fractions
t = time()
digit_cancelling_fractions = digit_cancel()
print 'Fractions:'
product = fractions.Fraction(1, 1)
for fr in digit_cancelling_fractions:
product *= fr[2]
print '\t{}/{} = {}'.format(fr[0], fr[1], fr[2])
print 'Product:', product
print 'Time:', time() - t
|
|
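For reference, the search above should surface exactly the four non-trivial curious fractions, 16/64, 19/95, 26/65 and 49/98; their product reduces to 1/100, so the Project Euler 33 answer is the denominator in lowest terms, 100. A Python 3 sketch of the same search, keeping the two-digit bounds and trailing-zero exclusion:

from fractions import Fraction

def digit_cancelling_fractions():
    found = []
    for n in range(10, 100):
        for d in range(n + 1, 100):
            if n % 10 == 0 or d % 10 == 0:
                continue  # skip trivial cases like 30/50
            for digit in str(n):
                if digit in str(d):
                    n2 = int(str(n).replace(digit, '', 1))
                    d2 = int(str(d).replace(digit, '', 1))
                    if d2 and Fraction(n, d) == Fraction(n2, d2):
                        found.append(Fraction(n, d))
    return found

product = Fraction(1, 1)
for frac in digit_cancelling_fractions():
    product *= frac
print(product.denominator)  # expected: 100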
9ebe165f7534539012eddb70e8e4ba5b73280ce0
|
scripts/sendfile.py
|
scripts/sendfile.py
|
import os, socket, sys, struct, getopt
def sendfile(filename, ip):
statinfo = os.stat(filename)
fbiinfo = struct.pack('!q', statinfo.st_size)
with open(filename, 'rb') as f:
sock = socket.socket()
sock.connect((ip, 5000))
sock.send(fbiinfo)
while True:
chunk = f.read(16384)
if not chunk:
break # EOF
sock.sendall(chunk)
sock.close()
def show_usage_exit():
print('sendfile.py -f <inputfile> -i <ip address>')
sys.exit(2)
def main(argv):
filename = None
ip = None
try:
opts, args = getopt.getopt(argv, "hf:i:")
except getopt.GetoptError:
show_usage_exit()
for opt, arg in opts:
if opt == '-h':
show_usage_exit()
elif opt in ("-f", "--file"):
filename = arg
elif opt in ("-i", "--ip"):
ip = arg
if not (filename and ip):
show_usage_exit()
sendfile(filename, ip)
if __name__ == "__main__":
main(sys.argv[1:])
|
Add script for network CIA installation using FBI
|
Add script for network CIA installation using FBI
|
Python
|
mit
|
cpp3ds/cpp3ds,cpp3ds/cpp3ds,cpp3ds/cpp3ds,cpp3ds/cpp3ds
|
Add script for network CIA installation using FBI
|
import os, socket, sys, struct, getopt
def sendfile(filename, ip):
statinfo = os.stat(filename)
fbiinfo = struct.pack('!q', statinfo.st_size)
with open(filename, 'rb') as f:
sock = socket.socket()
sock.connect((ip, 5000))
sock.send(fbiinfo)
while True:
chunk = f.read(16384)
if not chunk:
break # EOF
sock.sendall(chunk)
sock.close()
def show_usage_exit():
print('sendfile.py -f <inputfile> -i <ip address>')
sys.exit(2)
def main(argv):
filename = None
ip = None
try:
opts, args = getopt.getopt(argv, "hf:i:")
except getopt.GetoptError:
show_usage_exit()
for opt, arg in opts:
if opt == '-h':
show_usage_exit()
elif opt in ("-f", "--file"):
filename = arg
elif opt in ("-i", "--ip"):
ip = arg
if not (filename and ip):
show_usage_exit()
sendfile(filename, ip)
if __name__ == "__main__":
main(sys.argv[1:])
|
<commit_before><commit_msg>Add script for network CIA installation using FBI<commit_after>
|
import os, socket, sys, struct, getopt
def sendfile(filename, ip):
statinfo = os.stat(filename)
fbiinfo = struct.pack('!q', statinfo.st_size)
with open(filename, 'rb') as f:
sock = socket.socket()
sock.connect((ip, 5000))
sock.send(fbiinfo)
while True:
chunk = f.read(16384)
if not chunk:
break # EOF
sock.sendall(chunk)
sock.close()
def show_usage_exit():
print('sendfile.py -f <inputfile> -i <ip address>')
sys.exit(2)
def main(argv):
filename = None
ip = None
try:
opts, args = getopt.getopt(argv, "hf:i:")
except getopt.GetoptError:
show_usage_exit()
for opt, arg in opts:
if opt == '-h':
show_usage_exit()
elif opt in ("-f", "--file"):
filename = arg
elif opt in ("-i", "--ip"):
ip = arg
if not (filename and ip):
show_usage_exit()
sendfile(filename, ip)
if __name__ == "__main__":
main(sys.argv[1:])
|
Add script for network CIA installation using FBIimport os, socket, sys, struct, getopt
def sendfile(filename, ip):
statinfo = os.stat(filename)
fbiinfo = struct.pack('!q', statinfo.st_size)
with open(filename, 'rb') as f:
sock = socket.socket()
sock.connect((ip, 5000))
sock.send(fbiinfo)
while True:
chunk = f.read(16384)
if not chunk:
break # EOF
sock.sendall(chunk)
sock.close()
def show_usage_exit():
print('sendfile.py -f <inputfile> -i <ip address>')
sys.exit(2)
def main(argv):
filename = None
ip = None
try:
opts, args = getopt.getopt(argv, "hf:i:")
except getopt.GetoptError:
show_usage_exit()
for opt, arg in opts:
if opt == '-h':
show_usage_exit()
elif opt in ("-f", "--file"):
filename = arg
elif opt in ("-i", "--ip"):
ip = arg
if not (filename and ip):
show_usage_exit()
sendfile(filename, ip)
if __name__ == "__main__":
main(sys.argv[1:])
|
<commit_before><commit_msg>Add script for network CIA installation using FBI<commit_after>import os, socket, sys, struct, getopt
def sendfile(filename, ip):
statinfo = os.stat(filename)
fbiinfo = struct.pack('!q', statinfo.st_size)
with open(filename, 'rb') as f:
sock = socket.socket()
sock.connect((ip, 5000))
sock.send(fbiinfo)
while True:
chunk = f.read(16384)
if not chunk:
break # EOF
sock.sendall(chunk)
sock.close()
def show_usage_exit():
print('sendfile.py -f <inputfile> -i <ip address>')
sys.exit(2)
def main(argv):
filename = None
ip = None
try:
opts, args = getopt.getopt(argv, "hf:i:")
except getopt.GetoptError:
show_usage_exit()
for opt, arg in opts:
if opt == '-h':
show_usage_exit()
elif opt in ("-f", "--file"):
filename = arg
elif opt in ("-i", "--ip"):
ip = arg
if not (filename and ip):
show_usage_exit()
sendfile(filename, ip)
if __name__ == "__main__":
main(sys.argv[1:])
|
|
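As the script implies, FBI's network-install protocol here is minimal: connect to port 5000, send the file size as an 8-byte big-endian integer (struct.pack('!q', ...)), then stream the raw bytes. One wrinkle in the original: sock.send() is not guaranteed to write the whole buffer, so sendall() is the safer call for the header as well as the chunks. A Python 3 sketch using argparse, with option names mirroring the original:

import argparse
import os
import socket
import struct

def send_file(filename, ip, port=5000):
    size = os.path.getsize(filename)
    with open(filename, 'rb') as f, socket.create_connection((ip, port)) as sock:
        sock.sendall(struct.pack('!q', size))  # 8-byte big-endian length header
        while True:
            chunk = f.read(16384)
            if not chunk:
                break  # EOF
            sock.sendall(chunk)

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Send a file to an FBI network-install listener')
    parser.add_argument('-f', '--file', required=True, help='input file')
    parser.add_argument('-i', '--ip', required=True, help='target IP address')
    args = parser.parse_args()
    send_file(args.file, args.ip)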
3008cfd977fa265c6069231436f103e6be801929
|
db/migrations/dedupe_vimorg_id.py
|
db/migrations/dedupe_vimorg_id.py
|
"""De-duplicate the vimorg_id column in the plugins table."""
import re
import rethinkdb as r
import db.plugins
import db.util
r_conn = db.util.r_conn
if __name__ == '__main__':
grouped_plugins = r.table('plugins').group('vimorg_id').run(r_conn())
for vimorg_id, plugins in grouped_plugins.iteritems():
if not vimorg_id:
continue
# We only need to concern ourselves with duplicated vim.org IDs
if len(plugins) == 1:
continue
print '\nPlugin with vim.org ID %s occurs %s times' % (vimorg_id,
len(plugins))
to_delete = []
to_keep = []
for plugin in plugins:
# Plugins scraped from github.com/vim-scripts that don't have
# a vimorg_url must have not been successfully matched to the
# corresponding vim.org plugin. These can be removed.
if (plugin['github_vim_scripts_repo_name'] and not
plugin['vimorg_url']):
print ('Delete %s because it is an unmatched '
'github.com/vim-scripts plugin' % (plugin['slug']))
to_delete.append(plugin)
continue
# If no GitHub info is available for this plugin, it's probably
# a duplicate vim.org plugin scraped after the original
if (not plugin['github_owner'] and not
plugin['github_vim_scripts_repo_name']):
print ('Delete %s because it is an extra vim.org plugin' %
plugin['slug'])
to_delete.append(plugin)
continue
# Otherwise, we have an original plugin that should be preserved
print 'Keep plugin %s' % plugin['slug']
to_keep.append(plugin)
# We expect to keep at least two plugins because plugins with the same
# vim.org ID can only accumulate when there is already a duplicate
# (these original duplicates arose due to mistakenly associating
# vim.org IDs with GitHub repos whose homepages were vim.org URLs).
assert len(to_keep) >= 2
# Delete the newly-scraped extra plugins that accumulated due to
# existing duplicates.
for plugin in to_delete:
r.table('plugins').get(plugin['slug']).delete().run(r_conn())
# Out of the ones to keep, only one should get the vim.org ID. Pick the
# one that has the most users.
most_used = max(to_keep, key=lambda p: max(p['github_bundles'],
p['github_vim_scripts_bundles']))
for plugin in to_keep:
if plugin['slug'] != most_used['slug']:
r.table('plugins').get(plugin['slug']).update(
{'vimorg_id': ''}).run(r_conn())
print 'Plugin %s gets to keep its vim.org ID' % most_used['slug']
|
Add script to fix plugins with vim.org ID duplicates
|
Add script to fix plugins with vim.org ID duplicates
Summary:
If you look at vimawesome.com/?p=3 right now, you will see many duplicated
"taglist.vim" plugins.
A long time ago, when we scraped a github repo whose homepage was a vim.org
plugin, we would assume that was the definitive github repo associated with
that vim.org plugin and set that repo's vim.org ID. But it turns out, of
course, that there are many different github repos that all set their homepage
to the same vim.org plugin. This led to multiple plugins with the same vim.org
ID in our DB, which violates an invariant assumed throughout the codebase that
vim.org ID is unique in the plugins table.
This was fixed the night before launching, but I forgot/didn't have time to update the data,
and so the data was in an inconsistent state.
With the invariant violated, scraping the same set of sources no longer
guaranteed idempotence, and so each new scrape would accumulate new duplicates.
Basically, when the scraper tries to match a source that has a vim.org ID and
it sees multiple existing plugins with the same vim.org ID, it doesn't know
which one to match to, so it inserts a new row instead of attempting a match.
This led to the proliferation of duplicates you see now.
So this script removes the extra accumulated plugins, and also fixes duplicate
vim.org IDs by only allowing the plugin with the most users to keep its vim.org
ID.
Test Plan:
Locally. Before fix, ran
`PYTHONPATH=. python tools/scrape/scrape.py -s vim.org`, and saw that a bunch of new plugins were inserted when they should
have been associated. Ran
`PYTHONPATH=. python tools/scrape/scrape.py -s github-vim-scripts`,
and saw new plugins were inserted when they should've been associated.
After fix, ran the two commands above again, and ensured no new plugins were
inserted. Also ensured there were no duplicated vim.org IDs
Also did a dry run of the script and looked at script output. For the first
6 plugins or so, manually verified the script made the right decisions in which
plugins to delete, which to keep, and which to assign the vim.org ID.
Ran the script twice to ensure the second time it outputted nothing (meaning it successfully de-duped). Also ran the script after re-scraping to verify there were no new dupes introduced.
Reviewers: xymostech, spicyj
Reviewed By: spicyj
Differential Revision: http://phabricator.benalpert.com/D212
|
Python
|
mit
|
vim-awesome/vim-awesome,divad12/vim-awesome,starcraftman/vim-awesome,shaialon/vim-awesome,starcraftman/vim-awesome,shaialon/vim-awesome,vim-awesome/vim-awesome,divad12/vim-awesome,vim-awesome/vim-awesome,starcraftman/vim-awesome,jonafato/vim-awesome,jonafato/vim-awesome,divad12/vim-awesome,jonafato/vim-awesome,vim-awesome/vim-awesome,jonafato/vim-awesome,vim-awesome/vim-awesome,shaialon/vim-awesome,shaialon/vim-awesome,starcraftman/vim-awesome,divad12/vim-awesome
|
Add script to fix plugins with vim.org ID duplicates
Summary:
If you look at vimawesome.com/?p=3 right now, you will see many duplicated
"taglist.vim" plugins.
A long time ago, when we scraped a github repo whose homepage was a vim.org
plugin, we would assume that was the definitive github repo associated with
that vim.org plugin and set that repo's vim.org ID. But it turns out, of
course, that there are many different github repos that all set their homepage
to the same vim.org plugin. This led to multiple plugins with the same vim.org
ID in our DB, which violates an invariant assumed throughout the codebase that
vim.org ID is unique in the plugins table.
This was fixed the night before launching, but I forgot/didn't have time to update the data,
and so the data was in an inconsistent state.
With the invariant violated, scraping the same set of sources no longer
guaranteed idempotence, and so each new scrape would accumulate new duplicates.
Basically, when the scraper tries to match a source that has a vim.org ID and
it sees multiple existing plugins with the same vim.org ID, it doesn't know
which one to match to, so it inserts a new row instead of attempting a match.
This led to the proliferation of duplicates you see now.
So this script removes the extra accumulated plugins, and also fixes duplicate
vim.org IDs by only allowing the plugin with the most users to keep its vim.org
ID.
Test Plan:
Locally. Before fix, ran
`PYTHONPATH=. python tools/scrape/scrape.py -s vim.org`, and saw that a bunch of new plugins were inserted when they should
have been associated. Ran
`PYTHONPATH=. python tools/scrape/scrape.py -s github-vim-scripts`,
and saw new plugins were inserted when they should've been associated.
After fix, ran the two commands above again, and ensured no new plugins were
inserted. Also ensured there were no duplicated vim.org IDs
Also did a dry run of the script and looked at script output. For the first
6 plugins or so, manually verified the script made the right decisions in which
plugins to delete, which to keep, and which to assign the vim.org ID.
Ran the script twice to ensure the second time it outputted nothing (meaning it successfully de-duped). Also ran the script after re-scraping to verify there were no new dupes introduced.
Reviewers: xymostech, spicyj
Reviewed By: spicyj
Differential Revision: http://phabricator.benalpert.com/D212
|
"""De-duplicate the vimorg_id column in the plugins table."""
import re
import rethinkdb as r
import db.plugins
import db.util
r_conn = db.util.r_conn
if __name__ == '__main__':
grouped_plugins = r.table('plugins').group('vimorg_id').run(r_conn())
for vimorg_id, plugins in grouped_plugins.iteritems():
if not vimorg_id:
continue
# We only need to concern ourselves with duplicated vim.org IDs
if len(plugins) == 1:
continue
print '\nPlugin with vim.org ID %s occurs %s times' % (vimorg_id,
len(plugins))
to_delete = []
to_keep = []
for plugin in plugins:
# Plugins scraped from github.com/vim-scripts that don't have
# a vimorg_url must have not been successfully matched to the
# corresponding vim.org plugin. These can be removed.
if (plugin['github_vim_scripts_repo_name'] and not
plugin['vimorg_url']):
print ('Delete %s because it is an unmatched '
'github.com/vim-scripts plugin' % (plugin['slug']))
to_delete.append(plugin)
continue
# If no GitHub info is available for this plugin, it's probably
# a duplicate vim.org plugin scraped after the original
if (not plugin['github_owner'] and not
plugin['github_vim_scripts_repo_name']):
print ('Delete %s because it is an extra vim.org plugin' %
plugin['slug'])
to_delete.append(plugin)
continue
# Otherwise, we have an original plugin that should be preserved
print 'Keep plugin %s' % plugin['slug']
to_keep.append(plugin)
# We expect to keep at least two plugins because plugins with the same
# vim.org ID can only accumulate when there is already a duplicate
# (these original duplicates arose due to mistakenly associating
# vim.org IDs with GitHub repos whose homepages were vim.org URLs).
assert len(to_keep) >= 2
# Delete the newly-scraped extra plugins that accumulated due to
# existing duplicates.
for plugin in to_delete:
r.table('plugins').get(plugin['slug']).delete().run(r_conn())
# Out of the ones to keep, only one should get the vim.org ID. Pick the
# one that has the most users.
most_used = max(to_keep, key=lambda p: max(p['github_bundles'],
p['github_vim_scripts_bundles']))
for plugin in to_keep:
if plugin['slug'] != most_used['slug']:
r.table('plugins').get(plugin['slug']).update(
{'vimorg_id': ''}).run(r_conn())
print 'Plugin %s gets to keep its vim.org ID' % most_used['slug']
|
<commit_before><commit_msg>Add script to fix plugins with vim.org ID duplicates
Summary:
If you look at vimawesome.com/?p=3 right now, you will see many duplicated
"taglist.vim" plugins.
A long time ago, when we scraped a github repo whose homepage was a vim.org
plugin, we would assume that was the definitive github repo associated with
that vim.org plugin and set that repo's vim.org ID. But it turns out, of
course, that there are many different github repos that all set their homepage
to the same vim.org plugin. This led to multiple plugins with the same vim.org
ID in our DB, which violates an invariant assumed throughout the codebase that
vim.org ID is unique in the plugins table.
This was fixed the night before launching, but I forgot/didn't have time to update the data,
and so the data was in an inconsistent state.
With the invariant violated, scraping the same set of sources no longer
guaranteed idempotence, and so each new scrape would accumulate new duplicates.
Basically, when the scraper tries to match a source that has a vim.org ID and
it sees multiple existing plugins with the same vim.org ID, it doesn't know
which one to match to, so it inserts a new row instead of attempting a match.
This led to the proliferation of duplicates you see now.
So this script removes the extra accumulated plugins, and also fixes duplicate
vim.org IDs by only allowing the plugin with the most users to keep its vim.org
ID.
Test Plan:
Locally. Before fix, ran
`PYTHONPATH=. python tools/scrape/scrape.py -s vim.org`, and saw that a bunch of new plugins were inserted when they should
have been associated. Ran
`PYTHONPATH=. python tools/scrape/scrape.py -s github-vim-scripts`,
and saw new plugins were inserted when they should've been associated.
After fix, ran the two commands above again, and ensured no new plugins were
inserted. Also ensured there were no duplicated vim.org IDs
Also did a dry run of the script and looked at script output. For the first
6 plugins or so, manually verified the script made the right decisions in which
plugins to delete, which to keep, and which to assign the vim.org ID.
Ran the script twice to ensure the second time it outputted nothing (meaning it successfully de-duped). Also ran the script after re-scraping to verify there were no new dupes introduced.
Reviewers: xymostech, spicyj
Reviewed By: spicyj
Differential Revision: http://phabricator.benalpert.com/D212<commit_after>
|
"""De-duplicate the vimorg_id column in the plugins table."""
import re
import rethinkdb as r
import db.plugins
import db.util
r_conn = db.util.r_conn
if __name__ == '__main__':
grouped_plugins = r.table('plugins').group('vimorg_id').run(r_conn())
for vimorg_id, plugins in grouped_plugins.iteritems():
if not vimorg_id:
continue
# We only need to concern ourselves with duplicated vim.org IDs
if len(plugins) == 1:
continue
print '\nPlugin with vim.org ID %s occurs %s times' % (vimorg_id,
len(plugins))
to_delete = []
to_keep = []
for plugin in plugins:
# Plugins scraped from github.com/vim-scripts that don't have
# a vimorg_url must have not been successfully matched to the
# corresponding vim.org plugin. These can be removed.
if (plugin['github_vim_scripts_repo_name'] and not
plugin['vimorg_url']):
print ('Delete %s because it is an unmatched '
'github.com/vim-scripts plugin' % (plugin['slug']))
to_delete.append(plugin)
continue
# If no GitHub info is available for this plugin, it's probably
# a duplicate vim.org plugin scraped after the original
if (not plugin['github_owner'] and not
plugin['github_vim_scripts_repo_name']):
print ('Delete %s because it is an extra vim.org plugin' %
plugin['slug'])
to_delete.append(plugin)
continue
# Otherwise, we have an original plugin that should be preserved
print 'Keep plugin %s' % plugin['slug']
to_keep.append(plugin)
# We expect to keep at least two plugins because plugins with the same
# vim.org ID can only accumulate when there is already a duplicate
# (these original duplicates arose due to mistakenly associating
# vim.org IDs with GitHub repos whose homepages were vim.org URLs).
assert len(to_keep) >= 2
# Delete the newly-scraped extra plugins that accumulated due to
# existing duplicates.
for plugin in to_delete:
r.table('plugins').get(plugin['slug']).delete().run(r_conn())
# Out of the ones to keep, only one should get the vim.org ID. Pick the
# one that has the most users.
most_used = max(to_keep, key=lambda p: max(p['github_bundles'],
p['github_vim_scripts_bundles']))
for plugin in to_keep:
if plugin['slug'] != most_used['slug']:
r.table('plugins').get(plugin['slug']).update(
{'vimorg_id': ''}).run(r_conn())
print 'Plugin %s gets to keep its vim.org ID' % most_used['slug']
|
Add script to fix plugins with vim.org ID duplicates
Summary:
If you look at vimawesome.com/?p=3 right now, you will see many duplicated
"taglist.vim" plugins.
A long time ago, when we scraped a github repo whose homepage was a vim.org
plugin, we would assume that was the definitive github repo associated with
that vim.org plugin and set that repo's vim.org ID. But it turns out, of
course, that there are many different github repos that all set their homepage
to the same vim.org plugin. This led to multiple plugins with the same vim.org
ID in our DB, which violates an invariant assumed throughout the codebase that
vim.org ID is unique in the plugins table.
This was fixed the night before launching, but I forgot/didn't have time to update the data,
and so the data was in an inconsistent state.
With the invariant violated, scraping the same set of sources no longer
guaranteed idempotence, and so each new scrape would accumulate new duplicates.
Basically, when the scraper tries to match a source that has a vim.org ID and
it sees multiple existing plugins with the same vim.org ID, it doesn't know
which one to match to, so it inserts a new row instead of attempting a match.
This led to the proliferation of duplicates you see now.
So this script removes the extra accumulated plugins, and also fixes duplicate
vim.org IDs by only allowing the plugin with the most users to keep its vim.org
ID.
Test Plan:
Locally. Before fix, ran
`PYTHONPATH=. python tools/scrape/scrape.py -s vim.org`, and saw that a bunch of new plugins were inserted when they should
have been associated. Ran
`PYTHONPATH=. python tools/scrape/scrape.py -s github-vim-scripts`,
and saw new plugins were inserted when they should've been associated.
After fix, ran the two commands above again, and ensured no new plugins were
inserted. Also ensured there were no duplicated vim.org IDs
Also did a dry run of the script and looked at script output. For the first
6 plugins or so, manually verified the script made the right decisions in which
plugins to delete, which to keep, and which to assign the vim.org ID.
Ran the script twice to ensure the second time it outputted nothing (meaning it successfully de-duped). Also ran the script after re-scraping to verify there were no new dupes introduced.
Reviewers: xymostech, spicyj
Reviewed By: spicyj
Differential Revision: http://phabricator.benalpert.com/D212"""De-duplicate the vimorg_id column in the plugins table."""
import re
import rethinkdb as r
import db.plugins
import db.util
r_conn = db.util.r_conn
if __name__ == '__main__':
grouped_plugins = r.table('plugins').group('vimorg_id').run(r_conn())
for vimorg_id, plugins in grouped_plugins.iteritems():
if not vimorg_id:
continue
# We only need to concern ourselves with duplicated vim.org IDs
if len(plugins) == 1:
continue
print '\nPlugin with vim.org ID %s occurs %s times' % (vimorg_id,
len(plugins))
to_delete = []
to_keep = []
for plugin in plugins:
# Plugins scraped from github.com/vim-scripts that don't have
# a vimorg_url must have not been successfully matched to the
# corresponding vim.org plugin. These can be removed.
if (plugin['github_vim_scripts_repo_name'] and not
plugin['vimorg_url']):
print ('Delete %s because it is an unmatched '
'github.com/vim-scripts plugin' % (plugin['slug']))
to_delete.append(plugin)
continue
# If no GitHub info is available for this plugin, it's probably
# a duplicate vim.org plugin scraped after the original
if (not plugin['github_owner'] and not
plugin['github_vim_scripts_repo_name']):
print ('Delete %s because it is an extra vim.org plugin' %
plugin['slug'])
to_delete.append(plugin)
continue
# Otherwise, we have an original plugin that should be preserved
print 'Keep plugin %s' % plugin['slug']
to_keep.append(plugin)
# We expect to keep at least two plugins because plugins with the same
# vim.org ID can only accumulate when there is already a duplicate
# (these original duplicates arose due to mistakenly associating
# vim.org IDs with GitHub repos whose homepages were vim.org URLs).
assert len(to_keep) >= 2
# Delete the newly-scraped extra plugins that accumulated due to
# existing duplicates.
for plugin in to_delete:
r.table('plugins').get(plugin['slug']).delete().run(r_conn())
# Out of the ones to keep, only one should get the vim.org ID. Pick the
# one that has the most users.
most_used = max(to_keep, key=lambda p: max(p['github_bundles'],
p['github_vim_scripts_bundles']))
for plugin in to_keep:
if plugin['slug'] != most_used['slug']:
r.table('plugins').get(plugin['slug']).update(
{'vimorg_id': ''}).run(r_conn())
print 'Plugin %s gets to keep its vim.org ID' % most_used['slug']
|
<commit_before><commit_msg>Add script to fix plugins with vim.org ID duplicates
Summary:
If you look at vimawesome.com/?p=3 right now, you will see many duplicated
"taglist.vim" plugins.
A long time ago, when we scraped a github repo whose homepage was a vim.org
plugin, we would assume that was the definitive github repo associated with
that vim.org plugin and set that repo's vim.org ID. But it turns out, of
course, that there are many different github repos that all set their homepage
to the same vim.org plugin. This led to multiple plugins with the same vim.org
ID in our DB, which violates an invariant assumed throughout the codebase that
vim.org ID is unique in the plugins table.
This was fixed the night before launching, but I forgot/didn't have time to update the data,
and so the data was in an inconsistent state.
With the invariant violated, scraping the same set of sources no longer
guaranteed idempotence, and so each new scrape would accumulate new duplicates.
Basically, when the scraper tries to match a source that has a vim.org ID and
it sees multiple existing plugins with the same vim.org ID, it doesn't know
which one to match to, so it inserts a new row instead of attempting a match.
This led to the proliferation of duplicates you see now.
So this script removes the extra accumulated plugins, and also fixes duplicate
vim.org IDs by only allowing the plugin with the most users to keep its vim.org
ID.
Test Plan:
Locally. Before fix, ran
`PYTHONPATH=. python tools/scrape/scrape.py -s vim.org`, and saw that a bunch of new plugins were inserted when they should
have been associated. Ran
`PYTHONPATH=. python tools/scrape/scrape.py -s github-vim-scripts`,
and saw new plugins were inserted when they should've been associated.
After fix, ran the two commands above again, and ensured no new plugins were
inserted. Also ensured there were no duplicated vim.org IDs
Also did a dry run of the script and looked at script output. For the first
6 plugins or so, manually verified the script made the right decisions in which
plugins to delete, which to keep, and which to assign the vim.org ID.
Ran the script twice to ensure the second time it outputted nothing (meaning it successfully de-duped). Also ran the script after re-scraping to verify there were no new dupes introduced.
Reviewers: xymostech, spicyj
Reviewed By: spicyj
Differential Revision: http://phabricator.benalpert.com/D212<commit_after>"""De-duplicate the vimorg_id column in the plugins table."""
import re
import rethinkdb as r
import db.plugins
import db.util
r_conn = db.util.r_conn
if __name__ == '__main__':
grouped_plugins = r.table('plugins').group('vimorg_id').run(r_conn())
for vimorg_id, plugins in grouped_plugins.iteritems():
if not vimorg_id:
continue
# We only need to concern ourselves with duplicated vim.org IDs
if len(plugins) == 1:
continue
print '\nPlugin with vim.org ID %s occurs %s times' % (vimorg_id,
len(plugins))
to_delete = []
to_keep = []
for plugin in plugins:
# Plugins scraped from github.com/vim-scripts that don't have
# a vimorg_url must have not been successfully matched to the
# corresponding vim.org plugin. These can be removed.
if (plugin['github_vim_scripts_repo_name'] and not
plugin['vimorg_url']):
print ('Delete %s because it is an unmatched '
'github.com/vim-scripts plugin' % (plugin['slug']))
to_delete.append(plugin)
continue
# If no GitHub info is available for this plugin, it's probably
# a duplicate vim.org plugin scraped after the original
if (not plugin['github_owner'] and not
plugin['github_vim_scripts_repo_name']):
print ('Delete %s because it is an extra vim.org plugin' %
plugin['slug'])
to_delete.append(plugin)
continue
# Otherwise, we have an original plugin that should be preserved
print 'Keep plugin %s' % plugin['slug']
to_keep.append(plugin)
# We expect to keep at least two plugins because plugins with the same
# vim.org ID can only accumulate when there is already a duplicate
# (these original duplicates arose due to mistakenly associating
# vim.org IDs with GitHub repos whose homepages were vim.org URLs).
assert len(to_keep) >= 2
# Delete the newly-scraped extra plugins that accumulated due to
# existing duplicates.
for plugin in to_delete:
r.table('plugins').get(plugin['slug']).delete().run(r_conn())
# Out of the ones to keep, only one should get the vim.org ID. Pick the
# one that has the most users.
most_used = max(to_keep, key=lambda p: max(p['github_bundles'],
p['github_vim_scripts_bundles']))
for plugin in to_keep:
if plugin['slug'] != most_used['slug']:
r.table('plugins').get(plugin['slug']).update(
{'vimorg_id': ''}).run(r_conn())
print 'Plugin %s gets to keep its vim.org ID' % most_used['slug']
|
|
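The policy the script encodes is worth isolating: group rows by vimorg_id, drop the unmatched or GitHub-less extras, then let the plugin with the most users keep the ID. A dependency-free Python sketch of the tie-breaking step (field names mirror the script; the sample rows are illustrative):

from collections import defaultdict

def keep_most_used(plugins):
    by_id = defaultdict(list)
    for p in plugins:
        if p['vimorg_id']:
            by_id[p['vimorg_id']].append(p)
    for group in by_id.values():
        if len(group) < 2:
            continue
        winner = max(group, key=lambda p: max(p['github_bundles'],
                                              p['github_vim_scripts_bundles']))
        for p in group:
            if p is not winner:
                p['vimorg_id'] = ''  # losing rows give up the ID, as in the script

plugins = [
    {'slug': 'taglist-a', 'vimorg_id': '273', 'github_bundles': 120, 'github_vim_scripts_bundles': 3},
    {'slug': 'taglist-b', 'vimorg_id': '273', 'github_bundles': 5, 'github_vim_scripts_bundles': 1},
]
keep_most_used(plugins)
assert [p['vimorg_id'] for p in plugins] == ['273', '']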
f93aaab606f799e338de1b1161ee5cdf50405c24
|
salt/states/disk.py
|
salt/states/disk.py
|
'''
Disk monitoring state
Monitor the state of disk resources
'''
def status(name, max=None, min=None):
'''
Return the current disk usage stats for the named device
'''
# Monitoring state, no changes will be made so no test interface needed
ret = {'name': name,
'result': False,
'comment': '',
'changes': {},
'data': {}} # Data field for monitoring state
data = disk.usage()
if not name in data:
ret['result'] = False
ret['comment'] += 'Named disk mount not present '
return ret
if max:
try:
if isinstance(max, basestring):
max = int(max.strip('%'))
except Exception:
ret['comment'] += 'Max argument must be an integer '
if min:
try:
if isinstance(min, basestring):
min = int(min.strip('%'))
except Exception:
ret['comment'] += 'Min argument must be an integer '
if min and max:
if min >= max:
ret['comment'] += 'Min must be less than max'
if ret['comment']:
return ret
cap = int(data[name]['capacity'].strip('%'))
ret['data'] = data[name]
if min:
if cap < min:
ret['comment'] = 'Disk is below minimum of {0} at {1}'.format(
min, cap)
return ret
if max:
if cap > max:
            ret['comment'] = 'Disk is above maximum of {0} at {1}'.format(
max, cap)
return ret
ret['comment'] = 'Disk in acceptable range'
ret['result'] = True
return ret
|
Add first monitoring state - experimental
|
Add first monitoring state - experimental
|
Python
|
apache-2.0
|
saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt
|
Add first monitoring state - experimental
|
'''
Disk monitoring state
Monitor the state of disk resources
'''
def status(name, max=None, min=None):
'''
Return the current disk usage stats for the named device
'''
# Monitoring state, no changes will be made so no test interface needed
ret = {'name': name,
'result': False,
'comment': '',
'changes': {},
'data': {}} # Data field for monitoring state
data = disk.usage()
if not name in data:
ret['result'] = False
ret['comment'] += 'Named disk mount not present '
return ret
if max:
try:
if isinstance(max, basestring):
max = int(max.strip('%'))
except Exception:
ret['comment'] += 'Max argument must be an integer '
if min:
try:
if isinstance(min, basestring):
min = int(min.strip('%'))
except Exception:
ret['comment'] += 'Min argument must be an integer '
if min and max:
if min >= max:
ret['comment'] += 'Min must be less than max'
if ret['comment']:
return ret
cap = int(data[name]['capacity'].strip('%'))
ret['data'] = data[name]
if min:
if cap < min:
ret['comment'] = 'Disk is below minimum of {0} at {1}'.format(
min, cap)
return ret
if max:
if cap > max:
            ret['comment'] = 'Disk is above maximum of {0} at {1}'.format(
max, cap)
return ret
ret['comment'] = 'Disk in acceptable range'
ret['result'] = True
return ret
|
<commit_before><commit_msg>Add first monitoring state - experimental<commit_after>
|
'''
Disk monitoring state
Monitor the state of disk resources
'''
def status(name, max=None, min=None):
'''
Return the current disk usage stats for the named device
'''
# Monitoring state, no changes will be made so no test interface needed
ret = {'name': name,
'result': False,
'comment': '',
'changes': {},
'data': {}} # Data field for monitoring state
data = disk.usage()
if not name in data:
ret['result'] = False
ret['comment'] += 'Named disk mount not present '
return ret
if max:
try:
if isinstance(max, basestring):
max = int(max.strip('%'))
except Exception:
ret['comment'] += 'Max argument must be an integer '
if min:
try:
if isinstance(min, basestring):
min = int(min.strip('%'))
except Exception:
ret['comment'] += 'Min argument must be an integer '
if min and max:
if min >= max:
ret['comment'] += 'Min must be less than max'
if ret['comment']:
return ret
cap = int(data[name]['capacity'].strip('%'))
ret['data'] = data[name]
if min:
if cap < min:
ret['comment'] = 'Disk is below minimum of {0} at {1}'.format(
min, cap)
return ret
if max:
if cap > max:
            ret['comment'] = 'Disk is above maximum of {0} at {1}'.format(
max, cap)
return ret
ret['comment'] = 'Disk in acceptable range'
ret['result'] = True
return ret
|
Add first monitoring state - experimental'''
Disk monitoring state
Monitor the state of disk resources
'''
def status(name, max=None, min=None):
'''
Return the current disk usage stats for the named device
'''
# Monitoring state, no changes will be made so no test interface needed
ret = {'name': name,
'result': False,
'comment': '',
'changes': {},
'data': {}} # Data field for monitoring state
data = disk.usage()
if not name in data:
ret['result'] = False
ret['comment'] += 'Named disk mount not present '
return ret
if max:
try:
if isinstance(max, basestring):
max = int(max.strip('%'))
except Exception:
ret['comment'] += 'Max argument must be an integer '
if min:
try:
if isinstance(min, basestring):
min = int(min.strip('%'))
except Exception:
ret['comment'] += 'Min argument must be an integer '
if min and max:
if min >= max:
ret['comment'] += 'Min must be less than max'
if ret['comment']:
return ret
cap = int(data[name]['capacity'].strip('%'))
ret['data'] = data[name]
if min:
if cap < min:
ret['comment'] = 'Disk is below minimum of {0} at {1}'.format(
min, cap)
return ret
if max:
if cap > max:
            ret['comment'] = 'Disk is above maximum of {0} at {1}'.format(
max, cap)
return ret
ret['comment'] = 'Disk in acceptable range'
ret['result'] = True
return ret
|
<commit_before><commit_msg>Add first monitoring state - experimental<commit_after>'''
Disk monitoring state
Monitor the state of disk resources
'''
def status(name, max=None, min=None):
'''
Return the current disk usage stats for the named device
'''
# Monitoring state, no changes will be made so no test interface needed
ret = {'name': name,
'result': False,
'comment': '',
'changes': {},
'data': {}} # Data field for monitoring state
data = disk.usage()
if not name in data:
ret['result'] = False
ret['comment'] += 'Named disk mount not present '
return ret
if max:
try:
if isinstance(max, basestring):
max = int(max.strip('%'))
except Exception:
ret['comment'] += 'Max argument must be an integer '
if min:
try:
if isinstance(min, basestring):
min = int(min.strip('%'))
except Exception:
ret['comment'] += 'Min argument must be an integer '
if min and max:
if min >= max:
ret['comment'] += 'Min must be less than max'
if ret['comment']:
return ret
cap = int(data[name]['capacity'].strip('%'))
ret['data'] = data[name]
if min:
if cap < min:
ret['comment'] = 'Disk is below minimum of {0} at {1}'.format(
min, cap)
return ret
if max:
if cap > max:
            ret['comment'] = 'Disk is above maximum of {0} at {1}'.format(
max, cap)
return ret
ret['comment'] = 'Disk in acceptable range'
ret['result'] = True
return ret
|
|
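One caveat on the state above: Salt state modules normally reach execution-module functions through the __salt__ mapping, so the bare disk.usage() call, with no disk import in scope, would presumably need to be __salt__['disk.usage']() to run. The threshold logic itself is easier to verify when it stands alone; a minimal Python 3 sketch, assuming capacity arrives as a percentage string such as '85%':

def to_percent(value):
    # Accept 85, '85' or '85%'; raise ValueError for anything else.
    if isinstance(value, str):
        value = value.strip('%')
    return int(value)

def check_capacity(cap, minimum=None, maximum=None):
    # cap is the used-capacity percentage reported for the mount point.
    if minimum is not None and maximum is not None and minimum >= maximum:
        return False, 'Min must be less than max'
    if minimum is not None and cap < minimum:
        return False, 'Disk is below minimum of {0} at {1}'.format(minimum, cap)
    if maximum is not None and cap > maximum:
        return False, 'Disk is above maximum of {0} at {1}'.format(maximum, cap)
    return True, 'Disk in acceptable range'

assert check_capacity(to_percent('50%'), minimum=10, maximum=90)[0] is True
assert check_capacity(95, maximum=90) == (False, 'Disk is above maximum of 90 at 95')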
b7e0a6767445acb856661238d3e73e6aa514e360
|
plot_graph.py
|
plot_graph.py
|
from graphviz import Digraph
#Add the path of graphviz to render the graph
import os
os.environ["PATH"] += os.pathsep + 'C:/Program Files/graphviz-2.38/bin'
dot = Digraph(comment='The Round Table')
#add nodes
dot.node('I', 'Inflow')
dot.node('V', 'Volume')
dot.node('O', 'Outflow')
#add edges
dot.edge('I', 'V', label='I+')
dot.edge('V', 'O', label='P+')
dot.edge('O', 'V', label="I-")
#print the graph
print(dot.source)
#view graph
dot.render('test-output/round-table.gv', view=True)
|
Add naive code for generating and rendering causal model and state graph visualization
|
Add naive code for generating and rendering causal model and state graph visualization
|
Python
|
mit
|
Kaleidophon/puzzled-platypus
|
Add naive code for generating and rendering causal model and state graph visualization
|
from graphviz import Digraph
#Add the path of graphviz to render the graph
import os
os.environ["PATH"] += os.pathsep + 'C:/Program Files/graphviz-2.38/bin'
dot = Digraph(comment='The Round Table')
#add nodes
dot.node('I', 'Inflow')
dot.node('V', 'Volume')
dot.node('O', 'Outflow')
#add edges
dot.edge('I', 'V', label='I+')
dot.edge('V', 'O', label='P+')
dot.edge('O', 'V', label="I-")
#print the graph
print(dot.source)
#view graph
dot.render('test-output/round-table.gv', view=True)
|
<commit_before><commit_msg>Add naive code for generating and rendering causal model and state graph visualization<commit_after>
|
from graphviz import Digraph
#Add the path of graphviz to render the graph
import os
os.environ["PATH"] += os.pathsep + 'C:/Program Files/graphviz-2.38/bin'
dot = Digraph(comment='The Round Table')
#add nodes
dot.node('I', 'Inflow')
dot.node('V', 'Volume')
dot.node('O', 'Outflow')
#add edges
dot.edge('I', 'V', label='I+')
dot.edge('V', 'O', label='P+')
dot.edge('O', 'V', label="I-")
#print the graph
print(dot.source)
#view graph
dot.render('test-output/round-table.gv', view=True)
|
Add naive code for generating and rendering causal model and state graph visualizationfrom graphviz import Digraph
#Add the path of graphviz to render the graph
import os
os.environ["PATH"] += os.pathsep + 'C:/Program Files/graphviz-2.38/bin'
dot = Digraph(comment='The Round Table')
#add nodes
dot.node('I', 'Inflow')
dot.node('V', 'Volume')
dot.node('O', 'Outflow')
#add edges
dot.edge('I', 'V', label='I+')
dot.edge('V', 'O', label='P+')
dot.edge('O', 'V', label="I-")
#print the graph
print(dot.source)
#view graph
dot.render('test-output/round-table.gv', view=True)
|
<commit_before><commit_msg>Add naive code for generating and rendering causal model and state graph visualization<commit_after>from graphviz import Digraph
#Add the path of graphviz to render the graph
import os
os.environ["PATH"] += os.pathsep + 'C:/Program Files/graphviz-2.38/bin'
dot = Digraph(comment='The Round Table')
#add nodes
dot.node('I', 'Inflow')
dot.node('V', 'Volume')
dot.node('O', 'Outflow')
#add edges
dot.edge('I', 'V', label='I+')
dot.edge('V', 'O', label='P+')
dot.edge('O', 'V', label="I-")
#print the graph
print(dot.source)
#view graph
dot.render('test-output/round-table.gv', view=True)
|
|
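The edges above encode the classic bathtub model from qualitative reasoning: inflow adds to volume (I+), outflow is proportional to volume (P+), and outflow drains volume (I-). Two small notes: the 'The Round Table' comment is leftover from the graphviz tutorial, and appending a hard-coded Windows path to os.environ['PATH'] only helps on machines where Graphviz actually lives there. A sketch that builds the same causal graph from a data table, so new quantities and dependencies become rows rather than code:

from graphviz import Digraph

NODES = {'I': 'Inflow', 'V': 'Volume', 'O': 'Outflow'}
EDGES = [
    ('I', 'V', 'I+'),  # inflow adds to volume
    ('V', 'O', 'P+'),  # outflow is proportional to volume
    ('O', 'V', 'I-'),  # outflow drains volume
]

def build_causal_graph(nodes, edges, comment='causal model'):
    dot = Digraph(comment=comment)
    for key, label in nodes.items():
        dot.node(key, label)
    for src, dst, label in edges:
        dot.edge(src, dst, label=label)
    return dot

dot = build_causal_graph(NODES, EDGES)
print(dot.source)  # rendering to a file still needs the Graphviz binaries on PATH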
7a12c23c59dddbefa201644bf52260d3ad812842
|
isVowel.py
|
isVowel.py
|
""" Q5- Define a function isVowel(char) that returns True if char is a vowel ('a', 'e', 'i', 'o', or 'u'), and False
otherwise. You can assume that char is a single letter of any case (ie, 'A' and 'a' are both valid). Do not use the
keyword in. Your function should take in a single string and return a boolean.
"""
def isVowel( char ):
    char = char.lower()
return char == 'a' or char == 'e' or char == 'i' or char == 'o' or char == 'u'
def isStringContainsVowel( string ):
i = 0
while i < len( string ):
letter = string[i]
if isVowel( letter ):
return True
i += 1
return False
string = input( "Enter a string: " )
print( "Is the string, '" + string + "' contains a vowel in it? Answer: " + str( isStringContainsVowel( string ) ) )
|
Add the answer to the fifth question of Assignment 3
|
Add the answer to the fifth question of Assignment 3
|
Python
|
mit
|
SuyashD95/python-assignments
|
Add the answer to the fifth question of Assignment 3
|
""" Q5- Define a function isVowel(char) that returns True if char is a vowel ('a', 'e', 'i', 'o', or 'u'), and False
otherwise. You can assume that char is a single letter of any case (ie, 'A' and 'a' are both valid). Do not use the
keyword in. Your function should take in a single string and return a boolean.
"""
def isVowel( char ):
    char = char.lower()
return char == 'a' or char == 'e' or char == 'i' or char == 'o' or char == 'u'
def isStringContainsVowel( string ):
i = 0
while i < len( string ):
letter = string[i]
if isVowel( letter ):
return True
i += 1
return False
string = input( "Enter a string: " )
print( "Is the string, '" + string + "' contains a vowel in it? Answer: " + str( isStringContainsVowel( string ) ) )
|
<commit_before><commit_msg>Add the answer to the fifth question of Assignment 3<commit_after>
|
""" Q5- Define a function isVowel(char) that returns True if char is a vowel ('a', 'e', 'i', 'o', or 'u'), and False
otherwise. You can assume that char is a single letter of any case (ie, 'A' and 'a' are both valid). Do not use the
keyword in. Your function should take in a single string and return a boolean.
"""
def isVowel( char ):
    char = char.lower()
return char == 'a' or char == 'e' or char == 'i' or char == 'o' or char == 'u'
def isStringContainsVowel( string ):
i = 0
while i < len( string ):
letter = string[i]
if isVowel( letter ):
return True
i += 1
return False
string = input( "Enter a string: " )
print( "Is the string, '" + string + "' contains a vowel in it? Answer: " + str( isStringContainsVowel( string ) ) )
|
Add the answer to the fifth question of Assignment 3""" Q5- Define a function isVowel(char) that returns True if char is a vowel ('a', 'e', 'i', 'o', or 'u'), and False
otherwise. You can assume that char is a single letter of any case (ie, 'A' and 'a' are both valid). Do not use the
keyword in. Your function should take in a single string and return a boolean.
"""
def isVowel( char ):
    char = char.lower()
return char == 'a' or char == 'e' or char == 'i' or char == 'o' or char == 'u'
def isStringContainsVowel( string ):
i = 0
while i < len( string ):
letter = string[i]
if isVowel( letter ):
return True
i += 1
return False
string = input( "Enter a string: " )
print( "Is the string, '" + string + "' contains a vowel in it? Answer: " + str( isStringContainsVowel( string ) ) )
|
<commit_before><commit_msg>Add the answer to the fifth question of Assignment 3<commit_after>""" Q5- Define a function isVowel(char) that returns True if char is a vowel ('a', 'e', 'i', 'o', or 'u'), and False
otherwise. You can assume that char is a single letter of any case (ie, 'A' and 'a' are both valid). Do not use the
keyword in. Your function should take in a single string and return a boolean.
"""
def isVowel( char ):
    char = char.lower()
return char == 'a' or char == 'e' or char == 'i' or char == 'o' or char == 'u'
def isStringContainsVowel( string ):
i = 0
while i < len( string ):
letter = string[i]
if isVowel( letter ):
return True
i += 1
return False
string = input( "Enter a string: " )
print( "Is the string, '" + string + "' contains a vowel in it? Answer: " + str( isStringContainsVowel( string ) ) )
|
|
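One detail worth spelling out: strings are immutable in Python, so lower() returns a new string, and the result has to be assigned back (char = char.lower()), or uppercase input like 'A' would fail the check the exercise requires to pass. A compact Python 3 sketch that keeps the exercise's no-`in` constraint for the comparison itself:

def is_vowel(char):
    char = char.lower()  # lower() returns a new string; it must be assigned back
    return char == 'a' or char == 'e' or char == 'i' or char == 'o' or char == 'u'

def contains_vowel(string):
    i = 0
    while i < len(string):  # index loop, mirroring the original's style
        if is_vowel(string[i]):
            return True
        i += 1
    return False

assert is_vowel('A') and is_vowel('u') and not is_vowel('b')
assert contains_vowel('Echo') and not contains_vowel('rhythm')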
33f9196d69ad9d7fa6b45b41f0110780e1ab6c5f
|
dj/main/migrations/0009_auto_20170820_0105.py
|
dj/main/migrations/0009_auto_20170820_0105.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-08-20 01:05
from __future__ import unicode_literals
from django.db import migrations, models
import main.models
class Migration(migrations.Migration):
dependencies = [
('main', '0008_auto_20161014_1424'),
]
operations = [
migrations.AddField(
model_name='episode',
name='reviewers',
            field=models.TextField(blank=True, help_text='email(s) of the reviewer(s)'),
),
migrations.AlterField(
model_name='episode',
name='edit_key',
field=models.CharField(blank=True, default=main.models.generate_edit_key, help_text='key to allow unauthenticated users to edit this item.', max_length=32, null=True),
),
migrations.AlterField(
model_name='episode',
name='name',
field=models.CharField(help_text='Video Title (shows in video search results)', max_length=170),
),
migrations.AlterField(
model_name='location',
name='name',
field=models.CharField(help_text='room name', max_length=135),
),
]
|
Update migrations to match current model
|
Update migrations to match current model
|
Python
|
mit
|
xfxf/veyepar,CarlFK/veyepar,xfxf/veyepar,xfxf/veyepar,CarlFK/veyepar,CarlFK/veyepar,CarlFK/veyepar,xfxf/veyepar,xfxf/veyepar,CarlFK/veyepar
|
Update migrations to match current model
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-08-20 01:05
from __future__ import unicode_literals
from django.db import migrations, models
import main.models
class Migration(migrations.Migration):
dependencies = [
('main', '0008_auto_20161014_1424'),
]
operations = [
migrations.AddField(
model_name='episode',
name='reviewers',
            field=models.TextField(blank=True, help_text='email(s) of the reviewer(s)'),
),
migrations.AlterField(
model_name='episode',
name='edit_key',
field=models.CharField(blank=True, default=main.models.generate_edit_key, help_text='key to allow unauthenticated users to edit this item.', max_length=32, null=True),
),
migrations.AlterField(
model_name='episode',
name='name',
field=models.CharField(help_text='Video Title (shows in video search results)', max_length=170),
),
migrations.AlterField(
model_name='location',
name='name',
field=models.CharField(help_text='room name', max_length=135),
),
]
|
<commit_before><commit_msg>Update migrations to match current model<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-08-20 01:05
from __future__ import unicode_literals
from django.db import migrations, models
import main.models
class Migration(migrations.Migration):
dependencies = [
('main', '0008_auto_20161014_1424'),
]
operations = [
migrations.AddField(
model_name='episode',
name='reviewers',
            field=models.TextField(blank=True, help_text='email(s) of the reviewer(s)'),
),
migrations.AlterField(
model_name='episode',
name='edit_key',
field=models.CharField(blank=True, default=main.models.generate_edit_key, help_text='key to allow unauthenticated users to edit this item.', max_length=32, null=True),
),
migrations.AlterField(
model_name='episode',
name='name',
field=models.CharField(help_text='Video Title (shows in video search results)', max_length=170),
),
migrations.AlterField(
model_name='location',
name='name',
field=models.CharField(help_text='room name', max_length=135),
),
]
|
Update migrations to match current model# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-08-20 01:05
from __future__ import unicode_literals
from django.db import migrations, models
import main.models
class Migration(migrations.Migration):
dependencies = [
('main', '0008_auto_20161014_1424'),
]
operations = [
migrations.AddField(
model_name='episode',
name='reviewers',
            field=models.TextField(blank=True, help_text='email(s) of the reviewer(s)'),
),
migrations.AlterField(
model_name='episode',
name='edit_key',
field=models.CharField(blank=True, default=main.models.generate_edit_key, help_text='key to allow unauthenticated users to edit this item.', max_length=32, null=True),
),
migrations.AlterField(
model_name='episode',
name='name',
field=models.CharField(help_text='Video Title (shows in video search results)', max_length=170),
),
migrations.AlterField(
model_name='location',
name='name',
field=models.CharField(help_text='room name', max_length=135),
),
]
|
<commit_before><commit_msg>Update migrations to match current model<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-08-20 01:05
from __future__ import unicode_literals
from django.db import migrations, models
import main.models
class Migration(migrations.Migration):
dependencies = [
('main', '0008_auto_20161014_1424'),
]
operations = [
migrations.AddField(
model_name='episode',
name='reviewers',
            field=models.TextField(blank=True, help_text='email(s) of the reviewer(s)'),
),
migrations.AlterField(
model_name='episode',
name='edit_key',
field=models.CharField(blank=True, default=main.models.generate_edit_key, help_text='key to allow unauthenticated users to edit this item.', max_length=32, null=True),
),
migrations.AlterField(
model_name='episode',
name='name',
field=models.CharField(help_text='Video Title (shows in video search results)', max_length=170),
),
migrations.AlterField(
model_name='location',
name='name',
field=models.CharField(help_text='room name', max_length=135),
),
]
|
|
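A migration file like this is what manage.py makemigrations emits once model definitions drift from the recorded migration state; the bare import main.models exists because callable defaults such as main.models.generate_edit_key are serialized as importable references rather than values. A hedged sketch of model fields that would produce these operations (field names and options come from the migration; the key generator is an illustrative stand-in for the real helper in main/models.py):

import secrets

from django.db import models

def generate_edit_key():
    return secrets.token_hex(16)  # 32 hex characters, matching max_length=32

class Episode(models.Model):
    name = models.CharField(max_length=170,
                            help_text='Video Title (shows in video search results)')
    edit_key = models.CharField(max_length=32, blank=True, null=True,
                                default=generate_edit_key,
                                help_text='key to allow unauthenticated users to edit this item.')
    reviewers = models.TextField(blank=True, help_text='email(s) of the reviewer(s)')

class Location(models.Model):
    name = models.CharField(max_length=135, help_text='room name')

Being Django models, these need an installed app to import; the sketch shows the shape of the schema, not a standalone script.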
751485671b6abb88878d62bbbb473cf6bae30fc8
|
Attic/calculate_activity_freqs.py
|
Attic/calculate_activity_freqs.py
|
from parliament.models import *
types = MemberActivityType.objects.all()
st_count = 0
start_date = '2014-02-06'
start_date = '2011-04-20'
for t in types:
act_qs = MemberActivity.objects.filter(type=t, time__gte=start_date)
act_count = act_qs.count()
mp_count = Member.objects.filter(memberactivity__in=act_qs).distinct().count()
print("%s: %d %d" % (t.type, act_count, mp_count))
if mp_count:
t.count = act_count / (1.0 * mp_count)
else:
t.count = 0
if t.type == 'ST':
st_count = t.count
for t in types:
if not t.count:
continue
print("%s: %f" % (t.type, st_count * 1.0 / t.count))
|
Add script to calculate activity frequencies
|
Add script to calculate activity frequencies
|
Python
|
agpl-3.0
|
kansanmuisti/kamu,kansanmuisti/kamu,kansanmuisti/kamu,kansanmuisti/kamu,kansanmuisti/kamu
|
Add script to calculate activity frequencies
|
from parliament.models import *
types = MemberActivityType.objects.all()
st_count = 0
start_date = '2014-02-06'
start_date = '2011-04-20'
for t in types:
act_qs = MemberActivity.objects.filter(type=t, time__gte=start_date)
act_count = act_qs.count()
mp_count = Member.objects.filter(memberactivity__in=act_qs).distinct().count()
print("%s: %d %d" % (t.type, act_count, mp_count))
if mp_count:
t.count = act_count / (1.0 * mp_count)
else:
t.count = 0
if t.type == 'ST':
st_count = t.count
for t in types:
if not t.count:
continue
print("%s: %f" % (t.type, st_count * 1.0 / t.count))
|
<commit_before><commit_msg>Add script to calculate activity frequencies<commit_after>
|
from parliament.models import *
types = MemberActivityType.objects.all()
st_count = 0
start_date = '2014-02-06'
start_date = '2011-04-20'
for t in types:
act_qs = MemberActivity.objects.filter(type=t, time__gte=start_date)
act_count = act_qs.count()
mp_count = Member.objects.filter(memberactivity__in=act_qs).distinct().count()
print("%s: %d %d" % (t.type, act_count, mp_count))
if mp_count:
t.count = act_count / (1.0 * mp_count)
else:
t.count = 0
if t.type == 'ST':
st_count = t.count
for t in types:
if not t.count:
continue
print("%s: %f" % (t.type, st_count * 1.0 / t.count))
|
Add script to calculate activity frequenciesfrom parliament.models import *
types = MemberActivityType.objects.all()
st_count = 0
start_date = '2014-02-06'
start_date = '2011-04-20'
for t in types:
act_qs = MemberActivity.objects.filter(type=t, time__gte=start_date)
act_count = act_qs.count()
mp_count = Member.objects.filter(memberactivity__in=act_qs).distinct().count()
print("%s: %d %d" % (t.type, act_count, mp_count))
if mp_count:
t.count = act_count / (1.0 * mp_count)
else:
t.count = 0
if t.type == 'ST':
st_count = t.count
for t in types:
if not t.count:
continue
print("%s: %f" % (t.type, st_count * 1.0 / t.count))
|
<commit_before><commit_msg>Add script to calculate activity frequencies<commit_after>from parliament.models import *
types = MemberActivityType.objects.all()
st_count = 0
start_date = '2014-02-06'
start_date = '2011-04-20'
for t in types:
act_qs = MemberActivity.objects.filter(type=t, time__gte=start_date)
act_count = act_qs.count()
mp_count = Member.objects.filter(memberactivity__in=act_qs).distinct().count()
print("%s: %d %d" % (t.type, act_count, mp_count))
if mp_count:
t.count = act_count / (1.0 * mp_count)
else:
t.count = 0
if t.type == 'ST':
st_count = t.count
for t in types:
if not t.count:
continue
print("%s: %f" % (t.type, st_count * 1.0 / t.count))
|
|
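The script computes, per activity type, the average number of activities per MP who has any, then expresses everything as a multiple of the statement (ST) rate; note the two consecutive start_date assignments, where the second silently wins, presumably a quick way of switching observation periods. The core arithmetic, free of the ORM (the sample pairs are illustrative):

from collections import Counter, defaultdict

# (activity_type, member_id) pairs stand in for MemberActivity rows.
ACTIVITIES = [('ST', 1), ('ST', 1), ('ST', 2), ('SI', 1), ('SI', 3), ('TW', 2)]

def per_member_frequency(activities):
    counts = Counter(t for t, _ in activities)      # activities per type
    members = defaultdict(set)
    for t, member in activities:
        members[t].add(member)                      # distinct MPs per type
    return {t: counts[t] / len(members[t]) for t in counts}

freqs = per_member_frequency(ACTIVITIES)
st_rate = freqs['ST']
for t in sorted(freqs):
    print('%s: %.2f per MP, ST weight %.2f' % (t, freqs[t], st_rate / freqs[t]))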
75973597cc2542d589033eecc9169f09fec57857
|
gulf/bathy/get_bathy.py
|
gulf/bathy/get_bathy.py
|
#!/usr/bin/env python
"""Simple implementation of a file fetcher"""
import sys
import os
import urllib
import subprocess
def get_bathy(url, destination=os.getcwd(), force=False):
r"""Get bathymetry file located at `url`
Will check downloaded file's suffix to see if the file needs to be extracted
"""
file_name = os.path.basename(url)
output_path = os.path.join(destination, file_name)
if not os.path.exists(output_path) or force:
print "Downloading %s to %s..." % (url, output_path)
urllib.urlretrieve(url, output_path)
print "Finished downloading."
else:
print "Skipping %s, file already exists." % file_name
tar = False
gunzip = False
split_file_name = file_name.split('.')
if split_file_name[-1] == 'gz':
gunzip = True
if split_file_name[-2] == 'tar':
tar = True
if split_file_name[-1] == 'tgz':
gunzip = True
tar = True
if gunzip or tar:
print "Extracting %s" % file_name
if gunzip and tar:
subprocess.Popen('tar xvzf %s' % output_path, shell=True)
elif gunzip:
subprocess.Popen('gunzip %s' % output_path, shell=True)
elif tar:
subprocess.Popen('tar xvf %s' % output_path, shell=True)
if __name__ == "__main__":
# Default URLs
base_url = "http://users.ices.utexas.edu/~kyle/bathy/"
# Override base_url
if len(sys.argv) > 1:
base_url = sys.argv[1]
urls = [os.path.join(base_url, 'gulf_caribbean.tt3'),
os.path.join(base_url, 'NOAA_Galveston_Houston.tt3')]
for url in urls:
get_bathy(url)
|
Add bathy download script for Gulf
|
Add bathy download script for Gulf
|
Python
|
mit
|
mandli/surge-examples
|
Add bathy download script for Gulf
|
#!/usr/bin/env python
"""Simple implementation of a file fetcher"""
import sys
import os
import urllib
import subprocess
def get_bathy(url, destination=os.getcwd(), force=False):
r"""Get bathymetry file located at `url`
Will check downloaded file's suffix to see if the file needs to be extracted
"""
file_name = os.path.basename(url)
output_path = os.path.join(destination, file_name)
if not os.path.exists(output_path) or force:
print "Downloading %s to %s..." % (url, output_path)
urllib.urlretrieve(url, output_path)
print "Finished downloading."
else:
print "Skipping %s, file already exists." % file_name
tar = False
gunzip = False
split_file_name = file_name.split('.')
if split_file_name[-1] == 'gz':
gunzip = True
if split_file_name[-2] == 'tar':
tar = True
if split_file_name[-1] == 'tgz':
gunzip = True
tar = True
if gunzip or tar:
print "Extracting %s" % file_name
if gunzip and tar:
subprocess.Popen('tar xvzf %s' % output_path, shell=True)
elif gunzip:
subprocess.Popen('gunzip %s' % output_path, shell=True)
elif tar:
subprocess.Popen('tar xvf %s' % output_path, shell=True)
if __name__ == "__main__":
# Default URLs
base_url = "http://users.ices.utexas.edu/~kyle/bathy/"
# Override base_url
if len(sys.argv) > 1:
base_url = sys.argv[1]
urls = [os.path.join(base_url, 'gulf_caribbean.tt3'),
os.path.join(base_url, 'NOAA_Galveston_Houston.tt3')]
for url in urls:
get_bathy(url)
|
<commit_before><commit_msg>Add bathy download script for Gulf<commit_after>
|
#!/usr/bin/env python
"""Simple implementation of a file fetcher"""
import sys
import os
import urllib
import subprocess
def get_bathy(url, destination=os.getcwd(), force=False):
r"""Get bathymetry file located at `url`
Will check downloaded file's suffix to see if the file needs to be extracted
"""
file_name = os.path.basename(url)
output_path = os.path.join(destination, file_name)
if not os.path.exists(output_path) or force:
print "Downloading %s to %s..." % (url, output_path)
urllib.urlretrieve(url, output_path)
print "Finished downloading."
else:
print "Skipping %s, file already exists." % file_name
tar = False
gunzip = False
split_file_name = file_name.split('.')
if split_file_name[-1] == 'gz':
gunzip = True
if split_file_name[-2] == 'tar':
tar = True
if split_file_name[-1] == 'tgz':
gunzip = True
tar = True
if gunzip or tar:
print "Extracting %s" % file_name
if gunzip and tar:
subprocess.Popen('tar xvzf %s' % output_path, shell=True)
elif gunzip:
subprocess.Popen('gunzip %s' % output_path, shell=True)
elif tar:
subprocess.Popen('tar xvf %s' % output_path, shell=True)
if __name__ == "__main__":
# Default URLs
base_url = "http://users.ices.utexas.edu/~kyle/bathy/"
# Override base_url
if len(sys.argv) > 1:
base_url = sys.argv[1]
urls = [os.path.join(base_url, 'gulf_caribbean.tt3'),
os.path.join(base_url, 'NOAA_Galveston_Houston.tt3')]
for url in urls:
get_bathy(url)
|
Add bathy download script for Gulf#!/usr/bin/env python
"""Simple implementation of a file fetcher"""
import sys
import os
import urllib
import subprocess
def get_bathy(url, destination=os.getcwd(), force=False):
r"""Get bathymetry file located at `url`
Will check downloaded file's suffix to see if the file needs to be extracted
"""
file_name = os.path.basename(url)
output_path = os.path.join(destination, file_name)
if not os.path.exists(output_path) or force:
print "Downloading %s to %s..." % (url, output_path)
urllib.urlretrieve(url, output_path)
print "Finished downloading."
else:
print "Skipping %s, file already exists." % file_name
tar = False
gunzip = False
split_file_name = file_name.split('.')
if split_file_name[-1] == 'gz':
gunzip = True
if split_file_name[-2] == 'tar':
tar = True
if split_file_name[-1] == 'tgz':
gunzip = True
tar = True
if gunzip or tar:
print "Extracting %s" % file_name
if gunzip and tar:
subprocess.Popen('tar xvzf %s' % output_path, shell=True)
elif gunzip:
subprocess.Popen('gunzip %s' % output_path, shell=True)
elif tar:
subprocess.Popen('tar xvf %s' % output_path, shell=True)
if __name__ == "__main__":
# Default URLs
base_url = "http://users.ices.utexas.edu/~kyle/bathy/"
# Override base_url
if len(sys.argv) > 1:
base_url = sys.argv[1]
urls = [os.path.join(base_url, 'gulf_caribbean.tt3'),
os.path.join(base_url, 'NOAA_Galveston_Houston.tt3')]
for url in urls:
get_bathy(url)
|
<commit_before><commit_msg>Add bathy download script for Gulf<commit_after>#!/usr/bin/env python
"""Simple implementation of a file fetcher"""
import sys
import os
import urllib
import subprocess
def get_bathy(url, destination=os.getcwd(), force=False):
r"""Get bathymetry file located at `url`
Will check downloaded file's suffix to see if the file needs to be extracted
"""
file_name = os.path.basename(url)
output_path = os.path.join(destination, file_name)
if not os.path.exists(output_path) or force:
print "Downloading %s to %s..." % (url, output_path)
urllib.urlretrieve(url, output_path)
print "Finished downloading."
else:
print "Skipping %s, file already exists." % file_name
tar = False
gunzip = False
split_file_name = file_name.split('.')
if split_file_name[-1] == 'gz':
gunzip = True
if split_file_name[-2] == 'tar':
tar = True
if split_file_name[-1] == 'tgz':
gunzip = True
tar = True
if gunzip or tar:
print "Extracting %s" % file_name
if gunzip and tar:
subprocess.Popen('tar xvzf %s' % output_path, shell=True)
elif gunzip:
subprocess.Popen('gunzip %s' % output_path, shell=True)
elif tar:
subprocess.Popen('tar xvf %s' % output_path, shell=True)
if __name__ == "__main__":
# Default URLs
base_url = "http://users.ices.utexas.edu/~kyle/bathy/"
# Override base_url
if len(sys.argv) > 1:
base_url = sys.argv[1]
urls = [os.path.join(base_url, 'gulf_caribbean.tt3'),
os.path.join(base_url, 'NOAA_Galveston_Houston.tt3')]
for url in urls:
get_bathy(url)
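A hypothetical reuse of get_bathy from another script; the destination path and force=True are placeholders, not values used by the project.
get_bathy('http://users.ices.utexas.edu/~kyle/bathy/gulf_caribbean.tt3',
          destination='/tmp', force=True)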
|
|
f0f3e03931560106276be9d9054127cdab6e0d5d
|
migrations/versions/0160_another_letter_org.py
|
migrations/versions/0160_another_letter_org.py
|
"""empty message
Revision ID: 0160_another_letter_org
Revises: 0159_add_historical_redact
Create Date: 2017-06-29 12:44:16.815039
"""
# revision identifiers, used by Alembic.
revision = '0160_another_letter_org'
down_revision = '0159_add_historical_redact'
from alembic import op
NEW_ORGANISATIONS = [
('501', 'Environment Agency (PDF letters ONLY)'),
]
def upgrade():
for numeric_id, name in NEW_ORGANISATIONS:
op.execute("""
INSERT
INTO dvla_organisation
VALUES ('{}', '{}')
""".format(numeric_id, name))
def downgrade():
for numeric_id, _ in NEW_ORGANISATIONS:
op.execute("""
DELETE
FROM dvla_organisation
WHERE id = '{}'
""".format(numeric_id))
|
Add letter logo for environment agency
|
Add letter logo for environment agency
Depends on:
https://github.com/alphagov/notifications-template-preview/pull/87
|
Python
|
mit
|
alphagov/notifications-api,alphagov/notifications-api
|
Add letter logo for environment agency
Depends on:
https://github.com/alphagov/notifications-template-preview/pull/87
|
"""empty message
Revision ID: 0160_another_letter_org
Revises: 0159_add_historical_redact
Create Date: 2017-06-29 12:44:16.815039
"""
# revision identifiers, used by Alembic.
revision = '0160_another_letter_org'
down_revision = '0159_add_historical_redact'
from alembic import op
NEW_ORGANISATIONS = [
('501', 'Environment Agency (PDF letters ONLY)'),
]
def upgrade():
for numeric_id, name in NEW_ORGANISATIONS:
op.execute("""
INSERT
INTO dvla_organisation
VALUES ('{}', '{}')
""".format(numeric_id, name))
def downgrade():
for numeric_id, _ in NEW_ORGANISATIONS:
op.execute("""
DELETE
FROM dvla_organisation
WHERE id = '{}'
""".format(numeric_id))
|
<commit_before><commit_msg>Add letter logo for environment agency
Depends on:
https://github.com/alphagov/notifications-template-preview/pull/87<commit_after>
|
"""empty message
Revision ID: 0160_another_letter_org
Revises: 0159_add_historical_redact
Create Date: 2017-06-29 12:44:16.815039
"""
# revision identifiers, used by Alembic.
revision = '0160_another_letter_org'
down_revision = '0159_add_historical_redact'
from alembic import op
NEW_ORGANISATIONS = [
('501', 'Environment Agency (PDF letters ONLY)'),
]
def upgrade():
for numeric_id, name in NEW_ORGANISATIONS:
op.execute("""
INSERT
INTO dvla_organisation
VALUES ('{}', '{}')
""".format(numeric_id, name))
def downgrade():
for numeric_id, _ in NEW_ORGANISATIONS:
op.execute("""
DELETE
FROM dvla_organisation
WHERE id = '{}'
""".format(numeric_id))
|
Add letter logo for environment agency
Depends on:
https://github.com/alphagov/notifications-template-preview/pull/87"""empty message
Revision ID: 0160_another_letter_org
Revises: 0159_add_historical_redact
Create Date: 2017-06-29 12:44:16.815039
"""
# revision identifiers, used by Alembic.
revision = '0160_another_letter_org'
down_revision = '0159_add_historical_redact'
from alembic import op
NEW_ORGANISATIONS = [
('501', 'Environment Agency (PDF letters ONLY)'),
]
def upgrade():
for numeric_id, name in NEW_ORGANISATIONS:
op.execute("""
INSERT
INTO dvla_organisation
VALUES ('{}', '{}')
""".format(numeric_id, name))
def downgrade():
for numeric_id, _ in NEW_ORGANISATIONS:
op.execute("""
DELETE
FROM dvla_organisation
WHERE id = '{}'
""".format(numeric_id))
|
<commit_before><commit_msg>Add letter logo for environment agency
Depends on:
https://github.com/alphagov/notifications-template-preview/pull/87<commit_after>"""empty message
Revision ID: 0160_another_letter_org
Revises: 0159_add_historical_redact
Create Date: 2017-06-29 12:44:16.815039
"""
# revision identifiers, used by Alembic.
revision = '0160_another_letter_org'
down_revision = '0159_add_historical_redact'
from alembic import op
NEW_ORGANISATIONS = [
('501', 'Environment Agency (PDF letters ONLY)'),
]
def upgrade():
for numeric_id, name in NEW_ORGANISATIONS:
op.execute("""
INSERT
INTO dvla_organisation
VALUES ('{}', '{}')
""".format(numeric_id, name))
def downgrade():
for numeric_id, _ in NEW_ORGANISATIONS:
op.execute("""
DELETE
FROM dvla_organisation
WHERE id = '{}'
""".format(numeric_id))
|
|
d1a5402bfa8779e3857ce4330b1a9e7b0baec153
|
buffer/tests/test_response.py
|
buffer/tests/test_response.py
|
from nose.tools import eq_
from buffer.response import ResponseObject
def test_reponse_check_for_inception():
'''
Given a dict with a dict, it should convert all the dicts to ResponseObject
'''
awesome_dict = {
'key': 'value',
'second_dict': {
'key2': 'value2'
}
}
response = ResponseObject(awesome_dict)
response.key3 = 'value3'
eq_(response.key, 'value')
eq_(response.key3, 'value3')
eq_(response.second_dict, {'key2': 'value2'})
eq_(response.second_dict.key2, 'value2')
|
Test response object for dict into dict into dict
|
Test response object for dict into dict into dict
|
Python
|
mit
|
vtemian/buffpy,bufferapp/buffer-python
|
Test response object for dict into dict into dict
|
from nose.tools import eq_
from buffer.response import ResponseObject
def test_reponse_check_for_inception():
'''
Given a dict with a dict, it should convert all the dicts to ResponseObject
'''
awesome_dict = {
'key': 'value',
'second_dict': {
'key2': 'value2'
}
}
response = ResponseObject(awesome_dict)
response.key3 = 'value3'
eq_(response.key, 'value')
eq_(response.key3, 'value3')
eq_(response.second_dict, {'key2': 'value2'})
eq_(response.second_dict.key2, 'value2')
|
<commit_before><commit_msg>Test response object for dict into dict into dict<commit_after>
|
from nose.tools import eq_
from buffer.response import ResponseObject
def test_reponse_check_for_inception():
'''
Given a dict with a dict, it should convert all the dicts to ResponseObject
'''
awesome_dict = {
'key': 'value',
'second_dict': {
'key2': 'value2'
}
}
response = ResponseObject(awesome_dict)
response.key3 = 'value3'
eq_(response.key, 'value')
eq_(response.key3, 'value3')
eq_(response.second_dict, {'key2': 'value2'})
eq_(response.second_dict.key2, 'value2')
|
Test response object for dict into dict into dictfrom nose.tools import eq_
from buffer.response import ResponseObject
def test_reponse_check_for_inception():
'''
Given a dict with a dict, it should convert all the dicts to ResponseObject
'''
awesome_dict = {
'key': 'value',
'second_dict': {
'key2': 'value2'
}
}
response = ResponseObject(awesome_dict)
response.key3 = 'value3'
eq_(response.key, 'value')
eq_(response.key3, 'value3')
eq_(response.second_dict, {'key2': 'value2'})
eq_(response.second_dict.key2, 'value2')
|
<commit_before><commit_msg>Test response object for dict into dict into dict<commit_after>from nose.tools import eq_
from buffer.response import ResponseObject
def test_reponse_check_for_inception():
'''
Given a dict with a dict, it should convert all the dicts to ResponseObject
'''
awesome_dict = {
'key': 'value',
'second_dict': {
'key2': 'value2'
}
}
response = ResponseObject(awesome_dict)
response.key3 = 'value3'
eq_(response.key, 'value')
eq_(response.key3, 'value3')
eq_(response.second_dict, {'key2': 'value2'})
eq_(response.second_dict.key2, 'value2')
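A minimal ResponseObject sketch that would satisfy this test, offered purely as an assumption for illustration; the real class lives in buffer.response and may differ.
class ResponseObject(dict):
    # dict subclass exposing keys as attributes; nested dicts are
    # wrapped recursively so attribute access chains.
    def __init__(self, data):
        super(ResponseObject, self).__init__(data)
        for key in self:
            if isinstance(self[key], dict):
                self[key] = ResponseObject(self[key])
    def __getattr__(self, name):
        try:
            return self[name]
        except KeyError:
            raise AttributeError(name)
    def __setattr__(self, name, value):
        self[name] = value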
|
|
2d8648a9295a49c03cdb0d8afa19629f89ae08bc
|
changes/models/testmessage.py
|
changes/models/testmessage.py
|
import uuid
from sqlalchemy import Column, ForeignKey, Integer
from sqlalchemy.orm import relationship
from sqlalchemy.schema import Index
from changes.config import db
from changes.db.types.guid import GUID
class TestMessage(db.model):
"""
The message produced by a run of a test.
This is generally captured from standard output/error by the test machine, and is extracted from the junit.xml file.
We record it as a byte offset in the junit.xml artifact entry.
"""
__tablename__ = 'testmessage'
__table_args__ = (
Index('idx_testmessage_test_id', 'test_id'),
)
id = Column(GUID, nullable=False, primary_key=True, default=uuid.uuid4)
test_id = Column(GUID, ForeignKey('test.id', ondelete="CASCADE"), nullable=False)
artifact_id = Column(GUID, ForeignKey('artifact.id', ondelete="CASCADE"), nullable=False)
start_offset = Column(Integer, default=0, nullable=False)
length = Column(Integer, nullable=False)
test = relationship('TestCase')
artifact = relationship('Artifact')
def __init__(self, **kwargs):
super(TestMessage, self).__init__(**kwargs)
if self.id is None:
self.id = uuid.uuid4()
def get_message(self):
with self.artifact.file.get_file(self.start_offset, self.length) as message:
return message.read()
|
Add db model for TestMessage database
|
Add db model for TestMessage database
Summary:
Adds a model for the testmessage table and support for getting the
message
Depends on D206477
Depends on D206407
Reviewers: anupc, paulruan
Reviewed By: paulruan
Subscribers: changesbot, kylec
Differential Revision: https://tails.corp.dropbox.com/D206480
|
Python
|
apache-2.0
|
dropbox/changes,dropbox/changes,dropbox/changes,dropbox/changes
|
Add db model for TestMessage database
Summary:
Adds a model for the testmessage table and support for getting the
message
Depends on D206477
Depends on D206407
Reviewers: anupc, paulruan
Reviewed By: paulruan
Subscribers: changesbot, kylec
Differential Revision: https://tails.corp.dropbox.com/D206480
|
import uuid
from sqlalchemy import Column, ForeignKey, Integer
from sqlalchemy.orm import relationship
from sqlalchemy.schema import Index
from changes.config import db
from changes.db.types.guid import GUID
class TestMessage(db.model):
"""
The message produced by a run of a test.
This is generally captured from standard output/error by the test machine, and is extracted from the junit.xml file.
We record it as a byte offset in the junit.xml artifact entry.
"""
__tablename__ = 'testmessage'
__table_args__ = (
Index('idx_testmessage_test_id', 'test_id'),
)
id = Column(GUID, nullable=False, primary_key=True, default=uuid.uuid4)
test_id = Column(GUID, ForeignKey('test.id', ondelete="CASCADE"), nullable=False)
artifact_id = Column(GUID, ForeignKey('artifact.id', ondelete="CASCADE"), nullable=False)
start_offset = Column(Integer, default=0, nullable=False)
length = Column(Integer, nullable=False)
test = relationship('TestCase')
artifact = relationship('Artifact')
def __init__(self, **kwargs):
super(TestMessage, self).__init__(**kwargs)
if self.id is None:
self.id = uuid.uuid4()
def get_message(self):
with self.artifact.file.get_file(self.start_offset, self.length) as message:
return message.read()
|
<commit_before><commit_msg>Add db model for TestMessage database
Summary:
Adds a model for the testmessage table and support for getting the
message
Depends on D206477
Depends on D206407
Reviewers: anupc, paulruan
Reviewed By: paulruan
Subscribers: changesbot, kylec
Differential Revision: https://tails.corp.dropbox.com/D206480<commit_after>
|
import uuid
from sqlalchemy import Column, ForeignKey, Integer
from sqlalchemy.orm import relationship
from sqlalchemy.schema import Index
from changes.config import db
from changes.db.types.guid import GUID
class TestMessage(db.model):
"""
The message produced by a run of a test.
This is generally captured from standard output/error by the test machine, and is extracted from the junit.xml file.
We record it as a byte offset in the junit.xml artifact entry.
"""
__tablename__ = 'testmessage'
__table_args__ = (
Index('idx_testmessage_test_id', 'test_id'),
)
id = Column(GUID, nullable=False, primary_key=True, default=uuid.uuid4)
test_id = Column(GUID, ForeignKey('test.id', ondelete="CASCADE"), nullable=False)
artifact_id = Column(GUID, ForeignKey('artifact.id', ondelete="CASCADE"), nullable=False)
start_offset = Column(Integer, default=0, nullable=False)
length = Column(Integer, nullable=False)
test = relationship('TestCase')
artifact = relationship('Artifact')
def __init__(self, **kwargs):
super(TestMessage, self).__init__(**kwargs)
if self.id is None:
self.id = uuid.uuid4()
def get_message(self):
with self.artifact.file.get_file(self.start_offset, self.length) as message:
return message.read()
|
Add db model for TestMessage database
Summary:
Adds a model for the testmessage table and support for getting the
message
Depends on D206477
Depends on D206407
Reviewers: anupc, paulruan
Reviewed By: paulruan
Subscribers: changesbot, kylec
Differential Revision: https://tails.corp.dropbox.com/D206480import uuid
from sqlalchemy import Column, ForeignKey, Integer
from sqlalchemy.orm import relationship
from sqlalchemy.schema import Index
from changes.config import db
from changes.db.types.guid import GUID
class TestMessage(db.model):
"""
The message produced by a run of a test.
This is generally captured from standard output/error by the test machine, and is extracted from the junit.xml file.
We record it as a byte offset in the junit.xml artifact entry.
"""
__tablename__ = 'testmessage'
__table_args__ = (
Index('idx_testmessage_test_id', 'test_id'),
)
id = Column(GUID, nullable=False, primary_key=True, default=uuid.uuid4)
test_id = Column(GUID, ForeignKey('test.id', ondelete="CASCADE"), nullable=False)
artifact_id = Column(GUID, ForeignKey('artifact.id', ondelete="CASCADE"), nullable=False)
start_offset = Column(Integer, default=0, nullable=False)
length = Column(Integer, nullable=False)
test = relationship('TestCase')
artifact = relationship('Artifact')
def __init__(self, **kwargs):
super(TestMessage, self).__init__(**kwargs)
if self.id is None:
self.id = uuid.uuid4()
def get_message(self):
with self.artifact.file.get_file(self.start_offset, self.length) as message:
return message.read()
|
<commit_before><commit_msg>Add db model for TestMessage database
Summary:
Adds a model for the testmessage table and support for getting the
message
Depends on D206477
Depends on D206407
Reviewers: anupc, paulruan
Reviewed By: paulruan
Subscribers: changesbot, kylec
Differential Revision: https://tails.corp.dropbox.com/D206480<commit_after>import uuid
from sqlalchemy import Column, ForeignKey, Integer
from sqlalchemy.orm import relationship
from sqlalchemy.schema import Index
from changes.config import db
from changes.db.types.guid import GUID
class TestMessage(db.model):
"""
The message produced by a run of a test.
This is generally captured from standard output/error by the test machine, and is extracted from the junit.xml file.
We record it as a byte offset in the junit.xml artifact entry.
"""
__tablename__ = 'testmessage'
__table_args__ = (
Index('idx_testmessage_test_id', 'test_id'),
)
id = Column(GUID, nullable=False, primary_key=True, default=uuid.uuid4)
test_id = Column(GUID, ForeignKey('test.id', ondelete="CASCADE"), nullable=False)
artifact_id = Column(GUID, ForeignKey('artifact.id', ondelete="CASCADE"), nullable=False)
start_offset = Column(Integer, default=0, nullable=False)
length = Column(Integer, nullable=False)
test = relationship('TestCase')
artifact = relationship('Artifact')
def __init__(self, **kwargs):
super(TestMessage, self).__init__(**kwargs)
if self.id is None:
self.id = uuid.uuid4()
def get_message(self):
with self.artifact.file.get_file(self.start_offset, self.length) as message:
return message.read()
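A hypothetical read path for the model; `session` and `some_test` are placeholders, and the plain SQLAlchemy query style is an assumption about this codebase.
message = session.query(TestMessage).filter(
    TestMessage.test_id == some_test.id,
).first()
if message is not None:
    print(message.get_message())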
|
|
dd7686b609245c4c37364167603a7b5b0165ebbc
|
migrations/versions/588336e02ca_use_native_postgres_json_type_for_entry_.py
|
migrations/versions/588336e02ca_use_native_postgres_json_type_for_entry_.py
|
"""Use native postgres JSON type for Entry.content
Revision ID: 588336e02ca
Revises: 2b7f5e38dd73
Create Date: 2014-01-09 22:40:07.690000
"""
# revision identifiers, used by Alembic.
revision = '588336e02ca'
down_revision = '2b7f5e38dd73'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
# Couldn't find a way to specify a USING clause with alembic's alter_table
op.execute('ALTER TABLE entry ALTER COLUMN content TYPE json USING content::json')
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.alter_column('entry', 'content',
type_=sa.TEXT(),
nullable=False)
### end Alembic commands ###
|
Add migration for native postgres JSON type
|
Add migration for native postgres JSON type
|
Python
|
mit
|
streamr/marvin,streamr/marvin,streamr/marvin
|
Add migration for native postgres JSON type
|
"""Use native postgres JSON type for Entry.content
Revision ID: 588336e02ca
Revises: 2b7f5e38dd73
Create Date: 2014-01-09 22:40:07.690000
"""
# revision identifiers, used by Alembic.
revision = '588336e02ca'
down_revision = '2b7f5e38dd73'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
# Couldn't find a way to specify a USING clause with alembic's alter_table
op.execute('ALTER TABLE entry ALTER COLUMN content TYPE json USING content::json')
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.alter_column('entry', 'content',
type_=sa.TEXT(),
nullable=False)
### end Alembic commands ###
|
<commit_before><commit_msg>Add migration for native postgres JSON type<commit_after>
|
"""Use native postgres JSON type for Entry.content
Revision ID: 588336e02ca
Revises: 2b7f5e38dd73
Create Date: 2014-01-09 22:40:07.690000
"""
# revision identifiers, used by Alembic.
revision = '588336e02ca'
down_revision = '2b7f5e38dd73'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
# Couldn't find a way to specify a USING clause with alembic's alter_table
op.execute('ALTER TABLE entry ALTER COLUMN content TYPE json USING content::json')
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.alter_column('entry', 'content',
type_=sa.TEXT(),
nullable=False)
### end Alembic commands ###
|
Add migration for native postgres JSON type"""Use native postgres JSON type for Entry.content
Revision ID: 588336e02ca
Revises: 2b7f5e38dd73
Create Date: 2014-01-09 22:40:07.690000
"""
# revision identifiers, used by Alembic.
revision = '588336e02ca'
down_revision = '2b7f5e38dd73'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
# Couldn't find a way to specify a USING clause with alembic's alter_table
op.execute('ALTER TABLE entry ALTER COLUMN content TYPE json USING content::json')
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.alter_column('entry', 'content',
type_=sa.TEXT(),
nullable=False)
### end Alembic commands ###
|
<commit_before><commit_msg>Add migration for native postgres JSON type<commit_after>"""Use native postgres JSON type for Entry.content
Revision ID: 588336e02ca
Revises: 2b7f5e38dd73
Create Date: 2014-01-09 22:40:07.690000
"""
# revision identifiers, used by Alembic.
revision = '588336e02ca'
down_revision = '2b7f5e38dd73'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
# Couldn't find a way to specify a USING clause with alembic's alter_table
op.execute('ALTER TABLE entry ALTER COLUMN content TYPE json USING content::json')
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.alter_column('entry', 'content',
type_=sa.TEXT(),
nullable=False)
### end Alembic commands ###
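Worth noting: the USING cast succeeds only if every existing TEXT value is valid JSON. A hedged sanity sketch of that precondition (the rows are made up):
import json
legacy_rows = ['{"title": "Episode 1"}', '{"tags": ["a", "b"]}']  # hypothetical
for content in legacy_rows:
    json.loads(content)  # raises ValueError on data the cast would reject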
|
|
de75d383b9b6461c25e71bcc71958282d2265147
|
googleapis/rules/rule_lib_test.py
|
googleapis/rules/rule_lib_test.py
|
#!/usr/bin/env python3
# Copyright 2020 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Placeholder for tests."""
import unittest
class RuleLibTest(unittest.TestCase):
def test_placeholder(self):
pass
|
Add (currently empty) test placeholder.
|
Add (currently empty) test placeholder.
|
Python
|
apache-2.0
|
chronicle/api-samples-python
|
Add (currently empty) test placeholder.
|
#!/usr/bin/env python3
# Copyright 2020 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Placeholder for tests."""
import unittest
class RuleLibTest(unittest.TestCase):
def test_placeholder(self):
pass
|
<commit_before><commit_msg>Add (currently empty) test placeholder.<commit_after>
|
#!/usr/bin/env python3
# Copyright 2020 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Placeholder for tests."""
import unittest
class RuleLibTest(unittest.TestCase):
def test_placeholder(self):
pass
|
Add (currently empty) test placeholder.#!/usr/bin/env python3
# Copyright 2020 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Placeholder for tests."""
import unittest
class RuleLibTest(unittest.TestCase):
def test_placeholder(self):
pass
|
<commit_before><commit_msg>Add (currently empty) test placeholder.<commit_after>#!/usr/bin/env python3
# Copyright 2020 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Placeholder for tests."""
import unittest
class RuleLibTest(unittest.TestCase):
def test_placeholder(self):
pass
|
|
0170835d59519a7f69badf17d0be6bc49f92ea58
|
lintcode/Medium/170_Rotate_List.py
|
lintcode/Medium/170_Rotate_List.py
|
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
# @param head: the list
# @param k: rotate to the right k places
# @return: the list after rotation
def rotateRight(self, head, k):
# write your code here
# Solution 1
# tmp = head
# arr= []
# while(tmp):
# arr.append(tmp.val)
# tmp = tmp.next
# arr = arr[len(arr) - k:] + arr[:len(arr) - k]
# arr = map(lambda i: ListNode(i), arr)
# for i in range(len(arr) - 1):
# arr[i].next = arr[i + 1]
# return arr[0] if len(arr) > 0 else None
# Solution 2
if (head is None):
return None
l = 1
tmp = head
while (tmp and tmp.next):
tmp = tmp.next
l += 1
tmp.next = head
for i in range(l - (k % l)):
tmp = tmp.next
res = tmp.next
tmp.next = None
return res
|
Add solution to lintcode question 170
|
Add solution to lintcode question 170
|
Python
|
mit
|
Rhadow/leetcode,Rhadow/leetcode,Rhadow/leetcode,Rhadow/leetcode
|
Add solution to lintcode question 170
|
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
# @param head: the list
# @param k: rotate to the right k places
# @return: the list after rotation
def rotateRight(self, head, k):
# write your code here
# Solution 1
# tmp = head
# arr= []
# while(tmp):
# arr.append(tmp.val)
# tmp = tmp.next
# arr = arr[len(arr) - k:] + arr[:len(arr) - k]
# arr = map(lambda i: ListNode(i), arr)
# for i in range(len(arr) - 1):
# arr[i].next = arr[i + 1]
# return arr[0] if len(arr) > 0 else None
# Solution 2
if (head is None):
return None
l = 1
tmp = head
while (tmp and tmp.next):
tmp = tmp.next
l += 1
tmp.next = head
for i in range(l - (k % l)):
tmp = tmp.next
res = tmp.next
tmp.next = None
return res
|
<commit_before><commit_msg>Add solution to lintcode question 170<commit_after>
|
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
# @param head: the list
# @param k: rotate to the right k places
# @return: the list after rotation
def rotateRight(self, head, k):
# write your code here
# Solution 1
# tmp = head
# arr= []
# while(tmp):
# arr.append(tmp.val)
# tmp = tmp.next
# arr = arr[len(arr) - k:] + arr[:len(arr) - k]
# arr = map(lambda i: ListNode(i), arr)
# for i in range(len(arr) - 1):
# arr[i].next = arr[i + 1]
# return arr[0] if len(arr) > 0 else None
# Solution 2
if (head is None):
return None
l = 1
tmp = head
while (tmp and tmp.next):
tmp = tmp.next
l += 1
tmp.next = head
for i in range(l - (k % l)):
tmp = tmp.next
res = tmp.next
tmp.next = None
return res
|
Add solution to lintcode question 170# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
# @param head: the list
# @param k: rotate to the right k places
# @return: the list after rotation
def rotateRight(self, head, k):
# write your code here
# Solution 1
# tmp = head
# arr= []
# while(tmp):
# arr.append(tmp.val)
# tmp = tmp.next
# arr = arr[len(arr) - k:] + arr[:len(arr) - k]
# arr = map(lambda i: ListNode(i), arr)
# for i in range(len(arr) - 1):
# arr[i].next = arr[i + 1]
# return arr[0] if len(arr) > 0 else None
# Solution 2
if (head is None):
return None
l = 1
tmp = head
while (tmp and tmp.next):
tmp = tmp.next
l += 1
tmp.next = head
for i in range(l - (k % l)):
tmp = tmp.next
res = tmp.next
tmp.next = None
return res
|
<commit_before><commit_msg>Add solution to lintcode question 170<commit_after># Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
# @param head: the list
# @param k: rotate to the right k places
# @return: the list after rotation
def rotateRight(self, head, k):
# write your code here
# Solution 1
# tmp = head
# arr= []
# while(tmp):
# arr.append(tmp.val)
# tmp = tmp.next
# arr = arr[len(arr) - k:] + arr[:len(arr) - k]
# arr = map(lambda i: ListNode(i), arr)
# for i in range(len(arr) - 1):
# arr[i].next = arr[i + 1]
# return arr[0] if len(arr) > 0 else None
# Solution 2
if (head is None):
return None
l = 1
tmp = head
while (tmp and tmp.next):
tmp = tmp.next
l += 1
tmp.next = head
for i in range(l - (k % l)):
tmp = tmp.next
res = tmp.next
tmp.next = None
return res
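A hedged smoke test for Solution 2 above: rotating 1->2->3->4->5 by k=2 should yield 4->5->1->2->3.
class ListNode:
    def __init__(self, x):
        self.val = x
        self.next = None
head = None
for v in [5, 4, 3, 2, 1]:  # build 1->2->3->4->5 by prepending
    node = ListNode(v)
    node.next = head
    head = node
head = Solution().rotateRight(head, 2)
vals = []
while head:
    vals.append(head.val)
    head = head.next
print(vals)  # [4, 5, 1, 2, 3]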
|
|
230bf08f9aac697fc4c8dc092348be3b7026046f
|
test/requests/parametrized_test.py
|
test/requests/parametrized_test.py
|
import logging
import unittest
from elasticsearch import Elasticsearch, TransportError
class ParametrizedTest(unittest.TestCase):
def __init__(self, methodName='runTest', gn2_url="http://localhost:5003", es_url="localhost:9200"):
super(ParametrizedTest, self).__init__(methodName=methodName)
self.gn2_url = gn2_url
self.es_url = es_url
def setUp(self):
self.es = Elasticsearch([self.es_url])
self.es_cleanup = []
es_logger = logging.getLogger("elasticsearch")
es_logger.addHandler(
logging.FileHandler("/tmp/es_TestRegistrationInfo.log"))
es_trace_logger = logging.getLogger("elasticsearch.trace")
es_trace_logger.addHandler(
logging.FileHandler("/tmp/es_TestRegistrationTrace.log"))
def tearDown(self):
self.es.delete_by_query(
index="users"
, doc_type="local"
, body={"query":{"match":{"email_address":"test@user.com"}}})
|
Create parametrized superclass for tests
|
Create parametrized superclass for tests
* Since the tests require that some parameters be provided while running
the tests, create a class that helps abstract away the details of
retrieving and setting the expected parameters.
|
Python
|
agpl-3.0
|
DannyArends/genenetwork2,genenetwork/genenetwork2,pjotrp/genenetwork2,pjotrp/genenetwork2,genenetwork/genenetwork2,DannyArends/genenetwork2,zsloan/genenetwork2,DannyArends/genenetwork2,genenetwork/genenetwork2,pjotrp/genenetwork2,DannyArends/genenetwork2,pjotrp/genenetwork2,zsloan/genenetwork2,genenetwork/genenetwork2,zsloan/genenetwork2,pjotrp/genenetwork2,zsloan/genenetwork2,DannyArends/genenetwork2,DannyArends/genenetwork2
|
Create parametrized superclass for tests
* Since the tests require that some parameters be provided while running
the tests, create a class that helps abstract away the details of
retrieving and setting the expected parameters.
|
import logging
import unittest
from elasticsearch import Elasticsearch, TransportError
class ParametrizedTest(unittest.TestCase):
def __init__(self, methodName='runTest', gn2_url="http://localhost:5003", es_url="localhost:9200"):
super(ParametrizedTest, self).__init__(methodName=methodName)
self.gn2_url = gn2_url
self.es_url = es_url
def setUp(self):
self.es = Elasticsearch([self.es_url])
self.es_cleanup = []
es_logger = logging.getLogger("elasticsearch")
es_logger.addHandler(
logging.FileHandler("/tmp/es_TestRegistrationInfo.log"))
es_trace_logger = logging.getLogger("elasticsearch.trace")
es_trace_logger.addHandler(
logging.FileHandler("/tmp/es_TestRegistrationTrace.log"))
def tearDown(self):
self.es.delete_by_query(
index="users"
, doc_type="local"
, body={"query":{"match":{"email_address":"test@user.com"}}})
|
<commit_before><commit_msg>Create parametrized superclass for tests
* Since the tests require that some parameters be provided while running
the tests, create a class that helps abstract away the details of
retrieving and setting the expected parameters.<commit_after>
|
import logging
import unittest
from elasticsearch import Elasticsearch, TransportError
class ParametrizedTest(unittest.TestCase):
def __init__(self, methodName='runTest', gn2_url="http://localhost:5003", es_url="localhost:9200"):
super(ParametrizedTest, self).__init__(methodName=methodName)
self.gn2_url = gn2_url
self.es_url = es_url
def setUp(self):
self.es = Elasticsearch([self.es_url])
self.es_cleanup = []
es_logger = logging.getLogger("elasticsearch")
es_logger.addHandler(
logging.FileHandler("/tmp/es_TestRegistrationInfo.log"))
es_trace_logger = logging.getLogger("elasticsearch.trace")
es_trace_logger.addHandler(
logging.FileHandler("/tmp/es_TestRegistrationTrace.log"))
def tearDown(self):
self.es.delete_by_query(
index="users"
, doc_type="local"
, body={"query":{"match":{"email_address":"test@user.com"}}})
|
Create parametrized superclass for tests
* Since the tests require that some parameters be provided while running
the tests, create a class that helps abstract away the details of
retrieving and setting the expected parameters.import logging
import unittest
from elasticsearch import Elasticsearch, TransportError
class ParametrizedTest(unittest.TestCase):
def __init__(self, methodName='runTest', gn2_url="http://localhost:5003", es_url="localhost:9200"):
super(ParametrizedTest, self).__init__(methodName=methodName)
self.gn2_url = gn2_url
self.es_url = es_url
def setUp(self):
self.es = Elasticsearch([self.es_url])
self.es_cleanup = []
es_logger = logging.getLogger("elasticsearch")
es_logger.addHandler(
logging.FileHandler("/tmp/es_TestRegistrationInfo.log"))
es_trace_logger = logging.getLogger("elasticsearch.trace")
es_trace_logger.addHandler(
logging.FileHandler("/tmp/es_TestRegistrationTrace.log"))
def tearDown(self):
self.es.delete_by_query(
index="users"
, doc_type="local"
, body={"query":{"match":{"email_address":"test@user.com"}}})
|
<commit_before><commit_msg>Create parametrized superclass for tests
* Since the tests require that some parameters be provided while running
the tests, create a class that helps abstract away the details of
retrieving and setting the expected parameters.<commit_after>import logging
import unittest
from elasticsearch import Elasticsearch, TransportError
class ParametrizedTest(unittest.TestCase):
def __init__(self, methodName='runTest', gn2_url="http://localhost:5003", es_url="localhost:9200"):
super(ParametrizedTest, self).__init__(methodName=methodName)
self.gn2_url = gn2_url
self.es_url = es_url
def setUp(self):
self.es = Elasticsearch([self.es_url])
self.es_cleanup = []
es_logger = logging.getLogger("elasticsearch")
es_logger.addHandler(
logging.FileHandler("/tmp/es_TestRegistrationInfo.log"))
es_trace_logger = logging.getLogger("elasticsearch.trace")
es_trace_logger.addHandler(
logging.FileHandler("/tmp/es_TestRegistrationTrace.log"))
def tearDown(self):
self.es.delete_by_query(
index="users"
, doc_type="local"
, body={"query":{"match":{"email_address":"test@user.com"}}})
|
|
4bde5fdb6eb8d87be42621c8c5ec9d68e5810d3a
|
glaciercmd/command_create_inventory_job_for_vault.py
|
glaciercmd/command_create_inventory_job_for_vault.py
|
import boto
import datetime
class CommandCreateInventoryJobForVault(object):
def execute(self, args, config):
glacier_connection = boto.connect_glacier(aws_access_key_id=config.get('configuration', 'aws_key'), aws_secret_access_key=config.get('configuration', 'aws_secret'))
try:
vault = glacier_connection.get_vault(args[5])
except:
vault = None
if vault is None:
print "Vault named '{}' does not exist.".format(args[5])
else:
job_id = vault.retrieve_inventory()
print "Inventory job initiated with ID {}".format(job_id)
def accept(self, args):
return len(args) >= 6 and args[0] == 'create' and args[1] == 'inventory' and args[2] == 'job' and args[3] == 'for' and args[4] == 'vault'
def help(self):
return "create inventory job for vault <vault name>"
def command_init():
return CommandCreateInventoryJobForVault()
|
Add inventory job creation command
|
Add inventory job creation command
|
Python
|
mit
|
carsonmcdonald/glacier-cmd
|
Add inventory job creation command
|
import boto
import datetime
class CommandCreateInventoryJobForVault(object):
def execute(self, args, config):
glacier_connection = boto.connect_glacier(aws_access_key_id=config.get('configuration', 'aws_key'), aws_secret_access_key=config.get('configuration', 'aws_secret'))
try:
vault = glacier_connection.get_vault(args[5])
except:
vault = None
if vault is None:
print "Vault named '{}' does not exist.".format(args[5])
else:
job_id = vault.retrieve_inventory()
print "Inventory job initiated with ID {}".format(job_id)
def accept(self, args):
return len(args) >= 6 and args[0] == 'create' and args[1] == 'inventory' and args[2] == 'job' and args[3] == 'for' and args[4] == 'vault'
def help(self):
return "create inventory job for vault <vault name>"
def command_init():
return CommandCreateInventoryJobForVault()
|
<commit_before><commit_msg>Add inventory job creation command<commit_after>
|
import boto
import datetime
class CommandCreateInventoryJobForVault(object):
def execute(self, args, config):
glacier_connection = boto.connect_glacier(aws_access_key_id=config.get('configuration', 'aws_key'), aws_secret_access_key=config.get('configuration', 'aws_secret'))
try:
vault = glacier_connection.get_vault(args[5])
except:
vault = None
if vault is None:
print "Vault named '{}' does not exist.".format(args[5])
else:
job_id = vault.retrieve_inventory()
print "Inventory job initiated with ID {}".format(job_id)
def accept(self, args):
return len(args) >= 6 and args[0] == 'create' and args[1] == 'inventory' and args[2] == 'job' and args[3] == 'for' and args[4] == 'vault'
def help(self):
return "create inventory job for vault <vault name>"
def command_init():
return CommandCreateInventoryJobForVault()
|
Add inventory job creation commandimport boto
import datetime
class CommandCreateInventoryJobForVault(object):
def execute(self, args, config):
glacier_connection = boto.connect_glacier(aws_access_key_id=config.get('configuration', 'aws_key'), aws_secret_access_key=config.get('configuration', 'aws_secret'))
try:
vault = glacier_connection.get_vault(args[5])
except:
vault = None
if vault is None:
print "Vault named '{}' does not exist.".format(args[5])
else:
job_id = vault.retrieve_inventory()
print "Inventory job initiated with ID {}".format(job_id)
def accept(self, args):
return len(args) >= 6 and args[0] == 'create' and args[1] == 'inventory' and args[2] == 'job' and args[3] == 'for' and args[4] == 'vault'
def help(self):
return "create inventory job for vault <vault name>"
def command_init():
return CommandCreateInventoryJobForVault()
|
<commit_before><commit_msg>Add inventory job creation command<commit_after>import boto
import datetime
class CommandCreateInventoryJobForVault(object):
def execute(self, args, config):
glacier_connection = boto.connect_glacier(aws_access_key_id=config.get('configuration', 'aws_key'), aws_secret_access_key=config.get('configuration', 'aws_secret'))
try:
vault = glacier_connection.get_vault(args[5])
except:
vault = None
if vault is None:
print "Vault named '{}' does not exist.".format(args[5])
else:
job_id = vault.retrieve_inventory()
print "Inventory job initiated with ID {}".format(job_id)
def accept(self, args):
return len(args) >= 6 and args[0] == 'create' and args[1] == 'inventory' and args[2] == 'job' and args[3] == 'for' and args[4] == 'vault'
def help(self):
return "create inventory job for vault <vault name>"
def command_init():
return CommandCreateInventoryJobForVault()
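A hypothetical wiring of the command, assuming the dispatcher passes the raw argument words plus a ConfigParser holding aws_key/aws_secret (Python 2, matching the print statements above); the credentials are placeholders.
import ConfigParser
config = ConfigParser.ConfigParser()
config.add_section('configuration')
config.set('configuration', 'aws_key', 'AKIA-PLACEHOLDER')
config.set('configuration', 'aws_secret', 'SECRET-PLACEHOLDER')
cmd = command_init()
args = ['create', 'inventory', 'job', 'for', 'vault', 'my-vault']
if cmd.accept(args):
    cmd.execute(args, config)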
|
|
85378ec16884089074c527d05da564d596f07bef
|
tests/test_tags.py
|
tests/test_tags.py
|
# tests/test_vendor.py
import unittest
from tests.base import BaseTestCase
from project.models import LineItem, Component
from project.inventory.forms import VendorCreateForm, PurchaseOrderForm, \
ComponentCreateForm
import json
class TestTagManagement(BaseTestCase):
def login(self):
self.client.post(
'/login',
data=dict(email="ad@min.com", password="admin_user"),
follow_redirects=True
)
def create_component(self, description="widget"):
self.login()
return self.client.post(
'/component/create',
data=dict(sku="12345", description=description),
follow_redirects=True)
def test_create_component_database_insert(self):
with self.client:
self.login()
self.create_component()
response = self.client.get('/component/', follow_redirects=True)
self.assertIn(b'<h1>Components</h1>', response.data)
self.assertIn(b'widget', response.data)
def test_tag_component(self):
with self.client:
self.login()
self.create_component()
response = self.client.post(
'/tag-component/1',
data=dict(category='Region',
tag_name='west coast'),
follow_redirects=True)
self.assertIn(b'has been tagged with', response.data)
self.assertIn(b'WEST COAST', response.data)
def test_create_single_tag(self):
with self.client:
self.login()
response = self.client.get(
'/manage-tags',
follow_redirects=True)
self.assertIn(b'Tag Manager', response.data)
# creating cats and tags should strip down form data
new_single_tag = 'lonely tag '
response = self.client.post(
'/manage-tags',
data=dict(category='',
tag_name=new_single_tag,
make="OK"),
follow_redirects=True)
self.assertIn(new_single_tag.strip().upper(), response.data)
response = self.client.get(
'/api/single-tags')
self.assertIn(new_single_tag.strip().upper(), response.data)
def test_create_tag_in_category(self):
with self.client:
self.login()
# creating cats and tags should strip down form data
tag_in_cat = ' tag in cat '
the_cat = ' the category '
response = self.client.post(
'/manage-tags',
data=dict(category=the_cat,
tag_name=tag_in_cat,
make="OK"),
follow_redirects=True)
self.assertIn("%s</span>:<span>%s" % (the_cat.strip().upper(), tag_in_cat.strip().upper()), response.data)
response = self.client.get(
'/api/categories')
print 'the json'
the_cat_with_tag = json.loads(response.data)
print the_cat_with_tag
self.assertEqual(
the_cat_with_tag[0]['tags'][0]['name'],
tag_in_cat.strip().upper())
|
Create tags test. test for create single tag and create tag in category
|
Create tags test. test for create single tag and create tag in category
|
Python
|
mit
|
paris3200/flask-inventory,paris3200/flask-inventory,cisko3000/flask-inventory,cisko3000/flask-inventory,cisko3000/flask-inventory,paris3200/flask-inventory
|
Create tags test. test for create single tag and create tag in category
|
# tests/test_vendor.py
import unittest
from tests.base import BaseTestCase
from project.models import LineItem, Component
from project.inventory.forms import VendorCreateForm, PurchaseOrderForm, \
ComponentCreateForm
import json
class TestTagManagement(BaseTestCase):
def login(self):
self.client.post(
'/login',
data=dict(email="ad@min.com", password="admin_user"),
follow_redirects=True
)
def create_component(self, description="widget"):
self.login()
return self.client.post(
'/component/create',
data=dict(sku="12345", description=description),
follow_redirects=True)
def test_create_component_database_insert(self):
with self.client:
self.login()
self.create_component()
response = self.client.get('/component/', follow_redirects=True)
self.assertIn(b'<h1>Components</h1>', response.data)
self.assertIn(b'widget', response.data)
def test_tag_component(self):
with self.client:
self.login()
self.create_component()
response = self.client.post(
'/tag-component/1',
data=dict(category='Region',
tag_name='west coast'),
follow_redirects=True)
self.assertIn(b'has been tagged with', response.data)
self.assertIn(b'WEST COAST', response.data)
def test_create_single_tag(self):
with self.client:
self.login()
response = self.client.get(
'/manage-tags',
follow_redirects=True)
self.assertIn(b'Tag Manager', response.data)
# creating cats and tags should strip down form data
new_single_tag = 'lonely tag '
response = self.client.post(
'/manage-tags',
data=dict(category='',
tag_name=new_single_tag,
make="OK"),
follow_redirects=True)
self.assertIn(new_single_tag.strip().upper(), response.data)
response = self.client.get(
'/api/single-tags')
self.assertIn(new_single_tag.strip().upper(), response.data)
def test_create_tag_in_category(self):
with self.client:
self.login()
# creating cats and tags should strip down form data
tag_in_cat = ' tag in cat '
the_cat = ' the category '
response = self.client.post(
'/manage-tags',
data=dict(category=the_cat,
tag_name=tag_in_cat,
make="OK"),
follow_redirects=True)
self.assertIn("%s</span>:<span>%s" % (the_cat.strip().upper(), tag_in_cat.strip().upper()), response.data)
response = self.client.get(
'/api/categories')
print 'the json'
the_cat_with_tag = json.loads(response.data)
print the_cat_with_tag
self.assertEqual(
the_cat_with_tag[0]['tags'][0]['name'],
tag_in_cat.strip().upper())
|
<commit_before><commit_msg>Create tags test. test for create single tag and create tag in category<commit_after>
|
# tests/test_vendor.py
import unittest
from tests.base import BaseTestCase
from project.models import LineItem, Component
from project.inventory.forms import VendorCreateForm, PurchaseOrderForm, \
ComponentCreateForm
import json
class TestTagManagement(BaseTestCase):
def login(self):
self.client.post(
'/login',
data=dict(email="ad@min.com", password="admin_user"),
follow_redirects=True
)
def create_component(self, description="widget"):
self.login()
return self.client.post(
'/component/create',
data=dict(sku="12345", description=description),
follow_redirects=True)
def test_create_component_database_insert(self):
with self.client:
self.login()
self.create_component()
response = self.client.get('/component/', follow_redirects=True)
self.assertIn(b'<h1>Components</h1>', response.data)
self.assertIn(b'widget', response.data)
def test_tag_component(self):
with self.client:
self.login()
self.create_component()
response = self.client.post(
'/tag-component/1',
data=dict(category='Region',
tag_name='west coast'),
follow_redirects=True)
self.assertIn(b'has been tagged with', response.data)
self.assertIn(b'WEST COAST', response.data)
def test_create_single_tag(self):
with self.client:
self.login()
response = self.client.get(
'/manage-tags',
follow_redirects=True)
self.assertIn(b'Tag Manager', response.data)
# creating cats and tags should strip down form data
new_single_tag = 'lonely tag '
response = self.client.post(
'/manage-tags',
data=dict(category='',
tag_name=new_single_tag,
make="OK"),
follow_redirects=True)
self.assertIn(new_single_tag.strip().upper(), response.data)
response = self.client.get(
'/api/single-tags')
self.assertIn(new_single_tag.strip().upper(), response.data)
def test_create_tag_in_category(self):
with self.client:
self.login()
# creating cats and tags should strip down form data
tag_in_cat = ' tag in cat '
the_cat = ' the category '
response = self.client.post(
'/manage-tags',
data=dict(category=the_cat,
tag_name=tag_in_cat,
make="OK"),
follow_redirects=True)
self.assertIn("%s</span>:<span>%s" % (the_cat.strip().upper(), tag_in_cat.strip().upper()), response.data)
response = self.client.get(
'/api/categories')
print 'the json'
the_cat_with_tag = json.loads(response.data)
print the_cat_with_tag
self.assertEqual(
the_cat_with_tag[0]['tags'][0]['name'],
tag_in_cat.strip().upper())
|
Create tags test. test for create single tag and create tag in category# tests/test_vendor.py
import unittest
from tests.base import BaseTestCase
from project.models import LineItem, Component
from project.inventory.forms import VendorCreateForm, PurchaseOrderForm, \
ComponentCreateForm
import json
class TestTagManagement(BaseTestCase):
def login(self):
self.client.post(
'/login',
data=dict(email="ad@min.com", password="admin_user"),
follow_redirects=True
)
def create_component(self, description="widget"):
self.login()
return self.client.post(
'/component/create',
data=dict(sku="12345", description=description),
follow_redirects=True)
def test_create_component_database_insert(self):
with self.client:
self.login()
self.create_component()
response = self.client.get('/component/', follow_redirects=True)
self.assertIn(b'<h1>Components</h1>', response.data)
self.assertIn(b'widget', response.data)
def test_tag_component(self):
with self.client:
self.login()
self.create_component()
response = self.client.post(
'/tag-component/1',
data=dict(category='Region',
tag_name='west coast'),
follow_redirects=True)
self.assertIn(b'has been tagged with', response.data)
self.assertIn(b'WEST COAST', response.data)
def test_create_single_tag(self):
with self.client:
self.login()
response = self.client.get(
'/manage-tags',
follow_redirects=True)
self.assertIn(b'Tag Manager', response.data)
# creating cats and tags should strip down form data
new_single_tag = 'lonely tag '
response = self.client.post(
'/manage-tags',
data=dict(category='',
tag_name=new_single_tag,
make="OK"),
follow_redirects=True)
self.assertIn(new_single_tag.strip().upper(), response.data)
response = self.client.get(
'/api/single-tags')
self.assertIn(new_single_tag.strip().upper(), response.data)
def test_create_tag_in_category(self):
with self.client:
self.login()
# creating cats and tags should strip down form data
tag_in_cat = ' tag in cat '
the_cat = ' the category '
response = self.client.post(
'/manage-tags',
data=dict(category=the_cat,
tag_name=tag_in_cat,
make="OK"),
follow_redirects=True)
self.assertIn("%s</span>:<span>%s" % (the_cat.strip().upper(), tag_in_cat.strip().upper()), response.data)
response = self.client.get(
'/api/categories')
print 'the json'
the_cat_with_tag = json.loads(response.data)
print the_cat_with_tag
self.assertEqual(
the_cat_with_tag[0]['tags'][0]['name'],
tag_in_cat.strip().upper())
|
<commit_before><commit_msg>Create tags test. test for create single tag and create tag in category<commit_after># tests/test_vendor.py
import unittest
from tests.base import BaseTestCase
from project.models import LineItem, Component
from project.inventory.forms import VendorCreateForm, PurchaseOrderForm, \
ComponentCreateForm
import json
class TestTagManagement(BaseTestCase):
def login(self):
self.client.post(
'/login',
data=dict(email="ad@min.com", password="admin_user"),
follow_redirects=True
)
def create_component(self, description="widget"):
self.login()
return self.client.post(
'/component/create',
data=dict(sku="12345", description=description),
follow_redirects=True)
def test_create_component_database_insert(self):
with self.client:
self.login()
self.create_component()
response = self.client.get('/component/', follow_redirects=True)
self.assertIn(b'<h1>Components</h1>', response.data)
self.assertIn(b'widget', response.data)
def test_tag_component(self):
with self.client:
self.login()
self.create_component()
response = self.client.post(
'/tag-component/1',
data=dict(category='Region',
tag_name='west coast'),
follow_redirects=True)
self.assertIn(b'has been tagged with', response.data)
self.assertIn(b'WEST COAST', response.data)
def test_create_single_tag(self):
with self.client:
self.login()
response = self.client.get(
'/manage-tags',
follow_redirects=True)
self.assertIn(b'Tag Manager', response.data)
# creating cats and tags should strip down form data
new_single_tag = 'lonely tag '
response = self.client.post(
'/manage-tags',
data=dict(category='',
tag_name=new_single_tag,
make="OK"),
follow_redirects=True)
self.assertIn(new_single_tag.strip().upper(), response.data)
response = self.client.get(
'/api/single-tags')
self.assertIn(new_single_tag.strip().upper(), response.data)
def test_create_tag_in_category(self):
with self.client:
self.login()
# creating cats and tags should strip down form data
tag_in_cat = ' tag in cat '
the_cat = ' the category '
response = self.client.post(
'/manage-tags',
data=dict(category=the_cat,
tag_name=tag_in_cat,
make="OK"),
follow_redirects=True)
self.assertIn("%s</span>:<span>%s" % (the_cat.strip().upper(), tag_in_cat.strip().upper()), response.data)
response = self.client.get(
'/api/categories')
print 'the json'
the_cat_with_tag = json.loads(response.data)
print the_cat_with_tag
self.assertEqual(
the_cat_with_tag[0]['tags'][0]['name'],
tag_in_cat.strip().upper())
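The assertions above assume the app normalizes tag and category names with strip() and upper() before storing them; a one-line sketch of that assumed normalization:
def normalize_tag(name):
    # assumed server-side normalization the tests assert against
    return name.strip().upper()
assert normalize_tag(' tag in cat ') == 'TAG IN CAT'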
|
|
53e082344a85544fae84c6ffc2ad221dc33a184f
|
algorithms/decision_tree.py
|
algorithms/decision_tree.py
|
from math import log
def mode(dataset):
counts = { 0: 0, 1:0 }
for entity in dataset:
        counts[entity[0]] += 1
# What if they are equal?
if counts[0] > counts[1]:
return 0
else:
return 1
# Attribute must be an index
def entities_with_attribute_value(attribute, value, dataset):
subset = []
for entity in dataset:
if entity[attribute] == value:
            subset.append(entity)
return subset
def entropy(dataset):
counts = { 0:0, 1:0 }
for entity in dataset:
counts[entity[0]]++
p0 = counts[0]/len(dataset)
p1 = counts[1]/len(dataset)
entropy = - p1 * log(p1, 2) - p2 * log(p2, 2)
return entropy
def choose_best_attribute(dataset, attributes_with_values)
best_gain = 0
best_attribute = None
for atrribute, values in attributes_with_values:
gain = entropy(dataset)
for value in values:
subset = entities_with_attribute_value(attribute, value, dataset)
gain -= (len(subset)/len(dataset)) * entropy(subset)
if best_gain < gain or best_attribute == None:
best_gain, best_attribute = gain, attribute
return attribute
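# Example (hypothetical data): entity[0] is the class label, the other
# indices are attribute values:
#   weather = [(0, 'sunny'), (1, 'rainy'), (1, 'rainy'), (0, 'sunny')]
#   choose_best_attribute(weather, [(1, ['sunny', 'rainy'])])  # -> 1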
|
Add decision tree helper functions
|
Add decision tree helper functions
|
Python
|
mit
|
victoria92/survival-predictor,victoria92/survival-predictor
|
Add decision tree helper functions
|
from math import log
def mode(dataset):
    """Return the majority class label (0 or 1) of the dataset."""
    counts = {0: 0, 1: 0}
    for entity in dataset:
        counts[entity[0]] += 1
    # Ties are resolved in favour of class 1.
    if counts[0] > counts[1]:
        return 0
    else:
        return 1
# Attribute must be an index
def entities_with_attribute_value(attribute, value, dataset):
    subset = []
    for entity in dataset:
        if entity[attribute] == value:
            subset.append(entity)
    return subset
def entropy(dataset):
    counts = {0: 0, 1: 0}
    for entity in dataset:
        counts[entity[0]] += 1
    p0 = counts[0] / float(len(dataset))
    p1 = counts[1] / float(len(dataset))
    # A class with zero probability contributes nothing to the entropy.
    result = 0.0
    for p in (p0, p1):
        if p > 0:
            result -= p * log(p, 2)
    return result
def choose_best_attribute(dataset, attributes_with_values):
    best_gain = 0
    best_attribute = None
    for attribute, values in attributes_with_values:
        # Information gain: entropy before the split minus the weighted
        # entropy of each value subset.
        gain = entropy(dataset)
        for value in values:
            subset = entities_with_attribute_value(attribute, value, dataset)
            if not subset:  # empty subset: zero weight, avoid entropy([])
                continue
            gain -= (len(subset) / float(len(dataset))) * entropy(subset)
        if best_gain < gain or best_attribute is None:
            best_gain, best_attribute = gain, attribute
    return best_attribute
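# Example (hypothetical data): entity[0] is the class label, the other
# indices are attribute values:
#   weather = [(0, 'sunny'), (1, 'rainy'), (1, 'rainy'), (0, 'sunny')]
#   choose_best_attribute(weather, [(1, ['sunny', 'rainy'])])  # -> 1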
|
<commit_before><commit_msg>Add decision tree helper functions<commit_after>
|
from math import log
def mode(dataset):
    """Return the majority class label (0 or 1) of the dataset."""
    counts = {0: 0, 1: 0}
    for entity in dataset:
        counts[entity[0]] += 1
    # Ties are resolved in favour of class 1.
    if counts[0] > counts[1]:
        return 0
    else:
        return 1
# Attribute must be an index
def entities_with_attribute_value(attribute, value, dataset):
    subset = []
    for entity in dataset:
        if entity[attribute] == value:
            subset.append(entity)
    return subset
def entropy(dataset):
    counts = {0: 0, 1: 0}
    for entity in dataset:
        counts[entity[0]] += 1
    p0 = counts[0] / float(len(dataset))
    p1 = counts[1] / float(len(dataset))
    # A class with zero probability contributes nothing to the entropy.
    result = 0.0
    for p in (p0, p1):
        if p > 0:
            result -= p * log(p, 2)
    return result
def choose_best_attribute(dataset, attributes_with_values):
    best_gain = 0
    best_attribute = None
    for attribute, values in attributes_with_values:
        # Information gain: entropy before the split minus the weighted
        # entropy of each value subset.
        gain = entropy(dataset)
        for value in values:
            subset = entities_with_attribute_value(attribute, value, dataset)
            if not subset:  # empty subset: zero weight, avoid entropy([])
                continue
            gain -= (len(subset) / float(len(dataset))) * entropy(subset)
        if best_gain < gain or best_attribute is None:
            best_gain, best_attribute = gain, attribute
    return best_attribute
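# Example (hypothetical data): entity[0] is the class label, the other
# indices are attribute values:
#   weather = [(0, 'sunny'), (1, 'rainy'), (1, 'rainy'), (0, 'sunny')]
#   choose_best_attribute(weather, [(1, ['sunny', 'rainy'])])  # -> 1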
|
Add decision tree helper functionsfrom math import log
def mode(dataset):
    """Return the majority class label (0 or 1) of the dataset."""
    counts = {0: 0, 1: 0}
    for entity in dataset:
        counts[entity[0]] += 1
    # Ties are resolved in favour of class 1.
    if counts[0] > counts[1]:
        return 0
    else:
        return 1
# Attribute must be an index
def entities_with_attribute_value(attribute, value, dataset):
    subset = []
    for entity in dataset:
        if entity[attribute] == value:
            subset.append(entity)
    return subset
def entropy(dataset):
    counts = {0: 0, 1: 0}
    for entity in dataset:
        counts[entity[0]] += 1
    p0 = counts[0] / float(len(dataset))
    p1 = counts[1] / float(len(dataset))
    # A class with zero probability contributes nothing to the entropy.
    result = 0.0
    for p in (p0, p1):
        if p > 0:
            result -= p * log(p, 2)
    return result
def choose_best_attribute(dataset, attributes_with_values):
    best_gain = 0
    best_attribute = None
    for attribute, values in attributes_with_values:
        # Information gain: entropy before the split minus the weighted
        # entropy of each value subset.
        gain = entropy(dataset)
        for value in values:
            subset = entities_with_attribute_value(attribute, value, dataset)
            if not subset:  # empty subset: zero weight, avoid entropy([])
                continue
            gain -= (len(subset) / float(len(dataset))) * entropy(subset)
        if best_gain < gain or best_attribute is None:
            best_gain, best_attribute = gain, attribute
    return best_attribute
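# Example (hypothetical data): entity[0] is the class label, the other
# indices are attribute values:
#   weather = [(0, 'sunny'), (1, 'rainy'), (1, 'rainy'), (0, 'sunny')]
#   choose_best_attribute(weather, [(1, ['sunny', 'rainy'])])  # -> 1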
|
<commit_before><commit_msg>Add decision tree helper functions<commit_after>from math import log
def mode(dataset):
    """Return the majority class label (0 or 1) of the dataset."""
    counts = {0: 0, 1: 0}
    for entity in dataset:
        counts[entity[0]] += 1
    # Ties are resolved in favour of class 1.
    if counts[0] > counts[1]:
        return 0
    else:
        return 1
# Attribute must be an index
def entities_with_attribute_value(attribute, value, dataset):
    subset = []
    for entity in dataset:
        if entity[attribute] == value:
            subset.append(entity)
    return subset
def entropy(dataset):
    counts = {0: 0, 1: 0}
    for entity in dataset:
        counts[entity[0]] += 1
    p0 = counts[0] / float(len(dataset))
    p1 = counts[1] / float(len(dataset))
    # A class with zero probability contributes nothing to the entropy.
    result = 0.0
    for p in (p0, p1):
        if p > 0:
            result -= p * log(p, 2)
    return result
def choose_best_attribute(dataset, attributes_with_values):
    best_gain = 0
    best_attribute = None
    for attribute, values in attributes_with_values:
        # Information gain: entropy before the split minus the weighted
        # entropy of each value subset.
        gain = entropy(dataset)
        for value in values:
            subset = entities_with_attribute_value(attribute, value, dataset)
            if not subset:  # empty subset: zero weight, avoid entropy([])
                continue
            gain -= (len(subset) / float(len(dataset))) * entropy(subset)
        if best_gain < gain or best_attribute is None:
            best_gain, best_attribute = gain, attribute
    return best_attribute
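# Example (hypothetical data): entity[0] is the class label, the other
# indices are attribute values:
#   weather = [(0, 'sunny'), (1, 'rainy'), (1, 'rainy'), (0, 'sunny')]
#   choose_best_attribute(weather, [(1, ['sunny', 'rainy'])])  # -> 1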
|
|
f8ab7808643358d1c01a8d6d75a3ccdbf7b83fbd
|
app-page-generating-time.py
|
app-page-generating-time.py
|
# -*- coding: utf-8 -*-
##############################
# Requirement:
# pip install web.py
##############################
import web
urls = (
r'/', 'index',
r'/notime', 'notime'
)
app = web.application(urls, globals())
class index:
def GET(self):
web.header('Content-Type', 'text/html')
return '''
<!DOCTYPE html>
<html lang="en">
<head>
<title>Web.py page generating time</title>
</head>
<body>
<h1>Hello, click page source menu to get page generating time!</h1>
</body>
</html>
'''
class notime:
def GET(self):
return '''
<!DOCTYPE html>
<html lang="en">
<head>
<title>Web.py page generating time</title>
</head>
<body>
<h1>Hello, there is no page generating time with wrong content-type!</h1>
</body>
</html>
'''
def is_html():
headers = [(k, v) for k, v in web.ctx.headers if 'content-type'==k.lower()]
if headers:
ctype = headers[0][1]
return ctype.startswith('text/html') or ctype.startswith('application/xhtml+xml')
import time
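# web.py processor: wraps every handler call, times it, and appends the
# elapsed time as an HTML comment (text/html responses only).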
def page_generating_time(handler):
stime = time.time()
resp = handler()
stime = time.time() - stime
if is_html():
return resp + '\n\r<!-- generating time: %s seconds -->\n\r' % (str(stime),)
else:
return resp
app.add_processor(page_generating_time)
if __name__ == '__main__':
app.run()
# Local Variables: **
# comment-column: 56 **
# indent-tabs-mode: nil **
# python-indent: 4 **
# End: **
|
Use processor to implement page generating time
|
Use processor to implement page generating time
|
Python
|
apache-2.0
|
goncha/webpy-examples
|
Use processor to implement page generating time
|
# -*- coding: utf-8 -*-
##############################
# Requirement:
# pip install web.py
##############################
import web
urls = (
r'/', 'index',
r'/notime', 'notime'
)
app = web.application(urls, globals())
class index:
def GET(self):
web.header('Content-Type', 'text/html')
return '''
<!DOCTYPE html>
<html lang="en">
<head>
<title>Web.py page generating time</title>
</head>
<body>
<h1>Hello, click page source menu to get page generating time!</h1>
</body>
</html>
'''
class notime:
def GET(self):
return '''
<!DOCTYPE html>
<html lang="en">
<head>
<title>Web.py page generating time</title>
</head>
<body>
<h1>Hello, there is no page generating time with wrong content-type!</h1>
</body>
</html>
'''
def is_html():
headers = [(k, v) for k, v in web.ctx.headers if 'content-type'==k.lower()]
if headers:
ctype = headers[0][1]
return ctype.startswith('text/html') or ctype.startswith('application/xhtml+xml')
import time
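# web.py processor: wraps every handler call, times it, and appends the
# elapsed time as an HTML comment (text/html responses only).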
def page_generating_time(handler):
stime = time.time()
resp = handler()
stime = time.time() - stime
if is_html():
return resp + '\n\r<!-- generating time: %s seconds -->\n\r' % (str(stime),)
else:
return resp
app.add_processor(page_generating_time)
if __name__ == '__main__':
app.run()
# Local Variables: **
# comment-column: 56 **
# indent-tabs-mode: nil **
# python-indent: 4 **
# End: **
|
<commit_before><commit_msg>Use processor to implement page generating time<commit_after>
|
# -*- coding: utf-8 -*-
##############################
# Requirement:
# pip install web.py
##############################
import web
urls = (
r'/', 'index',
r'/notime', 'notime'
)
app = web.application(urls, globals())
class index:
def GET(self):
web.header('Content-Type', 'text/html')
return '''
<!DOCTYPE html>
<html lang="en">
<head>
<title>Web.py page generating time</title>
</head>
<body>
<h1>Hello, click page source menu to get page generating time!</h1>
</body>
</html>
'''
class notime:
def GET(self):
return '''
<!DOCTYPE html>
<html lang="en">
<head>
<title>Web.py page generating time</title>
</head>
<body>
<h1>Hello, there is no page generating time with wrong content-type!</h1>
</body>
</html>
'''
def is_html():
headers = [(k, v) for k, v in web.ctx.headers if 'content-type'==k.lower()]
if headers:
ctype = headers[0][1]
return ctype.startswith('text/html') or ctype.startswith('application/xhtml+xml')
import time
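# web.py processor: wraps every handler call, times it, and appends the
# elapsed time as an HTML comment (text/html responses only).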
def page_generating_time(handler):
stime = time.time()
resp = handler()
stime = time.time() - stime
if is_html():
return resp + '\n\r<!-- generating time: %s seconds -->\n\r' % (str(stime),)
else:
return resp
app.add_processor(page_generating_time)
if __name__ == '__main__':
app.run()
# Local Variables: **
# comment-column: 56 **
# indent-tabs-mode: nil **
# python-indent: 4 **
# End: **
|
Use processor to implement page generating time# -*- coding: utf-8 -*-
##############################
# Requirement:
# pip install web.py
##############################
import web
urls = (
r'/', 'index',
r'/notime', 'notime'
)
app = web.application(urls, globals())
class index:
def GET(self):
web.header('Content-Type', 'text/html')
return '''
<!DOCTYPE html>
<html lang="en">
<head>
<title>Web.py page generating time</title>
</head>
<body>
<h1>Hello, click page source menu to get page generating time!</h1>
</body>
</html>
'''
class notime:
def GET(self):
return '''
<!DOCTYPE html>
<html lang="en">
<head>
<title>Web.py page generating time</title>
</head>
<body>
<h1>Hello, there is no page generating time with wrong content-type!</h1>
</body>
</html>
'''
def is_html():
headers = [(k, v) for k, v in web.ctx.headers if 'content-type'==k.lower()]
if headers:
ctype = headers[0][1]
return ctype.startswith('text/html') or ctype.startswith('application/xhtml+xml')
import time
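# web.py processor: wraps every handler call, times it, and appends the
# elapsed time as an HTML comment (text/html responses only).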
def page_generating_time(handler):
stime = time.time()
resp = handler()
stime = time.time() - stime
if is_html():
return resp + '\n\r<!-- generating time: %s seconds -->\n\r' % (str(stime),)
else:
return resp
app.add_processor(page_generating_time)
if __name__ == '__main__':
app.run()
# Local Variables: **
# comment-column: 56 **
# indent-tabs-mode: nil **
# python-indent: 4 **
# End: **
|
<commit_before><commit_msg>Use processor to implement page generating time<commit_after># -*- coding: utf-8 -*-
##############################
# Requirement:
# pip install web.py
##############################
import web
urls = (
r'/', 'index',
r'/notime', 'notime'
)
app = web.application(urls, globals())
class index:
def GET(self):
web.header('Content-Type', 'text/html')
return '''
<!DOCTYPE html>
<html lang="en">
<head>
<title>Web.py page generating time</title>
</head>
<body>
<h1>Hello, click page source menu to get page generating time!</h1>
</body>
</html>
'''
class notime:
def GET(self):
return '''
<!DOCTYPE html>
<html lang="en">
<head>
<title>Web.py page generating time</title>
</head>
<body>
<h1>Hello, there is no page generating time with wrong content-type!</h1>
</body>
</html>
'''
def is_html():
headers = [(k, v) for k, v in web.ctx.headers if 'content-type'==k.lower()]
if headers:
ctype = headers[0][1]
return ctype.startswith('text/html') or ctype.startswith('application/xhtml+xml')
import time
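# web.py processor: wraps every handler call, times it, and appends the
# elapsed time as an HTML comment (text/html responses only).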
def page_generating_time(handler):
stime = time.time()
resp = handler()
stime = time.time() - stime
if is_html():
return resp + '\n\r<!-- generating time: %s seconds -->\n\r' % (str(stime),)
else:
return resp
app.add_processor(page_generating_time)
if __name__ == '__main__':
app.run()
# Local Variables: **
# comment-column: 56 **
# indent-tabs-mode: nil **
# python-indent: 4 **
# End: **
|
|
bd6a5843b1e1ec38959655e93082e96c9cbf501d
|
test/lib/unzip_test.py
|
test/lib/unzip_test.py
|
# Run the following command to test:
#
# (in /usr/local/googkit)
# $ python -m {test_module_name}
#
# See also: http://docs.python.org/3.3/library/unittest.html#command-line-interface
#
# We cannot use unittest.mock on python 2.x!
# Please install the Mock module when you use Python 2.x.
#
# $ easy_install -U Mock
#
# See also: http://www.voidspace.org.uk/python/mock/#installing
import unittest
import os
try:
# Python 3.3 or later
import unittest.mock as mock
except ImportError:
# Python 2.x or 3.2-
import mock
import lib.unzip
import zipfile
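# zipfile.ZipFile is patched with a MagicMock, so no real archive is needed.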
class TestClone(unittest.TestCase):
def setUp(self):
self.ZipFile = mock.MagicMock()
zipfile.ZipFile = self.ZipFile
self.extractall = mock.MagicMock()
zipfile.ZipFile.return_value.__enter__.return_value.extractall = self.extractall
# run {{{
def test_run(self):
lib.unzip.run('/dir1/dir2/test.zip', '/dir1/dir2/dir3/dir4')
self.ZipFile.assert_called_once_with('/dir1/dir2/test.zip')
self.extractall.assert_called_once_with('/dir1/dir2/dir3/dir4')
# }}}
if __name__ == '__main__':
unittest.main()
# vim: fdm=marker
|
Add a test for lib.unzip
|
Add a test for lib.unzip
|
Python
|
mit
|
googkit/googkit,googkit/googkit,googkit/googkit
|
Add a test for lib.unzip
|
# Run the following command to test:
#
# (in /usr/local/googkit)
# $ python -m {test_module_name}
#
# See also: http://docs.python.org/3.3/library/unittest.html#command-line-interface
#
# We cannot use unittest.mock on python 2.x!
# Please install the Mock module when you use Python 2.x.
#
# $ easy_install -U Mock
#
# See also: http://www.voidspace.org.uk/python/mock/#installing
import unittest
import os
try:
# Python 3.3 or later
import unittest.mock as mock
except ImportError:
# Python 2.x or 3.2-
import mock
import lib.unzip
import zipfile
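# zipfile.ZipFile is patched with a MagicMock, so no real archive is needed.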
class TestClone(unittest.TestCase):
def setUp(self):
self.ZipFile = mock.MagicMock()
zipfile.ZipFile = self.ZipFile
self.extractall = mock.MagicMock()
zipfile.ZipFile.return_value.__enter__.return_value.extractall = self.extractall
# run {{{
def test_run(self):
lib.unzip.run('/dir1/dir2/test.zip', '/dir1/dir2/dir3/dir4')
self.ZipFile.assert_called_once_with('/dir1/dir2/test.zip')
self.extractall.assert_called_once_with('/dir1/dir2/dir3/dir4')
# }}}
if __name__ == '__main__':
unittest.main()
# vim: fdm=marker
|
<commit_before><commit_msg>Add a test for lib.unzip<commit_after>
|
# Run the following command to test:
#
# (in /usr/local/googkit)
# $ python -m {test_module_name}
#
# See also: http://docs.python.org/3.3/library/unittest.html#command-line-interface
#
# We cannot use unittest.mock on python 2.x!
# Please install the Mock module when you use Python 2.x.
#
# $ easy_install -U Mock
#
# See also: http://www.voidspace.org.uk/python/mock/#installing
import unittest
import os
try:
# Python 3.3 or later
import unittest.mock as mock
except ImportError:
# Python 2.x or 3.2-
import mock
import lib.unzip
import zipfile
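# zipfile.ZipFile is patched with a MagicMock, so no real archive is needed.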
class TestClone(unittest.TestCase):
def setUp(self):
self.ZipFile = mock.MagicMock()
zipfile.ZipFile = self.ZipFile
self.extractall = mock.MagicMock()
zipfile.ZipFile.return_value.__enter__.return_value.extractall = self.extractall
# run {{{
def test_run(self):
lib.unzip.run('/dir1/dir2/test.zip', '/dir1/dir2/dir3/dir4')
self.ZipFile.assert_called_once_with('/dir1/dir2/test.zip')
self.extractall.assert_called_once_with('/dir1/dir2/dir3/dir4')
# }}}
if __name__ == '__main__':
unittest.main()
# vim: fdm=marker
|
Add a test for lib.unzip# Run the following command to test:
#
# (in /usr/local/googkit)
# $ python -m {test_module_name}
#
# See also: http://docs.python.org/3.3/library/unittest.html#command-line-interface
#
# We cannot use unittest.mock on python 2.x!
# Please install the Mock module when you use Python 2.x.
#
# $ easy_install -U Mock
#
# See also: http://www.voidspace.org.uk/python/mock/#installing
import unittest
import os
try:
# Python 3.3 or later
import unittest.mock as mock
except ImportError:
# Python 2.x or 3.2-
import mock
import lib.unzip
import zipfile
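# zipfile.ZipFile is patched with a MagicMock, so no real archive is needed.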
class TestClone(unittest.TestCase):
def setUp(self):
self.ZipFile = mock.MagicMock()
zipfile.ZipFile = self.ZipFile
self.extractall = mock.MagicMock()
zipfile.ZipFile.return_value.__enter__.return_value.extractall = self.extractall
# run {{{
def test_run(self):
lib.unzip.run('/dir1/dir2/test.zip', '/dir1/dir2/dir3/dir4')
self.ZipFile.assert_called_once_with('/dir1/dir2/test.zip')
self.extractall.assert_called_once_with('/dir1/dir2/dir3/dir4')
# }}}
if __name__ == '__main__':
unittest.main()
# vim: fdm=marker
|
<commit_before><commit_msg>Add a test for lib.unzip<commit_after># Run the following command to test:
#
# (in /usr/local/googkit)
# $ python -m {test_module_name}
#
# See also: http://docs.python.org/3.3/library/unittest.html#command-line-interface
#
# We cannot use unittest.mock on python 2.x!
# Please install the Mock module when you use Python 2.x.
#
# $ easy_install -U Mock
#
# See also: http://www.voidspace.org.uk/python/mock/#installing
import unittest
import os
try:
# Python 3.3 or later
import unittest.mock as mock
except ImportError:
# Python 2.x or 3.2-
import mock
import lib.unzip
import zipfile
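# zipfile.ZipFile is patched with a MagicMock, so no real archive is needed.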
class TestClone(unittest.TestCase):
def setUp(self):
self.ZipFile = mock.MagicMock()
zipfile.ZipFile = self.ZipFile
self.extractall = mock.MagicMock()
zipfile.ZipFile.return_value.__enter__.return_value.extractall = self.extractall
# run {{{
def test_run(self):
lib.unzip.run('/dir1/dir2/test.zip', '/dir1/dir2/dir3/dir4')
self.ZipFile.assert_called_once_with('/dir1/dir2/test.zip')
self.extractall.assert_called_once_with('/dir1/dir2/dir3/dir4')
# }}}
if __name__ == '__main__':
unittest.main()
# vim: fdm=marker
|
|
db94954ccd03d27324ce142009093fdaeca83e98
|
tests/test_util.py
|
tests/test_util.py
|
"""
Tests for dhcp2nest.util
"""
from nose.tools import with_setup, eq_
from tempfile import TemporaryDirectory
import os.path
from dhcp2nest.util import follow_file
TEMPDIR = None
def follow_setup():
"""
Setup for follow_file tests
"""
global TEMPDIR
TEMPDIR = TemporaryDirectory()
def follow_teardown():
"""
Teardown for follow_file tests
"""
TEMPDIR.cleanup()
# #################
# follow_file tests
# #################
@with_setup(follow_setup, follow_teardown)
def test_basic_follow():
tmp_fn = os.path.join(TEMPDIR.name, 'basic.log')
with open(tmp_fn, 'w') as out_file:
q = follow_file(tmp_fn)
out_file.write('test line\n')
eq_(q.get(timeout=5), 'test line\n')
|
Add a basic test for follow_file()
|
Add a basic test for follow_file()
Signed-off-by: Jason Bernardino Alonso <f71c42a1353bbcdbe07e24c2a1c893f8ea1d05ee@hackorp.com>
|
Python
|
mit
|
jbalonso/dhcp2nest
|
Add a basic test for follow_file()
Signed-off-by: Jason Bernardino Alonso <f71c42a1353bbcdbe07e24c2a1c893f8ea1d05ee@hackorp.com>
|
"""
Tests for dhcp2nest.util
"""
from nose.tools import with_setup, eq_
from tempfile import TemporaryDirectory
import os.path
from dhcp2nest.util import follow_file
TEMPDIR = None
def follow_setup():
"""
Setup for follow_file tests
"""
global TEMPDIR
TEMPDIR = TemporaryDirectory()
def follow_teardown():
"""
Teardown for follow_file tests
"""
TEMPDIR.cleanup()
# #################
# follow_file tests
# #################
@with_setup(follow_setup, follow_teardown)
def test_basic_follow():
tmp_fn = os.path.join(TEMPDIR.name, 'basic.log')
with open(tmp_fn, 'w') as out_file:
q = follow_file(tmp_fn)
out_file.write('test line\n')
eq_(q.get(timeout=5), 'test line\n')
|
<commit_before><commit_msg>Add a basic test for follow_file()
Signed-off-by: Jason Bernardino Alonso <f71c42a1353bbcdbe07e24c2a1c893f8ea1d05ee@hackorp.com><commit_after>
|
"""
Tests for dhcp2nest.util
"""
from nose.tools import with_setup, eq_
from tempfile import TemporaryDirectory
import os.path
from dhcp2nest.util import follow_file
TEMPDIR = None
def follow_setup():
"""
Setup for follow_file tests
"""
global TEMPDIR
TEMPDIR = TemporaryDirectory()
def follow_teardown():
"""
Teardown for follow_file tests
"""
TEMPDIR.cleanup()
# #################
# follow_file tests
# #################
@with_setup(follow_setup, follow_teardown)
def test_basic_follow():
tmp_fn = os.path.join(TEMPDIR.name, 'basic.log')
with open(tmp_fn, 'w') as out_file:
q = follow_file(tmp_fn)
out_file.write('test line\n')
eq_(q.get(timeout=5), 'test line\n')
|
Add a basic test for follow_file()
Signed-off-by: Jason Bernardino Alonso <f71c42a1353bbcdbe07e24c2a1c893f8ea1d05ee@hackorp.com>"""
Tests for dhcp2nest.util
"""
from nose.tools import with_setup, eq_
from tempfile import TemporaryDirectory
import os.path
from dhcp2nest.util import follow_file
TEMPDIR = None
def follow_setup():
"""
Setup for follow_file tests
"""
global TEMPDIR
TEMPDIR = TemporaryDirectory()
def follow_teardown():
"""
Teardown for follow_file tests
"""
TEMPDIR.cleanup()
# #################
# follow_file tests
# #################
@with_setup(follow_setup, follow_teardown)
def test_basic_follow():
tmp_fn = os.path.join(TEMPDIR.name, 'basic.log')
with open(tmp_fn, 'w') as out_file:
q = follow_file(tmp_fn)
out_file.write('test line\n')
eq_(q.get(timeout=5), 'test line\n')
|
<commit_before><commit_msg>Add a basic test for follow_file()
Signed-off-by: Jason Bernardino Alonso <f71c42a1353bbcdbe07e24c2a1c893f8ea1d05ee@hackorp.com><commit_after>"""
Tests for dhcp2nest.util
"""
from nose.tools import with_setup, eq_
from tempfile import TemporaryDirectory
import os.path
from dhcp2nest.util import follow_file
TEMPDIR = None
def follow_setup():
"""
Setup for follow_file tests
"""
global TEMPDIR
TEMPDIR = TemporaryDirectory()
def follow_teardown():
"""
Teardown for follow_file tests
"""
TEMPDIR.cleanup()
# #################
# follow_file tests
# #################
@with_setup(follow_setup, follow_teardown)
def test_basic_follow():
tmp_fn = os.path.join(TEMPDIR.name, 'basic.log')
with open(tmp_fn, 'w') as out_file:
q = follow_file(tmp_fn)
out_file.write('test line\n')
eq_(q.get(timeout=5), 'test line\n')
|
|
f05a522301e09b7577959490f8527a226d289d57
|
profile_collection/startup/99-start-xpdacq.py
|
profile_collection/startup/99-start-xpdacq.py
|
#!/usr/bin/env python
##############################################################################
#
# xpdacq by Billinge Group
# Simon J. L. Billinge sb2896@columbia.edu
# (c) 2016 trustees of Columbia University in the City of
# New York.
# All rights reserved
#
# File coded by: Timothy Liu, Simon Billinge
#
# See AUTHORS.txt for a list of people who contributed.
# See LICENSE.txt for license information.
#
##############################################################################
'''Configuration of Python objects and global constants
'''
import os
from xpdacq.glbl import glbl
from xpdacq.beamtimeSetup import _start_beamtime, _end_beamtime
from xpdacq.beamtime import *
from xpdacq.utils import export_userScriptsEtc, import_userScriptsEtc
try:
    # if pe1c and others exist, i.e. at XPD
glbl.area_det = pe1c
glbl.shutter = shctl1
glbl.temp_controller = cs700
except NameError:
pass
from xpdacq.xpdacq import *
from xpdacq.analysis import *
HOME_DIR = glbl.home
BASE_DIR = glbl.base
YAML_DIR = glbl.yaml_dir
print('Initializing the XPD data acquisition simulation environment')
if os.path.isdir(HOME_DIR):
os.chdir(HOME_DIR)
else:
os.chdir(BASE_DIR)
#if there is a yml file in the normal place, then this was an existing experiment that was interrupted.
#if os.path.isdir(YAML_DIR):
bt_fname = os.path.join(YAML_DIR, "bt_bt.yml")
if os.path.isfile(bt_fname):
print("loading bt_bt.yml")
tmp = XPD()
bt = tmp.loadyamls()[0]
print('OK, ready to go. To continue, follow the steps in the xpdAcq')
print('documentation at http://xpdacq.github.io/xpdacq')
|
Put xpdAcq startup script under version control.
|
Put xpdAcq startup script under version control.
|
Python
|
bsd-2-clause
|
NSLS-II-XPD/ipython_ophyd,NSLS-II-XPD/ipython_ophyd
|
Put xpdAcq startup script under version control.
|
#!/usr/bin/env python
##############################################################################
#
# xpdacq by Billinge Group
# Simon J. L. Billinge sb2896@columbia.edu
# (c) 2016 trustees of Columbia University in the City of
# New York.
# All rights reserved
#
# File coded by: Timothy Liu, Simon Billinge
#
# See AUTHORS.txt for a list of people who contributed.
# See LICENSE.txt for license information.
#
##############################################################################
'''Configuration of Python objects and global constants
'''
import os
from xpdacq.glbl import glbl
from xpdacq.beamtimeSetup import _start_beamtime, _end_beamtime
from xpdacq.beamtime import *
from xpdacq.utils import export_userScriptsEtc, import_userScriptsEtc
try:
    # if pe1c and others exist, i.e. at XPD
glbl.area_det = pe1c
glbl.shutter = shctl1
glbl.temp_controller = cs700
except NameError:
pass
from xpdacq.xpdacq import *
from xpdacq.analysis import *
HOME_DIR = glbl.home
BASE_DIR = glbl.base
YAML_DIR = glbl.yaml_dir
print('Initializing the XPD data acquisition simulation environment')
if os.path.isdir(HOME_DIR):
os.chdir(HOME_DIR)
else:
os.chdir(BASE_DIR)
#if there is a yml file in the normal place, then this was an existing experiment that was interrupted.
#if os.path.isdir(YAML_DIR):
bt_fname = os.path.join(YAML_DIR, "bt_bt.yml")
if os.path.isfile(bt_fname):
print("loading bt_bt.yml")
tmp = XPD()
bt = tmp.loadyamls()[0]
print('OK, ready to go. To continue, follow the steps in the xpdAcq')
print('documentation at http://xpdacq.github.io/xpdacq')
|
<commit_before><commit_msg>Put xpdAcq startup script under version control.<commit_after>
|
#!/usr/bin/env python
##############################################################################
#
# xpdacq by Billinge Group
# Simon J. L. Billinge sb2896@columbia.edu
# (c) 2016 trustees of Columbia University in the City of
# New York.
# All rights reserved
#
# File coded by: Timothy Liu, Simon Billinge
#
# See AUTHORS.txt for a list of people who contributed.
# See LICENSE.txt for license information.
#
##############################################################################
'''Configuration of Python objects and global constants
'''
import os
from xpdacq.glbl import glbl
from xpdacq.beamtimeSetup import _start_beamtime, _end_beamtime
from xpdacq.beamtime import *
from xpdacq.utils import export_userScriptsEtc, import_userScriptsEtc
try:
    # if pe1c and others exist, i.e. at XPD
glbl.area_det = pe1c
glbl.shutter = shctl1
glbl.temp_controller = cs700
except NameError:
pass
from xpdacq.xpdacq import *
from xpdacq.analysis import *
HOME_DIR = glbl.home
BASE_DIR = glbl.base
YAML_DIR = glbl.yaml_dir
print('Initializing the XPD data acquisition simulation environment')
if os.path.isdir(HOME_DIR):
os.chdir(HOME_DIR)
else:
os.chdir(BASE_DIR)
#if there is a yml file in the normal place, then this was an existing experiment that was interrupted.
#if os.path.isdir(YAML_DIR):
bt_fname = os.path.join(YAML_DIR, "bt_bt.yml")
if os.path.isfile(bt_fname):
print("loading bt_bt.yml")
tmp = XPD()
bt = tmp.loadyamls()[0]
print('OK, ready to go. To continue, follow the steps in the xpdAcq')
print('documentation at http://xpdacq.github.io/xpdacq')
|
Put xpdAcq startup script under version control.#!/usr/bin/env python
##############################################################################
#
# xpdacq by Billinge Group
# Simon J. L. Billinge sb2896@columbia.edu
# (c) 2016 trustees of Columbia University in the City of
# New York.
# All rights reserved
#
# File coded by: Timothy Liu, Simon Billinge
#
# See AUTHORS.txt for a list of people who contributed.
# See LICENSE.txt for license information.
#
##############################################################################
'''Configuration of Python objects and global constants
'''
import os
from xpdacq.glbl import glbl
from xpdacq.beamtimeSetup import _start_beamtime, _end_beamtime
from xpdacq.beamtime import *
from xpdacq.utils import export_userScriptsEtc, import_userScriptsEtc
try:
    # if pe1c and others exist, i.e. at XPD
glbl.area_det = pe1c
glbl.shutter = shctl1
glbl.temp_controller = cs700
except NameError:
pass
from xpdacq.xpdacq import *
from xpdacq.analysis import *
HOME_DIR = glbl.home
BASE_DIR = glbl.base
YAML_DIR = glbl.yaml_dir
print('Initializing the XPD data acquisition simulation environment')
if os.path.isdir(HOME_DIR):
os.chdir(HOME_DIR)
else:
os.chdir(BASE_DIR)
#if there is a yml file in the normal place, then this was an existing experiment that was interrupted.
#if os.path.isdir(YAML_DIR):
bt_fname = os.path.join(YAML_DIR, "bt_bt.yml")
if os.path.isfile(bt_fname):
print("loading bt_bt.yml")
tmp = XPD()
bt = tmp.loadyamls()[0]
print('OK, ready to go. To continue, follow the steps in the xpdAcq')
print('documentation at http://xpdacq.github.io/xpdacq')
|
<commit_before><commit_msg>Put xpdAcq startup script under version control.<commit_after>#!/usr/bin/env python
##############################################################################
#
# xpdacq by Billinge Group
# Simon J. L. Billinge sb2896@columbia.edu
# (c) 2016 trustees of Columbia University in the City of
# New York.
# All rights reserved
#
# File coded by: Timothy Liu, Simon Billinge
#
# See AUTHORS.txt for a list of people who contributed.
# See LICENSE.txt for license information.
#
##############################################################################
'''Configuration of Python objects and global constants
'''
import os
from xpdacq.glbl import glbl
from xpdacq.beamtimeSetup import _start_beamtime, _end_beamtime
from xpdacq.beamtime import *
from xpdacq.utils import export_userScriptsEtc, import_userScriptsEtc
try:
    # if pe1c and others exist, i.e. at XPD
glbl.area_det = pe1c
glbl.shutter = shctl1
glbl.temp_controller = cs700
except NameError:
pass
from xpdacq.xpdacq import *
from xpdacq.analysis import *
HOME_DIR = glbl.home
BASE_DIR = glbl.base
YAML_DIR = glbl.yaml_dir
print('Initializing the XPD data acquisition simulation environment')
if os.path.isdir(HOME_DIR):
os.chdir(HOME_DIR)
else:
os.chdir(BASE_DIR)
#if there is a yml file in the normal place, then this was an existing experiment that was interrupted.
#if os.path.isdir(YAML_DIR):
bt_fname = os.path.join(YAML_DIR, "bt_bt.yml")
if os.path.isfile(bt_fname):
print("loading bt_bt.yml")
tmp = XPD()
bt = tmp.loadyamls()[0]
print('OK, ready to go. To continue, follow the steps in the xpdAcq')
print('documentation at http://xpdacq.github.io/xpdacq')
|
|
3d96b070af8c954c9132f3986e598a0c4f37f570
|
push/tests/test_urls.py
|
push/tests/test_urls.py
|
# coding=utf-8
from django.test.client import Client
from django.test import TestCase
from push.views import *
class UrlResolveTests(TestCase):
def test_url_index(self):
c = Client()
response = c.get('/')
self.assertEqual(response.status_code, 302)
def test_url_index_page(self):
c = Client()
response = c.get('/', {'page': 2})
self.assertEqual(response.status_code, 302)
self.assertEqual(response.content, '')
|
Add push urls unit test
|
[WIP] Add push urls unit test
|
Python
|
apache-2.0
|
nnsnodnb/django-mbaas,nnsnodnb/django-mbaas,nnsnodnb/django-mbaas
|
[WIP] Add push urls unit test
|
# coding=utf-8
from django.test.client import Client
from django.test import TestCase
from push.views import *
class UrlResolveTests(TestCase):
def test_url_index(self):
c = Client()
response = c.get('/')
self.assertEqual(response.status_code, 302)
def test_url_index_page(self):
c = Client()
response = c.get('/', {'page': 2})
self.assertEqual(response.status_code, 302)
self.assertEqual(response.content, '')
|
<commit_before><commit_msg>[WIP] Add push urls unit test<commit_after>
|
# coding=utf-8
from django.test.client import Client
from django.test import TestCase
from push.views import *
class UrlResolveTests(TestCase):
def test_url_index(self):
c = Client()
response = c.get('/')
self.assertEqual(response.status_code, 302)
def test_url_index_page(self):
c = Client()
response = c.get('/', {'page': 2})
self.assertEqual(response.status_code, 302)
self.assertEqual(response.content, '')
|
[WIP] Add push urls unit test# coding=utf-8
from django.test.client import Client
from django.test import TestCase
from push.views import *
class UrlResolveTests(TestCase):
def test_url_index(self):
c = Client()
response = c.get('/')
self.assertEqual(response.status_code, 302)
def test_url_index_page(self):
c = Client()
response = c.get('/', {'page': 2})
self.assertEqual(response.status_code, 302)
self.assertEqual(response.content, '')
|
<commit_before><commit_msg>[WIP] Add push urls unit test<commit_after># coding=utf-8
from django.test.client import Client
from django.test import TestCase
from push.views import *
class UrlResolveTests(TestCase):
def test_url_index(self):
c = Client()
response = c.get('/')
self.assertEqual(response.status_code, 302)
def test_url_index_page(self):
c = Client()
response = c.get('/', {'page': 2})
self.assertEqual(response.status_code, 302)
self.assertEqual(response.content, '')
|
|
57222e314e78cf5a2894112dc05892a08a297b52
|
virtual_devices.py
|
virtual_devices.py
|
class Output:
"""A device which prints whatever gets passed to it"""
def write(self, index, value):
print(value)
def read(self, index):
pass
class Input:
"""A device triggering on a read to return user data"""
def read(self, index):
f = input('I need input...')
try: # Hacky; Will be taken out once we get an actual parser running
return int(f)
except ValueError:
return f
def write(self, index, value):
pass
|
Add Input and Output virtual devices
|
Add Input and Output virtual devices
|
Python
|
bsd-3-clause
|
darbaga/simple_compiler
|
Add Input and Output virtual devices
|
class Output:
"""A device which prints whatever gets passed to it"""
def write(self, index, value):
print(value)
def read(self, index):
pass
class Input:
"""A device triggering on a read to return user data"""
def read(self, index):
f = input('I need input...')
try: # Hacky; Will be taken out once we get an actual parser running
return int(f)
except ValueError:
return f
def write(self, index, value):
pass
|
<commit_before><commit_msg>Add Input and Output virtual devices<commit_after>
|
class Output:
"""A device which prints whatever gets passed to it"""
def write(self, index, value):
print(value)
def read(self, index):
pass
class Input:
"""A device triggering on a read to return user data"""
def read(self, index):
f = input('I need input...')
try: # Hacky; Will be taken out once we get an actual parser running
return int(f)
except ValueError:
return f
def write(self, index, value):
pass
|
Add Input and Output virtual devicesclass Output:
"""A device which prints whatever gets passed to it"""
def write(self, index, value):
print(value)
def read(self, index):
pass
class Input:
"""A device triggering on a read to return user data"""
def read(self, index):
f = input('I need input...')
try: # Hacky; Will be taken out once we get an actual parser running
return int(f)
except ValueError:
return f
def write(self, index, value):
pass
|
<commit_before><commit_msg>Add Input and Output virtual devices<commit_after>class Output:
"""A device which prints whatever gets passed to it"""
def write(self, index, value):
print(value)
def read(self, index):
pass
class Input:
"""A device triggering on a read to return user data"""
def read(self, index):
f = input('I need input...')
try: # Hacky; Will be taken out once we get an actual parser running
return int(f)
except ValueError:
return f
def write(self, index, value):
pass
|
|
af7db45b89352421be479af74db0469b63cc8717
|
clock/users/migrations/0005_auto_20171208_1413.py
|
clock/users/migrations/0005_auto_20171208_1413.py
|
# Generated by Django 2.0 on 2017-12-08 13:13
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0004_auto_20171022_1456'),
]
operations = [
migrations.AlterField(
model_name='user',
name='last_name',
field=models.CharField(blank=True, max_length=150, verbose_name='last name'),
),
]
|
Add migration for Django 2.0
|
Add migration for Django 2.0
|
Python
|
mit
|
mimischi/django-clock,mimischi/django-clock,mimischi/django-clock,mimischi/django-clock
|
Add migration for Django 2.0
|
# Generated by Django 2.0 on 2017-12-08 13:13
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0004_auto_20171022_1456'),
]
operations = [
migrations.AlterField(
model_name='user',
name='last_name',
field=models.CharField(blank=True, max_length=150, verbose_name='last name'),
),
]
|
<commit_before><commit_msg>Add migration for Django 2.0<commit_after>
|
# Generated by Django 2.0 on 2017-12-08 13:13
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0004_auto_20171022_1456'),
]
operations = [
migrations.AlterField(
model_name='user',
name='last_name',
field=models.CharField(blank=True, max_length=150, verbose_name='last name'),
),
]
|
Add migration for Django 2.0# Generated by Django 2.0 on 2017-12-08 13:13
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0004_auto_20171022_1456'),
]
operations = [
migrations.AlterField(
model_name='user',
name='last_name',
field=models.CharField(blank=True, max_length=150, verbose_name='last name'),
),
]
|
<commit_before><commit_msg>Add migration for Django 2.0<commit_after># Generated by Django 2.0 on 2017-12-08 13:13
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0004_auto_20171022_1456'),
]
operations = [
migrations.AlterField(
model_name='user',
name='last_name',
field=models.CharField(blank=True, max_length=150, verbose_name='last name'),
),
]
|
|
3765426ca0afa7e54e95d64831ff12e65c2a92e8
|
tests/integration/test_main.py
|
tests/integration/test_main.py
|
"""
"""
from zazo import main
def test_main(capsys):
retval = main()
assert retval == 0
assert capsys.readouterr() == ("Hello World!\n", "")
|
Add an integration test to satisfy Travis
|
Add an integration test to satisfy Travis
|
Python
|
mit
|
pradyunsg/zazo,pradyunsg/zazo
|
Add an integration test to satisfy Travis
|
"""
"""
from zazo import main
def test_main(capsys):
retval = main()
assert retval == 0
assert capsys.readouterr() == ("Hello World!\n", "")
|
<commit_before><commit_msg>Add an integration test to satisfy Travis<commit_after>
|
"""
"""
from zazo import main
def test_main(capsys):
retval = main()
assert retval == 0
assert capsys.readouterr() == ("Hello World!\n", "")
|
Add an integration test to satisfy Travis"""
"""
from zazo import main
def test_main(capsys):
retval = main()
assert retval == 0
assert capsys.readouterr() == ("Hello World!\n", "")
|
<commit_before><commit_msg>Add an integration test to satisfy Travis<commit_after>"""
"""
from zazo import main
def test_main(capsys):
retval = main()
assert retval == 0
assert capsys.readouterr() == ("Hello World!\n", "")
|
|
8a1eb115fec1345587d75b445c9c6cc9bc1ac1a1
|
test/test_logger.py
|
test/test_logger.py
|
# encoding: utf-8
"""
.. codeauthor:: Tsuyoshi Hombashi <tsuyoshi.hombashi@gmail.com>
"""
from __future__ import print_function
from __future__ import unicode_literals
import logbook
from pytablereader import (
set_logger,
set_log_level,
)
import pytest
class Test_set_logger(object):
@pytest.mark.parametrize(["value"], [
[True],
[False],
])
def test_smoke(self, value):
set_logger(value)
class Test_set_log_level(object):
@pytest.mark.parametrize(["value"], [
[logbook.CRITICAL],
[logbook.ERROR],
[logbook.WARNING],
[logbook.NOTICE],
[logbook.INFO],
[logbook.DEBUG],
[logbook.TRACE],
[logbook.NOTSET],
])
def test_smoke(self, value):
set_log_level(value)
@pytest.mark.parametrize(["value", "expected"], [
[None, LookupError],
["unexpected", LookupError],
])
def test_exception(self, value, expected):
with pytest.raises(expected):
set_log_level(value)
|
Add test cases for the logger
|
Add test cases for the logger
|
Python
|
mit
|
thombashi/pytablereader,thombashi/pytablereader,thombashi/pytablereader
|
Add test cases for the logger
|
# encoding: utf-8
"""
.. codeauthor:: Tsuyoshi Hombashi <tsuyoshi.hombashi@gmail.com>
"""
from __future__ import print_function
from __future__ import unicode_literals
import logbook
from pytablereader import (
set_logger,
set_log_level,
)
import pytest
class Test_set_logger(object):
@pytest.mark.parametrize(["value"], [
[True],
[False],
])
def test_smoke(self, value):
set_logger(value)
class Test_set_log_level(object):
@pytest.mark.parametrize(["value"], [
[logbook.CRITICAL],
[logbook.ERROR],
[logbook.WARNING],
[logbook.NOTICE],
[logbook.INFO],
[logbook.DEBUG],
[logbook.TRACE],
[logbook.NOTSET],
])
def test_smoke(self, value):
set_log_level(value)
@pytest.mark.parametrize(["value", "expected"], [
[None, LookupError],
["unexpected", LookupError],
])
def test_exception(self, value, expected):
with pytest.raises(expected):
set_log_level(value)
|
<commit_before><commit_msg>Add test cases for the logger<commit_after>
|
# encoding: utf-8
"""
.. codeauthor:: Tsuyoshi Hombashi <tsuyoshi.hombashi@gmail.com>
"""
from __future__ import print_function
from __future__ import unicode_literals
import logbook
from pytablereader import (
set_logger,
set_log_level,
)
import pytest
class Test_set_logger(object):
@pytest.mark.parametrize(["value"], [
[True],
[False],
])
def test_smoke(self, value):
set_logger(value)
class Test_set_log_level(object):
@pytest.mark.parametrize(["value"], [
[logbook.CRITICAL],
[logbook.ERROR],
[logbook.WARNING],
[logbook.NOTICE],
[logbook.INFO],
[logbook.DEBUG],
[logbook.TRACE],
[logbook.NOTSET],
])
def test_smoke(self, value):
set_log_level(value)
@pytest.mark.parametrize(["value", "expected"], [
[None, LookupError],
["unexpected", LookupError],
])
def test_exception(self, value, expected):
with pytest.raises(expected):
set_log_level(value)
|
Add test cases for the logger# encoding: utf-8
"""
.. codeauthor:: Tsuyoshi Hombashi <tsuyoshi.hombashi@gmail.com>
"""
from __future__ import print_function
from __future__ import unicode_literals
import logbook
from pytablereader import (
set_logger,
set_log_level,
)
import pytest
class Test_set_logger(object):
@pytest.mark.parametrize(["value"], [
[True],
[False],
])
def test_smoke(self, value):
set_logger(value)
class Test_set_log_level(object):
@pytest.mark.parametrize(["value"], [
[logbook.CRITICAL],
[logbook.ERROR],
[logbook.WARNING],
[logbook.NOTICE],
[logbook.INFO],
[logbook.DEBUG],
[logbook.TRACE],
[logbook.NOTSET],
])
def test_smoke(self, value):
set_log_level(value)
@pytest.mark.parametrize(["value", "expected"], [
[None, LookupError],
["unexpected", LookupError],
])
def test_exception(self, value, expected):
with pytest.raises(expected):
set_log_level(value)
|
<commit_before><commit_msg>Add test cases for the logger<commit_after># encoding: utf-8
"""
.. codeauthor:: Tsuyoshi Hombashi <tsuyoshi.hombashi@gmail.com>
"""
from __future__ import print_function
from __future__ import unicode_literals
import logbook
from pytablereader import (
set_logger,
set_log_level,
)
import pytest
class Test_set_logger(object):
@pytest.mark.parametrize(["value"], [
[True],
[False],
])
def test_smoke(self, value):
set_logger(value)
class Test_set_log_level(object):
@pytest.mark.parametrize(["value"], [
[logbook.CRITICAL],
[logbook.ERROR],
[logbook.WARNING],
[logbook.NOTICE],
[logbook.INFO],
[logbook.DEBUG],
[logbook.TRACE],
[logbook.NOTSET],
])
def test_smoke(self, value):
set_log_level(value)
@pytest.mark.parametrize(["value", "expected"], [
[None, LookupError],
["unexpected", LookupError],
])
def test_exception(self, value, expected):
with pytest.raises(expected):
set_log_level(value)
|
|
88cfa6ef3d04e9c2bb8bb0757479d1d191b27059
|
tests/test_enums.py
|
tests/test_enums.py
|
"""
test_enums
~~~~~~~~~~
Contains tests for the :mod:`~adbwp.enums` module.
"""
import pytest
from adbwp import enums
@pytest.mark.parametrize(('enum_value', 'int_value'), list(zip(enums.Command, (0x434e5953, 0x4e584e43, 0x48545541,
0x4e45504f, 0x59414b4f, 0x45534c43,
0x45545257))))
def test_command_values_unchanged(enum_value, int_value):
"""
Assert that the :class:`~adbwp.enums.Command` integer values remain unchanged. The goal of this
test is to guard against an _accidental_ value change as it will require the test to be modified to pass.
"""
assert enum_value.value == enum_value == int_value
@pytest.mark.parametrize(('enum_value', 'int_value'), list(zip(enums.AuthType, (1, 2, 3))))
def test_auth_type_values_unchanged(enum_value, int_value):
"""
Assert that the :class:`~adbwp.enums.AuthType` integer values remain unchanged. The goal of this
test is to guard against an _accidental_ value change as it will require the test to be modified to pass.
"""
assert enum_value.value == enum_value == int_value
@pytest.mark.parametrize(('enum_value', 'str_value'), list(zip(enums.CommandResponse, ('OKAY', 'FAIL'))))
def test_command_response_str_returns_value(enum_value, str_value):
"""
Assert that :class:`~adbwp.enums.CommandResponse` defines :meth:`~adbwp.enums.CommandResponse.__str__`
and returns the individual enum value.
"""
assert enum_value.value == str(enum_value) == str_value
@pytest.mark.parametrize(('enum_value', 'str_value'), list(zip(enums.SystemType, ('bootloader', 'device', 'host'))))
def test_system_type_str_returns_value(enum_value, str_value):
"""
Assert that :class:`~adbwp.enums.SystemType` defines :meth:`~adbwp.enums.SystemType.__str__`
and returns the individual enum value.
"""
assert enum_value.value == str(enum_value) == str_value
|
Add tests for enums module.
|
Add tests for enums module.
|
Python
|
apache-2.0
|
adbpy/wire-protocol
|
Add tests for enums module.
|
"""
test_enums
~~~~~~~~~~
Contains tests for the :mod:`~adbwp.enums` module.
"""
import pytest
from adbwp import enums
@pytest.mark.parametrize(('enum_value', 'int_value'), list(zip(enums.Command, (0x434e5953, 0x4e584e43, 0x48545541,
0x4e45504f, 0x59414b4f, 0x45534c43,
0x45545257))))
def test_command_values_unchanged(enum_value, int_value):
"""
Assert that the :class:`~adbwp.enums.Command` integer values remain unchanged. The goal of this
test is to guard against an _accidental_ value change as it will require the test to be modified to pass.
"""
assert enum_value.value == enum_value == int_value
@pytest.mark.parametrize(('enum_value', 'int_value'), list(zip(enums.AuthType, (1, 2, 3))))
def test_auth_type_values_unchanged(enum_value, int_value):
"""
Assert that the :class:`~adbwp.enums.AuthType` integer values remain unchanged. The goal of this
test is to guard against an _accidental_ value change as it will require the test to be modified to pass.
"""
assert enum_value.value == enum_value == int_value
@pytest.mark.parametrize(('enum_value', 'str_value'), list(zip(enums.CommandResponse, ('OKAY', 'FAIL'))))
def test_command_response_str_returns_value(enum_value, str_value):
"""
Assert that :class:`~adbwp.enums.CommandResponse` defines :meth:`~adbwp.enums.CommandResponse.__str__`
and returns the individual enum value.
"""
assert enum_value.value == str(enum_value) == str_value
@pytest.mark.parametrize(('enum_value', 'str_value'), list(zip(enums.SystemType, ('bootloader', 'device', 'host'))))
def test_system_type_str_returns_value(enum_value, str_value):
"""
Assert that :class:`~adbwp.enums.SystemType` defines :meth:`~adbwp.enums.SystemType.__str__`
and returns the individual enum value.
"""
assert enum_value.value == str(enum_value) == str_value
|
<commit_before><commit_msg>Add tests for enums module.<commit_after>
|
"""
test_enums
~~~~~~~~~~
Contains tests for the :mod:`~adbwp.enums` module.
"""
import pytest
from adbwp import enums
@pytest.mark.parametrize(('enum_value', 'int_value'), list(zip(enums.Command, (0x434e5953, 0x4e584e43, 0x48545541,
0x4e45504f, 0x59414b4f, 0x45534c43,
0x45545257))))
def test_command_values_unchanged(enum_value, int_value):
"""
Assert that the :class:`~adbwp.enums.Command` integer values remain unchanged. The goal of this
test is to guard against an _accidental_ value change as it will require the test to be modified to pass.
"""
assert enum_value.value == enum_value == int_value
@pytest.mark.parametrize(('enum_value', 'int_value'), list(zip(enums.AuthType, (1, 2, 3))))
def test_auth_type_values_unchanged(enum_value, int_value):
"""
Assert that the :class:`~adbwp.enums.AuthType` integer values remain unchanged. The goal of this
test is to guard against an _accidental_ value change as it will require the test to be modified to pass.
"""
assert enum_value.value == enum_value == int_value
@pytest.mark.parametrize(('enum_value', 'str_value'), list(zip(enums.CommandResponse, ('OKAY', 'FAIL'))))
def test_command_response_str_returns_value(enum_value, str_value):
"""
Assert that :class:`~adbwp.enums.CommandResponse` defines :meth:`~adbwp.enums.CommandResponse.__str__`
and returns the individual enum value.
"""
assert enum_value.value == str(enum_value) == str_value
@pytest.mark.parametrize(('enum_value', 'str_value'), list(zip(enums.SystemType, ('bootloader', 'device', 'host'))))
def test_system_type_str_returns_value(enum_value, str_value):
"""
Assert that :class:`~adbwp.enums.SystemType` defines :meth:`~adbwp.enums.SystemType.__str__`
and returns the individual enum value.
"""
assert enum_value.value == str(enum_value) == str_value
|
Add tests for enums module."""
test_enums
~~~~~~~~~~
Contains tests for the :mod:`~adbwp.enums` module.
"""
import pytest
from adbwp import enums
@pytest.mark.parametrize(('enum_value', 'int_value'), list(zip(enums.Command, (0x434e5953, 0x4e584e43, 0x48545541,
0x4e45504f, 0x59414b4f, 0x45534c43,
0x45545257))))
def test_command_values_unchanged(enum_value, int_value):
"""
Assert that the :class:`~adbwp.enums.Command` integer values remain unchanged. The goal of this
test is to guard against an _accidental_ value change as it will require the test to be modified to pass.
"""
assert enum_value.value == enum_value == int_value
@pytest.mark.parametrize(('enum_value', 'int_value'), list(zip(enums.AuthType, (1, 2, 3))))
def test_auth_type_values_unchanged(enum_value, int_value):
"""
Assert that the :class:`~adbwp.enums.AuthType` integer values remain unchanged. The goal of this
test is to guard against an _accidental_ value change as it will require the test to be modified to pass.
"""
assert enum_value.value == enum_value == int_value
@pytest.mark.parametrize(('enum_value', 'str_value'), list(zip(enums.CommandResponse, ('OKAY', 'FAIL'))))
def test_command_response_str_returns_value(enum_value, str_value):
"""
Assert that :class:`~adbwp.enums.CommandResponse` defines :meth:`~adbwp.enums.CommandResponse.__str__`
and returns the individual enum value.
"""
assert enum_value.value == str(enum_value) == str_value
@pytest.mark.parametrize(('enum_value', 'str_value'), list(zip(enums.SystemType, ('bootloader', 'device', 'host'))))
def test_system_type_str_returns_value(enum_value, str_value):
"""
Assert that :class:`~adbwp.enums.SystemType` defines :meth:`~adbwp.enums.SystemType.__str__`
and returns the individual enum value.
"""
assert enum_value.value == str(enum_value) == str_value
|
<commit_before><commit_msg>Add tests for enums module.<commit_after>"""
test_enums
~~~~~~~~~~
Contains tests for the :mod:`~adbwp.enums` module.
"""
import pytest
from adbwp import enums
@pytest.mark.parametrize(('enum_value', 'int_value'), list(zip(enums.Command, (0x434e5953, 0x4e584e43, 0x48545541,
0x4e45504f, 0x59414b4f, 0x45534c43,
0x45545257))))
def test_command_values_unchanged(enum_value, int_value):
"""
Assert that the :class:`~adbwp.enums.Command` integer values remain unchanged. The goal of this
test is to guard against an _accidental_ value change as it will require the test to be modified to pass.
"""
assert enum_value.value == enum_value == int_value
@pytest.mark.parametrize(('enum_value', 'int_value'), list(zip(enums.AuthType, (1, 2, 3))))
def test_auth_type_values_unchanged(enum_value, int_value):
"""
Assert that the :class:`~adbwp.enums.AuthType` integer values remain unchanged. The goal of this
test is to guard against an _accidental_ value change as it will require the test to be modified to pass.
"""
assert enum_value.value == enum_value == int_value
@pytest.mark.parametrize(('enum_value', 'str_value'), list(zip(enums.CommandResponse, ('OKAY', 'FAIL'))))
def test_command_response_str_returns_value(enum_value, str_value):
"""
Assert that :class:`~adbwp.enums.CommandResponse` defines :meth:`~adbwp.enums.CommandResponse.__str__`
and returns the individual enum value.
"""
assert enum_value.value == str(enum_value) == str_value
@pytest.mark.parametrize(('enum_value', 'str_value'), list(zip(enums.SystemType, ('bootloader', 'device', 'host'))))
def test_system_type_str_returns_value(enum_value, str_value):
"""
Assert that :class:`~adbwp.enums.SystemType` defines :meth:`~adbwp.enums.SystemType.__str__`
and returns the individual enum value.
"""
assert enum_value.value == str(enum_value) == str_value
|
|
a61be6f1106764f3da4c66f37ef8a768208ff9b3
|
glamkit_collections/contrib/work_creator/migrations/0008_auto_20161114_1240.py
|
glamkit_collections/contrib/work_creator/migrations/0008_auto_20161114_1240.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('gk_collections_work_creator', '0007_auto_20161028_1904'),
]
operations = [
migrations.AlterField(
model_name='workbase',
name='date_display',
field=models.CharField(blank=True, verbose_name=b'Date of creation (display)', max_length=255, help_text=b'Displays date as formatted for labels and reports, rather than sorting.'),
),
migrations.AlterField(
model_name='workbase',
name='date_edtf',
field=models.CharField(null=True, blank=True, verbose_name=b'Date of creation (EDTF)', max_length=64, help_text=b"an <a href='http://www.loc.gov/standards/datetime/implementations.html'>EDTF</a>-formatted date, as best as we could parse from the display date, e.g. '1855/1860-06-04'"),
),
]
|
Change label for date fields
|
Change label for date fields
|
Python
|
mit
|
ic-labs/django-icekit,ic-labs/django-icekit,ic-labs/django-icekit,ic-labs/glamkit-collections,ic-labs/django-icekit,ic-labs/glamkit-collections
|
Change label for date fields
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('gk_collections_work_creator', '0007_auto_20161028_1904'),
]
operations = [
migrations.AlterField(
model_name='workbase',
name='date_display',
field=models.CharField(blank=True, verbose_name=b'Date of creation (display)', max_length=255, help_text=b'Displays date as formatted for labels and reports, rather than sorting.'),
),
migrations.AlterField(
model_name='workbase',
name='date_edtf',
field=models.CharField(null=True, blank=True, verbose_name=b'Date of creation (EDTF)', max_length=64, help_text=b"an <a href='http://www.loc.gov/standards/datetime/implementations.html'>EDTF</a>-formatted date, as best as we could parse from the display date, e.g. '1855/1860-06-04'"),
),
]
|
<commit_before><commit_msg>Change label for date fields<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('gk_collections_work_creator', '0007_auto_20161028_1904'),
]
operations = [
migrations.AlterField(
model_name='workbase',
name='date_display',
field=models.CharField(blank=True, verbose_name=b'Date of creation (display)', max_length=255, help_text=b'Displays date as formatted for labels and reports, rather than sorting.'),
),
migrations.AlterField(
model_name='workbase',
name='date_edtf',
field=models.CharField(null=True, blank=True, verbose_name=b'Date of creation (EDTF)', max_length=64, help_text=b"an <a href='http://www.loc.gov/standards/datetime/implementations.html'>EDTF</a>-formatted date, as best as we could parse from the display date, e.g. '1855/1860-06-04'"),
),
]
|
Change label for date fields# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('gk_collections_work_creator', '0007_auto_20161028_1904'),
]
operations = [
migrations.AlterField(
model_name='workbase',
name='date_display',
field=models.CharField(blank=True, verbose_name=b'Date of creation (display)', max_length=255, help_text=b'Displays date as formatted for labels and reports, rather than sorting.'),
),
migrations.AlterField(
model_name='workbase',
name='date_edtf',
field=models.CharField(null=True, blank=True, verbose_name=b'Date of creation (EDTF)', max_length=64, help_text=b"an <a href='http://www.loc.gov/standards/datetime/implementations.html'>EDTF</a>-formatted date, as best as we could parse from the display date, e.g. '1855/1860-06-04'"),
),
]
|
<commit_before><commit_msg>Change label for date fields<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('gk_collections_work_creator', '0007_auto_20161028_1904'),
]
operations = [
migrations.AlterField(
model_name='workbase',
name='date_display',
field=models.CharField(blank=True, verbose_name=b'Date of creation (display)', max_length=255, help_text=b'Displays date as formatted for labels and reports, rather than sorting.'),
),
migrations.AlterField(
model_name='workbase',
name='date_edtf',
field=models.CharField(null=True, blank=True, verbose_name=b'Date of creation (EDTF)', max_length=64, help_text=b"an <a href='http://www.loc.gov/standards/datetime/implementations.html'>EDTF</a>-formatted date, as best as we could parse from the display date, e.g. '1855/1860-06-04'"),
),
]
|
|
39d81fe577d6201b320e84617317170ac5264394
|
genealogio/migrations/0022_auto_20160228_1652.py
|
genealogio/migrations/0022_auto_20160228_1652.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('genealogio', '0021_family_comments'),
]
operations = [
migrations.AlterModelOptions(
name='family',
options={'ordering': ('name', 'start_date', 'id'), 'verbose_name': 'Familie', 'verbose_name_plural': 'Familien'},
),
]
|
Add migration for changing ordering of Family field.
|
Add migration for changing ordering of Family field.
|
Python
|
bsd-3-clause
|
ugoertz/django-familio,ugoertz/django-familio,ugoertz/django-familio,ugoertz/django-familio
|
Add migration for changing ordering of Family field.
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('genealogio', '0021_family_comments'),
]
operations = [
migrations.AlterModelOptions(
name='family',
options={'ordering': ('name', 'start_date', 'id'), 'verbose_name': 'Familie', 'verbose_name_plural': 'Familien'},
),
]
|
<commit_before><commit_msg>Add migration for changing ordering of Family field.<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('genealogio', '0021_family_comments'),
]
operations = [
migrations.AlterModelOptions(
name='family',
options={'ordering': ('name', 'start_date', 'id'), 'verbose_name': 'Familie', 'verbose_name_plural': 'Familien'},
),
]
|
Add migration for changing ordering of Family field.# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('genealogio', '0021_family_comments'),
]
operations = [
migrations.AlterModelOptions(
name='family',
options={'ordering': ('name', 'start_date', 'id'), 'verbose_name': 'Familie', 'verbose_name_plural': 'Familien'},
),
]
|
<commit_before><commit_msg>Add migration for changing ordering of Family field.<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('genealogio', '0021_family_comments'),
]
operations = [
migrations.AlterModelOptions(
name='family',
options={'ordering': ('name', 'start_date', 'id'), 'verbose_name': 'Familie', 'verbose_name_plural': 'Familien'},
),
]
|
|
f67a6748b7282cbc256423b3c98c3321d4c028c0
|
FrontGateManager.py
|
FrontGateManager.py
|
#!/usr/bin/python
import urllib2
import RPi.GPIO as GPIO
from time import sleep
class Sonos:
apiPort = 5005
apiHost = '127.0.0.1'
def say(self, room, str, lang = 'en-us'):
path = 'http://{host}:{port}/{room}/say/{str}/{lang}'.format(
host = self.apiHost,
port = self.apiPort,
room = urllib2.quote(room),
str = urllib2.quote(str),
lang = lang
)
print path
self.__sonosRequest(path)
def sayAll(self, str, lang = 'en-us'):
path = 'http://{host}:{port}/sayall/{str}/{lang}'.format(
host = self.apiHost,
port = self.apiPort,
str = urllib2.quote(str),
lang = lang
)
print path
self.__sonosRequest(path)
def __sonosRequest(self, url):
req = urllib2.Request(url)
try:
urllib2.urlopen(req)
except urllib2.URLError as e:
print e.reason
class FrontGateState:
open = False
lastState = False
def __init__(self, pin):
self.pin = pin
GPIO.setmode(GPIO.BCM)
GPIO.setup(self.pin, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
def run(self):
if GPIO.input(self.pin):
self.open = False
else:
self.open = True
if self.open != self.lastState:
if self.open:
FrontGateManager.sonos.sayAll('The gate is now open')
else:
FrontGateManager.sonos.sayAll('The gate is now closed')
self.lastState = self.open
class SkyBell:
pressed = False
lastState = False
def __init__(self, pin):
self.pin = pin
GPIO.setmode(GPIO.BCM)
GPIO.setup(self.pin, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
def run(self):
if GPIO.input(self.pin):
self.pressed = False
else:
self.pressed = True
if self.pressed != self.lastState:
if self.pressed:
FrontGateManager.sonos.sayAll('There is someone at the door')
self.lastState = self.pressed
class FrontGateManager:
sonos = Sonos()
def init(self):
self.frontGateState = FrontGateState(22)
self.skyBell = SkyBell(17)
try:
while True:
self.frontGateState.run()
self.skyBell.run()
sleep(0.5)
finally:
GPIO.cleanup()
if __name__ == "__main__":
frontGateManager = FrontGateManager()
frontGateManager.init()
|
Add Front Gate Manager python file
|
Add Front Gate Manager python file
|
Python
|
mit
|
tdlm/front_gate_pi
|
Add Front Gate Manager python file
|
#!/usr/bin/python
import urllib2
import RPi.GPIO as GPIO
from time import sleep
class Sonos:
apiPort = 5005
apiHost = '127.0.0.1'
def say(self, room, str, lang = 'en-us'):
path = 'http://{host}:{port}/{room}/say/{str}/{lang}'.format(
host = self.apiHost,
port = self.apiPort,
room = urllib2.quote(room),
str = urllib2.quote(str),
lang = lang
)
print path
self.__sonosRequest(path)
def sayAll(self, str, lang = 'en-us'):
path = 'http://{host}:{port}/sayall/{str}/{lang}'.format(
host = self.apiHost,
port = self.apiPort,
str = urllib2.quote(str),
lang = lang
)
print path
self.__sonosRequest(path)
def __sonosRequest(self, url):
req = urllib2.Request(url)
try:
urllib2.urlopen(req)
except urllib2.URLError as e:
print e.reason
class FrontGateState:
open = False
lastState = False
def __init__(self, pin):
self.pin = pin
GPIO.setmode(GPIO.BCM)
GPIO.setup(self.pin, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
def run(self):
if GPIO.input(self.pin):
self.open = False
else:
self.open = True
if self.open != self.lastState:
if self.open:
FrontGateManager.sonos.sayAll('The gate is now open')
else:
FrontGateManager.sonos.sayAll('The gate is now closed')
self.lastState = self.open
class SkyBell:
pressed = False
lastState = False
def __init__(self, pin):
self.pin = pin
GPIO.setmode(GPIO.BCM)
GPIO.setup(self.pin, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
def run(self):
if GPIO.input(self.pin):
self.pressed = False
else:
self.pressed = True
if self.pressed != self.lastState:
if self.pressed:
FrontGateManager.sonos.sayAll('There is someone at the door')
self.lastState = self.pressed
class FrontGateManager:
sonos = Sonos()
def init(self):
self.frontGateState = FrontGateState(22)
self.skyBell = SkyBell(17)
try:
while True:
self.frontGateState.run()
self.skyBell.run()
sleep(0.5)
finally:
GPIO.cleanup()
if __name__ == "__main__":
frontGateManager = FrontGateManager()
frontGateManager.init()
|
<commit_before><commit_msg>Add Front Gate Manager python file<commit_after>
|
#!/usr/bin/python
import urllib2
import RPi.GPIO as GPIO
from time import sleep
class Sonos:
apiPort = 5005
apiHost = '127.0.0.1'
def say(self, room, str, lang = 'en-us'):
path = 'http://{host}:{port}/{room}/say/{str}/{lang}'.format(
host = self.apiHost,
port = self.apiPort,
room = urllib2.quote(room),
str = urllib2.quote(str),
lang = lang
)
print path
self.__sonosRequest(path)
def sayAll(self, str, lang = 'en-us'):
path = 'http://{host}:{port}/sayall/{str}/{lang}'.format(
host = self.apiHost,
port = self.apiPort,
str = urllib2.quote(str),
lang = lang
)
print path
self.__sonosRequest(path)
def __sonosRequest(self, url):
req = urllib2.Request(url)
try:
urllib2.urlopen(req)
except urllib2.URLError as e:
print e.reason
class FrontGateState:
open = False
lastState = False
def __init__(self, pin):
self.pin = pin
GPIO.setmode(GPIO.BCM)
GPIO.setup(self.pin, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
def run(self):
if GPIO.input(self.pin):
self.open = False
else:
self.open = True
if self.open != self.lastState:
if self.open:
FrontGateManager.sonos.sayAll('The gate is now open')
else:
FrontGateManager.sonos.sayAll('The gate is now closed')
self.lastState = self.open
class SkyBell:
pressed = False
lastState = False
def __init__(self, pin):
self.pin = pin
GPIO.setmode(GPIO.BCM)
GPIO.setup(self.pin, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
def run(self):
if GPIO.input(self.pin):
self.pressed = False
else:
self.pressed = True
if self.pressed != self.lastState:
if self.pressed:
FrontGateManager.sonos.sayAll('There is someone at the door')
self.lastState = self.pressed
class FrontGateManager:
sonos = Sonos()
def init(self):
self.frontGateState = FrontGateState(22)
self.skyBell = SkyBell(17)
try:
while True:
self.frontGateState.run()
self.skyBell.run()
sleep(0.5)
finally:
GPIO.cleanup()
if __name__ == "__main__":
frontGateManager = FrontGateManager()
frontGateManager.init()
|
Add Front Gate Manager python file#!/usr/bin/python
import urllib2
import RPi.GPIO as GPIO
from time import sleep
class Sonos:
apiPort = 5005
apiHost = '127.0.0.1'
def say(self, room, str, lang = 'en-us'):
path = 'http://{host}:{port}/{room}/say/{str}/{lang}'.format(
host = self.apiHost,
port = self.apiPort,
room = urllib2.quote(room),
str = urllib2.quote(str),
lang = lang
)
print path
self.__sonosRequest(path)
def sayAll(self, str, lang = 'en-us'):
path = 'http://{host}:{port}/sayall/{str}/{lang}'.format(
host = self.apiHost,
port = self.apiPort,
str = urllib2.quote(str),
lang = lang
)
print path
self.__sonosRequest(path)
def __sonosRequest(self, url):
req = urllib2.Request(url)
try:
urllib2.urlopen(req)
except urllib2.URLError as e:
print e.reason
class FrontGateState:
open = False
lastState = False
def __init__(self, pin):
self.pin = pin
GPIO.setmode(GPIO.BCM)
GPIO.setup(self.pin, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
def run(self):
if GPIO.input(self.pin):
self.open = False
else:
self.open = True
if self.open != self.lastState:
if self.open:
FrontGateManager.sonos.sayAll('The gate is now open')
else:
FrontGateManager.sonos.sayAll('The gate is now closed')
self.lastState = self.open
class SkyBell:
pressed = False
lastState = False
def __init__(self, pin):
self.pin = pin
GPIO.setmode(GPIO.BCM)
GPIO.setup(self.pin, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
def run(self):
if GPIO.input(self.pin):
self.pressed = False
else:
self.pressed = True
if self.pressed != self.lastState:
if self.pressed:
FrontGateManager.sonos.sayAll('There is someone at the door')
self.lastState = self.pressed
class FrontGateManager:
sonos = Sonos()
def init(self):
self.frontGateState = FrontGateState(22)
self.skyBell = SkyBell(17)
try:
while True:
self.frontGateState.run()
self.skyBell.run()
sleep(0.5)
finally:
GPIO.cleanup()
if __name__ == "__main__":
frontGateManager = FrontGateManager()
frontGateManager.init()
|
<commit_before><commit_msg>Add Front Gate Manager python file<commit_after>#!/usr/bin/python
import urllib2
import RPi.GPIO as GPIO
from time import sleep
class Sonos:
apiPort = 5005
apiHost = '127.0.0.1'
def say(self, room, str, lang = 'en-us'):
path = 'http://{host}:{port}/{room}/say/{str}/{lang}'.format(
host = self.apiHost,
port = self.apiPort,
room = urllib2.quote(room),
str = urllib2.quote(str),
lang = lang
)
print path
self.__sonosRequest(path)
def sayAll(self, str, lang = 'en-us'):
path = 'http://{host}:{port}/sayall/{str}/{lang}'.format(
host = self.apiHost,
port = self.apiPort,
str = urllib2.quote(str),
lang = lang
)
print path
self.__sonosRequest(path)
def __sonosRequest(self, url):
req = urllib2.Request(url)
try:
urllib2.urlopen(req)
except urllib2.URLError as e:
print e.reason
class FrontGateState:
open = False
lastState = False
def __init__(self, pin):
self.pin = pin
GPIO.setmode(GPIO.BCM)
GPIO.setup(self.pin, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
def run(self):
if GPIO.input(self.pin):
self.open = False
else:
self.open = True
if self.open != self.lastState:
if self.open:
FrontGateManager.sonos.sayAll('The gate is now open')
else:
FrontGateManager.sonos.sayAll('The gate is now closed')
self.lastState = self.open
class SkyBell:
pressed = False
lastState = False
def __init__(self, pin):
self.pin = pin
GPIO.setmode(GPIO.BCM)
GPIO.setup(self.pin, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
def run(self):
if GPIO.input(self.pin):
self.pressed = False
else:
self.pressed = True
if self.pressed != self.lastState:
if self.pressed:
FrontGateManager.sonos.sayAll('There is someone at the door')
self.lastState = self.pressed
class FrontGateManager:
sonos = Sonos()
def init(self):
self.frontGateState = FrontGateState(22)
self.skyBell = SkyBell(17)
try:
while True:
self.frontGateState.run()
self.skyBell.run()
sleep(0.5)
finally:
GPIO.cleanup()
if __name__ == "__main__":
frontGateManager = FrontGateManager()
frontGateManager.init()
|
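A minimal usage sketch for the Sonos wrapper above, runnable without any GPIO hardware. It assumes the node-sonos-http-api endpoint on localhost:5005 that the script already targets; the room name "Living Room" is a made-up example.
# Hedged sketch (Python 2, matching the script): exercise the HTTP
# wrapper alone; "Living Room" is a hypothetical room name.
sonos = Sonos()
sonos.say('Living Room', 'Hello from the front gate')
sonos.sayAll('Testing every speaker')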
|
07d01ecba7eaafc71c07cdf1b907201477496846
|
pygments/styles/igor.py
|
pygments/styles/igor.py
|
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic
class IgorStyle(Style):
default_style = ""
styles = {
Comment: 'italic #FF0000',
Keyword: '#0000FF',
Name.Function: '#C34E00',
Name.Class: '#007575',
String: '#009C00'
}
|
Add custom style which imitates the official coloring
|
Add custom style which imitates the official coloring
|
Python
|
bsd-2-clause
|
spencerlyon2/pygments,spencerlyon2/pygments,spencerlyon2/pygments,spencerlyon2/pygments,spencerlyon2/pygments,spencerlyon2/pygments,spencerlyon2/pygments,spencerlyon2/pygments,spencerlyon2/pygments,spencerlyon2/pygments,spencerlyon2/pygments,spencerlyon2/pygments,spencerlyon2/pygments,spencerlyon2/pygments,spencerlyon2/pygments,spencerlyon2/pygments,spencerlyon2/pygments,spencerlyon2/pygments
|
Add custom style which imitates the official coloring
|
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic
class IgorStyle(Style):
default_style = ""
styles = {
Comment: 'italic #FF0000',
Keyword: '#0000FF',
Name.Function: '#C34E00',
Name.Class: '#007575',
String: '#009C00'
}
|
<commit_before><commit_msg>Add custom style which imitates the official coloring<commit_after>
|
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic
class IgorStyle(Style):
default_style = ""
styles = {
Comment: 'italic #FF0000',
Keyword: '#0000FF',
Name.Function: '#C34E00',
Name.Class: '#007575',
String: '#009C00'
}
|
Add custom style which imitates the official coloringfrom pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic
class IgorStyle(Style):
default_style = ""
styles = {
Comment: 'italic #FF0000',
Keyword: '#0000FF',
Name.Function: '#C34E00',
Name.Class: '#007575',
String: '#009C00'
}
|
<commit_before><commit_msg>Add custom style which imitates the official coloring<commit_after>from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic
class IgorStyle(Style):
default_style = ""
styles = {
Comment: 'italic #FF0000',
Keyword: '#0000FF',
Name.Function: '#C34E00',
Name.Class: '#007575',
String: '#009C00'
}
|
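A short hedged sketch of using the style above: Pygments formatters accept a Style subclass directly, so the class can be tried without registering it as an entry point. The snippet being highlighted is arbitrary.
# Hedged sketch: render a snippet with the custom style above.
from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import HtmlFormatter

# HtmlFormatter's style option takes a style name or a Style subclass.
html = highlight('print "hi"', PythonLexer(), HtmlFormatter(style=IgorStyle))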
|
232c0a600946e2a679947fe638938e56d2fa7709
|
vint/ast/parsing.py
|
vint/ast/parsing.py
|
import extlib.vimlparser
class Parser(object):
def __init__(self, plugins=None):
""" Initialize Parser with the specified plugins.
The plugins can add attributes to the AST.
"""
self.plugins = plugins or []
def parse(self, string):
""" Parse vim script string and return the AST. """
lines = string.split('\n')
reader = extlib.vimlparser.StringReader(lines)
parser = extlib.vimlparser.VimLParser()
ast = parser.parse(reader)
for plugin in self.plugins:
plugin.process(ast)
return ast
def parse_file(self, file_path):
""" Parse vim script file and return the AST. """
with file_path.open() as f:
return self.parse(f.read())
|
import extlib.vimlparser
class Parser(object):
def __init__(self, plugins=None):
""" Initialize Parser with the specified plugins.
The plugins can add attributes to the AST.
"""
self.plugins = plugins or []
def parse(self, string):
""" Parse vim script string and return the AST. """
lines = string.split('\n')
reader = extlib.vimlparser.StringReader(lines)
parser = extlib.vimlparser.VimLParser()
ast = parser.parse(reader)
# TOPLEVEL does not have a pos, but we need pos for all nodes
ast['pos'] = {'col': 1, 'i': 0, 'lnum': 1}
for plugin in self.plugins:
plugin.process(ast)
return ast
def parse_file(self, file_path):
""" Parse vim script file and return the AST. """
with file_path.open() as f:
return self.parse(f.read())
|
Add TOPLEVEL pos to unify node pos interface
|
Add TOPLEVEL pos to unify node pos interface
|
Python
|
mit
|
RianFuro/vint,Kuniwak/vint,RianFuro/vint,Kuniwak/vint
|
import extlib.vimlparser
class Parser(object):
def __init__(self, plugins=None):
""" Initialize Parser with the specified plugins.
The plugins can add attributes to the AST.
"""
self.plugins = plugins or []
def parse(self, string):
""" Parse vim script string and return the AST. """
lines = string.split('\n')
reader = extlib.vimlparser.StringReader(lines)
parser = extlib.vimlparser.VimLParser()
ast = parser.parse(reader)
for plugin in self.plugins:
plugin.process(ast)
return ast
def parse_file(self, file_path):
""" Parse vim script file and return the AST. """
with file_path.open() as f:
return self.parse(f.read())
Add TOPLEVEL pos to unify node pos interface
|
import extlib.vimlparser
class Parser(object):
def __init__(self, plugins=None):
""" Initialize Parser with the specified plugins.
The plugins can add attributes to the AST.
"""
self.plugins = plugins or []
def parse(self, string):
""" Parse vim script string and return the AST. """
lines = string.split('\n')
reader = extlib.vimlparser.StringReader(lines)
parser = extlib.vimlparser.VimLParser()
ast = parser.parse(reader)
# TOPLEVEL does not have a pos, but we need pos for all nodes
ast['pos'] = {'col': 1, 'i': 0, 'lnum': 1}
for plugin in self.plugins:
plugin.process(ast)
return ast
def parse_file(self, file_path):
""" Parse vim script file and return the AST. """
with file_path.open() as f:
return self.parse(f.read())
|
<commit_before>import extlib.vimlparser
class Parser(object):
def __init__(self, plugins=None):
""" Initialize Parser with the specified plugins.
The plugins can add attributes to the AST.
"""
self.plugins = plugins or []
def parse(self, string):
""" Parse vim script string and return the AST. """
lines = string.split('\n')
reader = extlib.vimlparser.StringReader(lines)
parser = extlib.vimlparser.VimLParser()
ast = parser.parse(reader)
for plugin in self.plugins:
plugin.process(ast)
return ast
def parse_file(self, file_path):
""" Parse vim script file and return the AST. """
with file_path.open() as f:
return self.parse(f.read())
<commit_msg>Add TOPLEVEL pos to unify node pos interface<commit_after>
|
import extlib.vimlparser
class Parser(object):
def __init__(self, plugins=None):
""" Initialize Parser with the specified plugins.
The plugins can add attributes to the AST.
"""
self.plugins = plugins or []
def parse(self, string):
""" Parse vim script string and return the AST. """
lines = string.split('\n')
reader = extlib.vimlparser.StringReader(lines)
parser = extlib.vimlparser.VimLParser()
ast = parser.parse(reader)
# TOPLEVEL does not have a pos, but we need pos for all nodes
ast['pos'] = {'col': 1, 'i': 0, 'lnum': 1}
for plugin in self.plugins:
plugin.process(ast)
return ast
def parse_file(self, file_path):
""" Parse vim script file and return the AST. """
with file_path.open() as f:
return self.parse(f.read())
|
import extlib.vimlparser
class Parser(object):
def __init__(self, plugins=None):
""" Initialize Parser with the specified plugins.
The plugins can add attributes to the AST.
"""
self.plugins = plugins or []
def parse(self, string):
""" Parse vim script string and return the AST. """
lines = string.split('\n')
reader = extlib.vimlparser.StringReader(lines)
parser = extlib.vimlparser.VimLParser()
ast = parser.parse(reader)
for plugin in self.plugins:
plugin.process(ast)
return ast
def parse_file(self, file_path):
""" Parse vim script file and return the AST. """
with file_path.open() as f:
return self.parse(f.read())
Add TOPLEVEL pos to unify node pos interfaceimport extlib.vimlparser
class Parser(object):
def __init__(self, plugins=None):
""" Initialize Parser with the specified plugins.
The plugins can add attributes to the AST.
"""
self.plugins = plugins or []
def parse(self, string):
""" Parse vim script string and return the AST. """
lines = string.split('\n')
reader = extlib.vimlparser.StringReader(lines)
parser = extlib.vimlparser.VimLParser()
ast = parser.parse(reader)
# TOPLEVEL does not have a pos, but we need pos for all nodes
ast['pos'] = {'col': 1, 'i': 0, 'lnum': 1}
for plugin in self.plugins:
plugin.process(ast)
return ast
def parse_file(self, file_path):
""" Parse vim script file and return the AST. """
with file_path.open() as f:
return self.parse(f.read())
|
<commit_before>import extlib.vimlparser
class Parser(object):
def __init__(self, plugins=None):
""" Initialize Parser with the specified plugins.
The plugins can add attributes to the AST.
"""
self.plugins = plugins or []
def parse(self, string):
""" Parse vim script string and return the AST. """
lines = string.split('\n')
reader = extlib.vimlparser.StringReader(lines)
parser = extlib.vimlparser.VimLParser()
ast = parser.parse(reader)
for plugin in self.plugins:
plugin.process(ast)
return ast
def parse_file(self, file_path):
""" Parse vim script file and return the AST. """
with file_path.open() as f:
return self.parse(f.read())
<commit_msg>Add TOPLEVEL pos to unify node pos interface<commit_after>import extlib.vimlparser
class Parser(object):
def __init__(self, plugins=None):
""" Initialize Parser with the specified plugins.
The plugins can add attributes to the AST.
"""
self.plugins = plugins or []
def parse(self, string):
""" Parse vim script string and return the AST. """
lines = string.split('\n')
reader = extlib.vimlparser.StringReader(lines)
parser = extlib.vimlparser.VimLParser()
ast = parser.parse(reader)
# TOPLEVEL does not have a pos, but we need pos for all nodes
ast['pos'] = {'col': 1, 'i': 0, 'lnum': 1}
for plugin in self.plugins:
plugin.process(ast)
return ast
def parse_file(self, file_path):
""" Parse vim script file and return the AST. """
with file_path.open() as f:
return self.parse(f.read())
|
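A minimal hedged sketch of what the change buys: the root TOPLEVEL node now exposes the same 'pos' key as every other node, so traversal code needs no root special case. The script text is an arbitrary example.
# Hedged sketch: the root node carries a position after parsing.
parser = Parser()
ast = parser.parse('let g:spam = 1')
assert ast['pos'] == {'col': 1, 'i': 0, 'lnum': 1}  # uniform node interface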
f12ba8623078322a57f23c55c9ce44883d22d18b
|
py/binary-search-tree-iterator.py
|
py/binary-search-tree-iterator.py
|
# Definition for a binary tree node
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class BSTIterator(object):
def __init__(self, root):
"""
:type root: TreeNode
"""
self.cache = None
self.g = BSTIterator.inOrder(root)
def hasNext(self):
"""
:rtype: bool
"""
if self.cache:
return True
try:
self.cache = self.g.next()
except StopIteration:
return False
return True
@staticmethod
def inOrder(node):
if node:
if node.left:
for n in BSTIterator.inOrder(node.left):
yield n
yield node
if node.right:
for n in BSTIterator.inOrder(node.right):
yield n
def next(self):
"""
:rtype: int
"""
if self.cache or self.hasNext():
ret = self.cache.val
self.cache = None
return ret
|
Add py solution for Binary Search Tree Iterator
|
Add py solution for Binary Search Tree Iterator
Binary Search Tree Iterator: https://leetcode.com/problems/binary-search-tree-iterator/
|
Python
|
apache-2.0
|
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
|
Add py solution for Binary Search Tree Iterator
Binary Search Tree Iterator: https://leetcode.com/problems/binary-search-tree-iterator/
|
# Definition for a binary tree node
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class BSTIterator(object):
def __init__(self, root):
"""
:type root: TreeNode
"""
self.cache = None
self.g = BSTIterator.inOrder(root)
def hasNext(self):
"""
:rtype: bool
"""
if self.cache:
return True
try:
self.cache = self.g.next()
except StopIteration:
return False
return True
@staticmethod
def inOrder(node):
if node:
if node.left:
for n in BSTIterator.inOrder(node.left):
yield n
yield node
if node.right:
for n in BSTIterator.inOrder(node.right):
yield n
def next(self):
"""
:rtype: int
"""
if self.cache or self.hasNext():
ret = self.cache.val
self.cache = None
return ret
|
<commit_before><commit_msg>Add py solution for Binary Search Tree Iterator
Binary Search Tree Iterator: https://leetcode.com/problems/binary-search-tree-iterator/<commit_after>
|
# Definition for a binary tree node
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class BSTIterator(object):
def __init__(self, root):
"""
:type root: TreeNode
"""
self.cache = None
self.g = BSTIterator.inOrder(root)
def hasNext(self):
"""
:rtype: bool
"""
if self.cache:
return True
try:
self.cache = self.g.next()
except StopIteration:
return False
return True
@staticmethod
def inOrder(node):
if node:
if node.left:
for n in BSTIterator.inOrder(node.left):
yield n
yield node
if node.right:
for n in BSTIterator.inOrder(node.right):
yield n
def next(self):
"""
:rtype: int
"""
if self.cache or self.hasNext():
ret = self.cache.val
self.cache = None
return ret
|
Add py solution for Binary Search Tree Iterator
Binary Search Tree Iterator: https://leetcode.com/problems/binary-search-tree-iterator/# Definition for a binary tree node
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class BSTIterator(object):
def __init__(self, root):
"""
:type root: TreeNode
"""
self.cache = None
self.g = BSTIterator.inOrder(root)
def hasNext(self):
"""
:rtype: bool
"""
if self.cache:
return True
try:
self.cache = self.g.next()
except StopIteration:
return False
return True
@staticmethod
def inOrder(node):
if node:
if node.left:
for n in BSTIterator.inOrder(node.left):
yield n
yield node
if node.right:
for n in BSTIterator.inOrder(node.right):
yield n
def next(self):
"""
:rtype: int
"""
if self.cache or self.hasNext():
ret = self.cache.val
self.cache = None
return ret
|
<commit_before><commit_msg>Add py solution for Binary Search Tree Iterator
Binary Search Tree Iterator: https://leetcode.com/problems/binary-search-tree-iterator/<commit_after># Definition for a binary tree node
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class BSTIterator(object):
def __init__(self, root):
"""
:type root: TreeNode
"""
self.cache = None
self.g = BSTIterator.inOrder(root)
def hasNext(self):
"""
:rtype: bool
"""
if self.cache:
return True
try:
self.cache = self.g.next()
except StopIteration:
return False
return True
@staticmethod
def inOrder(node):
if node:
if node.left:
for n in BSTIterator.inOrder(node.left):
yield n
yield node
if node.right:
for n in BSTIterator.inOrder(node.right):
yield n
def next(self):
"""
:rtype: int
"""
if self.cache or self.hasNext():
ret = self.cache.val
self.cache = None
return ret
|
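A hedged usage sketch (Python 2, like the solution): build a three-node tree and drain the iterator. The TreeNode constructor follows the commented definition above and is an assumption here.
# Hedged sketch: in-order iteration over a small BST yields 1, 2, 3.
root = TreeNode(2)
root.left, root.right = TreeNode(1), TreeNode(3)
it = BSTIterator(root)
values = []
while it.hasNext():
    values.append(it.next())
assert values == [1, 2, 3]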
|
8484bfe2870968e43c46b764688202d3c74213c0
|
ooni/tests/test_reporter.py
|
ooni/tests/test_reporter.py
|
import yaml
import time
from twisted.trial import unittest
from ooni.reporter import YAMLReporter
class MockTest(object):
_start_time = time.time()
report = {'report_content': 'ham'}
input = 'spam'
class TestYAMLReporter(unittest.TestCase):
def setUp(self):
pass
def test_write_report(self):
test_details = {
'test_name': 'spam',
'test_version': '1.0'
}
test = MockTest()
y_reporter = YAMLReporter(test_details)
y_reporter.createReport()
y_reporter.testDone(test, 'spam')
with open(y_reporter.report_path) as f:
report_entries = yaml.safe_load_all(f)
# Check for keys in header
entry = report_entries.next()
assert all(x in entry for x in ['test_name', 'test_version'])
entry = report_entries.next()
# Check for first entry of report
assert all(x in entry \
for x in ['report_content', 'input', \
'test_name', 'test_started', \
'test_runtime'])
|
Add unittest for the YAMLReporter
|
Add unittest for the YAMLReporter
|
Python
|
bsd-2-clause
|
Karthikeyan-kkk/ooni-probe,lordappsec/ooni-probe,kdmurray91/ooni-probe,0xPoly/ooni-probe,0xPoly/ooni-probe,juga0/ooni-probe,0xPoly/ooni-probe,juga0/ooni-probe,0xPoly/ooni-probe,kdmurray91/ooni-probe,lordappsec/ooni-probe,juga0/ooni-probe,juga0/ooni-probe,kdmurray91/ooni-probe,lordappsec/ooni-probe,Karthikeyan-kkk/ooni-probe,Karthikeyan-kkk/ooni-probe,lordappsec/ooni-probe,Karthikeyan-kkk/ooni-probe,kdmurray91/ooni-probe
|
Add unittest for the YAMLReporter
|
import yaml
import time
from twisted.trial import unittest
from ooni.reporter import YAMLReporter
class MockTest(object):
_start_time = time.time()
report = {'report_content': 'ham'}
input = 'spam'
class TestYAMLReporter(unittest.TestCase):
def setUp(self):
pass
def test_write_report(self):
test_details = {
'test_name': 'spam',
'test_version': '1.0'
}
test = MockTest()
y_reporter = YAMLReporter(test_details)
y_reporter.createReport()
y_reporter.testDone(test, 'spam')
with open(y_reporter.report_path) as f:
report_entries = yaml.safe_load_all(f)
# Check for keys in header
entry = report_entries.next()
assert all(x in entry for x in ['test_name', 'test_version'])
entry = report_entries.next()
# Check for first entry of report
assert all(x in entry \
for x in ['report_content', 'input', \
'test_name', 'test_started', \
'test_runtime'])
|
<commit_before><commit_msg>Add unittest for the YAMLReporter<commit_after>
|
import yaml
import time
from twisted.trial import unittest
from ooni.reporter import YAMLReporter
class MockTest(object):
_start_time = time.time()
report = {'report_content': 'ham'}
input = 'spam'
class TestYAMLReporter(unittest.TestCase):
def setUp(self):
pass
def test_write_report(self):
test_details = {
'test_name': 'spam',
'test_version': '1.0'
}
test = MockTest()
y_reporter = YAMLReporter(test_details)
y_reporter.createReport()
y_reporter.testDone(test, 'spam')
with open(y_reporter.report_path) as f:
report_entries = yaml.safe_load_all(f)
# Check for keys in header
entry = report_entries.next()
assert all(x in entry for x in ['test_name', 'test_version'])
entry = report_entries.next()
# Check for first entry of report
assert all(x in entry \
for x in ['report_content', 'input', \
'test_name', 'test_started', \
'test_runtime'])
|
Add unittest for the YAMLReporterimport yaml
import time
from twisted.trial import unittest
from ooni.reporter import YAMLReporter
class MockTest(object):
_start_time = time.time()
report = {'report_content': 'ham'}
input = 'spam'
class TestYAMLReporter(unittest.TestCase):
def setUp(self):
pass
def test_write_report(self):
test_details = {
'test_name': 'spam',
'test_version': '1.0'
}
test = MockTest()
y_reporter = YAMLReporter(test_details)
y_reporter.createReport()
y_reporter.testDone(test, 'spam')
with open(y_reporter.report_path) as f:
report_entries = yaml.safe_load_all(f)
# Check for keys in header
entry = report_entries.next()
assert all(x in entry for x in ['test_name', 'test_version'])
entry = report_entries.next()
# Check for first entry of report
assert all(x in entry \
for x in ['report_content', 'input', \
'test_name', 'test_started', \
'test_runtime'])
|
<commit_before><commit_msg>Add unittest for the YAMLReporter<commit_after>import yaml
import time
from twisted.trial import unittest
from ooni.reporter import YAMLReporter
class MockTest(object):
_start_time = time.time()
report = {'report_content': 'ham'}
input = 'spam'
class TestYAMLReporter(unittest.TestCase):
def setUp(self):
pass
def test_write_report(self):
test_details = {
'test_name': 'spam',
'test_version': '1.0'
}
test = MockTest()
y_reporter = YAMLReporter(test_details)
y_reporter.createReport()
y_reporter.testDone(test, 'spam')
with open(y_reporter.report_path) as f:
report_entries = yaml.safe_load_all(f)
# Check for keys in header
entry = report_entries.next()
assert all(x in entry for x in ['test_name', 'test_version'])
entry = report_entries.next()
# Check for first entry of report
assert all(x in entry \
for x in ['report_content', 'input', \
'test_name', 'test_started', \
'test_runtime'])
|
|
7c1807c7981ab6d934efcf4e3044ee502da81bc5
|
scripts/fix-boost-dylib-id.py
|
scripts/fix-boost-dylib-id.py
|
#!/usr/bin/env python3
# Copyright (c) 2013, Ruslan Baratov
# All rights reserved.
import argparse
import glob
import os
import re
import subprocess
import sys
parser = argparse.ArgumentParser(description='Fix boost dynamic libraries id')
parser.add_argument(
'--install-dir', required=True, help='Libraries install directory'
)
args = parser.parse_args()
install_dir = os.path.abspath(args.install_dir)
if not os.path.exists(install_dir):
sys.exit("Directory '{}' not found".format(install_dir))
pattern = install_dir + '/libboost_*.dylib'
for fix_dylib in glob.glob(pattern):
subprocess.check_call(['install_name_tool', '-id', fix_dylib, fix_dylib])
output = subprocess.check_output(
['otool', '-L', fix_dylib], universal_newlines=True
)
dependent_list = output.split('\n')[:-1] # remove last
for dependent_dylib in dependent_list:
if dependent_dylib.startswith('\tlibboost_'):
libname = re.sub(r'\t(libboost_.*.dylib) .*', r'\1', dependent_dylib)
libname_path = install_dir + '/' + libname
subprocess.check_call(
['install_name_tool', '-change', libname, libname_path, fix_dylib]
)
|
Add script: fix boost dylib id on Mac OSX
|
Add script: fix boost dylib id on Mac OSX [skip ci]
|
Python
|
bsd-2-clause
|
dvirtz/hunter,NeroBurner/hunter,ulricheck/hunter,x10mind/hunter,sumedhghaisas/hunter,isaachier/hunter,ulricheck/hunter,tatraian/hunter,daminetreg/hunter,ErniBrown/hunter,RomanYudintsev/hunter,ikliashchou/hunter,mchiasson/hunter,ThomasFeher/hunter,dvirtz/hunter,madmongo1/hunter,stohrendorf/hunter,madmongo1/hunter,mchiasson/hunter,RomanYudintsev/hunter,jkhoogland/hunter,x10mind/hunter,xsacha/hunter,dmpriso/hunter,caseymcc/hunter,ruslo/hunter,shekharhimanshu/hunter,alamaison/hunter,ikliashchou/hunter,shekharhimanshu/hunter,Knitschi/hunter,zhuhaow/hunter,ulricheck/hunter,akalsi87/hunter,dan-42/hunter,dan-42/hunter,fire-hunter/hunter,fwinnen/hunter,fwinnen/hunter,designerror/hunter,tatraian/hunter,stohrendorf/hunter,mchiasson/hunter,ruslo/hunter,sumedhghaisas/hunter,caseymcc/hunter,RomanYudintsev/hunter,lucmichalski/hunter,jkhoogland/hunter,caseymcc/hunter,isaachier/hunter,ErniBrown/hunter,vdsrd/hunter,mchiasson/hunter,lucmichalski/hunter,pretyman/hunter,dmpriso/hunter,dan-42/hunter,headupinclouds/hunter,vdsrd/hunter,ledocc/hunter,ingenue/hunter,akalsi87/hunter,dan-42/hunter,dmpriso/hunter,sumedhghaisas/hunter,ikliashchou/hunter,x10mind/hunter,madmongo1/hunter,stohrendorf/hunter,shekharhimanshu/hunter,ingenue/hunter,akalsi87/hunter,ingenue/hunter,zhuhaow/hunter,daminetreg/hunter,zhuhaow/hunter,xsacha/hunter,xsacha/hunter,vdsrd/hunter,fwinnen/hunter,dvirtz/hunter,ThomasFeher/hunter,jkhoogland/hunter,pretyman/hunter,ErniBrown/hunter,NeroBurner/hunter,headupinclouds/hunter,ErniBrown/hunter,daminetreg/hunter,tatraian/hunter,alamaison/hunter,pretyman/hunter,NeroBurner/hunter,designerror/hunter,ThomasFeher/hunter,fire-hunter/hunter,lucmichalski/hunter,alamaison/hunter,ingenue/hunter,fire-hunter/hunter,headupinclouds/hunter,Knitschi/hunter,ledocc/hunter,xsacha/hunter,NeroBurner/hunter,Knitschi/hunter,isaachier/hunter,ruslo/hunter,madmongo1/hunter,designerror/hunter,pretyman/hunter,ruslo/hunter,ikliashchou/hunter,ledocc/hunter,isaachier/hunter
|
Add script: fix boost dylib id on Mac OSX [skip ci]
|
#!/usr/bin/env python3
# Copyright (c) 2013, Ruslan Baratov
# All rights reserved.
import argparse
import glob
import os
import re
import subprocess
import sys
parser = argparse.ArgumentParser(description='Fix boost dynamic libraries id')
parser.add_argument(
'--install-dir', required=True, help='Libraries install directory'
)
args = parser.parse_args()
install_dir = os.path.abspath(args.install_dir)
if not os.path.exists(install_dir):
sys.exit("Directory '{}' not found".format(install_dir))
pattern = install_dir + '/libboost_*.dylib'
for fix_dylib in glob.glob(pattern):
subprocess.check_call(['install_name_tool', '-id', fix_dylib, fix_dylib])
output = subprocess.check_output(
['otool', '-L', fix_dylib], universal_newlines=True
)
dependent_list = output.split('\n')[:-1] # remove last
for dependent_dylib in dependent_list:
if dependent_dylib.startswith('\tlibboost_'):
libname = re.sub(r'\t(libboost_.*.dylib) .*', r'\1', dependent_dylib)
libname_path = install_dir + '/' + libname
subprocess.check_call(
['install_name_tool', '-change', libname, libname_path, fix_dylib]
)
|
<commit_before><commit_msg>Add script: fix boost dylib id on Mac OSX [skip ci]<commit_after>
|
#!/usr/bin/env python3
# Copyright (c) 2013, Ruslan Baratov
# All rights reserved.
import argparse
import glob
import os
import re
import subprocess
import sys
parser = argparse.ArgumentParser(description='Fix boost dynamic libraries id')
parser.add_argument(
'--install-dir', required=True, help='Libraries install directory'
)
args = parser.parse_args()
install_dir = os.path.abspath(args.install_dir)
if not os.path.exists(install_dir):
sys.exit("Directory '{}' not found".format(install_dir))
pattern = install_dir + '/libboost_*.dylib'
for fix_dylib in glob.glob(pattern):
subprocess.check_call(['install_name_tool', '-id', fix_dylib, fix_dylib])
output = subprocess.check_output(
['otool', '-L', fix_dylib], universal_newlines=True
)
dependent_list = output.split('\n')[:-1] # remove last
for dependent_dylib in dependent_list:
if dependent_dylib.startswith('\tlibboost_'):
libname = re.sub(r'\t(libboost_.*.dylib) .*', r'\1', dependent_dylib)
libname_path = install_dir + '/' + libname
subprocess.check_call(
['install_name_tool', '-change', libname, libname_path, fix_dylib]
)
|
Add script: fix boost dylib id on Mac OSX [skip ci]#!/usr/bin/env python3
# Copyright (c) 2013, Ruslan Baratov
# All rights reserved.
import argparse
import glob
import os
import re
import subprocess
import sys
parser = argparse.ArgumentParser(description='Fix boost dynamic libraries id')
parser.add_argument(
'--install-dir', required=True, help='Libraries install directory'
)
args = parser.parse_args()
install_dir = os.path.abspath(args.install_dir)
if not os.path.exists(install_dir):
sys.exit("Directory '{}' not found".format(install_dir))
pattern = install_dir + '/libboost_*.dylib'
for fix_dylib in glob.glob(pattern):
subprocess.check_call(['install_name_tool', '-id', fix_dylib, fix_dylib])
output = subprocess.check_output(
['otool', '-L', fix_dylib], universal_newlines=True
)
dependent_list = output.split('\n')[:-1] # remove last
for dependent_dylib in dependent_list:
if dependent_dylib.startswith('\tlibboost_'):
libname = re.sub(r'\t(libboost_.*.dylib) .*', r'\1', dependent_dylib)
libname_path = install_dir + '/' + libname
subprocess.check_call(
['install_name_tool', '-change', libname, libname_path, fix_dylib]
)
|
<commit_before><commit_msg>Add script: fix boost dylib id on Mac OSX [skip ci]<commit_after>#!/usr/bin/env python3
# Copyright (c) 2013, Ruslan Baratov
# All rights reserved.
import argparse
import glob
import os
import re
import subprocess
import sys
parser = argparse.ArgumentParser(description='Fix boost dynamic libraries id')
parser.add_argument(
'--install-dir', required=True, help='Libraries install directory'
)
args = parser.parse_args()
install_dir = os.path.abspath(args.install_dir)
if not os.path.exists(install_dir):
sys.exit("Directory '{}' not found".format(install_dir))
pattern = install_dir + '/libboost_*.dylib'
for fix_dylib in glob.glob(pattern):
subprocess.check_call(['install_name_tool', '-id', fix_dylib, fix_dylib])
output = subprocess.check_output(
['otool', '-L', fix_dylib], universal_newlines=True
)
dependent_list = output.split('\n')[:-1] # remove last
for dependent_dylib in dependent_list:
if dependent_dylib.startswith('\tlibboost_'):
libname = re.sub(r'\t(libboost_.*.dylib) .*', r'\1', dependent_dylib)
libname_path = install_dir + '/' + libname
subprocess.check_call(
['install_name_tool', '-change', libname, libname_path, fix_dylib]
)
|
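A hedged way to verify the script's effect from Python; the library path is hypothetical, and otool -D prints a dylib's install name (its id) on macOS.
# Hedged check: after the fix, each dylib's id is its absolute path.
import subprocess
out = subprocess.check_output(
    ['otool', '-D', '/opt/boost/lib/libboost_system.dylib'],  # made-up path
    universal_newlines=True)
print(out)  # expect the absolute path on the second line of output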
|
45b42cf32fa82a590e854ac1fe765525ddef4f41
|
sample/python/swift_put_object.py
|
sample/python/swift_put_object.py
|
#!/usr/bin/env python
import pprint
import sys
from os import environ as env
from swiftclient.client import get_auth, put_object, put_container
from swiftclient.exceptions import ClientException
auth_url = env.get('OS_AUTH_URL')
account = env.get('OS_TENANT_NAME')
user = env.get('OS_USERNAME')
key = env.get('OS_PASSWORD')
container = 'test_put_object'
input_file = sys.argv[1]
obj = input_file
class read_wrapper(object):
def __init__(self, fin):
self.fin = fin
def read(self, size=None):
if size:
print "try to read %d" % size
return self.fin.read(size)
def __getattr__(self, name):
return getattr(self.fin, name)
try:
pp = pprint.PrettyPrinter(indent=2)
acuser = "%s:%s" % (account, user)
(storage_url, token) = get_auth(auth_url, acuser, key, auth_version='2.0')
put_container(storage_url, token, container)
resp_dict = {}
with open(input_file, 'rb') as fin:
fin2 = read_wrapper(fin)
resp_etag = put_object(storage_url, token, container, obj,
contents=fin2, chunk_size=65535,
content_type="application/octet-stream",
response_dict=resp_dict)
print "put_object return value:%s" % resp_etag
print "put_object response headers:"
pp.pprint(resp_dict)
except ClientException, e:
print e
except IOError, e:
print e
|
Add a put_object sample script.
|
Add a put_object sample script.
|
Python
|
apache-2.0
|
yoyama/swift_book
|
Add a put_object sample script.
|
#!/usr/bin/env python
import pprint
import sys
from os import environ as env
from swiftclient.client import get_auth, put_object, put_container
from swiftclient.exceptions import ClientException
auth_url = env.get('OS_AUTH_URL')
account = env.get('OS_TENANT_NAME')
user = env.get('OS_USERNAME')
key = env.get('OS_PASSWORD')
container = 'test_put_object'
input_file = sys.argv[1]
obj = input_file
class read_wrapper(object):
def __init__(self, fin):
self.fin = fin
def read(self, size=None):
if size:
print "try to read %d" % size
return self.fin.read(size)
def __getattr__(self, name):
return getattr(self.fin, name)
try:
pp = pprint.PrettyPrinter(indent=2)
acuser = "%s:%s" % (account, user)
(storage_url, token) = get_auth(auth_url, acuser, key, auth_version='2.0')
put_container(storage_url, token, container)
resp_dict = {}
with open(input_file, 'rb') as fin:
fin2 = read_wrapper(fin)
resp_etag = put_object(storage_url, token, container, obj,
contents=fin2, chunk_size=65535,
content_type="application/octet-stream",
response_dict=resp_dict)
print "put_object return value:%s" % resp_etag
print "put_object response headers:"
pp.pprint(resp_dict)
except ClientException, e:
print e
except IOError, e:
print e
|
<commit_before><commit_msg>Add a put_object sample script.<commit_after>
|
#!/usr/bin/env python
import pprint
import sys
from os import environ as env
from swiftclient.client import get_auth, put_object, put_container
from swiftclient.exceptions import ClientException
auth_url = env.get('OS_AUTH_URL')
account = env.get('OS_TENANT_NAME')
user = env.get('OS_USERNAME')
key = env.get('OS_PASSWORD')
container = 'test_put_object'
input_file = sys.argv[1]
obj = input_file
class read_wrapper(object):
def __init__(self, fin):
self.fin = fin
def read(self, size=None):
if size:
print "try to read %d" % size
return self.fin.read(size)
def __getattr__(self, name):
return getattr(self.fin, name)
try:
pp = pprint.PrettyPrinter(indent=2)
acuser = "%s:%s" % (account, user)
(storage_url, token) = get_auth(auth_url, acuser, key, auth_version='2.0')
put_container(storage_url, token, container)
resp_dict = {}
with open(input_file, 'rb') as fin:
fin2 = read_wrapper(fin)
resp_etag = put_object(storage_url, token, container, obj,
contents=fin2, chunk_size=65535,
content_type="application/octet-stream",
response_dict=resp_dict)
print "put_object return value:%s" % resp_etag
print "put_object response headers:"
pp.pprint(resp_dict)
except ClientException, e:
print e
except IOError, e:
print e
|
Add a put_object sample script.#!/usr/bin/env python
import pprint
import sys
from os import environ as env
from swiftclient.client import get_auth, put_object, put_container
from swiftclient.exceptions import ClientException
auth_url = env.get('OS_AUTH_URL')
account = env.get('OS_TENANT_NAME')
user = env.get('OS_USERNAME')
key = env.get('OS_PASSWORD')
container = 'test_put_object'
input_file = sys.argv[1]
obj = input_file
class read_wrapper(object):
def __init__(self, fin):
self.fin = fin
def read(self, size=None):
if size:
print "try to read %d" % size
return self.fin.read(size)
def __getattr__(self, name):
return getattr(self.fin, name)
try:
pp = pprint.PrettyPrinter(indent=2)
acuser = "%s:%s" % (account, user)
(storage_url, token) = get_auth(auth_url, acuser, key, auth_version='2.0')
put_container(storage_url, token, container)
resp_dict = {}
with open(input_file, 'rb') as fin:
fin2 = read_wrapper(fin)
resp_etag = put_object(storage_url, token, container, obj,
contents=fin2, chunk_size=65535,
content_type="application/octet-stream",
response_dict=resp_dict)
print "put_object return value:%s" % resp_etag
print "put_object response headers:"
pp.pprint(resp_dict)
except ClientException, e:
print e
except IOError, e:
print e
|
<commit_before><commit_msg>Add a put_object sample script.<commit_after>#!/usr/bin/env python
import pprint
import sys
from os import environ as env
from swiftclient.client import get_auth, put_object, put_container
from swiftclient.exceptions import ClientException
auth_url = env.get('OS_AUTH_URL')
account = env.get('OS_TENANT_NAME')
user = env.get('OS_USERNAME')
key = env.get('OS_PASSWORD')
container = 'test_put_object'
input_file = sys.argv[1]
obj = input_file
class read_wrapper(object):
def __init__(self, fin):
self.fin = fin
def read(self, size=None):
if size:
print "try to read %d" % size
return self.fin.read(size)
def __getattr__(self, name):
return getattr(self.fin, name)
try:
pp = pprint.PrettyPrinter(indent=2)
acuser = "%s:%s" % (account, user)
(storage_url, token) = get_auth(auth_url, acuser, key, auth_version='2.0')
put_container(storage_url, token, container)
resp_dict = {}
with open(input_file, 'rb') as fin:
fin2 = read_wrapper(fin)
resp_etag = put_object(storage_url, token, container, obj,
contents=fin2, chunk_size=65535,
content_type="application/octet-stream",
response_dict=resp_dict)
print "put_object return value:%s" % resp_etag
print "put_object response headers:"
pp.pprint(resp_dict)
except ClientException, e:
print e
except IOError, e:
print e
|
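A hedged standalone check of the read wrapper above (Python 2, like the sample): it confirms that chunked reads pass through and get logged, using an in-memory file so no Swift cluster is needed.
# Hedged sketch: the wrapper delegates read() and prints chunk sizes.
from StringIO import StringIO
wrapped = read_wrapper(StringIO('x' * 100))
chunk = wrapped.read(64)   # prints "try to read 64"
assert len(chunk) == 64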
|
4ec4acf3b70f939e261dfb3425b23dad97a799f6
|
techgig_string_membership.py
|
techgig_string_membership.py
|
def main():
a=raw_input()
c=raw_input()
if a.find(c)>=0:
print "True"
else:
print "False"
# Write code here
main()
|
Print true if the character is present in the string
|
Print true if the character is present in the string
|
Python
|
mit
|
kumarisneha/practice_repo
|
Print true if the character is present in the string
|
def main():
a=raw_input()
c=raw_input()
if a.find(c)>=0:
print "True"
else:
print "False"
# Write code here
main()
|
<commit_before><commit_msg>Print true if the character is present in the string<commit_after>
|
def main():
a=raw_input()
c=raw_input()
if a.find(c)>=0:
print "True"
else:
print "False"
# Write code here
main()
|
Print true if the character is present in the stringdef main():
a=raw_input()
c=raw_input()
if a.find(c)>=0:
print "True"
else:
print "False"
# Write code here
main()
|
<commit_before><commit_msg>Print true if the character is present in the string<commit_after>def main():
a=raw_input()
c=raw_input()
if a.find(c)>=0:
print "True"
else:
print "False"
# Write code here
main()
|
|
4daca355a9ba2807b9f992199e6a1ca78d3678fd
|
wordpress_formatter.py
|
wordpress_formatter.py
|
import sys
def fixFormat(file_name):
original = open(file_name, 'r')
fixed_copy = open("fixed_" + file_name, 'w')
for line in original:
line = line.replace('<', '<')
line = line.replace('>', '>')
line = line.replace(""", '"')
fixed_copy.write(line)
original.close()
fixed_copy.close()
file_name = sys.argv[1]
fixFormat(file_name)
|
Add core functionality for formatter
|
Add core functionality for formatter
|
Python
|
mit
|
HenryDangPRG/WordPress-Code-Formatter
|
Add core functionality for formatter
|
import sys
def fixFormat(file_name):
original = open(file_name, 'r')
fixed_copy = open("fixed_" + file_name, 'w')
for line in original:
line = line.replace('<', '<')
line = line.replace('>', '>')
line = line.replace(""", '"')
fixed_copy.write(line)
original.close()
fixed_copy.close()
file_name = sys.argv[1]
fixFormat(file_name)
|
<commit_before><commit_msg>Add core functionality for formatter<commit_after>
|
import sys
def fixFormat(file_name):
original = open(file_name, 'r')
fixed_copy = open("fixed_" + file_name, 'w')
for line in original:
line = line.replace('<', '<')
line = line.replace('>', '>')
line = line.replace(""", '"')
fixed_copy.write(line)
original.close()
fixed_copy.close()
file_name = sys.argv[1]
fixFormat(file_name)
|
Add core functionality for formatterimport sys
def fixFormat(file_name):
original = open(file_name, 'r')
fixed_copy = open("fixed_" + file_name, 'w')
for line in original:
line = line.replace('<', '<')
line = line.replace('>', '>')
line = line.replace(""", '"')
fixed_copy.write(line)
original.close()
fixed_copy.close()
file_name = sys.argv[1]
fixFormat(file_name)
|
<commit_before><commit_msg>Add core functionality for formatter<commit_after>import sys
def fixFormat(file_name):
original = open(file_name, 'r')
fixed_copy = open("fixed_" + file_name, 'w')
for line in original:
line = line.replace('<', '<')
line = line.replace('>', '>')
line = line.replace(""", '"')
fixed_copy.write(line)
original.close()
fixed_copy.close()
file_name = sys.argv[1]
fixFormat(file_name)
|
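A quick hedged sanity check of the replacement chain above, done in memory instead of through files; the sample line is made up, and the quote entity is written in single quotes here for clarity.
# Hedged check: the escaped entities decode back to real characters.
line = 'if (a < b): s = "x"'
line = line.replace('<', '<').replace('>', '>').replace('"', '"')
assert line == 'if (a < b): s = "x"'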
|
8976733c9e2cf0fb03d93187fb138c07416648d9
|
ttt.py
|
ttt.py
|
#!/usr/bin/env python3
from collections import namedtuple
import csv
WordList = namedtuple("WordList", ['nouns', 'verbs', 'conjunctions', 'others'])
def load_wordlist(filename):
wordlist = WordList([], [], [], []) # A totally empty list.
with open(filename) as wordlist_file:
for line in wordlist_file:
# Check that the line is valid; if not, skip it.
split_line = line.strip('\n').split(',')
if len(split_line) != 2:
continue
else:
# Line was valid; get the part of speech and put it in the right bin
if split_line[1] == 'n':
wordlist.nouns.append(split_line[0])
elif split_line[1] == 'v':
wordlist.verbs.append(split_line[0])
elif split_line[1] == 'c':
wordlist.conjunctions.append(split_line[0])
else:
wordlist.others.append(split_line[0])
return wordlist
|
Add ability to load wordlists into usable data structures.
|
Add ability to load wordlists into usable data structures.
|
Python
|
mit
|
SilverWingedSeraph/tor-to-terabithia
|
Add ability to load wordlists into usable data structures.
|
#!/usr/bin/env python3
from collections import namedtuple
import csv
WordList = namedtuple("WordList", ['nouns', 'verbs', 'conjunctions', 'others'])
def load_wordlist(filename):
wordlist = WordList([], [], [], []) # A totally empty list.
with open(filename) as wordlist_file:
for line in wordlist_file:
# Check that the line is valid; if not, skip it.
split_line = line.strip('\n').split(',')
if len(split_line) != 2:
continue
else:
# Line was valid; get the part of speech and put it in the right bin
if split_line[1] == 'n':
wordlist.nouns.append(split_line[0])
elif split_line[1] == 'v':
wordlist.verbs.append(split_line[0])
elif split_line[1] == 'c':
wordlist.conjunctions.append(split_line[0])
else:
wordlist.others.append(split_line[0])
return wordlist
|
<commit_before><commit_msg>Add ability to load wordlists into usable data structures.<commit_after>
|
#!/usr/bin/env python3
from collections import namedtuple
import csv
WordList = namedtuple("WordList", ['nouns', 'verbs', 'conjunctions', 'others'])
def load_wordlist(filename):
wordlist = WordList([], [], [], []) # A totally empty list.
with open(filename) as wordlist_file:
for line in wordlist_file:
# Check that the line is valid; if not, skip it.
split_line = line.strip('\n').split(',')
if len(split_line) != 2:
continue
else:
# Line was valid; get the part of speech and put it in the right bin
if split_line[1] == 'n':
wordlist.nouns.append(split_line[0])
elif split_line[1] == 'v':
wordlist.verbs.append(split_line[0])
elif split_line[1] == 'c':
wordlist.conjunctions.append(split_line[0])
else:
wordlist.others.append(split_line[0])
return wordlist
|
Add ability to load wordlists into usable data structures.#!/usr/bin/env python3
from collections import namedtuple
import csv
WordList = namedtuple("WordList", ['nouns', 'verbs', 'conjunctions', 'others'])
def load_wordlist(filename):
wordlist = WordList([], [], [], []) # A totally empty list.
with open(filename) as wordlist_file:
for line in wordlist_file:
# Check that the line is valid; if not, skip it.
split_line = line.strip('\n').split(',')
if len(split_line) != 2:
continue
else:
# Line was valid; get the part of speech and put it in the right bin
if split_line[1] == 'n':
wordlist.nouns.append(split_line[0])
elif split_line[1] == 'v':
wordlist.verbs.append(split_line[0])
elif split_line[1] == 'c':
wordlist.conjunctions.append(split_line[0])
else:
wordlist.others.append(split_line[0])
return wordlist
|
<commit_before><commit_msg>Add ability to load wordlists into usable data structures.<commit_after>#!/usr/bin/env python3
from collections import namedtuple
import csv
WordList = namedtuple("WordList", ['nouns', 'verbs', 'conjunctions', 'others'])
def load_wordlist(filename):
wordlist = WordList([], [], [], []) # A totally empty list.
with open(filename) as wordlist_file:
for line in wordlist_file:
# Check that the line is valid; if not, skip it.
split_line = line.strip('\n').split(',')
if len(split_line) != 2:
continue
else:
# Line was valid; get the part of speech and put it in the right bin
if split_line[1] == 'n':
wordlist.nouns.append(split_line[0])
elif split_line[1] == 'v':
wordlist.verbs.append(split_line[0])
elif split_line[1] == 'c':
wordlist.conjunctions.append(split_line[0])
else:
wordlist.others.append(split_line[0])
return wordlist
|
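A hedged usage sketch with a throwaway wordlist file; the file name and entries are examples. Lines without exactly one comma are skipped, matching the loader's validity check.
# Hedged sketch: write a tiny wordlist, load it, and check the bins.
with open('demo_words.csv', 'w') as f:
    f.write('bridge,n\nrun,v\nand,c\nswiftly,adv\nbadline\n')
words = load_wordlist('demo_words.csv')
assert words.nouns == ['bridge'] and words.verbs == ['run']
assert words.conjunctions == ['and'] and words.others == ['swiftly']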
|
9df3c8f04c25866a92b262218ca5976536097837
|
tracpro/orgs_ext/tests/test_utils.py
|
tracpro/orgs_ext/tests/test_utils.py
|
import mock
from requests import HTTPError
from django.test import TestCase
from temba.base import TembaAPIError
from tracpro.test import factories
from .. import utils
class TestRunOrgTask(TestCase):
def setUp(self):
super(TestRunOrgTask, self).setUp()
self.mock_task = mock.Mock(return_value="hello")
self.org = factories.Org(api_token="token")
def test_org_with_api_token(self):
"""Return result of task if API token is valid."""
result = utils.run_org_task(self.org, self.mock_task)
self.assertEqual(result, "hello")
self.mock_task.assert_called_once_with(self.org.pk)
def test_org_with_blank_api_token(self):
"""Do not call task function if API token is blank."""
self.org.api_token = ""
result = utils.run_org_task(self.org, self.mock_task)
self.assertIsNone(result)
self.mock_task.assert_not_called()
def test_org_with_null_api_token(self):
"""Do not cal task function if API token is null."""
self.org.api_token = None
result = utils.run_org_task(self.org, self.mock_task)
self.assertIsNone(result)
self.mock_task.assert_not_called()
def test_org_with_invalid_api_token(self):
"""Handle invalid API token exception."""
def side_effect(org_id):
err = HTTPError()
err.response = mock.Mock(status_code=403)
raise TembaAPIError(caused_by=err)
self.mock_task.side_effect = side_effect
result = utils.run_org_task(self.org, self.mock_task)
self.assertIsNone(result)
self.mock_task.assert_called_once_with(self.org.pk)
def test_org_task_unknown_exception(self):
"""Raise unknown errors."""
self.mock_task.side_effect = Exception
with self.assertRaises(Exception):
utils.run_org_task(self.org, self.mock_task)
self.mock_task.assert_called_once_with(self.org.pk)
|
Add tests for org task util
|
Add tests for org task util
|
Python
|
bsd-3-clause
|
xkmato/tracpro,rapidpro/tracpro,rapidpro/tracpro,xkmato/tracpro,xkmato/tracpro,xkmato/tracpro,rapidpro/tracpro
|
Add tests for org task util
|
import mock
from requests import HTTPError
from django.test import TestCase
from temba.base import TembaAPIError
from tracpro.test import factories
from .. import utils
class TestRunOrgTask(TestCase):
def setUp(self):
super(TestRunOrgTask, self).setUp()
self.mock_task = mock.Mock(return_value="hello")
self.org = factories.Org(api_token="token")
def test_org_with_api_token(self):
"""Return result of task if API token is valid."""
result = utils.run_org_task(self.org, self.mock_task)
self.assertEqual(result, "hello")
self.mock_task.assert_called_once_with(self.org.pk)
def test_org_with_blank_api_token(self):
"""Do not call task function if API token is blank."""
self.org.api_token = ""
result = utils.run_org_task(self.org, self.mock_task)
self.assertIsNone(result)
self.mock_task.assert_not_called()
def test_org_with_null_api_token(self):
"""Do not cal task function if API token is null."""
self.org.api_token = None
result = utils.run_org_task(self.org, self.mock_task)
self.assertIsNone(result)
self.mock_task.assert_not_called()
def test_org_with_invalid_api_token(self):
"""Handle invalid API token exception."""
def side_effect(org_id):
err = HTTPError()
err.response = mock.Mock(status_code=403)
raise TembaAPIError(caused_by=err)
self.mock_task.side_effect = side_effect
result = utils.run_org_task(self.org, self.mock_task)
self.assertIsNone(result)
self.mock_task.assert_called_once_with(self.org.pk)
def test_org_task_unknown_exception(self):
"""Raise unknown errors."""
self.mock_task.side_effect = Exception
with self.assertRaises(Exception):
utils.run_org_task(self.org, self.mock_task)
self.mock_task.assert_called_once_with(self.org.pk)
|
<commit_before><commit_msg>Add tests for org task util<commit_after>
|
import mock
from requests import HTTPError
from django.test import TestCase
from temba.base import TembaAPIError
from tracpro.test import factories
from .. import utils
class TestRunOrgTask(TestCase):
def setUp(self):
super(TestRunOrgTask, self).setUp()
self.mock_task = mock.Mock(return_value="hello")
self.org = factories.Org(api_token="token")
def test_org_with_api_token(self):
"""Return result of task if API token is valid."""
result = utils.run_org_task(self.org, self.mock_task)
self.assertEqual(result, "hello")
self.mock_task.assert_called_once_with(self.org.pk)
def test_org_with_blank_api_token(self):
"""Do not call task function if API token is blank."""
self.org.api_token = ""
result = utils.run_org_task(self.org, self.mock_task)
self.assertIsNone(result)
self.mock_task.assert_not_called()
def test_org_with_null_api_token(self):
"""Do not cal task function if API token is null."""
self.org.api_token = None
result = utils.run_org_task(self.org, self.mock_task)
self.assertIsNone(result)
self.mock_task.assert_not_called()
def test_org_with_invalid_api_token(self):
"""Handle invalid API token exception."""
def side_effect(org_id):
err = HTTPError()
err.response = mock.Mock(status_code=403)
raise TembaAPIError(caused_by=err)
self.mock_task.side_effect = side_effect
result = utils.run_org_task(self.org, self.mock_task)
self.assertIsNone(result)
self.mock_task.assert_called_once_with(self.org.pk)
def test_org_task_unknown_exception(self):
"""Raise unknown errors."""
self.mock_task.side_effect = Exception
with self.assertRaises(Exception):
utils.run_org_task(self.org, self.mock_task)
self.mock_task.assert_called_once_with(self.org.pk)
|
Add tests for org task util
import mock
from requests import HTTPError
from django.test import TestCase
from temba.base import TembaAPIError
from tracpro.test import factories
from .. import utils
class TestRunOrgTask(TestCase):
def setUp(self):
super(TestRunOrgTask, self).setUp()
self.mock_task = mock.Mock(return_value="hello")
self.org = factories.Org(api_token="token")
def test_org_with_api_token(self):
"""Return result of task if API token is valid."""
result = utils.run_org_task(self.org, self.mock_task)
self.assertEqual(result, "hello")
self.mock_task.assert_called_once_with(self.org.pk)
def test_org_with_blank_api_token(self):
"""Do not call task function if API token is blank."""
self.org.api_token = ""
result = utils.run_org_task(self.org, self.mock_task)
self.assertIsNone(result)
self.mock_task.assert_not_called()
def test_org_with_null_api_token(self):
"""Do not cal task function if API token is null."""
self.org.api_token = None
result = utils.run_org_task(self.org, self.mock_task)
self.assertIsNone(result)
self.mock_task.assert_not_called()
def test_org_with_invalid_api_token(self):
"""Handle invalid API token exception."""
def side_effect(org_id):
err = HTTPError()
err.response = mock.Mock(status_code=403)
raise TembaAPIError(caused_by=err)
self.mock_task.side_effect = side_effect
result = utils.run_org_task(self.org, self.mock_task)
self.assertIsNone(result)
self.mock_task.assert_called_once_with(self.org.pk)
def test_org_task_unknown_exception(self):
"""Raise unknown errors."""
self.mock_task.side_effect = Exception
with self.assertRaises(Exception):
utils.run_org_task(self.org, self.mock_task)
self.mock_task.assert_called_once_with(self.org.pk)
|
<commit_before><commit_msg>Add tests for org task util<commit_after>import mock
from requests import HTTPError
from django.test import TestCase
from temba.base import TembaAPIError
from tracpro.test import factories
from .. import utils
class TestRunOrgTask(TestCase):
def setUp(self):
super(TestRunOrgTask, self).setUp()
self.mock_task = mock.Mock(return_value="hello")
self.org = factories.Org(api_token="token")
def test_org_with_api_token(self):
"""Return result of task if API token is valid."""
result = utils.run_org_task(self.org, self.mock_task)
self.assertEqual(result, "hello")
self.mock_task.assert_called_once_with(self.org.pk)
def test_org_with_blank_api_token(self):
"""Do not call task function if API token is blank."""
self.org.api_token = ""
result = utils.run_org_task(self.org, self.mock_task)
self.assertIsNone(result)
self.mock_task.assert_not_called()
def test_org_with_null_api_token(self):
"""Do not cal task function if API token is null."""
self.org.api_token = None
result = utils.run_org_task(self.org, self.mock_task)
self.assertIsNone(result)
self.mock_task.assert_not_called()
def test_org_with_invalid_api_token(self):
"""Handle invalid API token exception."""
def side_effect(org_id):
err = HTTPError()
err.response = mock.Mock(status_code=403)
raise TembaAPIError(caused_by=err)
self.mock_task.side_effect = side_effect
result = utils.run_org_task(self.org, self.mock_task)
self.assertIsNone(result)
self.mock_task.assert_called_once_with(self.org.pk)
def test_org_task_unknown_exception(self):
"""Raise unknown errors."""
self.mock_task.side_effect = Exception
with self.assertRaises(Exception):
utils.run_org_task(self.org, self.mock_task)
self.mock_task.assert_called_once_with(self.org.pk)
|
|
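The tests in the record above exercise a run_org_task helper that the record itself does not show. A minimal sketch of what such a helper could look like, inferred only from the asserted behaviour (skip blank or null tokens, swallow a TembaAPIError caused by an HTTP 403, re-raise anything else); the real tracpro implementation may differ.
from requests import HTTPError
from temba.base import TembaAPIError
def run_org_task(org, task):
    # Skip orgs whose API token is blank ("") or null (None).
    if not org.api_token:
        return None
    try:
        return task(org.pk)
    except TembaAPIError as e:
        # Swallow only errors caused by an invalid API token (HTTP 403).
        caused_by = getattr(e, 'caused_by', None)
        if isinstance(caused_by, HTTPError) and caused_by.response.status_code == 403:
            return None
        raise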
c13d90c3feeec531ee9ad4a3d9346390782cba7c
|
ex43.py
|
ex43.py
|
class Scene(object):
def enter(self):
pass
class Engine(object):
def __init__(self, scene_map):
pass
def play(self):
pass
class Death(Scene):
def enter(self):
pass
class CentralCorridor(Scene):
def enter(self):
pass
class TheBridge(Scene):
def enter(self):
pass
class EscapePod(Scene):
def enter(self):
pass
class Map(object):
def __init__(self, start_scene):
pass
def next_scene(self, scene_name):
pass
def opening_scene(self):
pass
a_map = Map("central_corridor")
a_game = Engine(a_map)
a_game.play()
|
Add initial code for exercise 43.
|
Add initial code for exercise 43.
|
Python
|
mit
|
hectoregm/lphw
|
Add initial code for exercise 43.
|
class Scene(object):
def enter(self):
pass
class Engine(object):
def __init__(self, scene_map):
pass
def play(self):
pass
class Death(Scene):
def enter(self):
pass
class CentralCorridor(Scene):
def enter(self):
pass
class TheBridge(Scene):
def enter(self):
pass
class EscapePod(Scene):
def enter(self):
pass
class Map(object):
def __init__(self, start_scene):
pass
def next_scene(self, scene_name):
pass
def opening_scene(self):
pass
a_map = Map("central_corridor")
a_game = Engine(a_map)
a_game.play()
|
<commit_before><commit_msg>Add initial code for exercise 43.<commit_after>
|
class Scene(object):
def enter(self):
pass
class Engine(object):
def __init__(self, scene_map):
pass
def play(self):
pass
class Death(Scene):
def enter(self):
pass
class CentralCorridor(Scene):
def enter(self):
pass
class TheBridge(Scene):
def enter(self):
pass
class EscapePod(Scene):
def enter(self):
pass
class Map(object):
def __init__(self, start_scene):
pass
def next_scene(self, scene_name):
pass
def opening_scene(self):
pass
a_map = Map("central_corridor")
a_game = Engine(a_map)
a_game.play()
|
Add initial code for exercise 43.
class Scene(object):
def enter(self):
pass
class Engine(object):
def __init__(self, scene_map):
pass
def play(self):
pass
class Death(Scene):
def enter(self):
pass
class CentralCorridor(Scene):
def enter(self):
pass
class TheBridge(Scene):
def enter(self):
pass
class EscapePod(Scene):
def enter(self):
pass
class Map(object):
def __init__(self, start_scene):
pass
def next_scene(self, scene_name):
pass
def opening_scene(self):
pass
a_map = Map("central_corridor")
a_game = Engine(a_map)
a_game.play()
|
<commit_before><commit_msg>Add initial code for exercise 43.<commit_after>class Scene(object):
def enter(self):
pass
class Engine(object):
def __init__(self, scene_map):
pass
def play(self):
pass
class Death(Scene):
def enter(self):
pass
class CentralCorridor(Scene):
def enter(self):
pass
class TheBridge(Scene):
def enter(self):
pass
class EscapePod(Scene):
def enter(self):
pass
class Map(object):
def __init__(self, start_scene):
pass
def next_scene(self, scene_name):
pass
def opening_scene(self):
pass
a_map = Map("central_corridor")
a_game = Engine(a_map)
a_game.play()
|
|
a94aab8fa6e795d1e440e20e9751810393ac0a73
|
tests/test_ping.py
|
tests/test_ping.py
|
from flask.ext.resty import Api
import pytest
# -----------------------------------------------------------------------------
@pytest.fixture(autouse=True)
def routes(app):
api = Api(app, '/api')
api.add_ping('/ping')
# -----------------------------------------------------------------------------
def test_ping(client):
response = client.get('/ping')
assert response.status_code == 200
assert not response.data
|
Add test for ping endpoint
|
Add test for ping endpoint
|
Python
|
mit
|
taion/flask-jsonapiview,4Catalyzer/flask-jsonapiview,4Catalyzer/flask-resty
|
Add test for ping endpoint
|
from flask.ext.resty import Api
import pytest
# -----------------------------------------------------------------------------
@pytest.fixture(autouse=True)
def routes(app):
api = Api(app, '/api')
api.add_ping('/ping')
# -----------------------------------------------------------------------------
def test_ping(client):
response = client.get('/ping')
assert response.status_code == 200
assert not response.data
|
<commit_before><commit_msg>Add test for ping endpoint<commit_after>
|
from flask.ext.resty import Api
import pytest
# -----------------------------------------------------------------------------
@pytest.fixture(autouse=True)
def routes(app):
api = Api(app, '/api')
api.add_ping('/ping')
# -----------------------------------------------------------------------------
def test_ping(client):
response = client.get('/ping')
assert response.status_code == 200
assert not response.data
|
Add test for ping endpoint
from flask.ext.resty import Api
import pytest
# -----------------------------------------------------------------------------
@pytest.fixture(autouse=True)
def routes(app):
api = Api(app, '/api')
api.add_ping('/ping')
# -----------------------------------------------------------------------------
def test_ping(client):
response = client.get('/ping')
assert response.status_code == 200
assert not response.data
|
<commit_before><commit_msg>Add test for ping endpoint<commit_after>from flask.ext.resty import Api
import pytest
# -----------------------------------------------------------------------------
@pytest.fixture(autouse=True)
def routes(app):
api = Api(app, '/api')
api.add_ping('/ping')
# -----------------------------------------------------------------------------
def test_ping(client):
response = client.get('/ping')
assert response.status_code == 200
assert not response.data
|
|
84796e96d8ad413fd657b6207025a0e64d680598
|
src/maximum_subarray.py
|
src/maximum_subarray.py
|
# brute force solution
# it works, but Time Limit Exceeded in leetcode
class Solution1:
# @param {integer[]} nums
# @return {integer}
def maxSubArray(self, nums):
if not nums:
return None
maxSum = nums[0]
for size in range(1, len(nums)+1):
for i in range(0, len(nums)+1-size):
# print nums[i:i+size]
curSum = sum(nums[i:i+size])
if curSum > maxSum:
maxSum = curSum
return maxSum
if __name__ == '__main__':
test_list = [[-2, 1, -3, 4, -1, 2, 1, -5, 4]]
result_list = [6]
success = True
solution = Solution1()
    for i, nums in enumerate(test_list):
result = solution.maxSubArray(nums)
if result != result_list[i]:
success = False
print nums
print 'Expected value', result_list[i]
print 'Actual value', result
if success:
print 'All the tests passed'
else:
print 'Please fix the failed test'
|
Add a brute force solution for the maximum subarray
|
Add a brute force solution for the maximum subarray
|
Python
|
mit
|
chancyWu/leetcode
|
Add a brute force solution for the maximum subarray
|
# brute force solution
# it works, but Time Limit Exceeded in leetcode
class Solution1:
# @param {integer[]} nums
# @return {integer}
def maxSubArray(self, nums):
if not nums:
return None
maxSum = nums[0]
for size in range(1, len(nums)+1):
for i in range(0, len(nums)+1-size):
# print nums[i:i+size]
curSum = sum(nums[i:i+size])
if curSum > maxSum:
maxSum = curSum
return maxSum
if __name__ == '__main__':
test_list = [[-2, 1, -3, 4, -1, 2, 1, -5, 4]]
result_list = [6]
success = True
solution = Solution1()
    for i, nums in enumerate(test_list):
result = solution.maxSubArray(nums)
if result != result_list[i]:
success = False
print nums
print 'Expected value', result_list[i]
print 'Actual value', result
if success:
print 'All the tests passed'
else:
print 'Please fix the failed test'
|
<commit_before><commit_msg>Add a brute force solution for the maximum subarray<commit_after>
|
# brute force solution
# it works, but Time Limit Exceeded in leetcode
class Solution1:
# @param {integer[]} nums
# @return {integer}
def maxSubArray(self, nums):
if not nums:
return None
maxSum = nums[0]
for size in range(1, len(nums)+1):
for i in range(0, len(nums)+1-size):
# print nums[i:i+size]
curSum = sum(nums[i:i+size])
if curSum > maxSum:
maxSum = curSum
return maxSum
if __name__ == '__main__':
test_list = [[-2, 1, -3, 4, -1, 2, 1, -5, 4]]
result_list = [6]
success = True
solution = Solution1()
    for i, nums in enumerate(test_list):
result = solution.maxSubArray(nums)
if result != result_list[i]:
success = False
print nums
print 'Expected value', result_list[i]
print 'Actual value', result
if success:
print 'All the tests passed'
else:
print 'Please fix the failed test'
|
Add a brute force solution for the maximum subarray
# brute force solution
# it works, but Time Limit Exceeded in leetcode
class Solution1:
# @param {integer[]} nums
# @return {integer}
def maxSubArray(self, nums):
if not nums:
return None
maxSum = nums[0]
for size in range(1, len(nums)+1):
for i in range(0, len(nums)+1-size):
# print nums[i:i+size]
curSum = sum(nums[i:i+size])
if curSum > maxSum:
maxSum = curSum
return maxSum
if __name__ == '__main__':
test_list = [[-2, 1, -3, 4, -1, 2, 1, -5, 4]]
result_list = [6]
success = True
solution = Solution1()
    for i, nums in enumerate(test_list):
result = solution.maxSubArray(nums)
if result != result_list[i]:
success = False
print nums
print 'Expected value', result_list[i]
print 'Actual value', result
if success:
print 'All the tests passed'
else:
print 'Please fix the failed test'
|
<commit_before><commit_msg>Add a brute force solution for the maximum subarray<commit_after># brute force solution
# it works, but Time Limit Exceeded in leetcode
class Solution1:
# @param {integer[]} nums
# @return {integer}
def maxSubArray(self, nums):
if not nums:
return None
maxSum = nums[0]
for size in range(1, len(nums)+1):
for i in range(0, len(nums)+1-size):
# print nums[i:i+size]
curSum = sum(nums[i:i+size])
if curSum > maxSum:
maxSum = curSum
return maxSum
if __name__ == '__main__':
test_list = [[-2, 1, -3, 4, -1, 2, 1, -5, 4]]
result_list = [6]
success = True
solution = Solution1()
    for i, nums in enumerate(test_list):
result = solution.maxSubArray(nums)
if result != result_list[i]:
success = False
print nums
print 'Expected value', result_list[i]
print 'Actual value', result
if success:
print 'All the tests passed'
else:
print 'Please fix the failed test'
|
|
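The commit message above notes that the brute-force version exceeds LeetCode's time limit. The standard linear-time fix is Kadane's algorithm; a sketch of a drop-in Solution2 (not part of the original record):
class Solution2:
    # @param {integer[]} nums
    # @return {integer}
    def maxSubArray(self, nums):
        if not nums:
            return None
        # Kadane's algorithm: O(n) time, O(1) extra space.
        max_sum = cur_sum = nums[0]
        for n in nums[1:]:
            # Either extend the current subarray or start a new one at n.
            cur_sum = max(n, cur_sum + n)
            max_sum = max(max_sum, cur_sum)
        return max_sum
On the sample input [-2, 1, -3, 4, -1, 2, 1, -5, 4] this returns 6, matching the expected value in the record's test.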
dbff077245130f175fdabaa347e33de7a2b71450
|
twitch_channels.py
|
twitch_channels.py
|
#!/usr/bin/env python
""" twitch_channels.py - Queries Twitch for channels of interest.
Used in conjunction with livestreamer-completion.
Usage examples:
./twitch_channels.py --follows myname
./twitch_channels.py --team eg --team teamliquid
"""
from __future__ import print_function
from argparse import ArgumentParser
from functools import partial
from itertools import chain
import requests
API_BASE_URL = "api.twitch.tv"
def api(path, *args, **params):
insecure = params.pop("insecure", False)
proto = insecure and "http://" or "https://"
url = proto + API_BASE_URL + path.format(*args)
res = requests.get(url, params=params)
return res.json()
def page_result(res):
for key, values in res.items():
if not key.startswith("_"):
return values
return []
def iterate_pages(func, limit=25):
offset, total = 0, limit
while offset < total:
res = func(limit=limit, offset=offset)
values = page_result(res)
offset += len(values)
total = res.get("_total")
yield values
def iterate_pages_result(*args, **kwargs):
return chain.from_iterable(iterate_pages(*args, **kwargs))
# Twitch APIs
team_channels = partial(api, "/api/team/{0}/all_channels.json", insecure=True)
user_follows = partial(api, "/kraken/users/{0}/follows/channels")
parser = ArgumentParser()
parser.add_argument("-f", "--follows", action="append", default=[],
metavar="user", help="channels a user is following",)
parser.add_argument("-t", "--team", action="append", default=[],
metavar="team", help="channels that are part of a team")
def main():
args = parser.parse_args()
if not (args.follows or args.team):
return parser.print_help()
for user in args.follows:
myuser_follows = partial(user_follows, user)
for channel in iterate_pages_result(myuser_follows, limit=100):
channel_name = channel.get("channel").get("name")
print("twitch.tv/{0}".format(channel_name))
for team in args.team:
channels = team_channels(team).get("channels", [])
for channel in channels:
channel_name = channel.get("channel").get("name")
print("twitch.tv/{0}".format(channel_name))
if __name__ == "__main__":
main()
|
Add extra utility to generate a URL list.
|
Add extra utility to generate a URL list.
|
Python
|
mit
|
chrippa/livestreamer-completion,chrippa/livestreamer-completion
|
Add extra utility to generate a URL list.
|
#!/usr/bin/env python
""" twitch_channels.py - Queries Twitch for channels of interest.
Used in conjunction with livestreamer-completion.
Usage examples:
./twitch_channels.py --follows myname
./twitch_channels.py --team eg --team teamliquid
"""
from __future__ import print_function
from argparse import ArgumentParser
from functools import partial
from itertools import chain
import requests
API_BASE_URL = "api.twitch.tv"
def api(path, *args, **params):
insecure = params.pop("insecure", False)
proto = insecure and "http://" or "https://"
url = proto + API_BASE_URL + path.format(*args)
res = requests.get(url, params=params)
return res.json()
def page_result(res):
for key, values in res.items():
if not key.startswith("_"):
return values
return []
def iterate_pages(func, limit=25):
offset, total = 0, limit
while offset < total:
res = func(limit=limit, offset=offset)
values = page_result(res)
offset += len(values)
total = res.get("_total")
yield values
def iterate_pages_result(*args, **kwargs):
return chain.from_iterable(iterate_pages(*args, **kwargs))
# Twitch APIs
team_channels = partial(api, "/api/team/{0}/all_channels.json", insecure=True)
user_follows = partial(api, "/kraken/users/{0}/follows/channels")
parser = ArgumentParser()
parser.add_argument("-f", "--follows", action="append", default=[],
metavar="user", help="channels a user is following",)
parser.add_argument("-t", "--team", action="append", default=[],
metavar="team", help="channels that are part of a team")
def main():
args = parser.parse_args()
if not (args.follows or args.team):
return parser.print_help()
for user in args.follows:
myuser_follows = partial(user_follows, user)
for channel in iterate_pages_result(myuser_follows, limit=100):
channel_name = channel.get("channel").get("name")
print("twitch.tv/{0}".format(channel_name))
for team in args.team:
channels = team_channels(team).get("channels", [])
for channel in channels:
channel_name = channel.get("channel").get("name")
print("twitch.tv/{0}".format(channel_name))
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add extra utility to generate a URL list.<commit_after>
|
#!/usr/bin/env python
""" twitch_channels.py - Queries Twitch for channels of interest.
Used in conjunction with livestreamer-completion.
Usage examples:
./twitch_channels.py --follows myname
./twitch_channels.py --team eg --team teamliquid
"""
from __future__ import print_function
from argparse import ArgumentParser
from functools import partial
from itertools import chain
import requests
API_BASE_URL = "api.twitch.tv"
def api(path, *args, **params):
insecure = params.pop("insecure", False)
proto = insecure and "http://" or "https://"
url = proto + API_BASE_URL + path.format(*args)
res = requests.get(url, params=params)
return res.json()
def page_result(res):
for key, values in res.items():
if not key.startswith("_"):
return values
return []
def iterate_pages(func, limit=25):
offset, total = 0, limit
while offset < total:
res = func(limit=limit, offset=offset)
values = page_result(res)
offset += len(values)
total = res.get("_total")
yield values
def iterate_pages_result(*args, **kwargs):
return chain.from_iterable(iterate_pages(*args, **kwargs))
# Twitch APIs
team_channels = partial(api, "/api/team/{0}/all_channels.json", insecure=True)
user_follows = partial(api, "/kraken/users/{0}/follows/channels")
parser = ArgumentParser()
parser.add_argument("-f", "--follows", action="append", default=[],
metavar="user", help="channels a user is following",)
parser.add_argument("-t", "--team", action="append", default=[],
metavar="team", help="channels that are part of a team")
def main():
args = parser.parse_args()
if not (args.follows or args.team):
return parser.print_help()
for user in args.follows:
myuser_follows = partial(user_follows, user)
for channel in iterate_pages_result(myuser_follows, limit=100):
channel_name = channel.get("channel").get("name")
print("twitch.tv/{0}".format(channel_name))
for team in args.team:
channels = team_channels(team).get("channels", [])
for channel in channels:
channel_name = channel.get("channel").get("name")
print("twitch.tv/{0}".format(channel_name))
if __name__ == "__main__":
main()
|
Add extra utility to generate a URL list.
#!/usr/bin/env python
""" twitch_channels.py - Queries Twitch for channels of interest.
Used in conjunction with livestreamer-completion.
Usage examples:
./twitch_channels.py --follows myname
./twitch_channels.py --team eg --team teamliquid
"""
from __future__ import print_function
from argparse import ArgumentParser
from functools import partial
from itertools import chain
import requests
API_BASE_URL = "api.twitch.tv"
def api(path, *args, **params):
insecure = params.pop("insecure", False)
proto = insecure and "http://" or "https://"
url = proto + API_BASE_URL + path.format(*args)
res = requests.get(url, params=params)
return res.json()
def page_result(res):
for key, values in res.items():
if not key.startswith("_"):
return values
return []
def iterate_pages(func, limit=25):
offset, total = 0, limit
while offset < total:
res = func(limit=limit, offset=offset)
values = page_result(res)
offset += len(values)
total = res.get("_total")
yield values
def iterate_pages_result(*args, **kwargs):
return chain.from_iterable(iterate_pages(*args, **kwargs))
# Twitch APIs
team_channels = partial(api, "/api/team/{0}/all_channels.json", insecure=True)
user_follows = partial(api, "/kraken/users/{0}/follows/channels")
parser = ArgumentParser()
parser.add_argument("-f", "--follows", action="append", default=[],
metavar="user", help="channels a user is following",)
parser.add_argument("-t", "--team", action="append", default=[],
metavar="team", help="channels that are part of a team")
def main():
args = parser.parse_args()
if not (args.follows or args.team):
return parser.print_help()
for user in args.follows:
myuser_follows = partial(user_follows, user)
for channel in iterate_pages_result(myuser_follows, limit=100):
channel_name = channel.get("channel").get("name")
print("twitch.tv/{0}".format(channel_name))
for team in args.team:
channels = team_channels(team).get("channels", [])
for channel in channels:
channel_name = channel.get("channel").get("name")
print("twitch.tv/{0}".format(channel_name))
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add extra utility to generate a URL list.<commit_after>#!/usr/bin/env python
""" twitch_channels.py - Queries Twitch for channels of interest.
Used in conjunction with livestreamer-completion.
Usage examples:
./twitch_channels.py --follows myname
./twitch_channels.py --team eg --team teamliquid
"""
from __future__ import print_function
from argparse import ArgumentParser
from functools import partial
from itertools import chain
import requests
API_BASE_URL = "api.twitch.tv"
def api(path, *args, **params):
insecure = params.pop("insecure", False)
proto = insecure and "http://" or "https://"
url = proto + API_BASE_URL + path.format(*args)
res = requests.get(url, params=params)
return res.json()
def page_result(res):
for key, values in res.items():
if not key.startswith("_"):
return values
return []
def iterate_pages(func, limit=25):
offset, total = 0, limit
while offset < total:
res = func(limit=limit, offset=offset)
values = page_result(res)
offset += len(values)
total = res.get("_total")
yield values
def iterate_pages_result(*args, **kwargs):
return chain.from_iterable(iterate_pages(*args, **kwargs))
# Twitch APIs
team_channels = partial(api, "/api/team/{0}/all_channels.json", insecure=True)
user_follows = partial(api, "/kraken/users/{0}/follows/channels")
parser = ArgumentParser()
parser.add_argument("-f", "--follows", action="append", default=[],
metavar="user", help="channels a user is following",)
parser.add_argument("-t", "--team", action="append", default=[],
metavar="team", help="channels that are part of a team")
def main():
args = parser.parse_args()
if not (args.follows or args.team):
return parser.print_help()
for user in args.follows:
myuser_follows = partial(user_follows, user)
for channel in iterate_pages_result(myuser_follows, limit=100):
channel_name = channel.get("channel").get("name")
print("twitch.tv/{0}".format(channel_name))
for team in args.team:
channels = team_channels(team).get("channels", [])
for channel in channels:
channel_name = channel.get("channel").get("name")
print("twitch.tv/{0}".format(channel_name))
if __name__ == "__main__":
main()
|
|
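The pagination helper in the record above keeps requesting pages until the offset reaches the API-reported _total. An illustration with a stand-in fetch function (the fake data and "follows" key are assumptions for demonstration, not a real Twitch endpoint):
DATA = list(range(10))
def fetch(limit, offset):
    # Mimics a paged API response: one non-underscore key plus a _total count.
    return {"follows": DATA[offset:offset + limit], "_total": len(DATA)}
for page in iterate_pages(fetch, limit=4):
    print(page)  # [0, 1, 2, 3], then [4, 5, 6, 7], then [8, 9]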
c9e0f57b867404ba5ae7871caa63c4ccd8b90a1d
|
reporting/get_couchdata_for_calisphere_id.py
|
reporting/get_couchdata_for_calisphere_id.py
|
# -*- coding: utf-8 -*-
# Use this to get data from the couchdb instance for a record from calisphere
# defaults to the staging environment
import sys
import argparse
import urllib
import ConfigParser
import requests
from get_solr_json import get_solr_json
url_couchdb = 'https://harvest-stg.cdlib.org/couchdb/ucldc/'
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
def main(objid):
config = ConfigParser.SafeConfigParser()
config.read('report.ini')
solr_url = config.get('stg-index', 'solrUrl')
api_key = config.get('stg-index', 'solrAuth')
query = { 'q': objid }
resp = get_solr_json(solr_url, query, api_key=api_key)
doc = resp['response']['docs'][0]
    url_couch_doc = url_couchdb + urllib.quote(doc['harvest_id_s'], safe='')
couch_doc = requests.get(url_couch_doc, verify=False).json()
print
print '==========================================================================='
print 'Calisphere/Solr ID: {}'.format(objid)
print 'CouchDB ID: {}'.format(doc['harvest_id_s'])
print 'isShownAt: {}'.format(couch_doc['isShownAt'])
print 'isShownBy: {}'.format(couch_doc.get('isShownBy', None))
print 'object?: {}'.format(couch_doc.get('object', None))
print '==========================================================================='
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('objid', nargs=1,)
argv = parser.parse_args()
sys.exit(main(argv.objid[0]))
# Copyright © 2016, Regents of the University of California
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# - Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# - Neither the name of the University of California nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
|
Add tool for debug of Calisphere id.
|
Add tool for debug of Calisphere id.
|
Python
|
bsd-3-clause
|
mredar/ucldc_api_data_quality,mredar/ucldc_api_data_quality,mredar/ucldc_api_data_quality,mredar/ucldc_api_data_quality
|
Add tool for debug of Calisphere id.
|
# -*- coding: utf-8 -*-
# Use this to get data from the couchdb instance for a record from calisphere
# defaults to the staging environment
import sys
import argparse
import urllib
import ConfigParser
import requests
from get_solr_json import get_solr_json
url_couchdb = 'https://harvest-stg.cdlib.org/couchdb/ucldc/'
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
def main(objid):
config = ConfigParser.SafeConfigParser()
config.read('report.ini')
solr_url = config.get('stg-index', 'solrUrl')
api_key = config.get('stg-index', 'solrAuth')
query = { 'q': objid }
resp = get_solr_json(solr_url, query, api_key=api_key)
doc = resp['response']['docs'][0]
    url_couch_doc = url_couchdb + urllib.quote(doc['harvest_id_s'], safe='')
couch_doc = requests.get(url_couch_doc, verify=False).json()
print
print '==========================================================================='
print 'Calisphere/Solr ID: {}'.format(objid)
print 'CouchDB ID: {}'.format(doc['harvest_id_s'])
print 'isShownAt: {}'.format(couch_doc['isShownAt'])
print 'isShownBy: {}'.format(couch_doc.get('isShownBy', None))
print 'object?: {}'.format(couch_doc.get('object', None))
print '==========================================================================='
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('objid', nargs=1,)
argv = parser.parse_args()
sys.exit(main(argv.objid[0]))
# Copyright © 2016, Regents of the University of California
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# - Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# - Neither the name of the University of California nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
|
<commit_before><commit_msg>Add tool for debug of Calisphere id.<commit_after>
|
# -*- coding: utf-8 -*-
# Use this to get data from the couchdb instance for a record from calisphere
# defaults to the staging environment
import sys
import argparse
import urllib
import ConfigParser
import requests
from get_solr_json import get_solr_json
url_couchdb = 'https://harvest-stg.cdlib.org/couchdb/ucldc/'
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
def main(objid):
config = ConfigParser.SafeConfigParser()
config.read('report.ini')
solr_url = config.get('stg-index', 'solrUrl')
api_key = config.get('stg-index', 'solrAuth')
query = { 'q': objid }
resp = get_solr_json(solr_url, query, api_key=api_key)
doc = resp['response']['docs'][0]
    url_couch_doc = url_couchdb + urllib.quote(doc['harvest_id_s'], safe='')
couch_doc = requests.get(url_couch_doc, verify=False).json()
print
print '==========================================================================='
print 'Calisphere/Solr ID: {}'.format(objid)
print 'CouchDB ID: {}'.format(doc['harvest_id_s'])
print 'isShownAt: {}'.format(couch_doc['isShownAt'])
print 'isShownBy: {}'.format(couch_doc.get('isShownBy', None))
print 'object?: {}'.format(couch_doc.get('object', None))
print '==========================================================================='
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('objid', nargs=1,)
argv = parser.parse_args()
sys.exit(main(argv.objid[0]))
# Copyright © 2016, Regents of the University of California
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# - Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# - Neither the name of the University of California nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
|
Add tool for debug of Calisphere id.
# -*- coding: utf-8 -*-
# Use this to get data from the couchdb instance for a record from calisphere
# defaults to the staging environment
import sys
import argparse
import urllib
import ConfigParser
import requests
from get_solr_json import get_solr_json
url_couchdb = 'https://harvest-stg.cdlib.org/couchdb/ucldc/'
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
def main(objid):
config = ConfigParser.SafeConfigParser()
config.read('report.ini')
solr_url = config.get('stg-index', 'solrUrl')
api_key = config.get('stg-index', 'solrAuth')
query = { 'q': objid }
resp = get_solr_json(solr_url, query, api_key=api_key)
doc = resp['response']['docs'][0]
    url_couch_doc = url_couchdb + urllib.quote(doc['harvest_id_s'], safe='')
couch_doc = requests.get(url_couch_doc, verify=False).json()
print
print '==========================================================================='
print 'Calisphere/Solr ID: {}'.format(objid)
print 'CouchDB ID: {}'.format(doc['harvest_id_s'])
print 'isShownAt: {}'.format(couch_doc['isShownAt'])
print 'isShownBy: {}'.format(couch_doc.get('isShownBy', None))
print 'object?: {}'.format(couch_doc.get('object', None))
print '==========================================================================='
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('objid', nargs=1,)
argv = parser.parse_args()
sys.exit(main(argv.objid[0]))
# Copyright © 2016, Regents of the University of California
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# - Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# - Neither the name of the University of California nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
|
<commit_before><commit_msg>Add tool for debug of Calisphere id.<commit_after># -*- coding: utf-8 -*-
# Use this to get data from the couchdb instance for a record from calisphere
# defaults to the staging environment
import sys
import argparse
import urllib
import ConfigParser
import requests
from get_solr_json import get_solr_json
url_couchdb = 'https://harvest-stg.cdlib.org/couchdb/ucldc/'
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
def main(objid):
config = ConfigParser.SafeConfigParser()
config.read('report.ini')
solr_url = config.get('stg-index', 'solrUrl')
api_key = config.get('stg-index', 'solrAuth')
query = { 'q': objid }
resp = get_solr_json(solr_url, query, api_key=api_key)
doc = resp['response']['docs'][0]
    url_couch_doc = url_couchdb + urllib.quote(doc['harvest_id_s'], safe='')
couch_doc = requests.get(url_couch_doc, verify=False).json()
print
print '==========================================================================='
print 'Calisphere/Solr ID: {}'.format(objid)
print 'CouchDB ID: {}'.format(doc['harvest_id_s'])
print 'isShownAt: {}'.format(couch_doc['isShownAt'])
print 'isShownBy: {}'.format(couch_doc.get('isShownBy', None))
print 'object?: {}'.format(couch_doc.get('object', None))
print '==========================================================================='
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('objid', nargs=1,)
argv = parser.parse_args()
sys.exit(main(argv.objid[0]))
# Copyright © 2016, Regents of the University of California
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# - Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# - Neither the name of the University of California nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
|
|
07e9f20a190e07650e064985b4eee7b10de442e5
|
build-scripts/pyget.py
|
build-scripts/pyget.py
|
#!/usr/bin/python
###
### Simple HTTP download utility for platforms without wget
###
### Copyright 2008 Steven J. Murdoch <http://www.cl.cam.ac.uk/users/sjm217/>
### See LICENSE for licensing information
###
### $Id$
###
import sys
import os
import urllib
import urlparse
from optparse import OptionParser
## Destination filename when no sensible default can be guessed
DEFAULT_DEST = "index.html"
## Create a URL opener which throws an exception on error
class DebugURLopener(urllib.FancyURLopener):
def http_error_default(self, url, fp, errcode, errmsg, headers):
_ = fp.read()
fp.close()
raise IOError, ('http error', errcode, errmsg, headers)
## Set this as the default URL opener
urllib._urlopener = DebugURLopener()
def main():
## Parse command line
usage = "Usage: %prog [options] URL\n\nDownload URL to file."
parser = OptionParser(usage)
parser.set_defaults(verbose=True)
parser.add_option("-O", "--output-document", dest="dest",
help="write document to DEST")
parser.add_option("-q", "--quiet", action="store_false", dest="verbose",
help="don't show debugging information")
(options, args) = parser.parse_args()
if len(args) != 1:
parser.error("Missing URL")
## Get URL
url = args[0]
## Get destination filename
if options.dest:
dest = options.dest
else:
url_components = urlparse.urlsplit(url)
dest = os.path.basename(url_components.path).strip()
if dest == "":
dest = DEFAULT_DEST
## Download URL
if options.verbose:
print "Downloading %s to %s..."%(url, dest)
urllib.urlretrieve(url, dest)
if options.verbose:
print "Download was successful."
if __name__ == "__main__":
main()
|
Add a wget replacement for platforms that don't have wget
|
Add a wget replacement for platforms that don't have wget
svn:r13313
|
Python
|
bsd-3-clause
|
Shondoit/torbrowser,Shondoit/torbrowser,Shondoit/torbrowser,Shondoit/torbrowser
|
Add a wget replacement for platforms that don't have wget
svn:r13313
|
#!/usr/bin/python
###
### Simple HTTP download utility for platforms without wget
###
### Copyright 2008 Steven J. Murdoch <http://www.cl.cam.ac.uk/users/sjm217/>
### See LICENSE for licensing information
###
### $Id$
###
import sys
import os
import urllib
import urlparse
from optparse import OptionParser
## Destination filename when no sensible default can be guessed
DEFAULT_DEST = "index.html"
## Create a URL opener which throws an exception on error
class DebugURLopener(urllib.FancyURLopener):
def http_error_default(self, url, fp, errcode, errmsg, headers):
_ = fp.read()
fp.close()
raise IOError, ('http error', errcode, errmsg, headers)
## Set this as the default URL opener
urllib._urlopener = DebugURLopener()
def main():
## Parse command line
usage = "Usage: %prog [options] URL\n\nDownload URL to file."
parser = OptionParser(usage)
parser.set_defaults(verbose=True)
parser.add_option("-O", "--output-document", dest="dest",
help="write document to DEST")
parser.add_option("-q", "--quiet", action="store_false", dest="verbose",
help="don't show debugging information")
(options, args) = parser.parse_args()
if len(args) != 1:
parser.error("Missing URL")
## Get URL
url = args[0]
## Get destination filename
if options.dest:
dest = options.dest
else:
url_components = urlparse.urlsplit(url)
dest = os.path.basename(url_components.path).strip()
if dest == "":
dest = DEFAULT_DEST
## Download URL
if options.verbose:
print "Downloading %s to %s..."%(url, dest)
urllib.urlretrieve(url, dest)
if options.verbose:
print "Download was successful."
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add a wget replacement for platforms that don't have wget
svn:r13313<commit_after>
|
#!/usr/bin/python
###
### Simple HTTP download utility for platforms without wget
###
### Copyright 2008 Steven J. Murdoch <http://www.cl.cam.ac.uk/users/sjm217/>
### See LICENSE for licensing information
###
### $Id$
###
import sys
import os
import urllib
import urlparse
from optparse import OptionParser
## Destination filename when no sensible default can be guessed
DEFAULT_DEST = "index.html"
## Create a URL opener which throws an exception on error
class DebugURLopener(urllib.FancyURLopener):
def http_error_default(self, url, fp, errcode, errmsg, headers):
_ = fp.read()
fp.close()
raise IOError, ('http error', errcode, errmsg, headers)
## Set this as the default URL opener
urllib._urlopener = DebugURLopener()
def main():
## Parse command line
usage = "Usage: %prog [options] URL\n\nDownload URL to file."
parser = OptionParser(usage)
parser.set_defaults(verbose=True)
parser.add_option("-O", "--output-document", dest="dest",
help="write document to DEST")
parser.add_option("-q", "--quiet", action="store_false", dest="verbose",
help="don't show debugging information")
(options, args) = parser.parse_args()
if len(args) != 1:
parser.error("Missing URL")
## Get URL
url = args[0]
## Get destination filename
if options.dest:
dest = options.dest
else:
url_components = urlparse.urlsplit(url)
dest = os.path.basename(url_components.path).strip()
if dest == "":
dest = DEFAULT_DEST
## Download URL
if options.verbose:
print "Downloading %s to %s..."%(url, dest)
urllib.urlretrieve(url, dest)
if options.verbose:
print "Download was successful."
if __name__ == "__main__":
main()
|
Add a wget replacement for platforms that don't have wget
svn:r13313
#!/usr/bin/python
###
### Simple HTTP download utility for platforms without wget
###
### Copyright 2008 Steven J. Murdoch <http://www.cl.cam.ac.uk/users/sjm217/>
### See LICENSE for licensing information
###
### $Id$
###
import sys
import os
import urllib
import urlparse
from optparse import OptionParser
## Destination filename when no sensible default can be guessed
DEFAULT_DEST = "index.html"
## Create a URL opener which throws an exception on error
class DebugURLopener(urllib.FancyURLopener):
def http_error_default(self, url, fp, errcode, errmsg, headers):
_ = fp.read()
fp.close()
raise IOError, ('http error', errcode, errmsg, headers)
## Set this as the default URL opener
urllib._urlopener = DebugURLopener()
def main():
## Parse command line
usage = "Usage: %prog [options] URL\n\nDownload URL to file."
parser = OptionParser(usage)
parser.set_defaults(verbose=True)
parser.add_option("-O", "--output-document", dest="dest",
help="write document to DEST")
parser.add_option("-q", "--quiet", action="store_false", dest="verbose",
help="don't show debugging information")
(options, args) = parser.parse_args()
if len(args) != 1:
parser.error("Missing URL")
## Get URL
url = args[0]
## Get destination filename
if options.dest:
dest = options.dest
else:
url_components = urlparse.urlsplit(url)
dest = os.path.basename(url_components.path).strip()
if dest == "":
dest = DEFAULT_DEST
## Download URL
if options.verbose:
print "Downloading %s to %s..."%(url, dest)
urllib.urlretrieve(url, dest)
if options.verbose:
print "Download was successful."
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add a wget replacement for platforms that don't have wget
svn:r13313<commit_after>#!/usr/bin/python
###
### Simple HTTP download utility for platforms without wget
###
### Copyright 2008 Steven J. Murdoch <http://www.cl.cam.ac.uk/users/sjm217/>
### See LICENSE for licensing information
###
### $Id$
###
import sys
import os
import urllib
import urlparse
from optparse import OptionParser
## Destination filename when no sensible default can be guessed
DEFAULT_DEST = "index.html"
## Create a URL opener which throws an exception on error
class DebugURLopener(urllib.FancyURLopener):
def http_error_default(self, url, fp, errcode, errmsg, headers):
_ = fp.read()
fp.close()
raise IOError, ('http error', errcode, errmsg, headers)
## Set this as the default URL opener
urllib._urlopener = DebugURLopener()
def main():
## Parse command line
usage = "Usage: %prog [options] URL\n\nDownload URL to file."
parser = OptionParser(usage)
parser.set_defaults(verbose=True)
parser.add_option("-O", "--output-document", dest="dest",
help="write document to DEST")
parser.add_option("-q", "--quiet", action="store_false", dest="verbose",
help="don't show debugging information")
(options, args) = parser.parse_args()
if len(args) != 1:
parser.error("Missing URL")
## Get URL
url = args[0]
## Get destination filename
if options.dest:
dest = options.dest
else:
url_components = urlparse.urlsplit(url)
dest = os.path.basename(url_components.path).strip()
if dest == "":
dest = DEFAULT_DEST
## Download URL
if options.verbose:
print "Downloading %s to %s..."%(url, dest)
urllib.urlretrieve(url, dest)
if options.verbose:
print "Download was successful."
if __name__ == "__main__":
main()
|
|
1c5bad48cf058022a14756615d0faa5833a63501
|
src/encoded/tests/data/inserts/test_types_pipeline.py
|
src/encoded/tests/data/inserts/test_types_pipeline.py
|
import pytest
def test_analysis_step_version_name_calcprop(testapp, analysis_step_version):
assert analysis_step_version['minor_version'] == 0
assert analysis_step_version['name'] == 'fastqc-step-v-1-0'
assert analysis_step_version['@id'] == '/analysis-step-versions/fastqc-step-v-1-0/'
|
Add test for calcprops in pipeline related objects
|
Add test for calcprops in pipeline related objects
|
Python
|
mit
|
ENCODE-DCC/encoded,ENCODE-DCC/encoded,ENCODE-DCC/encoded,ENCODE-DCC/encoded
|
Add test for calcprops in pipeline related objects
|
import pytest
def test_analysis_step_version_name_calcprop(testapp, analysis_step_version):
assert analysis_step_version['minor_version'] == 0
assert analysis_step_version['name'] == 'fastqc-step-v-1-0'
assert analysis_step_version['@id'] == '/analysis-step-versions/fastqc-step-v-1-0/'
|
<commit_before><commit_msg>Add test for calcprops in pipeline related objects<commit_after>
|
import pytest
def test_analysis_step_version_name_calcprop(testapp, analysis_step_version):
assert analysis_step_version['minor_version'] == 0
assert analysis_step_version['name'] == 'fastqc-step-v-1-0'
assert analysis_step_version['@id'] == '/analysis-step-versions/fastqc-step-v-1-0/'
|
Add test for calcprops in pipeline related objects
import pytest
def test_analysis_step_version_name_calcprop(testapp, analysis_step_version):
assert analysis_step_version['minor_version'] == 0
assert analysis_step_version['name'] == 'fastqc-step-v-1-0'
assert analysis_step_version['@id'] == '/analysis-step-versions/fastqc-step-v-1-0/'
|
<commit_before><commit_msg>Add test for calcprops in pipeline related objects<commit_after>import pytest
def test_analysis_step_version_name_calcprop(testapp, analysis_step_version):
assert analysis_step_version['minor_version'] == 0
assert analysis_step_version['name'] == 'fastqc-step-v-1-0'
assert analysis_step_version['@id'] == '/analysis-step-versions/fastqc-step-v-1-0/'
|
|
48c9860f506bf08fe733ae2033e0ecbc1472d58e
|
tests/blueprints/user/test_views_current_user.py
|
tests/blueprints/user/test_views_current_user.py
|
"""
:Copyright: 2006-2018 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
from tests.base import AbstractAppTestCase
class CurrentUserTestCase(AbstractAppTestCase):
def setUp(self):
super().setUp()
self.create_brand_and_party()
def test_when_logged_in(self):
user = self.create_user('McFly')
self.create_session_token(user.id)
response = self.send_request(user=user)
assert response.status_code == 200
assert response.mimetype == 'text/html'
def test_when_not_logged_in(self):
response = self.send_request()
assert response.status_code == 404
assert response.mimetype == 'text/html'
# helpers
def send_request(self, *, user=None):
url = '/users/me'
with self.client(user=user) as client:
return client.get(url)
|
Test response code of current user's (HTML) page
|
Test response code of current user's (HTML) page
|
Python
|
bsd-3-clause
|
homeworkprod/byceps,homeworkprod/byceps,m-ober/byceps,homeworkprod/byceps,m-ober/byceps,m-ober/byceps
|
Test response code of current user's (HTML) page
|
"""
:Copyright: 2006-2018 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
from tests.base import AbstractAppTestCase
class CurrentUserTestCase(AbstractAppTestCase):
def setUp(self):
super().setUp()
self.create_brand_and_party()
def test_when_logged_in(self):
user = self.create_user('McFly')
self.create_session_token(user.id)
response = self.send_request(user=user)
assert response.status_code == 200
assert response.mimetype == 'text/html'
def test_when_not_logged_in(self):
response = self.send_request()
assert response.status_code == 404
assert response.mimetype == 'text/html'
# helpers
def send_request(self, *, user=None):
url = '/users/me'
with self.client(user=user) as client:
return client.get(url)
|
<commit_before><commit_msg>Test response code of current user's (HTML) page<commit_after>
|
"""
:Copyright: 2006-2018 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
from tests.base import AbstractAppTestCase
class CurrentUserTestCase(AbstractAppTestCase):
def setUp(self):
super().setUp()
self.create_brand_and_party()
def test_when_logged_in(self):
user = self.create_user('McFly')
self.create_session_token(user.id)
response = self.send_request(user=user)
assert response.status_code == 200
assert response.mimetype == 'text/html'
def test_when_not_logged_in(self):
response = self.send_request()
assert response.status_code == 404
assert response.mimetype == 'text/html'
# helpers
def send_request(self, *, user=None):
url = '/users/me'
with self.client(user=user) as client:
return client.get(url)
|
Test response code of current user's (HTML) page
"""
:Copyright: 2006-2018 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
from tests.base import AbstractAppTestCase
class CurrentUserTestCase(AbstractAppTestCase):
def setUp(self):
super().setUp()
self.create_brand_and_party()
def test_when_logged_in(self):
user = self.create_user('McFly')
self.create_session_token(user.id)
response = self.send_request(user=user)
assert response.status_code == 200
assert response.mimetype == 'text/html'
def test_when_not_logged_in(self):
response = self.send_request()
assert response.status_code == 404
assert response.mimetype == 'text/html'
# helpers
def send_request(self, *, user=None):
url = '/users/me'
with self.client(user=user) as client:
return client.get(url)
|
<commit_before><commit_msg>Test response code of current user's (HTML) page<commit_after>"""
:Copyright: 2006-2018 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
from tests.base import AbstractAppTestCase
class CurrentUserTestCase(AbstractAppTestCase):
def setUp(self):
super().setUp()
self.create_brand_and_party()
def test_when_logged_in(self):
user = self.create_user('McFly')
self.create_session_token(user.id)
response = self.send_request(user=user)
assert response.status_code == 200
assert response.mimetype == 'text/html'
def test_when_not_logged_in(self):
response = self.send_request()
assert response.status_code == 404
assert response.mimetype == 'text/html'
# helpers
def send_request(self, *, user=None):
url = '/users/me'
with self.client(user=user) as client:
return client.get(url)
|
|
1b8085059f3c0983e3696e118c718ec44904097c
|
backend/breach/forms.py
|
backend/breach/forms.py
|
from django.forms import ModelForm
from breach.models import Target
class TargetForm(ModelForm):
class Meta:
model = Target
fields = (
'name',
'endpoint',
'prefix',
'alphabet',
'secretlength',
'alignmentalphabet',
'recordscardinality',
'method'
)
|
Add form validation for target
|
Add form validation for target
|
Python
|
mit
|
dimriou/rupture,dimriou/rupture,dionyziz/rupture,esarafianou/rupture,dionyziz/rupture,esarafianou/rupture,dimriou/rupture,dimkarakostas/rupture,dionyziz/rupture,dimriou/rupture,dimkarakostas/rupture,dionyziz/rupture,esarafianou/rupture,dionyziz/rupture,dimriou/rupture,dimkarakostas/rupture,esarafianou/rupture,dimkarakostas/rupture,dimkarakostas/rupture
|
Add form validation for target
|
from django.forms import ModelForm
from breach.models import Target
class TargetForm(ModelForm):
class Meta:
model = Target
fields = (
'name',
'endpoint',
'prefix',
'alphabet',
'secretlength',
'alignmentalphabet',
'recordscardinality',
'method'
)
|
<commit_before><commit_msg>Add form validation for target<commit_after>
|
from django.forms import ModelForm
from breach.models import Target
class TargetForm(ModelForm):
class Meta:
model = Target
fields = (
'name',
'endpoint',
'prefix',
'alphabet',
'secretlength',
'alignmentalphabet',
'recordscardinality',
'method'
)
|
Add form validation for targetfrom django.forms import ModelForm
from breach.models import Target
class TargetForm(ModelForm):
class Meta:
model = Target
fields = (
'name',
'endpoint',
'prefix',
'alphabet',
'secretlength',
'alignmentalphabet',
'recordscardinality',
'method'
)
|
<commit_before><commit_msg>Add form validation for target<commit_after>from django.forms import ModelForm
from breach.models import Target
class TargetForm(ModelForm):
class Meta:
model = Target
fields = (
'name',
'endpoint',
'prefix',
'alphabet',
'secretlength',
'alignmentalphabet',
'recordscardinality',
'method'
)
|
|
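A minimal sketch of how a ModelForm such as TargetForm above is typically exercised (the field values, including the 'method' choice, are invented, and it assumes the project's Django settings are configured):

# Hypothetical usage of TargetForm; field values below are illustrative only.
from breach.forms import TargetForm

form = TargetForm(data={
    'name': 'demo',
    'endpoint': 'https://example.com/',
    'prefix': '',
    'alphabet': 'abcdef0123456789',
    'secretlength': 9,
    'alignmentalphabet': 'ABCDEF',
    'recordscardinality': 1,
    'method': 'serial',
})
if form.is_valid():       # runs field-level and model-level validation
    target = form.save()  # persists a validated Target instance
else:
    print(form.errors)    # per-field validation messages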
a9cf7e78edbe4623115b2be43399fb65c4f5bc7e
|
data/migrations/v0.9.0-prune-toi.py
|
data/migrations/v0.9.0-prune-toi.py
|
from redis import StrictRedis
from tilequeue.cache import RedisCacheIndex
from tilequeue.config import make_config_from_argparse
from tilequeue.tile import coord_unmarshall_int
cfg = make_config_from_argparse('/etc/tilequeue/config.yaml')
redis_client = StrictRedis(cfg.redis_host)
cache_index = RedisCacheIndex(redis_client)
tiles_of_interest = cache_index.fetch_tiles_of_interest()
coord_ints_to_remove = set()
for coord_int in tiles_of_interest:
coord = coord_unmarshall_int(coord_int)
if coord.zoom > 16:
coord_ints_to_remove.add(coord_int)
buf = []
batch_size = 100
for coord_int in coord_ints_to_remove:
buf.append(coord_int)
if len(buf) == batch_size:
redis_client.srem(cache_index.cache_set_key, *buf)
del buf[:]
if buf:
redis_client.srem(cache_index.cache_set_key, *buf)
|
Add migration to prune toi
|
Add migration to prune toi
|
Python
|
mit
|
mapzen/vector-datasource,mapzen/vector-datasource,mapzen/vector-datasource
|
Add migration to prune toi
|
from redis import StrictRedis
from tilequeue.cache import RedisCacheIndex
from tilequeue.config import make_config_from_argparse
from tilequeue.tile import coord_unmarshall_int
cfg = make_config_from_argparse('/etc/tilequeue/config.yaml')
redis_client = StrictRedis(cfg.redis_host)
cache_index = RedisCacheIndex(redis_client)
tiles_of_interest = cache_index.fetch_tiles_of_interest()
coord_ints_to_remove = set()
for coord_int in tiles_of_interest:
coord = coord_unmarshall_int(coord_int)
if coord.zoom > 16:
coord_ints_to_remove.add(coord_int)
buf = []
batch_size = 100
for coord_int in coord_ints_to_remove:
buf.append(coord_int)
if len(buf) == batch_size:
redis_client.srem(cache_index.cache_set_key, *buf)
del buf[:]
if buf:
redis_client.srem(cache_index.cache_set_key, *buf)
|
<commit_before><commit_msg>Add migration to prune toi<commit_after>
|
from redis import StrictRedis
from tilequeue.cache import RedisCacheIndex
from tilequeue.config import make_config_from_argparse
from tilequeue.tile import coord_unmarshall_int
cfg = make_config_from_argparse('/etc/tilequeue/config.yaml')
redis_client = StrictRedis(cfg.redis_host)
cache_index = RedisCacheIndex(redis_client)
tiles_of_interest = cache_index.fetch_tiles_of_interest()
coord_ints_to_remove = set()
for coord_int in tiles_of_interest:
coord = coord_unmarshall_int(coord_int)
if coord.zoom > 16:
coord_ints_to_remove.add(coord_int)
buf = []
batch_size = 100
for coord_int in coord_ints_to_remove:
buf.append(coord_int)
if len(buf) == batch_size:
redis_client.srem(cache_index.cache_set_key, *buf)
del buf[:]
if buf:
redis_client.srem(cache_index.cache_set_key, *buf)
|
Add migration to prune toifrom redis import StrictRedis
from tilequeue.cache import RedisCacheIndex
from tilequeue.config import make_config_from_argparse
from tilequeue.tile import coord_unmarshall_int
cfg = make_config_from_argparse('/etc/tilequeue/config.yaml')
redis_client = StrictRedis(cfg.redis_host)
cache_index = RedisCacheIndex(redis_client)
tiles_of_interest = cache_index.fetch_tiles_of_interest()
coord_ints_to_remove = set()
for coord_int in tiles_of_interest:
coord = coord_unmarshall_int(coord_int)
if coord.zoom > 16:
coord_ints_to_remove.add(coord_int)
buf = []
batch_size = 100
for coord_int in coord_ints_to_remove:
buf.append(coord_int)
if len(buf) == batch_size:
redis_client.srem(cache_index.cache_set_key, *buf)
del buf[:]
if buf:
redis_client.srem(cache_index.cache_set_key, *buf)
|
<commit_before><commit_msg>Add migration to prune toi<commit_after>from redis import StrictRedis
from tilequeue.cache import RedisCacheIndex
from tilequeue.config import make_config_from_argparse
from tilequeue.tile import coord_unmarshall_int
cfg = make_config_from_argparse('/etc/tilequeue/config.yaml')
redis_client = StrictRedis(cfg.redis_host)
cache_index = RedisCacheIndex(redis_client)
tiles_of_interest = cache_index.fetch_tiles_of_interest()
coord_ints_to_remove = set()
for coord_int in tiles_of_interest:
coord = coord_unmarshall_int(coord_int)
if coord.zoom > 16:
coord_ints_to_remove.add(coord_int)
buf = []
batch_size = 100
for coord_int in coord_ints_to_remove:
buf.append(coord_int)
if len(buf) == batch_size:
redis_client.srem(cache_index.cache_set_key, *buf)
del buf[:]
if buf:
redis_client.srem(cache_index.cache_set_key, *buf)
|
|
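The buffered srem pattern in the migration above generalizes to any large set deletion; a sketch under the same assumptions (function and variable names are invented for illustration):

# Chunked SREM helper in the spirit of the migration above.
def srem_in_batches(redis_client, key, members, batch_size=100):
    buf = []
    for member in members:
        buf.append(member)
        if len(buf) == batch_size:
            redis_client.srem(key, *buf)  # one round trip per full batch
            del buf[:]
    if buf:
        redis_client.srem(key, *buf)      # flush the final partial batch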
b22d2e023affc9621235eb447d21c784405577d7
|
Numerics/cmake/explore_python_config.py
|
Numerics/cmake/explore_python_config.py
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2007, Simon Edwards <simon@simonzone.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Simon Edwards <simon@simonzone.com> nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY Simon Edwards <simon@simonzone.com> ''AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL Simon Edwards <simon@simonzone.com> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# FindLibPython.py
# Copyright (c) 2007, Simon Edwards <simon@simonzone.com>
# Redistribution and use is allowed according to the terms of the BSD license.
# For details see the accompanying COPYING-CMAKE-SCRIPTS file.
import sys
import distutils.sysconfig
print("exec_prefix:%s" % sys.exec_prefix)
print("version:%s" % sys.version[:3])
print("py_inc_dir:%s" % distutils.sysconfig.get_python_inc())
print("site_packages_dir:%s" % distutils.sysconfig.get_python_lib(plat_specific=1))
|
Add missing python script to explore python config and set vars in cmake
|
Add missing python script to explore python config and set vars in cmake
|
Python
|
apache-2.0
|
siconos/siconos-deb,siconos/siconos-deb,radarsat1/siconos,radarsat1/siconos,fperignon/siconos,siconos/siconos-deb,bremond/siconos,bremond/siconos,fperignon/siconos,fperignon/siconos,siconos/siconos-deb,bremond/siconos,siconos/siconos-deb,radarsat1/siconos,fperignon/siconos,siconos/siconos,bremond/siconos,radarsat1/siconos,radarsat1/siconos,siconos/siconos,siconos/siconos,siconos/siconos-deb,siconos/siconos,fperignon/siconos,bremond/siconos
|
Add missing python script to explore python config and set vars in cmake
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2007, Simon Edwards <simon@simonzone.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Simon Edwards <simon@simonzone.com> nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY Simon Edwards <simon@simonzone.com> ''AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL Simon Edwards <simon@simonzone.com> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# FindLibPython.py
# Copyright (c) 2007, Simon Edwards <simon@simonzone.com>
# Redistribution and use is allowed according to the terms of the BSD license.
# For details see the accompanying COPYING-CMAKE-SCRIPTS file.
import sys
import distutils.sysconfig
print("exec_prefix:%s" % sys.exec_prefix)
print("version:%s" % sys.version[:3])
print("py_inc_dir:%s" % distutils.sysconfig.get_python_inc())
print("site_packages_dir:%s" % distutils.sysconfig.get_python_lib(plat_specific=1))
|
<commit_before><commit_msg>Add missing python script to explore python config and set vars in cmake<commit_after>
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2007, Simon Edwards <simon@simonzone.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Simon Edwards <simon@simonzone.com> nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY Simon Edwards <simon@simonzone.com> ''AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL Simon Edwards <simon@simonzone.com> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# FindLibPython.py
# Copyright (c) 2007, Simon Edwards <simon@simonzone.com>
# Redistribution and use is allowed according to the terms of the BSD license.
# For details see the accompanying COPYING-CMAKE-SCRIPTS file.
import sys
import distutils.sysconfig
print("exec_prefix:%s" % sys.exec_prefix)
print("version:%s" % sys.version[:3])
print("py_inc_dir:%s" % distutils.sysconfig.get_python_inc())
print("site_packages_dir:%s" % distutils.sysconfig.get_python_lib(plat_specific=1))
|
Add missing python script to explore python config and set vars in cmake# -*- coding: utf-8 -*-
#
# Copyright (c) 2007, Simon Edwards <simon@simonzone.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Simon Edwards <simon@simonzone.com> nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY Simon Edwards <simon@simonzone.com> ''AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL Simon Edwards <simon@simonzone.com> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# FindLibPython.py
# Copyright (c) 2007, Simon Edwards <simon@simonzone.com>
# Redistribution and use is allowed according to the terms of the BSD license.
# For details see the accompanying COPYING-CMAKE-SCRIPTS file.
import sys
import distutils.sysconfig
print("exec_prefix:%s" % sys.exec_prefix)
print("version:%s" % sys.version[:3])
print("py_inc_dir:%s" % distutils.sysconfig.get_python_inc())
print("site_packages_dir:%s" % distutils.sysconfig.get_python_lib(plat_specific=1))
|
<commit_before><commit_msg>Add missing python script to explore python config and set vars in cmake<commit_after># -*- coding: utf-8 -*-
#
# Copyright (c) 2007, Simon Edwards <simon@simonzone.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Simon Edwards <simon@simonzone.com> nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY Simon Edwards <simon@simonzone.com> ''AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL Simon Edwards <simon@simonzone.com> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# FindLibPython.py
# Copyright (c) 2007, Simon Edwards <simon@simonzone.com>
# Redistribution and use is allowed according to the terms of the BSD license.
# For details see the accompanying COPYING-CMAKE-SCRIPTS file.
import sys
import distutils.sysconfig
print("exec_prefix:%s" % sys.exec_prefix)
print("version:%s" % sys.version[:3])
print("py_inc_dir:%s" % distutils.sysconfig.get_python_inc())
print("site_packages_dir:%s" % distutils.sysconfig.get_python_lib(plat_specific=1))
|
|
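Two details of the probe above have aged: sys.version[:3] truncates "3.10" to "3.1", and distutils was removed in Python 3.12. A rough modern equivalent, assuming only the standard library:

# Modern restatement of the probe above using sysconfig.
import sys
import sysconfig

print("exec_prefix:%s" % sys.exec_prefix)
print("version:%d.%d" % (sys.version_info.major, sys.version_info.minor))
print("py_inc_dir:%s" % sysconfig.get_path('include'))
print("site_packages_dir:%s" % sysconfig.get_path('platlib'))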
ea0675713e3fee74d7cf3ae8345ff1b1d2745c7b
|
Project5/two_dim_solver.py
|
Project5/two_dim_solver.py
|
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import seaborn as sns
sns.set()
fig = plt.figure()
N = 31
h = 1.0/(N-1)
t = 1
t_steps = 1000000
dt = 0.000001
alpha = dt / h**2
u = np.zeros(shape=(N,N))
n_figs = 200
fig_every = t_steps / n_figs
u[:,0] = u[0,:] = 1
u[:,-1] = u[-1,:] = 0
u_new = np.zeros(u.shape, type(u[0,0]))
ims = []
for k in range(t_steps):
u_new[1:-1,1:-1] = u[1:-1,1:-1] + alpha*(u[2:,1:-1]
- 4*u[1:-1,1:-1]
+ u[:-2,1:-1]
+ u[1:-1,2:]
+ u[1:-1,:-2])
if k % fig_every == 0:
im = plt.imshow(u_new[1:-1,1:-1], cmap='hot', animated=True, interpolation='bicubic')
text = plt.text(0.1, 0.9, str(k), bbox={'facecolor': 'white',
'alpha': 0.5,
'pad': 5})
ims.append([im,text])
temp = u_new
u_new = u
u = temp
ani = animation.ArtistAnimation(fig, ims, interval=50, blit=True,
repeat_delay=1000)
plt.show()
|
Add two dimensional forward python solver
|
Add two dimensional forward python solver
|
Python
|
mit
|
Caronthir/FYS3150,Caronthir/FYS3150,Caronthir/FYS3150
|
Add two dimensional forward python solver
|
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import seaborn as sns
sns.set()
fig = plt.figure()
N = 31
h = 1.0/(N-1)
t = 1
t_steps = 1000000
dt = 0.000001
alpha = dt / h**2
u = np.zeros(shape=(N,N))
n_figs = 200
fig_every = t_steps / n_figs
u[:,0] = u[0,:] = 1
u[:,-1] = u[-1,:] = 0
u_new = np.zeros(u.shape, type(u[0,0]))
ims = []
for k in range(t_steps):
u_new[1:-1,1:-1] = u[1:-1,1:-1] + alpha*(u[2:,1:-1]
- 4*u[1:-1,1:-1]
+ u[:-2,1:-1]
+ u[1:-1,2:]
+ u[1:-1,:-2])
if k % fig_every == 0:
im = plt.imshow(u_new[1:-1,1:-1], cmap='hot', animated=True, interpolation='bicubic')
text = plt.text(0.1, 0.9, str(k), bbox={'facecolor': 'white',
'alpha': 0.5,
'pad': 5})
ims.append([im,text])
temp = u_new
u_new = u
u = temp
ani = animation.ArtistAnimation(fig, ims, interval=50, blit=True,
repeat_delay=1000)
plt.show()
|
<commit_before><commit_msg>Add two dimensional forward python solver<commit_after>
|
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import seaborn as sns
sns.set()
fig = plt.figure()
N = 31
h = 1.0/(N-1)
t = 1
t_steps = 1000000
dt = 0.000001
alpha = dt / h**2
u = np.zeros(shape=(N,N))
n_figs = 200
fig_every = t_steps / n_figs
u[:,0] = u[0,:] = 1
u[:,-1] = u[-1,:] = 0
u_new = np.zeros(u.shape, type(u[0,0]))
ims = []
for k in range(t_steps):
u_new[1:-1,1:-1] = u[1:-1,1:-1] + alpha*(u[2:,1:-1]
- 4*u[1:-1,1:-1]
+ u[:-2,1:-1]
+ u[1:-1,2:]
+ u[1:-1,:-2])
if k % fig_every == 0:
im = plt.imshow(u_new[1:-1,1:-1], cmap='hot', animated=True, interpolation='bicubic')
text = plt.text(0.1, 0.9, str(k), bbox={'facecolor': 'white',
'alpha': 0.5,
'pad': 5})
ims.append([im,text])
temp = u_new
u_new = u
u = temp
ani = animation.ArtistAnimation(fig, ims, interval=50, blit=True,
repeat_delay=1000)
plt.show()
|
Add two dimensional forward python solverimport numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import seaborn as sns
sns.set()
fig = plt.figure()
N = 31
h = 1.0/(N-1)
t = 1
t_steps = 1000000
dt = 0.000001
alpha = dt / h**2
u = np.zeros(shape=(N,N))
n_figs = 200
fig_every = t_steps / n_figs
u[:,0] = u[0,:] = 1
u[:,-1] = u[-1,:] = 0
u_new = np.zeros(u.shape, type(u[0,0]))
ims = []
for k in range(t_steps):
u_new[1:-1,1:-1] = u[1:-1,1:-1] + alpha*(u[2:,1:-1]
- 4*u[1:-1,1:-1]
+ u[:-2,1:-1]
+ u[1:-1,2:]
+ u[1:-1,:-2])
if k % fig_every == 0:
im = plt.imshow(u_new[1:-1,1:-1], cmap='hot', animated=True, interpolation='bicubic')
text = plt.text(0.1, 0.9, str(k), bbox={'facecolor': 'white',
'alpha': 0.5,
'pad': 5})
ims.append([im,text])
temp = u_new
u_new = u
u = temp
ani = animation.ArtistAnimation(fig, ims, interval=50, blit=True,
repeat_delay=1000)
plt.show()
|
<commit_before><commit_msg>Add two dimensional forward python solver<commit_after>import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import seaborn as sns
sns.set()
fig = plt.figure()
N = 31
h = 1.0/(N-1)
t = 1
t_steps = 1000000
dt = 0.000001
alpha = dt / h**2
u = np.zeros(shape=(N,N))
n_figs = 200
fig_every = t_steps / n_figs
u[:,0] = u[0,:] = 1
u[:,-1] = u[-1,:] = 0
u_new = np.zeros(u.shape, type(u[0,0]))
ims = []
for k in range(t_steps):
u_new[1:-1,1:-1] = u[1:-1,1:-1] + alpha*(u[2:,1:-1]
- 4*u[1:-1,1:-1]
+ u[:-2,1:-1]
+ u[1:-1,2:]
+ u[1:-1,:-2])
if k % fig_every == 0:
im = plt.imshow(u_new[1:-1,1:-1], cmap='hot', animated=True, interpolation='bicubic')
text = plt.text(0.1, 0.9, str(k), bbox={'facecolor': 'white',
'alpha': 0.5,
'pad': 5})
ims.append([im,text])
temp = u_new
u_new = u
u = temp
ani = animation.ArtistAnimation(fig, ims, interval=50, blit=True,
repeat_delay=1000)
plt.show()
|
|
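The solver above uses the explicit (FTCS) scheme, which for the 2D heat equation is only stable when alpha = dt / h**2 <= 1/4; a quick check with the constants from the script:

# Stability check for the explicit 2D scheme above.
N = 31
h = 1.0 / (N - 1)    # ~0.0333
dt = 0.000001
alpha = dt / h**2    # ~0.0009
assert alpha <= 0.25, "explicit 2D heat scheme would be unstable"
print(alpha)         # 0.0009, comfortably inside the stability bound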
12d239d62c293cdb1a3fa1a69df06bf9c8e65366
|
grip/github_renderer.py
|
grip/github_renderer.py
|
from flask import abort, json
import requests
def render_content(text, gfm=False, context=None, username=None, password=None):
"""Renders the specified markup using the GitHub API."""
if gfm:
url = 'https://api.github.com/markdown'
data = {'text': text, 'mode': 'gfm', 'context': context}
if context:
data['context'] = context
data = json.dumps(data)
else:
url = 'https://api.github.com/markdown/raw'
data = text
headers = {'content-type': 'text/plain'}
auth = (username, password) if username else None
r = requests.post(url, headers=headers, data=data, auth=auth)
# Relay HTTP errors
if r.status_code != 200:
try:
message = r.json()['message']
except:
message = r.text
abort(r.status_code, message)
return r.text
|
from flask import abort, json
import requests
def render_content(text, gfm=False, context=None,
username=None, password=None):
"""Renders the specified markup using the GitHub API."""
if gfm:
url = 'https://api.github.com/markdown'
data = {'text': text, 'mode': 'gfm'}
if context:
data['context'] = context
data = json.dumps(data)
else:
url = 'https://api.github.com/markdown/raw'
data = text
headers = {'content-type': 'text/plain'}
auth = (username, password) if username else None
r = requests.post(url, headers=headers, data=data, auth=auth)
# Relay HTTP errors
if r.status_code != 200:
try:
message = r.json()['message']
except:
message = r.text
abort(r.status_code, message)
return r.text
|
Remove duplicate 'context': context in GitHub renderer.
|
Remove duplicate 'context': context in GitHub renderer.
|
Python
|
mit
|
jbarreras/grip,ssundarraj/grip,joeyespo/grip,mgoddard-pivotal/grip,mgoddard-pivotal/grip,jbarreras/grip,joeyespo/grip,ssundarraj/grip
|
from flask import abort, json
import requests
def render_content(text, gfm=False, context=None, username=None, password=None):
"""Renders the specified markup using the GitHub API."""
if gfm:
url = 'https://api.github.com/markdown'
data = {'text': text, 'mode': 'gfm', 'context': context}
if context:
data['context'] = context
data = json.dumps(data)
else:
url = 'https://api.github.com/markdown/raw'
data = text
headers = {'content-type': 'text/plain'}
auth = (username, password) if username else None
r = requests.post(url, headers=headers, data=data, auth=auth)
# Relay HTTP errors
if r.status_code != 200:
try:
message = r.json()['message']
except:
message = r.text
abort(r.status_code, message)
return r.text
Remove duplicate 'context': context in GitHub renderer.
|
from flask import abort, json
import requests
def render_content(text, gfm=False, context=None,
username=None, password=None):
"""Renders the specified markup using the GitHub API."""
if gfm:
url = 'https://api.github.com/markdown'
data = {'text': text, 'mode': 'gfm'}
if context:
data['context'] = context
data = json.dumps(data)
else:
url = 'https://api.github.com/markdown/raw'
data = text
headers = {'content-type': 'text/plain'}
auth = (username, password) if username else None
r = requests.post(url, headers=headers, data=data, auth=auth)
# Relay HTTP errors
if r.status_code != 200:
try:
message = r.json()['message']
except:
message = r.text
abort(r.status_code, message)
return r.text
|
<commit_before>from flask import abort, json
import requests
def render_content(text, gfm=False, context=None, username=None, password=None):
"""Renders the specified markup using the GitHub API."""
if gfm:
url = 'https://api.github.com/markdown'
data = {'text': text, 'mode': 'gfm', 'context': context}
if context:
data['context'] = context
data = json.dumps(data)
else:
url = 'https://api.github.com/markdown/raw'
data = text
headers = {'content-type': 'text/plain'}
auth = (username, password) if username else None
r = requests.post(url, headers=headers, data=data, auth=auth)
# Relay HTTP errors
if r.status_code != 200:
try:
message = r.json()['message']
except:
message = r.text
abort(r.status_code, message)
return r.text
<commit_msg>Remove duplicate 'context': context in GitHub renderer.<commit_after>
|
from flask import abort, json
import requests
def render_content(text, gfm=False, context=None,
username=None, password=None):
"""Renders the specified markup using the GitHub API."""
if gfm:
url = 'https://api.github.com/markdown'
data = {'text': text, 'mode': 'gfm'}
if context:
data['context'] = context
data = json.dumps(data)
else:
url = 'https://api.github.com/markdown/raw'
data = text
headers = {'content-type': 'text/plain'}
auth = (username, password) if username else None
r = requests.post(url, headers=headers, data=data, auth=auth)
# Relay HTTP errors
if r.status_code != 200:
try:
message = r.json()['message']
except:
message = r.text
abort(r.status_code, message)
return r.text
|
from flask import abort, json
import requests
def render_content(text, gfm=False, context=None, username=None, password=None):
"""Renders the specified markup using the GitHub API."""
if gfm:
url = 'https://api.github.com/markdown'
data = {'text': text, 'mode': 'gfm', 'context': context}
if context:
data['context'] = context
data = json.dumps(data)
else:
url = 'https://api.github.com/markdown/raw'
data = text
headers = {'content-type': 'text/plain'}
auth = (username, password) if username else None
r = requests.post(url, headers=headers, data=data, auth=auth)
# Relay HTTP errors
if r.status_code != 200:
try:
message = r.json()['message']
except:
message = r.text
abort(r.status_code, message)
return r.text
Remove duplicate 'context': context in GitHub renderer.from flask import abort, json
import requests
def render_content(text, gfm=False, context=None,
username=None, password=None):
"""Renders the specified markup using the GitHub API."""
if gfm:
url = 'https://api.github.com/markdown'
data = {'text': text, 'mode': 'gfm'}
if context:
data['context'] = context
data = json.dumps(data)
else:
url = 'https://api.github.com/markdown/raw'
data = text
headers = {'content-type': 'text/plain'}
auth = (username, password) if username else None
r = requests.post(url, headers=headers, data=data, auth=auth)
# Relay HTTP errors
if r.status_code != 200:
try:
message = r.json()['message']
except:
message = r.text
abort(r.status_code, message)
return r.text
|
<commit_before>from flask import abort, json
import requests
def render_content(text, gfm=False, context=None, username=None, password=None):
"""Renders the specified markup using the GitHub API."""
if gfm:
url = 'https://api.github.com/markdown'
data = {'text': text, 'mode': 'gfm', 'context': context}
if context:
data['context'] = context
data = json.dumps(data)
else:
url = 'https://api.github.com/markdown/raw'
data = text
headers = {'content-type': 'text/plain'}
auth = (username, password) if username else None
r = requests.post(url, headers=headers, data=data, auth=auth)
# Relay HTTP errors
if r.status_code != 200:
try:
message = r.json()['message']
except:
message = r.text
abort(r.status_code, message)
return r.text
<commit_msg>Remove duplicate 'context': context in GitHub renderer.<commit_after>from flask import abort, json
import requests
def render_content(text, gfm=False, context=None,
username=None, password=None):
"""Renders the specified markup using the GitHub API."""
if gfm:
url = 'https://api.github.com/markdown'
data = {'text': text, 'mode': 'gfm'}
if context:
data['context'] = context
data = json.dumps(data)
else:
url = 'https://api.github.com/markdown/raw'
data = text
headers = {'content-type': 'text/plain'}
auth = (username, password) if username else None
r = requests.post(url, headers=headers, data=data, auth=auth)
# Relay HTTP errors
if r.status_code != 200:
try:
message = r.json()['message']
except:
message = r.text
abort(r.status_code, message)
return r.text
|
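The payload difference behind the fix above is plain json.dumps behavior: the pre-fix dict always carried a context key, serialized as null when no context was given, while the post-fix code only adds the key when one is set:

# Demonstration of the serialized payloads before and after the fix.
import json

old = {'text': 'hi', 'mode': 'gfm', 'context': None}  # pre-fix shape
new = {'text': 'hi', 'mode': 'gfm'}                   # post-fix shape, no context
print(json.dumps(old))  # {"text": "hi", "mode": "gfm", "context": null}
print(json.dumps(new))  # {"text": "hi", "mode": "gfm"}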
84f7a4c7fcde6875fe5b414f4340cc15fc4e9740
|
tests/test_get_money.py
|
tests/test_get_money.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Tests for the `v2ex last` function
"""
from __future__ import absolute_import, unicode_literals
import pytest
from v2ex_daily_mission.cli import cli
@pytest.mark.usefixtures('mock_api')
class TestGetMoney():
def test_get_money(self, runner):
result = runner.invoke(cli, ['--config', './tests/v2ex_config.json',
'sign'])
assert result.exit_code == 0
assert result.output.strip() == (
'You have completed the mission today.')
|
Add tests for `v2ex sign`
|
Add tests for `v2ex sign`
|
Python
|
mit
|
lord63/v2ex_daily_mission,lord63/v2ex_daily_mission
|
Add tests for `v2ex sign`
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Tests for the `v2ex last` function
"""
from __future__ import absolute_import, unicode_literals
import pytest
from v2ex_daily_mission.cli import cli
@pytest.mark.usefixtures('mock_api')
class TestGetMoney():
def test_get_money(self, runner):
result = runner.invoke(cli, ['--config', './tests/v2ex_config.json',
'sign'])
assert result.exit_code == 0
assert result.output.strip() == (
'You have completed the mission today.')
|
<commit_before><commit_msg>Add tests for `v2ex sign`<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Tests for the `v2ex last` function
"""
from __future__ import absolute_import, unicode_literals
import pytest
from v2ex_daily_mission.cli import cli
@pytest.mark.usefixtures('mock_api')
class TestGetMoney():
def test_get_money(self, runner):
result = runner.invoke(cli, ['--config', './tests/v2ex_config.json',
'sign'])
assert result.exit_code == 0
assert result.output.strip() == (
'You have completed the mission today.')
|
Add tests for `v2ex sign`#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Tests for the `v2ex last` function
"""
from __future__ import absolute_import, unicode_literals
import pytest
from v2ex_daily_mission.cli import cli
@pytest.mark.usefixtures('mock_api')
class TestGetMoney():
def test_get_money(self, runner):
result = runner.invoke(cli, ['--config', './tests/v2ex_config.json',
'sign'])
assert result.exit_code == 0
assert result.output.strip() == (
'You have completed the mission today.')
|
<commit_before><commit_msg>Add tests for `v2ex sign`<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Tests for the `v2ex last` function
"""
from __future__ import absolute_import, unicode_literals
import pytest
from v2ex_daily_mission.cli import cli
@pytest.mark.usefixtures('mock_api')
class TestGetMoney():
def test_get_money(self, runner):
result = runner.invoke(cli, ['--config', './tests/v2ex_config.json',
'sign'])
assert result.exit_code == 0
assert result.output.strip() == (
'You have completed the mission today.')
|
|
7df2b5f7fa48955a30fa30aaf25cb2194929bb9d
|
home/migrations/0001_rename_survey_app_to_paiji2_survey.py
|
home/migrations/0001_rename_survey_app_to_paiji2_survey.py
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
db.rename_table('survey_poll', 'paiji2_survey_poll')
db.rename_table('survey_vote', 'paiji2_survey_vote')
db.rename_table('survey_choice', 'paiji2_survey_choice')
def backwards(self, orm):
db.rename_table('paiji2_survey_poll', 'survey_poll')
db.rename_table('paiji2_survey_vote', 'survey_vote')
db.rename_table('paiji2_survey_choice', 'survey_choice')
models = {
}
complete_apps = ['home']
|
Add migration for survey application
|
Add migration for survey application
|
Python
|
agpl-3.0
|
rezometz/paiji2,rezometz/paiji2,rezometz/paiji2
|
Add migration for survey application
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
db.rename_table('survey_poll', 'paiji2_survey_poll')
db.rename_table('survey_vote', 'paiji2_survey_vote')
db.rename_table('survey_choice', 'paiji2_survey_choice')
def backwards(self, orm):
db.rename_table('paiji2_survey_poll', 'survey_poll')
db.rename_table('paiji2_survey_vote', 'survey_vote')
db.rename_table('paiji2_survey_choice', 'survey_choice')
models = {
}
complete_apps = ['home']
|
<commit_before><commit_msg>Add migration for survey application<commit_after>
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
db.rename_table('survey_poll', 'paiji2_survey_poll')
db.rename_table('survey_vote', 'paiji2_survey_vote')
db.rename_table('survey_choice', 'paiji2_survey_choice')
def backwards(self, orm):
db.rename_table('paiji2_survey_poll', 'survey_poll')
db.rename_table('paiji2_survey_vote', 'survey_vote')
db.rename_table('paiji2_survey_choice', 'survey_choice')
models = {
}
complete_apps = ['home']
|
Add migration for survey application# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
db.rename_table('survey_poll', 'paiji2_survey_poll')
db.rename_table('survey_vote', 'paiji2_survey_vote')
db.rename_table('survey_choice', 'paiji2_survey_choice')
def backwards(self, orm):
db.rename_table('paiji2_survey_poll', 'survey_poll')
db.rename_table('paiji2_survey_vote', 'survey_vote')
db.rename_table('paiji2_survey_choice', 'survey_choice')
models = {
}
complete_apps = ['home']
|
<commit_before><commit_msg>Add migration for survey application<commit_after># -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
db.rename_table('survey_poll', 'paiji2_survey_poll')
db.rename_table('survey_vote', 'paiji2_survey_vote')
db.rename_table('survey_choice', 'paiji2_survey_choice')
def backwards(self, orm):
db.rename_table('paiji2_survey_poll', 'survey_poll')
db.rename_table('paiji2_survey_vote', 'survey_vote')
db.rename_table('paiji2_survey_choice', 'survey_choice')
models = {
}
complete_apps = ['home']
|
|
209699334645a9e5d497ff9adc112e31b798c0b1
|
examples/freq_meter.py
|
examples/freq_meter.py
|
#!/usr/bin/env python3
from phasortoolbox import PDC,Client
import matplotlib.pyplot as plt
import numpy as np
import logging
logging.basicConfig(level=logging.DEBUG)
class FreqMeter(object):
def __init__(self):
x = np.linspace(-10.0, 0.0, num=300, endpoint=False)
y = [60.0]*300
plt.ion()
self.fig = plt.figure()
self.ax1 = self.fig.add_subplot(211)
self.line1, = self.ax1.plot(x, y)
plt.title('PMU1 Frequency Plot')
plt.xlabel('Time (s)')
plt.ylabel('Freq (Hz)')
self.ax2 = self.fig.add_subplot(212)
self.line2, = self.ax2.plot(x, y)
plt.title('PMU2 Frequency Plot')
plt.xlabel('Time (s)')
plt.ylabel('Freq (Hz)')
plt.tight_layout()
def update_plot(self, synchrophasors):
y_data = [[],[]]
for synchrophasor in synchrophasors:
for i, msg in enumerate(synchrophasor):
y_data[i].append(msg.data.pmu_data[0].freq)
self.line1.set_ydata(y_data[0])
self.line2.set_ydata(y_data[1])
self.ax1.set_ylim(min(y_data[0]),max(y_data[0]))
self.ax2.set_ylim(min(y_data[1]),max(y_data[1]))
self.fig.canvas.draw()
self.fig.canvas.flush_events()
if __name__ == '__main__':
pmu_client1 = Client(remote_ip='10.0.0.1', remote_port=4722, idcode=1, mode='TCP')
pmu_client2 = Client(remote_ip='10.0.0.2', remote_port=4722, idcode=2, mode='TCP')
fm = FreqMeter()
pdc = PDC(clients=[pmu_client1,pmu_client2],history=300)
pdc.callback = fm.update_plot
pdc.run()
|
Add a real-time frequency meter example
|
Add a real-time frequency meter example
|
Python
|
mit
|
sonusz/PhasorToolBox
|
Add a real-time frequency meter example
|
#!/usr/bin/env python3
from phasortoolbox import PDC,Client
import matplotlib.pyplot as plt
import numpy as np
import logging
logging.basicConfig(level=logging.DEBUG)
class FreqMeter(object):
def __init__(self):
x = np.linspace(-10.0, 0.0, num=300, endpoint=False)
y = [60.0]*300
plt.ion()
self.fig = plt.figure()
self.ax1 = self.fig.add_subplot(211)
self.line1, = self.ax1.plot(x, y)
plt.title('PMU1 Frequency Plot')
plt.xlabel('Time (s)')
plt.ylabel('Freq (Hz)')
self.ax2 = self.fig.add_subplot(212)
self.line2, = self.ax2.plot(x, y)
plt.title('PMU2 Frequency Plot')
plt.xlabel('Time (s)')
plt.ylabel('Freq (Hz)')
plt.tight_layout()
def update_plot(self, synchrophasors):
y_data = [[],[]]
for synchrophasor in synchrophasors:
for i, msg in enumerate(synchrophasor):
y_data[i].append(msg.data.pmu_data[0].freq)
self.line1.set_ydata(y_data[0])
self.line2.set_ydata(y_data[1])
self.ax1.set_ylim(min(y_data[0]),max(y_data[0]))
self.ax2.set_ylim(min(y_data[1]),max(y_data[1]))
self.fig.canvas.draw()
self.fig.canvas.flush_events()
if __name__ == '__main__':
pmu_client1 = Client(remote_ip='10.0.0.1', remote_port=4722, idcode=1, mode='TCP')
pmu_client2 = Client(remote_ip='10.0.0.2', remote_port=4722, idcode=2, mode='TCP')
fm = FreqMeter()
pdc = PDC(clients=[pmu_client1,pmu_client2],history=300)
pdc.callback = fm.update_plot
pdc.run()
|
<commit_before><commit_msg>Add a real-time frequency meter example<commit_after>
|
#!/usr/bin/env python3
from phasortoolbox import PDC,Client
import matplotlib.pyplot as plt
import numpy as np
import logging
logging.basicConfig(level=logging.DEBUG)
class FreqMeter(object):
def __init__(self):
x = np.linspace(-10.0, 0.0, num=300, endpoint=False)
y = [60.0]*300
plt.ion()
self.fig = plt.figure()
self.ax1 = self.fig.add_subplot(211)
self.line1, = self.ax1.plot(x, y)
plt.title('PMU1 Frequency Plot')
plt.xlabel('Time (s)')
plt.ylabel('Freq (Hz)')
self.ax2 = self.fig.add_subplot(212)
self.line2, = self.ax2.plot(x, y)
plt.title('PMU2 Frequency Plot')
plt.xlabel('Time (s)')
plt.ylabel('Freq (Hz)')
plt.tight_layout()
def update_plot(self, synchrophasors):
y_data = [[],[]]
for synchrophasor in synchrophasors:
for i, msg in enumerate(synchrophasor):
y_data[i].append(msg.data.pmu_data[0].freq)
self.line1.set_ydata(y_data[0])
self.line2.set_ydata(y_data[1])
self.ax1.set_ylim(min(y_data[0]),max(y_data[0]))
self.ax2.set_ylim(min(y_data[1]),max(y_data[1]))
self.fig.canvas.draw()
self.fig.canvas.flush_events()
if __name__ == '__main__':
pmu_client1 = Client(remote_ip='10.0.0.1', remote_port=4722, idcode=1, mode='TCP')
pmu_client2 = Client(remote_ip='10.0.0.2', remote_port=4722, idcode=2, mode='TCP')
fm = FreqMeter()
pdc = PDC(clients=[pmu_client1,pmu_client2],history=300)
pdc.callback = fm.update_plot
pdc.run()
|
Add a real-time frequency meter example#!/usr/bin/env python3
from phasortoolbox import PDC,Client
import matplotlib.pyplot as plt
import numpy as np
import logging
logging.basicConfig(level=logging.DEBUG)
class FreqMeter(object):
def __init__(self):
x = np.linspace(-10.0, 0.0, num=300, endpoint=False)
y = [60.0]*300
plt.ion()
self.fig = plt.figure()
self.ax1 = self.fig.add_subplot(211)
self.line1, = self.ax1.plot(x, y)
plt.title('PMU1 Frequency Plot')
plt.xlabel('Time (s)')
plt.ylabel('Freq (Hz)')
self.ax2 = self.fig.add_subplot(212)
self.line2, = self.ax2.plot(x, y)
plt.title('PMU2 Frequency Plot')
plt.xlabel('Time (s)')
plt.ylabel('Freq (Hz)')
plt.tight_layout()
def update_plot(self, synchrophasors):
y_data = [[],[]]
for synchrophasor in synchrophasors:
for i, msg in enumerate(synchrophasor):
y_data[i].append(msg.data.pmu_data[0].freq)
self.line1.set_ydata(y_data[0])
self.line2.set_ydata(y_data[1])
self.ax1.set_ylim(min(y_data[0]),max(y_data[0]))
self.ax2.set_ylim(min(y_data[1]),max(y_data[1]))
self.fig.canvas.draw()
self.fig.canvas.flush_events()
if __name__ == '__main__':
pmu_client1 = Client(remote_ip='10.0.0.1', remote_port=4722, idcode=1, mode='TCP')
pmu_client2 = Client(remote_ip='10.0.0.2', remote_port=4722, idcode=2, mode='TCP')
fm = FreqMeter()
pdc = PDC(clients=[pmu_client1,pmu_client2],history=300)
pdc.callback = fm.update_plot
pdc.run()
|
<commit_before><commit_msg>Add a real-time frequency meter example<commit_after>#!/usr/bin/env python3
from phasortoolbox import PDC,Client
import matplotlib.pyplot as plt
import numpy as np
import logging
logging.basicConfig(level=logging.DEBUG)
class FreqMeter(object):
def __init__(self):
x = np.linspace(-10.0, 0.0, num=300, endpoint=False)
y = [60.0]*300
plt.ion()
self.fig = plt.figure()
self.ax1 = self.fig.add_subplot(211)
self.line1, = self.ax1.plot(x, y)
plt.title('PMU1 Frequency Plot')
plt.xlabel('Time (s)')
plt.ylabel('Freq (Hz)')
self.ax2 = self.fig.add_subplot(212)
self.line2, = self.ax2.plot(x, y)
plt.title('PMU2 Frequency Plot')
plt.xlabel('Time (s)')
plt.ylabel('Freq (Hz)')
plt.tight_layout()
def update_plot(self, synchrophasors):
y_data = [[],[]]
for synchrophasor in synchrophasors:
for i, msg in enumerate(synchrophasor):
y_data[i].append(msg.data.pmu_data[0].freq)
self.line1.set_ydata(y_data[0])
self.line2.set_ydata(y_data[1])
self.ax1.set_ylim(min(y_data[0]),max(y_data[0]))
self.ax2.set_ylim(min(y_data[1]),max(y_data[1]))
self.fig.canvas.draw()
self.fig.canvas.flush_events()
if __name__ == '__main__':
pmu_client1 = Client(remote_ip='10.0.0.1', remote_port=4722, idcode=1, mode='TCP')
pmu_client2 = Client(remote_ip='10.0.0.2', remote_port=4722, idcode=2, mode='TCP')
fm = FreqMeter()
pdc = PDC(clients=[pmu_client1,pmu_client2],history=300)
pdc.callback = fm.update_plot
pdc.run()
|
|
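The history=300 window that PDC maintains above can be pictured as a fixed-length ring buffer; a rough sketch of the idea (names are invented and this is not part of the phasortoolbox API):

# Fixed-length history buffer, the shape of data update_plot expects.
from collections import deque

buf = deque([60.0] * 300, maxlen=300)  # always exactly 300 samples

def on_sample(freq):
    buf.append(freq)  # the oldest sample drops off automatically
    # line.set_ydata(buf) then always matches the 300-point x axis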
adc8a93c46f3efe70b6fafb750470245c1838dc4
|
mozillians/users/migrations/0013_auto_20170302_0230.py
|
mozillians/users/migrations/0013_auto_20170302_0230.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
from django.db.models import Q
def migrate_countries(apps, schema_editor):
UserProfile = apps.get_model('users', 'UserProfile')
# cities_light data models
Country = apps.get_model('cities_light', 'Country')
for profile in UserProfile.objects.filter(geo_country__isnull=False):
# Query countries based on `name` and `alternate_names`
country_query = (Q(name=profile.geo_country.name) |
Q(alternate_names__icontains=profile.geo_country.name))
cities_countries = Country.objects.filter(country_query)
country = None
if cities_countries.exists():
country = cities_countries[0]
kwargs = {
'country': country,
'privacy_country': profile.privacy_geo_country
}
UserProfile.objects.filter(pk=profile.id).update(**kwargs)
def migrate_cities_regions(apps, schema_editor):
UserProfile = apps.get_model('users', 'UserProfile')
City = apps.get_model('cities_light', 'City')
for profile in UserProfile.objects.filter(country__isnull=False, geo_city__isnull=False):
# Query cities based on `name`, `alternate_names` and `country`
city_query = ((Q(name=profile.geo_city.name) |
Q(alternate_names__icontains=profile.geo_city.name)) &
Q(country=profile.country))
city = None
region = None
cities = City.objects.filter(city_query)
if cities.exists():
city = cities[0]
region = city.region
kwargs = {
'region': region,
'city': city,
'privacy_region': profile.privacy_geo_region,
'privacy_city': profile.privacy_geo_city
}
UserProfile.objects.filter(pk=profile.id).update(**kwargs)
def backwards(apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [
('users', '0012_auto_20170220_0715'),
]
operations = [
migrations.RunPython(migrate_countries, backwards),
migrations.RunPython(migrate_cities_regions, backwards),
]
|
Add datamigration from mozillians.geo data to cities_light.
|
Add datamigration from mozillians.geo data to cities_light.
|
Python
|
bsd-3-clause
|
akatsoulas/mozillians,fxa90id/mozillians,mozilla/mozillians,johngian/mozillians,johngian/mozillians,akatsoulas/mozillians,fxa90id/mozillians,fxa90id/mozillians,fxa90id/mozillians,akatsoulas/mozillians,akatsoulas/mozillians,mozilla/mozillians,johngian/mozillians,mozilla/mozillians,mozilla/mozillians,johngian/mozillians
|
Add datamigration from mozillians.geo data to cities_light.
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
from django.db.models import Q
def migrate_countries(apps, schema_editor):
UserProfile = apps.get_model('users', 'UserProfile')
# cities_light data models
Country = apps.get_model('cities_light', 'Country')
for profile in UserProfile.objects.filter(geo_country__isnull=False):
# Query countries based on `name` and `alternate_names`
country_query = (Q(name=profile.geo_country.name) |
Q(alternate_names__icontains=profile.geo_country.name))
cities_countries = Country.objects.filter(country_query)
country = None
if cities_countries.exists():
country = cities_countries[0]
kwargs = {
'country': country,
'privacy_country': profile.privacy_geo_country
}
UserProfile.objects.filter(pk=profile.id).update(**kwargs)
def migrate_cities_regions(apps, schema_editor):
UserProfile = apps.get_model('users', 'UserProfile')
City = apps.get_model('cities_light', 'City')
for profile in UserProfile.objects.filter(country__isnull=False, geo_city__isnull=False):
# Query cities based on `name`, `alternate_names` and `country`
city_query = ((Q(name=profile.geo_city.name) |
Q(alternate_names__icontains=profile.geo_city.name)) &
Q(country=profile.country))
city = None
region = None
cities = City.objects.filter(city_query)
if cities.exists():
city = cities[0]
region = city.region
kwargs = {
'region': region,
'city': city,
'privacy_region': profile.privacy_geo_region,
'privacy_city': profile.privacy_geo_city
}
UserProfile.objects.filter(pk=profile.id).update(**kwargs)
def backwards(apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [
('users', '0012_auto_20170220_0715'),
]
operations = [
migrations.RunPython(migrate_countries, backwards),
migrations.RunPython(migrate_cities_regions, backwards),
]
|
<commit_before><commit_msg>Add datamigration from mozillians.geo data to cities_light.<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
from django.db.models import Q
def migrate_countries(apps, schema_editor):
UserProfile = apps.get_model('users', 'UserProfile')
# cities_light data models
Country = apps.get_model('cities_light', 'Country')
for profile in UserProfile.objects.filter(geo_country__isnull=False):
# Query countries based on `name` and `alternate_names`
country_query = (Q(name=profile.geo_country.name) |
Q(alternate_names__icontains=profile.geo_country.name))
cities_countries = Country.objects.filter(country_query)
country = None
if cities_countries.exists():
country = cities_countries[0]
kwargs = {
'country': country,
'privacy_country': profile.privacy_geo_country
}
UserProfile.objects.filter(pk=profile.id).update(**kwargs)
def migrate_cities_regions(apps, schema_editor):
UserProfile = apps.get_model('users', 'UserProfile')
City = apps.get_model('cities_light', 'City')
for profile in UserProfile.objects.filter(country__isnull=False, geo_city__isnull=False):
# Query cities based on `name`, `alternate_names` and `country`
city_query = ((Q(name=profile.geo_city.name) |
Q(alternate_names__icontains=profile.geo_city.name)) &
Q(country=profile.country))
city = None
region = None
cities = City.objects.filter(city_query)
if cities.exists():
city = cities[0]
region = city.region
kwargs = {
'region': region,
'city': city,
'privacy_region': profile.privacy_geo_region,
'privacy_city': profile.privacy_geo_city
}
UserProfile.objects.filter(pk=profile.id).update(**kwargs)
def backwards(apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [
('users', '0012_auto_20170220_0715'),
]
operations = [
migrations.RunPython(migrate_countries, backwards),
migrations.RunPython(migrate_cities_regions, backwards),
]
|
Add datamigration from mozillians.geo data to cities_light.# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
from django.db.models import Q
def migrate_countries(apps, schema_editor):
UserProfile = apps.get_model('users', 'UserProfile')
# cities_light data models
Country = apps.get_model('cities_light', 'Country')
for profile in UserProfile.objects.filter(geo_country__isnull=False):
# Query countries based on `name` and `alternate_names`
country_query = (Q(name=profile.geo_country.name) |
Q(alternate_names__icontains=profile.geo_country.name))
cities_countries = Country.objects.filter(country_query)
country = None
if cities_countries.exists():
country = cities_countries[0]
kwargs = {
'country': country,
'privacy_country': profile.privacy_geo_country
}
UserProfile.objects.filter(pk=profile.id).update(**kwargs)
def migrate_cities_regions(apps, schema_editor):
UserProfile = apps.get_model('users', 'UserProfile')
City = apps.get_model('cities_light', 'City')
for profile in UserProfile.objects.filter(country__isnull=False, geo_city__isnull=False):
# Query cities based on `name`, `alternate_names` and `country`
city_query = ((Q(name=profile.geo_city.name) |
Q(alternate_names__icontains=profile.geo_city.name)) &
Q(country=profile.country))
city = None
region = None
cities = City.objects.filter(city_query)
if cities.exists():
city = cities[0]
region = city.region
kwargs = {
'region': region,
'city': city,
'privacy_region': profile.privacy_geo_region,
'privacy_city': profile.privacy_geo_city
}
UserProfile.objects.filter(pk=profile.id).update(**kwargs)
def backwards(apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [
('users', '0012_auto_20170220_0715'),
]
operations = [
migrations.RunPython(migrate_countries, backwards),
migrations.RunPython(migrate_cities_regions, backwards),
]
|
<commit_before><commit_msg>Add datamigration from mozillians.geo data to cities_light.<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
from django.db.models import Q
def migrate_countries(apps, schema_editor):
UserProfile = apps.get_model('users', 'UserProfile')
# cities_light data models
Country = apps.get_model('cities_light', 'Country')
for profile in UserProfile.objects.filter(geo_country__isnull=False):
# Query countries based on `name` and `alternate_names`
country_query = (Q(name=profile.geo_country.name) |
Q(alternate_names__icontains=profile.geo_country.name))
cities_countries = Country.objects.filter(country_query)
country = None
if cities_countries.exists():
country = cities_countries[0]
kwargs = {
'country': country,
'privacy_country': profile.privacy_geo_country
}
UserProfile.objects.filter(pk=profile.id).update(**kwargs)
def migrate_cities_regions(apps, schema_editor):
UserProfile = apps.get_model('users', 'UserProfile')
City = apps.get_model('cities_light', 'City')
for profile in UserProfile.objects.filter(country__isnull=False, geo_city__isnull=False):
# Query cities based on `name`, `alternate_names` and `country`
city_query = ((Q(name=profile.geo_city.name) |
Q(alternate_names__icontains=profile.geo_city.name)) &
Q(country=profile.country))
city = None
region = None
cities = City.objects.filter(city_query)
if cities.exists():
city = cities[0]
region = city.region
kwargs = {
'region': region,
'city': city,
'privacy_region': profile.privacy_geo_region,
'privacy_city': profile.privacy_geo_city
}
UserProfile.objects.filter(pk=profile.id).update(**kwargs)
def backwards(apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [
('users', '0012_auto_20170220_0715'),
]
operations = [
migrations.RunPython(migrate_countries, backwards),
migrations.RunPython(migrate_cities_regions, backwards),
]
|
|
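A small Django note on the lookups above: QuerySet.first() folds the exists() check and the [0] indexing into a single query, with equivalent behavior for this use (reusing names from the migration):

# One-query variant of the exists() + indexing pattern above.
country = Country.objects.filter(country_query).first()  # None when no match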
03813bd7658ee8657e9b1253d280b20fb87e3ab7
|
scripts/add-bw-locations.py
|
scripts/add-bw-locations.py
|
#!/usr/bin/env python2
from codecs import open
from pokedex.db import connect, identifier_from_name
from pokedex.db.tables import Language
from pokedex.db.tables import Location, LocationGameIndex
session = connect()
en = session.query(Language).filter_by(identifier='en').one() # English
ja = session.query(Language).filter_by(identifier='ja').one() # Japanese
with open("bw-location-names-en", "r", "utf-8") as f:
en_names = [line.rstrip("\n") for line in f]
with open("bw-location-names-kanji", "r", "utf-8") as f:
ja_names = [line.rstrip("\n") for line in f]
locations = {}
for i, name in enumerate(zip(en_names, ja_names)):
if i == 0:
continue
en_name, ja_name = name
if not en_name:
continue
if name in locations:
loc = locations[name]
else:
loc = Location()
if en_name:
loc.name_map[en] = en_name
if ja_name:
loc.name_map[ja] = ja_name
loc.region_id = 5 # Unova
loc.identifier = identifier_from_name(en_name)
locations[name] = loc
lgi = LocationGameIndex()
lgi.location = loc
lgi.generation_id = 5 # Gen 5
lgi.game_index = i
session.add(loc)
session.add(lgi)
session.commit()
|
Add script for adding B/W locations
|
Add script for adding B/W locations
in the hope that it will be useful in future generations.
|
Python
|
mit
|
veekun/pokedex,xfix/pokedex,mschex1/pokedex,RK905/pokedex-1,veekun/pokedex,DaMouse404/pokedex
|
Add script for adding B/W locations
in the hope that it will be useful in future generations.
|
#!/usr/bin/env python2
from codecs import open
from pokedex.db import connect, identifier_from_name
from pokedex.db.tables import Language
from pokedex.db.tables import Location, LocationGameIndex
session = connect()
en = session.query(Language).filter_by(identifier='en').one() # English
ja = session.query(Language).filter_by(identifier='ja').one() # Japanese
with open("bw-location-names-en", "r", "utf-8") as f:
en_names = [line.rstrip("\n") for line in f]
with open("bw-location-names-kanji", "r", "utf-8") as f:
ja_names = [line.rstrip("\n") for line in f]
locations = {}
for i, name in enumerate(zip(en_names, ja_names)):
if i == 0:
continue
en_name, ja_name = name
if not en_name:
continue
if name in locations:
loc = locations[name]
else:
loc = Location()
if en_name:
loc.name_map[en] = en_name
if ja_name:
loc.name_map[ja] = ja_name
loc.region_id = 5 # Unova
loc.identifier = identifier_from_name(en_name)
locations[name] = loc
lgi = LocationGameIndex()
lgi.location = loc
lgi.generation_id = 5 # Gen 5
lgi.game_index = i
session.add(loc)
session.add(lgi)
session.commit()
|
<commit_before><commit_msg>Add script for adding B/W locations
in the hope that it will be useful in future generations.<commit_after>
|
#!/usr/bin/env python2
from codecs import open
from pokedex.db import connect, identifier_from_name
from pokedex.db.tables import Language
from pokedex.db.tables import Location, LocationGameIndex
session = connect()
en = session.query(Language).filter_by(identifier='en').one() # English
ja = session.query(Language).filter_by(identifier='ja').one() # Japanese
with open("bw-location-names-en", "r", "utf-8") as f:
en_names = [line.rstrip("\n") for line in f]
with open("bw-location-names-kanji", "r", "utf-8") as f:
ja_names = [line.rstrip("\n") for line in f]
locations = {}
for i, name in enumerate(zip(en_names, ja_names)):
if i == 0:
continue
en_name, ja_name = name
if not en_name:
continue
if name in locations:
loc = locations[name]
else:
loc = Location()
if en_name:
loc.name_map[en] = en_name
if ja_name:
loc.name_map[ja] = ja_name
loc.region_id = 5 # Unova
loc.identifier = identifier_from_name(en_name)
locations[name] = loc
lgi = LocationGameIndex()
lgi.location = loc
lgi.generation_id = 5 # Gen 5
lgi.game_index = i
session.add(loc)
session.add(lgi)
session.commit()
|
Add script for adding B/W locations
in the hope that it will be useful in future generations.#!/usr/bin/env python2
from codecs import open
from pokedex.db import connect, identifier_from_name
from pokedex.db.tables import Language
from pokedex.db.tables import Location, LocationGameIndex
session = connect()
en = session.query(Language).filter_by(identifier='en').one() # English
ja = session.query(Language).filter_by(identifier='ja').one() # Japanese
with open("bw-location-names-en", "r", "utf-8") as f:
en_names = [line.rstrip("\n") for line in f]
with open("bw-location-names-kanji", "r", "utf-8") as f:
ja_names = [line.rstrip("\n") for line in f]
locations = {}
for i, name in enumerate(zip(en_names, ja_names)):
if i == 0:
continue
en_name, ja_name = name
if not en_name:
continue
if name in locations:
loc = locations[name]
else:
loc = Location()
if en_name:
loc.name_map[en] = en_name
if ja_name:
loc.name_map[ja] = ja_name
loc.region_id = 5 # Unova
loc.identifier = identifier_from_name(en_name)
locations[name] = loc
lgi = LocationGameIndex()
lgi.location = loc
lgi.generation_id = 5 # Gen 5
lgi.game_index = i
session.add(loc)
session.add(lgi)
session.commit()
|
<commit_before><commit_msg>Add script for adding B/W locations
in the hope that it will be useful in future generations.<commit_after>#!/usr/bin/env python2
from codecs import open
from pokedex.db import connect, identifier_from_name
from pokedex.db.tables import Language
from pokedex.db.tables import Location, LocationGameIndex
session = connect()
en = session.query(Language).filter_by(identifier='en').one() # English
ja = session.query(Language).filter_by(identifier='ja').one() # Japanese
with open("bw-location-names-en", "r", "utf-8") as f:
en_names = [line.rstrip("\n") for line in f]
with open("bw-location-names-kanji", "r", "utf-8") as f:
ja_names = [line.rstrip("\n") for line in f]
locations = {}
for i, name in enumerate(zip(en_names, ja_names)):
if i == 0:
continue
en_name, ja_name = name
if not en_name:
continue
if name in locations:
loc = locations[name]
else:
loc = Location()
if en_name:
loc.name_map[en] = en_name
if ja_name:
loc.name_map[ja] = ja_name
loc.region_id = 5 # Unova
loc.identifier = identifier_from_name(en_name)
locations[name] = loc
lgi = LocationGameIndex()
lgi.location = loc
lgi.generation_id = 5 # Gen 5
lgi.game_index = i
session.add(loc)
session.add(lgi)
session.commit()
|
|
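The interesting part of the loop above is that Location rows are deduplicated on the (English, Japanese) name pair while every input line still gets its own LocationGameIndex. The same pattern restated over plain dicts, with invented data:

rows = [(1, ("Striaton City", u"サンヨウシティ")),
        (2, ("Route 1", u"1ばんどうろ")),
        (3, ("Striaton City", u"サンヨウシティ"))]

locations = {}      # name pair -> shared location record
game_indices = []   # one entry per row

for game_index, names in rows:
    loc = locations.setdefault(names, {"en": names[0], "ja": names[1]})
    game_indices.append({"location": loc, "game_index": game_index})

assert len(locations) == 2
assert game_indices[0]["location"] is game_indices[2]["location"]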
2a30afaea9d4cb1d704fd5ec0d78a946770c1c18
|
scripts/download-jamendo.py
|
scripts/download-jamendo.py
|
#!/usr/bin/env python
# Jamendo database dumps can be fetched from: http://img.jamendo.com/data/dbdump_artistalbumtrack.xml.gz
import xml.etree.cElementTree as ElementTree
import sys, gzip, time, os.path, urllib
class DownloadJamendo:
def __init__(self, destination):
if not os.path.exists(destination):
os.mkdir(destination)
self.destination = destination
def parse(self, dump):
for event, elem in ElementTree.iterparse(dump):
if elem.tag == "artist":
self.proc_artist(elem)
def proc_artist(self, elem):
for artist_e in elem.getchildren():
if artist_e.tag == "Albums":
for album_e in artist_e.getchildren():
self.proc_album(album_e)
def proc_album(self, elem):
for album_e in elem.getchildren():
if album_e.tag == "Tracks":
for track_e in album_e.getchildren():
self.proc_track(track_e)
def proc_track(self, elem):
track_id = None
track_license = None
for track_e in elem.getchildren():
if track_e.tag == "id":
track_id = int(track_e.text)
if track_e.tag == "license":
track_license = track_e.text
if track_id and track_license:
if self.free_license(track_license):
trackurl = "http://api.jamendo.com/get2/stream/track/redirect/?id=%d&streamencoding=ogg2" % track_id
trackfile = os.path.join(self.destination, "%d.ogg" % track_id)
if os.path.exists(trackfile):
print "Already downloaded track %d" % track_id
else:
print "Downloading %s to %s" % (trackurl, trackfile)
urllib.urlretrieve(trackurl, trackfile)
def free_license(self, license):
return ("http://creativecommons.org/licenses/by-sa" in license or "http://creativecommons.org/licenses/by/" in license or "http://artlibre.org/licence.php/lal.html" in license)
if __name__ == "__main__":
if len(sys.argv) != 3:
print "Usage: download-jamendo.py <database dump> <destination>"
sys.exit(1)
if sys.argv[1][-2:] == "gz":
dump = gzip.open(sys.argv[1], "r")
else:
dump = open(sys.argv[1], "r")
downloader = DownloadJamendo(sys.argv[2])
downloader.parse(dump)
|
Add a script to download all the fully free tracks from Jamendo (as Ogg Vorbis)
|
Add a script to download all the fully free tracks from Jamendo (as Ogg Vorbis)
|
Python
|
agpl-3.0
|
foocorp/gnu-fm,foocorp/gnu-fm,foocorp/gnu-fm,foocorp/gnu-fm,foocorp/gnu-fm,foocorp/gnu-fm,foocorp/gnu-fm,foocorp/gnu-fm,foocorp/gnu-fm
|
Add a script to download all the fully free tracks from Jamendo (as Ogg Vorbis)
|
#!/usr/bin/env python
# Jamendo database dumps can be fetched from: http://img.jamendo.com/data/dbdump_artistalbumtrack.xml.gz
import xml.etree.cElementTree as ElementTree
import sys, gzip, time, os.path, urllib
class DownloadJamendo:
def __init__(self, destination):
if not os.path.exists(destination):
os.mkdir(destination)
self.destination = destination
def parse(self, dump):
for event, elem in ElementTree.iterparse(dump):
if elem.tag == "artist":
self.proc_artist(elem)
def proc_artist(self, elem):
for artist_e in elem.getchildren():
if artist_e.tag == "Albums":
for album_e in artist_e.getchildren():
self.proc_album(album_e)
def proc_album(self, elem):
for album_e in elem.getchildren():
if album_e.tag == "Tracks":
for track_e in album_e.getchildren():
self.proc_track(track_e)
def proc_track(self, elem):
track_id = None
track_license = None
for track_e in elem.getchildren():
if track_e.tag == "id":
track_id = int(track_e.text)
if track_e.tag == "license":
track_license = track_e.text
if track_id and track_license:
if self.free_license(track_license):
trackurl = "http://api.jamendo.com/get2/stream/track/redirect/?id=%d&streamencoding=ogg2" % track_id
trackfile = os.path.join(self.destination, "%d.ogg" % track_id)
if os.path.exists(trackfile):
print "Already downloaded track %d" % track_id
else:
print "Downloading %s to %s" % (trackurl, trackfile)
urllib.urlretrieve(trackurl, trackfile)
def free_license(self, license):
return ("http://creativecommons.org/licenses/by-sa" in license or "http://creativecommons.org/licenses/by/" in license or "http://artlibre.org/licence.php/lal.html" in license)
if __name__ == "__main__":
if len(sys.argv) != 3:
print "Usage: download-jamendo.py <database dump> <destination>"
sys.exit(1)
if sys.argv[1][-2:] == "gz":
dump = gzip.open(sys.argv[1], "r")
else:
dump = open(sys.argv[1], "r")
downloader = DownloadJamendo(sys.argv[2])
downloader.parse(dump)
|
<commit_before><commit_msg>Add a script to download all the fully free tracks from Jamendo (as Ogg Vorbis)<commit_after>
|
#!/usr/bin/env python
# Jamendo database dumps can be fetched from: http://img.jamendo.com/data/dbdump_artistalbumtrack.xml.gz
import xml.etree.cElementTree as ElementTree
import sys, gzip, time, os.path, urllib
class DownloadJamendo:
def __init__(self, destination):
if not os.path.exists(destination):
os.mkdir(destination)
self.destination = destination
def parse(self, dump):
for event, elem in ElementTree.iterparse(dump):
if elem.tag == "artist":
self.proc_artist(elem)
def proc_artist(self, elem):
for artist_e in elem.getchildren():
if artist_e.tag == "Albums":
for album_e in artist_e.getchildren():
self.proc_album(album_e)
def proc_album(self, elem):
for album_e in elem.getchildren():
if album_e.tag == "Tracks":
for track_e in album_e.getchildren():
self.proc_track(track_e)
def proc_track(self, elem):
track_id = None
track_license = None
for track_e in elem.getchildren():
if track_e.tag == "id":
track_id = int(track_e.text)
if track_e.tag == "license":
track_license = track_e.text
if track_id and track_license:
if self.free_license(track_license):
trackurl = "http://api.jamendo.com/get2/stream/track/redirect/?id=%d&streamencoding=ogg2" % track_id
trackfile = os.path.join(self.destination, "%d.ogg" % track_id)
if os.path.exists(trackfile):
print "Already downloaded track %d" % track_id
else:
print "Downloading %s to %s" % (trackurl, trackfile)
urllib.urlretrieve(trackurl, trackfile)
def free_license(self, license):
return ("http://creativecommons.org/licenses/by-sa" in license or "http://creativecommons.org/licenses/by/" in license or "http://artlibre.org/licence.php/lal.html" in license)
if __name__ == "__main__":
if len(sys.argv) != 3:
print "Usage: download-jamendo.py <database dump> <destination>"
sys.exit(1)
if sys.argv[1][-2:] == "gz":
dump = gzip.open(sys.argv[1], "r")
else:
dump = open(sys.argv[1], "r")
downloader = DownloadJamendo(sys.argv[2])
downloader.parse(dump)
|
Add a script to download all the fully free tracks from Jamendo (as Ogg Vorbis)#!/usr/bin/env python
# Jamendo database dumps can be fetched from: http://img.jamendo.com/data/dbdump_artistalbumtrack.xml.gz
import xml.etree.cElementTree as ElementTree
import sys, gzip, time, os.path, urllib
class DownloadJamendo:
def __init__(self, destination):
if not os.path.exists(destination):
os.mkdir(destination)
self.destination = destination
def parse(self, dump):
for event, elem in ElementTree.iterparse(dump):
if elem.tag == "artist":
self.proc_artist(elem)
def proc_artist(self, elem):
for artist_e in elem.getchildren():
if artist_e.tag == "Albums":
for album_e in artist_e.getchildren():
self.proc_album(album_e)
def proc_album(self, elem):
for album_e in elem.getchildren():
if album_e.tag == "Tracks":
for track_e in album_e.getchildren():
self.proc_track(track_e)
def proc_track(self, elem):
track_id = None
track_license = None
for track_e in elem.getchildren():
if track_e.tag == "id":
track_id = int(track_e.text)
if track_e.tag == "license":
track_license = track_e.text
if track_id and track_license:
if self.free_license(track_license):
trackurl = "http://api.jamendo.com/get2/stream/track/redirect/?id=%d&streamencoding=ogg2" % track_id
trackfile = os.path.join(self.destination, "%d.ogg" % track_id)
if os.path.exists(trackfile):
print "Already downloaded track %d" % track_id
else:
print "Downloading %s to %s" % (trackurl, trackfile)
urllib.urlretrieve(trackurl, trackfile)
def free_license(self, license):
return ("http://creativecommons.org/licenses/by-sa" in license or "http://creativecommons.org/licenses/by/" in license or "http://artlibre.org/licence.php/lal.html" in license)
if __name__ == "__main__":
if len(sys.argv) != 3:
print "Usage: download-jamendo.py <database dump> <destination>"
sys.exit(1)
if sys.argv[1][-2:] == "gz":
dump = gzip.open(sys.argv[1], "r")
else:
dump = open(sys.argv[1], "r")
downloader = DownloadJamendo(sys.argv[2])
downloader.parse(dump)
|
<commit_before><commit_msg>Add a script to download all the fully free tracks from Jamendo (as Ogg Vorbis)<commit_after>#!/usr/bin/env python
# Jamendo database dumps can be fetched from: http://img.jamendo.com/data/dbdump_artistalbumtrack.xml.gz
import xml.etree.cElementTree as ElementTree
import sys, gzip, time, os.path, urllib
class DownloadJamendo:
def __init__(self, destination):
if not os.path.exists(destination):
os.mkdir(destination)
self.destination = destination
def parse(self, dump):
for event, elem in ElementTree.iterparse(dump):
if elem.tag == "artist":
self.proc_artist(elem)
def proc_artist(self, elem):
for artist_e in elem.getchildren():
if artist_e.tag == "Albums":
for album_e in artist_e.getchildren():
self.proc_album(album_e)
def proc_album(self, elem):
for album_e in elem.getchildren():
if album_e.tag == "Tracks":
for track_e in album_e.getchildren():
self.proc_track(track_e)
def proc_track(self, elem):
track_id = None
track_license = None
for track_e in elem.getchildren():
if track_e.tag == "id":
track_id = int(track_e.text)
if track_e.tag == "license":
track_license = track_e.text
if track_id and track_license:
if self.free_license(track_license):
trackurl = "http://api.jamendo.com/get2/stream/track/redirect/?id=%d&streamencoding=ogg2" % track_id
trackfile = os.path.join(self.destination, "%d.ogg" % track_id)
if os.path.exists(trackfile):
print "Already downloaded track %d" % track_id
else:
print "Downloading %s to %s" % (trackurl, trackfile)
urllib.urlretrieve(trackurl, trackfile)
def free_license(self, license):
return ("http://creativecommons.org/licenses/by-sa" in license or "http://creativecommons.org/licenses/by/" in license or "http://artlibre.org/licence.php/lal.html" in license)
if __name__ == "__main__":
if len(sys.argv) != 3:
print "Usage: download-jamendo.py <database dump> <destination>"
sys.exit(1)
if sys.argv[1][-2:] == "gz":
dump = gzip.open(sys.argv[1], "r")
else:
dump = open(sys.argv[1], "r")
downloader = DownloadJamendo(sys.argv[2])
downloader.parse(dump)
|
|
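Note that free_license whitelists license URLs by substring, and the trailing slash in ".../licenses/by/" is what keeps the NC/ND variants out. A stand-alone copy with sanity checks (the tested URLs are made up for the example):

FREE_LICENSE_MARKERS = (
    "http://creativecommons.org/licenses/by-sa",
    "http://creativecommons.org/licenses/by/",
    "http://artlibre.org/licence.php/lal.html",
)

def free_license(license_url):
    return any(marker in license_url for marker in FREE_LICENSE_MARKERS)

assert free_license("http://creativecommons.org/licenses/by-sa/3.0/")
assert free_license("http://creativecommons.org/licenses/by/2.5/")
assert not free_license("http://creativecommons.org/licenses/by-nc-nd/3.0/")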
129e717143d2b04f244afca31c4f47d9740ea66e
|
app/melange/views/__init__.py
|
app/melange/views/__init__.py
|
# Copyright 2013 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Package with Melange-specific views."""
|
Package for Melange specific views added.
|
Package for Melange specific views added.
It will contain modules with view classes and functions that are specific to neither the CI nor the SOC package.
|
Python
|
apache-2.0
|
rhyolight/nupic.son,rhyolight/nupic.son,rhyolight/nupic.son
|
Package for Melange specific views added.
It will contain modules with view classes and functions that are specific to neither the CI nor the SOC package.
|
# Copyright 2013 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Package with Melange-specific views."""
|
<commit_before><commit_msg>Package for Melange specific views added.
It will contain modules with view classes and functions that are specific to neither the CI nor the SOC package.<commit_after>
|
# Copyright 2013 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Package with Melange-specific views."""
|
Package for Melange specific views added.
It will contain modules with view classes and functions that are specific to neither the CI nor the SOC package.# Copyright 2013 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Package with Melange-specific views."""
|
<commit_before><commit_msg>Package for Melange specific views added.
It will contain modules with view classes and functions that are specific to neither the CI nor the SOC package.<commit_after># Copyright 2013 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Package with Melange-specific views."""
|
|
6cd0721a953a0708a99828dc8276c510b1b57c11
|
numba/tests/test_dyn_array.py
|
numba/tests/test_dyn_array.py
|
from __future__ import print_function, absolute_import, division
import numpy as np
from numba import unittest_support as unittest
from numba import njit
class TestDynArray(unittest.TestCase):
def test_empty_1d(self):
@njit
def foo(n):
arr = np.empty(n)
for i in range(n):
arr[i] = i
return arr
n = 3
arr = foo(n)
np.testing.assert_equal(np.arange(n), arr)
self.assertEqual(arr.size, n)
self.assertEqual(arr.shape, (n,))
self.assertEqual(arr.dtype, np.dtype(np.float64))
self.assertEqual(arr.strides, (np.dtype(np.float64).itemsize,))
arr.fill(123) # test writability
np.testing.assert_equal(123, arr)
del arr
if __name__ == "__main__":
unittest.main()
|
Add test for np.empty 1d case
|
Add test for np.empty 1d case
|
Python
|
bsd-2-clause
|
cpcloud/numba,pitrou/numba,pombredanne/numba,pombredanne/numba,gdementen/numba,cpcloud/numba,cpcloud/numba,seibert/numba,sklam/numba,ssarangi/numba,stefanseefeld/numba,jriehl/numba,stuartarchibald/numba,sklam/numba,stuartarchibald/numba,stonebig/numba,sklam/numba,stefanseefeld/numba,pitrou/numba,stonebig/numba,GaZ3ll3/numba,pombredanne/numba,stuartarchibald/numba,numba/numba,ssarangi/numba,GaZ3ll3/numba,pitrou/numba,ssarangi/numba,jriehl/numba,stuartarchibald/numba,numba/numba,stuartarchibald/numba,jriehl/numba,GaZ3ll3/numba,IntelLabs/numba,numba/numba,GaZ3ll3/numba,gmarkall/numba,IntelLabs/numba,stonebig/numba,pitrou/numba,IntelLabs/numba,stefanseefeld/numba,pitrou/numba,gmarkall/numba,numba/numba,numba/numba,ssarangi/numba,jriehl/numba,cpcloud/numba,gmarkall/numba,gdementen/numba,stefanseefeld/numba,gdementen/numba,sklam/numba,pombredanne/numba,IntelLabs/numba,seibert/numba,gdementen/numba,gmarkall/numba,cpcloud/numba,stonebig/numba,stefanseefeld/numba,seibert/numba,sklam/numba,seibert/numba,IntelLabs/numba,gdementen/numba,pombredanne/numba,gmarkall/numba,jriehl/numba,ssarangi/numba,GaZ3ll3/numba,seibert/numba,stonebig/numba
|
Add test for np.empty 1d case
|
from __future__ import print_function, absolute_import, division
import numpy as np
from numba import unittest_support as unittest
from numba import njit
class TestDynArray(unittest.TestCase):
def test_empty_1d(self):
@njit
def foo(n):
arr = np.empty(n)
for i in range(n):
arr[i] = i
return arr
n = 3
arr = foo(n)
np.testing.assert_equal(np.arange(n), arr)
self.assertEqual(arr.size, n)
self.assertEqual(arr.shape, (n,))
self.assertEqual(arr.dtype, np.dtype(np.float64))
self.assertEqual(arr.strides, (np.dtype(np.float64).itemsize,))
arr.fill(123) # test writability
np.testing.assert_equal(123, arr)
del arr
if __name__ == "__main__":
unittest.main()
|
<commit_before><commit_msg>Add test for np.empty 1d case<commit_after>
|
from __future__ import print_function, absolute_import, division
import numpy as np
from numba import unittest_support as unittest
from numba import njit
class TestDynArray(unittest.TestCase):
def test_empty_1d(self):
@njit
def foo(n):
arr = np.empty(n)
for i in range(n):
arr[i] = i
return arr
n = 3
arr = foo(n)
np.testing.assert_equal(np.arange(n), arr)
self.assertEqual(arr.size, n)
self.assertEqual(arr.shape, (n,))
self.assertEqual(arr.dtype, np.dtype(np.float64))
self.assertEqual(arr.strides, (np.dtype(np.float64).itemsize,))
arr.fill(123) # test writability
np.testing.assert_equal(123, arr)
del arr
if __name__ == "__main__":
unittest.main()
|
Add test for np.empty 1d casefrom __future__ import print_function, absolute_import, division
import numpy as np
from numba import unittest_support as unittest
from numba import njit
class TestDynArray(unittest.TestCase):
def test_empty_1d(self):
@njit
def foo(n):
arr = np.empty(n)
for i in range(n):
arr[i] = i
return arr
n = 3
arr = foo(n)
np.testing.assert_equal(np.arange(n), arr)
self.assertEqual(arr.size, n)
self.assertEqual(arr.shape, (n,))
self.assertEqual(arr.dtype, np.dtype(np.float64))
self.assertEqual(arr.strides, (np.dtype(np.float64).itemsize,))
arr.fill(123) # test writability
np.testing.assert_equal(123, arr)
del arr
if __name__ == "__main__":
unittest.main()
|
<commit_before><commit_msg>Add test for np.empty 1d case<commit_after>from __future__ import print_function, absolute_import, division
import numpy as np
from numba import unittest_support as unittest
from numba import njit
class TestDynArray(unittest.TestCase):
def test_empty_1d(self):
@njit
def foo(n):
arr = np.empty(n)
for i in range(n):
arr[i] = i
return arr
n = 3
arr = foo(n)
np.testing.assert_equal(np.arange(n), arr)
self.assertEqual(arr.size, n)
self.assertEqual(arr.shape, (n,))
self.assertEqual(arr.dtype, np.dtype(np.float64))
self.assertEqual(arr.strides, (np.dtype(np.float64).itemsize,))
arr.fill(123) # test writability
np.testing.assert_equal(123, arr)
del arr
if __name__ == "__main__":
unittest.main()
|
|
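The assertions in the test encode two NumPy facts worth spelling out: np.empty defaults to float64, and a C-contiguous 1-D array has a single stride equal to the item size. Verified directly in plain NumPy:

import numpy as np

arr = np.empty(3)
assert arr.dtype == np.dtype(np.float64)
assert arr.strides == (arr.dtype.itemsize,)
arr.fill(123)                       # np.empty leaves memory uninitialized,
np.testing.assert_equal(123, arr)   # so write before you read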
99d789e4ab0627395804ffde42ab00c394a37b77
|
contrib/django/freppledb/output/management/__init__.py
|
contrib/django/freppledb/output/management/__init__.py
|
# Copyright (C) 2013 by Johan De Taeye, frePPLe bvba
#
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero
# General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from django.db import DEFAULT_DB_ALIAS
from django.db.models import get_model, signals
from django.contrib.auth.models import Permission
from freppledb.output import models as output_app
def removeDefaultPermissions(app, created_models, verbosity, db=DEFAULT_DB_ALIAS, **kwargs):
# Delete the default permissions that were created for the models in the output app
Permission.objects.all().filter(content_type__app_label="output", codename__startswith="change").delete()
Permission.objects.all().filter(content_type__app_label="output", codename__startswith="add").delete()
Permission.objects.all().filter(content_type__app_label="output", codename__startswith="delete").delete()
signals.post_syncdb.connect(removeDefaultPermissions, output_app)
|
Remove default permissions in the "output" app, since irrelevant
|
Remove default permissions in the "output" app, since irrelevant
|
Python
|
agpl-3.0
|
frePPLe/frePPLe,frePPLe/frePPLe,frePPLe/frePPLe,frePPLe/frePPLe,frePPLe/frePPLe,frePPLe/frePPLe
|
Remove default permissions in the "output" app, since irrelevant
|
# Copyright (C) 2013 by Johan De Taeye, frePPLe bvba
#
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero
# General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from django.db import DEFAULT_DB_ALIAS
from django.db.models import get_model, signals
from django.contrib.auth.models import Permission
from freppledb.output import models as output_app
def removeDefaultPermissions(app, created_models, verbosity, db=DEFAULT_DB_ALIAS, **kwargs):
# Delete the default permissions that were created for the models in the output app
Permission.objects.all().filter(content_type__app_label="output", codename__startswith="change").delete()
Permission.objects.all().filter(content_type__app_label="output", codename__startswith="add").delete()
Permission.objects.all().filter(content_type__app_label="output", codename__startswith="delete").delete()
signals.post_syncdb.connect(removeDefaultPermissions, output_app)
|
<commit_before><commit_msg>Remove default permissions in the "output" app, since irrelevant<commit_after>
|
# Copyright (C) 2013 by Johan De Taeye, frePPLe bvba
#
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero
# General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from django.db import DEFAULT_DB_ALIAS
from django.db.models import get_model, signals
from django.contrib.auth.models import Permission
from freppledb.output import models as output_app
def removeDefaultPermissions(app, created_models, verbosity, db=DEFAULT_DB_ALIAS, **kwargs):
# Delete the default permissions that were created for the models in the output app
Permission.objects.all().filter(content_type__app_label="output", codename__startswith="change").delete()
Permission.objects.all().filter(content_type__app_label="output", codename__startswith="add").delete()
Permission.objects.all().filter(content_type__app_label="output", codename__startswith="delete").delete()
signals.post_syncdb.connect(removeDefaultPermissions, output_app)
|
Remove default permissions in the "output" app, since irrelevant# Copyright (C) 2013 by Johan De Taeye, frePPLe bvba
#
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero
# General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from django.db import DEFAULT_DB_ALIAS
from django.db.models import get_model, signals
from django.contrib.auth.models import Permission
from freppledb.output import models as output_app
def removeDefaultPermissions(app, created_models, verbosity, db=DEFAULT_DB_ALIAS, **kwargs):
# Delete the default permissions that were created for the models in the output app
Permission.objects.all().filter(content_type__app_label="output", codename__startswith="change").delete()
Permission.objects.all().filter(content_type__app_label="output", codename__startswith="add").delete()
Permission.objects.all().filter(content_type__app_label="output", codename__startswith="delete").delete()
signals.post_syncdb.connect(removeDefaultPermissions, output_app)
|
<commit_before><commit_msg>Remove default permissions in the "output" app, since irrelevant<commit_after># Copyright (C) 2013 by Johan De Taeye, frePPLe bvba
#
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero
# General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from django.db import DEFAULT_DB_ALIAS
from django.db.models import get_model, signals
from django.contrib.auth.models import Permission
from freppledb.output import models as output_app
def removeDefaultPermissions(app, created_models, verbosity, db=DEFAULT_DB_ALIAS, **kwargs):
# Delete the default permissions that were created for the models in the output app
Permission.objects.all().filter(content_type__app_label="output", codename__startswith="change").delete()
Permission.objects.all().filter(content_type__app_label="output", codename__startswith="add").delete()
Permission.objects.all().filter(content_type__app_label="output", codename__startswith="delete").delete()
signals.post_syncdb.connect(removeDefaultPermissions, output_app)
|
|
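post_syncdb was removed in later Django releases; on modern Django the same cleanup would hang off post_migrate. A hedged sketch of that translation — the AppConfig wiring and the regex filter are assumptions, not part of the original commit:

from django.apps import AppConfig
from django.db.models.signals import post_migrate

def remove_default_permissions(sender, **kwargs):
    # imported inside the handler so models are not touched at import time
    from django.contrib.auth.models import Permission
    # collapses the three startswith filters above into one regex delete
    Permission.objects.filter(
        content_type__app_label="output",
        codename__regex=r"^(add|change|delete)_",
    ).delete()

class OutputConfig(AppConfig):
    name = "freppledb.output"

    def ready(self):
        post_migrate.connect(remove_default_permissions, sender=self)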
09425139bd06b4b7d96504c53f897c9329331d91
|
array/first-nonrepeat-char.py
|
array/first-nonrepeat-char.py
|
# Given a string s, find and return the first instance of a non-repeating character in it. If there is no such character, return '_'
# constraint: the solution should iterate over the string only once and use O(1) additional memory
# input string contains only lowercase English letters
def first_non_repeating_character(s):
char_in_str = []
dup_list = {}
for i in range(len(s)):
if s[i] in char_in_str:
dup_list[s[i]] = 1
else:
char_in_str.append(s[i])
for j in char_in_str:
        if j not in dup_list:
return j
return '_'
|
Write function that returns first instance of non-repeating character in input string
|
Write function that returns first instance of non-repeating character in input string
|
Python
|
mit
|
derekmpham/interview-prep,derekmpham/interview-prep
|
Write function that returns first instance of non-repeating character in input string
|
# Given a string s, find and return the first instance of a non-repeating character in it. If there is no such character, return '_'
# constraint: the solution should iterate over the string only once and use O(1) additional memory
# input string contains only lowercase English letters
def first_non_repeating_character(s):
char_in_str = []
dup_list = {}
for i in range(len(s)):
if s[i] in char_in_str:
dup_list[s[i]] = 1
else:
char_in_str.append(s[i])
for j in char_in_str:
        if j not in dup_list:
return j
return '_'
|
<commit_before><commit_msg>Write function that returns first instance of non-repeating character in input string<commit_after>
|
# Given a string s, find and return the first instance of a non-repeating character in it. If there is no such character, return '_'
# constraint: the solution should iterate over the string only once and use O(1) additional memory
# input string contains only lowercase English letters
def first_non_repeating_character(s):
char_in_str = []
dup_list = {}
for i in range(len(s)):
if s[i] in char_in_str:
dup_list[s[i]] = 1
else:
char_in_str.append(s[i])
for j in char_in_str:
        if j not in dup_list:
return j
return '_'
|
Write function that returns first instance of non-repeating character in input string# Given a string s, find and return the first instance of a non-repeating character in it. If there is no such character, return '_'
# constraint: the solution should iterate over the string only once and use O(1) additional memory
# input string contains only lowercase English letters
def first_non_repeating_character(s):
char_in_str = []
dup_list = {}
for i in range(len(s)):
if s[i] in char_in_str:
dup_list[s[i]] = 1
else:
char_in_str.append(s[i])
for j in char_in_str:
        if j not in dup_list:
return j
return '_'
|
<commit_before><commit_msg>Write function that returns first instance of non-repeating character in input string<commit_after># Given a string s, find and return the first instance of a non-repeating character in it. If there is no such character, return '_'
# constraint: the solution should iterate over the string only once and use O(1) additional memory
# input string contains only lowercase English letters
def first_non_repeating_character(s):
char_in_str = []
dup_list = {}
for i in range(len(s)):
if s[i] in char_in_str:
dup_list[s[i]] = 1
else:
char_in_str.append(s[i])
for j in char_in_str:
        if j not in dup_list:
return j
return '_'
|
|
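The implementation above works, but it makes two passes and keeps an O(n) membership list, so it does not actually meet the stated constraint. A sketch that does — one pass over the string plus lookup tables bounded by the 26-letter alphabet (Python 3.4+ for min(..., default=...)):

def first_non_repeating_character(s):
    counts = {}
    first_index = {}
    for i, ch in enumerate(s):           # the only pass over the string
        counts[ch] = counts.get(ch, 0) + 1
        first_index.setdefault(ch, i)
    # scan the (at most 26-entry) tables, not the string
    best = min((first_index[c] for c in counts if counts[c] == 1), default=None)
    return s[best] if best is not None else '_'

assert first_non_repeating_character("abacabad") == 'c'
assert first_non_repeating_character("abacabaabacaba") == '_'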
45ab98f8439845fdc5035525f8843dc78f8986c5
|
tests/linux_benchmarks/aws_dynamodb_ycsb_benchmark_test.py
|
tests/linux_benchmarks/aws_dynamodb_ycsb_benchmark_test.py
|
# Copyright 2022 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for aws_dynamodb_ycsb_benchmark."""
import unittest
from absl import flags
import mock
from perfkitbenchmarker.linux_benchmarks import aws_dynamodb_ycsb_benchmark
from perfkitbenchmarker.providers.aws import aws_dynamodb
from tests import pkb_common_test_case
FLAGS = flags.FLAGS
class RunTest(pkb_common_test_case.PkbCommonTestCase):
def setUp(self):
super().setUp()
self.enter_context(
mock.patch.object(aws_dynamodb_ycsb_benchmark,
'GetRemoteVMCredentialsFullPath'))
self.mock_spec = mock.MagicMock()
self.mock_spec.executor = mock.MagicMock()
def testRunThroughputIncreases(self):
# Benchmark raises WCU to 10k during loading if WCU < 10k.
# Arrange
instance = aws_dynamodb.AwsDynamoDBInstance(rcu=1000, wcu=1000)
mock_set_throughput = self.enter_context(
mock.patch.object(instance, 'SetThroughput'))
self.mock_spec.non_relational_db = instance
# Act
aws_dynamodb_ycsb_benchmark.Run(self.mock_spec)
# Assert
mock_set_throughput.assert_has_calls(
[mock.call(wcu=10000), mock.call()])
def testRunThroughputStaysSame(self):
# WCU stays the same during loading if > 10k.
# Arrange
instance = aws_dynamodb.AwsDynamoDBInstance(rcu=1000, wcu=30000)
mock_set_throughput = self.enter_context(
mock.patch.object(instance, 'SetThroughput'))
self.mock_spec.non_relational_db = instance
# Act
aws_dynamodb_ycsb_benchmark.Run(self.mock_spec)
# Assert
mock_set_throughput.assert_called_once_with()
if __name__ == '__main__':
unittest.main()
|
Add tests for dynamodb throughput.
|
Add tests for dynamodb throughput.
PiperOrigin-RevId: 445294911
|
Python
|
apache-2.0
|
GoogleCloudPlatform/PerfKitBenchmarker,GoogleCloudPlatform/PerfKitBenchmarker,GoogleCloudPlatform/PerfKitBenchmarker,GoogleCloudPlatform/PerfKitBenchmarker
|
Add tests for dynamodb throughput.
PiperOrigin-RevId: 445294911
|
# Copyright 2022 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for aws_dynamodb_ycsb_benchmark."""
import unittest
from absl import flags
import mock
from perfkitbenchmarker.linux_benchmarks import aws_dynamodb_ycsb_benchmark
from perfkitbenchmarker.providers.aws import aws_dynamodb
from tests import pkb_common_test_case
FLAGS = flags.FLAGS
class RunTest(pkb_common_test_case.PkbCommonTestCase):
def setUp(self):
super().setUp()
self.enter_context(
mock.patch.object(aws_dynamodb_ycsb_benchmark,
'GetRemoteVMCredentialsFullPath'))
self.mock_spec = mock.MagicMock()
self.mock_spec.executor = mock.MagicMock()
def testRunThroughputIncreases(self):
# Benchmark raises WCU to 10k during loading if WCU < 10k.
# Arrange
instance = aws_dynamodb.AwsDynamoDBInstance(rcu=1000, wcu=1000)
mock_set_throughput = self.enter_context(
mock.patch.object(instance, 'SetThroughput'))
self.mock_spec.non_relational_db = instance
# Act
aws_dynamodb_ycsb_benchmark.Run(self.mock_spec)
# Assert
mock_set_throughput.assert_has_calls(
[mock.call(wcu=10000), mock.call()])
def testRunThroughputStaysSame(self):
# WCU stays the same during loading if > 10k.
# Arrange
instance = aws_dynamodb.AwsDynamoDBInstance(rcu=1000, wcu=30000)
mock_set_throughput = self.enter_context(
mock.patch.object(instance, 'SetThroughput'))
self.mock_spec.non_relational_db = instance
# Act
aws_dynamodb_ycsb_benchmark.Run(self.mock_spec)
# Assert
mock_set_throughput.assert_called_once_with()
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add tests for dynamodb throughput.
PiperOrigin-RevId: 445294911<commit_after>
|
# Copyright 2022 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for aws_dynamodb_ycsb_benchmark."""
import unittest
from absl import flags
import mock
from perfkitbenchmarker.linux_benchmarks import aws_dynamodb_ycsb_benchmark
from perfkitbenchmarker.providers.aws import aws_dynamodb
from tests import pkb_common_test_case
FLAGS = flags.FLAGS
class RunTest(pkb_common_test_case.PkbCommonTestCase):
def setUp(self):
super().setUp()
self.enter_context(
mock.patch.object(aws_dynamodb_ycsb_benchmark,
'GetRemoteVMCredentialsFullPath'))
self.mock_spec = mock.MagicMock()
self.mock_spec.executor = mock.MagicMock()
def testRunThroughputIncreases(self):
# Benchmark raises WCU to 10k during loading if WCU < 10k.
# Arrange
instance = aws_dynamodb.AwsDynamoDBInstance(rcu=1000, wcu=1000)
mock_set_throughput = self.enter_context(
mock.patch.object(instance, 'SetThroughput'))
self.mock_spec.non_relational_db = instance
# Act
aws_dynamodb_ycsb_benchmark.Run(self.mock_spec)
# Assert
mock_set_throughput.assert_has_calls(
[mock.call(wcu=10000), mock.call()])
def testRunThroughputStaysSame(self):
# WCU stays the same during loading if > 10k.
# Arrange
instance = aws_dynamodb.AwsDynamoDBInstance(rcu=1000, wcu=30000)
mock_set_throughput = self.enter_context(
mock.patch.object(instance, 'SetThroughput'))
self.mock_spec.non_relational_db = instance
# Act
aws_dynamodb_ycsb_benchmark.Run(self.mock_spec)
# Assert
mock_set_throughput.assert_called_once_with()
if __name__ == '__main__':
unittest.main()
|
Add tests for dynamodb throughput.
PiperOrigin-RevId: 445294911# Copyright 2022 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for aws_dynamodb_ycsb_benchmark."""
import unittest
from absl import flags
import mock
from perfkitbenchmarker.linux_benchmarks import aws_dynamodb_ycsb_benchmark
from perfkitbenchmarker.providers.aws import aws_dynamodb
from tests import pkb_common_test_case
FLAGS = flags.FLAGS
class RunTest(pkb_common_test_case.PkbCommonTestCase):
def setUp(self):
super().setUp()
self.enter_context(
mock.patch.object(aws_dynamodb_ycsb_benchmark,
'GetRemoteVMCredentialsFullPath'))
self.mock_spec = mock.MagicMock()
self.mock_spec.executor = mock.MagicMock()
def testRunThroughputIncreases(self):
# Benchmark raises WCU to 10k during loading if WCU < 10k.
# Arrange
instance = aws_dynamodb.AwsDynamoDBInstance(rcu=1000, wcu=1000)
mock_set_throughput = self.enter_context(
mock.patch.object(instance, 'SetThroughput'))
self.mock_spec.non_relational_db = instance
# Act
aws_dynamodb_ycsb_benchmark.Run(self.mock_spec)
# Assert
mock_set_throughput.assert_has_calls(
[mock.call(wcu=10000), mock.call()])
def testRunThroughputStaysSame(self):
# WCU stays the same during loading if > 10k.
# Arrange
instance = aws_dynamodb.AwsDynamoDBInstance(rcu=1000, wcu=30000)
mock_set_throughput = self.enter_context(
mock.patch.object(instance, 'SetThroughput'))
self.mock_spec.non_relational_db = instance
# Act
aws_dynamodb_ycsb_benchmark.Run(self.mock_spec)
# Assert
mock_set_throughput.assert_called_once_with()
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add tests for dynamodb throughput.
PiperOrigin-RevId: 445294911<commit_after># Copyright 2022 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for aws_dynamodb_ycsb_benchmark."""
import unittest
from absl import flags
import mock
from perfkitbenchmarker.linux_benchmarks import aws_dynamodb_ycsb_benchmark
from perfkitbenchmarker.providers.aws import aws_dynamodb
from tests import pkb_common_test_case
FLAGS = flags.FLAGS
class RunTest(pkb_common_test_case.PkbCommonTestCase):
def setUp(self):
super().setUp()
self.enter_context(
mock.patch.object(aws_dynamodb_ycsb_benchmark,
'GetRemoteVMCredentialsFullPath'))
self.mock_spec = mock.MagicMock()
self.mock_spec.executor = mock.MagicMock()
def testRunThroughputIncreases(self):
# Benchmark raises WCU to 10k during loading if WCU < 10k.
# Arrange
instance = aws_dynamodb.AwsDynamoDBInstance(rcu=1000, wcu=1000)
mock_set_throughput = self.enter_context(
mock.patch.object(instance, 'SetThroughput'))
self.mock_spec.non_relational_db = instance
# Act
aws_dynamodb_ycsb_benchmark.Run(self.mock_spec)
# Assert
mock_set_throughput.assert_has_calls(
[mock.call(wcu=10000), mock.call()])
def testRunThroughputStaysSame(self):
# WCU stays the same during loading if > 10k.
# Arrange
instance = aws_dynamodb.AwsDynamoDBInstance(rcu=1000, wcu=30000)
mock_set_throughput = self.enter_context(
mock.patch.object(instance, 'SetThroughput'))
self.mock_spec.non_relational_db = instance
# Act
aws_dynamodb_ycsb_benchmark.Run(self.mock_spec)
# Assert
mock_set_throughput.assert_called_once_with()
if __name__ == '__main__':
unittest.main()
|
|
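Read together, the two tests pin down the load-phase behaviour: raise WCU to 10k when the table is provisioned below that, and always finish with a bare SetThroughput() call to restore the provisioned values. A hypothetical reconstruction of that control flow (the attribute and method names follow the test doubles; the rest is inferred from the assertions):

_LOAD_WCU = 10000

def run_with_load_boost(instance):
    if instance.wcu < _LOAD_WCU:
        instance.SetThroughput(wcu=_LOAD_WCU)   # boost for the YCSB load phase
    # ... YCSB load runs here ...
    instance.SetThroughput()                    # reset to provisioned values either way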
0ab7f8489a2fe783fde98d7336dc291dddb01275
|
tests/DAT/test_Block.py
|
tests/DAT/test_Block.py
|
import unittest
from struct import error
from DAT.Block import Block, BlockChain
class TestBlock(unittest.TestCase):
def test_parse_valid_blob(self):
blob = "\xEF\xBE\xAD\xDE"
blob += 'A' * 252
block = Block.from_blob(blob)
self.assertEqual(block.size, 256)
self.assertEqual(block.next_block_offset, 0xdeadbeef)
self.assertEqual(block.data, 'A' * 252)
def test_parse_invalid_blob(self):
with self.assertRaises(error):
Block.from_blob('A')
class TestBlockChain(unittest.TestCase):
pass
if __name__ == "__main__":
unittest.main()
|
Add unit tests for Block
|
Add unit tests for Block
|
Python
|
mit
|
ccressent/acnav
|
Add unit tests for Block
|
import unittest
from struct import error
from DAT.Block import Block, BlockChain
class TestBlock(unittest.TestCase):
def test_parse_valid_blob(self):
blob = "\xEF\xBE\xAD\xDE"
blob += 'A' * 252
block = Block.from_blob(blob)
self.assertEqual(block.size, 256)
self.assertEqual(block.next_block_offset, 0xdeadbeef)
self.assertEqual(block.data, 'A' * 252)
def test_parse_invalid_blob(self):
with self.assertRaises(error):
Block.from_blob('A')
class TestBlockChain(unittest.TestCase):
pass
if __name__ == "__main__":
unittest.main()
|
<commit_before><commit_msg>Add unit tests for Block<commit_after>
|
import unittest
from struct import error
from DAT.Block import Block, BlockChain
class TestBlock(unittest.TestCase):
def test_parse_valid_blob(self):
blob = "\xEF\xBE\xAD\xDE"
blob += 'A' * 252
block = Block.from_blob(blob)
self.assertEqual(block.size, 256)
self.assertEqual(block.next_block_offset, 0xdeadbeef)
self.assertEqual(block.data, 'A' * 252)
def test_parse_invalid_blob(self):
with self.assertRaises(error):
Block.from_blob('A')
class TestBlockChain(unittest.TestCase):
pass
if __name__ == "__main__":
unittest.main()
|
Add unit tests for Blockimport unittest
from struct import error
from DAT.Block import Block, BlockChain
class TestBlock(unittest.TestCase):
def test_parse_valid_blob(self):
blob = "\xEF\xBE\xAD\xDE"
blob += 'A' * 252
block = Block.from_blob(blob)
self.assertEqual(block.size, 256)
self.assertEqual(block.next_block_offset, 0xdeadbeef)
self.assertEqual(block.data, 'A' * 252)
def test_parse_invalid_blob(self):
with self.assertRaises(error):
Block.from_blob('A')
class TestBlockChain(unittest.TestCase):
pass
if __name__ == "__main__":
unittest.main()
|
<commit_before><commit_msg>Add unit tests for Block<commit_after>import unittest
from struct import error
from DAT.Block import Block, BlockChain
class TestBlock(unittest.TestCase):
def test_parse_valid_blob(self):
blob = "\xEF\xBE\xAD\xDE"
blob += 'A' * 252
block = Block.from_blob(blob)
self.assertEqual(block.size, 256)
self.assertEqual(block.next_block_offset, 0xdeadbeef)
self.assertEqual(block.data, 'A' * 252)
def test_parse_invalid_blob(self):
with self.assertRaises(error):
Block.from_blob('A')
class TestBlockChain(unittest.TestCase):
pass
if __name__ == "__main__":
unittest.main()
|
|
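The fixtures imply a concrete wire format: a 256-byte block whose first four bytes are a little-endian uint32 offset of the next block, followed by 252 payload bytes, with struct.error surfacing on undersized input. A reconstruction consistent with those assertions (illustrative, not the project's actual code):

from struct import unpack

class Block(object):
    def __init__(self, size, next_block_offset, data):
        self.size = size
        self.next_block_offset = next_block_offset
        self.data = data

    @classmethod
    def from_blob(cls, blob):
        (next_offset,) = unpack("<I", blob[:4])   # raises struct.error if too short
        return cls(len(blob), next_offset, blob[4:])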
d15a45301689e1ac8b1e75c1152f9218f08ef82f
|
tests/cli/test_cache.py
|
tests/cli/test_cache.py
|
""" Tests for ``yatsm cache``
"""
from click.testing import CliRunner
import pytest
from yatsm.cli.main import cli
def test_cli_cache_pass_1(example_timeseries, example_results, modify_config,
tmpdir):
""" Run correctly
"""
mod_cfg = {'dataset': {'cache_line_dir': tmpdir.mkdir('cache').strpath}}
with modify_config(example_timeseries['config'], mod_cfg) as cfg:
runner = CliRunner()
result = runner.invoke(cli, [
'-v', 'cache',
cfg, '1', '1'
])
assert result.exit_code == 0
def test_cli_cache_pass_2(example_timeseries, example_results, modify_config,
tmpdir):
""" Run correctly, interlacing
"""
mod_cfg = {'dataset': {'cache_line_dir': tmpdir.mkdir('cache').strpath}}
with modify_config(example_timeseries['config'], mod_cfg) as cfg:
runner = CliRunner()
result = runner.invoke(cli, [
'-v', 'cache',
'--interlace',
cfg, '1', '1'
])
assert result.exit_code == 0
|
Add tests for yatsm cache CLI
|
Add tests for yatsm cache CLI
|
Python
|
mit
|
ceholden/yatsm,c11/yatsm,valpasq/yatsm,c11/yatsm,valpasq/yatsm,ceholden/yatsm
|
Add tests for yatsm cache CLI
|
""" Tests for ``yatsm cache``
"""
from click.testing import CliRunner
import pytest
from yatsm.cli.main import cli
def test_cli_cache_pass_1(example_timeseries, example_results, modify_config,
tmpdir):
""" Run correctly
"""
mod_cfg = {'dataset': {'cache_line_dir': tmpdir.mkdir('cache').strpath}}
with modify_config(example_timeseries['config'], mod_cfg) as cfg:
runner = CliRunner()
result = runner.invoke(cli, [
'-v', 'cache',
cfg, '1', '1'
])
assert result.exit_code == 0
def test_cli_cache_pass_2(example_timeseries, example_results, modify_config,
tmpdir):
""" Run correctly, interlacing
"""
mod_cfg = {'dataset': {'cache_line_dir': tmpdir.mkdir('cache').strpath}}
with modify_config(example_timeseries['config'], mod_cfg) as cfg:
runner = CliRunner()
result = runner.invoke(cli, [
'-v', 'cache',
'--interlace',
cfg, '1', '1'
])
assert result.exit_code == 0
|
<commit_before><commit_msg>Add tests for yatsm cache CLI<commit_after>
|
""" Tests for ``yatsm cache``
"""
from click.testing import CliRunner
import pytest
from yatsm.cli.main import cli
def test_cli_cache_pass_1(example_timeseries, example_results, modify_config,
tmpdir):
""" Run correctly
"""
mod_cfg = {'dataset': {'cache_line_dir': tmpdir.mkdir('cache').strpath}}
with modify_config(example_timeseries['config'], mod_cfg) as cfg:
runner = CliRunner()
result = runner.invoke(cli, [
'-v', 'cache',
cfg, '1', '1'
])
assert result.exit_code == 0
def test_cli_cache_pass_2(example_timeseries, example_results, modify_config,
tmpdir):
""" Run correctly, interlacing
"""
mod_cfg = {'dataset': {'cache_line_dir': tmpdir.mkdir('cache').strpath}}
with modify_config(example_timeseries['config'], mod_cfg) as cfg:
runner = CliRunner()
result = runner.invoke(cli, [
'-v', 'cache',
'--interlace',
cfg, '1', '1'
])
assert result.exit_code == 0
|
Add tests for yatsm cache CLI""" Tests for ``yatsm cache``
"""
from click.testing import CliRunner
import pytest
from yatsm.cli.main import cli
def test_cli_cache_pass_1(example_timeseries, example_results, modify_config,
tmpdir):
""" Run correctly
"""
mod_cfg = {'dataset': {'cache_line_dir': tmpdir.mkdir('cache').strpath}}
with modify_config(example_timeseries['config'], mod_cfg) as cfg:
runner = CliRunner()
result = runner.invoke(cli, [
'-v', 'cache',
cfg, '1', '1'
])
assert result.exit_code == 0
def test_cli_cache_pass_2(example_timeseries, example_results, modify_config,
tmpdir):
""" Run correctly, interlacing
"""
mod_cfg = {'dataset': {'cache_line_dir': tmpdir.mkdir('cache').strpath}}
with modify_config(example_timeseries['config'], mod_cfg) as cfg:
runner = CliRunner()
result = runner.invoke(cli, [
'-v', 'cache',
'--interlace',
cfg, '1', '1'
])
assert result.exit_code == 0
|
<commit_before><commit_msg>Add tests for yatsm cache CLI<commit_after>""" Tests for ``yatsm cache``
"""
from click.testing import CliRunner
import pytest
from yatsm.cli.main import cli
def test_cli_cache_pass_1(example_timeseries, example_results, modify_config,
tmpdir):
""" Run correctly
"""
mod_cfg = {'dataset': {'cache_line_dir': tmpdir.mkdir('cache').strpath}}
with modify_config(example_timeseries['config'], mod_cfg) as cfg:
runner = CliRunner()
result = runner.invoke(cli, [
'-v', 'cache',
cfg, '1', '1'
])
assert result.exit_code == 0
def test_cli_cache_pass_2(example_timeseries, example_results, modify_config,
tmpdir):
""" Run correctly, interlacing
"""
mod_cfg = {'dataset': {'cache_line_dir': tmpdir.mkdir('cache').strpath}}
with modify_config(example_timeseries['config'], mod_cfg) as cfg:
runner = CliRunner()
result = runner.invoke(cli, [
'-v', 'cache',
'--interlace',
cfg, '1', '1'
])
assert result.exit_code == 0
|
|
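Both tests lean on a modify_config helper that yields a patched copy of the YAML config. Its implementation is not shown in this record; a plausible reconstruction, inferred purely from the call sites (everything below is an assumption about the fixture's shape):

import contextlib
import tempfile

import yaml

@contextlib.contextmanager
def modify_config(config_path, overrides):
    with open(config_path) as src:
        cfg = yaml.safe_load(src)
    for section, values in overrides.items():
        cfg.setdefault(section, {}).update(values)   # shallow merge per section
    with tempfile.NamedTemporaryFile('w', suffix='.yaml', delete=False) as dst:
        yaml.safe_dump(cfg, dst)
    yield dst.name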
d954988fad088278e3a58d16b5928d5918a7e79a
|
microphone_match_gui.py
|
microphone_match_gui.py
|
#!/usr/bin/python2
import signal
signal.signal(signal.SIGINT, signal.SIG_DFL)
import sys
import os
from PyQt5.QtWidgets import (QApplication, QWidget, QPushButton, QVBoxLayout, QHBoxLayout, QLabel, QSizePolicy)
from PyQt5.QtCore import (QCoreApplication, QThread)
import microphone_match
def main(argv):
app = QApplication(argv)
w = MainWindow()
sys.exit(app.exec_())
class RecorderMatcherThread(QThread):
def __init__(self, matcher):
super(self.__class__, self).__init__()
self.matcher = matcher
def __del__(self):
self.wait()
def run(self):
# database_file_path = QApplication.instance().arguments()[1] if len(QApplication.instance().arguments())>1 else os.path.join(os.path.dirname(os.path.abspath(__file__)),'fpdbase.pklz')
# microphone_match.recordAndMatch(database_file_path)
# self.recordButton.setText('Record')
self.result = self.matcher.recordAndMatch()
class MainWindow(QWidget):
def __init__(self):
super(MainWindow,self).__init__()
self.initUI()
def initUI(self):
self.resize(400,50)
self.move(400,600)
self.setWindowTitle('Swingzam')
self.continuousMatcher = microphone_match.ContinuousMatcher()
self.matcherThread = RecorderMatcherThread(self.continuousMatcher)
self.matcherThread.finished.connect(self.recordingFinished)
self.recordButton = QPushButton('Record')
self.recordButton.resize(self.recordButton.sizeHint())
self.recordButton.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Preferred)
self.recordButton.clicked.connect(self.recordAndMatch)
self.resultLabel = QLabel('Ready')
self.recResHBox = QHBoxLayout()
self.recResHBox.addWidget(self.recordButton)
self.recResHBox.addWidget(self.resultLabel)
self.mainVBox = QVBoxLayout()
self.mainVBox.addLayout(self.recResHBox)
self.mainVBox.addStretch(1)
self.setLayout(self.mainVBox)
self.show()
def recordAndMatch(self):
self.recordButton.setText('Recording')
self.matcherThread.start()
def recordingFinished(self):
self.resultLabel.setText(self.matcherThread.result)
self.recordButton.setText('Record')
if __name__ == '__main__':
main(sys.argv)
|
Add PyQt5 gui for the matcher
|
Add PyQt5 gui for the matcher
|
Python
|
mit
|
piotrwicijowski/whistler,piotrwicijowski/whistler
|
Add PyQt5 gui for the matcher
|
#!/usr/bin/python2
import signal
signal.signal(signal.SIGINT, signal.SIG_DFL)
import sys
import os
from PyQt5.QtWidgets import (QApplication, QWidget, QPushButton, QVBoxLayout, QHBoxLayout, QLabel, QSizePolicy)
from PyQt5.QtCore import (QCoreApplication, QThread)
import microphone_match
def main(argv):
app = QApplication(argv)
w = MainWindow()
sys.exit(app.exec_())
class RecorderMatcherThread(QThread):
def __init__(self, matcher):
super(self.__class__, self).__init__()
self.matcher = matcher
def __del__(self):
self.wait()
def run(self):
# database_file_path = QApplication.instance().arguments()[1] if len(QApplication.instance().arguments())>1 else os.path.join(os.path.dirname(os.path.abspath(__file__)),'fpdbase.pklz')
# microphone_match.recordAndMatch(database_file_path)
# self.recordButton.setText('Record')
self.result = self.matcher.recordAndMatch()
class MainWindow(QWidget):
def __init__(self):
super(MainWindow,self).__init__()
self.initUI()
def initUI(self):
self.resize(400,50)
self.move(400,600)
self.setWindowTitle('Swingzam')
self.continuousMatcher = microphone_match.ContinuousMatcher()
self.matcherThread = RecorderMatcherThread(self.continuousMatcher)
self.matcherThread.finished.connect(self.recordingFinished)
self.recordButton = QPushButton('Record')
self.recordButton.resize(self.recordButton.sizeHint())
self.recordButton.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Preferred)
self.recordButton.clicked.connect(self.recordAndMatch)
self.resultLabel = QLabel('Ready')
self.recResHBox = QHBoxLayout()
self.recResHBox.addWidget(self.recordButton)
self.recResHBox.addWidget(self.resultLabel)
self.mainVBox = QVBoxLayout()
self.mainVBox.addLayout(self.recResHBox)
self.mainVBox.addStretch(1)
self.setLayout(self.mainVBox)
self.show()
def recordAndMatch(self):
self.recordButton.setText('Recording')
self.matcherThread.start()
def recordingFinished(self):
self.resultLabel.setText(self.matcherThread.result)
self.recordButton.setText('Record')
if __name__ == '__main__':
main(sys.argv)
|
<commit_before><commit_msg>Add PyQt5 gui for the matcher<commit_after>
|
#!/usr/bin/python2
import signal
signal.signal(signal.SIGINT, signal.SIG_DFL)
import sys
import os
from PyQt5.QtWidgets import (QApplication, QWidget, QPushButton, QVBoxLayout, QHBoxLayout, QLabel, QSizePolicy)
from PyQt5.QtCore import (QCoreApplication, QThread)
import microphone_match
def main(argv):
app = QApplication(argv)
w = MainWindow()
sys.exit(app.exec_())
class RecorderMatcherThread(QThread):
def __init__(self, matcher):
super(self.__class__, self).__init__()
self.matcher = matcher
def __del__(self):
self.wait()
def run(self):
# database_file_path = QApplication.instance().arguments()[1] if len(QApplication.instance().arguments())>1 else os.path.join(os.path.dirname(os.path.abspath(__file__)),'fpdbase.pklz')
# microphone_match.recordAndMatch(database_file_path)
# self.recordButton.setText('Record')
self.result = self.matcher.recordAndMatch()
class MainWindow(QWidget):
def __init__(self):
super(MainWindow,self).__init__()
self.initUI()
def initUI(self):
self.resize(400,50)
self.move(400,600)
self.setWindowTitle('Swingzam')
self.continuousMatcher = microphone_match.ContinuousMatcher()
self.matcherThread = RecorderMatcherThread(self.continuousMatcher)
self.matcherThread.finished.connect(self.recordingFinished)
self.recordButton = QPushButton('Record')
self.recordButton.resize(self.recordButton.sizeHint())
self.recordButton.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Preferred)
self.recordButton.clicked.connect(self.recordAndMatch)
self.resultLabel = QLabel('Ready')
self.recResHBox = QHBoxLayout()
self.recResHBox.addWidget(self.recordButton)
self.recResHBox.addWidget(self.resultLabel)
self.mainVBox = QVBoxLayout()
self.mainVBox.addLayout(self.recResHBox)
self.mainVBox.addStretch(1)
self.setLayout(self.mainVBox)
self.show()
def recordAndMatch(self):
self.recordButton.setText('Recording')
self.matcherThread.start()
def recordingFinished(self):
self.resultLabel.setText(self.matcherThread.result)
self.recordButton.setText('Record')
if __name__ == '__main__':
main(sys.argv)
|
Add PyQt5 gui for the matcher#!/usr/bin/python2
import signal
signal.signal(signal.SIGINT, signal.SIG_DFL)
import sys
import os
from PyQt5.QtWidgets import (QApplication, QWidget, QPushButton, QVBoxLayout, QHBoxLayout, QLabel, QSizePolicy)
from PyQt5.QtCore import (QCoreApplication, QThread)
import microphone_match
def main(argv):
app = QApplication(argv)
w = MainWindow()
sys.exit(app.exec_())
class RecorderMatcherThread(QThread):
def __init__(self, matcher):
super(self.__class__, self).__init__()
self.matcher = matcher
def __del__(self):
self.wait()
def run(self):
# database_file_path = QApplication.instance().arguments()[1] if len(QApplication.instance().arguments())>1 else os.path.join(os.path.dirname(os.path.abspath(__file__)),'fpdbase.pklz')
# microphone_match.recordAndMatch(database_file_path)
# self.recordButton.setText('Record')
self.result = self.matcher.recordAndMatch()
class MainWindow(QWidget):
def __init__(self):
super(MainWindow,self).__init__()
self.initUI()
def initUI(self):
self.resize(400,50)
self.move(400,600)
self.setWindowTitle('Swingzam')
self.continuousMatcher = microphone_match.ContinuousMatcher()
self.matcherThread = RecorderMatcherThread(self.continuousMatcher)
self.matcherThread.finished.connect(self.recordingFinished)
self.recordButton = QPushButton('Record')
self.recordButton.resize(self.recordButton.sizeHint())
self.recordButton.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Preferred)
self.recordButton.clicked.connect(self.recordAndMatch)
self.resultLabel = QLabel('Ready')
self.recResHBox = QHBoxLayout()
self.recResHBox.addWidget(self.recordButton)
self.recResHBox.addWidget(self.resultLabel)
self.mainVBox = QVBoxLayout()
self.mainVBox.addLayout(self.recResHBox)
self.mainVBox.addStretch(1)
self.setLayout(self.mainVBox)
self.show()
def recordAndMatch(self):
self.recordButton.setText('Recording')
self.matcherThread.start()
def recordingFinished(self):
self.resultLabel.setText(self.matcherThread.result)
self.recordButton.setText('Record')
if __name__ == '__main__':
main(sys.argv)
|
<commit_before><commit_msg>Add PyQt5 gui for the matcher<commit_after>#!/usr/bin/python2
import signal
signal.signal(signal.SIGINT, signal.SIG_DFL)
import sys
import os
from PyQt5.QtWidgets import (QApplication, QWidget, QPushButton, QVBoxLayout, QHBoxLayout, QLabel, QSizePolicy)
from PyQt5.QtCore import (QCoreApplication, QThread)
import microphone_match
def main(argv):
app = QApplication(argv)
w = MainWindow()
sys.exit(app.exec_())
class RecorderMatcherThread(QThread):
def __init__(self, matcher):
super(self.__class__, self).__init__()
self.matcher = matcher
def __del__(self):
self.wait()
def run(self):
# database_file_path = QApplication.instance().arguments()[1] if len(QApplication.instance().arguments())>1 else os.path.join(os.path.dirname(os.path.abspath(__file__)),'fpdbase.pklz')
# microphone_match.recordAndMatch(database_file_path)
# self.recordButton.setText('Record')
self.result = self.matcher.recordAndMatch()
class MainWindow(QWidget):
def __init__(self):
super(MainWindow,self).__init__()
self.initUI()
def initUI(self):
self.resize(400,50)
self.move(400,600)
self.setWindowTitle('Swingzam')
self.continuousMatcher = microphone_match.ContinuousMatcher()
self.matcherThread = RecorderMatcherThread(self.continuousMatcher)
self.matcherThread.finished.connect(self.recordingFinished)
self.recordButton = QPushButton('Record')
self.recordButton.resize(self.recordButton.sizeHint())
self.recordButton.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Preferred)
self.recordButton.clicked.connect(self.recordAndMatch)
self.resultLabel = QLabel('Ready')
self.recResHBox = QHBoxLayout()
self.recResHBox.addWidget(self.recordButton)
self.recResHBox.addWidget(self.resultLabel)
self.mainVBox = QVBoxLayout()
self.mainVBox.addLayout(self.recResHBox)
self.mainVBox.addStretch(1)
self.setLayout(self.mainVBox)
self.show()
def recordAndMatch(self):
self.recordButton.setText('Recording')
self.matcherThread.start()
def recordingFinished(self):
self.resultLabel.setText(self.matcherThread.result)
self.recordButton.setText('Record')
if __name__ == '__main__':
main(sys.argv)
|
|
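A stripped-down sketch of the QThread finished-signal pattern the GUI above uses (assumption: a headless QCoreApplication stands in for the widget application):

import sys
from PyQt5.QtCore import QCoreApplication, QThread

class Worker(QThread):
    def run(self):
        self.result = 'done'  # long-running work goes here, off the main thread

app = QCoreApplication(sys.argv)
worker = Worker()
worker.finished.connect(app.quit)  # mirrors matcherThread.finished -> recordingFinished
worker.start()
app.exec_()
print(worker.result)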
8eaa6c9c80b097186887e2ebbfcc561f07630d2b
|
heufybot/utils/signaltimeout.py
|
heufybot/utils/signaltimeout.py
|
# Taken from https://gist.github.com/ekimekim/b01158dc36c6e2155046684511595d57
import os
import signal
import subprocess
class Timeout(Exception):
"""This is raised when a timeout occurs"""
class SignalTimeout(object):
"""Context manager that raises a Timeout if the inner block takes too long.
Will even interrupt hard loops in C by raising from an OS signal."""
def __init__(self, timeout, signal=signal.SIGUSR1, to_raise=Timeout):
self.timeout = float(timeout)
self.signal = signal
self.to_raise = to_raise
self.old_handler = None
self.proc = None
def __enter__(self):
self.old_handler = signal.signal(self.signal, self._on_signal)
self.proc = subprocess.Popen('sleep {timeout} && kill -{signal} {pid}'.format(
timeout = self.timeout,
signal = self.signal,
pid = os.getpid(),
),
shell = True,
)
def __exit__(self, *exc_info):
if self.proc.poll() is None:
self.proc.kill()
my_handler = signal.signal(self.signal, self.old_handler)
assert my_handler == self._on_signal, "someone else has been fiddling with our signal handler?"
def _on_signal(self, signum, frame):
if self.old_handler:
self.old_handler(signum, frame)
raise self.to_raise
|
Add a signal timeout handler
|
Add a signal timeout handler
Many thanks to @ekimekim for writing this.
|
Python
|
mit
|
Heufneutje/PyHeufyBot,Heufneutje/PyHeufyBot
|
Add a signal timeout handler
Many thanks to @ekimekim for writing this.
|
# Taken from https://gist.github.com/ekimekim/b01158dc36c6e2155046684511595d57
import os
import signal
import subprocess
class Timeout(Exception):
"""This is raised when a timeout occurs"""
class SignalTimeout(object):
"""Context manager that raises a Timeout if the inner block takes too long.
Will even interrupt hard loops in C by raising from an OS signal."""
def __init__(self, timeout, signal=signal.SIGUSR1, to_raise=Timeout):
self.timeout = float(timeout)
self.signal = signal
self.to_raise = to_raise
self.old_handler = None
self.proc = None
def __enter__(self):
self.old_handler = signal.signal(self.signal, self._on_signal)
self.proc = subprocess.Popen('sleep {timeout} && kill -{signal} {pid}'.format(
timeout = self.timeout,
signal = self.signal,
pid = os.getpid(),
),
shell = True,
)
def __exit__(self, *exc_info):
if self.proc.poll() is None:
self.proc.kill()
my_handler = signal.signal(self.signal, self.old_handler)
assert my_handler == self._on_signal, "someone else has been fiddling with our signal handler?"
def _on_signal(self, signum, frame):
if self.old_handler:
self.old_handler(signum, frame)
raise self.to_raise
|
<commit_before><commit_msg>Add a signal timeout handler
Many thanks to @ekimekim for writing this.<commit_after>
|
# Taken from https://gist.github.com/ekimekim/b01158dc36c6e2155046684511595d57
import os
import signal
import subprocess
class Timeout(Exception):
"""This is raised when a timeout occurs"""
class SignalTimeout(object):
"""Context manager that raises a Timeout if the inner block takes too long.
Will even interrupt hard loops in C by raising from an OS signal."""
def __init__(self, timeout, signal=signal.SIGUSR1, to_raise=Timeout):
self.timeout = float(timeout)
self.signal = signal
self.to_raise = to_raise
self.old_handler = None
self.proc = None
def __enter__(self):
self.old_handler = signal.signal(self.signal, self._on_signal)
self.proc = subprocess.Popen('sleep {timeout} && kill -{signal} {pid}'.format(
timeout = self.timeout,
signal = self.signal,
pid = os.getpid(),
),
shell = True,
)
def __exit__(self, *exc_info):
if self.proc.poll() is None:
self.proc.kill()
my_handler = signal.signal(self.signal, self.old_handler)
assert my_handler == self._on_signal, "someone else has been fiddling with our signal handler?"
def _on_signal(self, signum, frame):
if self.old_handler:
self.old_handler(signum, frame)
raise self.to_raise
|
Add a signal timeout handler
Many thanks to @ekimekim for writing this.# Taken from https://gist.github.com/ekimekim/b01158dc36c6e2155046684511595d57
import os
import signal
import subprocess
class Timeout(Exception):
"""This is raised when a timeout occurs"""
class SignalTimeout(object):
"""Context manager that raises a Timeout if the inner block takes too long.
Will even interrupt hard loops in C by raising from an OS signal."""
def __init__(self, timeout, signal=signal.SIGUSR1, to_raise=Timeout):
self.timeout = float(timeout)
self.signal = signal
self.to_raise = to_raise
self.old_handler = None
self.proc = None
def __enter__(self):
self.old_handler = signal.signal(self.signal, self._on_signal)
self.proc = subprocess.Popen('sleep {timeout} && kill -{signal} {pid}'.format(
timeout = self.timeout,
signal = self.signal,
pid = os.getpid(),
),
shell = True,
)
def __exit__(self, *exc_info):
if self.proc.poll() is None:
self.proc.kill()
my_handler = signal.signal(self.signal, self.old_handler)
assert my_handler == self._on_signal, "someone else has been fiddling with our signal handler?"
def _on_signal(self, signum, frame):
if self.old_handler:
self.old_handler(signum, frame)
raise self.to_raise
|
<commit_before><commit_msg>Add a signal timeout handler
Many thanks to @ekimekim for writing this.<commit_after># Taken from https://gist.github.com/ekimekim/b01158dc36c6e2155046684511595d57
import os
import signal
import subprocess
class Timeout(Exception):
"""This is raised when a timeout occurs"""
class SignalTimeout(object):
"""Context manager that raises a Timeout if the inner block takes too long.
Will even interrupt hard loops in C by raising from an OS signal."""
def __init__(self, timeout, signal=signal.SIGUSR1, to_raise=Timeout):
self.timeout = float(timeout)
self.signal = signal
self.to_raise = to_raise
self.old_handler = None
self.proc = None
def __enter__(self):
self.old_handler = signal.signal(self.signal, self._on_signal)
self.proc = subprocess.Popen('sleep {timeout} && kill -{signal} {pid}'.format(
timeout = self.timeout,
signal = self.signal,
pid = os.getpid(),
),
shell = True,
)
def __exit__(self, *exc_info):
if self.proc.poll() is None:
self.proc.kill()
my_handler = signal.signal(self.signal, self.old_handler)
assert my_handler == self._on_signal, "someone else has been fiddling with our signal handler?"
def _on_signal(self, signum, frame):
if self.old_handler:
self.old_handler(signum, frame)
raise self.to_raise
|
|
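A usage sketch for the context manager above (POSIX-only, since it shells out to sleep/kill; the 2-second budget is arbitrary):

import time
from heufybot.utils.signaltimeout import SignalTimeout, Timeout

try:
    with SignalTimeout(2):
        while True:          # stands in for a loop that may hang
            time.sleep(0.1)
except Timeout:
    print('interrupted after ~2 seconds')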
ba030dc7877632b8eaf59fdbca4d11daf4225f95
|
database.py
|
database.py
|
#!/usr/bin/python2
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String, Sequence
from sqlalchemy.orm import sessionmaker
engine = create_engine('sqlite:///testdb.sqlite', echo=True)
Base = declarative_base()
class User(Base):
__tablename__ = 'user'
id = Column(Integer,Sequence('user_id_seq'), primary_key=True)
name = Column(String)
fullname = Column(String)
password = Column(String)
def __repr__(self):
return "<User(name='%s', fullname='%s', password='%s')>" % (self.name, self.fullname, self.password)
def main():
# engine = create_engine('sqlite:///:memory:', echo=True)
# Base = declarative_base()
Base.metadata.create_all(engine)
print User.__table__
ed_user = User(name='ed', fullname='Ed Jones', password='edspassword')
print ed_user.name
Session = sessionmaker(bind=engine)
session = Session()
session.add(ed_user)
our_user = session.query(User).filter_by(name='ed').first()
print our_user
if __name__ == '__main__':
print "Start"
main()
print "End"
|
Add SQL tutorial file with SQLAlchemy
|
Add SQL tutorial file with SQLAlchemy
|
Python
|
unlicense
|
phunghv/meu_sender
|
Add SQL tutorial file with SQLAlchemy
|
#!/usr/bin/python2
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String, Sequence
from sqlalchemy.orm import sessionmaker
engine = create_engine('sqlite:///testdb.sqlite', echo=True)
Base = declarative_base()
class User(Base):
__tablename__ = 'user'
id = Column(Integer,Sequence('user_id_seq'), primary_key=True)
name = Column(String)
fullname = Column(String)
password = Column(String)
def __repr__(self):
return "<User(name='%s', fullname='%s', password='%s')>" % (self.name, self.fullname, self.password)
def main():
# engine = create_engine('sqlite:///:memory:', echo=True)
# Base = declarative_base()
Base.metadata.create_all(engine)
print User.__table__
ed_user = User(name='ed', fullname='Ed Jones', password='edspassword')
print ed_user.name
Session = sessionmaker(bind=engine)
session = Session()
session.add(ed_user)
our_user = session.query(User).filter_by(name='ed').first()
print our_user
if __name__ == '__main__':
print "Start"
main()
print "End"
|
<commit_before><commit_msg>Add SQL tutorial file with SQLAlchemy<commit_after>
|
#!/usr/bin/python2
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String, Sequence
from sqlalchemy.orm import sessionmaker
engine = create_engine('sqlite:///testdb.sqlite', echo=True)
Base = declarative_base()
class User(Base):
__tablename__ = 'user'
id = Column(Integer,Sequence('user_id_seq'), primary_key=True)
name = Column(String)
fullname = Column(String)
password = Column(String)
def __repr__(self):
return "<User(name='%s', fullname='%s', password='%s')>" % (self.name, self.fullname, self.password)
def main():
# engine = create_engine('sqlite:///:memory:', echo=True)
# Base = declarative_base()
Base.metadata.create_all(engine)
print User.__table__
ed_user = User(name='ed', fullname='Ed Jones', password='edspassword')
print ed_user.name
Session = sessionmaker(bind=engine)
session = Session()
session.add(ed_user)
our_user = session.query(User).filter_by(name='ed').first()
print our_user
if __name__ == '__main__':
print "Start"
main()
print "End"
|
Add SQL tutorial file with SQLAlchemy#!/usr/bin/python2
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String, Sequence
from sqlalchemy.orm import sessionmaker
engine = create_engine('sqlite:///testdb.sqlite', echo=True)
Base = declarative_base()
class User(Base):
__tablename__ = 'user'
id = Column(Integer,Sequence('user_id_seq'), primary_key=True)
name = Column(String)
fullname = Column(String)
password = Column(String)
def __repr__(self):
return "<User(name='%s', fullname='%s', password='%s')>" % (self.name, self.fullname, self.password)
def main():
# engine = create_engine('sqlite:///:memory:', echo=True)
# Base = declarative_base()
Base.metadata.create_all(engine)
print User.__table__
ed_user = User(name='ed', fullname='Ed Jones', password='edspassword')
print ed_user.name
Session = sessionmaker(bind=engine)
session = Session()
session.add(ed_user)
our_user = session.query(User).filter_by(name='ed').first()
print our_user
if __name__ == '__main__':
print "Start"
main()
print "End"
|
<commit_before><commit_msg>Add SQL tutorial file with SQLAlchemy<commit_after>#!/usr/bin/python2
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String, Sequence
from sqlalchemy.orm import sessionmaker
engine = create_engine('sqlite:///testdb.sqlite', echo=True)
Base = declarative_base()
class User(Base):
__tablename__ = 'user'
id = Column(Integer,Sequence('user_id_seq'), primary_key=True)
name = Column(String)
fullname = Column(String)
password = Column(String)
def __repr__(self):
return "<User(name='%s', fullname='%s', password='%s')>" % (self.name, self.fullname, self.password)
def main():
# engine = create_engine('sqlite:///:memory:', echo=True)
# Base = declarative_base()
Base.metadata.create_all(engine)
print User.__table__
ed_user = User(name='ed', fullname='Ed Jones', password='edspassword')
print ed_user.name
Session = sessionmaker(bind=engine)
session = Session()
session.add(ed_user)
our_user = session.query(User).filter_by(name='ed').first()
print our_user
if __name__ == '__main__':
print "Start"
main()
print "End"
|
|
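For comparison, the same flow in Python 3 with an explicit commit (a sketch, not part of the commit; note the tutorial above never calls session.commit(), so its insert only lives as long as the session):

from sqlalchemy import create_engine, Column, Integer, String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker

Base = declarative_base()

class User(Base):
    __tablename__ = 'user'
    id = Column(Integer, primary_key=True)
    name = Column(String)

engine = create_engine('sqlite:///:memory:')
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()
session.add(User(name='ed'))
session.commit()                      # persist instead of leaving the row pending
print(session.query(User).filter_by(name='ed').first().name)  # -> ed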
117d74fa54e8557e2ab09f5d11a61d8ad4e7736a
|
kolibri/utils/tests/test_cli_at_import.py
|
kolibri/utils/tests/test_cli_at_import.py
|
"""
Tests for `kolibri.utils.cli` module.
"""
from __future__ import absolute_import
from __future__ import print_function
from mock import patch
@patch("sqlalchemy.create_engine")
def test_status_no_db_access(create_engine_mock):
"""
Tests that status does not try to access the database
"""
try:
from kolibri.utils import cli
cli.status.callback()
except SystemExit:
pass
create_engine_mock.assert_not_called()
@patch("sqlalchemy.create_engine")
def test_stop_no_db_access(create_engine_mock):
"""
    Tests that stop does not try to access the database
"""
try:
from kolibri.utils import cli
cli.stop.callback()
except SystemExit:
pass
create_engine_mock.assert_not_called()
|
Add tests that check for database access during status and stop commands.
|
Add tests that check for database access during status and stop commands.
|
Python
|
mit
|
mrpau/kolibri,indirectlylit/kolibri,indirectlylit/kolibri,learningequality/kolibri,indirectlylit/kolibri,indirectlylit/kolibri,mrpau/kolibri,learningequality/kolibri,learningequality/kolibri,learningequality/kolibri,mrpau/kolibri,mrpau/kolibri
|
Add tests that check for database access during status and stop commands.
|
"""
Tests for `kolibri.utils.cli` module.
"""
from __future__ import absolute_import
from __future__ import print_function
from mock import patch
@patch("sqlalchemy.create_engine")
def test_status_no_db_access(create_engine_mock):
"""
Tests that status does not try to access the database
"""
try:
from kolibri.utils import cli
cli.status.callback()
except SystemExit:
pass
create_engine_mock.assert_not_called()
@patch("sqlalchemy.create_engine")
def test_stop_no_db_access(create_engine_mock):
"""
    Tests that stop does not try to access the database
"""
try:
from kolibri.utils import cli
cli.stop.callback()
except SystemExit:
pass
create_engine_mock.assert_not_called()
|
<commit_before><commit_msg>Add tests that check for database access during status and stop commands.<commit_after>
|
"""
Tests for `kolibri.utils.cli` module.
"""
from __future__ import absolute_import
from __future__ import print_function
from mock import patch
@patch("sqlalchemy.create_engine")
def test_status_no_db_access(create_engine_mock):
"""
Tests that status does not try to access the database
"""
try:
from kolibri.utils import cli
cli.status.callback()
except SystemExit:
pass
create_engine_mock.assert_not_called()
@patch("sqlalchemy.create_engine")
def test_stop_no_db_access(create_engine_mock):
"""
    Tests that stop does not try to access the database
"""
try:
from kolibri.utils import cli
cli.stop.callback()
except SystemExit:
pass
create_engine_mock.assert_not_called()
|
Add tests that check for database access during status and stop commands."""
Tests for `kolibri.utils.cli` module.
"""
from __future__ import absolute_import
from __future__ import print_function
from mock import patch
@patch("sqlalchemy.create_engine")
def test_status_no_db_access(create_engine_mock):
"""
Tests that status does not try to access the database
"""
try:
from kolibri.utils import cli
cli.status.callback()
except SystemExit:
pass
create_engine_mock.assert_not_called()
@patch("sqlalchemy.create_engine")
def test_stop_no_db_access(create_engine_mock):
"""
    Tests that stop does not try to access the database
"""
try:
from kolibri.utils import cli
cli.stop.callback()
except SystemExit:
pass
create_engine_mock.assert_not_called()
|
<commit_before><commit_msg>Add tests that check for database access during status and stop commands.<commit_after>"""
Tests for `kolibri.utils.cli` module.
"""
from __future__ import absolute_import
from __future__ import print_function
from mock import patch
@patch("sqlalchemy.create_engine")
def test_status_no_db_access(create_engine_mock):
"""
Tests that status does not try to access the database
"""
try:
from kolibri.utils import cli
cli.status.callback()
except SystemExit:
pass
create_engine_mock.assert_not_called()
@patch("sqlalchemy.create_engine")
def test_stop_no_db_access(create_engine_mock):
"""
    Tests that stop does not try to access the database
"""
try:
from kolibri.utils import cli
cli.stop.callback()
except SystemExit:
pass
create_engine_mock.assert_not_called()
|
|
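The mock pattern these tests depend on, in isolation (generic sketch; assumes sqlalchemy is importable, as it is in the kolibri environment): patching sqlalchemy.create_engine swaps it out for the decorated scope, and assert_not_called fails loudly if anything touched it.

from mock import patch

@patch('sqlalchemy.create_engine')
def check(create_engine_mock):
    # nothing connects to a database here, so the mock must stay untouched
    create_engine_mock.assert_not_called()

check()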
112fbecf8458541b2277d877c58837ec93911a35
|
tests/test_particles.py
|
tests/test_particles.py
|
from parcels import Grid, ScipyParticle, JITParticle, Variable
import numpy as np
import pytest
ptype = {'scipy': ScipyParticle, 'jit': JITParticle}
@pytest.fixture
def grid(xdim=100, ydim=100):
U = np.zeros((xdim, ydim), dtype=np.float32)
V = np.zeros((xdim, ydim), dtype=np.float32)
lon = np.linspace(0, 1, xdim, dtype=np.float32)
lat = np.linspace(0, 1, ydim, dtype=np.float32)
return Grid.from_data(U, lon, lat, V, lon, lat, mesh='flat')
@pytest.mark.parametrize('mode', ['scipy', 'jit'])
def test_variable_init(grid, mode, npart=10):
class TestParticle(ptype[mode]):
p_float = Variable('p_float', dtype=np.float32, default=10.)
p_double = Variable('p_double', dtype=np.float64, default=11.)
p_int = Variable('p_int', dtype=np.int32, default=12)
pset = grid.ParticleSet(npart, pclass=TestParticle,
lon=np.linspace(0, 1, npart, dtype=np.float32),
lat=np.linspace(1, 0, npart, dtype=np.float32))
assert np.array([isinstance(p.p_float, np.float32) for p in pset]).all()
assert np.allclose([p.p_float for p in pset], 10., rtol=1e-12)
assert np.array([isinstance(p.p_double, np.float64) for p in pset]).all()
assert np.allclose([p.p_double for p in pset], 11., rtol=1e-12)
assert np.array([isinstance(p.p_int, np.int32) for p in pset]).all()
assert np.allclose([p.p_int for p in pset], 12., rtol=1e-12)
|
Add new test for custom variable initialisation to check types
|
Variable: Add new test for custom variable initialisation to check types
|
Python
|
mit
|
OceanPARCELS/parcels,OceanPARCELS/parcels
|
Variable: Add new test for custom variable initialisation to check types
|
from parcels import Grid, ScipyParticle, JITParticle, Variable
import numpy as np
import pytest
ptype = {'scipy': ScipyParticle, 'jit': JITParticle}
@pytest.fixture
def grid(xdim=100, ydim=100):
U = np.zeros((xdim, ydim), dtype=np.float32)
V = np.zeros((xdim, ydim), dtype=np.float32)
lon = np.linspace(0, 1, xdim, dtype=np.float32)
lat = np.linspace(0, 1, ydim, dtype=np.float32)
return Grid.from_data(U, lon, lat, V, lon, lat, mesh='flat')
@pytest.mark.parametrize('mode', ['scipy', 'jit'])
def test_variable_init(grid, mode, npart=10):
class TestParticle(ptype[mode]):
p_float = Variable('p_float', dtype=np.float32, default=10.)
p_double = Variable('p_double', dtype=np.float64, default=11.)
p_int = Variable('p_int', dtype=np.int32, default=12)
pset = grid.ParticleSet(npart, pclass=TestParticle,
lon=np.linspace(0, 1, npart, dtype=np.float32),
lat=np.linspace(1, 0, npart, dtype=np.float32))
assert np.array([isinstance(p.p_float, np.float32) for p in pset]).all()
assert np.allclose([p.p_float for p in pset], 10., rtol=1e-12)
assert np.array([isinstance(p.p_double, np.float64) for p in pset]).all()
assert np.allclose([p.p_double for p in pset], 11., rtol=1e-12)
assert np.array([isinstance(p.p_int, np.int32) for p in pset]).all()
assert np.allclose([p.p_int for p in pset], 12., rtol=1e-12)
|
<commit_before><commit_msg>Variable: Add new test for custom variable initialisation to check types<commit_after>
|
from parcels import Grid, ScipyParticle, JITParticle, Variable
import numpy as np
import pytest
ptype = {'scipy': ScipyParticle, 'jit': JITParticle}
@pytest.fixture
def grid(xdim=100, ydim=100):
U = np.zeros((xdim, ydim), dtype=np.float32)
V = np.zeros((xdim, ydim), dtype=np.float32)
lon = np.linspace(0, 1, xdim, dtype=np.float32)
lat = np.linspace(0, 1, ydim, dtype=np.float32)
return Grid.from_data(U, lon, lat, V, lon, lat, mesh='flat')
@pytest.mark.parametrize('mode', ['scipy', 'jit'])
def test_variable_init(grid, mode, npart=10):
class TestParticle(ptype[mode]):
p_float = Variable('p_float', dtype=np.float32, default=10.)
p_double = Variable('p_double', dtype=np.float64, default=11.)
p_int = Variable('p_int', dtype=np.int32, default=12)
pset = grid.ParticleSet(npart, pclass=TestParticle,
lon=np.linspace(0, 1, npart, dtype=np.float32),
lat=np.linspace(1, 0, npart, dtype=np.float32))
assert np.array([isinstance(p.p_float, np.float32) for p in pset]).all()
assert np.allclose([p.p_float for p in pset], 10., rtol=1e-12)
assert np.array([isinstance(p.p_double, np.float64) for p in pset]).all()
assert np.allclose([p.p_double for p in pset], 11., rtol=1e-12)
assert np.array([isinstance(p.p_int, np.int32) for p in pset]).all()
assert np.allclose([p.p_int for p in pset], 12., rtol=1e-12)
|
Variable: Add new test for custom variable initialisation to check typesfrom parcels import Grid, ScipyParticle, JITParticle, Variable
import numpy as np
import pytest
ptype = {'scipy': ScipyParticle, 'jit': JITParticle}
@pytest.fixture
def grid(xdim=100, ydim=100):
U = np.zeros((xdim, ydim), dtype=np.float32)
V = np.zeros((xdim, ydim), dtype=np.float32)
lon = np.linspace(0, 1, xdim, dtype=np.float32)
lat = np.linspace(0, 1, ydim, dtype=np.float32)
return Grid.from_data(U, lon, lat, V, lon, lat, mesh='flat')
@pytest.mark.parametrize('mode', ['scipy', 'jit'])
def test_variable_init(grid, mode, npart=10):
class TestParticle(ptype[mode]):
p_float = Variable('p_float', dtype=np.float32, default=10.)
p_double = Variable('p_double', dtype=np.float64, default=11.)
p_int = Variable('p_int', dtype=np.int32, default=12)
pset = grid.ParticleSet(npart, pclass=TestParticle,
lon=np.linspace(0, 1, npart, dtype=np.float32),
lat=np.linspace(1, 0, npart, dtype=np.float32))
assert np.array([isinstance(p.p_float, np.float32) for p in pset]).all()
assert np.allclose([p.p_float for p in pset], 10., rtol=1e-12)
assert np.array([isinstance(p.p_double, np.float64) for p in pset]).all()
assert np.allclose([p.p_double for p in pset], 11., rtol=1e-12)
assert np.array([isinstance(p.p_int, np.int32) for p in pset]).all()
assert np.allclose([p.p_int for p in pset], 12., rtol=1e-12)
|
<commit_before><commit_msg>Variable: Add new test for custom variable initialisation to check types<commit_after>from parcels import Grid, ScipyParticle, JITParticle, Variable
import numpy as np
import pytest
ptype = {'scipy': ScipyParticle, 'jit': JITParticle}
@pytest.fixture
def grid(xdim=100, ydim=100):
U = np.zeros((xdim, ydim), dtype=np.float32)
V = np.zeros((xdim, ydim), dtype=np.float32)
lon = np.linspace(0, 1, xdim, dtype=np.float32)
lat = np.linspace(0, 1, ydim, dtype=np.float32)
return Grid.from_data(U, lon, lat, V, lon, lat, mesh='flat')
@pytest.mark.parametrize('mode', ['scipy', 'jit'])
def test_variable_init(grid, mode, npart=10):
class TestParticle(ptype[mode]):
p_float = Variable('p_float', dtype=np.float32, default=10.)
p_double = Variable('p_double', dtype=np.float64, default=11.)
p_int = Variable('p_int', dtype=np.int32, default=12)
pset = grid.ParticleSet(npart, pclass=TestParticle,
lon=np.linspace(0, 1, npart, dtype=np.float32),
lat=np.linspace(1, 0, npart, dtype=np.float32))
assert np.array([isinstance(p.p_float, np.float32) for p in pset]).all()
assert np.allclose([p.p_float for p in pset], 10., rtol=1e-12)
assert np.array([isinstance(p.p_double, np.float64) for p in pset]).all()
assert np.allclose([p.p_double for p in pset], 11., rtol=1e-12)
assert np.array([isinstance(p.p_int, np.int32) for p in pset]).all()
assert np.allclose([p.p_int for p in pset], 12., rtol=1e-12)
|
|
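A minimal stand-in showing the dtype behaviour the test asserts (assumption: the real parcels Variable also drives JIT code generation, which is omitted here):

import numpy as np

class Variable(object):
    def __init__(self, name, dtype, default):
        self.name, self.dtype, self.default = name, dtype, default

    def initial(self):
        return self.dtype(self.default)  # cast the default to the declared dtype

v = Variable('p_int', np.int32, 12)
assert isinstance(v.initial(), np.int32)
assert np.allclose(v.initial(), 12., rtol=1e-12)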
ae10061cbff2017c28099682cccf9c7b9257cb54
|
python/security_test.py
|
python/security_test.py
|
#
# (C) Copyright IBM Corp. 2017
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import base64
import unittest
import security
class TestSecurity(unittest.TestCase):
def setUp(self):
self.VALID_OBF_PASSWORD = 'OBF:1v2j1uum1xtv1zej1zer1xtn1uvk1v1v'
self.VALID_BASE64_PASSWORD = 'B64:cGFzc3dvcmQ='
self.PLAIN_TEXT_PASSWORD = 'password'
def test_decode_obf_password(self):
self.assertEqual(security.decode_password(self.VALID_OBF_PASSWORD), self.PLAIN_TEXT_PASSWORD, 'Properly decode OBF passwords')
def test_decode_base64_password(self):
self.assertEqual(security.decode_password(self.VALID_BASE64_PASSWORD), self.PLAIN_TEXT_PASSWORD, 'Properly decode Base64 passwords')
def test_return_provided_password_when_no_encoding_scheme_provided(self):
self.assertEqual(security.decode_password(self.PLAIN_TEXT_PASSWORD), self.PLAIN_TEXT_PASSWORD, 'Properly return unencoded passwords')
if __name__ == "__main__":
unittest.main()
|
Add test coverage for security module
|
Add test coverage for security module
|
Python
|
apache-2.0
|
lresende/toree-gateway,lresende/toree-gateway
|
Add test coverage for security module
|
#
# (C) Copyright IBM Corp. 2017
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import base64
import unittest
import security
class TestSecurity(unittest.TestCase):
def setUp(self):
self.VALID_OBF_PASSWORD = 'OBF:1v2j1uum1xtv1zej1zer1xtn1uvk1v1v'
self.VALID_BASE64_PASSWORD = 'B64:cGFzc3dvcmQ='
self.PLAIN_TEXT_PASSWORD = 'password'
def test_decode_obf_password(self):
self.assertEqual(security.decode_password(self.VALID_OBF_PASSWORD), self.PLAIN_TEXT_PASSWORD, 'Properly decode OBF passwords')
def test_decode_base64_password(self):
self.assertEqual(security.decode_password(self.VALID_BASE64_PASSWORD), self.PLAIN_TEXT_PASSWORD, 'Properly decode Base64 passwords')
def test_return_provided_password_when_no_encoding_scheme_provided(self):
self.assertEqual(security.decode_password(self.PLAIN_TEXT_PASSWORD), self.PLAIN_TEXT_PASSWORD, 'Properly return unencoded passwords')
if __name__ == "__main__":
unittest.main()
|
<commit_before><commit_msg>Add test coverage for security module<commit_after>
|
#
# (C) Copyright IBM Corp. 2017
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import base64
import unittest
import security
class TestSecurity(unittest.TestCase):
def setUp(self):
self.VALID_OBF_PASSWORD = 'OBF:1v2j1uum1xtv1zej1zer1xtn1uvk1v1v'
self.VALID_BASE64_PASSWORD = 'B64:cGFzc3dvcmQ='
self.PLAIN_TEXT_PASSWORD = 'password'
def test_decode_obf_password(self):
self.assertEqual(security.decode_password(self.VALID_OBF_PASSWORD), self.PLAIN_TEXT_PASSWORD, 'Properly decode OBF passwords')
def test_decode_base64_password(self):
self.assertEqual(security.decode_password(self.VALID_BASE64_PASSWORD), self.PLAIN_TEXT_PASSWORD, 'Properly decode Base64 passwords')
def test_return_provided_password_when_no_encoding_scheme_provided(self):
self.assertEqual(security.decode_password(self.PLAIN_TEXT_PASSWORD), self.PLAIN_TEXT_PASSWORD, 'Properly return unencoded passwords')
if __name__ == "__main__":
unittest.main()
|
Add test coverage for security module#
# (C) Copyright IBM Corp. 2017
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import base64
import unittest
import security
class TestSecurity(unittest.TestCase):
def setUp(self):
self.VALID_OBF_PASSWORD = 'OBF:1v2j1uum1xtv1zej1zer1xtn1uvk1v1v'
self.VALID_BASE64_PASSWORD = 'B64:cGFzc3dvcmQ='
self.PLAIN_TEXT_PASSWORD = 'password'
def test_decode_obf_password(self):
self.assertEqual(security.decode_password(self.VALID_OBF_PASSWORD), self.PLAIN_TEXT_PASSWORD, 'Properly decode OBF passwords')
def test_decode_base64_password(self):
self.assertEqual(security.decode_password(self.VALID_BASE64_PASSWORD), self.PLAIN_TEXT_PASSWORD, 'Properly decode Base64 passwords')
def test_return_provided_password_when_no_encoding_scheme_provided(self):
self.assertEqual(security.decode_password(self.PLAIN_TEXT_PASSWORD), self.PLAIN_TEXT_PASSWORD, 'Properly return unencoded passwords')
if __name__ == "__main__":
unittest.main()
|
<commit_before><commit_msg>Add test coverage for security module<commit_after>#
# (C) Copyright IBM Corp. 2017
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import base64
import unittest
import security
class TestSecurity(unittest.TestCase):
def setUp(self):
self.VALID_OBF_PASSWORD = 'OBF:1v2j1uum1xtv1zej1zer1xtn1uvk1v1v'
self.VALID_BASE64_PASSWORD = 'B64:cGFzc3dvcmQ='
self.PLAIN_TEXT_PASSWORD = 'password'
def test_decode_obf_password(self):
self.assertEqual(security.decode_password(self.VALID_OBF_PASSWORD), self.PLAIN_TEXT_PASSWORD, 'Properly decode OBF passwords')
def test_decode_base64_password(self):
self.assertEqual(security.decode_password(self.VALID_BASE64_PASSWORD), self.PLAIN_TEXT_PASSWORD, 'Properly decode Base64 passwords')
def test_return_provided_password_when_no_encoding_scheme_provided(self):
self.assertEqual(security.decode_password(self.PLAIN_TEXT_PASSWORD), self.PLAIN_TEXT_PASSWORD, 'Properly return unencoded passwords')
if __name__ == "__main__":
unittest.main()
|
|
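The Base64 branch those tests exercise reduces to a prefix check plus base64.b64decode; a hedged re-implementation (the OBF branch follows Jetty's obfuscation scheme and is omitted):

import base64

def decode_password(value):
    # sketch of the scheme under test; the real module also handles 'OBF:' prefixes
    if value.startswith('B64:'):
        return base64.b64decode(value[len('B64:'):]).decode('utf-8')
    return value

assert decode_password('B64:cGFzc3dvcmQ=') == 'password'
assert decode_password('password') == 'password'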
cc046c9a70ebdfec404992f69c09c9fbbfc0b471
|
indra/sources/eidos/eidos_server.py
|
indra/sources/eidos/eidos_server.py
|
import json
from flask import Flask, request
from indra.sources.eidos.eidos_reader import EidosReader
app = Flask(__name__)
@app.route('/process_text', methods=['POST'])
def process_text():
text = request.json.get('text')
if not text:
        return json.dumps({})
res = er.process_text(text, 'json_ld')
return json.dumps(res)
if __name__ == '__main__':
er = EidosReader()
er.process_text('hello', 'json_ld')
app.run(host='0.0.0.0')
|
Add simple web service for Eidos reading
|
Add simple web service for Eidos reading
|
Python
|
bsd-2-clause
|
johnbachman/indra,johnbachman/indra,johnbachman/belpy,sorgerlab/indra,sorgerlab/belpy,sorgerlab/belpy,pvtodorov/indra,johnbachman/belpy,sorgerlab/indra,bgyori/indra,bgyori/indra,pvtodorov/indra,sorgerlab/indra,sorgerlab/belpy,johnbachman/belpy,bgyori/indra,pvtodorov/indra,johnbachman/indra,pvtodorov/indra
|
Add simple web service for Eidos reading
|
import json
from flask import Flask, request
from indra.sources.eidos.eidos_reader import EidosReader
app = Flask(__name__)
@app.route('/process_text', methods=['POST'])
def process_text():
text = request.json.get('text')
if not text:
        return json.dumps({})
res = er.process_text(text, 'json_ld')
return json.dumps(res)
if __name__ == '__main__':
er = EidosReader()
er.process_text('hello', 'json_ld')
app.run(host='0.0.0.0')
|
<commit_before><commit_msg>Add simple web service for Eidos reading<commit_after>
|
import json
from flask import Flask, request
from indra.sources.eidos.eidos_reader import EidosReader
app = Flask(__name__)
@app.route('/process_text', methods=['POST'])
def process_text():
text = request.json.get('text')
if not text:
        return json.dumps({})
res = er.process_text(text, 'json_ld')
return json.dumps(res)
if __name__ == '__main__':
er = EidosReader()
er.process_text('hello', 'json_ld')
app.run(host='0.0.0.0')
|
Add simple web service for Eidos readingimport json
from flask import Flask, request
from indra.sources.eidos.eidos_reader import EidosReader
app = Flask(__name__)
@app.route('/process_text', methods=['POST'])
def process_text():
text = request.json.get('text')
if not text:
        return json.dumps({})
res = er.process_text(text, 'json_ld')
return json.dumps(res)
if __name__ == '__main__':
er = EidosReader()
er.process_text('hello', 'json_ld')
app.run(host='0.0.0.0')
|
<commit_before><commit_msg>Add simple web service for Eidos reading<commit_after>import json
from flask import Flask, request
from indra.sources.eidos.eidos_reader import EidosReader
app = Flask(__name__)
@app.route('/process_text', methods=['POST'])
def process_text():
text = request.json.get('text')
if not text:
        return json.dumps({})
res = er.process_text(text, 'json_ld')
return json.dumps(res)
if __name__ == '__main__':
er = EidosReader()
er.process_text('hello', 'json_ld')
app.run(host='0.0.0.0')
|
|
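Once the server is running, the endpoint can be exercised with a POST of JSON text (assuming Flask's default port 5000):

import requests

resp = requests.post('http://localhost:5000/process_text',
                     json={'text': 'Rainfall increased crop yields.'})
print(resp.json())  # JSON-LD output from the Eidos reader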
f1d830609d460e8213252251bf3b512511032add
|
thinc/neural/tests/integration/test_relu_gradient.py
|
thinc/neural/tests/integration/test_relu_gradient.py
|
import pytest
@pytest.mark.skip
def test_relu_clips_negative(model, input_BI):
# TODO: There was a bug related to this
pass
@pytest.mark.skip
def test_relu_clipped_gradients_are_zero():
# TODO: There was a bug related to this
pass
|
Add placeholder for relu gradient tests
|
Add placeholder for relu gradient tests
|
Python
|
mit
|
spacy-io/thinc,explosion/thinc,explosion/thinc,explosion/thinc,spacy-io/thinc,explosion/thinc,spacy-io/thinc
|
Add placeholder for relu gradient tests
|
import pytest
@pytest.mark.skip
def test_relu_clips_negative(model, input_BI):
# TODO: There was a bug related to this
pass
@pytest.mark.skip
def test_relu_clipped_gradients_are_zero():
# TODO: There was a bug related to this
pass
|
<commit_before><commit_msg>Add placeholder for relu gradient tests<commit_after>
|
import pytest
@pytest.mark.skip
def test_relu_clips_negative(model, input_BI):
# TODO: There was a bug related to this
pass
@pytest.mark.skip
def test_relu_clipped_gradients_are_zero():
# TODO: There was a bug related to this
pass
|
Add placeholder for relu gradient testsimport pytest
@pytest.mark.skip
def test_relu_clips_negative(model, input_BI):
# TODO: There was a bug related to this
pass
@pytest.mark.skip
def test_relu_clipped_gradients_are_zero():
# TODO: There was a bug related to this
pass
|
<commit_before><commit_msg>Add placeholder for relu gradient tests<commit_after>import pytest
@pytest.mark.skip
def test_relu_clips_negative(model, input_BI):
# TODO: There was a bug related to this
pass
@pytest.mark.skip
def test_relu_clipped_gradients_are_zero():
# TODO: There was a bug related to this
pass
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.