_id stringlengths 2 7 | title stringlengths 1 88 | partition stringclasses 3 values | text stringlengths 75 19.8k | language stringclasses 1 value | meta_information dict |
|---|---|---|---|---|---|
def from_inline(cls: Type[UnlockType], inline: str) -> UnlockType:
    """
    Build an Unlock instance from its inline string representation.

    :param inline: Inline string format
    :return: a new instance of the class
    :raise MalformedDocumentError: when the string does not match re_inline
    """
    match = Unlock.re_inline.match(inline)
    if match is None:
        raise MalformedDocumentError("Inline input")
    idx = int(match.group(1))
    # Parse each space-separated token; drop the ones that do not yield a parameter.
    parsed = (UnlockParameter.from_parameter(token) for token in match.group(2).split(' '))
    params = [param for param in parsed if param]
    return cls(idx, params)
"resource": ""
} |
def inline(self) -> str:
    """
    Serialize this instance to its inline string representation.

    :return: "<index>:<space-separated parameters>"
    """
    rendered = [str(param) for param in self.parameters]
    return "{0}:{1}".format(self.index, ' '.join(rendered))
"resource": ""
} |
def from_compact(cls: Type[TransactionType], currency: str, compact: str) -> TransactionType:
    """
    Return Transaction instance from compact string format

    :param currency: Name of the currency
    :param compact: Compact format string
    :return: a new Transaction instance
    :raise MalformedDocumentError: when the header, comment or signature
        lines do not match the expected compact grammar
    """
    lines = compact.splitlines(True)
    n = 0  # index of the line currently being consumed
    # Header: TX:VERSION:NB_ISSUERS:NB_INPUTS:NB_UNLOCKS:NB_OUTPUTS:HAS_COMMENT:LOCKTIME
    header_data = Transaction.re_header.match(lines[n])
    if header_data is None:
        raise MalformedDocumentError("Compact TX header")
    version = int(header_data.group(1))
    issuers_num = int(header_data.group(2))
    inputs_num = int(header_data.group(3))
    unlocks_num = int(header_data.group(4))
    outputs_num = int(header_data.group(5))
    has_comment = int(header_data.group(6))
    locktime = int(header_data.group(7))
    n += 1
    blockstamp = None  # type: Optional[BlockUID]
    # A blockstamp line is only present from document version 3 onwards.
    if version >= 3:
        blockstamp = BlockUID.from_str(Transaction.parse_field("CompactBlockstamp", lines[n]))
        n += 1
    issuers = []
    inputs = []
    unlocks = []
    outputs = []
    signatures = []
    # The header counts tell us exactly how many lines each section occupies.
    for i in range(0, issuers_num):
        issuer = Transaction.parse_field("Pubkey", lines[n])
        issuers.append(issuer)
        n += 1
    for i in range(0, inputs_num):
        input_source = InputSource.from_inline(version, lines[n])
        inputs.append(input_source)
        n += 1
    for i in range(0, unlocks_num):
        unlock = Unlock.from_inline(lines[n])
        unlocks.append(unlock)
        n += 1
    for i in range(0, outputs_num):
        output_source = OutputSource.from_inline(lines[n])
        outputs.append(output_source)
        n += 1
    comment = ""
    if has_comment == 1:
        data = Transaction.re_compact_comment.match(lines[n])
        if data:
            comment = data.group(1)
            n += 1
        else:
            raise MalformedDocumentError("Compact TX Comment")
    # Every remaining line must be a signature.
    while n < len(lines):
        data = Transaction.re_signature.match(lines[n])
        if data:
            signatures.append(data.group(1))
            n += 1
        else:
            raise MalformedDocumentError("Compact TX Signatures")
    return cls(version, currency, blockstamp, locktime, issuers, inputs, unlocks, outputs, comment, signatures)
"resource": ""
} |
def from_signed_raw(cls: Type[TransactionType], raw: str) -> TransactionType:
    """
    Return a Transaction instance from a raw string format

    :param raw: Raw string format
    :return: a new Transaction instance
    """
    lines = raw.splitlines(True)
    n = 0  # index of the line currently being consumed
    version = int(Transaction.parse_field("Version", lines[n]))
    n += 1
    # The "Type" line is validated but its value is not used here.
    Transaction.parse_field("Type", lines[n])
    n += 1
    currency = Transaction.parse_field("Currency", lines[n])
    n += 1
    blockstamp = None  # type: Optional[BlockUID]
    # A blockstamp line is only present from document version 3 onwards.
    if version >= 3:
        blockstamp = BlockUID.from_str(Transaction.parse_field("Blockstamp", lines[n]))
        n += 1
    # NOTE(review): unlike from_compact, locktime is kept as the raw parsed
    # string here (not wrapped in int()) -- confirm callers do not rely on
    # it being an int.
    locktime = Transaction.parse_field("Locktime", lines[n])
    n += 1
    issuers = []
    inputs = []
    unlocks = []
    outputs = []
    signatures = []
    # Each section is introduced by a marker line (Issuers/Inputs/Unlocks/
    # Outputs) and runs until the next section's marker matches.
    if Transaction.re_issuers.match(lines[n]):
        n += 1
        while Transaction.re_inputs.match(lines[n]) is None:
            issuer = Transaction.parse_field("Pubkey", lines[n])
            issuers.append(issuer)
            n += 1
    if Transaction.re_inputs.match(lines[n]):
        n += 1
        while Transaction.re_unlocks.match(lines[n]) is None:
            input_source = InputSource.from_inline(version, lines[n])
            inputs.append(input_source)
            n += 1
    if Transaction.re_unlocks.match(lines[n]):
        n += 1
        while Transaction.re_outputs.match(lines[n]) is None:
            unlock = Unlock.from_inline(lines[n])
            unlocks.append(unlock)
            n += 1
    if Transaction.re_outputs.match(lines[n]) is not None:
        n += 1
        while not Transaction.re_comment.match(lines[n]):
            _output = OutputSource.from_inline(lines[n])
            outputs.append(_output)
            n += 1
    comment = Transaction.parse_field("Comment", lines[n])
    n += 1
    # Everything after the comment is expected to be signature lines.
    if Transaction.re_signature.match(lines[n]) is not None:
        while n < len(lines):
            sign = Transaction.parse_field("Signature", lines[n])
            signatures.append(sign)
            n += 1
    return cls(version, currency, blockstamp, locktime, issuers, inputs, unlocks, outputs,
               comment, signatures)
"resource": ""
} |
def compact(self) -> str:
    """
    Serialize the transaction to its compact string format.

    Layout::

        TX:VERSION:NB_ISSUERS:NB_INPUTS:NB_UNLOCKS:NB_OUTPUTS:HAS_COMMENT:LOCKTIME
        [BLOCKSTAMP]              (version >= 3 only)
        issuer public keys, inputs, unlocks, outputs,
        optional comment, then signatures

    :return: compact format string (newline-terminated)
    """
    parts = [
        "TX:{0}:{1}:{2}:{3}:{4}:{5}:{6}".format(
            self.version,
            len(self.issuers),
            len(self.inputs),
            len(self.unlocks),
            len(self.outputs),
            '1' if self.comment != "" else '0',
            self.locktime,
        )
    ]
    if self.version >= 3:
        parts.append("{0}".format(self.blockstamp))
    parts.extend("{0}".format(pubkey) for pubkey in self.issuers)
    parts.extend("{0}".format(source.inline(self.version)) for source in self.inputs)
    parts.extend("{0}".format(unlock.inline()) for unlock in self.unlocks)
    parts.extend("{0}".format(out.inline()) for out in self.outputs)
    if self.comment != "":
        parts.append("{0}".format(self.comment))
    parts.extend("{0}".format(sig) for sig in self.signatures)
    # Every line of the compact document is newline-terminated.
    return "\n".join(parts) + "\n"
"resource": ""
} |
def is_simple(tx: Transaction) -> bool:
    """
    Check whether a transaction is a basic ("simple") one.

    A simple transaction has exactly one issuer, at most two outputs,
    unlocks consisting of a single SIG parameter each, and outputs whose
    condition is a lone SIG (no right-hand operand).

    :param tx: the transaction to check
    :return: True if a simple transaction
    """
    ok = len(tx.issuers) == 1
    for unlock in tx.unlocks:
        params = unlock.parameters
        if len(params) != 1:
            ok = False
        elif type(params[0]) is not SIGParameter:
            ok = False
    for out_source in tx.outputs:
        condition = out_source.condition
        # A present right-hand operand means a compound condition.
        if getattr(condition, 'right', None):
            ok = False
        # The left operand must be a plain SIG condition.
        elif type(condition.left) is not output.SIG:
            ok = False
    return ok
"resource": ""
} |
def retry(self, retries, task_f, check_f=bool, wait_f=None):
    """
    Run a task up to `retries` times until its result passes a check.

    :param retries int: maximum number of attempts
    :param task_f func: the function to be run and observed
    :param func()bool check_f: predicate applied to task_f's result
    :param func()bool wait_f: optional callback invoked between attempts,
        receives the (0-based) attempt number
    :return: the first result accepted by check_f
    :raises RetryException: when every attempt fails the check
    """
    for attempt_no in range(retries):
        result = task_f()
        if check_f(result):
            return result
        # Not the last attempt: give the caller a chance to back off.
        if wait_f is not None and attempt_no < retries - 1:
            wait_f(attempt_no)
    raise RetryException("Giving up after {} failed attempt(s)".format(retries))
"resource": ""
} |
def gather(self, cmd):
    """
    Run a command and return (rc, stdout, stderr).

    If called while the `Dir` context manager is in effect, the process is
    guaranteed to execute in that directory even if it is no longer the
    current directory of the process (i.e. it is thread-safe).

    :param cmd: the command and arguments to execute (string or list)
    :return: (rc, stdout, stderr)
    """
    # Accept either a pre-split argument list or a single shell-like string.
    cmd_list = cmd if isinstance(cmd, list) else shlex.split(cmd)
    cwd = pushd.Dir.getcwd()
    cmd_info = '[cwd={}]: {}'.format(cwd, cmd_list)
    self.logger.debug("Executing:gather {}".format(cmd_info))
    process = subprocess.Popen(
        cmd_list, cwd=cwd,
        stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = process.communicate()
    rc = process.returncode
    self.logger.debug(
        "Process {}: exited with: {}\nstdout>>{}<<\nstderr>>{}<<\n".
        format(cmd_info, rc, out, err))
    return rc, out, err
"resource": ""
} |
def json_serial(obj):
    """
    JSON serializer hook for objects the stdlib encoder cannot handle.

    Dates and datetimes are rendered as ISO-8601 strings; any other type
    raises TypeError, mirroring json.JSONEncoder.default's contract.
    """
    if not isinstance(obj, (datetime.datetime, datetime.date)):
        raise TypeError('Type {} not serializable.'.format(type(obj)))
    return obj.isoformat()
"resource": ""
} |
def train_weather_predictor(
        location='Portland, OR',
        years=range(2013, 2016,),
        delays=(1, 2, 3),
        inputs=('Min Temperature', 'Max Temperature', 'Min Sea Level Pressure', u'Max Sea Level Pressure', 'WindDirDegrees',),
        outputs=(u'Max TemperatureF',),
        N_hidden=6,
        epochs=30,
        use_cache=False,
        verbosity=2,
        ):
    """Train a neural net to predict the weather for tomorrow based on past weather.

    Builds a linear single hidden layer neural net (multi-dimensional nonlinear regression).
    The dataset is a basic SupervisedDataSet rather than a SequentialDataSet, so the training set
    and the test set are sampled randomly. This means that historical data for one sample (the delayed
    input vector) will likely be used as the target for other samples.
    Uses CSVs scraped from wunderground (without an api key) to get daily weather for the years indicated.

    Arguments:
      location (str): City and state in standard US postal service format: "City, ST"
        alternatively an airport code like "PDX or LAX"
      delays (list of int): sample delays to use for the input tapped delay line.
        Positive and negative values are treated the same as sample counts into the past.
        default: [1, 2, 3], in z-transform notation: z^-1 + z^-2 + z^-3
      years (int or list of int): list of 4-digit years to download weather from wunderground
      inputs (list of int or list of str): column indices or labels for the inputs
      outputs (list of int or list of str): column indices or labels for the outputs
      N_hidden (int): number of hidden-layer nodes in the network
      epochs (int): number of training epochs to run
      use_cache (bool): reuse previously downloaded weather CSVs if available
      verbosity (int): verbosity level forwarded to the helper functions

    Returns:
      2-tuple: (trainer, DataFrame) -- the pybrain trainer after training and a
        DataFrame holding the delayed inputs, targets and network predictions
        aligned on the weather dates.
    """
    # NOTE(review): DataFrame.sort() was deprecated and later removed in
    # modern pandas; presumably an old pandas version is pinned -- confirm.
    df = weather.daily(location, years=years, use_cache=use_cache, verbosity=verbosity).sort()
    ds = util.dataset_from_dataframe(df, normalize=False, delays=delays, inputs=inputs, outputs=outputs, verbosity=verbosity)
    nn = util.ann_from_ds(ds, N_hidden=N_hidden, verbosity=verbosity)
    trainer = util.build_trainer(nn, ds=ds, verbosity=verbosity)
    trainer.trainEpochs(epochs)
    # Column labels: one per (input, delay) pair, then targets, then predictions.
    columns = []
    for delay in delays:
        columns += [inp + "[-{}]".format(delay) for inp in inputs]
    columns += list(outputs)
    columns += ['Predicted {}'.format(outp) for outp in outputs]
    # One row per training sample: delayed inputs + target + network output.
    table = [list(i) + list(t) + list(trainer.module.activate(i)) for i, t in zip(trainer.ds['input'], trainer.ds['target'])]
    # The first max(delays) dates have no complete delay line, so they are dropped.
    df = pd.DataFrame(table, columns=columns, index=df.index[max(delays):])
    return trainer, df
"resource": ""
} |
def oneday_weather_forecast(
        location='Portland, OR',
        inputs=('Min Temperature', 'Mean Temperature', 'Max Temperature', 'Max Humidity', 'Mean Humidity', 'Min Humidity', 'Max Sea Level Pressure', 'Mean Sea Level Pressure', 'Min Sea Level Pressure', 'Wind Direction'),
        outputs=('Min Temperature', 'Mean Temperature', 'Max Temperature', 'Max Humidity'),
        date=None,
        epochs=200,
        delays=(1, 2, 3, 4),
        num_years=4,
        use_cache=False,
        verbosity=1,
        ):
    """ Provide a weather forecast for tomorrow based on historical weather at that location

    Arguments:
      location (str): city/state or airport code passed to weather.daily
      inputs / outputs: weather columns used as network inputs and predictions
      date: forecast anchor date (defaults to today)
      epochs (int): training epochs for the underlying predictor
      delays: tapped-delay-line sample offsets
      num_years (int): how many years of history to train on
      use_cache (bool): reuse cached weather CSVs for the initial download

    Returns:
      dict: keys 'trainer' (the trained pybrain trainer) and 'yesterday',
        'today', 'tomorrow' -- each a dict of predicted output values plus
        a 'date' entry.
    """
    date = make_date(date or datetime.datetime.now().date())
    num_years = int(num_years or 10)
    # Train on the num_years window ending at the anchor date's year.
    years = range(date.year - num_years, date.year + 1)
    # NOTE(review): DataFrame.sort() is deprecated/removed in modern pandas -- confirm pinned version.
    df = weather.daily(location, years=years, use_cache=use_cache, verbosity=verbosity).sort()
    # because up-to-date weather history was cached above, can use that cache, regardless of use_cache kwarg
    trainer, df = train_weather_predictor(
        location,
        years=years,
        delays=delays,
        inputs=inputs,
        outputs=outputs,
        epochs=epochs,
        verbosity=verbosity,
        use_cache=True,
        )
    nn = trainer.module
    forecast = {'trainer': trainer}
    # Re-run the network on the last two training samples for context ...
    yesterday = dict(zip(outputs, nn.activate(trainer.ds['input'][-2])))
    forecast['yesterday'] = update_dict(yesterday, {'date': df.index[-2].date()})
    today = dict(zip(outputs, nn.activate(trainer.ds['input'][-1])))
    forecast['today'] = update_dict(today, {'date': df.index[-1].date()})
    # ... and on a freshly built delay line for the actual forecast.
    ds = util.input_dataset_from_dataframe(df[-max(delays):], delays=delays, inputs=inputs, normalize=False, verbosity=0)
    tomorrow = dict(zip(outputs, nn.activate(ds['input'][-1])))
    forecast['tomorrow'] = update_dict(tomorrow, {'date': (df.index[-1] + datetime.timedelta(1)).date()})
    return forecast
"resource": ""
} |
def run_competition(builders=[], task=BalanceTask(), Optimizer=HillClimber, rounds=3, max_eval=20, N_hidden=3, verbosity=0):
    """ pybrain buildNetwork builds a subtly different network structure than build_ann... so compete them!

    NOTE(review): `builders=[]` is a mutable default argument and
    `task=BalanceTask()` is constructed once at import time -- both are
    shared across calls; consider None sentinels.

    Arguments:
      task (Task): task to compete at
      Optimizer (class): pybrain.Optimizer class to instantiate for each competitor
      rounds (int): number of times to run the competition
      max_eval (int): number of objective function evaluations that the optimizer is allowed
        in each round
      N_hidden (int): number of hidden nodes in each network being competed

    The functional difference that I can see is that:
      buildNetwork connects the bias to the output
      build_ann does not
    The api differences are:
      build_ann allows heterogeneous layer types but the output layer is always linear
      buildNetwork allows specification of the output layer type

    Returns:
      tuple: (results, means) -- per-round tuples of (best_score, seconds,
        network) for each builder, and the per-builder averages of score
        and elapsed time.
    """
    results = []
    # The two reference implementations always compete, after any extras.
    builders = list(builders) + [buildNetwork, util.build_ann]
    for r in range(rounds):
        heat = []
        # FIXME: shuffle the order of the builders to keep things fair
        # (like switching sides of the tennis court)
        for builder in builders:
            try:
                competitor = builder(task.outdim, N_hidden, task.indim, verbosity=verbosity)
            except NetworkError:
                # presumably raised by builders that reject the verbosity
                # kwarg -- confirm.
                competitor = builder(task.outdim, N_hidden, task.indim)
            # TODO: verify that a full reset is actually happening
            task.reset()
            optimizer = Optimizer(task, competitor, maxEvaluations=max_eval)
            t0 = time.time()
            nn, nn_best = optimizer.learn()
            t1 = time.time()
            heat += [(nn_best, t1-t0, nn)]
        results += [tuple(heat)]
        if verbosity >= 0:
            print([competitor_scores[:2] for competitor_scores in heat])
    # # alternatively:
    # agent = ( pybrain.rl.agents.OptimizationAgent(net, HillClimber())
    #           or
    #           pybrain.rl.agents.LearningAgent(net, pybrain.rl.learners.ENAC()) )
    # exp = pybrain.rl.experiments.EpisodicExperiment(task, agent).doEpisodes(100)
    # Average the score (j=0) and elapsed time (j=1) across rounds per builder.
    means = [[np.array([r[i][j] for r in results]).mean() for i in range(len(results[0]))] for j in range(2)]
    if verbosity > -1:
        print('Mean Performance:')
        print(means)
    perfi, speedi = np.argmax(means[0]), np.argmin(means[1])
    print('And the winner for performance is ... Algorithm #{} (0-offset array index [{}])'.format(perfi+1, perfi))
    print('And the winner for speed is ... Algorithm #{} (0-offset array index [{}])'.format(speedi+1, speedi))
    return results, means
"resource": ""
} |
def environ(context):
    """Retrieves the environment for a particular SETSHELL context

    Runs Idiap's ``dosetshell`` helper for the given context, sources the
    resulting temporary shell file and returns the complete environment as
    a dict.  Falls back to (a copy of) the current ``os.environ`` when not
    running in an Idiap setup.

    :param context: SETSHELL context name (e.g. ``grid``)
    :return: dict mapping environment variable names to values
    :raise OSError: when one of the helper executables cannot be run
    """
    if 'BASEDIRSETSHELL' not in os.environ:
        # It seems that we are in a hostile environment
        # try to source the Idiap-wide shell
        idiap_source = "/idiap/resource/software/initfiles/shrc"
        if os.path.exists(idiap_source):
            logger.debug("Sourcing: '%s'"%idiap_source)
            try:
                command = ['bash', '-c', 'source %s && env' % idiap_source]
                pi = subprocess.Popen(command, stdout = subprocess.PIPE)
                # overwrite the default environment
                for line in pi.stdout:
                    line = str_(line)
                    (key, _, value) = line.partition("=")
                    os.environ[key.strip()] = value.strip()
            except OSError as e:
                # occurs when the file is not executable or not found
                pass
        # in case the BASEDIRSETSHELL environment variable is not set,
        # we are not at Idiap,
        # and so we don't have to set any additional variables.
        if 'BASEDIRSETSHELL' not in os.environ:
            return dict(os.environ)
    BASEDIRSETSHELL = os.environ['BASEDIRSETSHELL']
    dosetshell = '%s/setshell/bin/dosetshell' % BASEDIRSETSHELL
    command = [dosetshell, '-s', 'sh', context]
    # First things first, we get the path to the temp file created by dosetshell
    try:
        logger.debug("Executing: '%s'", ' '.join(command))
        p = subprocess.Popen(command, stdout = subprocess.PIPE)
    except OSError as e:
        # occurs when the file is not executable or not found
        raise OSError("Error executing '%s': %s (%d)" % (' '.join(command), e.strerror, e.errno))
    try:
        source = str_(p.communicate()[0]).strip()
    except KeyboardInterrupt: # the user CTRL-C'ed
        os.kill(p.pid, signal.SIGTERM)
        sys.exit(signal.SIGTERM)
    # We have now the name of the source file, source it and erase it
    command2 = ['bash', '-c', 'source %s && env' % source]
    try:
        logger.debug("Executing: '%s'", ' '.join(command2))
        p2 = subprocess.Popen(command2, stdout = subprocess.PIPE)
    except OSError as e:
        # occurs when the file is not executable or not found
        raise OSError("Error executing '%s': %s (%d)" % (' '.join(command2), e.strerror, e.errno))
    # Start from the current environment and overlay the sourced variables.
    new_environ = dict(os.environ)
    for line in p2.stdout:
        line = str_(line)
        (key, _, value) = line.partition("=")
        new_environ[key.strip()] = value.strip()
    try:
        p2.communicate()
    except KeyboardInterrupt: # the user CTRL-C'ed
        os.kill(p2.pid, signal.SIGTERM)
        sys.exit(signal.SIGTERM)
    if os.path.exists(source): os.unlink(source)
    logger.debug("Discovered environment for context '%s':", context)
    for k in sorted(new_environ.keys()):
        logger.debug(" %s = %s", k, new_environ[k])
    return new_environ
"resource": ""
} |
def sexec(context, command, error_on_nonzero=True):
    """Executes a command within a particular Idiap SETSHELL context

    :param context: context name (string) or an environment dict as
        returned by :func:`environ`
    :param command: command list to execute
    :param error_on_nonzero: when True, raise RuntimeError on a non-zero
        exit status; otherwise only log it at debug level
    :return: the stripped stdout of the command (stderr is merged in)
    """
    import six
    env = environ(context) if isinstance(context, six.string_types) else context
    try:
        logger.debug("Executing: '%s'", ' '.join(command))
        proc = subprocess.Popen(command, stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT, env=env)
        stdout, _ = proc.communicate()  # stderr is merged into stdout
        if proc.returncode != 0:
            message = "Execution of '%s' exited with status != 0 (%d): %s" % \
                (' '.join(command), proc.returncode, str_(stdout))
            if error_on_nonzero:
                raise RuntimeError(message)
            logger.debug(message)
        return stdout.strip()
    except KeyboardInterrupt:  # the user CTRL-C'ed
        os.kill(proc.pid, signal.SIGTERM)
        sys.exit(signal.SIGTERM)
"resource": ""
} |
def get_dates_in_period(start=None, top=None, step=1, step_dict=None):
    """Return a list of dates from `start` to `top`, inclusive.

    :param start: first date of the period (defaults to today)
    :param top: last date of the period (defaults to `start` + one step)
    :param step: step size in days, used when `step_dict` is not given
    :param step_dict: optional keyword arguments for
        `dateutil.relativedelta.relativedelta` (e.g. ``{'months': 1}``);
        when non-empty it takes precedence over `step`
    :return: list of datetimes spaced one step apart
    """
    # BUG FIX: `step_dict` used to be a mutable default argument ({});
    # use a None sentinel instead (an empty dict behaves the same here).
    delta = relativedelta(**step_dict) if step_dict else timedelta(days=step)
    start = start or datetime.today()
    top = top or start + delta
    dates = []
    current = start
    while current <= top:
        dates.append(current)
        current += delta
    return dates
"resource": ""
} |
def localize_date(date, city):
    """ Attach the timezone of `city` to a naive datetime.

    Date: naive datetime to localize
    City: tz database timezone name. Example: 'Asia/Qatar', 'America/New York'..
    """
    timezone = pytz.timezone(city)
    return timezone.localize(date, is_dst=None)
"resource": ""
} |
def get_month_from_date_str(date_str, lang=DEFAULT_DATE_LANG):
    """Find the localized month abbreviation contained in `date_str`.

    :param date_str: free-form date string to search (case-insensitive)
    :param lang: language key into the module-level LOCALES mapping
    :return: tuple ``(number_of_month, abbr_name)`` on success, or an
        empty tuple when no month abbreviation is found
    """
    lowered = date_str.lower()
    # Capture the month abbreviations for the requested locale.
    with calendar.different_locale(LOCALES[lang]):
        abbreviations = list(calendar.month_abbr)
    # Index 0 is the empty string placeholder, skipped by the truthiness check.
    for month_num, abbreviation in enumerate(abbreviations):
        if abbreviation and abbreviation.lower() in lowered:
            return month_num, abbreviation
    return ()
"resource": ""
} |
def replace_month_abbr_with_num(date_str, lang=DEFAULT_DATE_LANG):
    """Replace the month abbreviation in `date_str` with its month number."""
    month_num, abbreviation = get_month_from_date_str(date_str, lang)
    return re.sub(abbreviation, str(month_num), date_str, flags=re.IGNORECASE)
"resource": ""
} |
def translate_month_abbr(
        date_str,
        source_lang=DEFAULT_DATE_LANG,
        target_lang=DEFAULT_DATE_LANG):
    """Translate the month abbreviation in `date_str` between two locales.

    :param date_str: string containing a month abbreviation
    :param source_lang: language key of the abbreviation currently present
    :param target_lang: language key to translate the abbreviation into
    :return: `date_str` with the abbreviation replaced (case-insensitively)
    """
    month_num, source_abbr = get_month_from_date_str(date_str, source_lang)
    # Look up the same month's abbreviation in the target locale.
    with calendar.different_locale(LOCALES[target_lang]):
        target_abbr = calendar.month_abbr[month_num]
    return re.sub(
        source_abbr, target_abbr, date_str, flags=re.IGNORECASE)
"resource": ""
} |
def merge_datetime(date, time='', date_format='%d/%m/%Y', time_format='%H:%M'):
    """Create a ``datetime`` object from separate date and time strings.

    :param date: date string, parsed with `date_format`
    :param time: optional time string, parsed with `time_format`;
        when empty the result is at midnight
    :return: combined datetime
    """
    day = datetime.strptime(date, date_format)
    if not time:
        return day
    parsed_time = datetime.strptime(time, time_format)
    return datetime.combine(day.date(), parsed_time.time())
"resource": ""
} |
def display_list(prefix, l, color):
    """ Prints a file list to terminal, allows colouring output.

    :param prefix: string prepended to every printed path
    :param l: iterable of dicts, each with a 'path' key
    :param color: colour name forwarded to `colored`
    """
    # NOTE: Python 2 print statement; `colored` presumably comes from
    # termcolor -- confirm against the file's imports.
    for itm in l: print colored(prefix + itm['path'], color)
"resource": ""
} |
def pfx_path(path):
    """ Return `path` guaranteed to begin with the OS path separator. """
    if path[0] == os.path.sep:
        return path
    return os.path.sep + path
"resource": ""
} |
def file_or_default(path, default, function = None):
    """ Read a file, returning `default` when the file does not exist.

    :param path: file to read via file_get_contents
    :param default: value returned when the file is missing (ENOENT)
    :param function: optional transform applied to the file contents
    """
    try:
        contents = file_get_contents(path)
        return function(contents) if function is not None else contents
    except IOError as e:
        # Only a missing file maps to the default; other I/O errors propagate.
        if e.errno == errno.ENOENT:
            return default
        raise
"resource": ""
} |
def make_dirs_if_dont_exist(path):
    """ Create every missing directory component of `path` (best-effort). """
    # Ensure a trailing slash so dirname() keeps the final component.
    if path[-1] != '/':
        path += '/'
    directory = os.path.dirname(path)
    if directory != '':
        try:
            os.makedirs(directory)
        except OSError:
            # Directory already exists (or cannot be created): best-effort.
            pass
"resource": ""
} |
def cpjoin(*args):
    """ Custom path join: strips a leading '/' from every fragment, joins
    them, and re-roots the result when the first fragment was rooted. """
    rooted = args[0].startswith('/')
    stripped = [a[1:] if a.startswith('/') else a for a in args]
    joined = os.path.join(*stripped)
    return os.path.sep + joined if rooted else joined
"resource": ""
} |
def get_single_file_info(f_path, int_path):
    """ Return creation and last-modification metadata for a single file.

    :param f_path: path of the file on disk
    :param int_path: internal path relative to a root directory
    :return: dict with 'path', 'created' and 'last_mod' keys
    """
    return {
        'path': force_unicode(int_path),
        'created': os.path.getctime(f_path),
        'last_mod': os.path.getmtime(f_path),
    }
"resource": ""
} |
def hash_file(file_path, block_size = 65536):
    """ Return the sha256 hex digest of a file, read in fixed-size chunks.

    :param file_path: path of the file to hash
    :param block_size: read chunk size in bytes
    """
    digest = hashlib.sha256()
    with open(file_path, 'rb') as handle:
        # iter() with a sentinel stops as soon as read() returns b''.
        for chunk in iter(lambda: handle.read(block_size), b''):
            digest.update(chunk)
    return digest.hexdigest()
"resource": ""
} |
def get_file_list(path):
    """ Recursively list all files in the file system below `path`.

    :return: list of dicts as produced by get_single_file_info, whose
        'path' entries are relative to `path`
    """
    found = []
    def walk(real_dir, rel_dir=os.path.sep):
        for entry in os.listdir(real_dir):
            real_path = cpjoin(real_dir, entry)
            if os.path.isdir(real_path):
                walk(real_path, cpjoin(rel_dir, entry))
            elif os.path.isfile(real_path):
                found.append(get_single_file_info(real_path, cpjoin(rel_dir, entry)))
    walk(path)
    return found
"resource": ""
} |
def find_manifest_changes(new_file_state, old_file_state):
    """ Diff two manifests of file metadata.

    :param new_file_state: iterable of file-info dicts (current state)
    :param old_file_state: dict mapping path -> file-info dict (previous state)
    :return: dict mapping path -> file-info dict with an added 'status'
        key of 'new', 'changed' or 'deleted'
    """
    prev_state_dict = copy.deepcopy(old_file_state)
    changed_files = {}
    # Files present now are either brand new or possibly modified.
    for itm in new_file_state:
        if itm['path'] in prev_state_dict:
            d_itm = prev_state_dict.pop(itm['path'])
            # If the file has been modified
            if itm['last_mod'] != d_itm['last_mod']:
                n_itm = itm.copy()
                n_itm['status'] = 'changed'
                changed_files[itm['path']] = n_itm
            # else: the file has not changed, nothing to record
        else:
            n_itm = itm.copy()
            n_itm['status'] = 'new'
            changed_files[itm['path']] = n_itm
    # Anything remaining in the old file state has been deleted locally.
    # BUG FIX: .values() instead of the Python-2-only .itervalues(), so the
    # helper also works under Python 3 (identical behavior on Python 2).
    for itm in prev_state_dict.values():
        n_itm = itm.copy()
        n_itm['status'] = 'deleted'
        changed_files[itm['path']] = n_itm
    return changed_files
"resource": ""
} |
q44229 | is_archive | train | def is_archive(filename):
'''returns boolean of whether this filename looks like an archive'''
for archive in archive_formats:
if filename.endswith(archive_formats[archive]['suffix']):
return True
return False | python | {
"resource": ""
} |
def unarchive(filename,output_dir='.'):
    '''Unpack `filename` into `output_dir`; return True on success.'''
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    # Dispatch on the first archive format whose suffix matches.
    for fmt in archive_formats.values():
        if filename.endswith(fmt['suffix']):
            return subprocess.call(fmt['command'](output_dir, filename)) == 0
    return False
"resource": ""
} |
def flatten(nested_list):
    '''Flatten arbitrarily nested lists into one flat list.

    Only `list` instances are descended into; tuples and other iterables
    are kept as single elements.'''
    flat = []
    for element in nested_list:
        if isinstance(element, list):
            flat.extend(flatten(element))
        else:
            flat.append(element)
    return flat
"resource": ""
} |
def log(fname,msg):
    ''' Append `msg` to the log file `fname`, preceded by a timestamp line. '''
    stamp = datetime.datetime.now().strftime('%m-%d-%Y %H:%M:\n')
    with open(fname, 'a') as handle:
        handle.write(stamp + msg + '\n')
"resource": ""
} |
def hash(filename):
    '''returns string of MD5 hash of given filename

    NOTE(review): the name shadows the builtin ``hash``.
    NOTE(review): ``'%x'`` does not zero-pad each byte, so the result is
    shorter than the canonical 32-char digest and two different digests
    can collapse to the same string; ``m.hexdigest()`` (or ``'%02x'``)
    would be canonical, but changing it would invalidate any stored
    hashes. Also Python-2-only: text-mode read and ``ord()`` on the
    bytes of ``digest()``.
    '''
    buffer_size = 10*1024*1024  # read in 10 MiB chunks
    m = hashlib.md5()
    with open(filename) as f:
        buff = f.read(buffer_size)
        while len(buff)>0:
            m.update(buff)
            buff = f.read(buffer_size)
    dig = m.digest()
    return ''.join(['%x' % ord(x) for x in dig])
"resource": ""
} |
def hash_str(string):
    '''returns string of MD5 hash of given string

    NOTE(review): same caveat as ``hash`` above -- ``'%x'`` is not
    zero-padded, so this is not the canonical hexdigest and distinct
    digests can map to the same string; kept for compatibility with
    previously stored values. Python-2-only (``ord()`` on digest bytes).
    '''
    m = hashlib.md5()
    m.update(string)
    dig = m.digest()
    return ''.join(['%x' % ord(x) for x in dig])
"resource": ""
} |
def find(file):
    '''tries to find ``file`` using OS-specific searches and some guessing

    Search order:
      1. MacOS Spotlight (``mdfind``)
      2. UNIX ``locate``
      3. every directory on ``$PATH`` plus a few hard-coded AFNI/FSL locations

    :param file: bare filename to look for (note: shadows the Python 2
        builtin ``file``)
    :return: full path of the first match, or None when nothing is found
    '''
    # Try MacOS Spotlight:
    mdfind = which('mdfind')
    if mdfind:
        out = run([mdfind,'-name',file],stderr=None,quiet=None)
        if out.return_code==0 and out.output:
            # Only accept exact basename matches, not substring hits.
            for fname in out.output.split('\n'):
                if os.path.basename(fname)==file:
                    return fname
    # Try UNIX locate:
    locate = which('locate')
    if locate:
        out = run([locate,file],stderr=None,quiet=None)
        if out.return_code==0 and out.output:
            for fname in out.output.split('\n'):
                if os.path.basename(fname)==file:
                    return fname
    # Try to look through the PATH, and some guesses:
    path_search = os.environ["PATH"].split(os.pathsep)
    path_search += ['/usr/local/afni','/usr/local/afni/atlases','/usr/local/share','/usr/local/share/afni','/usr/local/share/afni/atlases']
    afni_path = which('afni')
    if afni_path:
        path_search.append(os.path.dirname(afni_path))
    if nl.wrappers.fsl.bet2:
        path_search.append(os.path.dirname(nl.wrappers.fsl.bet2))
    for path in path_search:
        path = path.strip('"')
        try:
            if file in os.listdir(path):
                return os.path.join(path,file)
        except:
            # NOTE(review): bare except silently skips unreadable/missing
            # directories, but also swallows KeyboardInterrupt -- consider
            # narrowing to OSError.
            pass
"resource": ""
} |
def get_hexagram(method='THREE COIN'):
    """
    Return one or two hexagrams using any of a variety of divination methods.

    The ``NAIVE`` method simply returns a uniformally random ``int`` between
    ``1`` and ``64``.

    All other methods return a 2-tuple where the first value
    represents the starting hexagram and the second represents the 'moving to'
    hexagram.

    To find the name and unicode glyph for a found hexagram, look it up in
    the module-level `hexagrams` dict.

    Args:
        method (str): ``'THREE COIN'``, ``'YARROW'``, or ``'NAIVE'``,
            the divination method model to use. Note that the three coin and
            yarrow methods are not actually literally simulated,
            but rather statistical models reflecting the methods are passed
            to `blur.rand` functions to accurately approximate them.

    Returns:
        int: If ``method == 'NAIVE'``, the ``int`` key of the found hexagram.
            Otherwise a `tuple` will be returned.
        tuple: A 2-tuple of form ``(int, int)`` where the first value
            is key of the starting hexagram and the second is that of the
            'moving-to' hexagram.

    Raises: ValueError if ``method`` is invalid

    Examples:
        >>> get_hexagram(method='NAIVE')  # doctest: +SKIP
        # Might be...
        26
        >>> initial, moving_to = get_hexagram()  # doctest: +SKIP
        >>> hexagrams[initial]  # doctest: +SKIP
        ('䷮', '困', 'Confining')
    """
    if method == 'THREE COIN':
        weights = [('MOVING YANG', 2),
                   ('MOVING YIN', 2),
                   ('STATIC YANG', 6),
                   ('STATIC YIN', 6)]
    elif method == 'YARROW':
        weights = [('MOVING YANG', 8),
                   ('MOVING YIN', 2),
                   ('STATIC YANG', 11),
                   ('STATIC YIN', 17)]
    elif method == 'NAIVE':
        return random.randint(1, 64)
    else:
        # BUG FIX: the message was previously never formatted, so the
        # offending value was not included in the error text.
        raise ValueError('`method` value of "{}" is invalid'.format(method))
    hexagram_1 = []
    hexagram_2 = []
    # Build both hexagrams line by line; moving lines flip between the two.
    for i in range(6):
        roll = weighted_choice(weights)
        if roll == 'MOVING YANG':
            hexagram_1.append(1)
            hexagram_2.append(0)
        elif roll == 'MOVING YIN':
            hexagram_1.append(0)
            hexagram_2.append(1)
        elif roll == 'STATIC YANG':
            hexagram_1.append(1)
            hexagram_2.append(1)
        else:  # if roll == 'STATIC YIN'
            hexagram_1.append(0)
            hexagram_2.append(0)
    # Convert hexagrams lists into tuples (hashable dict keys)
    hexagram_1 = tuple(hexagram_1)
    hexagram_2 = tuple(hexagram_2)
    return (_hexagram_dict[hexagram_1], _hexagram_dict[hexagram_2])
"resource": ""
} |
def get_supported_resources(netid):
    """
    Fetch and return the list of Supported resources for `netid`.
    """
    response = get_resource(_netid_supported_url(netid))
    return _json_to_supported(response)
"resource": ""
} |
def _json_to_supported(response_body):
    """
    Parse a JSON response body into a list of Supported objects.
    """
    payload = json.loads(response_body)
    # Missing "supportedList" yields an empty list rather than an error.
    return [Supported().from_json(entry)
            for entry in payload.get("supportedList", [])]
"resource": ""
} |
def add_arguments(parser):
    """Adds stock arguments to argparse parsers from scripts that submit grid
    jobs.

    :param parser: an ``argparse.ArgumentParser`` to extend in place
    :return: the same parser, for chaining
    """
    default_log_path = os.path.realpath('logs')
    parser.add_argument('--log-dir', metavar='LOG', type=str,
        dest='logdir', default=default_log_path,
        help='Base directory used for logging (defaults to "%(default)s")')
    q_choices = (
        'default', 'all.q',
        'q_1day', 'q1d',
        'q_1week', 'q1w',
        'q_1month', 'q1m',
        'q_1day_mth', 'q1dm',
        'q_1week_mth', 'q1wm',
        'q_gpu', 'gpu',
        'q_long_gpu', 'lgpu',
        'q_short_gpu', 'sgpu',
    )
    parser.add_argument('--queue-name', metavar='QUEUE', type=str,
        dest='queue', default=q_choices[0], choices=q_choices,
        help='Queue for submission - one of ' + \
            '|'.join(q_choices) + ' (defaults to "%(default)s")')
    parser.add_argument('--hostname', metavar='HOSTNAME', type=str,
        dest='hostname', default=None,
        help='If set, it asks the queue to use only a subset of the available nodes')
    parser.add_argument('--memfree', metavar='MEMFREE', type=str,
        dest='memfree', default=None,
        help='Adds the \'-l mem_free\' argument to qsub')
    parser.add_argument('--hvmem', metavar='HVMEM', type=str,
        dest='hvmem', default=None,
        help='Adds the \'-l h_vmem\' argument to qsub')
    # BUG FIX: submit() reads arguments.gpumem, but no --gpumem option was
    # ever defined, so any script using submit() would crash with an
    # AttributeError; register it here (default None keeps old behavior).
    parser.add_argument('--gpumem', metavar='GPUMEM', type=str,
        dest='gpumem', default=None,
        help='Adds the \'-l gpumem\' argument to qsub')
    parser.add_argument('--pe-opt', metavar='PE_OPT', type=str,
        dest='pe_opt', default=None,
        help='Adds the \'--pe \' argument to qsub')
    parser.add_argument('--no-cwd', default=True, action='store_false',
        dest='cwd', help='Do not change to the current directory when starting the grid job')
    parser.add_argument('--dry-run', default=False, action='store_true',
        dest='dryrun', help='Does not really submit anything, just print what would do instead')
    parser.add_argument('--job-database', default=None,
        dest='statefile', help='The path to the state file that will be created with the submissions (defaults to the parent directory of your logs directory)')
    return parser
"resource": ""
} |
def submit(jman, command, arguments, deps=None, array=None):
    """An easy submission option for grid-enabled scripts. Create the log
    directories using random hash codes. Use the arguments as parsed by the main
    script.

    :param jman: the job manager that performs the real submission
    :param command: the command line (list); ``command[0]`` names the job
    :param arguments: namespace as produced by ``add_arguments``
    :param deps: optional list of job dependencies (defaults to none)
    :param array: optional array-job specification
    :returns: the dry-run job or the submitted job's id
    """
    # Avoid the shared-mutable-default pitfall; an omitted ``deps`` still
    # behaves exactly like an empty dependency list.
    if deps is None:
        deps = []

    logdir = os.path.join(os.path.realpath(arguments.logdir),
        tools.random_logdir())

    jobname = os.path.splitext(os.path.basename(command[0]))[0]
    cmd = tools.make_shell(sys.executable, command)

    if arguments.dryrun:
        return DryRunJob(cmd, cwd=arguments.cwd, queue=arguments.queue,
            hostname=arguments.hostname, memfree=arguments.memfree,
            hvmem=arguments.hvmem, gpumem=arguments.gpumem, pe_opt=arguments.pe_opt,
            stdout=logdir, stderr=logdir, name=jobname, deps=deps,
            array=array)

    # really submit
    return jman.submit(cmd, cwd=arguments.cwd, queue=arguments.queue,
        hostname=arguments.hostname, memfree=arguments.memfree,
        hvmem=arguments.hvmem, gpumem=arguments.gpumem, pe_opt=arguments.pe_opt,
        stdout=logdir, stderr=logdir, name=jobname, deps=deps,
        array=array)
def add_to_dict(self, text):
    """ Generate word n-tuple and next word probability dict.

    Splits ``text`` into sentences, counts every (1..n)-gram to next-word
    transition into ``self.word_dict``, then normalizes the counts into
    probabilities.  The empty string marks sentence boundaries.
    """
    n = self.n
    # Sentence splitter: break on ./?/! followed by whitespace, but not on
    # abbreviations like "e.g." or "U.S." (negative look-behinds).
    sentences = re.split(r'(?<!\w\.\w.)(?<![A-Z][a-z]\.)(?<=\.|\?|!)\s', text)
    # '' is a special symbol for the start of a sentence like pymarkovchain uses
    for sentence in sentences:
        sentence = sentence.replace('"','') # remove quotes
        words = sentence.strip().split() # split each sentence into its constituent words
        if len(words) == 0:
            continue
        # first word follows a sentence end
        self.word_dict[("",)][words[0]].count += 1
        # Count transitions for every context length j = 1..n.
        for j in range(1, n+1):
            for i in range(len(words) - 1):
                if i + j >= len(words):
                    continue
                word = tuple(words[i:i + j])
                self.word_dict[word][words[i + j]].count += 1
            # last word precedes a sentence end
            self.word_dict[tuple(words[len(words) - j:len(words)])][""].count += 1
    # We've now got the db filled with parametrized word counts
    # We still need to normalize this to represent probabilities
    # (true division: probabilities per context sum to 1.0).
    for word in self.word_dict:
        wordsum = 0
        for nextword in self.word_dict[word]:
            wordsum += self.word_dict[word][nextword].count
        if wordsum != 0:
            for nextword in self.word_dict[word]:
                self.word_dict[word][nextword].prob = self.word_dict[word][nextword].count / wordsum
def next_word(self, previous_words):
    """Return the next word generated by the Markov chain.

    The next word depends on a tuple of the previous words.  If that exact
    tuple never appeared in the training corpus, the earliest word is
    dropped one at a time until a known context is found; an exhausted
    context yields "" (sentence end).
    """
    previous_words = tuple(previous_words)
    if previous_words != ("",):  # the empty-string singleton tuple is always present
        while previous_words not in self.word_dict:
            previous_words = tuple(previous_words[1:])
            if not previous_words:
                return ""
    frequencies = self.word_dict[previous_words]
    inv = [(v.prob, k) for k, v in frequencies.items()]
    p, w = zip(*inv)
    # BUG FIX: np.random.choice's third positional argument is `replace`,
    # not `p` — the probabilities were silently ignored and sampling was
    # uniform.  Pass them by keyword so the distribution is honoured.
    return np.random.choice(w, 1, p=p)[0]
def get_transaction_document(current_block: dict, source: dict, from_pubkey: str, to_pubkey: str) -> Transaction:
    """
    Build a single-input, single-output Transaction document.

    :param current_block: Current block infos (uses 'currency', 'number', 'hash')
    :param source: Source to send (uses 'amount', 'base', 'type', 'identifier', 'noffset')
    :param from_pubkey: Public key of the issuer
    :param to_pubkey: Public key of the receiver
    :return: Transaction
    """
    # The single input spent by this transaction.
    tx_input = InputSource(
        amount=source['amount'],
        base=source['base'],
        source=source['type'],
        origin_id=source['identifier'],
        index=source['noffset']
    )
    # Unlock input 0 with the signature produced by issuer 0 (from_pubkey).
    tx_unlock = Unlock(index=0, parameters=[SIGParameter(0)])
    # Send the whole input amount to the receiver's public key.
    tx_output = OutputSource(
        amount=source['amount'],
        base=source['base'],
        condition="SIG({0})".format(to_pubkey)
    )
    return Transaction(
        version=TRANSACTION_VERSION,
        currency=current_block['currency'],
        blockstamp=BlockUID(current_block['number'], current_block['hash']),
        locktime=0,
        issuers=[from_pubkey],
        inputs=[tx_input],
        unlocks=[tx_unlock],
        outputs=[tx_output],
        comment='',
        signatures=[]
    )
def check_type(obj: Any,
               candidate_type: Any,
               reltype: str = 'invariant') -> bool:
    """Tell wether a value correspond to a type,
    optionally specifying the type as contravariant or covariant.

    NOTE(review): this relies on private ``typing`` internals
    (``type(Union)``, ``__bases__`` on generic aliases) whose layout is
    CPython-version specific — verify against the targeted Python version.

    Args:
        obj (Any): The value to check.
        candidate_type (Any): The type to check the object against.
        reltype (:obj:`str`, optional): Variance of the type, can be contravariant,
            covariant or invariant. By default is invariant.

    Returns:
        bool: True if the type is fine, False otherwise

    Raises:
        ValueError: When the variance or the type are not among the ones the function can manage.
    """
    if reltype not in ['invariant', 'covariant', 'contravariant']:
        raise ValueError(f' Variadic type {reltype} is unknown')
    # builtin type like str, or a class
    if type(candidate_type) == type and reltype in ['invariant']:
        return isinstance(obj, candidate_type)
    if type(candidate_type) == type and reltype in ['covariant']:
        return issubclass(obj.__class__, candidate_type)
    if type(candidate_type) == type and reltype in ['contravariant']:
        return issubclass(candidate_type, obj.__class__)
    # Any accepts everything
    if type(candidate_type) == type(Any):
        return True
    # Union, at least one match in __args__
    if type(candidate_type) == type(Union):
        return any(check_type(obj, t, reltype) for t in candidate_type.__args__)
    # Tuple, each element matches the corresponding type in __args__
    if type(candidate_type) == type(Tuple) and tuple in candidate_type.__bases__:
        if not hasattr(obj, '__len__'):
            return False
        if len(candidate_type.__args__) != len(obj):
            return False
        return all(check_type(o, t, reltype) for (o, t) in zip(obj, candidate_type.__args__))
    # Dict, each (key, value) matches the type in __args__
    if type(candidate_type) == type(Dict) and dict in candidate_type.__bases__:
        if type(obj) != dict:
            return False
        return all(check_type(k, candidate_type.__args__[0], reltype)
                   and check_type(v, candidate_type.__args__[1], reltype)
                   for (k, v) in obj.items())
    # List or Set, each element matches the type in __args__
    if type(candidate_type) == type(List) and \
            (list in candidate_type.__bases__ or set in candidate_type.__bases__):
        if not hasattr(obj, '__len__'):
            return False
        return all(check_type(o, candidate_type.__args__[0], reltype) for o in obj)
    # TypeVar, this is tricky
    if type(candidate_type) == TypeVar:
        # TODO consider contravariant, variant and bound
        # invariant with a list of constraints, acts like a Tuple
        if not candidate_type.__constraints__:
            return True
        if not (candidate_type.__covariant__ or candidate_type.__contravariant__):
            return any(check_type(obj, t) for t in candidate_type.__constraints__)
    # Type[X] matches when obj is X or a subclass of it (covariant check).
    if type(candidate_type) == type(Type):
        return check_type(obj, candidate_type.__args__[0], reltype='covariant')
    if inspect.isclass(candidate_type) and reltype in ['invariant']:
        return isinstance(obj, candidate_type)
    raise ValueError(f'Cannot check against {reltype} type {candidate_type}')
def get_source(self, key, name_spaces=None, default_prefix=''):
    """Generates the dictionary key for the serialized representation
    based on the instance variable source and a provided key.

    :param str key: name of the field in model
    :returns: ``self.source`` (or ``key`` when unset), optionally prefixed
        with the resolved namespace or ``default_prefix``
    """
    source = self.source if self.source else key
    if name_spaces and self.name_space and self.name_space in name_spaces:
        prefix = name_spaces[self.name_space] + ':'
    else:
        prefix = default_prefix
    return prefix + source
def validate(self, raw_data, **kwargs):
    """Validate that ``raw_data`` converts to an integer.

    Returns the converted ``int``; raises ValidationException with the
    field's 'invalid' message otherwise.
    """
    try:
        converted_data = int(raw_data)
    except (ValueError, TypeError):
        # int(None) / int([]) raise TypeError, not ValueError; both mean
        # "not an integer" here.  The try is also narrowed to the
        # conversion so errors from super().validate are no longer masked.
        raise ValidationException(self.messages['invalid'], repr(raw_data))
    return super(IntegerField, self).validate(converted_data)
def validate(self, raw_data, **kwargs):
    """Validate that ``raw_data`` converts to a float.

    Returns the original ``raw_data`` unchanged on success; raises
    ValidationException with the field's 'invalid' message otherwise.
    """
    try:
        converted_data = float(raw_data)
    except (ValueError, TypeError):
        # float(None) raises TypeError, not ValueError; both mean
        # "not a float".  Narrowing the try also stops super().validate
        # errors from being masked as conversion failures.
        raise ValidationException(self.messages['invalid'], repr(raw_data))
    super(FloatField, self).validate(converted_data, **kwargs)
    return raw_data
def validate(self, raw_data, **kwargs):
    """Validate a datetime value; the raw_data is returned unchanged.

    The parsed ``datetime`` is stored on ``self.converted``; already-parsed
    datetimes pass through, otherwise ``serial_format`` (or ISO-8601 when
    unset) drives the parsing.
    """
    super(DateTimeField, self).validate(raw_data, **kwargs)
    try:
        if isinstance(raw_data, datetime.datetime):
            parsed = raw_data
        elif self.serial_format is None:
            # no explicit format: treat the input as ISO-8601
            parsed = parse(raw_data)
        else:
            parsed = datetime.datetime.strptime(raw_data,
                                                self.serial_format)
        self.converted = parsed
        return raw_data
    except (ParseError, ValueError):
        msg = self.messages['parse'] % dict(cls=self.__class__.__name__,
                                            data=raw_data,
                                            format=self.serial_format)
        raise ValidationException(msg, raw_data)
def _clean_post_content(blog_url, content):
    """
    Replace import path with something relative to blog.

    :param blog_url: absolute URL prefix to strip from <img> sources
    :param content: post HTML to rewrite
    """
    # re.escape the URL: it contains regex metacharacters ('.', possibly
    # '?'), which previously made the pattern match unintended strings.
    pattern = "<img.src=\"%s(.*)\"" % re.escape(blog_url)
    return re.sub(
        pattern,
        # m.group(1) instead of the odd m.groups(1)[0] spelling.
        lambda m: "<img src=\"%s\"" % _get_relative_upload(m.group(1)),
        content)
def submit(self, command_line, name = None, array = None, dependencies = [], exec_dir = None, log_dir = "logs", dry_run = False, verbosity = 0, stop_on_failure = False, **kwargs):
    """Submits a job that will be executed in the grid.

    The job is first recorded in the local database; unless ``dry_run`` is
    set it is then forwarded to the SGE grid.

    :param command_line: the command to execute
    :param name: optional job name handed to the grid
    :param array: optional array-job specification
    :param dependencies: ids of jobs this one must wait for
        (NOTE(review): mutable default — safe only while it is never
        mutated; confirm before refactoring)
    :param exec_dir: directory to execute the job in
    :param log_dir: directory receiving stdout/stderr files
    :param dry_run: print what would happen and leave the database unchanged
    :param verbosity: verbosity forwarded to the grid submission
    :param stop_on_failure: stop dependent jobs when this one fails
    :returns: the grid job id, or None for a dry run
    """
    # add job to database
    self.lock()
    job = add_job(self.session, command_line, name, dependencies, array, exec_dir=exec_dir, log_dir=log_dir, stop_on_failure=stop_on_failure, context=self.context, **kwargs)
    logger.info("Added job '%s' to the database." % job)

    if dry_run:
        print("Would have added the Job")
        print(job)
        print("to the database to be executed in the grid with options:", str(kwargs))
        # roll the freshly-added job back out of the database
        self.session.delete(job)
        logger.info("Deleted job '%s' from the database due to dry-run option" % job)
        job_id = None
    else:
        job_id = self._submit_to_grid(job, name, array, dependencies, log_dir, verbosity, **kwargs)

    self.session.commit()
    self.unlock()

    return job_id
def run_job(self, job_id, array_id=None):
    """Overwrites the run-job command from the manager to extract the correct
    job id before calling the base class implementation."""
    # translate the grid id into our unique database id
    self.lock()
    matches = list(self.session.query(Job).filter(Job.id == job_id))
    if len(matches) != 1:
        self.unlock()
        raise ValueError("Could not find job id '%d' in the database'" % job_id)
    unique_id = matches[0].unique
    self.unlock()

    # call base class implementation with the corrected job id
    return JobManager.run_job(self, unique_id, array_id)
def stop_jobs(self, job_ids):
    """Stops the jobs in the grid."""
    self.lock()

    for job in self.get_jobs(job_ids):
        # only jobs that are actually scheduled or running can be halted
        if job.status not in ('executing', 'queued', 'waiting'):
            continue
        qdel(job.id, context=self.context)
        logger.info("Stopped job '%s' in the SGE grid." % job)
        # reset the job back to the submitted state
        job.submit()

    self.session.commit()
    self.unlock()
def encrypt(self, pubkey: str, nonce: Union[str, bytes], text: Union[str, bytes]) -> str:
    """
    Encrypt ``text`` for the owner of ``pubkey``.

    The nonce must be a 24 character string (libnacl.utils.rand_nonce()
    can provide one) and unique per encrypted message.

    :param pubkey: Base58 encoded public key of the recipient
    :param nonce: Unique nonce
    :param text: Message to encrypt
    :return: base58 encoded ciphertext
    """
    box = libnacl.public.Box(self, PublicKey(pubkey))
    sealed = box.encrypt(ensure_bytes(text), ensure_bytes(nonce))
    # libnacl prepends the 24-byte nonce; the caller already knows it,
    # so only the ciphertext proper is encoded.
    return Base58Encoder.encode(sealed[24:])
def decrypt(self, pubkey: str, nonce: Union[str, bytes], text: str) -> str:
    """
    Decrypt a message produced with :meth:`encrypt`.

    :param pubkey: Public key of the sender
    :param nonce: Unique nonce used by the sender
    :param text: Base58 encoded ciphertext
    :return: clear text decoded as UTF-8
    """
    box = libnacl.public.Box(self, PublicKey(pubkey))
    plain = box.decrypt(Base58Encoder.decode(text), ensure_bytes(nonce))
    return plain.decode('utf-8')
def encrypt_seal(self, data: Union[str, bytes]) -> bytes:
    """
    Anonymously encrypt ``data`` for the owner of this key (sealed box).

    The ed25519 public key is first converted to its curve25519 form, as
    required by crypto_box_seal.

    :param data: Bytes (or str) data to encrypt
    """
    curve_pk = libnacl.crypto_sign_ed25519_pk_to_curve25519(self.pk)
    return libnacl.crypto_box_seal(ensure_bytes(data), curve_pk)
def override_default_templates(self):
    """
    Override the default emails already defined by other apps.

    Registers every template file found in OVERRIDE_TEMPLATE_DIR in
    ``self.overrides``, keyed by file name.
    """
    if plugs_mail_settings['OVERRIDE_TEMPLATE_DIR']:
        dir_ = plugs_mail_settings['OVERRIDE_TEMPLATE_DIR']
        for file_ in os.listdir(dir_):
            # '.txt' with the dot: the old check used 'txt', which also
            # matched unrelated names such as 'foo_txt'.
            if file_.endswith(('.html', '.txt')):
                self.overrides[file_] = dir_
def get_apps(self):
    """
    Scan INSTALLED_APPS and collect template info from every app that
    provides an ``emails`` module; apps without one are skipped.
    """
    templates = []
    for app_name in settings.INSTALLED_APPS:
        try:
            emails_module = import_module(app_name + '.emails')
            templates += self.get_plugs_mail_classes(emails_module)
        except ImportError:
            # app has no emails module — nothing to collect
            pass
    return templates
def get_plugs_mail_classes(self, app):
    """
    Collect ``(name, location, description, language)`` tuples for every
    PlugsMail subclass declared in ``app``.
    """
    classes = []
    for name, cls in self.get_members(app):
        is_mail_class = (inspect.isclass(cls)
                         and issubclass(cls, PlugsMail)
                         and name != 'PlugsMail')
        if not is_mail_class:
            continue
        for location in self.get_template_files(app.__file__, name):
            try:
                description = cls.description
                language = self.get_template_language(location)
                classes.append((name, location, description, language))
            except AttributeError:
                raise AttributeError('Email class must specify email description.')
    return classes
def create_templates(self, templates):
    """
    Insert every template that is not yet stored in the database.

    :param templates: iterable of (name, location, description, language)
    :returns: how many records were created
    """
    created = 0
    for template in templates:
        if self.template_exists_db(template):
            continue
        name, location, description, language = template
        text = self.open_file(location)
        html_content = self.get_html_content(text)
        record = {
            'name': utils.camel_to_snake(name).upper(),
            'html_content': html_content,
            'content': self.text_version(html_content),
            'subject': self.get_subject(text),
            'description': description,
            'language': language
        }
        if models.EmailTemplate.objects.create(**record):
            created += 1
    return created
def open_file(self, file_):
    """
    Receives a file path as input and returns a string with the full
    contents of the file (UTF-8 decoded).
    """
    # read() replaces the old line-by-line string concatenation, which
    # was quadratic and needlessly verbose; the result is identical.
    with open(file_, 'r', encoding='utf-8') as file:
        return file.read()
def template_exists_db(self, template):
    """
    Tell whether a (name, location, description, language) template tuple
    already has a database record, keyed by name and language.
    """
    name = utils.camel_to_snake(template[0]).upper()
    language = utils.camel_to_snake(template[3])
    try:
        models.EmailTemplate.objects.get(name=name, language=language)
        return True
    except models.EmailTemplate.DoesNotExist:
        return False
q44262 | PathFinder2._path_hooks | train | def _path_hooks(cls, path): # from importlib.PathFinder
"""Search sys.path_hooks for a finder for 'path'."""
if sys.path_hooks is not None and not sys.path_hooks:
warnings.warn('sys.path_hooks is empty', ImportWarning)
for hook in sys.path_hooks:
try:
return hook(path)
except ImportError:
continue
else:
return None | python | {
"resource": ""
} |
q44263 | PathFinder2._path_importer_cache | train | def _path_importer_cache(cls, path): # from importlib.PathFinder
"""Get the finder for the path entry from sys.path_importer_cache.
If the path entry is not in the cache, find the appropriate finder
and cache it. If no finder is available, store None.
"""
if path == '':
try:
path = os.getcwd()
except FileNotFoundError:
# Don't cache the failure as the cwd can easily change to
# a valid directory later on.
return None
try:
finder = sys.path_importer_cache[path]
except KeyError:
finder = cls._path_hooks(path)
sys.path_importer_cache[path] = finder
return finder | python | {
"resource": ""
} |
def find_module(cls, fullname, path=None):
    """find the module on sys.path or 'path' based on sys.path_hooks and
    sys.path_importer_cache.

    This method is for python2 only
    """
    spec = cls.find_spec(fullname, path)
    if spec is None:
        return None
    if spec.loader is not None:
        return spec.loader
    if spec.submodule_search_locations:
        # python2 has no native namespace machinery, so emulate it with a
        # dedicated loader over the discovered portions
        return NamespaceLoader2(spec.name, spec.submodule_search_locations)
    return None
def find_spec(cls, fullname, path=None, target=None):
    """find the module on sys.path or 'path' based on sys.path_hooks and
    sys.path_importer_cache.

    A spec with no loader but with submodule search locations is turned
    into a namespace-package spec; one with neither yields None.
    """
    if path is None:
        path = sys.path
    spec = cls._get_spec(fullname, path, target)
    if spec is None:
        return None
    elif spec.loader is None:
        namespace_path = spec.submodule_search_locations
        if namespace_path:
            # We found at least one namespace path.  Return a
            #  spec which can create the namespace package.
            spec.origin = 'namespace'
            # _NamespacePath recomputes the portions lazily on each import
            spec.submodule_search_locations = _NamespacePath(fullname, namespace_path, cls._get_spec)
            return spec
        else:
            return None
    else:
        return spec
def find_spec(self, fullname, target=None):
    """Try to find a spec for the specified module. Returns the
    matching spec, or None if not found.

    Packages (directories with __init__<suffix>) win over plain modules;
    a bare directory with neither is reported as a possible namespace
    portion (a loader-less spec).
    """
    is_namespace = False
    tail_module = fullname.rpartition('.')[2]

    base_path = os.path.join(self.path, tail_module)
    for suffix, loader_class in self._loaders:
        init_filename = '__init__' + suffix
        init_full_path = os.path.join(base_path, init_filename)
        full_path = base_path + suffix
        if os.path.isfile(init_full_path):
            # regular package: directory with an __init__ file
            return self._get_spec(loader_class, fullname, init_full_path, [base_path], target)
        if os.path.isfile(full_path):  # maybe we need more checks here (importlib filefinder checks its cache...)
            return self._get_spec(loader_class, fullname, full_path, None, target)
        else:
            # If a namespace package, return the path if we don't
            #  find a module in the next section.
            is_namespace = os.path.isdir(base_path)

    if is_namespace:
        _verbose_message('possible namespace for {}'.format(base_path))
        # loader-less spec: the caller decides how to build the namespace
        spec = ModuleSpec(fullname, None)
        spec.submodule_search_locations = [base_path]
        return spec

    return None
def find_module(self, fullname):
    """Try to find a loader for the specified module, or the namespace
    package portions. Returns loader.
    """
    spec = self.find_spec(fullname)
    if spec is None:
        return None
    loader = spec.loader
    # python2 namespace case: wrap the discovered portions in a loader
    if loader is None and len(spec.submodule_search_locations):
        loader = NamespaceLoader2(spec.name, spec.submodule_search_locations)
        spec.loader = loader
    return loader
def path_hook(cls, *loader_details):
    """A class method which returns a closure to use on sys.path_hook
    which will return an instance using the specified loaders and the path
    called on the closure.

    If the path called on the closure is not a directory, ImportError is
    raised.
    """
    def path_hook_for_FileFinder2(path):
        """Path hook for FileFinder2."""
        if os.path.isdir(path):
            return cls(path, *loader_details)
        raise _ImportError('only directories are supported', path=path)

    return path_hook_for_FileFinder2
def getid(self, idtype):
    '''
    Return a fresh unique id of the form "<idtype>-<digits>".

    idtype in Uniq constants
    '''
    # Keep generating candidates until one is found that was never handed
    # out before.  The original compared `None in self._ids` up front, so
    # the loop body never ran on a fresh call and the method returned
    # idtype + '-' + None, raising TypeError; the sentinel check fixes it.
    memorable_id = None
    while memorable_id is None or memorable_id in self._ids:
        memorable_id = ''.join(str(randint(0, 19)) for _ in range(4))
    self._ids.append(memorable_id)
    return idtype + '-' + memorable_id
def register_deregister(notifier, event_type, callback=None,
                        args=None, kwargs=None, details_filter=None,
                        weak=False):
    """Context manager that registers a callback, then deregisters on exit.

    NOTE(harlowja): if the callback is none, then this registers nothing,
    which is different from the behavior of the ``register`` method
    which will *not* accept none as it is not callable...
    """
    if callback is None:
        yield
        return
    notifier.register(event_type, callback,
                      args=args, kwargs=kwargs,
                      details_filter=details_filter,
                      weak=weak)
    try:
        yield
    finally:
        # always undo the registration, even if the body raised
        notifier.deregister(event_type, callback,
                            details_filter=details_filter)
def dead(self):
    """Whether the callback no longer exists.

    If the callback is maintained via a weak reference, and that
    weak reference has been collected, this will be true
    instead of false.
    """
    if not self._weak:
        # strongly-held callbacks can never be collected out from under us
        return False
    return self._callback() is None
def is_equivalent(self, callback, details_filter=None):
    """Check if the callback provided is the same as the internal one.

    :param callback: callback used for comparison
    :param details_filter: callback used for comparison
    :returns: false if not the same callback, otherwise true
    :rtype: boolean
    """
    mine = self.callback
    # one side is None while the other is not -> not equivalent
    if (mine is None) != (callback is None):
        return False
    if mine is not None and callback is not None \
            and not reflection.is_same_callback(mine, callback):
        return False
    if details_filter is None:
        return self._details_filter is None
    if self._details_filter is None:
        return False
    return reflection.is_same_callback(self._details_filter,
                                       details_filter)
def is_registered(self, event_type, callback, details_filter=None):
    """Check if a callback is registered.

    :param event_type: event type callback was registered to
    :param callback: callback that was used during registration
    :param details_filter: details filter that was used during
                           registration
    :returns: if the callback is registered
    :rtype: boolean
    """
    for listener in self._topics.get(event_type, []):
        if listener.is_equivalent(callback, details_filter=details_filter):
            return True
    return False
def _do_dispatch(self, listeners, event_type, details):
    """Calls into listeners, handling failures and logging as needed."""
    attempted = len(listeners)
    failed = 0
    for listener in listeners:
        try:
            # each listener gets its own copy so one cannot corrupt
            # the details seen by the next
            listener(event_type, details.copy())
        except Exception:
            self._logger.warn(
                "Failure calling listener %s to notify about event"
                " %s, details: %s", listener, event_type, details,
                exc_info=True)
            failed += 1
    return _Notified(attempted, attempted - failed, failed)
def notify(self, event_type, details):
    """Notify about an event occurrence.

    All callbacks registered to receive notifications about given
    event type will be called (including ANY listeners).  If the
    provided event type can not be used to emit notifications (checked
    via :meth:`.can_trigger_notification`) a ValueError is raised.

    :param event_type: event type that occurred
    :param details: additional event details *dictionary* passed to
                    callback keyword argument with the same name
    :type details: dictionary
    :returns: a future whose result is a named tuple of (total listeners
              called, successful calls, failed calls); the result may be
              delayed depending on the internal executor used.
    """
    if not self.can_trigger_notification(event_type):
        raise ValueError("Event type '%s' is not allowed to trigger"
                         " notifications" % event_type)
    # ANY listeners fire first, then the type-specific ones
    targets = list(self._topics.get(self.ANY, []))
    targets.extend(self._topics.get(event_type, []))
    return self._executor.submit(self._do_dispatch, targets,
                                 event_type, details or {})
def register(self, event_type, callback,
             args=None, kwargs=None, details_filter=None,
             weak=False):
    """Register a callback to be called when event of a given type occurs.

    Callback will be called with provided ``args`` and ``kwargs`` and
    when event type occurs (or on any event if ``event_type`` equals to
    :attr:`.ANY`). It will also get additional keyword argument,
    ``details``, that will hold event details provided to the
    :meth:`.notify` method (if a details filter callback is provided then
    the target callback will *only* be triggered if the details filter
    callback returns a truthy value).

    :param event_type: event type to get triggered on
    :param callback: function callback to be registered.
    :param args: non-keyworded arguments
    :type args: list
    :param kwargs: key-value pair arguments
    :type kwargs: dictionary
    :param weak: if the callback retained should be referenced via
                 a weak reference or a strong reference (defaults to
                 holding a strong reference)
    :type weak: bool
    :returns: the listener that was registered
    :rtype: :py:class:`~.Listener`
    """
    # Validate all inputs before taking the lock.
    if not six.callable(callback):
        raise ValueError("Event callback must be callable")
    if details_filter is not None:
        if not six.callable(details_filter):
            raise ValueError("Details filter must be callable")
    if not self.can_be_registered(event_type):
        raise ValueError("Disallowed event type '%s' can not have a"
                         " callback registered" % event_type)
    if kwargs:
        # reserved keys would collide with keywords injected at dispatch
        for k in self.RESERVED_KEYS:
            if k in kwargs:
                raise KeyError("Reserved key '%s' not allowed in "
                               "kwargs" % k)
    with self._lock:
        # duplicate-registration check and insertion must be atomic
        if self.is_registered(event_type, callback,
                              details_filter=details_filter):
            raise ValueError("Event callback already registered with"
                             " equivalent details filter")
        listener = Listener(_make_ref(callback, weak=weak),
                            args=args, kwargs=kwargs,
                            details_filter=details_filter,
                            weak=weak)
        listeners = self._topics.setdefault(event_type, [])
        listeners.append(listener)
        return listener
def listeners_iter(self):
    """Return an iterator over the mapping of event => listeners bound.

    The listener list(s) returned should **not** be mutated.

    NOTE(harlowja): Each listener in the yielded (event, listeners)
    tuple is an instance of the :py:class:`~.Listener` type, which
    itself wraps a provided callback (and its details filter
    callback, if any).
    """
    # snapshot the keys so concurrent (de)registration does not break
    # the iteration; topics removed in the meantime are skipped
    remaining = set(self._topics)
    while remaining:
        topic = remaining.pop()
        try:
            yield topic, self._topics[topic]
        except KeyError:
            continue
def read_local_files(*file_paths: str) -> str:
    """
    Reads one or more text files and returns them joined together.
    A title is automatically created based on the file name.

    Args:
        *file_paths: list of files to aggregate

    Returns: content of files
    """
    def _read_single_file(file_path):
        with open(file_path) as f:
            filename = os.path.splitext(file_path)[0]
            # The title is the file name (sans extension) underlined with
            # '='; the previous code computed `filename` but then used a
            # hard-coded "(unknown)" literal, contradicting the docstring.
            title = f'{filename}\n{"=" * len(filename)}'
            return '\n\n'.join((title, f.read()))

    return '\n' + '\n\n'.join(map(_read_single_file, file_paths))
def _readfile(cls, filename):
    """ Read a UTF-8 encoded file and return its character tokens.

    :param filename: Name of file to be read.
    """
    with codecs.open(filename, encoding='utf-8') as f:
        filedata = f.read()
    # mode='c' tokenizes on characters rather than words
    return LM.tokenize(filedata, mode='c')
def train(self, root=''):
    """ Train the language model on the corpus found under ``root``.

    :param root: Path to training data.
    """
    self.trainer = Train(root=root)
    for doc_id, path in self.trainer.get_corpus():
        self.lm.add_doc(doc_id=doc_id, doc_terms=self._readfile(path))
    # Remember when the training data was last touched so that
    # staleness can be detected later.
    self.training_timestamp = self.trainer.get_last_modified()
def is_training_modified(self):
    """ Return True when the training data was modified since the last
    training run, False otherwise (or when builtin training data is used).
    """
    # timestamps are assumed comparable (e.g. mtimes) — TODO confirm
    return self.trainer.get_last_modified() > self.training_timestamp
def exists(c_table_cd: str, tables: I2B2Tables) -> bool:
    """ Determine whether any record exists with the given table code.

    NOTE(review): despite the historical docstring, this returns a boolean
    (True when at least one matching row exists), not a row count.
    - Ideally at most one row should match, but the default table doesn't
      have a key, so duplicates are possible.

    :param c_table_cd: key to test
    :param tables: i2b2 table accessor providing ont_connection / schemes
    :return: True if one or more records were found
    """
    conn = tables.ont_connection
    table = tables.schemes
    return bool(list(conn.execute(table.select().where(table.c.c_table_cd == c_table_cd))))
def del_records(c_table_cd: str, tables: I2B2Tables) -> int:
    """ Delete every record keyed by ``c_table_cd``.

    :param c_table_cd: key to delete
    :param tables: i2b2 table accessor providing ont_connection / schemes
    :return: number of records deleted
    """
    table = tables.schemes
    result = tables.ont_connection.execute(
        table.delete().where(table.c.c_table_cd == c_table_cd))
    return result.rowcount
"""
Read a config filename into .ini format and return dict of shares.
Keyword arguments:
filename -- the path of config filename (default None)
Return dict.
"""
if not os.path.exists(filename):
raise IOError('Impossibile trovare il filename %s' % filename)
shares = []
config = ConfigParser()
config.read(filename)
for share_items in [config.items(share_title) for share_title in
config.sections()]:
dict_share = {}
for key, value in share_items:
if key == 'hostname' and '@' in value:
hostname, credentials = (item[::-1] for item
in value[::-1].split('@', 1))
dict_share.update({key: hostname})
credentials = tuple(cred.lstrip('"').rstrip('"')
for cred in credentials.split(':', 1))
dict_share.update({'username': credentials[0]})
if len(credentials) > 1:
dict_share.update({'password': credentials[1]})
continue
dict_share.update({key: value})
shares.append(dict_share)
return shares | python | {
"resource": ""
} |
def unlock(self):
    """Closes the session to the database.

    Counterpart of ``lock``; raises RuntimeError when no session is
    currently open (e.g. when unlock is called twice).
    """
    if not hasattr(self, 'session'):
        raise RuntimeError('Error detected! The session that you want to close does not exist any more!')
    logger.debug("Closed database session of '%s'" % self._database)
    self.session.close()
    # drop the attribute entirely so a stale session cannot be reused
    del self.session
def _create(self):
    """Creates a new and empty database."""
    from .tools import makedirs_safe

    # the sqlite file lives in a directory that may not exist yet
    makedirs_safe(os.path.dirname(self._database))
    # emit the schema for all mapped tables
    Base.metadata.create_all(self._engine)
    logger.debug("Created new empty database '%s'" % self._database)
def get_jobs(self, job_ids=None):
    """Fetch jobs from the database, sorted by their unique id.

    :param job_ids: restrict the result to these ids; ``None`` means all jobs
    :return: list of Job objects ordered by ``Job.unique``
    """
    query = self.session.query(Job)
    if job_ids is not None:
        # an explicitly empty id list selects nothing at all
        if not job_ids:
            return []
        query = query.filter(Job.unique.in_(job_ids))
    return sorted(query, key=lambda job: job.unique)
q44288 | JobManager.list | train | def list(self, job_ids, print_array_jobs = False, print_dependencies = False, long = False, print_times = False, status=Status, names=None, ids_only=False):
"""Lists the jobs currently added to the database.

Prints a formatted table (or, with ids_only, just the ids) of all jobs
matching the given ids, status values and -- optionally -- names.
"""
# configuration for jobs: column titles and their widths
fields = ("job-id", "grid-id", "queue", "status", "job-name")
lengths = (6, 17, 11, 12, 16)
dependency_length = 0
if print_dependencies:
fields += ("dependencies",)
lengths += (25,)
dependency_length = lengths[-1]
if long:
fields += ("submitted command",)
lengths += (43,)
# build a '{:^N} {:^M} ...' row template out of the column widths
format = "{:^%d} " * len(lengths)
format = format % lengths
# if ids_only:
# self.lock()
# for job in self.get_jobs():
# print(job.unique, end=" ")
# self.unlock()
# return
array_format = "{0:^%d} {1:>%d} {2:^%d} {3:^%d}" % lengths[:4]
delimiter = format.format(*['='*k for k in lengths])
array_delimiter = array_format.format(*["-"*k for k in lengths[:4]])
header = [fields[k].center(lengths[k]) for k in range(len(lengths))]
# print header
if not ids_only:
print(' '.join(header))
print(delimiter)
self.lock()
for job in self.get_jobs(job_ids):
# presumably re-reads the job's state from the database -- TODO confirm
job.refresh()
if job.status in status and (names is None or job.name in names):
if ids_only:
print(job.unique, end=" ")
else:
print(job.format(format, dependency_length))
if print_times:
print(times(job))
# with ids_only only plain ids are printed, never the array-job table
if (not ids_only) and print_array_jobs and job.array:
print(array_delimiter)
for array_job in job.array:
if array_job.status in status:
print(array_job.format(array_format))
if print_times:
print(times(array_job))
print(array_delimiter)
self.unlock() | python | {
"resource": ""
} |
q44289 | JobManager.report | train | def report(self, job_ids=None, array_ids=None, output=True, error=True, status=Status, name=None):
"""Iterates through the output and error files and write the results to command line.

When array_ids are given, exactly one job id must be given as well and
only those array jobs are reported; otherwise all matching jobs are.
"""
def _write_contents(job):
# Writes the contents of the output and error files to command line
# NOTE(review): the open() handles below are never explicitly closed
out_file, err_file = job.std_out_file(), job.std_err_file()
logger.info("Contents of output file: '%s'" % out_file)
if output and out_file is not None and os.path.exists(out_file) and os.stat(out_file).st_size > 0:
print(open(out_file).read().rstrip())
print("-"*20)
if error and err_file is not None and os.path.exists(err_file) and os.stat(err_file).st_size > 0:
logger.info("Contents of error file: '%s'" % err_file)
print(open(err_file).read().rstrip())
print("-"*40)
def _write_array_jobs(array_jobs):
# one header line plus the file contents per array job
for array_job in array_jobs:
print("Array Job", str(array_job.id), ("(%s) :"%array_job.machine_name if array_job.machine_name is not None else ":"))
_write_contents(array_job)
self.lock()
# check if an array job should be reported
if array_ids:
# NOTE(review): this only logs the misuse and then continues anyway
if len(job_ids) != 1: logger.error("If array ids are specified exactly one job id must be given.")
array_jobs = list(self.session.query(ArrayJob).join(Job).filter(Job.unique.in_(job_ids)).filter(Job.unique == ArrayJob.job_id).filter(ArrayJob.id.in_(array_ids)))
if array_jobs: print(array_jobs[0].job)
_write_array_jobs(array_jobs)
else:
# iterate over all jobs
jobs = self.get_jobs(job_ids)
for job in jobs:
if name is not None and job.name != name:
continue
if job.status not in status:
continue
if job.array:
print(job)
_write_array_jobs(job.array)
else:
print(job)
_write_contents(job)
if job.log_dir is not None:
print("-"*60)
self.unlock() | python | {
"resource": ""
} |
q44290 | JobManager.delete | train | def delete(self, job_ids, array_ids = None, delete_logs = True, delete_log_dir = False, status = Status, delete_jobs = True):
"""Deletes the jobs with the given ids from the database.

Optionally also removes log files (delete_logs) and empty log
directories (delete_log_dir); with delete_jobs=False only the logs are
cleaned and the database rows are kept.
"""
def _delete_dir_if_empty(log_dir):
# only remove the directory when requested and actually empty
if log_dir and delete_log_dir and os.path.isdir(log_dir) and not os.listdir(log_dir):
os.rmdir(log_dir)
logger.info("Removed empty log directory '%s'" % log_dir)
def _delete(job, try_to_delete_dir=False):
# delete the job from the database
if delete_logs:
self.delete_logs(job)
if try_to_delete_dir:
_delete_dir_if_empty(job.log_dir)
if delete_jobs:
self.session.delete(job)
self.lock()
# check if array ids are specified
if array_ids:
# NOTE(review): this only logs the misuse and then continues anyway
if len(job_ids) != 1: logger.error("If array ids are specified exactly one job id must be given.")
array_jobs = list(self.session.query(ArrayJob).join(Job).filter(Job.unique.in_(job_ids)).filter(Job.unique == ArrayJob.job_id).filter(ArrayJob.id.in_(array_ids)))
if array_jobs:
job = array_jobs[0].job
for array_job in array_jobs:
if array_job.status in status:
if delete_jobs:
logger.debug("Deleting array job '%d' of job '%d' from the database." % (array_job.id, job.unique))
_delete(array_job)
# the parent job goes away once it has no array jobs left
if not job.array:
if job.status in status:
if delete_jobs:
logger.info("Deleting job '%d' from the database." % job.unique)
# delete_jobs doubles as try_to_delete_dir here -- presumably so the
# log dir is only cleaned when the job row really is removed; TODO confirm
_delete(job, delete_jobs)
else:
# iterate over all jobs
jobs = self.get_jobs(job_ids)
for job in jobs:
# delete all array jobs
if job.array:
for array_job in job.array:
if array_job.status in status:
if delete_jobs:
logger.debug("Deleting array job '%d' of job '%d' from the database." % (array_job.id, job.unique))
_delete(array_job)
# delete this job
if job.status in status:
if delete_jobs:
logger.info("Deleting job '%d' from the database." % job.unique)
_delete(job, delete_jobs)
self.session.commit()
self.unlock() | python | {
"resource": ""
} |
def current(config):
    """Display current revision"""
    # open the file once and close it via the context manager; the original
    # opened the file twice and leaked the handle it passed to yaml.load
    with open(config, 'r') as config_file:
        # NOTE(review): yaml.load without an explicit Loader can construct
        # arbitrary objects -- consider yaml.safe_load for untrusted configs
        main.current(yaml.load(config_file))
def revision(config, message):
    """Create new revision file in a scripts directory"""
    # open the file once and close it via the context manager; the original
    # opened the file twice and leaked the handle it passed to yaml.load
    with open(config, 'r') as config_file:
        # NOTE(review): yaml.load without an explicit Loader can construct
        # arbitrary objects -- consider yaml.safe_load for untrusted configs
        main.revision(yaml.load(config_file), message)
def reapply(config):
    """Reapply current revision"""
    # open the file once and close it via the context manager; the original
    # opened the file twice and leaked the handle it passed to yaml.load
    with open(config, 'r') as config_file:
        # NOTE(review): yaml.load without an explicit Loader can construct
        # arbitrary objects -- consider yaml.safe_load for untrusted configs
        main.reapply(yaml.load(config_file))
def show(config):
    """Show revision list"""
    # open the file once and close it via the context manager; the original
    # opened the file twice and leaked the handle it passed to yaml.load
    with open(config, 'r') as config_file:
        # NOTE(review): yaml.load without an explicit Loader can construct
        # arbitrary objects -- consider yaml.safe_load for untrusted configs
        main.show(yaml.load(config_file))
q44295 | raise_from | train | def raise_from(exc, cause):
"""
Does the same as ``raise LALALA from BLABLABLA`` does in Python 3.
But works in Python 2 also!
Please check out the README on https://github.com/9seconds/pep3134
to get an idea about possible pitfalls. But the short story is: please
be pretty careful with tracebacks. If it is possible, use sys.exc_info
instead. But in most cases it will work as you expect.
"""
# traceback of the exception currently being handled, if any (may be None)
context_tb = sys.exc_info()[2]
# a legal cause is an exception class, an exception instance, or None
incorrect_cause = not (
(isinstance(cause, type) and issubclass(cause, Exception)) or
isinstance(cause, BaseException) or
cause is None
)
if incorrect_cause:
raise TypeError("exception causes must derive from BaseException")
if cause is not None:
# __pep3134__ marks causes that are already wrapped proxies; a plain
# cause is converted by raising and immediately catching it
if not getattr(cause, "__pep3134__", False):
# noinspection PyBroadException
try:
raise_(cause)
except: # noqa pylint: disable=W0702
cause = sys.exc_info()[1]
cause.__fixed_traceback__ = context_tb
# raise/catch exc the same way to obtain its proxy, then set the dunder
# attributes exactly like a native ``raise exc from cause`` would
# noinspection PyBroadException
try:
raise_(exc)
except: # noqa pylint: disable=W0702
exc = sys.exc_info()[1]
exc.__original_exception__.__suppress_context__ = True
exc.__original_exception__.__cause__ = cause
exc.__original_exception__.__context__ = None
raise exc | python | {
"resource": ""
} |
q44296 | Command.find_files | train | def find_files(self, root):
"""
Helper method to get all files in the given root.

Walks the tree below root (honouring self.follow_symlinks), prunes
directories matching self.ignore_patterns, skips matching files, and
returns a sorted list of (dir_path, file_name) tuples.
"""
def is_ignored(path, ignore_patterns):
"""
Check if the given path should be ignored or not.
"""
filename = os.path.basename(path)
ignore = lambda pattern: fnmatch.fnmatchcase(filename, pattern)
return any(ignore(pattern) for pattern in ignore_patterns)
# strip a trailing '<sep>*' so patterns like 'dir/*' also match the
# directory itself when pruning
dir_suffix = '%s*' % os.sep
normalized_patterns = [
p[:-len(dir_suffix)] if p.endswith(dir_suffix) else p
for p in self.ignore_patterns
]
all_files = []
walker = os.walk(root, topdown=True, followlinks=self.follow_symlinks)
for dir_path, dir_names, file_names in walker:
# iterate over a copy (dir_names[:]) because we mutate dir_names in
# place below -- with topdown=True this prunes os.walk's descent
for dir_name in dir_names[:]:
path = os.path.normpath(os.path.join(dir_path, dir_name))
if is_ignored(path, normalized_patterns):
dir_names.remove(dir_name)
if self.verbose:
print_out("Ignoring directory '{:}'".format(dir_name))
for file_name in file_names:
path = os.path.normpath(os.path.join(dir_path, file_name))
# files are checked against the raw (unnormalized) patterns
if is_ignored(path, self.ignore_patterns):
if self.verbose:
print_out("Ignoring file '{:}' in '{:}'".format(
file_name, dir_path))
else:
all_files.append((dir_path, file_name))
return sorted(all_files) | python | {
"resource": ""
} |
def plotter(path, show, goodFormat):
    '''makes some plots

    creates binned histograms of the results of each module
    (ie count of results in ranges [(0,40), (40, 50), (50,60), (60, 70), (70, 80), (80, 90), (90, 100)])

    Arguments:
        path {pathlib.Path or None} -- directory to save plots to (None: don't save)
        show {boolean} -- whether to show plots using python
        goodFormat {dict} -- module : [results for module]

    output:
        saves plots to files/shows plots depending on inputs
    '''
    bins = [0, 40, 50, 60, 70, 80, 90, 100]
    for module_name, results in goodFormat.items():  # for each module
        # cut the data into bins and plot the counts as a bar chart
        out = pd.cut(results, bins=bins, include_lowest=True)
        ax = out.value_counts().plot.bar(rot=0, color="b", figsize=(10, 6),
                                         alpha=0.5, title=module_name)
        ax.set_xticklabels(['0 to 40', '40 to 50', '50 to 60',
                            '60 to 70', '70 to 80', '80 to 90', '90 to 100'])
        ax.set_ylabel("# of candidates")
        ax.set_xlabel(
            "grade bins \n total candidates: {}".format(len(results)))
        if path is not None:
            # bug fix: the original called the unbound pathlib.Path.is_dir /
            # .mkdir with a str from path.as_posix(), which raises at runtime;
            # use the instance methods (idempotent thanks to exist_ok)
            path.mkdir(parents=True, exist_ok=True)
            plt.savefig(path / ''.join([module_name, '.png']))
        # preserve the original truthiness test (show "is not False")
        if show is not False:
            plt.show()
        elif path is not None:
            plt.close()
def myGrades(year, candidateNumber, badFormat, length):
    '''returns final result of candidateNumber in year

    Grades sit at the even offsets 2, 4, ... of the candidate's row in
    badFormat; the per-module weights and the divisor depend on the year.

    Arguments:
        year {int} -- the year candidateNumber is in (1-4)
        candidateNumber {str} -- the candidateNumber of candidateNumber
        badFormat {dict} -- candNumber : [results for candidate]
        length {int} -- length of each row in badFormat divided by 2

    Returns:
        float -- a weighted average for a specific candidate number and year

    Raises:
        ValueError -- if year is not 1-4 (the original crashed with an
        UnboundLocalError in that case)
    '''
    # (module weights, divisor) per year; year 4 weighs all modules equally
    weights_by_year = {
        1: ([1, 1, 1, 1, 0.5, 0.5, 0.5, 0.5], 6),
        2: ([1, 1, 1, 1, 1, 1, 0.5, 0.5], 7),
        3: ([1, 1, 1, 1, 1, 1, 0.5, 0.5], 7),
        4: ([1, 1, 1, 1, 1, 1, 1, 1], 8),
    }
    if year not in weights_by_year:
        raise ValueError("year must be 1, 2, 3 or 4, got %r" % (year,))
    weights, divisor = weights_by_year[year]
    row = badFormat[candidateNumber]
    return sum(int(row[2 * (i + 1)]) * weights[i]
               for i in range(length - 1)) / divisor
def myRank(grade, badFormat, year, length):
    '''Return the 1-based rank of ``grade`` among all candidates in the year.

    Arguments:
        grade {float} -- a weighted average for a specific candidate number and year
        badFormat {dict} -- candNumber : [results for candidate]
        year {int} -- year you are in
        length {int} -- length of each row in badFormat divided by 2

    Returns:
        int -- rank (1 = best) of the grade within the year
    '''
    averages_high_to_low = sorted(
        everyonesAverage(year, badFormat, length), reverse=True)
    position = averages_high_to_low.index(grade)
    return int(position + 1)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.