| _id | title | partition | text | language | meta_information |
|---|---|---|---|---|---|
q6800
|
SeqRepo._get_unique_seqid
|
train
|
def _get_unique_seqid(self, alias, namespace):
"""given alias and namespace, return seq_id if exactly one distinct
sequence id is found, raise KeyError if there's no match, or
        raise KeyError if there's more than one match.
"""
recs = self.aliases.find_aliases(alias=alias, namespace=namespace)
seq_ids = set(r["seq_id"] for r in recs)
if len(seq_ids) == 0:
raise KeyError("Alias {} (namespace: {})".format(alias, namespace))
if len(seq_ids) > 1:
# This should only happen when namespace is None
raise KeyError("Alias {} (namespace: {}): not unique".format(alias, namespace))
return seq_ids.pop()
|
python
|
{
"resource": ""
}
|
q6801
|
SeqAliasDB.find_aliases
|
train
|
def find_aliases(self, seq_id=None, namespace=None, alias=None, current_only=True, translate_ncbi_namespace=None):
"""returns iterator over alias annotation records that match criteria
The arguments, all optional, restrict the records that are
returned. Without arguments, all aliases are returned.
If arguments contain %, the `like` comparison operator is
used. Otherwise arguments must match exactly.
"""
clauses = []
params = []
def eq_or_like(s):
return "like" if "%" in s else "="
if translate_ncbi_namespace is None:
translate_ncbi_namespace = self.translate_ncbi_namespace
if alias is not None:
clauses += ["alias {} ?".format(eq_or_like(alias))]
params += [alias]
if namespace is not None:
# Switch to using RefSeq for RefSeq accessions
# issue #38: translate "RefSeq" to "NCBI" to enable RefSeq lookups
# issue #31: later breaking change, translate database
if namespace == "RefSeq":
namespace = "NCBI"
clauses += ["namespace {} ?".format(eq_or_like(namespace))]
params += [namespace]
if seq_id is not None:
clauses += ["seq_id {} ?".format(eq_or_like(seq_id))]
params += [seq_id]
if current_only:
clauses += ["is_current = 1"]
cols = ["seqalias_id", "seq_id", "alias", "added", "is_current"]
if translate_ncbi_namespace:
cols += ["case namespace when 'NCBI' then 'RefSeq' else namespace end as namespace"]
else:
cols += ["namespace"]
sql = "select {cols} from seqalias".format(cols=", ".join(cols))
if clauses:
sql += " where " + " and ".join("(" + c + ")" for c in clauses)
sql += " order by seq_id, namespace, alias"
_logger.debug("Executing: " + sql)
return self._db.execute(sql, params)
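        # Illustrative usage sketch (not part of the original source; the
        # database path and accession below are hypothetical):
        #
        #   aliasdb = SeqAliasDB("aliases.sqlite3")
        #   # exact match on a namespaced alias
        #   recs = list(aliasdb.find_aliases(namespace="NCBI", alias="NM_000551.3"))
        #   # a '%' in an argument switches the comparison to SQL LIKE
        #   recs = list(aliasdb.find_aliases(alias="NM_000551.%"))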
|
python
|
{
"resource": ""
}
|
q6802
|
SeqAliasDB.store_alias
|
train
|
def store_alias(self, seq_id, namespace, alias):
"""associate a namespaced alias with a sequence
Alias association with sequences is idempotent: duplicate
associations are discarded silently.
"""
if not self._writeable:
raise RuntimeError("Cannot write -- opened read-only")
log_pfx = "store({q},{n},{a})".format(n=namespace, a=alias, q=seq_id)
try:
c = self._db.execute("insert into seqalias (seq_id, namespace, alias) values (?, ?, ?)", (seq_id, namespace,
alias))
# success => new record
return c.lastrowid
except sqlite3.IntegrityError:
pass
# IntegrityError fall-through
# existing record is guaranteed to exist uniquely; fetchone() should always succeed
current_rec = self.find_aliases(namespace=namespace, alias=alias).fetchone()
# if seq_id matches current record, it's a duplicate (seq_id, namespace, alias) tuple
# and we return current record
if current_rec["seq_id"] == seq_id:
_logger.debug(log_pfx + ": duplicate record")
return current_rec["seqalias_id"]
# otherwise, we're reassigning; deprecate old record, then retry
_logger.debug(log_pfx + ": collision; deprecating {s1}".format(s1=current_rec["seq_id"]))
self._db.execute("update seqalias set is_current = 0 where seqalias_id = ?", [current_rec["seqalias_id"]])
return self.store_alias(seq_id, namespace, alias)
|
python
|
{
"resource": ""
}
|
q6803
|
add_assembly_names
|
train
|
def add_assembly_names(opts):
"""add assembly names as aliases to existing sequences
Specifically, associate aliases like GRCh37.p9:1 with existing
refseq accessions
```
[{'aliases': ['chr19'],
'assembly_unit': 'Primary Assembly',
'genbank_ac': 'CM000681.2',
'length': 58617616,
'name': '19',
'refseq_ac': 'NC_000019.10',
'relationship': '=',
'sequence_role': 'assembled-molecule'}]
```
For the above sample record, this function adds the following aliases:
* genbank:CM000681.2
* GRCh38:19
* GRCh38:chr19
to the sequence referred to by refseq:NC_000019.10.
"""
seqrepo_dir = os.path.join(opts.root_directory, opts.instance_name)
sr = SeqRepo(seqrepo_dir, writeable=True)
assemblies = bioutils.assemblies.get_assemblies()
if opts.reload_all:
assemblies_to_load = sorted(assemblies)
else:
namespaces = [r["namespace"] for r in sr.aliases._db.execute("select distinct namespace from seqalias")]
assemblies_to_load = sorted(k for k in assemblies if k not in namespaces)
_logger.info("{} assemblies to load".format(len(assemblies_to_load)))
ncbi_alias_map = {r["alias"]: r["seq_id"] for r in sr.aliases.find_aliases(namespace="NCBI", current_only=False)}
for assy_name in tqdm.tqdm(assemblies_to_load, unit="assembly"):
_logger.debug("loading " + assy_name)
sequences = assemblies[assy_name]["sequences"]
eq_sequences = [s for s in sequences if s["relationship"] in ("=", "<>")]
if not eq_sequences:
_logger.info("No '=' sequences to load for {an}; skipping".format(an=assy_name))
continue
# all assembled-molecules (1..22, X, Y, MT) have ncbi aliases in seqrepo
not_in_seqrepo = [s["refseq_ac"] for s in eq_sequences if s["refseq_ac"] not in ncbi_alias_map]
if not_in_seqrepo:
_logger.warning("Assembly {an} references {n} accessions not in SeqRepo instance {opts.instance_name} (e.g., {acs})".format(
an=assy_name, n=len(not_in_seqrepo), opts=opts, acs=", ".join(not_in_seqrepo[:5]+["..."]), seqrepo_dir=seqrepo_dir))
if not opts.partial_load:
_logger.warning("Skipping {an} (-p to enable partial loading)".format(an=assy_name))
continue
eq_sequences = [es for es in eq_sequences if es["refseq_ac"] in ncbi_alias_map]
_logger.info("Loading {n} new accessions for assembly {an}".format(an=assy_name, n=len(eq_sequences)))
for s in eq_sequences:
seq_id = ncbi_alias_map[s["refseq_ac"]]
aliases = [{"namespace": assy_name, "alias": a} for a in [s["name"]] + s["aliases"]]
if "genbank_ac" in s and s["genbank_ac"]:
aliases += [{"namespace": "genbank", "alias": s["genbank_ac"]}]
for alias in aliases:
sr.aliases.store_alias(seq_id=seq_id, **alias)
_logger.debug("Added assembly alias {a[namespace]}:{a[alias]} for {seq_id}".format(a=alias, seq_id=seq_id))
sr.commit()
|
python
|
{
"resource": ""
}
|
q6804
|
snapshot
|
train
|
def snapshot(opts):
"""snapshot a seqrepo data directory by hardlinking sequence files,
    copying sqlite databases, and removing write permissions from directories
"""
seqrepo_dir = os.path.join(opts.root_directory, opts.instance_name)
dst_dir = opts.destination_name
if not dst_dir.startswith("/"):
# interpret dst_dir as relative to parent dir of seqrepo_dir
dst_dir = os.path.join(opts.root_directory, dst_dir)
src_dir = os.path.realpath(seqrepo_dir)
dst_dir = os.path.realpath(dst_dir)
if commonpath([src_dir, dst_dir]).startswith(src_dir):
raise RuntimeError("Cannot nest seqrepo directories " "({} is within {})".format(dst_dir, src_dir))
if os.path.exists(dst_dir):
raise IOError(dst_dir + ": File exists")
tmp_dir = tempfile.mkdtemp(prefix=dst_dir + ".")
_logger.debug("src_dir = " + src_dir)
_logger.debug("dst_dir = " + dst_dir)
_logger.debug("tmp_dir = " + tmp_dir)
# TODO: cleanup of tmpdir on failure
makedirs(tmp_dir, exist_ok=True)
wd = os.getcwd()
os.chdir(src_dir)
# make destination directories (walk is top-down)
for rp in (os.path.join(dirpath, dirname) for dirpath, dirnames, _ in os.walk(".") for dirname in dirnames):
dp = os.path.join(tmp_dir, rp)
os.mkdir(dp)
# hard link sequence files
for rp in (os.path.join(dirpath, filename) for dirpath, _, filenames in os.walk(".") for filename in filenames
if ".bgz" in filename):
dp = os.path.join(tmp_dir, rp)
os.link(rp, dp)
# copy sqlite databases
for rp in ["aliases.sqlite3", "sequences/db.sqlite3"]:
dp = os.path.join(tmp_dir, rp)
shutil.copyfile(rp, dp)
# recursively drop write perms on snapshot
mode_aw = stat.S_IWUSR | stat.S_IWGRP | stat.S_IWOTH
def _drop_write(p):
mode = os.lstat(p).st_mode
new_mode = mode & ~mode_aw
os.chmod(p, new_mode)
for dp in (os.path.join(dirpath, dirent)
for dirpath, dirnames, filenames in os.walk(tmp_dir) for dirent in dirnames + filenames):
_drop_write(dp)
_drop_write(tmp_dir)
os.rename(tmp_dir, dst_dir)
_logger.info("snapshot created in " + dst_dir)
os.chdir(wd)
|
python
|
{
"resource": ""
}
|
q6805
|
StorageBackend.set_sqlite_pragmas
|
train
|
def set_sqlite_pragmas(self):
"""
Sets the connection PRAGMAs for the sqlalchemy engine stored in self.engine.
It currently sets:
- journal_mode to WAL
:return: None
"""
def _pragmas_on_connect(dbapi_con, con_record):
dbapi_con.execute("PRAGMA journal_mode = WAL;")
event.listen(self.engine, "connect", _pragmas_on_connect)
|
python
|
{
"resource": ""
}
|
q6806
|
StorageBackend.schedule_job
|
train
|
def schedule_job(self, j):
"""
Add the job given by j to the job queue.
Note: Does not actually run the job.
"""
job_id = uuid.uuid4().hex
j.job_id = job_id
session = self.sessionmaker()
orm_job = ORMJob(
id=job_id,
state=j.state,
app=self.app,
namespace=self.namespace,
obj=j)
session.add(orm_job)
try:
session.commit()
except Exception as e:
logging.error(
"Got an error running session.commit(): {}".format(e))
return job_id
|
python
|
{
"resource": ""
}
|
q6807
|
StorageBackend.mark_job_as_canceling
|
train
|
def mark_job_as_canceling(self, job_id):
"""
Mark the job as requested for canceling. Does not actually try to cancel a running job.
:param job_id: the job to be marked as canceling.
:return: the job object
"""
job, _ = self._update_job_state(job_id, State.CANCELING)
return job
|
python
|
{
"resource": ""
}
|
q6808
|
BaseWorkerBackend.handle_incoming_message
|
train
|
def handle_incoming_message(self, msg):
"""
Start or cancel a job, based on the msg.
If msg.type == MessageType.START_JOB, then start the job given by msg.job.
If msg.type == MessageType.CANCEL_JOB, then try to cancel the job given by msg.job.job_id.
Args:
msg (barbequeue.messaging.classes.Message):
Returns: None
"""
if msg.type == MessageType.START_JOB:
job = msg.message['job']
self.schedule_job(job)
elif msg.type == MessageType.CANCEL_JOB:
job_id = msg.message['job_id']
self.cancel(job_id)
|
python
|
{
"resource": ""
}
|
q6809
|
WorkerBackend.schedule_job
|
train
|
def schedule_job(self, job):
"""
        Schedule a job to run on the workers spawned by self.start_workers.
:param job: the job to schedule for running.
:return:
"""
l = _reraise_with_traceback(job.get_lambda_to_execute())
future = self.workers.submit(l, update_progress_func=self.update_progress, cancel_job_func=self._check_for_cancel)
# assign the futures to a dict, mapping them to a job
self.job_future_mapping[future] = job
self.future_job_mapping[job.job_id] = future
# callback for when the future is now!
future.add_done_callback(self.handle_finished_future)
# add the job to our cancel notifications data structure, with False at first
self.cancel_notifications[job.job_id] = False
return future
|
python
|
{
"resource": ""
}
|
q6810
|
WorkerBackend._check_for_cancel
|
train
|
def _check_for_cancel(self, job_id, current_stage=""):
"""
Check if a job has been requested to be cancelled. When called, the calling function can
optionally give the stage it is currently in, so the user has information on where the job
was before it was cancelled.
:param job_id: The job_id to check
:param current_stage: Where the job currently is
:return: raises a CancelledError if we find out that we were cancelled.
"""
future = self.future_job_mapping[job_id]
is_cancelled = future._state in [CANCELLED, CANCELLED_AND_NOTIFIED]
if is_cancelled:
raise UserCancelledError(last_stage=current_stage)
|
python
|
{
"resource": ""
}
|
q6811
|
Scheduler.request_job_cancel
|
train
|
def request_job_cancel(self, job_id):
"""
Send a message to the workers to cancel the job with job_id. We then mark the job in the storage
as being canceled.
:param job_id: the job to cancel
:return: None
"""
msg = CancelMessage(job_id)
self.messaging_backend.send(self.worker_mailbox, msg)
self.storage_backend.mark_job_as_canceling(job_id)
|
python
|
{
"resource": ""
}
|
q6812
|
Scheduler.handle_worker_messages
|
train
|
def handle_worker_messages(self, timeout):
"""
Read messages that are placed in self.incoming_mailbox,
and then update the job states corresponding to each message.
Args:
timeout: How long to wait for an incoming message, if the mailbox is empty right now.
Returns: None
"""
msgs = self.messaging_backend.popn(self.incoming_mailbox, n=20)
for msg in msgs:
self.handle_single_message(msg)
|
python
|
{
"resource": ""
}
|
q6813
|
Job.get_lambda_to_execute
|
train
|
def get_lambda_to_execute(self):
"""
return a function that executes the function assigned to this job.
        The returned function takes two arguments, update_progress_func and cancel_job_func.
        If job.track_progress is True, update_progress_func is forwarded to the job's function
        as an update_progress keyword argument so it can report progress back to the job
        scheduling system; likewise, if job.cancellable is True, cancel_job_func is forwarded
        as check_for_cancel.
:return: a function that executes the original function assigned to this job.
"""
def y(update_progress_func, cancel_job_func):
"""
            Call the function stored in self.func, passing in update_progress_func
            and/or cancel_job_func depending on whether self.track_progress or
            self.cancellable is set, respectively.
:param update_progress_func: The callback for when the job updates its progress.
:param cancel_job_func: The function that the function has to call occasionally to see
if the user wants to cancel the currently running job.
:return: Any
"""
func = import_stringified_func(self.func)
extrafunckwargs = {}
args, kwargs = copy.copy(self.args), copy.copy(self.kwargs)
if self.track_progress:
extrafunckwargs["update_progress"] = partial(update_progress_func, self.job_id)
if self.cancellable:
extrafunckwargs["check_for_cancel"] = partial(cancel_job_func, self.job_id)
kwargs.update(extrafunckwargs)
return func(*args, **kwargs)
return y
|
python
|
{
"resource": ""
}
|
q6814
|
Job.percentage_progress
|
train
|
def percentage_progress(self):
"""
Returns a float between 0 and 1, representing the current job's progress in its task.
If total_progress is not given or 0, just return self.progress.
:return: float corresponding to the total percentage progress of the job.
"""
if self.total_progress != 0:
return float(self.progress) / self.total_progress
else:
return self.progress
|
python
|
{
"resource": ""
}
|
q6815
|
Client.schedule
|
train
|
def schedule(self, func, *args, **kwargs):
"""
Schedules a function func for execution.
        One special parameter is track_progress. If passed in and not None, the func will be passed a
        keyword parameter called update_progress:
        def update_progress(progress, total_progress, stage=""):
        The running function can call update_progress to notify interested parties of its
        current progress.
        Another special parameter is the "cancellable" keyword parameter. When passed in and not None, a special
        "check_for_cancel" callable is passed to the func. When called, it raises an error if the user has
        requested that the job be cancelled.
The caller can also pass in any pickleable object into the "extra_metadata" parameter. This data is stored
within the job and can be retrieved when the job status is queried.
All other parameters are directly passed to the function when it starts running.
:type func: callable or str
:param func: A callable object that will be scheduled for running.
:return: a string representing the job_id.
"""
# if the func is already a job object, just schedule that directly.
if isinstance(func, Job):
job = func
# else, turn it into a job first.
else:
job = Job(func, *args, **kwargs)
job.track_progress = kwargs.pop('track_progress', False)
job.cancellable = kwargs.pop('cancellable', False)
job.extra_metadata = kwargs.pop('extra_metadata', {})
job_id = self.storage.schedule_job(job)
return job_id
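        # Illustrative usage sketch (not part of the original source; the task
        # function and client instance below are hypothetical):
        #
        #   def import_data(path, update_progress=None, check_for_cancel=None):
        #       for i, row in enumerate(open(path)):
        #           if update_progress:
        #               update_progress(i, 100, stage="importing")
        #           if check_for_cancel:
        #               check_for_cancel()
        #
        #   job_id = client.schedule(import_data, "/tmp/data.csv",
        #                            track_progress=True, cancellable=True)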
|
python
|
{
"resource": ""
}
|
q6816
|
Client.wait
|
train
|
def wait(self, job_id, timeout=None):
"""
Wait until the job given by job_id has a new update.
:param job_id: the id of the job to wait for.
:param timeout: how long to wait for a job state change before timing out.
:return: Job object corresponding to job_id
"""
return self.storage.wait_for_job_update(job_id, timeout=timeout)
|
python
|
{
"resource": ""
}
|
q6817
|
Client.wait_for_completion
|
train
|
def wait_for_completion(self, job_id, timeout=None):
"""
        Wait for the job given by job_id to change to COMPLETED, FAILED or CANCELED. Raises an
        iceqube.exceptions.TimeoutError if timeout is exceeded before each job state change.
:param job_id: the id of the job to wait for.
:param timeout: how long to wait for a job state change before timing out.
"""
while 1:
job = self.wait(job_id, timeout=timeout)
if job.state in [State.COMPLETED, State.FAILED, State.CANCELED]:
return job
else:
continue
|
python
|
{
"resource": ""
}
|
q6818
|
invalidate_cache_after_error
|
train
|
def invalidate_cache_after_error(f):
"""
    catch any exception and invalidate the internal cache holding the list of cluster nodes
"""
@wraps(f)
def wrapper(self, *args, **kwds):
try:
return f(self, *args, **kwds)
except Exception:
self.clear_cluster_nodes_cache()
raise
return wrapper
|
python
|
{
"resource": ""
}
|
q6819
|
ElastiCache.update_params
|
train
|
def update_params(self, params):
"""
update connection params to maximize performance
"""
if not params.get('BINARY', True):
raise Warning('To increase performance please use ElastiCache'
' in binary mode')
else:
params['BINARY'] = True # patch params, set binary mode
if 'OPTIONS' not in params:
# set special 'behaviors' pylibmc attributes
params['OPTIONS'] = {
'tcp_nodelay': True,
'ketama': True
}
|
python
|
{
"resource": ""
}
|
q6820
|
ElastiCache.get_cluster_nodes
|
train
|
def get_cluster_nodes(self):
"""
return list with all nodes in cluster
"""
if not hasattr(self, '_cluster_nodes_cache'):
server, port = self._servers[0].split(':')
try:
self._cluster_nodes_cache = (
get_cluster_info(server, port,
self._ignore_cluster_errors)['nodes'])
except (socket.gaierror, socket.timeout) as err:
raise Exception('Cannot connect to cluster {0} ({1})'.format(
self._servers[0], err
))
return self._cluster_nodes_cache
|
python
|
{
"resource": ""
}
|
q6821
|
restore_placeholders
|
train
|
def restore_placeholders(msgid, translation):
"""Restore placeholders in the translated message."""
    placeholders = re.findall(r'(\s*)(%(?:\(\w+\))?[sd])(\s*)', msgid)
    return re.sub(
        r'(\s*)(__[\w]+?__)(\s*)',
        lambda matches: '{0}{1}{2}'.format(placeholders[0][0], placeholders[0][1], placeholders.pop(0)[2]),
        translation)
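# Quick self-check sketch (not part of the original source): a placeholder that
# was "humanized" to __name__ style is put back into printf style.
if __name__ == "__main__":
    assert restore_placeholders('Hello %(name)s!',
                                'Bonjour __name__!') == 'Bonjour %(name)s!'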
|
python
|
{
"resource": ""
}
|
q6822
|
Command.translate_file
|
train
|
def translate_file(self, root, file_name, target_language):
"""
convenience method for translating a pot file
:param root: the absolute path of folder where the file is present
:param file_name: name of the file to be translated (it should be a pot file)
:param target_language: language in which the file needs to be translated
"""
logger.info('filling up translations for locale `{}`'.format(target_language))
po = polib.pofile(os.path.join(root, file_name))
strings = self.get_strings_to_translate(po)
# translate the strings,
# all the translated strings are returned
# in the same order on the same index
# viz. [a, b] -> [trans_a, trans_b]
tl = get_translator()
translated_strings = tl.translate_strings(strings, target_language, 'en', False)
self.update_translations(po, translated_strings)
po.save()
|
python
|
{
"resource": ""
}
|
q6823
|
Command.get_strings_to_translate
|
train
|
def get_strings_to_translate(self, po):
"""Return list of string to translate from po file.
:param po: POFile object to translate
:type po: polib.POFile
        :return: list of strings to translate
:rtype: collections.Iterable[six.text_type]
"""
strings = []
for index, entry in enumerate(po):
if not self.need_translate(entry):
continue
strings.append(humanize_placeholders(entry.msgid))
if entry.msgid_plural:
strings.append(humanize_placeholders(entry.msgid_plural))
return strings
|
python
|
{
"resource": ""
}
|
q6824
|
Command.update_translations
|
train
|
def update_translations(self, entries, translated_strings):
"""Update translations in entries.
        The order and number of translations should match the get_strings_to_translate() result.
:param entries: list of entries to translate
:type entries: collections.Iterable[polib.POEntry] | polib.POFile
:param translated_strings: list of translations
:type translated_strings: collections.Iterable[six.text_type]
"""
translations = iter(translated_strings)
for entry in entries:
if not self.need_translate(entry):
continue
if entry.msgid_plural:
# fill the first plural form with the entry.msgid translation
translation = next(translations)
translation = fix_translation(entry.msgid, translation)
entry.msgstr_plural[0] = translation
# fill the rest of plural forms with the entry.msgid_plural translation
translation = next(translations)
translation = fix_translation(entry.msgid_plural, translation)
for k, v in entry.msgstr_plural.items():
if k != 0:
entry.msgstr_plural[k] = translation
else:
translation = next(translations)
translation = fix_translation(entry.msgid, translation)
entry.msgstr = translation
# Set the 'fuzzy' flag on translation
if self.set_fuzzy and 'fuzzy' not in entry.flags:
entry.flags.append('fuzzy')
|
python
|
{
"resource": ""
}
|
q6825
|
load_ply
|
train
|
def load_ply(fileobj):
"""Same as load_ply, but takes a file-like object"""
def nextline():
"""Read next line, skip comments"""
while True:
line = fileobj.readline()
assert line != '' # eof
if not line.startswith('comment'):
return line.strip()
assert nextline() == 'ply'
assert nextline() == 'format ascii 1.0'
line = nextline()
assert line.startswith('element vertex')
nverts = int(line.split()[2])
# print 'nverts : ', nverts
assert nextline() == 'property float x'
assert nextline() == 'property float y'
assert nextline() == 'property float z'
line = nextline()
assert line.startswith('element face')
nfaces = int(line.split()[2])
# print 'nfaces : ', nfaces
assert nextline() == 'property list uchar int vertex_indices'
line = nextline()
has_texcoords = line == 'property list uchar float texcoord'
if has_texcoords:
assert nextline() == 'end_header'
else:
assert line == 'end_header'
# Verts
verts = np.zeros((nverts, 3))
for i in range(nverts):
vals = nextline().split()
verts[i, :] = [float(v) for v in vals[:3]]
# Faces
faces = []
faces_uv = []
for i in range(nfaces):
vals = nextline().split()
assert int(vals[0]) == 3
faces.append([int(v) for v in vals[1:4]])
if has_texcoords:
assert len(vals) == 11
assert int(vals[4]) == 6
faces_uv.append([(float(vals[5]), float(vals[6])),
(float(vals[7]), float(vals[8])),
(float(vals[9]), float(vals[10]))])
# faces_uv.append([float(v) for v in vals[5:]])
else:
assert len(vals) == 4
return verts, faces, faces_uv
|
python
|
{
"resource": ""
}
|
q6826
|
read_ssh_config
|
train
|
def read_ssh_config(path):
"""
Read ssh config file and return parsed SshConfig
"""
with open(path, "r") as fh_:
lines = fh_.read().splitlines()
return SshConfig(lines)
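# Illustrative usage sketch (not part of the original source; the host name and
# settings below are hypothetical):
#
#   cfg = read_ssh_config(os.path.expanduser("~/.ssh/config"))
#   print(cfg.host("myserver"))       # -> e.g. {"hostname": "...", "user": "..."}
#   cfg.set("myserver", Port=2222)    # overwrite or add a setting
#   cfg.write("config_new")           # write the modified config elsewhere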
|
python
|
{
"resource": ""
}
|
q6827
|
_remap_key
|
train
|
def _remap_key(key):
""" Change key into correct casing if we know the parameter """
if key in KNOWN_PARAMS:
return key
if key.lower() in known_params:
return KNOWN_PARAMS[known_params.index(key.lower())]
return key
|
python
|
{
"resource": ""
}
|
q6828
|
SshConfig.parse
|
train
|
def parse(self, lines):
"""Parse lines from ssh config file"""
cur_entry = None
for line in lines:
kv_ = _key_value(line)
if len(kv_) > 1:
key, value = kv_
if key.lower() == "host":
cur_entry = value
self.hosts_.add(value)
self.lines_.append(ConfigLine(line=line, host=cur_entry, key=key, value=value))
else:
self.lines_.append(ConfigLine(line=line))
|
python
|
{
"resource": ""
}
|
q6829
|
SshConfig.host
|
train
|
def host(self, host):
"""
Return the configuration of a specific host as a dictionary.
Dictionary always contains lowercase versions of the attribute names.
Parameters
----------
host : the host to return values for.
Returns
-------
dict of key value pairs, excluding "Host", empty map if host is not found.
"""
if host in self.hosts_:
vals = defaultdict(list)
for k, value in [(x.key.lower(), x.value) for x in self.lines_
if x.host == host and x.key.lower() != "host"]:
vals[k].append(value)
flatten = lambda x: x[0] if len(x) == 1 else x
return {k: flatten(v) for k, v in vals.items()}
return {}
|
python
|
{
"resource": ""
}
|
q6830
|
SshConfig.set
|
train
|
def set(self, host, **kwargs):
"""
Set configuration values for an existing host.
Overwrites values for existing settings, or adds new settings.
Parameters
----------
host : the Host to modify.
**kwargs : The new configuration parameters
"""
self.__check_host_args(host, kwargs)
def update_line(key, value):
"""Produce new config line"""
return " %s %s" % (key, value)
for key, values in kwargs.items():
if type(values) not in [list, tuple]: # pylint: disable=unidiomatic-typecheck
values = [values]
lower_key = key.lower()
update_idx = [idx for idx, x in enumerate(self.lines_)
if x.host == host and x.key.lower() == lower_key]
extra_remove = []
for idx in update_idx:
if values: # values available, update the line
value = values.pop()
self.lines_[idx].line = update_line(self.lines_[idx].key, value)
self.lines_[idx].value = value
else: # no more values available, remove the line
extra_remove.append(idx)
for idx in reversed(sorted(extra_remove)):
del self.lines_[idx]
if values:
mapped_key = _remap_key(key)
max_idx = max([idx for idx, line in enumerate(self.lines_) if line.host == host])
for value in values:
self.lines_.insert(max_idx + 1, ConfigLine(line=update_line(mapped_key, value),
host=host, key=mapped_key,
value=value))
|
python
|
{
"resource": ""
}
|
q6831
|
SshConfig.unset
|
train
|
def unset(self, host, *args):
"""
Removes settings for a host.
Parameters
----------
host : the host to remove settings from.
        *args : list of settings to remove.
"""
self.__check_host_args(host, args)
remove_idx = [idx for idx, x in enumerate(self.lines_)
if x.host == host and x.key.lower() in args]
for idx in reversed(sorted(remove_idx)):
del self.lines_[idx]
|
python
|
{
"resource": ""
}
|
q6832
|
SshConfig.rename
|
train
|
def rename(self, old_host, new_host):
"""
Renames a host configuration.
Parameters
----------
old_host : the host to rename.
new_host : the new host value
"""
if new_host in self.hosts_:
raise ValueError("Host %s: already exists." % new_host)
for line in self.lines_: # update lines
if line.host == old_host:
line.host = new_host
if line.key.lower() == "host":
line.value = new_host
line.line = "Host %s" % new_host
self.hosts_.remove(old_host) # update host cache
self.hosts_.add(new_host)
|
python
|
{
"resource": ""
}
|
q6833
|
SshConfig.add
|
train
|
def add(self, host, **kwargs):
"""
Add another host to the SSH configuration.
Parameters
----------
host: The Host entry to add.
**kwargs: The parameters for the host (without "Host" parameter itself)
"""
if host in self.hosts_:
raise ValueError("Host %s: exists (use update)." % host)
self.hosts_.add(host)
self.lines_.append(ConfigLine(line="", host=None))
self.lines_.append(ConfigLine(line="Host %s" % host, host=host, key="Host", value=host))
for k, v in kwargs.items():
if type(v) not in [list, tuple]:
v = [v]
mapped_k = _remap_key(k)
for value in v:
self.lines_.append(ConfigLine(line=" %s %s" % (mapped_k, str(value)), host=host, key=mapped_k, value=value))
self.lines_.append(ConfigLine(line="", host=None))
|
python
|
{
"resource": ""
}
|
q6834
|
SshConfig.remove
|
train
|
def remove(self, host):
"""
Removes a host from the SSH configuration.
Parameters
----------
host : The host to remove
"""
if host not in self.hosts_:
raise ValueError("Host %s: not found." % host)
self.hosts_.remove(host)
# remove lines, including comments inside the host lines
host_lines = [ idx for idx, x in enumerate(self.lines_) if x.host == host ]
remove_range = reversed(range(min(host_lines), max(host_lines) + 1))
for idx in remove_range:
del self.lines_[idx]
|
python
|
{
"resource": ""
}
|
q6835
|
SshConfig.write
|
train
|
def write(self, path):
"""
Writes ssh config file
Parameters
----------
path : The file to write to
"""
with open(path, "w") as fh_:
fh_.write(self.config())
|
python
|
{
"resource": ""
}
|
q6836
|
orthogonal_vector
|
train
|
def orthogonal_vector(v):
"""Return an arbitrary vector that is orthogonal to v"""
if v[1] != 0 or v[2] != 0:
c = (1, 0, 0)
else:
c = (0, 1, 0)
return np.cross(v, c)
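# Minimal self-check sketch (not part of the original source); relies on the
# module-level `np` import already used by orthogonal_vector.
if __name__ == "__main__":
    for v in [(0, 0, 1), (1, 2, 3), (5, 0, 0)]:
        w = orthogonal_vector(v)
        # the returned vector must be perpendicular to v
        assert np.dot(v, w) == 0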
|
python
|
{
"resource": ""
}
|
q6837
|
show_plane
|
train
|
def show_plane(orig, n, scale=1.0, **kwargs):
"""
    Show the plane with the given origin and normal. scale gives its size.
"""
b1 = orthogonal_vector(n)
b1 /= la.norm(b1)
b2 = np.cross(b1, n)
b2 /= la.norm(b2)
verts = [orig + scale*(-b1 - b2),
orig + scale*(b1 - b2),
orig + scale*(b1 + b2),
orig + scale*(-b1 + b2)]
faces = [(0, 1, 2), (0, 2, 3)]
trimesh3d(np.array(verts), faces, **kwargs)
|
python
|
{
"resource": ""
}
|
q6838
|
triangle_intersects_plane
|
train
|
def triangle_intersects_plane(mesh, tid, plane):
"""
Returns true if the given triangle is cut by the plane. This will return
false if a single vertex of the triangle lies on the plane
"""
dists = [point_to_plane_dist(mesh.verts[vid], plane)
for vid in mesh.tris[tid]]
side = np.sign(dists)
return not (side[0] == side[1] == side[2])
|
python
|
{
"resource": ""
}
|
q6839
|
compute_triangle_plane_intersections
|
train
|
def compute_triangle_plane_intersections(mesh, tid, plane, dist_tol=1e-8):
"""
Compute the intersection between a triangle and a plane
Returns a list of intersections in the form
(INTERSECT_EDGE, <intersection point>, <edge>) for edges intersection
(INTERSECT_VERTEX, <intersection point>, <vertex index>) for vertices
    This returns between 0 and 2 intersections :
    - 0 : the plane does not intersect the triangle
    - 1 : one of the triangle's vertices lies on the plane (so it just
      "touches" the plane without really intersecting)
    - 2 : the plane slices the triangle in two parts (either vertex-edge,
      vertex-vertex or edge-edge)
"""
# TODO: Use a distance cache
dists = {vid: point_to_plane_dist(mesh.verts[vid], plane)
for vid in mesh.tris[tid]}
# TODO: Use an edge intersection cache (we currently compute each edge
# intersection twice : once for each tri)
# This is to avoid registering the same vertex intersection twice
# from two different edges
vert_intersect = {vid: False for vid in dists.keys()}
# Iterate through the edges, cutting the ones that intersect
intersections = []
for e in mesh.edges_for_triangle(tid):
v1 = mesh.verts[e[0]]
d1 = dists[e[0]]
v2 = mesh.verts[e[1]]
d2 = dists[e[1]]
if np.fabs(d1) < dist_tol:
# Avoid creating the vertex intersection twice
if not vert_intersect[e[0]]:
# point on plane
intersections.append((INTERSECT_VERTEX, v1, e[0]))
vert_intersect[e[0]] = True
if np.fabs(d2) < dist_tol:
if not vert_intersect[e[1]]:
# point on plane
intersections.append((INTERSECT_VERTEX, v2, e[1]))
vert_intersect[e[1]] = True
# If vertices are on opposite sides of the plane, we have an edge
# intersection
if d1 * d2 < 0:
# Due to numerical accuracy, we could have both a vertex intersect
# and an edge intersect on the same vertex, which is impossible
if not vert_intersect[e[0]] and not vert_intersect[e[1]]:
# intersection factor (between 0 and 1)
# here is a nice drawing :
# https://ravehgonen.files.wordpress.com/2013/02/slide8.png
# keep in mind d1, d2 are *signed* distances (=> d1 - d2)
s = d1 / (d1 - d2)
vdir = v2 - v1
ipos = v1 + vdir * s
intersections.append((INTERSECT_EDGE, ipos, e))
return intersections
|
python
|
{
"resource": ""
}
|
q6840
|
_walk_polyline
|
train
|
def _walk_polyline(tid, intersect, T, mesh, plane, dist_tol):
"""
    Given an intersection, walk through the mesh triangles, computing the
    intersection with the cut plane for each visited triangle and adding
    those intersections to a polyline.
"""
T = set(T)
p = []
# Loop until we have explored all the triangles for the current
# polyline
while True:
p.append(intersect[1])
tid, intersections, T = get_next_triangle(mesh, T, plane,
intersect, dist_tol)
if tid is None:
break
# get_next_triangle returns triangles that our plane actually
# intersects (as opposed to touching only a single vertex),
# hence the assert
assert len(intersections) == 2
# Of the two returned intersections, one should have the
# intersection point equal to p[-1]
if la.norm(intersections[0][1] - p[-1]) < dist_tol:
intersect = intersections[1]
else:
assert la.norm(intersections[1][1] - p[-1]) < dist_tol, \
'%s not close to %s' % (str(p[-1]), str(intersections))
intersect = intersections[0]
return p, T
|
python
|
{
"resource": ""
}
|
q6841
|
cross_section
|
train
|
def cross_section(verts, tris, plane_orig, plane_normal, **kwargs):
"""
Compute the planar cross section of a mesh. This returns a set of
polylines.
Args:
        verts: Nx3 array of the vertex positions
        tris: Nx3 array of the triangles, containing vertex indices
plane_orig: 3-vector indicating the plane origin
plane_normal: 3-vector indicating the plane normal
Returns:
A list of Nx3 arrays, each representing a disconnected portion
of the cross section as a polyline
"""
mesh = TriangleMesh(verts, tris)
plane = Plane(plane_orig, plane_normal)
return cross_section_mesh(mesh, plane, **kwargs)
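# Illustrative usage sketch (not part of the original source); `verts`/`tris`
# would typically come from a loader such as load_ply above:
#
#   # slice the mesh with the z=0 plane
#   polylines = cross_section(verts, tris,
#                             plane_orig=(0, 0, 0), plane_normal=(0, 0, 1))
#   for p in polylines:
#       print(len(p))  # number of points in this disconnected polyline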
|
python
|
{
"resource": ""
}
|
q6842
|
merge_close_vertices
|
train
|
def merge_close_vertices(verts, faces, close_epsilon=1e-5):
"""
Will merge vertices that are closer than close_epsilon.
    Warning, this has an O(n^2) memory usage because we compute the full
vert-to-vert distance matrix. If you have a large mesh, might want
to use some kind of spatial search structure like an octree or some fancy
hashing scheme
Returns: new_verts, new_faces
"""
# Pairwise distance between verts
if USE_SCIPY:
D = spdist.cdist(verts, verts)
else:
D = np.sqrt(np.abs(pdist_squareformed_numpy(verts)))
# Compute a mapping from old to new : for each input vert, store the index
# of the new vert it will be merged into
    # use builtin int/bool dtypes (np.int/np.bool were removed in NumPy 1.24)
    old2new = np.zeros(D.shape[0], dtype=int)
    # A mask indicating if a vertex has already been merged into another
    merged_verts = np.zeros(D.shape[0], dtype=bool)
new_verts = []
for i in range(D.shape[0]):
if merged_verts[i]:
continue
else:
# The vertices that will be merged into this one
merged = np.flatnonzero(D[i, :] < close_epsilon)
old2new[merged] = len(new_verts)
new_verts.append(verts[i])
merged_verts[merged] = True
new_verts = np.array(new_verts)
# Recompute face indices to index in new_verts
    new_faces = np.zeros((len(faces), 3), dtype=int)
for i, f in enumerate(faces):
new_faces[i] = (old2new[f[0]], old2new[f[1]], old2new[f[2]])
# again, plot with utils.trimesh3d(new_verts, new_faces)
return new_verts, new_faces
|
python
|
{
"resource": ""
}
|
q6843
|
signed_to_float
|
train
|
def signed_to_float(hex: str) -> float:
"""Convert signed hexadecimal to floating value."""
if int(hex, 16) & 0x8000:
return -(int(hex, 16) & 0x7FFF) / 10
else:
return int(hex, 16) / 10
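# Quick self-check sketch (not part of the original source):
if __name__ == "__main__":
    assert signed_to_float('00FA') == 25.0   # 0x00FA = 250 -> 25.0
    assert signed_to_float('8010') == -1.6   # sign bit set, 0x0010 = 16 -> -1.6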
|
python
|
{
"resource": ""
}
|
q6844
|
encode_packet
|
train
|
def encode_packet(packet: dict) -> str:
"""Construct packet string from packet dictionary.
>>> encode_packet({
... 'protocol': 'newkaku',
... 'id': '000001',
... 'switch': '01',
... 'command': 'on',
... })
'10;newkaku;000001;01;on;'
"""
if packet['protocol'] == 'rfdebug':
return '10;RFDEBUG=' + packet['command'] + ';'
elif packet['protocol'] == 'rfudebug':
return '10;RFDEBUG=' + packet['command'] + ';'
else:
return SWITCH_COMMAND_TEMPLATE.format(
node=PacketHeader.master.value,
**packet
)
|
python
|
{
"resource": ""
}
|
q6845
|
serialize_packet_id
|
train
|
def serialize_packet_id(packet: dict) -> str:
"""Serialize packet identifiers into one reversable string.
>>> serialize_packet_id({
... 'protocol': 'newkaku',
... 'id': '000001',
... 'switch': '01',
... 'command': 'on',
... })
'newkaku_000001_01'
>>> serialize_packet_id({
... 'protocol': 'ikea koppla',
... 'id': '000080',
... 'switch': '0',
... 'command': 'on',
... })
'ikeakoppla_000080_0'
>>> # unserializeable protocol name without explicit entry
>>> # in translation table should be properly serialized
>>> serialize_packet_id({
... 'protocol': 'alecto v4',
... 'id': '000080',
... 'switch': '0',
... 'command': 'on',
... })
'alectov4_000080_0'
"""
    # translate protocol into something reversible
protocol = protocol_translations[packet['protocol']]
if protocol == UNKNOWN:
protocol = 'rflink'
return '_'.join(filter(None, [
protocol,
packet.get('id', None),
packet.get('switch', None),
]))
|
python
|
{
"resource": ""
}
|
q6846
|
packet_events
|
train
|
def packet_events(packet: dict) -> Generator:
"""Return list of all events in the packet.
>>> x = list(packet_events({
... 'protocol': 'alecto v1',
... 'id': 'ec02',
... 'temperature': 1.0,
... 'temperature_unit': '°C',
... 'humidity': 10,
... 'humidity_unit': '%',
... }))
>>> assert {
... 'id': 'alectov1_ec02_temp',
... 'sensor': 'temperature',
... 'value': 1.0,
... 'unit': '°C',
... } in x
>>> assert {
... 'id': 'alectov1_ec02_hum',
... 'sensor': 'humidity',
... 'value': 10,
... 'unit': '%',
... } in x
>>> y = list(packet_events({
... 'protocol': 'newkaku',
... 'id': '000001',
... 'switch': '01',
... 'command': 'on',
... }))
>>> assert {'id': 'newkaku_000001_01', 'command': 'on'} in y
"""
field_abbrev = {v: k for k, v in PACKET_FIELDS.items()}
packet_id = serialize_packet_id(packet)
events = {f: v for f, v in packet.items() if f in field_abbrev}
if 'command' in events or 'version' in events:
# switch events only have one event in each packet
yield dict(id=packet_id, **events)
else:
if packet_id == 'debug':
yield {
'id': 'raw',
'value': packet.get('pulses(usec)'),
'tm': packet.get('tm'),
'pulses': packet.get('pulses'),
}
else:
# sensors can have multiple
for sensor, value in events.items():
unit = packet.get(sensor + '_unit', None)
yield {
'id': packet_id + PACKET_ID_SEP + field_abbrev[sensor],
'sensor': sensor,
'value': value,
'unit': unit,
}
if packet_id != 'rflink':
yield {
'id': packet_id + PACKET_ID_SEP + 'update_time',
'sensor': 'update_time',
'value': round(time.time()),
'unit': 's',
}
|
python
|
{
"resource": ""
}
|
q6847
|
RFLinkProxy.forward_packet
|
train
|
def forward_packet(self, writer, packet, raw_packet):
"""Forward packet from client to RFLink."""
peer = writer.get_extra_info('peername')
log.debug(' %s:%s: forwarding data: %s', peer[0], peer[1], packet)
if 'command' in packet:
packet_id = serialize_packet_id(packet)
command = packet['command']
ack = yield from self.protocol.send_command_ack(
packet_id, command)
if ack:
writer.write("20;00;OK;".encode() + CRLF)
for _ in range(DEFAULT_SIGNAL_REPETITIONS-1):
yield from self.protocol.send_command_ack(
packet_id, command)
else:
self.protocol.send_raw_packet(raw_packet)
|
python
|
{
"resource": ""
}
|
q6848
|
RFLinkProxy.client_connected_callback
|
train
|
def client_connected_callback(self, reader, writer):
"""Handle connected client."""
peer = writer.get_extra_info('peername')
clients.append((reader, writer, peer))
log.info("Incoming connection from: %s:%s", peer[0], peer[1])
try:
while True:
data = yield from reader.readline()
if not data:
break
try:
line = data.decode().strip()
except UnicodeDecodeError:
line = '\x00'
# Workaround for domoticz issue #2816
if line[-1] != DELIM:
line = line + DELIM
if valid_packet(line):
yield from self.handle_raw_tx_packet(writer, line)
else:
log.warning(" %s:%s: dropping invalid data: '%s'", peer[0], peer[1], line)
pass
except ConnectionResetError:
pass
except Exception as e:
log.exception(e)
log.info("Disconnected from: %s:%s", peer[0], peer[1])
writer.close()
clients.remove((reader, writer, peer))
|
python
|
{
"resource": ""
}
|
q6849
|
RFLinkProxy.raw_callback
|
train
|
def raw_callback(self, raw_packet):
"""Send data to all connected clients."""
if not ';PONG;' in raw_packet:
log.info('forwarding packet %s to clients', raw_packet)
else:
log.debug('forwarding packet %s to clients', raw_packet)
writers = [i[1] for i in list(clients)]
for writer in writers:
writer.write(str(raw_packet).encode() + CRLF)
|
python
|
{
"resource": ""
}
|
q6850
|
RFLinkProxy.reconnect
|
train
|
def reconnect(self, exc=None):
"""Schedule reconnect after connection has been unexpectedly lost."""
# Reset protocol binding before starting reconnect
self.protocol = None
if not self.closing:
log.warning('disconnected from Rflink, reconnecting')
self.loop.create_task(self.connect())
|
python
|
{
"resource": ""
}
|
q6851
|
create_rflink_connection
|
train
|
def create_rflink_connection(port=None, host=None, baud=57600, protocol=RflinkProtocol,
packet_callback=None, event_callback=None,
disconnect_callback=None, ignore=None, loop=None):
"""Create Rflink manager class, returns transport coroutine."""
# use default protocol if not specified
protocol = partial(
protocol,
loop=loop if loop else asyncio.get_event_loop(),
packet_callback=packet_callback,
event_callback=event_callback,
disconnect_callback=disconnect_callback,
ignore=ignore if ignore else [],
)
# setup serial connection if no transport specified
if host:
conn = loop.create_connection(protocol, host, port)
else:
baud = baud
conn = create_serial_connection(loop, protocol, port, baud)
return conn
|
python
|
{
"resource": ""
}
|
q6852
|
ProtocolBase.data_received
|
train
|
def data_received(self, data):
"""Add incoming data to buffer."""
data = data.decode()
log.debug('received data: %s', data.strip())
self.buffer += data
self.handle_lines()
|
python
|
{
"resource": ""
}
|
q6853
|
ProtocolBase.send_raw_packet
|
train
|
def send_raw_packet(self, packet: str):
"""Encode and put packet string onto write buffer."""
data = packet + '\r\n'
log.debug('writing data: %s', repr(data))
self.transport.write(data.encode())
|
python
|
{
"resource": ""
}
|
q6854
|
ProtocolBase.log_all
|
train
|
def log_all(self, file):
"""Log all data received from RFLink to file."""
global rflink_log
if file == None:
rflink_log = None
else:
log.debug('logging to: %s', file)
rflink_log = open(file, 'a')
|
python
|
{
"resource": ""
}
|
q6855
|
PacketHandling.handle_packet
|
train
|
def handle_packet(self, packet):
"""Process incoming packet dict and optionally call callback."""
if self.packet_callback:
# forward to callback
self.packet_callback(packet)
else:
print('packet', packet)
|
python
|
{
"resource": ""
}
|
q6856
|
PacketHandling.send_command
|
train
|
def send_command(self, device_id, action):
"""Send device command to rflink gateway."""
command = deserialize_packet_id(device_id)
command['command'] = action
log.debug('sending command: %s', command)
self.send_packet(command)
|
python
|
{
"resource": ""
}
|
q6857
|
CommandSerialization.send_command_ack
|
train
|
def send_command_ack(self, device_id, action):
"""Send command, wait for gateway to repond with acknowledgment."""
# serialize commands
yield from self._ready_to_send.acquire()
acknowledgement = None
try:
self._command_ack.clear()
self.send_command(device_id, action)
log.debug('waiting for acknowledgement')
try:
yield from asyncio.wait_for(self._command_ack.wait(),
TIMEOUT.seconds, loop=self.loop)
log.debug('packet acknowledged')
except concurrent.futures._base.TimeoutError:
acknowledgement = {'ok': False, 'message': 'timeout'}
log.warning('acknowledge timeout')
else:
acknowledgement = self._last_ack.get('ok', False)
finally:
# allow next command
self._ready_to_send.release()
return acknowledgement
|
python
|
{
"resource": ""
}
|
q6858
|
EventHandling._handle_packet
|
train
|
def _handle_packet(self, packet):
"""Event specific packet handling logic.
        Breaks the packet into events and fires the configured event callback, or
        nicely prints the events to the console.
"""
events = packet_events(packet)
for event in events:
if self.ignore_event(event['id']):
log.debug('ignoring event with id: %s', event)
continue
log.debug('got event: %s', event)
if self.event_callback:
self.event_callback(event)
else:
self.handle_event(event)
|
python
|
{
"resource": ""
}
|
q6859
|
EventHandling.ignore_event
|
train
|
def ignore_event(self, event_id):
"""Verify event id against list of events to ignore.
>>> e = EventHandling(ignore=[
... 'test1_00',
... 'test2_*',
... ])
>>> e.ignore_event('test1_00')
True
>>> e.ignore_event('test2_00')
True
>>> e.ignore_event('test3_00')
False
"""
for ignore in self.ignore:
if (ignore == event_id or
(ignore.endswith('*') and event_id.startswith(ignore[:-1]))):
return True
return False
|
python
|
{
"resource": ""
}
|
q6860
|
_initial_population_gsa
|
train
|
def _initial_population_gsa(population_size, solution_size, lower_bounds,
upper_bounds):
"""Create a random initial population of floating point values.
Args:
population_size: an integer representing the number of solutions in the population.
        solution_size: the number of values in each solution.
        lower_bounds: a list, each value is a lower bound for the corresponding
            part of the solution.
        upper_bounds: a list, each value is an upper bound for the corresponding
            part of the solution.
Returns:
list; A list of random solutions.
"""
if len(lower_bounds) != solution_size or len(upper_bounds) != solution_size:
raise ValueError(
"Lower and upper bounds much have a length equal to the problem size."
)
return common.make_population(population_size, common.random_real_solution,
solution_size, lower_bounds, upper_bounds)
|
python
|
{
"resource": ""
}
|
q6861
|
_new_population_gsa
|
train
|
def _new_population_gsa(population, fitnesses, velocities, lower_bounds,
upper_bounds, grav_initial, grav_reduction_rate,
iteration, max_iterations):
"""Generate a new population as given by GSA algorithm.
In GSA paper, grav_initial is G_i
"""
# Update the gravitational constant, and the best and worst of the population
# Calculate the mass and acceleration for each solution
# Update the velocity and position of each solution
population_size = len(population)
solution_size = len(population[0])
# In GSA paper, grav is G
grav = _next_grav_gsa(grav_initial, grav_reduction_rate, iteration,
max_iterations)
masses = _get_masses(fitnesses)
# Create bundled solution with position and mass for the K best calculation
# Also store index to later check if two solutions are the same
# Sorted by solution fitness (mass)
solutions = [{
'pos': pos,
'mass': mass,
'index': i
} for i, (pos, mass) in enumerate(zip(population, masses))]
solutions.sort(key=lambda x: x['mass'], reverse=True)
# Get the force on each solution
# Only the best K solutions apply force
# K linearly decreases to 1
num_best = int(population_size - (population_size - 1) *
(iteration / float(max_iterations)))
forces = []
for i in range(population_size):
force_vectors = []
for j in range(num_best):
# If it is not the same solution
if i != solutions[j]['index']:
force_vectors.append(
_gsa_force(grav, masses[i], solutions[j]['mass'],
population[i], solutions[j]['pos']))
forces.append(_gsa_total_force(force_vectors, solution_size))
# Get the acceleration of each solution
accelerations = []
for i in range(population_size):
accelerations.append(_gsa_acceleration(forces[i], masses[i]))
# Update the velocity of each solution
new_velocities = []
for i in range(population_size):
new_velocities.append(
_gsa_update_velocity(velocities[i], accelerations[i]))
# Create the new population
new_population = []
for i in range(population_size):
new_position = _gsa_update_position(population[i], new_velocities[i])
# Constrain to bounds
new_position = list(
numpy.clip(new_position, lower_bounds, upper_bounds))
new_population.append(new_position)
return new_population, new_velocities
|
python
|
{
"resource": ""
}
|
q6862
|
_next_grav_gsa
|
train
|
def _next_grav_gsa(grav_initial, grav_reduction_rate, iteration,
max_iterations):
"""Calculate G as given by GSA algorithm.
In GSA paper, grav is G
"""
return grav_initial * math.exp(
-grav_reduction_rate * iteration / float(max_iterations))
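# Small sanity-check sketch (not part of the original source): G starts at
# grav_initial and decays exponentially over the iterations.
if __name__ == "__main__":
    assert _next_grav_gsa(100.0, 20.0, 0, 100) == 100.0
    assert _next_grav_gsa(100.0, 20.0, 100, 100) < 1e-6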
|
python
|
{
"resource": ""
}
|
q6863
|
_get_masses
|
train
|
def _get_masses(fitnesses):
"""Convert fitnesses into masses, as given by GSA algorithm."""
# Obtain constants
best_fitness = max(fitnesses)
worst_fitness = min(fitnesses)
fitness_range = best_fitness - worst_fitness
# Calculate raw masses for each solution
raw_masses = []
for fitness in fitnesses:
# Epsilon is added to prevent divide by zero errors
raw_masses.append((fitness - worst_fitness) / (fitness_range + EPSILON)
+ EPSILON)
# Normalize to obtain final mass for each solution
total_mass = sum(raw_masses)
masses = []
for mass in raw_masses:
masses.append(mass / total_mass)
return masses
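# Small sanity-check sketch (not part of the original source): masses are
# normalized to sum to 1 and ordered like the fitnesses.
if __name__ == "__main__":
    masses = _get_masses([1.0, 2.0, 3.0])
    assert abs(sum(masses) - 1.0) < 1e-9
    assert masses[2] == max(masses) and masses[0] == min(masses)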
|
python
|
{
"resource": ""
}
|
q6864
|
_gsa_force
|
train
|
def _gsa_force(grav, mass_i, mass_j, position_i, position_j):
"""Gives the force of solution j on solution i.
Variable name in GSA paper given in ()
args:
grav: The gravitational constant. (G)
mass_i: The mass of solution i (derived from fitness). (M_i)
mass_j: The mass of solution j (derived from fitness). (M_j)
position_i: The position of solution i. (x_i)
position_j: The position of solution j. (x_j)
returns:
numpy.array; The force vector of solution j on solution i.
"""
position_diff = numpy.subtract(position_j, position_i)
distance = numpy.linalg.norm(position_diff)
# The first 3 terms give the magnitude of the force
# The last term is a vector that provides the direction
# Epsilon prevents divide by zero errors
return grav * (mass_i * mass_j) / (distance + EPSILON) * position_diff
|
python
|
{
"resource": ""
}
|
q6865
|
_gsa_total_force
|
train
|
def _gsa_total_force(force_vectors, vector_length):
"""Return a randomly weighted sum of the force vectors.
    args:
        force_vectors: A list of force vectors on solution i.
        vector_length: The number of dimensions in each force vector.
    returns:
        list; The total force on solution i, as a randomly weighted sum in each dimension.
"""
if len(force_vectors) == 0:
return [0.0] * vector_length
# The GSA algorithm specifies that the total force in each dimension
# is a random sum of the individual forces in that dimension.
# For this reason we sum the dimensions individually instead of simply
# using vec_a+vec_b
total_force = [0.0] * vector_length
for force_vec in force_vectors:
for i in range(vector_length):
total_force[i] += random.uniform(0.0, 1.0) * force_vec[i]
return total_force
|
python
|
{
"resource": ""
}
|
q6866
|
_gsa_update_velocity
|
train
|
def _gsa_update_velocity(velocity, acceleration):
"""Stochastically update velocity given acceleration.
In GSA paper, velocity is v_i, acceleration is a_i
"""
# The GSA algorithm specifies that the new velocity for each dimension
# is a sum of a random fraction of its current velocity in that dimension,
# and its acceleration in that dimension
# For this reason we sum the dimensions individually instead of simply
# using vec_a+vec_b
new_velocity = []
for vel, acc in zip(velocity, acceleration):
new_velocity.append(random.uniform(0.0, 1.0) * vel + acc)
return new_velocity
|
python
|
{
"resource": ""
}
|
q6867
|
_new_population_genalg
|
train
|
def _new_population_genalg(population,
fitnesses,
mutation_chance=0.02,
crossover_chance=0.7,
selection_function=gaoperators.tournament_selection,
crossover_function=gaoperators.one_point_crossover):
"""Perform all genetic algorithm operations on a population, and return a new population.
population must have an even number of chromosomes.
Args:
population: A list of binary lists, ex. [[0,1,1,0], [1,0,1,0]]
        fitnesses: A list of fitnesses that correspond with chromosomes in the population,
            ex. [1.2, 10.8]
mutation_chance: the chance that a bit will be flipped during mutation
crossover_chance: the chance that two parents will be crossed during crossover
selection_function: A function that will select parents for crossover and mutation
crossover_function: A function that will cross two parents
Returns:
list; A new population of chromosomes, that should be more fit.
"""
# Selection
# Create the population of parents that will be crossed and mutated.
intermediate_population = selection_function(population, fitnesses)
# Crossover
new_population = _crossover(intermediate_population, crossover_chance,
crossover_function)
# Mutation
# Mutates chromosomes in place
gaoperators.random_flip_mutate(new_population, mutation_chance)
# Return new population
return new_population
|
python
|
{
"resource": ""
}
|
q6868
|
_crossover
|
train
|
def _crossover(population, crossover_chance, crossover_operator):
"""Perform crossover on a population, return the new crossed-over population."""
new_population = []
for i in range(0, len(population), 2): # For every other index
# Take parents from every set of 2 in the population
# Wrap index if out of range
try:
parents = (population[i], population[i + 1])
except IndexError:
parents = (population[i], population[0])
# If crossover takes place
if random.uniform(0.0, 1.0) <= crossover_chance:
# Add children to the new population
new_population.extend(crossover_operator(parents))
else:
new_population.extend(parents)
return new_population
|
python
|
{
"resource": ""
}
|
q6869
|
random_real_solution
|
train
|
def random_real_solution(solution_size, lower_bounds, upper_bounds):
"""Make a list of random real numbers between lower and upper bounds."""
return [
random.uniform(lower_bounds[i], upper_bounds[i])
for i in range(solution_size)
]
|
python
|
{
"resource": ""
}
|
q6870
|
make_population
|
train
|
def make_population(population_size, solution_generator, *args, **kwargs):
"""Make a population with the supplied generator."""
return [
solution_generator(*args, **kwargs) for _ in range(population_size)
]
|
python
|
{
"resource": ""
}
|
q6871
|
tournament_selection
|
train
|
def tournament_selection(population,
fitnesses,
num_competitors=2,
diversity_weight=0.0):
"""Create a list of parents with tournament selection.
Args:
population: A list of solutions.
fitnesses: A list of fitness values corresponding to solutions in population.
num_competitors: Number of solutions to compare every round.
Best solution among competitors is selected.
diversity_weight: Weight of diversity metric.
Determines how frequently diversity is used to select tournament winners.
Note that fitness is given a weight of 1.0.
diversity_weight == 1.0 gives equal weight to diversity and fitness.
"""
# Optimization if diversity factor is disabled
if diversity_weight <= 0.0:
        # Materialize the pairs so random.sample can draw from them
        # (zip returns a one-shot iterator on Python 3)
        fitness_pop = list(zip(fitnesses, population))  # Zip for easy fitness comparison
# Get num_competitors random chromosomes, then add best to result,
# by taking max fitness and getting chromosome from tuple.
# Repeat until full.
return [
max(random.sample(fitness_pop, num_competitors))[1]
for _ in range(len(population))
]
else:
indices = range(len(population))
# Select tournament winners by either max fitness or diversity.
# The metric to check is randomly selected, weighted by diversity_weight.
# diversity_metric is calculated between the given solution,
# and the list of all currently selected solutions.
selected_solutions = []
# Select as many solutions are there are in population
for _ in range(len(population)):
competitor_indices = random.sample(indices, num_competitors)
# Select by either fitness or diversity,
# Selected by weighted random selection
# NOTE: We assume fitness has a weight of 1.0
if random.uniform(0.0, 1.0) < (1.0 / (1.0 + diversity_weight)):
# Fitness
selected_solutions.append(
max(
zip([fitnesses[i] for i in competitor_indices],
[population[i] for i in competitor_indices]))[-1])
else:
# Diversity
# Break ties by fitness
selected_solutions.append(
max(
zip([
_diversity_metric(population[i], selected_solutions
) for i in competitor_indices
], [fitnesses[i] for i in competitor_indices],
[population[i] for i in competitor_indices]))[-1])
return selected_solutions
|
python
|
{
"resource": ""
}
|
q6872
|
stochastic_selection
|
train
|
def stochastic_selection(population, fitnesses):
"""Create a list of parents with stochastic universal sampling."""
pop_size = len(population)
probabilities = _fitnesses_to_probabilities(fitnesses)
# Create selection list (for stochastic universal sampling)
selection_list = []
selection_spacing = 1.0 / pop_size
selection_start = random.uniform(0.0, selection_spacing)
for i in range(pop_size):
selection_list.append(selection_start + selection_spacing * i)
# Select intermediate population according to selection list
intermediate_population = []
for selection in selection_list:
for (i, probability) in enumerate(probabilities):
if probability >= selection:
intermediate_population.append(population[i])
break
random.shuffle(intermediate_population)
return intermediate_population
|
python
|
{
"resource": ""
}
|
q6873
|
roulette_selection
|
train
|
def roulette_selection(population, fitnesses):
"""Create a list of parents with roulette selection."""
probabilities = _fitnesses_to_probabilities(fitnesses)
intermediate_population = []
for _ in range(len(population)):
# Choose a random individual
selection = random.uniform(0.0, 1.0)
# Iterate over probabilities list
for i, probability in enumerate(probabilities):
if probability >= selection: # First probability that is greater
intermediate_population.append(population[i])
break
return intermediate_population
|
python
|
{
"resource": ""
}
|
q6874
|
_diversity_metric
|
train
|
def _diversity_metric(solution, population):
"""Return diversity value for solution compared to given population.
Metric is sum of distance between solution and each solution in population,
normalized to [0.0, 1.0].
"""
# Edge case for empty population
# If there are no other solutions, the given solution has maximum diversity
if population == []:
return 1.0
return (
sum([_manhattan_distance(solution, other) for other in population])
# Normalize (assuming each value in solution is in range [0.0, 1.0])
# NOTE: len(solution) is maximum manhattan distance
/ (len(population) * len(solution)))
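
# Worked example with hypothetical values, assuming the module's
# _manhattan_distance helper: the distances are 2.0 and 1.0, and the maximum
# possible sum is len(population) * len(solution) = 4, so 3.0 / 4 = 0.75.
assert _diversity_metric([1.0, 1.0], [[0.0, 0.0], [0.0, 1.0]]) == 0.75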
|
python
|
{
"resource": ""
}
|
q6875
|
_manhattan_distance
|
train
|
def _manhattan_distance(vec_a, vec_b):
"""Return manhattan distance between two lists of numbers."""
if len(vec_a) != len(vec_b):
raise ValueError('len(vec_a) must equal len(vec_b)')
return sum(map(lambda a, b: abs(a - b), vec_a, vec_b))
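
# Worked example with hypothetical values: |1 - 4| + |2 - 6| = 3 + 4 = 7.
assert _manhattan_distance([1, 2], [4, 6]) == 7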
|
python
|
{
"resource": ""
}
|
q6876
|
_fitnesses_to_probabilities
|
train
|
def _fitnesses_to_probabilities(fitnesses):
"""Return a list of probabilities proportional to fitnesses."""
# Do not allow negative fitness values
min_fitness = min(fitnesses)
if min_fitness < 0.0:
# Make smallest fitness value 0
        fitnesses = [f - min_fitness for f in fitnesses]
fitness_sum = sum(fitnesses)
# Generate probabilities
# Creates a list of increasing values.
# The greater the gap between two values, the greater the probability.
# Ex. [0.1, 0.23, 0.56, 1.0]
prob_sum = 0.0
probabilities = []
for fitness in fitnesses:
if fitness < 0:
raise ValueError(
"Fitness cannot be negative, fitness = {}.".format(fitness))
prob_sum += (fitness / fitness_sum)
probabilities.append(prob_sum)
probabilities[-1] += 0.0001 # to compensate for rounding errors
return probabilities
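
# Worked example with hypothetical values: fitnesses [1.0, 3.0] give
# proportions 0.25 and 0.75, so the cumulative list is [0.25, 1.0001]
# after the final padding.
example_probabilities = _fitnesses_to_probabilities([1.0, 3.0])
assert abs(example_probabilities[0] - 0.25) < 1e-9
assert example_probabilities[-1] >= 1.0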
|
python
|
{
"resource": ""
}
|
q6877
|
one_point_crossover
|
train
|
def one_point_crossover(parents):
"""Perform one point crossover on two parent chromosomes.
Select a random position in the chromosome.
Take genes to the left from one parent and the rest from the other parent.
Ex. p1 = xxxxx, p2 = yyyyy, position = 2 (starting at 0), child = xxyyy
"""
# The point that the chromosomes will be crossed at (see Ex. above)
crossover_point = random.randint(1, len(parents[0]) - 1)
return (_one_parent_crossover(parents[0], parents[1], crossover_point),
_one_parent_crossover(parents[1], parents[0], crossover_point))
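
# Minimal usage sketch, assuming module-level `import random` and the module's
# _one_parent_crossover helper used above; the parent chromosomes are hypothetical.
child_a, child_b = one_point_crossover(([0, 0, 0, 0], [1, 1, 1, 1]))
# e.g. ([0, 0, 1, 1], [1, 1, 0, 0]) when crossover_point happens to be 2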
|
python
|
{
"resource": ""
}
|
q6878
|
uniform_crossover
|
train
|
def uniform_crossover(parents):
"""Perform uniform crossover on two parent chromosomes.
Randomly take genes from one parent or the other.
Ex. p1 = xxxxx, p2 = yyyyy, child = xyxxy
"""
chromosome_length = len(parents[0])
children = [[], []]
for i in range(chromosome_length):
selected_parent = random.randint(0, 1)
# Take from the selected parent, and add it to child 1
# Take from the other parent, and add it to child 2
children[0].append(parents[selected_parent][i])
children[1].append(parents[1 - selected_parent][i])
return children
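
# Minimal usage sketch, assuming module-level `import random`. With these
# complementary (hypothetical) parents, the two children are bitwise
# complements of each other.
child_a, child_b = uniform_crossover(([0, 0, 0, 0], [1, 1, 1, 1]))
assert [a + b for a, b in zip(child_a, child_b)] == [1, 1, 1, 1]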
|
python
|
{
"resource": ""
}
|
q6879
|
random_flip_mutate
|
train
|
def random_flip_mutate(population, mutation_chance):
"""Mutate every chromosome in a population, list is modified in place.
Mutation occurs by randomly flipping bits (genes).
"""
for chromosome in population: # For every chromosome in the population
for i in range(len(chromosome)): # For every bit in the chromosome
# If mutation takes place
if random.uniform(0.0, 1.0) <= mutation_chance:
chromosome[i] = 1 - chromosome[i]
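
# Minimal usage sketch, assuming module-level `import random`. A mutation_chance
# of 1.0 flips every bit, so the (hypothetical) population is inverted in place.
example_population = [[0, 1, 0], [1, 1, 1]]
random_flip_mutate(example_population, 1.0)
assert example_population == [[1, 0, 1], [0, 0, 0]]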
|
python
|
{
"resource": ""
}
|
q6880
|
_duplicates
|
train
|
def _duplicates(list_):
"""Return dict mapping item -> indices."""
item_indices = {}
for i, item in enumerate(list_):
try:
item_indices[item].append(i)
except KeyError: # First time seen
item_indices[item] = [i]
return item_indices
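
# Worked example with hypothetical values: each distinct item maps to all of
# its positions in the list.
assert _duplicates(['a', 'b', 'a']) == {'a': [0, 2], 'b': [1]}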
|
python
|
{
"resource": ""
}
|
q6881
|
_parse_parameter_locks
|
train
|
def _parse_parameter_locks(optimizer, meta_parameters, parameter_locks):
"""Synchronize meta_parameters and locked_values.
The union of these two sets will have all necessary parameters.
locked_values will have the parameters specified in parameter_locks.
"""
# WARNING: meta_parameters is modified inline
locked_values = {}
if parameter_locks:
for name in parameter_locks:
            # Store the current optimizer value
            # and remove from our dictionary of parameters to optimize
locked_values[name] = getattr(optimizer, name)
meta_parameters.pop(name)
return locked_values
|
python
|
{
"resource": ""
}
|
q6882
|
_get_hyperparameter_solution_size
|
train
|
def _get_hyperparameter_solution_size(meta_parameters):
"""Determine size of binary encoding of parameters.
Also adds binary size information for each parameter.
"""
# WARNING: meta_parameters is modified inline
solution_size = 0
for _, parameters in meta_parameters.iteritems():
if parameters['type'] == 'discrete':
# Binary encoding of discrete values -> log_2 N
num_values = len(parameters['values'])
binary_size = helpers.binary_size(num_values)
elif parameters['type'] == 'int':
# Use enough bits to cover range from min to max
# + 1 to include max in range
int_range = parameters['max'] - parameters['min'] + 1
binary_size = helpers.binary_size(int_range)
elif parameters['type'] == 'float':
# Use enough bits to provide fine steps between min and max
float_range = parameters['max'] - parameters['min']
# * 1000 provides 1000 values between each natural number
binary_size = helpers.binary_size(float_range * 1000)
else:
raise ValueError('Parameter type "{}" does not match known values'.
format(parameters['type']))
# Store binary size with parameters for use in decode function
parameters['binary_size'] = binary_size
solution_size += binary_size
return solution_size
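
# Worked sizing sketch (comments only; helpers.binary_size is this package's
# own helper and is assumed here to behave like ceil(log2(n))): a 'discrete'
# parameter with 8 values would need 3 bits, an 'int' parameter over [0, 100]
# would need 7 bits for its 101 values, and a 'float' parameter over
# [0.0, 1.0] would need 10 bits for its 1000 steps, giving
# solution_size == 3 + 7 + 10.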
|
python
|
{
"resource": ""
}
|
q6883
|
_make_hyperparameter_decode_func
|
train
|
def _make_hyperparameter_decode_func(locked_values, meta_parameters):
"""Create a function that converts the binary solution to parameters."""
# Locked parameters are also returned by decode function, but are not
# based on solution
def decode(solution):
"""Convert solution into dict of hyperparameters."""
        # Start with our stationary (locked) parameters
hyperparameters = copy.deepcopy(locked_values)
# Obtain moving hyperparameters from binary solution
index = 0
for name, parameters in meta_parameters.iteritems():
# Obtain binary for this hyperparameter
binary_size = parameters['binary_size']
binary = solution[index:index + binary_size]
index += binary_size # Just index to start of next hyperparameter
# Decode binary
if parameters['type'] == 'discrete':
i = helpers.binary_to_int(
binary, upper_bound=len(parameters['values']) - 1)
value = parameters['values'][i]
elif parameters['type'] == 'int':
value = helpers.binary_to_int(
binary,
lower_bound=parameters['min'],
upper_bound=parameters['max'])
elif parameters['type'] == 'float':
value = helpers.binary_to_float(
binary,
lower_bound=parameters['min'],
upper_bound=parameters['max'])
else:
raise ValueError(
'Parameter type "{}" does not match known values'.format(
parameters['type']))
# Store value
hyperparameters[name] = value
return hyperparameters
return decode
|
python
|
{
"resource": ""
}
|
q6884
|
_meta_fitness_func
|
train
|
def _meta_fitness_func(parameters,
_optimizer,
_problems,
_master_fitness_dict,
_runs=20):
"""Test a metaheuristic with parameters encoded in solution.
Our goal is to minimize number of evaluation runs until a solution is found,
while maximizing chance of finding solution to the underlying problem
NOTE: while meta optimization requires a 'known' solution, this solution
can be an estimate to provide the meta optimizer with a sense of progress.
"""
# Create the optimizer with parameters encoded in solution
optimizer = copy.deepcopy(_optimizer)
optimizer._set_hyperparameters(parameters)
optimizer.logging = False
# Preload fitness dictionary from master, and disable clearing dict
# NOTE: master_fitness_dict will be modified inline, and therefore,
# we do not need to take additional steps to update it
    if _master_fitness_dict is not None:  # None means low memory mode
optimizer.clear_cache = False
optimizer._Optimizer__encoded_cache = _master_fitness_dict
# Because metaheuristics are stochastic, we run the optimizer multiple times,
# to obtain an average of performance
all_evaluation_runs = []
solutions_found = []
for _ in range(_runs):
for problem in _problems:
# Get performance for problem
optimizer.optimize(problem)
all_evaluation_runs.append(optimizer.fitness_runs)
if optimizer.solution_found:
solutions_found.append(1.0)
else:
solutions_found.append(0.0)
# Our main goal is to minimize time the optimizer takes
fitness = 1.0 / helpers.avg(all_evaluation_runs)
# Optimizer is heavily penalized for missing solutions
# To avoid 0 fitness
fitness = fitness * helpers.avg(solutions_found)**2 + 1e-19
return fitness
|
python
|
{
"resource": ""
}
|
q6885
|
Problem.copy
|
train
|
def copy(self,
fitness_function=None,
decode_function=None,
fitness_args=None,
decode_args=None,
fitness_kwargs=None,
decode_kwargs=None):
"""Return a copy of this problem.
Optionally replace this problems arguments with those passed in.
"""
if fitness_function is None:
fitness_function = self._fitness_function
if decode_function is None:
decode_function = self._decode_function
if fitness_args is None:
fitness_args = self._fitness_args
if decode_args is None:
decode_args = self._decode_args
if fitness_kwargs is None:
fitness_kwargs = self._fitness_kwargs
if decode_kwargs is None:
decode_kwargs = self._decode_kwargs
return Problem(
fitness_function,
decode_function=decode_function,
fitness_args=fitness_args,
decode_args=decode_args,
fitness_kwargs=fitness_kwargs,
decode_kwargs=decode_kwargs)
|
python
|
{
"resource": ""
}
|
q6886
|
Problem.get_fitness
|
train
|
def get_fitness(self, solution):
"""Return fitness for the given solution."""
return self._fitness_function(solution, *self._fitness_args,
**self._fitness_kwargs)
|
python
|
{
"resource": ""
}
|
q6887
|
Problem.decode_solution
|
train
|
def decode_solution(self, encoded_solution):
"""Return solution from an encoded representation."""
return self._decode_function(encoded_solution, *self._decode_args,
**self._decode_kwargs)
|
python
|
{
"resource": ""
}
|
q6888
|
Optimizer.optimize
|
train
|
def optimize(self, problem, max_iterations=100, max_seconds=float('inf'),
cache_encoded=True, cache_solution=False, clear_cache=True,
logging_func=_print_fitnesses,
n_processes=0):
"""Find the optimal inputs for a given fitness function.
Args:
problem: An instance of Problem. The problem to solve.
max_iterations: The number of iterations to optimize before stopping.
max_seconds: Maximum number of seconds to optimize for, before stopping.
                Note that this condition is only checked once per iteration,
meaning optimization can take more than max_seconds,
especially if fitnesses take a long time to calculate.
cache_encoded: bool; Whether or not to cache fitness of encoded strings.
Encoded strings are produced directly by the optimizer.
If an encoded string is found in cache, it will not be decoded.
cache_solution: bool; Whether or not to cache fitness of decoded solutions.
                Decoded solutions are provided by the problem's decode function.
                If the problem does not provide a hash solution function,
                various naive hashing methods will be attempted, including:
tuple, tuple(sorted(dict.items)), str.
clear_cache: bool; Whether or not to reset cache after optimization.
Disable if you want to run optimize multiple times on the same problem.
logging_func: func/None; Function taking:
iteration, population, solutions, fitnesses, best_solution, best_fitness
Called after every iteration.
Use for custom logging, or set to None to disable logging.
Note that best_solution and best_fitness are the best of all iterations so far.
n_processes: int; Number of processes to use for multiprocessing.
If <= 0, do not use multiprocessing.
Returns:
object; The best solution, after decoding.
"""
if not isinstance(problem, Problem):
raise TypeError('problem must be an instance of Problem class')
# Prepare pool for multiprocessing
if n_processes > 0:
try:
pool = multiprocessing.Pool(processes=n_processes)
except NameError:
raise ImportError(
'pickle, dill, or multiprocessing library is not available.'
)
else:
pool = None
        # Set first, in case the optimizer uses _max_iterations in initialization
self.__max_iterations = max_iterations
# Initialize algorithm
self._reset()
best_solution = {'solution': None, 'fitness': None}
population = self.initial_population()
try:
# Begin optimization loop
start = time.clock()
for self.iteration in itertools.count(1): # Infinite sequence of iterations
# Evaluate potential solutions
solutions, fitnesses, finished = self._get_fitnesses(
problem,
population,
cache_encoded=cache_encoded,
cache_solution=cache_solution,
pool=pool)
# If the best fitness from this iteration is better than
# the global best
best_index, best_fitness = max(
enumerate(fitnesses), key=operator.itemgetter(1))
if best_fitness > best_solution['fitness']:
# Store the new best solution
best_solution['fitness'] = best_fitness
best_solution['solution'] = solutions[best_index]
if logging_func:
logging_func(self.iteration, population, solutions,
fitnesses, best_solution['solution'],
best_solution['fitness'])
# Break if solution found
if finished:
self.solution_found = True
break
# Break if out of time
if time.clock() - start >= max_seconds:
break
# Break if out of iterations
if self.iteration >= max_iterations:
break
# Continue optimizing
population = self.next_population(population, fitnesses)
# Store best internally, before returning
self.best_solution = best_solution['solution']
self.best_fitness = best_solution['fitness']
finally:
# Clear caches
if clear_cache:
# Clear caches from memory
self.__encoded_cache = {}
self.__solution_cache = {}
# Reset encoded, and decoded key functions
self._get_encoded_key = self._get_encoded_key_type
self._get_solution_key = self._get_solution_key_type
# Clean up multiprocesses
try:
pool.terminate() # Kill outstanding work
pool.close() # Close child processes
except AttributeError:
# No pool
assert pool is None
return self.best_solution
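
# Minimal usage sketch, assuming the GenAlg optimizer and Problem class from
# this package (the exact import path for Problem is an assumption), applied
# to a hypothetical onemax-style problem over 8-bit chromosomes.
from optimal import GenAlg, Problem

def onemax_fitness(solution):
    fitness = sum(solution)
    # Returning (fitness, finished) lets the optimizer stop early
    return fitness, fitness == len(solution)

onemax_problem = Problem(onemax_fitness, decode_function=lambda encoded: encoded)
genalg = GenAlg(8)
best = genalg.optimize(onemax_problem, max_iterations=50, logging_func=None)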
|
python
|
{
"resource": ""
}
|
q6889
|
Optimizer._reset_bookkeeping
|
train
|
def _reset_bookkeeping(self):
"""Reset bookkeeping parameters to initial values.
Call before beginning optimization.
"""
self.iteration = 0
self.fitness_runs = 0
self.best_solution = None
self.best_fitness = None
self.solution_found = False
|
python
|
{
"resource": ""
}
|
q6890
|
Optimizer._get_fitnesses
|
train
|
def _get_fitnesses(self,
problem,
population,
cache_encoded=True,
cache_solution=False,
pool=None):
"""Get the fitness for every solution in a population.
Args:
problem: Problem; The problem that defines fitness.
population: list; List of potential solutions.
pool: None/multiprocessing.Pool; Pool of processes for parallel
decoding and evaluation.
"""
fitnesses = [None] * len(population)
#############################
# Decoding
#############################
if cache_encoded:
try:
encoded_keys = map(self._get_encoded_key, population)
# Get all fitnesses from encoded_solution cache
to_decode_indices = []
for i, encoded_key in enumerate(encoded_keys):
try:
fitnesses[i] = self.__encoded_cache[encoded_key]
# Note that this fitness will never be better than the current best
                    # because we have already evaluated it,
                    # so we do not need to worry about decoding the solution
except KeyError: # Cache miss
to_decode_indices.append(i)
except UnhashableError: # Cannot hash encoded solution
encoded_keys = None
to_decode_indices = range(len(population))
else:
encoded_keys = None
to_decode_indices = range(len(population))
# Decode all that need to be decoded, and combine back into list the same length
# as population
if encoded_keys is None:
to_decode_keys = None
else:
to_decode_keys = [encoded_keys[i] for i in to_decode_indices]
solutions = [None] * len(population)
for i, solution in zip(to_decode_indices,
self._pmap(
problem.decode_solution,
[population[i] for i in to_decode_indices],
to_decode_keys,
pool)):
solutions[i] = solution
#############################
# Evaluating
#############################
if cache_solution:
try:
# Try to make solutions hashable
# Use user provided hash function if available
if problem.hash_solution:
hash_solution_func = problem.hash_solution
else:
# Otherwise, default to built in "smart" hash function
hash_solution_func = self._get_solution_key
solution_keys = [
hash_solution_func(solution)
# None corresponds to encoded_solutions found in cache
if solution is not None else None for solution in solutions
]
# Get all fitnesses from solution cache
to_eval_indices = []
for i, solution_key in enumerate(solution_keys):
if solution_key is not None: # Otherwise, fitness already found in encoded cache
try:
fitnesses[i] = self.__solution_cache[solution_key]
except KeyError: # Cache miss
to_eval_indices.append(i)
except UnhashableError: # Cannot hash solution
solution_keys = None
to_eval_indices = to_decode_indices[:]
else:
solution_keys = None
to_eval_indices = to_decode_indices[:]
# Evaluate all that need to be evaluated, and combine back into fitnesses list
if solution_keys is None:
if encoded_keys is None:
# No way to detect duplicates
to_eval_keys = None
else:
# Cannot use decoded keys, default to encoded keys
to_eval_keys = [encoded_keys[i] for i in to_eval_indices]
else:
to_eval_keys = [solution_keys[i] for i in to_eval_indices]
finished = False
eval_bookkeeping = {}
for i, fitness_finished in zip(to_eval_indices,
self._pmap(
problem.get_fitness,
[solutions[i] for i in to_eval_indices],
to_eval_keys,
pool,
bookkeeping_dict=eval_bookkeeping)):
# Unpack fitness_finished tuple
try:
fitness, maybe_finished = fitness_finished
if maybe_finished:
finished = True
except TypeError: # Not (fitness, finished) tuple
fitness = fitness_finished
fitnesses[i] = fitness
#############################
# Finishing
#############################
# Bookkeeping
# keep track of how many times fitness is evaluated
    self.fitness_runs += len(eval_bookkeeping['key_indices'])  # Evaluated once for each unique key
# Add evaluated fitnesses to caches (both of them)
if cache_encoded and encoded_keys is not None:
for i in to_decode_indices: # Encoded cache misses
self.__encoded_cache[encoded_keys[i]] = fitnesses[i]
if cache_solution and solution_keys is not None:
for i in to_eval_indices: # Decoded cache misses
self.__solution_cache[solution_keys[i]] = fitnesses[i]
# Return
# assert None not in fitnesses # Un-comment for debugging
return solutions, fitnesses, finished
|
python
|
{
"resource": ""
}
|
q6891
|
Optimizer._pmap
|
train
|
def _pmap(self, func, items, keys, pool, bookkeeping_dict=None):
"""Efficiently map func over all items.
Calls func only once for duplicate items.
Item duplicates are detected by corresponding keys.
Unless keys is None.
Serial if pool is None, but still skips duplicates.
"""
if keys is not None: # Otherwise, cannot hash items
# Remove duplicates first (use keys)
# Create mapping (dict) of key to list of indices
key_indices = _duplicates(keys).values()
else: # Cannot hash items
# Assume no duplicates
key_indices = [[i] for i in range(len(items))]
# Use only the first of duplicate indices in decoding
if pool is not None:
# Parallel map
results = pool.map(
functools.partial(_unpickle_run, pickle.dumps(func)),
[items[i[0]] for i in key_indices])
else:
results = map(func, [items[i[0]] for i in key_indices])
# Add bookkeeping
if bookkeeping_dict is not None:
bookkeeping_dict['key_indices'] = key_indices
# Combine duplicates back into list
all_results = [None] * len(items)
for indices, result in zip(key_indices, results):
for j, i in enumerate(indices):
# Avoid duplicate result objects in list,
# in case they are used in functions with side effects
if j > 0:
result = copy.deepcopy(result)
all_results[i] = result
return all_results
|
python
|
{
"resource": ""
}
|
q6892
|
Optimizer._set_hyperparameters
|
train
|
def _set_hyperparameters(self, parameters):
"""Set internal optimization parameters."""
for name, value in parameters.iteritems():
try:
getattr(self, name)
except AttributeError:
raise ValueError(
'Each parameter in parameters must be an attribute. '
'{} is not.'.format(name))
setattr(self, name, value)
|
python
|
{
"resource": ""
}
|
q6893
|
Optimizer._get_hyperparameters
|
train
|
def _get_hyperparameters(self):
"""Get internal optimization parameters."""
hyperparameters = {}
for key in self._hyperparameters:
hyperparameters[key] = getattr(self, key)
return hyperparameters
|
python
|
{
"resource": ""
}
|
q6894
|
Optimizer.optimize_hyperparameters
|
train
|
def optimize_hyperparameters(self,
problems,
parameter_locks=None,
smoothing=20,
max_iterations=100,
_meta_optimizer=None,
_low_memory=True):
"""Optimize hyperparameters for a given problem.
Args:
            parameter_locks: a list of strings, each corresponding to a hyperparameter
that should not be optimized.
problems: Either a single problem, or a list of problem instances,
allowing optimization based on multiple similar problems.
smoothing: int; number of runs to average over for each set of hyperparameters.
max_iterations: The number of iterations to optimize before stopping.
_low_memory: disable performance enhancements to save memory
(they use a lot of memory otherwise).
"""
if smoothing <= 0:
raise ValueError('smoothing must be > 0')
# problems supports either one or many problem instances
if isinstance(problems, collections.Iterable):
for problem in problems:
if not isinstance(problem, Problem):
raise TypeError(
'problem must be Problem instance or list of Problem instances'
)
elif isinstance(problems, Problem):
problems = [problems]
else:
raise TypeError(
'problem must be Problem instance or list of Problem instances'
)
# Copy to avoid permanent modification
meta_parameters = copy.deepcopy(self._hyperparameters)
# First, handle parameter locks, since it will modify our
# meta_parameters dict
locked_values = _parse_parameter_locks(self, meta_parameters,
parameter_locks)
# We need to know the size of our chromosome,
# based on the hyperparameters to optimize
solution_size = _get_hyperparameter_solution_size(meta_parameters)
# We also need to create a decode function to transform the binary solution
# into parameters for the metaheuristic
decode = _make_hyperparameter_decode_func(locked_values,
meta_parameters)
# A master fitness dictionary can be stored for use between calls
# to meta_fitness
if _low_memory:
master_fitness_dict = None
else:
master_fitness_dict = {}
additional_parameters = {
'_optimizer': self,
'_problems': problems,
'_runs': smoothing,
'_master_fitness_dict': master_fitness_dict,
}
META_FITNESS = Problem(
_meta_fitness_func,
decode_function=decode,
fitness_kwargs=additional_parameters)
if _meta_optimizer is None:
# Initialize default meta optimizer
            # GenAlg is used because it supports both discrete and continuous
# attributes
from optimal import GenAlg
            # Create metaheuristic with computed decode function and solution
# size
_meta_optimizer = GenAlg(solution_size)
else:
# Adjust supplied metaheuristic for this problem
_meta_optimizer._solution_size = solution_size
# Determine the best hyperparameters with a metaheuristic
best_parameters = _meta_optimizer.optimize(
META_FITNESS, max_iterations=max_iterations)
# Set the hyperparameters inline
self._set_hyperparameters(best_parameters)
# And return
return best_parameters
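
# Minimal usage sketch, assuming `genalg` and `onemax_problem` as constructed
# in the hypothetical onemax sketch after Optimizer.optimize; small smoothing
# and iteration counts keep the meta-optimization cheap.
best_hyperparameters = genalg.optimize_hyperparameters(
    onemax_problem, smoothing=2, max_iterations=5)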
|
python
|
{
"resource": ""
}
|
q6895
|
compare
|
train
|
def compare(optimizers, problems, runs=20, all_kwargs={}):
"""Compare a set of optimizers.
Args:
optimizers: list/Optimizer; Either a list of optimizers to compare,
or a single optimizer to test on each problem.
problems: list/Problem; Either a problem instance or a list of problem instances,
one for each optimizer.
all_kwargs: dict/list<dict>; Either the Optimizer.optimize keyword arguments
for all optimizers, or a list of keyword arguments, one for each optimizer.
runs: int; How many times to run each optimizer (smoothness)
Returns:
dict; mapping optimizer identifier to stats.
"""
if not (isinstance(optimizers, collections.Iterable)
or isinstance(problems, collections.Iterable)):
raise TypeError('optimizers or problems must be iterable')
# If optimizers is not a list, repeat into list for each problem
if not isinstance(optimizers, collections.Iterable):
optimizers = [copy.deepcopy(optimizers) for _ in range(len(problems))]
# If problems is not a list, repeat into list for each optimizer
if not isinstance(problems, collections.Iterable):
problems = [copy.deepcopy(problems) for _ in range(len(optimizers))]
# If all_kwargs is not a list, repeat it into a list
if isinstance(all_kwargs, dict):
all_kwargs = [all_kwargs] * len(optimizers)
elif not isinstance(all_kwargs, collections.Iterable):
raise TypeError('all_kwargs must be dict or list of dict')
stats = {}
key_counts = {}
for optimizer, problem, kwargs in zip(optimizers, problems, all_kwargs):
# For nice human readable dictionaries, extract useful names from
# optimizer
class_name = optimizer.__class__.__name__
fitness_func_name = problem._fitness_function.__name__
key_name = '{} {}'.format(class_name, fitness_func_name)
# Keep track of how many optimizers of each class / fitness func
# for better keys in stats dict
try:
key_counts[key_name] += 1
except KeyError:
key_counts[key_name] = 1
# Foo 1, Foo 2, Bar 1, etc.
key = '{} {}'.format(key_name, key_counts[key_name])
print key + ': ',
# Finally, get the actual stats
stats[key] = benchmark(optimizer, problem, runs=runs, **kwargs)
print
return stats
|
python
|
{
"resource": ""
}
|
q6896
|
benchmark
|
train
|
def benchmark(optimizer, problem, runs=20, **kwargs):
"""Run an optimizer through a problem multiple times.
Args:
optimizer: Optimizer; The optimizer to benchmark.
problem: Problem; The problem to benchmark on.
runs: int > 0; Number of times that optimize is called on problem.
Returns:
dict; A dictionary of various statistics.
"""
stats = {'runs': []}
# Disable logging, to avoid spamming the user
# TODO: Maybe we shouldn't disable by default?
kwargs = copy.copy(kwargs)
kwargs['logging_func'] = None
# Determine effectiveness of metaheuristic over many runs
# The stochastic nature of metaheuristics make this necessary
# for an accurate evaluation
for _ in range(runs):
optimizer.optimize(problem, **kwargs)
# Convert bool to number for mean and standard deviation calculations
if optimizer.solution_found:
finished_num = 1.0
else:
finished_num = 0.0
stats_ = {
'fitness': optimizer.best_fitness,
'fitness_runs': optimizer.fitness_runs,
'solution_found': finished_num
}
stats['runs'].append(stats_)
# Little progress 'bar'
print '.',
# Mean gives a good overall idea of the metaheuristics effectiveness
# Standard deviation (SD) shows consistency of performance
_add_mean_sd_to_stats(stats)
return stats
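
# Minimal usage sketch, assuming `genalg` and `onemax_problem` as constructed
# in the hypothetical onemax sketch after Optimizer.optimize.
bench_stats = benchmark(genalg, onemax_problem, runs=5)
print('mean fitness: {}'.format(bench_stats['mean']['fitness']))
print('fitness sd: {}'.format(bench_stats['standard_deviation']['fitness']))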
|
python
|
{
"resource": ""
}
|
q6897
|
aggregate
|
train
|
def aggregate(all_stats):
"""Combine stats for multiple optimizers to obtain one mean and sd.
Useful for combining stats for the same optimizer class and multiple problems.
Args:
all_stats: dict; output from compare.
"""
aggregate_stats = {'means': [], 'standard_deviations': []}
for optimizer_key in all_stats:
# runs is the mean, for add_mean_sd function
mean_stats = copy.deepcopy(all_stats[optimizer_key]['mean'])
mean_stats['name'] = optimizer_key
aggregate_stats['means'].append(mean_stats)
# also keep track of standard deviations
sd_stats = copy.deepcopy(
all_stats[optimizer_key]['standard_deviation'])
sd_stats['name'] = optimizer_key
aggregate_stats['standard_deviations'].append(sd_stats)
_add_mean_sd_to_stats(aggregate_stats, 'means')
return aggregate_stats
|
python
|
{
"resource": ""
}
|
q6898
|
_mean_of_runs
|
train
|
def _mean_of_runs(stats, key='runs'):
"""Obtain the mean of stats.
Args:
stats: dict; A set of stats, structured as above.
key: str; Optional key to determine where list of runs is found in stats
"""
num_runs = len(stats[key])
first = stats[key][0]
mean = {}
for stat_key in first:
        # Skip non-numeric attributes
if isinstance(first[stat_key], numbers.Number):
mean[stat_key] = sum(run[stat_key]
for run in stats[key]) / float(num_runs)
return mean
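
# Worked example with hypothetical values: numeric fields are averaged across
# runs, non-numeric fields are skipped.
assert _mean_of_runs({'runs': [{'fitness': 1.0, 'note': 'a'},
                               {'fitness': 3.0, 'note': 'b'}]}) == {'fitness': 2.0}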
|
python
|
{
"resource": ""
}
|
q6899
|
_sd_of_runs
|
train
|
def _sd_of_runs(stats, mean, key='runs'):
"""Obtain the standard deviation of stats.
Args:
stats: dict; A set of stats, structured as above.
mean: dict; Mean for each key in stats.
key: str; Optional key to determine where list of runs is found in stats
"""
num_runs = len(stats[key])
first = stats[key][0]
standard_deviation = {}
for stat_key in first:
        # Skip non-numeric attributes
if isinstance(first[stat_key], numbers.Number):
standard_deviation[stat_key] = math.sqrt(
sum((run[stat_key] - mean[stat_key])**2
for run in stats[key]) / float(num_runs))
return standard_deviation
|
python
|
{
"resource": ""
}
|