_id stringlengths 2 7 | title stringlengths 1 88 | partition stringclasses 3 values | text stringlengths 75 19.8k | language stringclasses 1 value | meta_information dict |
|---|---|---|---|---|---|
def get_match_info(template, match, state):
    """
    Given a template and a regex match within said template, return a
    dictionary of information about the match to be used to help parse the
    template.
    """
    info = match.groupdict()
    # Put special delimiter cases in terms of normal ones
    if info['change']:
        info.update({
            'tag_type': '=',
            'tag_key': info['delims'],
        })
    elif info['raw']:
        info.update({
            'tag_type': '&',
            'tag_key': info['raw_key'],
        })
    # Rename the important match variables for convenience
    tag_start = match.start()
    tag_end = match.end()
    tag_type = info['tag_type']
    tag_key = info['tag_key']
    lead_wsp = info['lead_wsp']
    end_wsp = info['end_wsp']
    # Hoisted: previously computed inside the standalone branch and never
    # used, while len(template) was re-evaluated three more times.
    template_length = len(template)
    begins_line = (tag_start == 0) or (template[tag_start - 1] in state.eol_chars)
    ends_line = (tag_end == template_length or
                 template[tag_end] in state.eol_chars)
    interpolating = (tag_type in ('', '&'))
    standalone = (not interpolating) and begins_line and ends_line
    if end_wsp:
        tag_end -= len(end_wsp)
    if standalone:
        # Standalone tags strip exactly one occurrence of '\r', '\n', or
        # '\r\n' from the end of the line.
        if tag_end < template_length and template[tag_end] == '\r':
            tag_end += 1
        if tag_end < template_length and template[tag_end] == '\n':
            tag_end += 1
    elif lead_wsp:
        # Non-standalone tags keep leading whitespace as literal text.
        tag_start += len(lead_wsp)
        lead_wsp = ''
    info.update({
        'tag_start': tag_start,
        'tag_end': tag_end,
        'tag_type': tag_type,
        'tag_key': tag_key,
        'lead_wsp': lead_wsp,
        'end_wsp': end_wsp,
        'begins_line': begins_line,
        'ends_line': ends_line,
        'interpolating': interpolating,
        'standalone': standalone,
    })
    return info
"resource": ""
} |
def get_tag_context(name, state):
    """
    Given a tag name, return its associated value as defined in the current
    context stack.

    Dotted names ("a.b.c") walk into nested contexts: each resolvable
    prefix is pushed onto the context stack.  Returns a tuple of
    (number of contexts pushed, resolved value or None).
    """
    pushed = 0
    while '.' in name:
        head, name = name.split('.', 1)
        hit = state.context.get(head)
        if not hit:
            # Prefix did not resolve; fall through and look up the rest.
            break
        state.context.push(hit)
        pushed += 1
    return pushed, state.context.get(name)
"resource": ""
} |
def render(template, context, partials=None, state=None):
    """Renders a given mustache template, with sane defaults.

    :param template: mustache template text
    :param context: a Context instance, or a plain mapping to wrap in one
    :param partials: optional mapping of partial templates
    :param state: optional pre-built State; a fresh one is created otherwise
    """
    # Create a new state by default.  NOTE: `partials` previously defaulted
    # to a shared mutable dict ({}); None preserves the same behaviour
    # (the `if partials:` guard treats both as "no partials") safely.
    state = state or State()
    # Add context to the state dict, wrapping plain mappings.
    if isinstance(context, Context):
        state.context = context
    else:
        state.context = Context(context)
    # Add any partials to the state dict.
    if partials:
        state.partials.push(partials)
    # Render the template.
    return __render(make_unicode(template), state)
"resource": ""
} |
def parallel(func, inputs, n_jobs, expand_args=False):
    """
    Convenience wrapper around joblib's parallelization.

    When expand_args is true each element of `inputs` is unpacked as
    positional arguments to `func`; otherwise it is passed as a single
    argument.
    """
    runner = Parallel(n_jobs=n_jobs)
    if expand_args:
        return runner(delayed(func)(*item) for item in inputs)
    return runner(delayed(func)(item) for item in inputs)
"resource": ""
} |
def breeding_plugevent(request, breeding_id):
    """This view defines a form for adding new plug events from a breeding cage.

    This form requires a breeding_id from a breeding set and restricts the
    PlugFemale and PlugMale to animals that are defined in that breeding cage.
    """
    breeding = get_object_or_404(Breeding, pk=breeding_id)
    if request.method == "POST":
        form = BreedingPlugForm(request.POST, request.FILES)
        if form.is_valid():
            plug = form.save(commit=False)
            plug.Breeding_id = breeding.id
            plug.save()
            # Persist any many-to-many data for the form.  The previous code
            # called form.save() here, which re-saved the instance a second
            # time.
            form.save_m2m()
            return HttpResponseRedirect(reverse("plugevents-list"))
    else:
        form = BreedingPlugForm()
    # Restrict the selectable animals to those in this breeding cage.
    form.fields["PlugFemale"].queryset = breeding.Females.all()
    form.fields["PlugMale"].queryset = breeding.Male.all()
    return render(request, 'breeding_plugevent_form.html', {'form': form, 'breeding': breeding})
"resource": ""
} |
def get_queryset(self):
    """The queryset is over-ridden to show only plug events in which the
    strain matches the breeding strain."""
    slug = self.kwargs['slug']
    self.strain = get_object_or_404(Strain, Strain_slug__iexact=slug)
    return PlugEvents.objects.filter(Breeding__Strain=self.strain)
"resource": ""
} |
def command(state, args):
    """Fix cache issues caused by schema pre-v4."""
    if len(args) > 1:
        # Command takes no arguments beyond its own name.
        print(f'Usage: {args[0]}')
    else:
        database = state.db
        _refresh_incomplete_anime(database)
        _fix_cached_completed(database)
"resource": ""
} |
def sync_local_to_remote(force="no"):
    """
    Sync your local postgres database with remote

    Example:
        fabrik prod sync_local_to_remote:force=yes
    """
    _check_requirements()
    if force != "yes":
        message = "This will replace the remote database '%s' with your "\
            "local '%s', are you sure [y/n]" % (env.psql_db, env.local_psql_db)
        answer = prompt(message, "y")
        if answer != "y":
            logger.info("Sync stopped")
            return
    init_tasks()  # Bootstrap fabrik
    # Create database dump
    local_file = "sync_%s.sql.tar.gz" % int(time.time()*1000)
    local_path = "/tmp/%s" % local_file
    with context_managers.shell_env(PGPASSWORD=env.local_psql_password):
        elocal("pg_dump -h localhost -Fc -f %s -U %s %s -x -O" % (
            local_path, env.local_psql_user, env.local_psql_db
        ))
    remote_path = "/tmp/%s" % local_file
    # Upload sync file.  Fixed: fabric's put() signature is
    # put(local_path, remote_path); the arguments were previously reversed,
    # so the dump was never uploaded correctly.
    put(local_path, remote_path)
    # Import sync file by performing the following task (drop, create, import)
    with context_managers.shell_env(PGPASSWORD=env.psql_password):
        env.run("pg_restore --clean -h localhost -d %s -U %s '%s'" % (
            env.psql_db,
            env.psql_user,
            remote_path)
        )
    # Cleanup
    env.run("rm %s" % remote_path)
    elocal("rm %s" % local_path)
    # Trigger hook
    run_hook("postgres.after_sync_local_to_remote")
    logger.info("Sync complete")
"resource": ""
} |
def sync_remote_to_local(force="no"):
    """
    Sync your remote postgres database with local
    Example:
        fabrik prod sync_remote_to_local
    """
    _check_requirements()
    if force != "yes":
        message = "This will replace your local database '%s' with the "\
            "remote '%s', are you sure [y/n]" % (env.local_psql_db, env.psql_db)
        if prompt(message, "y") != "y":
            logger.info("Sync stopped")
            return
    # Bootstrap fabrik
    init_tasks()
    # Dump the remote database into the backup directory
    dump_name = "postgresql/sync_%s.sql.tar.gz" % int(time.time()*1000)
    remote_dump = paths.get_backup_path(dump_name)
    env.run("mkdir -p %s" % paths.get_backup_path("postgresql"))
    with context_managers.shell_env(PGPASSWORD=env.psql_password):
        env.run("pg_dump -h localhost -Fc -f %s -U %s %s -x -O" % (
            remote_dump, env.psql_user, env.psql_db
        ))
    # Download the dump file
    local_dump = "/tmp/%s" % dump_name
    get(remote_dump, local_dump)
    # Recreate the local database from the dump (drop, create, import)
    with context_managers.shell_env(PGPASSWORD=env.local_psql_password):
        elocal("pg_restore --clean -h localhost -d %s -U %s '%s'" % (
            env.local_psql_db,
            env.local_psql_user,
            local_dump)
        )
    # Remove the temporary dump files on both ends
    env.run("rm %s" % remote_dump)
    elocal("rm %s" % local_dump)
    # Trigger hook
    run_hook("postgres.after_sync_remote_to_local")
    logger.info("Sync complete")
"resource": ""
} |
def apply_check_config(self, config):
    """
    Pull the `query` and `response` fields out of a validated config
    dictionary and store them on the instance.
    """
    query, response = config.get("query"), config.get("response")
    self.query = query
    self.expected_response = response
"resource": ""
} |
def perform(self):
    """
    Performs a straightforward TCP request and response.

    Sends the TCP `query` to the proper host and port, and gathers response
    chunks from the socket until a full line is acquired.  If the response
    line matches the expected value, the check passes.  If not, the check
    fails.  The check will also fail if there's an error during any step of
    the send/receive process.

    Returns True on pass, False on fail.  The socket is always closed,
    including when connect() raises (previously it leaked in that case).
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        sock.connect((self.host, self.port))
        # If no query/response is defined, a successful connection is a pass.
        if not self.query:
            return True
        try:
            sock.sendall(self.query)
        except Exception:
            logger.exception("Error sending TCP query message.")
            return False
        response, extra = sockutils.get_response(sock)
        logger.debug("response: %s (extra: %s)", response, extra)
        if response != self.expected_response:
            # logger.warning replaces the deprecated logger.warn alias.
            logger.warning(
                "Response does not match expected value: %s (expected %s)",
                response, self.expected_response
            )
            return False
        return True
    finally:
        sock.close()
"resource": ""
} |
def _get_section_values(self, config, section):
    """ extract src and dst values from a section
    """
    def hosts(option):
        # Resolve a host-list option to validated host:port strings.
        if config.has_option(section, option):
            return self._get_hosts_from_names(config.get(section, option))
        return None

    def files(option):
        # Resolve a file option to a single-element absolute-path list.
        if config.has_option(section, option):
            return [self._get_abs_filepath(config.get(section, option))]
        return None

    src_host = hosts('src.host')
    src_file = files('src.file')
    if src_host is None and src_file is None:
        raise conferr('Section "%s" gets no sources' % section)
    dst_host = hosts('dst.host')
    dst_file = files('dst.file')
    if dst_host is None and dst_file is None:
        raise conferr('Section "%s" gets no destinations' % section)
    return (src_host, src_file, dst_host, dst_file)
"resource": ""
} |
q41112 | RLConfig._assemble_flowtable | train | def _assemble_flowtable(self, values):
""" generate a flowtable from a tuple of descriptors.
"""
values = map(lambda x: [] if x is None else x, values)
src = values[0] + values[1]
dst = values[2] + values[3]
thistable = dict()
for s in src:
thistable[s] = dst
return thistable | python | {
"resource": ""
} |
q41113 | RLConfig._detect_loop | train | def _detect_loop(self):
""" detect loops in flow table, raise error if being present
"""
for source, dests in self.flowtable.items():
if source in dests:
raise conferr('Loops detected: %s --> %s' % (source, source)) | python | {
"resource": ""
} |
q41114 | RLConfig._get_hosts_from_ports | train | def _get_hosts_from_ports(self, ports):
""" validate hostnames from a list of ports
"""
hosts = map(lambda x: 'localhost:%d' % int(x.strip()), ports.split(','))
return list(set(hosts)) | python | {
"resource": ""
} |
def _get_hosts_from_names(self, names):
    """ validate hostnames from a list of names
    """
    result = set()
    for raw in names.split(','):
        host = raw.strip()
        name = host.split(':')[0]
        if not valid_hostname(name):
            raise conferr('Invalid hostname: %s' % host.split(':')[0])
        # Append the default port when none was given.
        result.add(host if ':' in host else '%s:%d' % (host, self.PORT))
    return list(result)
"resource": ""
} |
q41116 | RLConfig._get_abs_filepath | train | def _get_abs_filepath(self, ifile):
""" validate src or dst file path with self.config_file
"""
assert ifile is not None
ifile = ifile[7:] if ifile.startswith('file://') else ifile
if ifile[0] != '/':
basedir = os.path.abspath(os.path.dirname(self.config_file))
ifile = os.path.join(basedir, ifile)
return 'file://' + ifile | python | {
"resource": ""
} |
def flowtable(self):
    """ get a flat flow table globally

    Merges every per-section table in self.flow_table into one mapping of
    source -> list of unique destinations.  (Fixed: destinations were
    accumulated via a list comprehension used only for its side effects.)
    """
    merged = {}
    for table in self.flow_table:
        for src, dsts in table.items():
            merged.setdefault(src, set()).update(dsts)
    # Convert the destination sets back to lists.
    return {src: list(dsts) for src, dsts in merged.items()}
"resource": ""
} |
q41118 | _set_last_aid | train | def _set_last_aid(func):
"""Decorator for setting last_aid."""
@functools.wraps(func)
def new_func(self, *args, **kwargs):
# pylint: disable=missing-docstring
aid = func(self, *args, **kwargs)
self.last_aid = aid
return aid
return new_func | python | {
"resource": ""
} |
def parse_aid(self, text, default_key):
    """Parse argument text for aid.
    May retrieve the aid from search result tables as necessary. aresults
    determines which search results to use by default; True means aresults
    is the default.
    The last aid when no aid has been parsed yet is undefined.
    The accepted formats, in order:
    Last AID: .
    Explicit AID: aid:12345
    Explicit result number: key:12
    Default result number: 12
    """
    # Reject unknown default result keys up front.
    if default_key not in self:
        raise ResultKeyError(default_key)
    if text == '.':
        # "." means: reuse the most recently referenced aid.
        return self.last_aid
    elif text.startswith('aid:'):
        # Explicit aid, e.g. "aid:12345".
        return int(text[len('aid:'):])
    if ':' in text:
        # "key:number" form; extract both parts with the key pattern.
        match = self._key_pattern.search(text)
        if not match:
            raise InvalidSyntaxError(text)
        key = match.group(1)
        number = match.group(2)
    else:
        # Bare number: resolve against the default result table.
        key = default_key
        number = text
    try:
        number = int(number)
    except ValueError:
        raise InvalidSyntaxError(number)
    try:
        # Look up the result table, then the aid at the given position.
        return self[key].get_aid(number)
    except KeyError:
        raise ResultKeyError(key)
    except IndexError:
        raise ResultNumberError(key, number)
"resource": ""
} |
def _connect(dbfile: 'PathLike') -> apsw.Connection:
    """Connect to SQLite database file.

    Opens the file via apsw and enables foreign key enforcement on the
    new connection before returning it.
    """
    conn = apsw.Connection(os.fspath(dbfile))
    # SQLite leaves foreign key enforcement off by default; turn it on and
    # sanity-check that the pragma actually took effect.
    _set_foreign_keys(conn, 1)
    assert _get_foreign_keys(conn) == 1
    return conn
"resource": ""
} |
def cmdloop(self):
    """Start CLI REPL.

    Reads and dispatches commands in a loop until a command handler
    returns a truthy value.  An empty input line re-runs the previous
    command, if any.
    """
    while True:
        cmdline = input(self.prompt)
        tokens = shlex.split(cmdline)
        if not tokens:
            # Empty line: repeat the last command, if there is one.
            if self.last_cmd:
                tokens = self.last_cmd
            else:
                print('No previous command.')
                continue
        if tokens[0] not in self.commands:
            print('Invalid command')
            continue
        command = self.commands[tokens[0]]
        self.last_cmd = tokens
        try:
            # A truthy return value from a command handler exits the REPL.
            if command(self.state, tokens):
                break
        except CmdExit:
            continue
        except Exception as e:
            # NOTE(review): `e not in self.safe_exceptions` compares the
            # exception *instance* against the collection; if
            # safe_exceptions holds exception classes this is always True
            # and every exception gets logged — confirm intended semantics.
            if e not in self.safe_exceptions:
                logger.exception('Error!')
"resource": ""
} |
def to_array(self, channels=2):
    """Generate the array of multipliers for the dynamic.

    Returns a float64 array of shape (self.duration, channels) filled with
    the constant self.volume.  (Replaces the old np.linspace call, which
    built the same constant array the roundabout way.)
    """
    return np.full((self.duration, channels), self.volume, dtype=np.float64)
"resource": ""
} |
def add_measurement(request, experiment_id):
    """This is a view to display a form to add single measurements to an
    experiment.  It calls the object MeasurementForm, which has an
    autocomplete field for animal."""
    experiment = get_object_or_404(Experiment, pk=experiment_id)
    if request.method != 'POST':
        form = MeasurementForm()
    else:
        form = MeasurementForm(request.POST)
        if form.is_valid():
            form.save()
            return HttpResponseRedirect(experiment.get_absolute_url())
    # GET, or an invalid POST: re-render the entry form.
    return render(request, "data_entry_form.html", {"form": form, "experiment": experiment})
"resource": ""
} |
def litters_csv(request):
    """This view generates a csv output file of all animal data for use in
    litter analysis.  The view writes to a csv table the birthdate,
    breeding cage and strain."""
    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = 'attachment; filename=litters.csv'
    writer = csv.writer(response)
    writer.writerow(["Born", "Breeding", "Strain"])
    # One row per animal.
    writer.writerows(
        [animal.Born, animal.Breeding, animal.Strain]
        for animal in Animal.objects.all())
    return response
"resource": ""
} |
def data_csv(request, measurement_list):
    """This view generates a csv output of all data for a strain.

    For this function to work, you have to provide the filtered set of
    measurements."""
    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = 'attachment; filename=data.csv'
    writer = csv.writer(response)
    writer.writerow(["Animal", "Genotype", "Gender","Assay", "Value","Strain", "Background","Age", "Cage", "Feeding", "Treatment"])
    for m in measurement_list:
        animal = m.animal
        writer.writerow([
            animal,
            animal.Genotype,
            animal.Gender,
            m.assay,
            m.values.split(',')[0],
            animal.Strain,
            animal.Background,
            m.age(),
            animal.Cage,
            m.experiment.feeding_state,
            animal.treatment_set.all(),
        ])
    return response
"resource": ""
} |
def get(self, request, *args, **kwargs):
    '''The queryset returns all measurement objects'''
    return data_csv(self.request, Measurement.objects.all())
"resource": ""
} |
def train(self, docs, retrain=False):
    '''
    Train Doc2Vec on a series of docs. Train from scratch or update.
    Args:
        docs: list of tuples (assetid, body_text) or dictionary {assetid : body_text}
        retrain: boolean, retrain from scratch or update model
    saves model in class to self.model
    Returns: 0 if successful
    '''
    # isinstance replaces the `type(docs) == dict` check (also handles
    # dict subclasses); `not retrain` replaces `retrain == False`.
    if isinstance(docs, dict):
        docs = docs.items()
    train_sentences = [self._gen_sentence(item) for item in docs]
    if self.is_trained and not retrain:
        # Online training: update the existing model with the new docs.
        self.update_model(train_sentences, update_labels_bool=True)
    else:
        # Train a fresh model from scratch.
        self.model = Doc2Vec(train_sentences, size=self.size,
                             window=self.window, min_count=self.min_count,
                             workers=self.workers)
        self.is_trained = True
    return 0
"resource": ""
} |
q41128 | Doc2VecVectorizer._process | train | def _process(self, input):
'''
Takes in html-mixed body text as a string and returns a list of strings,
lower case and with punctuation given spacing.
Called by self._gen_sentence()
Args:
inpnut (string): body text
'''
input = re.sub("<[^>]*>", " ", input)
punct = list(string.punctuation)
for symbol in punct:
input = input.replace(symbol, " %s " % symbol)
input = filter(lambda x: x != u'', input.lower().split(' '))
return input | python | {
"resource": ""
} |
def _gen_sentence(self, assetid_body_tuple):
    '''
    Build a Doc2Vec LabeledSentence from an (assetid, body) pair.
    Args:
        assetid_body_tuple (tuple): (assetid, bodytext) pair
    '''
    asset_id, body = assetid_body_tuple
    label = 'DOC_%s' % str(asset_id)
    return LabeledSentence(self._process(body), labels=[label])
"resource": ""
} |
def _add_new_labels(self, sentences):
    '''
    Adds new sentences to the internal indexing of the model.
    Args:
        sentences (list): LabeledSentences for each doc to be added
    Returns:
        int: number of sentences added to the model
    '''
    sentence_no = -1
    total_words = 0
    vocab = self.model.vocab
    # Number of document labels already registered in the vocab.
    model_sentence_n = len([l for l in vocab if l.startswith("DOC_")])
    n_sentences = 0
    for sentence_no, sentence in enumerate(sentences):
        sentence_length = len(sentence.words)
        for label in sentence.labels:
            # NOTE(review): this increments once per *label*, not per word
            # — confirm whether total_words is meant to count labels.
            total_words += 1
            if label in vocab:
                # Known label: accumulate its word count.
                vocab[label].count += sentence_length
            else:
                # New label: register a vocab entry indexed at the end of
                # the model's label/word index.
                vocab[label] = gensim.models.word2vec.Vocab(
                    count=sentence_length)
                vocab[label].index = len(self.model.vocab) - 1
                vocab[label].code = [0]
                vocab[label].sample_probability = 1.
                self.model.index2word.append(label)
                n_sentences += 1
    return n_sentences
"resource": ""
} |
def run(targets, config_dir='.', check_licenses=False):
    # type: (List[str], str, bool) -> None
    """Run `pylint` and `flake8` over `targets` and exit the process with
    status 0 when both are clean, 1 otherwise.

    :param targets: List[str]
    :param config_dir: str
    :param check_licenses: bool
    :return:
    """
    pylint_failed = False
    flake8_failed = False
    if check_licenses:
        run_license_checker(config_path=get_license_checker_config_path(config_dir))
    pylint_options = get_pylint_options(config_dir=config_dir)
    flake8_options = get_flake8_options(config_dir=config_dir)
    if targets:
        pylint_failed = _run_command(command='pylint', targets=targets,
                                     options=pylint_options)
        flake8_failed = _run_command(command='flake8', targets=targets,
                                     options=flake8_options)
    sys.exit(1 if pylint_failed or flake8_failed else 0)
"resource": ""
} |
q41132 | _run_command | train | def _run_command(command, targets, options):
# type: (str, List[str], List[str]) -> bool
"""Runs `command` + `targets` + `options` in a
subprocess and returns a boolean determined by the
process return code.
>>> result = run_command('pylint', ['foo.py', 'some_module'], ['-E'])
>>> result
True
:param command: str
:param targets: List[str]
:param options: List[str]
:return: bool
"""
print('{0}: targets={1} options={2}'.format(command, targets, options))
cmd = [command] + targets + options
process = Popen(cmd)
process.wait()
return bool(process.returncode) | python | {
"resource": ""
} |
def ynnm(n, m):
    """Initial value for recursion formula.

    Returns 0.0 when |m| > n; otherwise the normalization seed built from
    products of square-root factors.  (xrange replaced with range for
    Python 3 compatibility; the dead `out = 0.0` initialisation removed.)
    """
    a = 1.0 / np.sqrt(4.0 * np.pi)
    pm = abs(m)
    if n < pm:
        return 0.0
    if n == 0:
        return a
    out = a
    for k in range(1, n + 1):
        out *= np.sqrt((2.0 * k + 1.0) / 8.0 / k)
    if n != pm:
        for k in range(n - 1, pm - 1, -1):
            out *= np.sqrt((n + k + 1.0) / (n - k))
    return out
"resource": ""
} |
def ynunm(n, m, L):
    """Fourier coefficients for spherical harmonics.

    Fills a length-L float64 vector by downward recursion from out[n],
    stepping k by -2.  Returns all zeros when |m| > n.  (xrange replaced
    with range; the dead tmp1..tmp4 pre-initialisations removed.)
    """
    out = np.zeros(L, dtype=np.float64)
    if abs(m) > n:
        return out
    out[n] = ynnm(n, m)
    # First recursion step has no out[k + 4] term.
    k = n - 2
    if k >= 0:
        t1 = (n - k - 1.0) * (n + k + 2.0)
        t2 = (n - k - 2.0) * (n + k + 3.0) - 4.0 * m ** 2
        t4 = (n - k) * (n + k + 1.0)
        out[k] = (t1 + t2) * out[k + 2] / t4
    # Remaining steps use both out[k + 2] and out[k + 4].
    for k in range(n - 4, -1, -2):
        t1 = (n - k - 1.0) * (n + k + 2.0)
        t2 = (n - k - 2.0) * (n + k + 3.0) - 4.0 * m ** 2
        t3 = (n - k - 3.0) * (n + k + 4.0)
        t4 = (n - k) * (n + k + 1.0)
        out[k] = ((t1 + t2) * out[k + 2] - t3 * out[k + 4]) / t4
    return out
"resource": ""
} |
def smallest_prime_factor(Q):
    """Find the smallest number, no smaller than Q, whose only prime
    factors are the small primes 2, 3, 5, and 7.

    (The old docstring listed "2, 3, 4, and 7".)  Assumes Q >= 1.  Uses
    integer floor division so the result stays an int on Python 3, where
    `/` is true division.
    """
    A = Q
    while A != 1:
        if A % 2 == 0:
            A //= 2
        elif A % 3 == 0:
            A //= 3
        elif A % 5 == 0:
            A //= 5
        elif A % 7 == 0:
            A //= 7
        else:
            # A has a prime factor > 7: advance to the next candidate and
            # restart the factoring from it.
            Q = Q + 1
            A = Q
    return Q
"resource": ""
} |
def hkm_fc(fdata, Nmax, m, s):
    """Circular convolution of column m of fdata with s via FFT.

    Assumes fdata has an even number of rows.  (The explicit half-swap copy
    loops — which also used py2-only xrange — are np.roll(f, MM); the dead
    `Q = s.size` variable was removed.)
    """
    f = fdata[:, m]
    L1 = f.size
    MM = L1 // 2
    # ff[n] = f[n - MM] for n >= MM and ff[n] = f[n + MM] for n < MM,
    # i.e. the two halves swapped: exactly np.roll by MM.
    ff = np.roll(f, MM)
    F = np.fft.fft(ff)
    S = np.fft.fft(s)
    out = 4 * np.pi * np.fft.ifft(F * S)
    return out[0:Nmax + 1]
"resource": ""
} |
def mindx(m, nmax, mmax):
    """index to the first n value for a given m within the spherical
    coefficients vector. Used by sc_to_fc

    (xrange replaced with range; the redundant `ii` bookkeeping removed —
    after the loop it always equalled |m|.)
    """
    pm = abs(m)
    if pm > mmax:
        raise Exception("|m| cannot be larger than mmax")
    if m == 0:
        return 0
    NN = nmax + 1
    # Skip the m = 0 stripe, then both +/- stripes for each lower |m|.
    ind = NN
    for i in range(1, pm):
        ind += 2 * (NN - i)
    if m > 0:
        # Positive m is stored after the matching negative-m stripe.
        ind += NN - pm
    return ind
"resource": ""
} |
def sc_to_fc(spvec, nmax, mmax, nrows, ncols):
    """assume Ncols is even

    Expands the packed spherical-coefficient vector into Fourier-coefficient
    columns.  (The three identical extraction snippets are factored into a
    helper; xrange replaced with range.)
    """
    fdata = np.zeros([int(nrows), ncols], dtype=np.complex128)

    def fill(kk):
        # Copy the coefficients for order kk into column kk.
        ind = mindx(kk, nmax, mmax)
        vec = spvec[ind:ind + nmax - abs(kk) + 1]
        fdata[:, kk] = fcvec_m_sc(vec, kk, nmax, nrows)

    for k in range(ncols // 2):
        if k < mmax:
            fill(k)
            fill(-(k + 1))
        elif k == mmax:
            # At the edge only the positive order remains.
            fill(k)
    return fdata
"resource": ""
} |
def set_status(
        db,
        aid: int,
        complete: Any,
        watched_episodes: int,
) -> None:
    """Set anime status."""
    # `complete` is stored as a 0/1 integer flag.
    row = {
        'aid': aid,
        'complete': int(bool(complete)),
        'watched_episodes': watched_episodes,
    }
    upsert(db, 'cache_anime', ['aid'], row)
"resource": ""
} |
def create_component(self, module_name):
    '''Create a component out of a loaded module.
    Turns a previously-loaded shared module into a component in the
    manager. This will invalidate any objects that are children of this
    node.
    The @ref module_name argument can contain options that set various
    properties of the new component. These must be appended to the module
    name, prefixed by a question mark for each property, in key=value
    format. For example, to change the instance name of the new component,
    append '?instance_name=new_name' to the module name.
    @param module_name Name of the module to turn into a component.
    @raises FailedToCreateComponentError
    '''
    with self._mutex:
        created = self._obj.create_component(module_name)
        if not created:
            raise exceptions.FailedToCreateComponentError(module_name)
        # Creating a component invalidates the cached child list, so
        # re-read it from the server.
        self._parse_component_children()
"resource": ""
} |
def delete_component(self, instance_name):
    '''Delete a component.
    Deletes the component specified by @ref instance_name from the manager.
    This will invalidate any objects that are children of this node.
    @param instance_name The instance name of the component to delete.
    @raises FailedToDeleteComponentError
    '''
    with self._mutex:
        result = self._obj.delete_component(instance_name)
        if result != RTC.RTC_OK:
            raise exceptions.FailedToDeleteComponentError(instance_name)
        # Deleting a component invalidates the cached child list, so
        # re-read it from the server.
        self._parse_component_children()
"resource": ""
} |
def load_module(self, path, init_func):
    '''Load a shared library.
    Call this function to load a shared library (DLL file under Windows,
    shared object under UNIX) into the manager.
    @param path The path to the shared library.
    @param init_func The name entry function in the library.
    @raises FailedToLoadModuleError
    '''
    try:
        with self._mutex:
            if self._obj.load_module(path, init_func) != RTC.RTC_OK:
                raise exceptions.FailedToLoadModuleError(path)
    except CORBA.UNKNOWN as e:
        # The remote side reports some load failures as a CORBA UNKNOWN
        # user exception; translate that case into this library's own
        # error type, and re-raise anything else untouched.
        if e.args[0] == UNKNOWN_UserException:
            raise exceptions.FailedToLoadModuleError(path, 'CORBA User Exception')
        else:
            raise
"resource": ""
} |
def unload_module(self, path):
    '''Unload a loaded shared library.
    Call this function to remove a shared library (e.g. a component) that
    was previously loaded.
    @param path The path to the shared library.
    @raises FailedToUnloadModuleError
    '''
    with self._mutex:
        if self._obj.unload_module(path) != RTC.RTC_OK:
            # Fixed: the exception class lives in the exceptions module
            # (as every sibling method uses); the bare name previously
            # raised NameError instead of the intended error.
            raise exceptions.FailedToUnloadModuleError(path)
"resource": ""
} |
def components(self):
    '''The list of components in this manager, if any.
    This information can also be found by listing the children of this node
    that are of type @ref Component. That method is more useful as it
    returns the tree entries for the components.
    '''
    with self._mutex:
        # Filter the children lazily and cache the result.
        if not self._components:
            self._components = [child for child in self.children
                                if child.is_component]
        return self._components
"resource": ""
} |
def factory_profiles(self):
    '''The factory profiles of all loaded modules.'''
    with self._mutex:
        # Convert each profile's NVList to a dict, caching the result.
        if not self._factory_profiles:
            self._factory_profiles = [
                utils.nvlist_to_dict(fp.properties)
                for fp in self._obj.get_factory_profiles()]
        return self._factory_profiles
"resource": ""
} |
def set_config_parameter(self, param, value):
    '''Set a configuration parameter of the manager.
    @param The parameter to set.
    @value The new value for the parameter.
    @raises FailedToSetConfigurationError
    '''
    with self._mutex:
        result = self._obj.set_configuration(param, value)
        if result != RTC.RTC_OK:
            raise exceptions.FailedToSetConfigurationError(param, value)
        # Drop the cached configuration so the next read re-fetches it.
        self._configuration = None
"resource": ""
} |
q41147 | Manager.configuration | train | def configuration(self):
'''The configuration dictionary of the manager.'''
with self._mutex:
if not self._configuration:
self._configuration = utils.nvlist_to_dict(self._obj.get_configuration())
return self._configuration | python | {
"resource": ""
} |
q41148 | Manager.profile | train | def profile(self):
'''The manager's profile.'''
with self._mutex:
if not self._profile:
profile = self._obj.get_profile()
self._profile = utils.nvlist_to_dict(profile.properties)
return self._profile | python | {
"resource": ""
} |
q41149 | Manager.loadable_modules | train | def loadable_modules(self):
'''The list of loadable module profile dictionaries.'''
with self._mutex:
if not self._loadable_modules:
self._loadable_modules = []
for mp in self._obj.get_loadable_modules():
self._loadable_modules.append(utils.nvlist_to_dict(mp.properties))
return self._loadable_modules | python | {
"resource": ""
} |
q41150 | Manager.loaded_modules | train | def loaded_modules(self):
'''The list of loaded module profile dictionaries.'''
with self._mutex:
if not self._loaded_modules:
self._loaded_modules = []
for mp in self._obj.get_loaded_modules():
self._loaded_modules.append(utils.nvlist_to_dict(mp.properties))
return self._loaded_modules | python | {
"resource": ""
} |
q41151 | Manager.slaves | train | def slaves(self):
'''The list of slave managers of this manager, if any.
This information can also be found by listing the children of this node
that are of type @ref Manager.
'''
with self._mutex:
if not self._slaves:
self._slaves = [c for c in self.children if c.is_manager]
return self._slaves | python | {
"resource": ""
} |
q41152 | OAuth2Authentication.authenticate_credentials | train | def authenticate_credentials(self, request, access_token):
"""
Authenticate the request, given the access token.
"""
try:
token = oauth2_provider.oauth2.models.AccessToken.objects.select_related('user')
# provider_now switches to timezone aware datetime when
# the oauth2_provider version supports to it.
token = token.get(token=access_token, expires__gt=provider_now())
except oauth2_provider.oauth2.models.AccessToken.DoesNotExist:
raise exceptions.AuthenticationFailed('Invalid token')
user = token.user
if not user.is_active:
msg = 'User inactive or deleted: %s' % user.username
raise exceptions.AuthenticationFailed(msg)
return (user, token) | python | {
"resource": ""
} |
q41153 | Cloud.request | train | def request(self, *args, **kwargs):
"""
The main purpose of this is to be a wrapper-like function to pass the api_token and all the other params to the
requests that are being made
:returns: An instance of RequestsHandler
"""
return RequestsHandler(*args, api_token=self.api_token,
verify=self.mist_client.verify,
job_id=self.mist_client.job_id, **kwargs) | python | {
"resource": ""
} |
q41154 | Cloud.delete | train | def delete(self):
"""
Delete the cloud from the list of added clouds in mist.io service.
:returns: A list of mist.clients' updated clouds.
"""
req = self.request(self.mist_client.uri + '/clouds/' + self.id)
req.delete()
self.mist_client.update_clouds() | python | {
"resource": ""
} |
q41155 | Cloud.enable | train | def enable(self):
"""
Enable the Cloud.
:returns: A list of mist.clients' updated clouds.
"""
payload = {
"new_state": "1"
}
data = json.dumps(payload)
req = self.request(self.mist_client.uri+'/clouds/'+self.id, data=data)
req.post()
self.enabled = True
self.mist_client.update_clouds() | python | {
"resource": ""
} |
q41156 | Cloud.disable | train | def disable(self):
"""
Disable the Cloud.
:returns: A list of mist.clients' updated clouds.
"""
payload = {
"new_state": "0"
}
data = json.dumps(payload)
req = self.request(self.mist_client.uri+'/clouds/'+self.id, data=data)
req.post()
self.enabled = False
self.mist_client.update_clouds() | python | {
"resource": ""
} |
q41157 | Cloud.sizes | train | def sizes(self):
"""
Available machine sizes to be used when creating a new machine.
:returns: A list of available machine sizes.
"""
req = self.request(self.mist_client.uri+'/clouds/'+self.id+'/sizes')
sizes = req.get().json()
return sizes | python | {
"resource": ""
} |
q41158 | Cloud.locations | train | def locations(self):
"""
Available locations to be used when creating a new machine.
:returns: A list of available locations.
"""
req = self.request(self.mist_client.uri+'/clouds/'+self.id+'/locations')
locations = req.get().json()
return locations | python | {
"resource": ""
} |
q41159 | Cloud.networks | train | def networks(self):
"""
Available networks.
:returns: A list of available networks associated to a provider.
"""
if self.provider in ['openstack', 'nephoscale']:
req = self.request(self.mist_client.uri+'/clouds/'+self.id+'/networks')
networks = req.get().json()
return networks
else:
print "Network actions not supported yet for %s provider" % self.provider | python | {
"resource": ""
} |
q41160 | Cloud.images | train | def images(self):
"""
Available images to be used when creating a new machine.
:returns: A list of all available images.
"""
req = self.request(self.mist_client.uri+'/clouds/'+self.id+'/images')
images = req.get().json()
return images | python | {
"resource": ""
} |
q41161 | Cloud._list_machines | train | def _list_machines(self):
"""
Request a list of all added machines.
Populates self._machines dict with mist.client.model.Machine instances
"""
try:
req = self.request(self.mist_client.uri+'/clouds/'+self.id+'/machines')
machines = req.get().json()
except:
# Eg invalid cloud credentials
machines = {}
if machines:
for machine in machines:
self._machines[machine['machine_id']] = Machine(machine, self)
else:
self._machines = {} | python | {
"resource": ""
} |
q41162 | Cloud.machines | train | def machines(self, id=None, name=None, search=None):
"""
Property-like function to call the _list_machines function in order to populate self._machines dict
:returns: A list of Machine instances.
"""
if self._machines is None:
self._machines = {}
self._list_machines()
if id:
return [self._machines[machine_id] for machine_id in self._machines.keys()
if str(id) == str(self._machines[machine_id].id)]
elif name:
return [self._machines[machine_id] for machine_id in self._machines.keys()
if name == self._machines[machine_id].name]
elif search:
return [self._machines[machine_id] for machine_id in self._machines.keys()
if str(search) == str(self._machines[machine_id].name)
or str(search) == str(self._machines[machine_id].id)]
else:
return [self._machines[machine_id] for machine_id in self._machines.keys()] | python | {
"resource": ""
} |
q41163 | Machine._toggle_monitoring | train | def _toggle_monitoring(self, action, no_ssh=False):
"""
Enable or disable monitoring on a machine
:param action: Can be either "enable" or "disable"
"""
payload = {
'action': action,
'name': self.name,
'no_ssh': no_ssh,
'public_ips': self.info['public_ips'],
'dns_name': self.info['extra'].get('dns_name', 'n/a')
}
data = json.dumps(payload)
req = self.request(self.mist_client.uri+"/clouds/"+self.cloud.id+"/machines/"+self.id+"/monitoring",
data=data)
req.post() | python | {
"resource": ""
} |
q41164 | Machine.get_stats | train | def get_stats(self, start=int(time()), stop=int(time())+10, step=10):
"""
Get stats of a monitored machine
:param start: Time formatted as integer, from when to fetch stats (default now)
:param stop: Time formatted as integer, until when to fetch stats (default +10 seconds)
:param step: Step to fetch stats (default 10 seconds)
:returns: A dict of stats
"""
payload = {
'v': 2,
'start': start,
'stop': stop,
'step': step
}
data = json.dumps(payload)
req = self.request(self.mist_client.uri+"/clouds/"+self.cloud.id+"/machines/"+self.id+"/stats", data=data)
stats = req.get().json()
return stats | python | {
"resource": ""
} |
q41165 | Machine.available_metrics | train | def available_metrics(self):
"""
List all available metrics that you can add to this machine
:returns: A list of dicts, each of which is a metric that you can add to a monitored machine
"""
req = self.request(self.mist_client.uri+"/clouds/"+self.cloud.id+"/machines/"+self.id+"/metrics")
metrics = req.get().json()
return metrics | python | {
"resource": ""
} |
q41166 | Machine.add_metric | train | def add_metric(self, metric_id):
"""
Add a metric to a monitored machine
:param metric_id: Metric_id (provided by self.available_metrics)
"""
payload = {
'metric_id': metric_id
}
data = json.dumps(payload)
req = self.request(self.mist_client.uri+"/clouds/"+self.cloud.id+"/machines/"+self.id+"/metrics", data=data)
req.put() | python | {
"resource": ""
} |
q41167 | Machine.remove_metric | train | def remove_metric(self, metric_id):
"""
Remove a metric from a monitored machine
:param metric_id: Metric_id (provided by self.get_stats() )
"""
payload = {
'metric_id': metric_id
}
data = json.dumps(payload)
req = self.request(self.mist_client.uri+"/clouds/"+self.cloud.id+"/machines/"+self.id+"/metrics", data=data)
req.delete() | python | {
"resource": ""
} |
q41168 | Machine.add_python_plugin | train | def add_python_plugin(self, name, python_file, value_type="gauge", unit=None):
"""
Add a custom python plugin to the collectd instance of a monitored plugin
:param python_file: Path of the python file to be added as custom python plugin
:param name: Name of the plugin
:param value_type: Optional. Can be either "gauge" or "derive"
:param unit: Optional. If given the new plugin will be measured according to this unit
"""
if not os.path.isfile(python_file):
raise Exception(python_file, "is not a file or could not be found in tho given path")
with open(python_file) as f:
script = f.read()
payload = {
'plugin_type': 'python',
'name': name,
'unit': unit,
'value_type': value_type,
'read_function': script,
'host': self.info['public_ips'][0]
}
data = json.dumps(payload)
#PLugin id must be in lowercase
plugin_id = name.lower()
#PLugin id must contain only alphanumeric chars
pattern = re.compile('\W')
plugin_id = re.sub(pattern, "_", plugin_id)
#Plugin id should not have double underscores
while "__" in plugin_id:
pattern = "\r?__"
plugin_id = re.sub(pattern, "_", plugin_id)
#Plugin id should not have underscore as first or last char
if plugin_id[-1] == "_":
plugin_id = plugin_id[:-2]
if plugin_id[0] == "_":
plugin_id = plugin_id[1:]
req = self.request(self.mist_client.uri+"/clouds/"+self.cloud.id+"/machines/"+self.id+"/plugins/"+plugin_id,
data=data)
req.post() | python | {
"resource": ""
} |
q41169 | Key.private | train | def private(self):
"""
Return the private ssh-key
:returns: The private ssh-key as string
"""
req = self.request(self.mist_client.uri+'/keys/'+self.id+"/private")
private = req.get().json()
return private | python | {
"resource": ""
} |
q41170 | Key.public | train | def public(self):
"""
Return the public ssh-key
:returns: The public ssh-key as string
"""
req = self.request(self.mist_client.uri+'/keys/'+self.id+"/public")
public = req.get().json()
return public | python | {
"resource": ""
} |
q41171 | Key.rename | train | def rename(self, new_name):
"""
Rename a key
:param new_name: New name for the key (will also serve as the key's id)
:returns: An updated list of added keys
"""
payload = {
'new_name': new_name
}
data = json.dumps(payload)
req = self.request(self.mist_client.uri+'/keys/'+self.id, data=data)
req.put()
self.id = new_name
self.mist_client.update_keys() | python | {
"resource": ""
} |
q41172 | Key.set_default | train | def set_default(self):
"""
Set this key as the default key
:returns: An updated list of added keys
"""
req = self.request(self.mist_client.uri+'/keys/'+self.id)
req.post()
self.is_default = True
self.mist_client.update_keys() | python | {
"resource": ""
} |
q41173 | Key.delete | train | def delete(self):
"""
Delete this key from mist.io
:returns: An updated list of added keys
"""
req = self.request(self.mist_client.uri+'/keys/'+self.id)
req.delete()
self.mist_client.update_keys() | python | {
"resource": ""
} |
q41174 | Bug.status | train | def status(self, value):
"""
Property for getting or setting the bug status
>>> bug.status = "REOPENED"
"""
if self._bug.get('id', None):
if value in VALID_STATUS:
self._bug['status'] = value
else:
raise BugException("Invalid status type was used")
else:
raise BugException("Can not set status unless there is a bug id."
" Please call Update() before setting") | python | {
"resource": ""
} |
q41175 | Bug.update | train | def update(self):
"""
Update this object with the latest changes from Bugzilla
>>> bug.status
'NEW'
#Changes happen on Bugzilla
>>> bug.update()
>>> bug.status
'FIXED'
"""
if 'id' in self._bug:
result = self._bugsy.request('bug/%s' % self._bug['id'])
self._bug = dict(**result['bugs'][0])
else:
raise BugException("Unable to update bug that isn't in Bugzilla") | python | {
"resource": ""
} |
q41176 | Bug.get_comments | train | def get_comments(self):
"""
Obtain comments for this bug.
Returns a list of Comment instances.
"""
bug = str(self._bug['id'])
res = self._bugsy.request('bug/%s/comment' % bug)
return [Comment(bugsy=self._bugsy, **comments) for comments
in res['bugs'][bug]['comments']] | python | {
"resource": ""
} |
q41177 | Comment.add_tags | train | def add_tags(self, tags):
"""
Add tags to the comments
"""
if not isinstance(tags, list):
tags = [tags]
self._bugsy.request('bug/comment/%s/tags' % self._comment['id'],
method='PUT', json={"add": tags}) | python | {
"resource": ""
} |
q41178 | _readuntil | train | def _readuntil(f, end=_TYPE_END):
"""Helper function to read bytes until a certain end byte is hit"""
buf = bytearray()
byte = f.read(1)
while byte != end:
if byte == b'':
raise ValueError('File ended unexpectedly. Expected end byte {}.'.format(end))
buf += byte
byte = f.read(1)
return buf | python | {
"resource": ""
} |
q41179 | _encode_buffer | train | def _encode_buffer(string, f):
"""Writes the bencoded form of the input string or bytes"""
if isinstance(string, str):
string = string.encode()
f.write(str(len(string)).encode())
f.write(_TYPE_SEP)
f.write(string) | python | {
"resource": ""
} |
q41180 | bencode | train | def bencode(data, f=None):
"""
Writes a serializable data piece to f
The order of tests is nonarbitrary,
as strings and mappings are iterable.
If f is None, it writes to a byte buffer
and returns a bytestring
"""
if f is None:
f = BytesIO()
_bencode_to_file(data, f)
return f.getvalue()
else:
_bencode_to_file(data, f) | python | {
"resource": ""
} |
q41181 | command | train | def command(state, args):
"""Watch an anime."""
if len(args) < 2:
print(f'Usage: {args[0]} {{ID|aid:AID}} [EPISODE]')
return
aid = state.results.parse_aid(args[1], default_key='db')
anime = query.select.lookup(state.db, aid)
if len(args) < 3:
episode = anime.watched_episodes + 1
else:
episode = int(args[2])
anime_files = query.files.get_files(state.db, aid)
files = anime_files[episode]
if not files:
print('No files.')
return
file = state.file_picker.pick(files)
ret = subprocess.call(state.config['anime'].getargs('player') + [file])
if ret == 0 and episode == anime.watched_episodes + 1:
user_input = input('Bump? [Yn]')
if user_input.lower() in ('n', 'no'):
print('Not bumped.')
else:
query.update.bump(state.db, aid)
print('Bumped.') | python | {
"resource": ""
} |
q41182 | sin_fc | train | def sin_fc(fdata):
"""Apply sine in the Fourier domain."""
nrows = fdata.shape[0]
ncols = fdata.shape[1]
M = nrows / 2
fdata[int(M - 1), :] = 0
fdata[int(M + 1), :] = 0
work1 = np.zeros([nrows, ncols], dtype=np.complex128)
work2 = np.zeros([nrows, ncols], dtype=np.complex128)
work1[0, :] = fdata[-1, :]
work1[1:, :] = fdata[0:-1, :]
work2[0:-1] = fdata[1:, :]
work2[-1, :] = fdata[0, :]
fdata[:, :] = 1.0 / (2 * 1j) * (work1 - work2) | python | {
"resource": ""
} |
q41183 | divsin_fc | train | def divsin_fc(fdata):
"""Apply divide by sine in the Fourier domain."""
nrows = fdata.shape[0]
ncols = fdata.shape[1]
L = int(nrows / 2) # Assuming nrows is even, which it should be.
L2 = L - 2 # This is the last index in the recursion for division by sine.
g = np.zeros([nrows, ncols], dtype=np.complex128)
g[L2, :] = 2 * 1j * fdata[L - 1, :]
for k in xrange(L2, -L2, -1):
g[k - 1, :] = 2 * 1j * fdata[k, :] + g[k + 1, :]
fdata[:, :] = g | python | {
"resource": ""
} |
q41184 | dphi_fc | train | def dphi_fc(fdata):
"""Apply phi derivative in the Fourier domain."""
nrows = fdata.shape[0]
ncols = fdata.shape[1]
B = int(ncols / 2) # As always, we assume nrows and ncols are even
a = list(range(0, int(B)))
ap = list(range(-int(B), 0))
a.extend(ap)
dphi = np.zeros([nrows, ncols], np.complex128)
for k in xrange(0, nrows):
dphi[k, :] = a
fdata[:, :] = 1j * dphi * fdata | python | {
"resource": ""
} |
q41185 | sinLdot_fc | train | def sinLdot_fc(tfdata, pfdata):
"""Apply sin of theta times the L operator to the data in the Fourier
domain."""
dphi_fc(tfdata)
sin_fc(pfdata)
dtheta_fc(pfdata)
return 1j * (tfdata - pfdata) | python | {
"resource": ""
} |
q41186 | L_fc | train | def L_fc(fdata):
"""Apply L in the Fourier domain."""
fd = np.copy(fdata)
dphi_fc(fdata)
divsin_fc(fdata)
dtheta_fc(fd)
return (1j * fdata, -1j * fd) | python | {
"resource": ""
} |
q41187 | extract | train | def extract(dump_files, extractors=ALL_EXTRACTORS):
"""
Extracts cites from a set of `dump_files`.
:Parameters:
dump_files : str | `file`
A set of files MediaWiki XML dump files
(expects: pages-meta-history)
extractors : `list`(`extractor`)
A list of extractors to apply to the text
:Returns:
`iterable` -- a generator of extracted cites
"""
# Dump processor function
def process_dump(dump, path):
for page in dump:
if page.namespace != 0: continue
else:
for cite in extract_cite_history(page, extractors):
yield cite
# Map call
return mwxml.map(process_dump, dump_files) | python | {
"resource": ""
} |
q41188 | extract_ids | train | def extract_ids(text, extractors):
"""
Uses `extractors` to extract citation identifiers from a text.
:Parameters:
text : str
The text to process
extractors : `list`(`extractor`)
A list of extractors to apply to the text
:Returns:
`iterable` -- a generator of extracted identifiers
"""
for extractor in extractors:
for id in extractor.extract(text):
yield id | python | {
"resource": ""
} |
q41189 | Component.add_members | train | def add_members(self, rtcs):
'''Add other RT Components to this composite component as members.
This component must be a composite component.
'''
if not self.is_composite:
raise exceptions.NotCompositeError(self.name)
for rtc in rtcs:
if self.is_member(rtc):
raise exceptions.AlreadyInCompositionError(self.name, rtc.instance_name)
org = self.organisations[0].obj
org.add_members([x.object for x in rtcs])
# Force a reparse of the member information
self._orgs = [] | python | {
"resource": ""
} |
q41190 | Component.remove_members | train | def remove_members(self, rtcs):
'''Remove other RT Components from this composite component.
rtcs is a list of components to remove. Each element must be either an
rtctree.Component object or a string containing a component's instance
name. rtctree.Component objects are more reliable.
This component must be a composite component.
'''
if not self.is_composite:
raise exceptions.NotCompositeError(self.name)
org = self.organisations[0].obj
members = org.get_members()
for rtc in rtcs:
if type(rtc) == str:
rtc_name = rtc
else:
rtc_name = rtc.instance_name
# Check if the RTC actually is a member
if not self.is_member(rtc):
raise exceptions.NotInCompositionError(self.name, rtc_name)
# Remove the RTC from the composition
org.remove_member(rtc_name)
# Force a reparse of the member information
self._orgs = [] | python | {
"resource": ""
} |
q41191 | Component.is_member | train | def is_member(self, rtc):
'''Is the given component a member of this composition?
rtc may be a Component object or a string containing a component's
instance name. Component objects are more reliable.
Returns False if the given component is not a member of this
composition.
Raises NotCompositeError if this component is not a composition.
'''
if not self.is_composite:
raise exceptions.NotCompositeError(self.name)
members = self.organisations[0].obj.get_members()
if type(rtc) is str:
for m in members:
if m.get_component_profile().instance_name == rtc:
return True
else:
for m in members:
if m._is_equivalent(rtc.object):
return True
return False | python | {
"resource": ""
} |
q41192 | Component.members | train | def members(self):
'''Member components if this component is composite.'''
with self._mutex:
if not self._members:
self._members = {}
for o in self.organisations:
# TODO: Search for these in the tree
self._members[o.org_id] = o.obj.get_members()
return self._members | python | {
"resource": ""
} |
q41193 | Component.organisations | train | def organisations(self):
'''The organisations of this composition.'''
class Org:
def __init__(self, sdo_id, org_id, members, obj):
self.sdo_id = sdo_id
self.org_id = org_id
self.members = members
self.obj = obj
with self._mutex:
if not self._orgs:
for org in self._obj.get_owned_organizations():
owner = org.get_owner()
if owner:
sdo_id = owner._narrow(SDOPackage.SDO).get_sdo_id()
else:
sdo_id = ''
org_id = org.get_organization_id()
members = [m.get_sdo_id() for m in org.get_members()]
self._orgs.append(Org(sdo_id, org_id, members, org))
return self._orgs | python | {
"resource": ""
} |
q41194 | Component.parent_org_sdo_ids | train | def parent_org_sdo_ids(self):
'''The SDO IDs of the compositions this RTC belongs to.'''
return [sdo.get_owner()._narrow(SDOPackage.SDO).get_sdo_id() \
for sdo in self._obj.get_organizations() if sdo] | python | {
"resource": ""
} |
q41195 | Component.parent_organisations | train | def parent_organisations(self):
'''The organisations this RTC belongs to.'''
class ParentOrg:
def __init__(self, sdo_id, org_id):
self.sdo_id = sdo_id
self.org_id = org_id
with self._mutex:
if not self._parent_orgs:
for sdo in self._obj.get_organizations():
if not sdo:
continue
owner = sdo.get_owner()
if owner:
sdo_id = owner._narrow(SDOPackage.SDO).get_sdo_id()
else:
sdo_id = ''
org_id = sdo.get_organization_id()
self._parent_orgs.append(ParentOrg(sdo_id, org_id))
return self._parent_orgs | python | {
"resource": ""
} |
q41196 | Component.activate_in_ec | train | def activate_in_ec(self, ec_index):
'''Activate this component in an execution context.
@param ec_index The index of the execution context to activate in.
This index is into the total array of contexts, that
is both owned and participating contexts. If the value
of ec_index is greater than the length of
@ref owned_ecs, that length is subtracted from
ec_index and the result used as an index into
@ref participating_ecs.
'''
with self._mutex:
if ec_index >= len(self.owned_ecs):
ec_index -= len(self.owned_ecs)
if ec_index >= len(self.participating_ecs):
raise exceptions.BadECIndexError(ec_index)
ec = self.participating_ecs[ec_index]
else:
ec = self.owned_ecs[ec_index]
ec.activate_component(self._obj) | python | {
"resource": ""
} |
q41197 | Component.deactivate_in_ec | train | def deactivate_in_ec(self, ec_index):
'''Deactivate this component in an execution context.
@param ec_index The index of the execution context to deactivate in.
This index is into the total array of contexts, that
is both owned and participating contexts. If the value
of ec_index is greater than the length of
@ref owned_ecs, that length is subtracted from
ec_index and the result used as an index into
@ref participating_ecs.
'''
with self._mutex:
if ec_index >= len(self.owned_ecs):
ec_index -= len(self.owned_ecs)
if ec_index >= len(self.participating_ecs):
raise exceptions.BadECIndexError(ec_index)
ec = self.participating_ecs[ec_index]
else:
ec = self.owned_ecs[ec_index]
ec.deactivate_component(self._obj) | python | {
"resource": ""
} |
q41198 | Component.get_ec | train | def get_ec(self, ec_handle):
'''Get a reference to the execution context with the given handle.
@param ec_handle The handle of the execution context to look for.
@type ec_handle str
@return A reference to the ExecutionContext object corresponding to
the ec_handle.
@raises NoECWithHandleError
'''
with self._mutex:
for ec in self.owned_ecs:
if ec.handle == ec_handle:
return ec
for ec in self.participating_ecs:
if ec.handle == ec_handle:
return ec
raise exceptions.NoECWithHandleError | python | {
"resource": ""
} |
q41199 | Component.get_ec_index | train | def get_ec_index(self, ec_handle):
'''Get the index of the execution context with the given handle.
@param ec_handle The handle of the execution context to look for.
@type ec_handle str
@return The index into the owned + participated arrays, suitable for
use in methods such as @ref activate_in_ec, or -1 if the EC was not
found.
@raises NoECWithHandleError
'''
with self._mutex:
for ii, ec in enumerate(self.owned_ecs):
if ec.handle == ec_handle:
return ii
for ii, ec in enumerate(self.participating_ecs):
if ec.handle == ec_handle:
return ii + len(self.owned_ecs)
raise exceptions.NoECWithHandleError | python | {
"resource": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.