code string | signature string | docstring string | loss_without_docstring float64 | loss_with_docstring float64 | factor float64 |
|---|---|---|---|---|---|
result = [escape(obj.name)]
if obj.description:
result.append(escape(obj.description))
return " ".join(result) | def format_match(self, obj) | (HTML) formatted item for display in the dropdown | 4.487686 | 4.181018 | 1.073348 |
if isinstance(obj, logging.Logger):
return True
else:
return (inspect.isclass(obj)
and inspect.ismethod(getattr(obj, 'debug', None))
and inspect.ismethod(getattr(obj, 'isEnabledFor', None))
and inspect.ismethod(getattr(obj, 'setLevel', None))) | def loggable(obj) | Return "True" if the obj implements the minimum Logger API
required by the 'trace' decorator. | 2.409089 | 2.402766 | 1.002631 |
__mname = value.__module__
if __mname != '__main__':
return '%s = <%s.%s object at 0x%x>' \
% (name, __mname, value.__class__.__name__, id(value))
else:
return '%s = <%s object at 0x%x>' \
% (name, value.__class__.__name__, id(value)) | def _formatter_self(name, value) | Format the "self" variable and value on instance methods. | 2.546132 | 2.461377 | 1.034434 |
__mname = value.__module__
if __mname != '__main__':
return "%s = <type '%s.%s'>" % (name, __mname, value.__name__)
else:
return "%s = <type '%s'>" % (name, value.__name__) | def _formatter_class(name, value) | Format the "klass" variable and value on class methods. | 3.441146 | 3.139889 | 1.095945 |
if name in ('self', 'instance', 'this'):
return af_self
elif name == 'class':
return af_class
elif name in ('named', 'param', 'parameter'):
return af_named
elif name in ('default', 'optional'):
return af_default
# elif name in ('anonymous', 'arbitrary', 'unnamed'):
# return af_anonymous
elif name in ('keyword', 'pair', 'pairs'):
return af_keyword
else:
raise ValueError('unknown trace formatter %r' % name) | def get_formatter(name) | Return the named formatter function. See the function
"set_formatter" for details. | 4.227723 | 4.324346 | 0.977656 |
if name in ('self', 'instance', 'this'):
global af_self
af_self = _formatter_self if func is None else func
elif name == 'class':
global af_class
af_class = _formatter_class if func is None else func
elif name in ('named', 'param', 'parameter'):
global af_named
af_named = _formatter_named if func is None else func
elif name in ('default', 'optional'):
global af_default
af_default = _formatter_defaults if func is None else func
elif name in ('anonymous', 'arbitrary', 'unnamed'):
global af_anonymous
af_anonymous = chop if func is None else func
elif name in ('keyword', 'pair', 'pairs'):
global af_keyword
af_keyword = _formatter_named if func is None else func
else:
raise ValueError('unknown trace formatter %r' % name) | def set_formatter(name, func) | Replace the formatter function used by the trace decorator to
handle formatting a specific kind of argument. There are several
kinds of arguments that trace discriminates between:
* instance argument - the object bound to an instance method.
* class argument - the class object bound to a class method.
* positional arguments (named) - values bound to distinct names.
* positional arguments (default) - named positional arguments with
default values specified in the function declaration.
* positional arguments (anonymous) - an arbitrary number of values
that are all bound to the '*' variable.
* keyword arguments - zero or more name-value pairs that are
placed in a dictionary and bound to the double-star variable.
\var{name} - specifies the name of the formatter to be modified.
* instance argument - "self", "instance" or "this"
* class argument - "class"
* named argument - "named", "param" or "parameter"
* default argument - "default", "optional"
* anonymous argument - "anonymous", "arbitrary" or "unnamed"
* keyword argument - "keyword", "pair" or "pairs"
\var{func} - a function to format an argument.
* For all but anonymous formatters this function must accept two
arguments: the variable name and the value to which it is bound.
* The anonymous formatter function is passed only one argument
corresponding to an anonymous value.
* if \var{func} is "None" then the default formatter will be used. | 2.901392 | 1.996324 | 1.453367 |
global __builtin_functions
if __builtin_functions is None:
builtins = dict()
for proto in __builtins:
pos = proto.find('(')
name, params, defaults = proto[:pos], list(), dict()
for param in proto[pos + 1:-1].split(','):
pos = param.find('=')
if not pos < 0:
param, value = param[:pos], param[pos + 1:]
try:
defaults[param] = __builtin_defaults[value]
except KeyError:
raise ValueError(
'builtin function %s: parameter %s: '
'unknown default %r' % (name, param, value))
params.append(param)
builtins[name] = (params, defaults)
__builtin_functions = builtins
try:
params, defaults = __builtin_functions[name]
except KeyError:
params, defaults = tuple(), dict()
__builtin_functions[name] = (params, defaults)
print(
"Warning: builtin function %r is missing prototype" % name,
file=sys.stderr)
return len(params), params, defaults | def __lookup_builtin(name) | Lookup the parameter name and default parameter values for
builtin functions. | 2.632205 | 2.550829 | 1.031902 |
if inspect.ismodule(obj):
for name, fn in inspect.getmembers(obj, inspect.isfunction):
setattr(obj, name, decorator(fn))
for name, klass in inspect.getmembers(obj, inspect.isclass):
attach_to_class(decorator, klass, recursive)
elif inspect.isclass(obj):
attach_to_class(decorator, obj, recursive) | def attach(decorator, obj, recursive=True) | attach(decorator, class_or_module[, recursive = True])
Utility to attach a \val{decorator} to the \val{obj} instance.
If \val{obj} is a module, the decorator will be attached to every
function and class in the module.
If \val{obj} is a class, the decorator will be attached to every
method and subclass of the class.
if \val{recursive} is "True" then subclasses will be decorated. | 1.982226 | 2.193424 | 0.903713 |
# print("'%s','%s','%s','%s','%s'"
# %(instance, action, reverse, model, pk_set))
if action == "post_add":
if not reverse:
project = instance
for person in model.objects.filter(pk__in=pk_set):
log.change(
person,
"Added person to project %s leaders" % project)
log.change(
project,
"Added person %s to project leaders" % person)
else:
person = instance
for project in model.objects.filter(pk__in=pk_set):
log.change(
person,
"Added person to project %s leaders" % project)
log.change(
project,
"Added person %s to project leaders" % person)
elif action == "post_remove":
if not reverse:
project = instance
for person in model.objects.filter(pk__in=pk_set):
log.change(
person,
"Removed person from project %s leaders" % project)
log.change(
project,
"Removed person %s from project leaders" % person)
else:
person = instance
for project in model.objects.filter(pk__in=pk_set):
log.change(
person,
"Removed person from project %s leaders" % project)
log.change(
project,
"Removed person %s from project leaders" % person)
elif action == "pre_clear":
# This has to occur in pre_clear, not post_clear, as otherwise
# we won't see what project leaders need to be removed.
if not reverse:
project = instance
log.change(
project,
"Removed all project leaders")
for person in project.leaders.all():
log.change(
project,
"Removed person %s from project leaders" % person)
else:
person = instance
log.change(
person,
"Removed person from all project leaders")
for project in person.leads.all():
log.change(
project,
"Removed person %s from project leaders" % person) | def _leaders_changed(
sender, instance, action, reverse, model, pk_set, **kwargs) | Hook that executes whenever the group members are changed. | 1.787116 | 1.822396 | 0.980641 |
if util.is_admin(request):
queryset = Application.objects.all()
else:
queryset = Application.objects.get_for_applicant(request.user)
q_filter = ApplicationFilter(request.GET, queryset=queryset)
table = ApplicationTable(q_filter.qs.order_by("-expires"))
tables.RequestConfig(request).configure(table)
spec = []
for name, value in six.iteritems(q_filter.form.cleaned_data):
if value is not None and value != "":
name = name.replace('_', ' ').capitalize()
spec.append((name, value))
return render(
template_name="kgapplications/application_list.html",
context={
'table': table,
'filter': q_filter,
'spec': spec,
'title': "Application list",
},
request=request) | def application_list(request) | a user wants to see all applications possible. | 3.420871 | 3.311626 | 1.032988 |
config = tables.RequestConfig(request, paginate={"per_page": 5})
person = request.user
my_applications = Application.objects.get_for_applicant(person)
my_applications = ApplicationTable(my_applications, prefix="mine-")
config.configure(my_applications)
requires_attention = Application.objects.requires_attention(request)
requires_attention = ApplicationTable(requires_attention, prefix="attn-")
config.configure(requires_attention)
return render(
template_name='kgapplications/profile_applications.html',
context={
'person': request.user,
'my_applications': my_applications,
'requires_attention': requires_attention,
},
request=request) | def profile_application_list(request) | a logged in user wants to see all his pending applications. | 3.762406 | 3.629229 | 1.036696 |
application = base.get_application(pk=application_id)
state_machine = base.get_state_machine(application)
return state_machine.process(request, application, state, label) | def application_detail(request, application_id, state=None, label=None) | A authenticated used is trying to access an application. | 4.32697 | 4.241471 | 1.020158 |
application = base.get_application(secret_token=token)
if application.expires < datetime.datetime.now():
return render(
template_name='kgapplications/common_expired.html',
context={'application': application},
request=request)
roles = {'is_applicant', 'is_authorised'}
# redirect user to real url if possible.
if request.user.is_authenticated:
if request.user == application.applicant:
url = base.get_url(
request, application, roles, label)
return HttpResponseRedirect(url)
state_machine = base.get_state_machine(application)
return state_machine.process(
request, application, state, label, roles) | def application_unauthenticated(request, token, state=None, label=None) | An somebody is trying to access an application. | 5.521687 | 5.493119 | 1.005201 |
try:
cache = InstituteCache.objects.get(
institute=institute, date=datetime.date.today(),
start=start, end=end)
return cache.cpu_time, cache.no_jobs
except InstituteCache.DoesNotExist:
return 0, 0 | def get_institute_usage(institute, start, end) | Return a tuple of cpu hours and number of jobs for an institute
for a given period
Keyword arguments:
institute --
start -- start date
end -- end date | 4.446018 | 4.148306 | 1.071767 |
try:
cache = ProjectCache.objects.get(
project=project, date=datetime.date.today(),
start=start, end=end)
return cache.cpu_time, cache.no_jobs
except ProjectCache.DoesNotExist:
return 0, 0 | def get_project_usage(project, start, end) | Return a tuple of cpu hours and number of jobs for a project
for a given period
Keyword arguments:
project --
start -- start date
end -- end date | 4.612939 | 3.979575 | 1.159154 |
try:
cache = PersonCache.objects.get(
person=person, project=project, date=datetime.date.today(),
start=start, end=end)
return cache.cpu_time, cache.no_jobs
except PersonCache.DoesNotExist:
return 0, 0 | def get_person_usage(person, project, start, end) | Return a tuple of cpu hours and number of jobs for a person in a
specific project
Keyword arguments:
person --
project -- The project the usage is from
start -- start date
end -- end date | 4.209135 | 3.842623 | 1.095381 |
try:
cache = MachineCache.objects.get(
machine=machine, date=datetime.date.today(),
start=start, end=end)
return cache.cpu_time, cache.no_jobs
except MachineCache.DoesNotExist:
return 0, 0 | def get_machine_usage(machine, start, end) | Return a tuple of cpu hours and number of jobs for a machine
for a given period
Keyword arguments:
machine --
start -- start date
end -- end date | 4.50405 | 3.958251 | 1.137889 |
cache = MachineCategoryCache.objects.get(
date=datetime.date.today(),
start=start, end=end)
return cache | def get_machine_category_usage(start, end) | Return a tuple of cpu hours and number of jobs
for a given period
Keyword arguments:
start -- start date
end -- end date | 7.681218 | 10.09721 | 0.760727 |
try:
applicant = Person.active.get(email=email)
existing_person = True
except Person.DoesNotExist:
applicant = Applicant.objects.create(email=email)
existing_person = False
except Person.MultipleObjectsReturned:
applicant = None
existing_person = False
return applicant, existing_person | def get_applicant_from_email(email) | Get applicant from email address.
If the person exists, return (person, True)
If multiple matches, return (None, True)
Otherwise create applicant and return (applicant, False) | 2.500439 | 2.375352 | 1.05266 |
form = forms.InviteUserApplicationForm(request.POST or None)
if request.method == 'POST':
if form.is_valid():
email = form.cleaned_data['email']
applicant, existing_person = get_applicant_from_email(email)
# If applicant is None then there were multiple persons found.
if applicant is None:
return render(
template_name='kgapplications/'
'project_common_invite_multiple.html',
context={'form': form, 'email': email},
request=request)
if existing_person and 'existing' not in request.POST:
return render(
template_name='kgapplications/'
'project_common_invite_existing.html',
context={'form': form, 'person': applicant},
request=request)
application = form.save(commit=False)
application.applicant = applicant
application.project = project
application.save()
state_machine = get_application_state_machine()
response = state_machine.start(request, application)
return response
return render(
template_name='kgapplications/project_common_invite_other.html',
context={'form': form, 'project': project, },
request=request) | def _send_invitation(request, project) | The logged in project leader OR administrator wants to invite somebody. | 3.050801 | 2.993582 | 1.019114 |
project = None
if project_id is not None:
project = get_object_or_404(Project, id=project_id)
if project is None:
if not is_admin(request):
return HttpResponseForbidden('<h1>Access Denied</h1>')
else:
if not project.can_edit(request):
return HttpResponseForbidden('<h1>Access Denied</h1>')
return _send_invitation(request, project) | def send_invitation(request, project_id=None) | The logged in project leader wants to invite somebody to their project. | 2.300539 | 2.329592 | 0.987529 |
# Note default kgapplications/index.html will display error if user logged
# in.
if not settings.ALLOW_REGISTRATIONS:
return render(
template_name='kgapplications/project_common_disabled.html',
context={},
request=request)
roles = {'is_applicant', 'is_authorised'}
if not request.user.is_authenticated:
attrs, _ = saml.parse_attributes(request)
defaults = {'email': attrs['email']}
form = forms.UnauthenticatedInviteUserApplicationForm(
request.POST or None, initial=defaults)
if request.method == 'POST':
if form.is_valid():
email = form.cleaned_data['email']
applicant, existing_person = get_applicant_from_email(email)
# If applicant is None then there were multiple persons found.
# This should never happen as the
# UnauthenticatedInviteUserApplicationForm form disallows
# existing users applying unauthenticated.
assert applicant is not None
# Similarly existing_person should always be False here.
assert not existing_person
application = ProjectApplication()
application.applicant = applicant
application.save()
state_machine = get_application_state_machine()
state_machine.start(request, application, roles)
# we do not show unauthenticated users the application at this
# stage.
url = reverse('index')
return HttpResponseRedirect(url)
return render(
template_name='kgapplications/'
'project_common_invite_unauthenticated.html',
context={'form': form, },
request=request)
else:
if request.method == 'POST':
person = request.user
application = ProjectApplication()
application.applicant = person
application.save()
state_machine = get_application_state_machine()
response = state_machine.start(request, application, roles)
return response
return render(
template_name='kgapplications/'
'project_common_invite_authenticated.html',
context={},
request=request) | def new_application(request) | A new application by a user to start a new project. | 4.177543 | 4.097179 | 1.019614 |
# windows gcc does not support linking with unresolved symbols
if sys.platform == 'win32': # pragma: no cover (windows)
prefix = getattr(sys, 'real_prefix', sys.prefix)
libs = os.path.join(prefix, str('libs'))
return str('-L{} -lpython{}{}').format(libs, *sys.version_info[:2])
cc = subprocess.check_output(('go', 'env', 'CC')).decode('UTF-8').strip()
with _tmpdir() as tmpdir:
testf = os.path.join(tmpdir, 'test.c')
with io.open(testf, 'w') as f:
f.write('int f(int); int main(void) { return f(0); }\n')
for lflag in LFLAGS: # pragma: no cover (platform specific)
try:
subprocess.check_call((cc, testf, lflag), cwd=tmpdir)
return lflag
except subprocess.CalledProcessError:
pass
else: # pragma: no cover (platform specific)
# wellp, none of them worked, fall back to gcc and they'll get a
# hopefully reasonable error message
return LFLAG_GCC | def _get_ldflags() | Determine the correct link flags. This attempts dummy compiles similar
to how autotools does feature detection. | 4.253277 | 4.17797 | 1.018025 |
# check that the feasible_configurations are spin
feasible_configurations = specification.feasible_configurations
if specification.vartype is dimod.BINARY:
feasible_configurations = {tuple(2 * v - 1 for v in config): en
for config, en in feasible_configurations.items()}
# convert ising_quadratic_ranges to the form we expect
ising_quadratic_ranges = specification.ising_quadratic_ranges
quadratic_ranges = {(u, v): ising_quadratic_ranges[u][v] for u, v in specification.graph.edges}
bqm, gap = generate(specification.graph,
feasible_configurations,
specification.decision_variables,
specification.ising_linear_ranges,
quadratic_ranges,
specification.min_classical_gap,
None) # unspecified smt solver
try:
ground = max(feasible_configurations.values())
except ValueError:
ground = 0.0 # if empty
return pm.PenaltyModel.from_specification(specification, bqm, gap, ground) | def get_penalty_model(specification) | Factory function for penaltymodel_maxgap.
Args:
specification (penaltymodel.Specification): The specification
for the desired penalty model.
Returns:
:class:`penaltymodel.PenaltyModel`: Penalty model with the given specification.
Raises:
:class:`penaltymodel.ImpossiblePenaltyModel`: If the penalty cannot be built.
Parameters:
priority (int): -100 | 4.875709 | 5.014775 | 0.972269 |
if encoded_data is None:
encoded_data = {}
if 'num_variables' not in encoded_data:
encoded_data['num_variables'] = len(next(iter(feasible_configurations)))
if 'num_feasible_configurations' not in encoded_data:
encoded_data['num_feasible_configurations'] = len(feasible_configurations)
if 'feasible_configurations' not in encoded_data or 'energies' not in encoded_data:
encoded = {_serialize_config(config): en for config, en in feasible_configurations.items()}
configs, energies = zip(*sorted(encoded.items()))
encoded_data['feasible_configurations'] = json.dumps(configs, separators=(',', ':'))
encoded_data['energies'] = json.dumps(energies, separators=(',', ':'))
insert =
cur.execute(insert, encoded_data) | def insert_feasible_configurations(cur, feasible_configurations, encoded_data=None) | Insert a group of feasible configurations into the cache.
Args:
cur (:class:`sqlite3.Cursor`): An sqlite3 cursor. This function
is meant to be run within a :obj:`with` statement.
feasible_configurations (dict[tuple[int]): The set of feasible
configurations. Each key should be a tuple of variable assignments.
The values are the relative energies.
encoded_data (dict, optional): If a dictionary is provided, it
will be populated with the serialized data. This is useful for
preventing encoding the same information many times.
Examples:
>>> feasible_configurations = {(-1, -1): 0.0, (+1, +1): 0.0}
>>> with pmc.cache_connect(':memory:') as cur:
... pmc.insert_feasible_configurations(cur, feasible_configurations) | 2.327659 | 2.358462 | 0.986939 |
select = \
for num_variables, feasible_configurations, energies in cur.execute(select):
configs = json.loads(feasible_configurations)
energies = json.loads(energies)
yield {_decode_config(config, num_variables): energy
for config, energy in zip(configs, energies)} | def iter_feasible_configurations(cur) | Iterate over all of the sets of feasible configurations in the cache.
Args:
cur (:class:`sqlite3.Cursor`): An sqlite3 cursor. This function
is meant to be run within a :obj:`with` statement.
Yields:
dict[tuple(int): number]: The feasible_configurations. | 6.681154 | 7.169236 | 0.93192 |
def bits(c):
n = 1 << (num_variables - 1)
for __ in range(num_variables):
yield 1 if c & n else -1
n >>= 1
return tuple(bits(c)) | def _decode_config(c, num_variables) | inverse of _serialize_config, always converts to spin. | 4.435584 | 4.031056 | 1.100353 |
if encoded_data is None:
encoded_data = {}
# insert graph and partially populate encoded_data with graph info
insert_graph(cur, nodelist, edgelist, encoded_data=encoded_data)
# need to encode the biases
if 'linear_biases' not in encoded_data:
encoded_data['linear_biases'] = _serialize_linear_biases(linear, nodelist)
if 'quadratic_biases' not in encoded_data:
encoded_data['quadratic_biases'] = _serialize_quadratic_biases(quadratic, edgelist)
if 'offset' not in encoded_data:
encoded_data['offset'] = offset
if 'max_quadratic_bias' not in encoded_data:
encoded_data['max_quadratic_bias'] = max(itervalues(quadratic))
if 'min_quadratic_bias' not in encoded_data:
encoded_data['min_quadratic_bias'] = min(itervalues(quadratic))
if 'max_linear_bias' not in encoded_data:
encoded_data['max_linear_bias'] = max(itervalues(linear))
if 'min_linear_bias' not in encoded_data:
encoded_data['min_linear_bias'] = min(itervalues(linear))
insert = \
cur.execute(insert, encoded_data) | def insert_ising_model(cur, nodelist, edgelist, linear, quadratic, offset, encoded_data=None) | Insert an Ising model into the cache.
Args:
cur (:class:`sqlite3.Cursor`): An sqlite3 cursor. This function
is meant to be run within a :obj:`with` statement.
nodelist (list): The nodes in the graph.
edgelist (list): The edges in the graph.
linear (dict): The linear bias associated with each node in nodelist.
quadratic (dict): The quadratic bias associated with teach edge in edgelist.
offset (float): The constant offset applied to the ising problem.
encoded_data (dict, optional): If a dictionary is provided, it
will be populated with the serialized data. This is useful for
preventing encoding the same information many times. | 1.956501 | 1.899818 | 1.029836 |
linear_bytes = struct.pack('<' + 'd' * len(linear), *[linear[i] for i in nodelist])
return base64.b64encode(linear_bytes).decode('utf-8') | def _serialize_linear_biases(linear, nodelist) | Serializes the linear biases.
Args:
linear: a interable object where linear[v] is the bias
associated with v.
nodelist (list): an ordered iterable containing the nodes.
Returns:
str: base 64 encoded string of little endian 8 byte floats,
one for each of the biases in linear. Ordered according
to nodelist.
Examples:
>>> _serialize_linear_biases({1: -1, 2: 1, 3: 0}, [1, 2, 3])
'AAAAAAAA8L8AAAAAAADwPwAAAAAAAAAA'
>>> _serialize_linear_biases({1: -1, 2: 1, 3: 0}, [3, 2, 1])
'AAAAAAAAAAAAAAAAAADwPwAAAAAAAPC/' | 2.973914 | 3.464838 | 0.858313 |
# assumes quadratic is upper-triangular or reflected in edgelist
quadratic_list = [quadratic[(u, v)] if (u, v) in quadratic else quadratic[(v, u)]
for u, v in edgelist]
quadratic_bytes = struct.pack('<' + 'd' * len(quadratic), *quadratic_list)
return base64.b64encode(quadratic_bytes).decode('utf-8') | def _serialize_quadratic_biases(quadratic, edgelist) | Serializes the quadratic biases.
Args:
quadratic (dict): a dict of the form {edge1: bias1, ...} where
each edge is of the form (node1, node2).
edgelist (list): a list of the form [(node1, node2), ...].
Returns:
str: base 64 encoded string of little endian 8 byte floats,
one for each of the edges in quadratic. Ordered by edgelist.
Example:
>>> _serialize_quadratic_biases({(0, 1): -1, (1, 2): 1, (0, 2): .4},
... [(0, 1), (1, 2), (0, 2)])
'AAAAAAAA8L8AAAAAAADwP5qZmZmZmdk/' | 3.45682 | 3.514021 | 0.983722 |
select = \
for linear_biases, quadratic_biases, num_nodes, edges, offset in cur.execute(select):
nodelist = list(range(num_nodes))
edgelist = json.loads(edges)
yield (nodelist, edgelist,
_decode_linear_biases(linear_biases, nodelist),
_decode_quadratic_biases(quadratic_biases, edgelist),
offset) | def iter_ising_model(cur) | Iterate over all of the Ising models in the cache.
Args:
cur (:class:`sqlite3.Cursor`): An sqlite3 cursor. This function
is meant to be run within a :obj:`with` statement.
Yields:
tuple: A 5-tuple consisting of:
list: The nodelist for a graph in the cache.
list: the edgelist for a graph in the cache.
dict: The linear biases of an Ising Model in the cache.
dict: The quadratic biases of an Ising Model in the cache.
float: The constant offset of an Ising Model in the cache. | 4.770678 | 3.894228 | 1.225064 |
linear_bytes = base64.b64decode(linear_string)
return dict(zip(nodelist, struct.unpack('<' + 'd' * (len(linear_bytes) // 8), linear_bytes))) | def _decode_linear_biases(linear_string, nodelist) | Inverse of _serialize_linear_biases.
Args:
linear_string (str): base 64 encoded string of little endian
8 byte floats, one for each of the nodes in nodelist.
nodelist (list): list of the form [node1, node2, ...].
Returns:
dict: linear biases in a dict.
Examples:
>>> _decode_linear_biases('AAAAAAAA8L8AAAAAAADwPwAAAAAAAAAA', [1, 2, 3])
{1: -1.0, 2: 1.0, 3: 0.0}
>>> _decode_linear_biases('AAAAAAAA8L8AAAAAAADwPwAAAAAAAAAA', [3, 2, 1])
{1: 0.0, 2: 1.0, 3: -1.0} | 2.573374 | 3.194278 | 0.80562 |
quadratic_bytes = base64.b64decode(quadratic_string)
return {tuple(edge): bias for edge, bias in zip(edgelist,
struct.unpack('<' + 'd' * (len(quadratic_bytes) // 8), quadratic_bytes))} | def _decode_quadratic_biases(quadratic_string, edgelist) | Inverse of _serialize_quadratic_biases
Args:
quadratic_string (str) : base 64 encoded string of little
endian 8 byte floats, one for each of the edges.
edgelist (list): a list of edges of the form [(node1, node2), ...].
Returns:
dict: J. A dict of the form {edge1: bias1, ...} where each
edge is of the form (node1, node2).
Example:
>>> _decode_quadratic_biases('AAAAAAAA8L8AAAAAAADwP5qZmZmZmdk/',
... [(0, 1), (1, 2), (0, 2)])
{(0, 1): -1.0, (0, 2): 0.4, (1, 2): 1.0} | 2.717884 | 3.030285 | 0.896907 |
encoded_data = {}
linear, quadratic, offset = penalty_model.model.to_ising()
nodelist = sorted(linear)
edgelist = sorted(sorted(edge) for edge in penalty_model.graph.edges)
insert_graph(cur, nodelist, edgelist, encoded_data)
insert_feasible_configurations(cur, penalty_model.feasible_configurations, encoded_data)
insert_ising_model(cur, nodelist, edgelist, linear, quadratic, offset, encoded_data)
encoded_data['decision_variables'] = json.dumps(penalty_model.decision_variables, separators=(',', ':'))
encoded_data['classical_gap'] = penalty_model.classical_gap
encoded_data['ground_energy'] = penalty_model.ground_energy
insert = \
cur.execute(insert, encoded_data) | def insert_penalty_model(cur, penalty_model) | Insert a penalty model into the database.
Args:
cur (:class:`sqlite3.Cursor`): An sqlite3 cursor. This function
is meant to be run within a :obj:`with` statement.
penalty_model (:class:`penaltymodel.PenaltyModel`): A penalty
model to be stored in the database.
Examples:
>>> import networkx as nx
>>> import penaltymodel.core as pm
>>> import dimod
>>> graph = nx.path_graph(3)
>>> decision_variables = (0, 2)
>>> feasible_configurations = {(-1, -1): 0., (+1, +1): 0.}
>>> spec = pm.Specification(graph, decision_variables, feasible_configurations, dimod.SPIN)
>>> linear = {v: 0 for v in graph}
>>> quadratic = {edge: -1 for edge in graph.edges}
>>> model = dimod.BinaryQuadraticModel(linear, quadratic, 0.0, vartype=dimod.SPIN)
>>> widget = pm.PenaltyModel.from_specification(spec, model, 2., -2)
>>> with pmc.cache_connect(':memory:') as cur:
... pmc.insert_penalty_model(cur, widget) | 3.960935 | 3.596743 | 1.101256 |
encoded_data = {}
nodelist = sorted(specification.graph)
edgelist = sorted(sorted(edge) for edge in specification.graph.edges)
encoded_data['num_nodes'] = len(nodelist)
encoded_data['num_edges'] = len(edgelist)
encoded_data['edges'] = json.dumps(edgelist, separators=(',', ':'))
encoded_data['num_variables'] = len(next(iter(specification.feasible_configurations)))
encoded_data['num_feasible_configurations'] = len(specification.feasible_configurations)
encoded = {_serialize_config(config): en for config, en in specification.feasible_configurations.items()}
configs, energies = zip(*sorted(encoded.items()))
encoded_data['feasible_configurations'] = json.dumps(configs, separators=(',', ':'))
encoded_data['energies'] = json.dumps(energies, separators=(',', ':'))
encoded_data['decision_variables'] = json.dumps(specification.decision_variables, separators=(',', ':'))
encoded_data['classical_gap'] = json.dumps(specification.min_classical_gap, separators=(',', ':'))
select = \
for row in cur.execute(select, encoded_data):
# we need to build the model
linear = _decode_linear_biases(row['linear_biases'], nodelist)
quadratic = _decode_quadratic_biases(row['quadratic_biases'], edgelist)
model = dimod.BinaryQuadraticModel(linear, quadratic, row['offset'], dimod.SPIN) # always spin
yield pm.PenaltyModel.from_specification(specification, model, row['classical_gap'], row['ground_energy']) | def iter_penalty_model_from_specification(cur, specification) | Iterate through all penalty models in the cache matching the
given specification.
Args:
cur (:class:`sqlite3.Cursor`): An sqlite3 cursor. This function
is meant to be run within a :obj:`with` statement.
specification (:class:`penaltymodel.Specification`): A specification
for a penalty model.
Yields:
:class:`penaltymodel.PenaltyModel` | 3.249243 | 3.247305 | 1.000597 |
# Author note: there might be a way that avoids rechecking all of the values without
# side-effects or lots of repeated code, but this seems simpler and more explicit
return cls(specification.graph,
specification.decision_variables,
specification.feasible_configurations,
specification.vartype,
model,
classical_gap,
ground_energy,
ising_linear_ranges=specification.ising_linear_ranges,
ising_quadratic_ranges=specification.ising_quadratic_ranges) | def from_specification(cls, specification, model, classical_gap, ground_energy) | Construct a PenaltyModel from a Specification.
Args:
specification (:class:`.Specification`): A specification that was used
to generate the model.
model (:class:`dimod.BinaryQuadraticModel`): A binary quadratic model
that has ground states that match the feasible_configurations.
classical_gap (numeric): The difference in classical energy between the ground
state and the first excited state. Must be positive.
ground_energy (numeric): The minimum energy of all possible configurations.
Returns:
:class:`.PenaltyModel` | 7.499047 | 7.219474 | 1.038725 |
def relabel_variables(self, mapping, inplace=True):
    """Relabel the variables and nodes according to the given mapping.

    Args:
        mapping (dict[hashable, hashable]): A dict with the current variable
            labels as keys and new labels as values. A partial mapping is
            allowed.
        inplace (bool, optional, default=True): If True, the penalty model is
            updated in-place; otherwise, a new penalty model is returned.

    Returns:
        :class:`.PenaltyModel`: A PenaltyModel with the variables relabeled
        according to mapping. Returns itself when inplace=True.
    """
    # just use the relabeling of each component
    if inplace:
        Specification.relabel_variables(self, mapping, inplace=True)
        self.model.relabel_variables(mapping, inplace=True)
        return self

    spec = Specification.relabel_variables(self, mapping, inplace=False)
    model = self.model.relabel_variables(mapping, inplace=False)
    return PenaltyModel.from_specification(
        spec, model, self.classical_gap, self.ground_energy)
def get_penalty_model(specification, database=None):
    """Factory function for penaltymodel_cache.

    Args:
        specification (penaltymodel.Specification): The specification for the
            desired penalty model.
        database (str, optional): The path to the desired sqlite database
            file. If None, will use the default.

    Returns:
        :class:`penaltymodel.PenaltyModel`: Penalty model with the given
        specification.

    Raises:
        :class:`penaltymodel.MissingPenaltyModel`: If the penalty model is
            not in the cache.

    Parameters:
        priority (int): 100
    """
    # the cache only handles index-labelled nodes, so canonicalize any other
    # labelling and remember how to undo it
    if not _is_index_labelled(specification.graph):
        relabel_applied = True
        mapping, inverse_mapping = _graph_canonicalization(specification.graph)
        specification = specification.relabel_variables(mapping, inplace=False)
    else:
        relabel_applied = False

    # connect to the database. Note that once the connection is made it cannot
    # be broken up between several processes.
    if database is None:
        conn = cache_connect()
    else:
        conn = cache_connect(database)

    # get the penalty_model; close the connection even if the lookup raises,
    # so we do not leak sqlite connections
    try:
        with conn as cur:
            try:
                widget = next(iter_penalty_model_from_specification(cur, specification))
            except StopIteration:
                widget = None
    finally:
        conn.close()

    if widget is None:
        raise pm.MissingPenaltyModel("no penalty model with the given specification found in cache")

    if relabel_applied:
        # restore the caller's original labels in-place
        widget.relabel_variables(inverse_mapping, inplace=True)

    return widget
def cache_penalty_model(penalty_model, database=None):
    """Caching function for penaltymodel_cache.

    Args:
        penalty_model (:class:`penaltymodel.PenaltyModel`): Penalty model to
            be cached.
        database (str, optional): The path to the desired sqlite database
            file. If None, will use the default.
    """
    # the cache only handles index-labelled nodes, so canonicalize any other
    # labelling first
    if not _is_index_labelled(penalty_model.graph):
        mapping, __ = _graph_canonicalization(penalty_model.graph)
        penalty_model = penalty_model.relabel_variables(mapping, inplace=False)

    # connect to the database. Note that once the connection is made it cannot
    # be broken up between several processes.
    if database is None:
        conn = cache_connect()
    else:
        conn = cache_connect(database)

    # load into the database; close the connection even if the insert raises,
    # so we do not leak sqlite connections
    try:
        with conn as cur:
            insert_penalty_model(cur, penalty_model)
    finally:
        conn.close()
def get_penalty_model(specification):
    """Retrieve a PenaltyModel from one of the available factories.

    Args:
        specification (:class:`.Specification`): The specification for the
            desired PenaltyModel.

    Returns:
        :class:`.PenaltyModel`/None: A PenaltyModel as returned by the highest
        priority factory, or None if no factory could produce it.

    Raises:
        :exc:`ImpossiblePenaltyModel`: If the specification describes a
            penalty model that cannot be built by any factory.
    """
    # Iterate through the available factories until one gives a penalty model
    for factory in iter_factories():
        try:
            pm = factory(specification)
        except ImpossiblePenaltyModel:
            # information about impossible models should be propagated; a bare
            # raise preserves the original traceback
            raise
        except FactoryException:
            # any other type of factory exception, continue through the list
            continue

        # if penalty model was found, broadcast to all of the caches. This
        # could be done asynchronously
        for cache in iter_caches():
            cache(pm)

        return pm

    return None
def iter_factories():
    """Iterate through all factories identified by the factory entrypoint.

    Yields:
        function: A function that accepts a :class:`.Specification` and
            returns a :class:`.PenaltyModel`.
    """
    # retrieve all of the factories registered under the factory entrypoint
    factories = (entry.load() for entry in iter_entry_points(FACTORY_ENTRYPOINT))

    # sort the factories from highest priority to lowest. Any factory with
    # unknown priority gets assigned priority -1000.
    for factory in sorted(factories, key=lambda f: getattr(f, 'priority', -1000), reverse=True):
        yield factory
def get_penalty_model(specification):
    """Factory function for penaltymodel-lp.

    Args:
        specification (penaltymodel.Specification): The specification for the
            desired penalty model.

    Returns:
        :class:`penaltymodel.PenaltyModel`: Penalty model with the given
        specification.

    Raises:
        :class:`penaltymodel.exceptions.FactoryException`: If this factory
            cannot build the penalty model (e.g. the model is too large).

    Parameters:
        priority (int): -100
    """
    # check that the feasible_configurations are spin-valued; binary (0/1)
    # configurations are mapped onto spins (-1/+1) via v -> 2*v - 1
    feasible_configurations = specification.feasible_configurations
    if specification.vartype is dimod.BINARY:
        feasible_configurations = {tuple(2 * v - 1 for v in config): en
                                   for config, en in iteritems(feasible_configurations)}

    # convert ising_quadratic_ranges to the flat edge-keyed form we expect
    ising_quadratic_ranges = specification.ising_quadratic_ranges
    quadratic_ranges = {(u, v): ising_quadratic_ranges[u][v] for u, v in specification.graph.edges}

    try:
        bqm, gap = generate_bqm(specification.graph, feasible_configurations,
                                specification.decision_variables,
                                linear_energy_ranges=specification.ising_linear_ranges,
                                quadratic_energy_ranges=quadratic_ranges,
                                min_classical_gap=specification.min_classical_gap)
    except ValueError:
        raise pm.exceptions.FactoryException("Specification is for too large of a model")

    # NOTE(review): ground energy is reported as 0.0 here — presumably
    # generate_bqm normalizes the ground state energy; confirm against its docs
    return pm.PenaltyModel.from_specification(specification, bqm, gap, 0.0)
if linear_ranges is None:
linear_ranges = {}
for v in graph:
if v in linear_ranges:
# check
linear_ranges[v] = Specification._check_range(linear_ranges[v])
else:
# set default
linear_ranges[v] = [-2, 2]
return linear_ranges | def _check_ising_linear_ranges(linear_ranges, graph) | check correctness/populate defaults for ising_linear_ranges. | 3.370876 | 2.940115 | 1.146512 |
def _check_ising_quadratic_ranges(quad_ranges, graph):
    """Check correctness/populate defaults for ising_quadratic_ranges.

    Mutates and returns quad_ranges (a fresh dict is created when None is
    given) so that every edge of graph has a validated, symmetric range.
    """
    if quad_ranges is None:
        quad_ranges = {}

    # first just populate the top level so we can rely on the structure
    for u in graph:
        if u not in quad_ranges:
            quad_ranges[u] = {}

    # next let's propagate and check what is already present
    for u, neighbors in iteritems(quad_ranges):
        for v, rang in iteritems(neighbors):
            # check the range
            rang = Specification._check_range(rang)

            if u in quad_ranges[v]:
                # it's symmetric; both orientations must agree
                if quad_ranges[u][v] != quad_ranges[v][u]:
                    raise ValueError("mismatched ranges for ising_quadratic_ranges")

            quad_ranges[v][u] = quad_ranges[u][v] = rang

    # finally fill in the missing stuff
    for u, v in graph.edges:
        if u not in quad_ranges[v]:
            quad_ranges[u][v] = quad_ranges[v][u] = [-1, 1]

    return quad_ranges
try:
if not isinstance(range_, list):
range_ = list(range_)
min_, max_ = range_
except (ValueError, TypeError):
raise TypeError("each range in ising_linear_ranges should be a list of length 2.")
if not isinstance(min_, Number) or not isinstance(max_, Number) or min_ > max_:
raise ValueError(("each range in ising_linear_ranges should be a 2-tuple "
"(min, max) where min <= max"))
return range_ | def _check_range(range_) | Check that a range is in the format we expect [min, max] and return | 3.548256 | 3.478994 | 1.019909 |
def relabel_variables(self, mapping, inplace=True):
    """Relabel the variables and nodes according to the given mapping.

    Args:
        mapping (dict): A dict mapping the current variable/node labels to
            new ones. A partial mapping is allowed.
        inplace (bool, optional, default=True): If True, the specification is
            updated in-place; otherwise, a new specification is returned.

    Returns:
        :class:`.Specification`: A Specification with the variables relabeled
        according to mapping. Returns itself when inplace=True.
    """
    graph = self.graph
    ising_linear_ranges = self.ising_linear_ranges
    ising_quadratic_ranges = self.ising_quadratic_ranges

    try:
        old_labels = set(iterkeys(mapping))
        new_labels = set(itervalues(mapping))
    except TypeError:
        raise ValueError("mapping targets must be hashable objects")

    for v in new_labels:
        if v in graph and v not in old_labels:
            raise ValueError(('A variable cannot be relabeled "{}" without also relabeling '
                              "the existing variable of the same name").format(v))

    if not inplace:
        return Specification(nx.relabel_nodes(graph, mapping, copy=True),  # also checks the mapping
                             tuple(mapping.get(v, v) for v in self.decision_variables),
                             self.feasible_configurations,  # does not change
                             vartype=self.vartype,  # does not change
                             ising_linear_ranges={mapping.get(v, v): ising_linear_ranges[v] for v in graph},
                             ising_quadratic_ranges={mapping.get(v, v): {mapping.get(u, u): r
                                                                         for u, r in iteritems(neighbors)}
                                                     for v, neighbors in iteritems(ising_quadratic_ranges)})

    # in-place relabelling from here on
    shared = old_labels & new_labels
    if shared:
        # in this case we need to transform to an intermediate state.
        # counter will be used to generate the intermediate labels; as an easy
        # optimization we start the counter with a high number because often
        # variables are labeled by integers starting from 0
        counter = itertools.count(2 * len(self))

        old_to_intermediate = {}
        intermediate_to_new = {}

        for old, new in iteritems(mapping):
            if old == new:
                # we can remove self-labels
                continue

            if old in new_labels or new in old_labels:
                # try to get a new unique label
                lbl = next(counter)
                while lbl in new_labels or lbl in old_labels:
                    lbl = next(counter)

                # add it to the mapping
                old_to_intermediate[old] = lbl
                intermediate_to_new[lbl] = new
            else:
                old_to_intermediate[old] = new
                # don't need to add it to intermediate_to_new because it
                # is a self-label

        Specification.relabel_variables(self, old_to_intermediate, inplace=True)
        Specification.relabel_variables(self, intermediate_to_new, inplace=True)
        return self

    # modifies graph in place
    nx.relabel_nodes(self.graph, mapping, copy=False)

    # this is always a new object
    self.decision_variables = tuple(mapping.get(v, v) for v in self.decision_variables)

    # we can just relabel in-place without worrying about conflict
    for v in old_labels:
        if v in mapping:
            ising_linear_ranges[mapping[v]] = ising_linear_ranges[v]
            del ising_linear_ranges[v]

    # need to do the deeper level first
    for neighbors in itervalues(ising_quadratic_ranges):
        for v in list(neighbors):
            if v in mapping:
                neighbors[mapping[v]] = neighbors[v]
                del neighbors[v]

    # now the top level
    for v in old_labels:
        if v in mapping:
            ising_quadratic_ranges[mapping[v]] = ising_quadratic_ranges[v]
            del ising_quadratic_ranges[v]

    return self
def get_item(dictionary, tuple_key, default_value):
    """Grab values from a dictionary using an unordered tuple as a key.

    Dictionary should not contain None, 0, or False as dictionary values —
    such values would be skipped by the truthiness test below.

    Args:
        dictionary: Dictionary that uses two-element tuple as keys
        tuple_key: Unordered tuple of two elements
        default_value: Value that is returned when the tuple_key is not found
            in the dictionary

    Returns:
        The value stored under either orientation of the key, otherwise
        default_value.
    """
    u, v = tuple_key

    # Grab tuple-values from dictionary
    tuple1 = dictionary.get((u, v), None)
    tuple2 = dictionary.get((v, u), None)

    # Return the first value that is not {None, 0, False}
    return tuple1 or tuple2 or default_value
if len(spin_states) == 0:
return None
# Set up an empty matrix
n_states = len(spin_states)
m_linear = len(nodes)
m_quadratic = len(edges)
matrix = np.empty((n_states, m_linear + m_quadratic + 2)) # +2 columns for offset and gap
# Populate linear terms (i.e. spin states)
if spin_states.ndim == 1:
spin_states = np.expand_dims(spin_states, 1)
matrix[:, :m_linear] = spin_states
# Populate quadratic terms
node_indices = dict(zip(nodes, range(m_linear)))
for j, (u, v) in enumerate(edges):
u_ind = node_indices[u]
v_ind = node_indices[v]
matrix[:, j + m_linear] = np.multiply(matrix[:, u_ind], matrix[:, v_ind])
# Populate offset and gap columns, respectively
matrix[:, -2] = offset_weight
matrix[:, -1] = gap_weight
return matrix | def _get_lp_matrix(spin_states, nodes, edges, offset_weight, gap_weight) | Creates an linear programming matrix based on the spin states, graph, and scalars provided.
LP matrix:
[spin_states, corresponding states of edges, offset_weight, gap_weight]
Args:
spin_states: Numpy array of spin states
nodes: Iterable
edges: Iterable of tuples
offset_weight: Numpy 1-D array or number
gap_weight: Numpy 1-D array or a number | 2.479795 | 2.434353 | 1.018667 |
def limitReal(x, max_denominator=1000000):
    """Create a pysmt Real constant from x.

    Args:
        x (number): A number to be cast to a pysmt constant.
        max_denominator (int, optional): The maximum size of the denominator.
            Default 1000000.

    Returns:
        A Real constant with the given value and the denominator limited.
    """
    # convert to an exact fraction with a bounded denominator, then hand the
    # (numerator, denominator) pair to pysmt
    f = Fraction(x).limit_denominator(max_denominator)
    return Real((f.numerator, f.denominator))
def from_graph(cls, graph, linear_energy_ranges, quadratic_energy_ranges):
    """Create Theta from a graph and energy ranges.

    Args:
        graph (:obj:`networkx.Graph`): Provides the structure for Theta.
        linear_energy_ranges (dict): A dict of the form {v: (min, max), ...}
            where min and max are the range of values allowed to v.
        quadratic_energy_ranges (dict): A dict of the form
            {(u, v): (min, max), ...} where min and max are the range of
            values allowed to (u, v).

    Returns:
        :obj:`.Theta`
    """
    get_env().enable_infix_notation = True  # not sure why we need this here

    theta = cls.empty(dimod.SPIN)
    theta.add_offset(Symbol('offset', REAL))

    def Linear(v):
        # create a bounded SMT symbol for the linear bias of v
        bias = Symbol('h_{}'.format(v), REAL)

        min_, max_ = linear_energy_ranges[v]

        theta.assertions.add(LE(bias, limitReal(max_)))
        theta.assertions.add(GE(bias, limitReal(min_)))

        return bias

    def Quadratic(u, v):
        # create a bounded SMT symbol for the quadratic bias of (u, v);
        # the range may be keyed on either orientation of the edge
        bias = Symbol('J_{},{}'.format(u, v), REAL)

        if (v, u) in quadratic_energy_ranges:
            min_, max_ = quadratic_energy_ranges[(v, u)]
        else:
            min_, max_ = quadratic_energy_ranges[(u, v)]

        theta.assertions.add(LE(bias, limitReal(max_)))
        theta.assertions.add(GE(bias, limitReal(min_)))

        return bias

    for v in graph.nodes:
        theta.add_variable(v, Linear(v))

    for u, v in graph.edges:
        theta.add_interaction(u, v, Quadratic(u, v))

    return theta
def to_bqm(self, model):
    """Given a pysmt model, return a bqm.

    Adds the values of the biases as determined by the SMT solver to a bqm.

    Args:
        model: A pysmt model.

    Returns:
        :obj:`dimod.BinaryQuadraticModel`
    """
    # pull the solver-assigned numeric value out of every symbolic bias
    linear = ((v, float(model.get_py_value(bias)))
              for v, bias in self.linear.items())
    quadratic = ((u, v, float(model.get_py_value(bias)))
                 for (u, v), bias in self.quadratic.items())
    offset = float(model.get_py_value(self.offset))

    return dimod.BinaryQuadraticModel(linear, quadratic, offset, dimod.SPIN)
def SpinTimes(spin, bias):
    """Define our own multiplication for bias times spins. This allows for
    cleaner log code as well as value checking.

    Args:
        spin (int): -1 or 1
        bias (:class:`pysmt.shortcuts.Symbol`): The bias

    Returns:
        spin * bias

    Raises:
        TypeError: If spin is not an int.
        ValueError: If spin is an int other than -1 or 1.
    """
    if not isinstance(spin, int):
        raise TypeError('spin must be an int')
    if spin == -1:
        return Times(Real((-1, 1)), bias)  # -1 / 1
    elif spin == 1:
        # identity
        return bias
    else:
        raise ValueError('expected spins to be -1., or 1.')
def _elimination_trees(theta, decision_variables):
    """From Theta and the decision variables, determine the elimination order
    and the induced trees over the auxiliary variables.
    """
    # auxiliary variables are any variables that are not decision
    auxiliary_variables = set(n for n in theta.linear if n not in decision_variables)

    # get the adjacency of the auxiliary subgraph
    adj = {v: {u for u in theta.adj[v] if u in auxiliary_variables}
           for v in theta.adj if v in auxiliary_variables}

    # get the elimination order that minimizes treewidth
    tw, order = dnx.treewidth_branch_and_bound(adj)

    # for each eliminated variable, record the neighbours still present at the
    # moment of elimination (its ancestors)
    ancestors = {}
    for n in order:
        ancestors[n] = set(adj[n])

        # now make v simplicial by making its neighborhood a clique, then
        # continue
        neighbors = adj[n]
        for u, v in itertools.combinations(neighbors, 2):
            adj[u].add(v)
            adj[v].add(u)
        for v in neighbors:
            adj[v].discard(n)
        del adj[n]

    # walk the elimination order backwards; each variable becomes a child of
    # the first later variable among its ancestors, otherwise it is a root
    roots = {}
    nodes = {v: {} for v in ancestors}
    for vidx in range(len(order) - 1, -1, -1):
        v = order[vidx]

        if ancestors[v]:
            for u in order[vidx + 1:]:
                if u in ancestors[v]:
                    # v is a child of u
                    nodes[u][v] = nodes[v]  # nodes[u][v] = children of v
                    break
        else:
            roots[v] = nodes[v]  # roots[v] = children of v

    return roots, ancestors
def energy_upperbound(self, spins):
    """A formula for an upper bound on the energy of Theta with spins fixed.

    Args:
        spins (dict): Spin values for a subset of the variables in Theta.

    Returns:
        Formula that upper bounds the energy with spins fixed.
    """
    subtheta = self.theta.copy()
    subtheta.fix_variables(spins)

    # ok, let's start eliminating variables
    trees = self._trees

    if not trees:
        # if there are no variables to eliminate, then the offset of
        # subtheta is the exact value and we can just return it
        assert not subtheta.linear and not subtheta.quadratic
        return subtheta.offset

    energy = Plus(self.message_upperbound(trees, {}, subtheta), subtheta.offset)

    return energy
def energy(self, spins, break_aux_symmetry=True):
    """A formula for the exact energy of Theta with spins fixed.

    Args:
        spins (dict): Spin values for a subset of the variables in Theta.
        break_aux_symmetry (bool, optional): Default True. If True, break
            the aux variable symmetry by setting all aux variables to 1 for
            one of the feasible configurations. If the energy ranges are not
            symmetric then this can make finding models impossible.

    Returns:
        Formula for the exact energy of Theta with spins fixed.
    """
    subtheta = self.theta.copy()
    subtheta.fix_variables(spins)

    # we need aux variables; the counter keeps the symbol names unique per call
    av = next(self._auxvar_counter)
    auxvars = {v: Symbol('aux{}_{}'.format(av, v), BOOL) for v in subtheta.linear}
    if break_aux_symmetry and av == 0:
        # without loss of generality, we can assume that the aux variables
        # are all spin-up for one configuration
        self.assertions.update(set(auxvars.values()))

    trees = self._trees

    if not trees:
        # if there are no variables to eliminate, then the offset of
        # subtheta is the exact value and we can just return it
        assert not subtheta.linear and not subtheta.quadratic
        return subtheta.offset

    energy = Plus(self.message(trees, {}, subtheta, auxvars), subtheta.offset)

    return energy
def message(self, tree, spins, subtheta, auxvars):
    """Determine the energy of the elimination tree.

    Args:
        tree (dict): The current elimination tree
        spins (dict): The current fixed spins
        subtheta (dict): Theta with spins fixed.
        auxvars (dict): The auxiliary variables for the given spins.

    Returns:
        The formula for the energy of the tree.
    """
    energy_sources = set()
    for v, children in tree.items():
        aux = auxvars[v]

        assert all(u in spins for u in self._ancestors[v])

        # build an iterable over all of the energies contributions
        # that we can exactly determine given v and our known spins
        # in these contributions we assume that v is positive
        # (note: the closure reads v from the loop; it is consumed
        # immediately by Plus below, so late binding is not an issue)
        def energy_contributions():
            yield subtheta.linear[v]

            for u, bias in subtheta.adj[v].items():
                if u in spins:
                    yield SpinTimes(spins[u], bias)

        plus_energy = Plus(energy_contributions())
        minus_energy = SpinTimes(-1, plus_energy)

        # if the variable has children, we need to recursively determine
        # their energies
        if children:
            # set v to be positive
            spins[v] = 1
            plus_energy = Plus(plus_energy, self.message(children, spins, subtheta, auxvars))
            spins[v] = -1
            minus_energy = Plus(minus_energy, self.message(children, spins, subtheta, auxvars))
            del spins[v]

        # we now need a real-valued smt variable to be our message
        m = FreshSymbol(REAL)

        ancestor_aux = {auxvars[u] if spins[u] > 0 else Not(auxvars[u])
                        for u in self._ancestors[v]}
        plus_aux = And({aux}.union(ancestor_aux))
        minus_aux = And({Not(aux)}.union(ancestor_aux))

        # m is bounded above by both branch energies and forced to reach the
        # branch selected by the aux variables
        self.assertions.update({LE(m, plus_energy),
                                LE(m, minus_energy),
                                Implies(plus_aux, GE(m, plus_energy)),
                                Implies(minus_aux, GE(m, minus_energy))
                                })

        energy_sources.add(m)

    return Plus(energy_sources)
def message_upperbound(self, tree, spins, subtheta):
    """Determine an upper bound on the energy of the elimination tree.

    Args:
        tree (dict): The current elimination tree
        spins (dict): The current fixed spins
        subtheta (dict): Theta with spins fixed.

    Returns:
        The formula for the upper bound on the energy of the tree.
    """
    energy_sources = set()
    for v, subtree in tree.items():

        assert all(u in spins for u in self._ancestors[v])

        # build an iterable over all of the energies contributions
        # that we can exactly determine given v and our known spins
        # in these contributions we assume that v is positive
        def energy_contributions():
            yield subtheta.linear[v]

            for u, bias in subtheta.adj[v].items():
                if u in spins:
                    yield Times(limitReal(spins[u]), bias)

        energy = Plus(energy_contributions())

        # if there are no more variables in the order, we can stop
        # otherwise we need the next message variable
        if subtree:
            spins[v] = 1.
            plus = self.message_upperbound(subtree, spins, subtheta)
            spins[v] = -1.
            minus = self.message_upperbound(subtree, spins, subtheta)
            del spins[v]
        else:
            plus = minus = limitReal(0.0)

        # we now need a real-valued smt variable to be our message
        m = FreshSymbol(REAL)

        self.assertions.update({LE(m, Plus(energy, plus)),
                                LE(m, Plus(Times(energy, limitReal(-1.)), minus))})

        energy_sources.add(m)

    return Plus(energy_sources)
def set_energy(self, spins, target_energy):
    """Set the energy of Theta with spins fixed to target_energy.

    Args:
        spins (dict): Spin values for a subset of the variables in Theta.
        target_energy (float): The desired energy for Theta with spins fixed.

    Notes:
        Adds an equality constraint to assertions.
    """
    spin_energy = self.energy(spins)
    self.assertions.add(Equals(spin_energy, limitReal(target_energy)))
def set_energy_upperbound(self, spins, offset=0):
    """Require the energy of Theta with spins fixed to be at least
    (gap + offset).

    Args:
        spins (dict): Spin values for a subset of the variables in Theta.
        offset (float): A value that is added to the bound. Default 0.

    Notes:
        Adds an inequality constraint to assertions.
    """
    spin_energy = self.energy_upperbound(spins)
    self.assertions.add(GE(spin_energy, self.gap + offset))
next_poll = time.time()
while True:
next_poll += self._poll_period
timeout = next_poll - time.time()
if timeout < 0:
timeout = 0
try:
return self._stop_queue.get(timeout=timeout)
except TimeoutError:
# No stop, no problem
pass
try:
self.handle_changes(self.client.get_changes())
except Exception:
# TODO: should fault here?
self.log.exception("Error while getting changes") | def _poll_loop(self) | At self.poll_period poll for changes | 4.318529 | 3.96327 | 1.089638 |
def camel_to_title(name):
    """Takes a camelCaseFieldName and returns a Title Case Field Name.

    Args:
        name (str): E.g. camelCaseFieldName. Assumed non-empty.

    Returns:
        str: Title Case converted name. E.g. Camel Case Field Name
    """
    split = re.findall(r"[A-Z]?[a-z0-9]+|[A-Z]+(?=[A-Z]|$)", name)
    ret = " ".join(split)
    # capitalize the very first character
    ret = ret[0].upper() + ret[1:]
    return ret
def snake_to_camel(name):
    """Takes a snake_field_name and returns a camelCaseFieldName.

    Args:
        name (str): E.g. snake_field_name or SNAKE_FIELD_NAME. Assumed
            non-empty.

    Returns:
        str: camelCase converted name. E.g. snakeFieldName
    """
    ret = "".join(x.title() for x in name.split("_"))
    # lower-case the very first character
    ret = ret[0].lower() + ret[1:]
    return ret
def to_dict(self):
    # type: () -> OrderedDict
    """Create a dictionary representation of object attributes.

    Returns:
        OrderedDict: serialised version of self
    """
    d = OrderedDict()
    if self.typeid:
        d["typeid"] = self.typeid
    for k in self.call_types:
        d[k] = serialize_object(getattr(self, k))
    return d
def from_dict(cls, d, ignore=()):
    """Create an instance from a serialized version of cls.

    Args:
        d (dict): Endpoints of cls to set
        ignore (tuple): Keys to ignore

    Returns:
        Instance of this class
    """
    filtered = {}
    for k, v in d.items():
        if k == "typeid":
            # a stored typeid must match the class we are deserializing into
            assert v == cls.typeid, \
                "Dict has typeid %s but %s has typeid %s" % \
                (v, cls, cls.typeid)
        elif k not in ignore:
            filtered[k] = v
    try:
        inst = cls(**filtered)
    except TypeError as e:
        # re-raise with the typeid so the failing type is identifiable
        raise TypeError("%s raised error: %s" % (cls.typeid, str(e)))
    return inst
def register_subclass(cls, typeid):
    """Register a subclass so from_dict() works.

    Args:
        typeid (str): Type identifier for subclass

    Returns:
        A class decorator that records the subclass in the lookup table and
        stamps it with typeid.
    """
    def decorator(subclass):
        cls._subcls_lookup[typeid] = subclass
        subclass.typeid = typeid
        return subclass
    return decorator
def lookup_subclass(cls, d):
    """Look up a class based on a serialized dictionary containing a typeid.

    Args:
        d (dict): Dictionary with key "typeid"

    Returns:
        Serializable subclass

    Raises:
        FieldError: If "typeid" is missing from d or is not registered.
    """
    try:
        typeid = d["typeid"]
    except KeyError:
        raise FieldError("typeid not present in keys %s" % list(d))

    subclass = cls._subcls_lookup.get(typeid, None)
    if not subclass:
        raise FieldError("'%s' not a valid typeid" % typeid)
    else:
        return subclass
def start(self, timeout=None):
    """Start the process going.

    Args:
        timeout (float): Maximum amount of time to wait for each spawned
            process. None means forever
    """
    assert self.state == STOPPED, "Process already started"
    self.state = STARTING
    should_publish = self._start_controllers(
        self._controllers.values(), timeout)
    if should_publish:
        self._publish_controllers(timeout)
    self.state = STARTED
def stop(self, timeout=None):
    """Stop the process and wait for it to finish.

    Args:
        timeout (float): Maximum amount of time to wait for each spawned
            object. None means forever
    """
    assert self.state == STARTED, "Process not started"
    self.state = STOPPING
    # Allow every controller a chance to clean up
    self._run_hook(ProcessStopHook, timeout=timeout)
    # wait for any outstanding spawned work before tearing down
    for s in self._spawned:
        if not s.ready():
            self.log.debug(
                "Waiting for %s *%s **%s", s._function, s._args, s._kwargs)
            s.wait(timeout=timeout)
    self._spawned = []
    self._controllers = OrderedDict()
    self._unpublished = set()
    self.state = STOPPED
    self.log.debug("Done process.stop()")
def spawn(self, function, *args, **kwargs):
    # type: (Callable[..., Any], *Any, **Any) -> Spawned
    """Runs the function in a worker thread, returning a Result object.

    Args:
        function: Function to run
        args: Positional arguments to run the function with
        kwargs: Keyword arguments to run the function with

    Returns:
        Spawned: Something you can call wait(timeout) on to see when it's
            finished executing
    """
    assert self.state != STOPPED, "Can't spawn when process stopped"
    spawned = Spawned(function, args, kwargs)
    self._spawned.append(spawned)
    self._spawn_count += 1
    # Filter out things that are ready to avoid memory leaks
    if self._spawn_count > SPAWN_CLEAR_COUNT:
        self._clear_spawn_list()
    return spawned
def add_controller(self, controller, timeout=None):
    # type: (Controller, float) -> None
    """Add a controller to be hosted by this process.

    Args:
        controller (Controller): Its controller
        timeout (float): Maximum amount of time to wait for each spawned
            object. None means forever
    """
    assert controller.mri not in self._controllers, \
        "Controller already exists for %s" % controller.mri
    self._controllers[controller.mri] = controller
    controller.setup(self)
    # if the process has already been started, bring the new controller up
    # and publish it when appropriate
    if self.state:
        should_publish = self._start_controllers([controller], timeout)
        if self.state == STARTED and should_publish:
            self._publish_controllers(timeout)
def block_view(self, mri):
    # type: (str) -> Block
    """Get a Block view from a Controller with given mri."""
    controller = self.get_controller(mri)
    block = controller.block_view()
    return block
def update_title(self, _, info):
    # type: (object, TitleInfo) -> None
    """Set the label of the Block Meta object from the part's title."""
    with self._lock:
        self._block.meta.set_label(info.title)
def update_health(self, reporter, info):
    # type: (object, HealthInfo) -> None
    """Set the health attribute from a part's reported alarm. Called from part."""
    with self.changes_squashed:
        alarm = info.alarm
        if alarm.is_ok():
            # reporter has recovered; forget any fault we held for it
            self._faults.pop(reporter, None)
        else:
            self._faults[reporter] = alarm
        if self._faults:
            # Report the most severe of the remaining faults
            faults = sorted(self._faults.values(),
                            key=lambda a: a.severity.value)
            alarm = faults[-1]
            text = faults[-1].message
        else:
            alarm = None
            text = "OK"
        self.health.set_value(text, alarm=alarm)
def make_parts_for(self, field_name, field_data):
    """Create the relevant parts for this field.

    Args:
        field_name (str): Short field name, e.g. VAL
        field_data (FieldData): Field data object

    Raises:
        ValueError: If the (type, subtype) combination is not recognised.
    """
    typ = field_data.field_type
    subtyp = field_data.field_subtype

    # read-only field types cannot be written to
    if typ in ("read", "xadc"):
        writeable = False
    else:
        writeable = True

    # dispatch on the (type, subtype) combination
    if typ == "time" or typ in ("param", "read") and subtyp == "time":
        self._make_time_parts(field_name, field_data, writeable)
    elif typ == "write" and subtyp == "action":
        self._make_action_part(field_name, field_data)
    elif typ in ("param", "read", "write", "xadc"):
        self._make_param_part(field_name, field_data, writeable)
    elif typ == "bit_out":
        self._make_out(field_name, field_data, "bit")
    elif typ == "pos_out":
        self._make_out(field_name, field_data, "pos")
        self._make_scale_offset(field_name)
        self._make_out_capture(field_name, field_data)
    elif typ == "ext_out":
        self._make_out_capture(field_name, field_data)
    elif typ == "bit_mux":
        self._make_mux(field_name, field_data, "bit")
        self._make_mux_delay(field_name)
    elif typ == "pos_mux":
        self._make_mux(field_name, field_data, "pos")
    elif typ == "table":
        self._make_table(field_name, field_data)
    else:
        raise ValueError("Unknown type %r subtype %r" % (typ, subtyp))
def block_view(self, mri):
    # type: (str) -> Block
    """Get a view of a block.

    Args:
        mri: The mri of the controller hosting the block

    Returns:
        Block: The block we control
    """
    controller = self.get_controller(mri)
    # pass a weak proxy of ourselves so the view does not keep us alive
    block = controller.block_view(weakref.proxy(self))
    return block
def set_notify_dispatch_request(self, notify_dispatch_request, *args):
    """Set function to call just before requests are dispatched.

    Args:
        notify_dispatch_request (callable): function will be called with
            request as single arg just before request is dispatched
        args: Extra arguments stored alongside the callback.
    """
    self._notify_dispatch_request = notify_dispatch_request
    self._notify_args = args
def ignore_stops_before_now(self):
    """Ignore any stops received before this point."""
    # a fresh sentinel marks "now" in the queue; anything ahead of it is stale
    self._sentinel_stop = object()
    self._q.put(self._sentinel_stop)
def put(self, path, value, timeout=None, event_timeout=None):
    """Puts a value to a path and returns when it completes.

    Args:
        path (list): The path to put to
        value (object): The value to set
        timeout (float): time in seconds to wait for responses, wait forever
            if None
        event_timeout: maximum time in seconds to wait between each response
            event, wait forever if None

    Returns:
        The value after the put completes
    """
    future = self.put_async(path, value)
    self.wait_all_futures(
        future, timeout=timeout, event_timeout=event_timeout)
    return future.result()
def put_async(self, path, value):
    """Puts a value to a path and returns immediately.

    Args:
        path (list): The path to put to
        value (object): The value to set

    Returns:
        Future: A single Future which will resolve to the result
    """
    request = Put(self._get_next_id(), path, value)
    # responses are pushed onto our queue for the event loop to handle
    request.set_callback(self._q.put)
    future = self._dispatch_request(request)
    return future
future = self.post_async(path, params)
self.wait_all_futures(
future, timeout=timeout, event_timeout=event_timeout)
return future.result() | def post(self, path, params=None, timeout=None, event_timeout=None) | Synchronously calls a method
Args:
path (list): The path to post to
params (dict): parameters for the call
timeout (float): time in seconds to wait for responses, wait
forever if None
event_timeout: maximum time in seconds to wait between each response
event, wait forever if None
Returns:
the result from 'method' | 3.805827 | 4.446843 | 0.855849 |
request = Post(self._get_next_id(), path, params)
request.set_callback(self._q.put)
future = self._dispatch_request(request)
return future | def post_async(self, path, params=None) | Asynchronously calls a function on a child block
Args:
path (list): The path to post to
params (dict): parameters for the call
Returns:
Future: as single Future that will resolve to the result | 6.723466 | 8.149645 | 0.825001 |
request = Subscribe(self._get_next_id(), path, delta=False)
request.set_callback(self._q.put)
# If self is in args, then make weak version of it
saved_args = []
for arg in args:
if arg is self:
saved_args.append(weakref.proxy(self))
else:
saved_args.append(arg)
self._subscriptions[request.id] = (callback, saved_args)
future = self._dispatch_request(request)
return future | def subscribe(self, path, callback, *args) | Subscribe to changes in a given attribute and call
``callback(future, value, *args)`` when it changes
Returns:
Future: A single Future which will resolve to the result | 4.617228 | 5.401759 | 0.854764 |
assert future not in self._pending_unsubscribes, \
"%r has already been unsubscribed from" % \
self._pending_unsubscribes[future]
subscribe = self._requests[future]
self._pending_unsubscribes[future] = subscribe
# Clear out the subscription
self._subscriptions.pop(subscribe.id)
request = Unsubscribe(subscribe.id)
request.set_callback(self._q.put)
try:
controller = self.get_controller(subscribe.path[0])
except ValueError:
# Controller has already gone, probably during tearDown
pass
else:
self.handle_request(controller, request) | def unsubscribe(self, future) | Terminates the subscription given by a future
Args:
future (Future): The future of the original subscription | 4.724767 | 5.027442 | 0.939795 |
futures = ((f, r) for f, r in self._requests.items()
if isinstance(r, Subscribe)
and f not in self._pending_unsubscribes)
if futures:
for future, request in futures:
if callback:
log.warn("Unsubscribing from %s", request.path)
cothread.Callback(self.unsubscribe, future)
else:
self.unsubscribe(future) | def unsubscribe_all(self, callback=False) | Send an unsubscribe for all active subscriptions | 4.523258 | 4.547796 | 0.994604 |
future = self.when_matches_async(path, good_value, bad_values)
self.wait_all_futures(
future, timeout=timeout, event_timeout=event_timeout) | def when_matches(self, path, good_value, bad_values=None, timeout=None,
event_timeout=None) | Resolve when an path value equals value
Args:
path (list): The path to wait to
good_value (object): the value to wait for
bad_values (list): values to raise an error on
timeout (float): time in seconds to wait for responses, wait
forever if None
event_timeout: maximum time in seconds to wait between each response
event, wait forever if None | 3.539281 | 3.787581 | 0.934443 |
when = When(good_value, bad_values)
future = self.subscribe(path, when)
when.set_future_context(future, weakref.proxy(self))
return future | def when_matches_async(self, path, good_value, bad_values=None) | Wait for an attribute to become a given value
Args:
path (list): The path to wait to
good_value: If it is a callable then expect it to return
True if we are satisfied and raise on error. If it is not
callable then compare each value against this one and return
if it matches.
bad_values (list): values to raise an error on
Returns:
Future: a single Future that will resolve when the path matches
good_value or bad_values | 6.957849 | 9.669888 | 0.719538 |
# type: (Union[List[Future], Future, None], float, float) -> None
if timeout is None:
end = None
else:
end = time.time() + timeout
if not isinstance(futures, list):
if futures:
futures = [futures]
else:
futures = []
filtered_futures = []
for f in futures:
if f.done():
if f.exception() is not None:
raise f.exception()
else:
filtered_futures.append(f)
while filtered_futures:
if event_timeout is not None:
until = time.time() + event_timeout
if end is not None:
until = min(until, end)
else:
until = end
self._service_futures(filtered_futures, until) | def wait_all_futures(self, futures, timeout=None, event_timeout=None) | Services all futures until the list 'futures' are all done
then returns. Calls relevant subscription callbacks as they
come off the queue and raises an exception on abort
Args:
futures: a `Future` or list of all futures that the caller
wants to wait for
timeout: maximum total time in seconds to wait for responses, wait
forever if None
event_timeout: maximum time in seconds to wait between each response
event, wait forever if None | 2.384013 | 2.590415 | 0.920321 |
until = time.time() + seconds
try:
while True:
self._service_futures([], until)
except TimeoutError:
return | def sleep(self, seconds) | Services all futures while waiting
Args:
seconds (float): Time to wait | 12.573251 | 11.128187 | 1.129856 |
if until is None:
timeout = None
else:
timeout = until - time.time()
if timeout < 0:
timeout = 0
try:
response = self._q.get(timeout)
except TimeoutError:
raise TimeoutError(
"Timeout waiting for %s" % self._describe_futures(futures))
if response is self._sentinel_stop:
self._sentinel_stop = None
elif response is self.STOP:
if self._sentinel_stop is None:
# This is a stop we should listen to...
raise AbortedError(
"Aborted waiting for %s" % self._describe_futures(futures))
elif isinstance(response, Update):
# This is an update for a subscription
if response.id in self._subscriptions:
func, args = self._subscriptions[response.id]
func(response.value, *args)
elif isinstance(response, Return):
future = self._futures.pop(response.id)
del self._requests[future]
self._pending_unsubscribes.pop(future, None)
result = response.value
future.set_result(result)
try:
futures.remove(future)
except ValueError:
pass
elif isinstance(response, Error):
future = self._futures.pop(response.id)
del self._requests[future]
future.set_exception(response.message)
try:
futures.remove(future)
except ValueError:
pass
else:
raise response.message | def _service_futures(self, futures, until=None) | Args:
futures (list): The futures to service
until (float): Timestamp to wait until | 2.87859 | 2.889136 | 0.99635 |
# type: (float) -> T
self.wait(timeout)
if isinstance(self._result, Exception):
raise self._result
return self._result | def get(self, timeout=None) | Return the result or raise the error the function has produced | 4.475245 | 4.2377 | 1.056055 |
VERSION_FILE = os.path.join(module_name, 'version.py')
txt = open(VERSION_FILE).read()
mo = re.search(r'^__version__ = [\'"]([^\'"]*)[\'"]', txt, re.M)
if mo:
version = mo.group(1)
bs_version = os.environ.get('MODULEVER', '0.0')
assert bs_version == "0.0" or bs_version == version, \
"Version {} specified by the build system doesn't match {} in " \
"version.py".format(bs_version, version)
return version
else:
raise RuntimeError('Unable to find version string in {0}.'
.format(VERSION_FILE)) | def get_version() | Extracts the version number from the version.py file. | 2.880871 | 2.801808 | 1.028219 |
# type: (Part, ConfigureParamsInfo) -> None
with self.changes_squashed:
# Update the dict
if part:
self.part_configure_params[part] = info
# No process yet, so don't do this yet
if self.process is None:
return
# Get the model of our configure method as the starting point
configure_model = MethodModel.from_callable(self.configure)
# These will not be inserted as the already exist
ignored = tuple(ConfigureHook.call_types)
# Re-calculate the following
required = []
takes_elements = OrderedDict()
defaults = OrderedDict()
# First do the required arguments
for k in configure_model.takes.required:
required.append(k)
takes_elements[k] = configure_model.takes.elements[k]
for part in self.parts.values():
try:
info = self.part_configure_params[part]
except KeyError:
continue
for k in info.required:
if k not in required and k not in ignored:
required.append(k)
takes_elements[k] = info.metas[k]
# Now the default and optional
for k in configure_model.takes.elements:
if k not in required:
takes_elements[k] = configure_model.takes.elements[k]
for part in self.parts.values():
try:
info = self.part_configure_params[part]
except KeyError:
continue
for k in info.metas:
if k not in required and k not in ignored:
takes_elements[k] = info.metas[k]
if k in info.defaults:
defaults[k] = info.defaults[k]
# Set the values
configure_model.takes.set_elements(takes_elements)
configure_model.takes.set_required(required)
configure_model.set_defaults(defaults)
# Update methods from the new metas
self._block.configure.set_takes(configure_model.takes)
self._block.configure.set_defaults(configure_model.defaults)
# Now make a validate model with returns
validate_model = MethodModel.from_dict(configure_model.to_dict())
returns = MapMeta.from_dict(validate_model.takes.to_dict())
for v in returns.elements.values():
v.set_writeable(False)
self._block.validate.set_takes(validate_model.takes)
self._block.validate.set_defaults(validate_model.defaults)
self._block.validate.set_returns(returns) | def update_configure_params(self, part=None, info=None) | Tell controller part needs different things passed to Configure | 3.147335 | 3.17899 | 0.990042 |
# type: (AGenerator, AAxesToMove, **Any) -> AConfigureParams
iterations = 10
# We will return this, so make sure we fill in defaults
for k, default in self._block.configure.defaults.items():
if k not in kwargs:
kwargs[k] = default
# The validated parameters we will eventually return
params = ConfigureParams(generator, axesToMove, **kwargs)
# Make some tasks just for validate
part_contexts = self.create_part_contexts()
# Get any status from all parts
status_part_info = self.run_hooks(
ReportStatusHook(p, c) for p, c in part_contexts.items())
while iterations > 0:
# Try up to 10 times to get a valid set of parameters
iterations -= 1
# Validate the params with all the parts
validate_part_info = self.run_hooks(
ValidateHook(p, c, status_part_info, **kwargs)
for p, c, kwargs in self._part_params(part_contexts, params))
tweaks = ParameterTweakInfo.filter_values(validate_part_info)
if tweaks:
for tweak in tweaks:
deserialized = self._block.configure.takes.elements[
tweak.parameter].validate(tweak.value)
setattr(params, tweak.parameter, deserialized)
self.log.debug(
"Tweaking %s to %s", tweak.parameter, deserialized)
else:
# Consistent set, just return the params
return params
raise ValueError("Could not get a consistent set of parameters") | def validate(self, generator, axesToMove=None, **kwargs) | Validate configuration parameters and return validated parameters.
Doesn't take device state into account so can be run in any state | 5.669981 | 5.618587 | 1.009147 |
# type: (AGenerator, AAxesToMove, **Any) -> None
params = self.validate(generator, axesToMove, **kwargs)
try:
self.transition(ss.CONFIGURING)
self.do_configure(params)
self.abortable_transition(ss.ARMED)
except AbortedError:
self.abort_queue.put(None)
raise
except Exception as e:
self.go_to_error_state(e)
raise | def configure(self, generator, axesToMove=None, **kwargs) | Validate the params then configure the device ready for run().
Try to prepare the device as much as possible so that run() is quick to
start, this may involve potentially long running activities like moving
motors.
Normally it will return in Armed state. If the user aborts then it will
return in Aborted state. If something goes wrong it will return in Fault
state. If the user disables then it will return in Disabled state. | 4.973125 | 5.603237 | 0.887545 |
# type: () -> None
if self.configured_steps.value < self.total_steps.value:
next_state = ss.ARMED
else:
next_state = ss.READY
try:
self.transition(ss.RUNNING)
hook = RunHook
going = True
while going:
try:
self.do_run(hook)
except AbortedError:
self.abort_queue.put(None)
# Wait for a response on the resume_queue
should_resume = self.resume_queue.get()
if should_resume:
# we need to resume
hook = ResumeHook
self.log.debug("Resuming run")
else:
# we don't need to resume, just drop out
raise
else:
going = False
self.abortable_transition(next_state)
except AbortedError:
raise
except Exception as e:
self.go_to_error_state(e)
raise | def run(self) | Run a device where configure() has already be called
Normally it will return in Ready state. If setup for multiple-runs with
a single configure() then it will return in Armed state. If the user
aborts then it will return in Aborted state. If something goes wrong it
will return in Fault state. If the user disables then it will return in
Disabled state. | 5.124503 | 5.010587 | 1.022735 |
# type: () -> None
# Tell _call_do_run not to resume
if self.resume_queue:
self.resume_queue.put(False)
self.try_aborting_function(ss.ABORTING, ss.ABORTED, self.do_abort) | def abort(self) | Abort the current operation and block until aborted
Normally it will return in Aborted state. If something goes wrong it
will return in Fault state. If the user disables then it will return in
Disabled state. | 11.556921 | 15.506559 | 0.745292 |
# type: (ALastGoodStep) -> None
current_state = self.state.value
if lastGoodStep <= 0:
last_good_step = self.completed_steps.value
else:
last_good_step = lastGoodStep
if current_state == ss.RUNNING:
next_state = ss.PAUSED
else:
next_state = current_state
assert last_good_step < self.total_steps.value, \
"Cannot seek to after the end of the scan"
self.try_aborting_function(
ss.SEEKING, next_state, self.do_pause, last_good_step) | def pause(self, lastGoodStep=0) | Pause a run() so that resume() can be called later, or seek within
an Armed or Paused state.
The original call to run() will not be interrupted by pause(), it will
wait until the scan completes or is aborted.
Normally it will return in Paused state. If the user aborts then it will
return in Aborted state. If something goes wrong it will return in Fault
state. If the user disables then it will return in Disabled state. | 5.003314 | 5.095275 | 0.981952 |
# type: () -> None
self.transition(ss.RUNNING)
self.resume_queue.put(True) | def resume(self) | Resume a paused scan.
Normally it will return in Running state. If something goes wrong it
will return in Fault state. | 12.276322 | 18.004383 | 0.681852 |
# type: (Callable) -> ConfigureParamsInfo
call_types = getattr(configure_func, "call_types",
{}) # type: Dict[str, Anno]
metas = OrderedDict()
required = []
defaults = OrderedDict()
for k, anno in call_types.items():
if k not in cls.call_types:
scls = VMeta.lookup_annotype_converter(anno)
metas[k] = scls.from_annotype(anno, writeable=True)
if anno.default is NO_DEFAULT:
required.append(k)
elif anno.default is not None:
defaults[k] = anno.default
return ConfigureParamsInfo(metas, required, defaults) | def create_info(cls, configure_func) | Create a `ConfigureParamsInfo` describing the extra parameters
that should be passed at configure | 4.72521 | 4.440326 | 1.064158 |
# type: (Type[Hook], Callable, Optional[Callable]) -> None
if self.hooked is None:
self.hooked = {}
if args_gen is None:
args_gen = getattr(func, "call_types", {}).keys
if not isinstance(hooks, Sequence):
hooks = [hooks]
for hook_cls in hooks:
self.hooked[hook_cls] = (func, args_gen) | def register_hooked(self,
hooks, # type: Union[Type[Hook], Sequence[Type[Hook]]]
func, # type: Hooked
args_gen=None # type: Optional[ArgsGen]
) | Register func to be run when any of the hooks are run by parent
Args:
hooks: A Hook class or list of Hook classes of interest
func: The callable that should be run on that Hook
args_gen: Optionally specify the argument names that should be
passed to func. If not given then use func.call_types.keys | 3.035137 | 2.845665 | 1.066583 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.