_id
stringlengths
2
7
title
stringlengths
1
88
partition
stringclasses
3 values
text
stringlengths
75
19.8k
language
stringclasses
1 value
meta_information
dict
q19000
OrderFulfillmentRecipient.display_name
train
def display_name(self, display_name):
    """
    Sets the display_name of this OrderFulfillmentRecipient.

    The display name of the fulfillment recipient. If provided, overrides the
    value from the customer profile indicated by customer_id.

    :param display_name: The display_name of this OrderFulfillmentRecipient.
    :type: str
    """
    # Collect the validation failure (if any) first, then raise once.
    error = None
    if display_name is None:
        error = "Invalid value for `display_name`, must not be `None`"
    elif len(display_name) > 255:
        error = "Invalid value for `display_name`, length must be less than `255`"
    if error is not None:
        raise ValueError(error)
    self._display_name = display_name
python
{ "resource": "" }
q19001
OrderFulfillmentRecipient.email_address
train
def email_address(self, email_address):
    """
    Sets the email_address of this OrderFulfillmentRecipient.

    The email address of the fulfillment recipient. If provided, overrides the
    value from the customer profile indicated by customer_id.

    :param email_address: The email_address of this OrderFulfillmentRecipient.
    :type: str
    """
    # Collect the validation failure (if any) first, then raise once.
    error = None
    if email_address is None:
        error = "Invalid value for `email_address`, must not be `None`"
    elif len(email_address) > 255:
        error = "Invalid value for `email_address`, length must be less than `255`"
    if error is not None:
        raise ValueError(error)
    self._email_address = email_address
python
{ "resource": "" }
q19002
OrderFulfillmentRecipient.phone_number
train
def phone_number(self, phone_number):
    """
    Sets the phone_number of this OrderFulfillmentRecipient.

    The phone number of the fulfillment recipient. If provided, overrides the
    value from the customer profile indicated by customer_id.

    :param phone_number: The phone_number of this OrderFulfillmentRecipient.
    :type: str
    """
    # Collect the validation failure (if any) first, then raise once.
    error = None
    if phone_number is None:
        error = "Invalid value for `phone_number`, must not be `None`"
    elif len(phone_number) > 16:
        error = "Invalid value for `phone_number`, length must be less than `16`"
    if error is not None:
        raise ValueError(error)
    self._phone_number = phone_number
python
{ "resource": "" }
q19003
Money.amount
train
def amount(self, amount):
    """
    Sets the amount of this Money.

    The amount of money, in the smallest denomination of the currency indicated
    by `currency`. For example, when `currency` is `USD`, `amount` is in cents.

    :param amount: The amount of this Money.
    :type: int
    """
    # Collect the validation failure (if any) first, then raise once.
    error = None
    if amount is None:
        error = "Invalid value for `amount`, must not be `None`"
    elif amount < 0:
        error = "Invalid value for `amount`, must be a value greater than or equal to `0`"
    if error is not None:
        raise ValueError(error)
    self._amount = amount
python
{ "resource": "" }
q19004
ApiClient.to_path_value
train
def to_path_value(self, obj):
    """
    Takes value and turns it into a string suitable for inclusion in the URL
    path, by stringification.

    :param obj: object, string, or list value.
    :return string: value rendered for a path segment; lists become CSV.
    """
    # isinstance() instead of `type(obj) == list` (the idiomatic type check).
    if isinstance(obj, list):
        # str() each element so lists of non-strings (ints, etc.) no longer
        # crash join(); string lists behave exactly as before.
        return ','.join(str(item) for item in obj)
    return str(obj)
python
{ "resource": "" }
q19005
ApiClient.deserialize
train
def deserialize(self, response, response_type): """ Deserializes response into an object. :param response: RESTResponse object to be deserialized. :param response_type: class literal for deserialzied object, or string of class name. :return: deserialized object. """ # handle file downloading # save response body into a tmp file and return the instance if "file" == response_type: return self.__deserialize_file(response) # fetch data from response object try: data = json.loads(response.data) except ValueError: data = response.data return self.__deserialize(data, response_type)
python
{ "resource": "" }
q19006
Balancer._align_ast
train
def _align_ast(self, a):
    """
    Aligns the AST so that the argument with the highest cardinality is on the left.

    :param a: The AST to align.
    :return: a new AST.
    """
    try:
        if isinstance(a, BV):
            # bitvector: delegate to the BV-specific alignment
            return self._align_bv(a)
        elif isinstance(a, Bool) and len(a.args) == 2 and a.args[1].cardinality > a.args[0].cardinality:
            # binary boolean with the multivalued operand on the right: flip it
            return self._reverse_comparison(a)
        else:
            return a
    except ClaripyBalancerError:
        # alignment is best-effort; on failure return the AST unchanged
        return a
python
{ "resource": "" }
q19007
Balancer._doit
train
def _doit(self):
    """
    This function processes the list of truisms and finds bounds for ASTs.
    """
    while len(self._truisms):
        truism = self._truisms.pop()

        if truism in self._processed_truisms:
            # already handled on a previous iteration
            continue

        unpacked_truisms = self._unpack_truisms(truism)
        self._processed_truisms.add(truism)
        if len(unpacked_truisms):
            # composite truism: queue its components instead and move on
            self._queue_truisms(unpacked_truisms, check_true=True)
            continue

        if not self._handleable_truism(truism):
            continue

        truism = self._adjust_truism(truism)

        # queue any assumptions implied by this truism, each only once
        assumptions = self._get_assumptions(truism)
        if truism not in self._identified_assumptions and len(assumptions):
            l.debug("Queued assumptions %s for truism %s.", assumptions, truism)
            self._truisms.extend(assumptions)
            self._identified_assumptions.update(assumptions)

        l.debug("Processing truism %s", truism)
        balanced_truism = self._balance(truism)
        l.debug("... handling")
        self._handle(balanced_truism)
python
{ "resource": "" }
q19008
Balancer._handleable_truism
train
def _handleable_truism(t):
    """
    Checks whether we can handle this truism. The truism should already be aligned.

    :param t: The truism AST to check.
    :return: True if the truism is handleable, False otherwise.
    """
    if len(t.args) < 2:
        # A unop bool carries no comparison we can bound.
        l.debug("can't do anything with an unop bool")
        # Previously this branch fell through and returned an implicit None;
        # make the rejection explicit (same truthiness for all callers).
        return False
    elif t.args[0].cardinality > 1 and t.args[1].cardinality > 1:
        l.debug("can't do anything because we have multiple multivalued guys")
        return False
    else:
        return True
python
{ "resource": "" }
q19009
Balancer._adjust_truism
train
def _adjust_truism(t):
    """
    Swap the operands of the truism if the unknown variable is on the right side
    and the concrete value is on the left side.

    :param t: The truism to adjust.
    :return: The (possibly swapped) truism.
    """
    if t.args[0].cardinality == 1 and t.args[1].cardinality > 1:
        # concrete on the left, multivalued on the right: reverse the comparison
        swapped = Balancer._reverse_comparison(t)
        return swapped
    return t
python
{ "resource": "" }
q19010
Balancer._handle_comparison
train
def _handle_comparison(self, truism): """ Handles all comparisons. """ # print("COMP:", truism) is_lt, is_equal, is_unsigned = self.comparison_info[truism.op] size = len(truism.args[0]) int_max = 2**size-1 if is_unsigned else 2**(size-1)-1 int_min = -2**(size-1) left_min = self._min(truism.args[0], signed=not is_unsigned) left_max = self._max(truism.args[0], signed=not is_unsigned) right_min = self._min(truism.args[1], signed=not is_unsigned) right_max = self._max(truism.args[1], signed=not is_unsigned) bound_max = right_max if is_equal else (right_max-1 if is_lt else right_max+1) bound_min = right_min if is_equal else (right_min-1 if is_lt else right_min+1) if is_lt and bound_max < int_min: # if the bound max is negative and we're unsigned less than, we're fucked raise ClaripyBalancerUnsatError() elif not is_lt and bound_min > int_max: # if the bound min is too big, we're fucked raise ClaripyBalancerUnsatError() current_min = int_min current_max = int_max if is_lt: current_max = min(int_max, left_max, bound_max) self._add_upper_bound(truism.args[0], current_max) else: current_min = max(int_min, left_min, bound_min) self._add_lower_bound(truism.args[0], current_min)
python
{ "resource": "" }
q19011
SMTParser.consume_assignment_list
train
def consume_assignment_list(self):
    """Parses a list of assignment expressions from the tokens."""
    # The docstring above was previously placed after the first two expect()
    # calls, which made it a no-op string literal instead of a docstring.
    self.expect('(')
    self.expect('model')
    assignments = []
    while True:
        next_token = self.tokens.consume()
        self.tokens.add_extra_token(next_token)  # push it back for expect_assignment_tuple()
        if next_token == ')':
            break
        assignments.append(self.expect_assignment_tuple())
    self.expect(')')
    return assignments
python
{ "resource": "" }
q19012
ValueSet.copy
train
def copy(self):
    """
    Make a copy of self and return.

    :return: A new ValueSet object.
    :rtype: ValueSet
    """
    vs = ValueSet(bits=self.bits)
    # shallow-copy the region containers; the underlying SI is copied below
    vs._regions = self._regions.copy()
    vs._region_base_addrs = self._region_base_addrs.copy()
    vs._reversed = self._reversed
    vs._si = self._si.copy()
    return vs
python
{ "resource": "" }
q19013
ValueSet.apply_annotation
train
def apply_annotation(self, annotation):
    """
    Apply a new annotation onto self, and return a new ValueSet object.

    :param RegionAnnotation annotation: The annotation to apply.
    :return: A new ValueSet object
    :rtype: ValueSet
    """
    # Work on a copy so self stays untouched, then fold the region in.
    duplicate = self.copy()
    duplicate._merge_si(annotation.region_id, annotation.region_base_addr, annotation.offset)
    return duplicate
python
{ "resource": "" }
q19014
ValueSet.min
train
def min(self):
    """
    The minimum integer value of a value-set. It is only defined when there is
    exactly one region.

    :return: An integer that represents the minimum integer value of this value-set.
    :rtype: int
    """
    if len(self.regions) != 1:
        # fixed typo in the error message ("onlly" -> "only")
        raise ClaripyVSAOperationError("'min()' only works on single-region value-sets.")

    return self.get_si(next(iter(self.regions))).min
python
{ "resource": "" }
q19015
ValueSet.max
train
def max(self):
    """
    The maximum integer value of a value-set. It is only defined when there is
    exactly one region.

    :return: An integer that represents the maximum integer value of this value-set.
    :rtype: int
    """
    if len(self.regions) != 1:
        # fixed typo in the error message ("onlly" -> "only")
        raise ClaripyVSAOperationError("'max()' only works on single-region value-sets.")

    return self.get_si(next(iter(self.regions))).max
python
{ "resource": "" }
q19016
ValueSet.identical
train
def identical(self, o):
    """
    Used to make exact comparisons between two ValueSets.

    :param o: The other ValueSet to compare with.
    :return: True if they are exactly same, False otherwise.
    """
    if self._reversed != o._reversed:
        return False

    # Both value-sets must cover exactly the same regions. Without this check,
    # a value-set whose regions are a strict subset of `o`'s regions would
    # incorrectly compare as identical.
    if len(self.regions) != len(o.regions):
        return False

    for region, si in self.regions.items():
        if region in o.regions:
            o_si = o.regions[region]
            if not si.identical(o_si):
                return False
        else:
            return False

    return True
python
{ "resource": "" }
q19017
Frontend.eval_to_ast
train
def eval_to_ast(self, e, n, extra_constraints=(), exact=None):
    """
    Evaluates expression e, returning the results in the form of concrete ASTs.

    :param e: The expression (AST) to evaluate.
    :param n: The maximum number of solutions to return.
    :param extra_constraints: Extra constraints (as ASTs) to add for this solve.
    :param exact: If False, approximate solutions are acceptable.
    :return: A list of BVV ASTs, one per concrete solution, each of e's size.
    """
    # wrap each concrete solution in a bitvector value AST of e's width
    return [ ast.bv.BVV(v, e.size()) for v in self.eval(e, n, extra_constraints=extra_constraints, exact=exact) ]
python
{ "resource": "" }
q19018
Frontend._split_constraints
train
def _split_constraints(constraints, concrete=True):
    """
    Returns independent constraints, split from this Frontend's `constraints`.
    """
    # break top-level Ands into their conjuncts
    splitted = [ ]
    for i in constraints:
        splitted.extend(i.split(['And']))

    l.debug("... splitted of size %d", len(splitted))

    concrete_constraints = [ ]
    variable_connections = { }
    constraint_connections = { }
    for n,s in enumerate(splitted):
        l.debug("... processing constraint with %d variables", len(s.variables))

        connected_variables = set(s.variables)
        connected_constraints = { n }

        if len(connected_variables) == 0:
            # variable-free constraint: track it separately
            concrete_constraints.append(s)

        # union this constraint's connectivity with anything already seen
        for v in s.variables:
            if v in variable_connections:
                connected_variables |= variable_connections[v]
            if v in constraint_connections:
                connected_constraints |= constraint_connections[v]

        # point every connected variable at the merged sets
        for v in connected_variables:
            variable_connections[v] = connected_variables
            constraint_connections[v] = connected_constraints

    # deduplicate the connected components
    unique_constraint_sets = set()
    for v in variable_connections:
        unique_constraint_sets.add((frozenset(variable_connections[v]), frozenset(constraint_connections[v])))

    results = [ ]
    for v,c_indexes in unique_constraint_sets:
        results.append((set(v), [ splitted[c] for c in c_indexes ]))

    # optionally emit the variable-free constraints as their own group
    if concrete and len(concrete_constraints) > 0:
        results.append(({ 'CONCRETE' }, concrete_constraints))

    return results
python
{ "resource": "" }
q19019
constraint_to_si
train
def constraint_to_si(expr):
    """
    Convert a constraint to SI if possible.

    :param expr: The constraint AST to convert.
    :return: A (satisfiable, replace_list) tuple, where replace_list maps
             original ASTs to their SI-backed replacements.
    """
    # (removed dead `satisfiable = True` / `replace_list = [ ]` initializers
    # that were immediately overwritten by this call)
    satisfiable, replace_list = backends.vsa.constraint_to_si(expr)

    # Make sure the replacements are all claripy ASTs (instances of Base)
    for i in range(len(replace_list)):  # range(), not the Python-2-only xrange()
        ori, new = replace_list[i]
        if not isinstance(new, Base):
            new = BVS(new.name, new._bits, min=new._lower_bound, max=new._upper_bound,
                      stride=new._stride, explicit_name=True)
            replace_list[i] = (ori, new)

    return satisfiable, replace_list
python
{ "resource": "" }
q19020
Backend._make_expr_ops
train
def _make_expr_ops(self, op_list, op_dict=None, op_class=None): """ Fill up `self._op_expr` dict. :param op_list: A list of operation names. :param op_dict: A dictionary of operation methods. :param op_class: Where the operation method comes from. :return: """ for o in op_list: if op_dict is not None: if o in op_dict: self._op_expr[o] = op_dict[o] else: l.warning("Operation %s not in op_dict.", o) else: if hasattr(op_class, o): self._op_expr[o] = getattr(op_class, o) else: l.warning("Operation %s not in op_class %s.", o, op_class)
python
{ "resource": "" }
q19021
Backend.downsize
train
def downsize(self):
    """
    Clears all caches associated with this backend.
    """
    # Drop the converted-object, is-true, and is-false memoization in one sweep.
    for cache in (self._object_cache, self._true_cache, self._false_cache):
        cache.clear()
python
{ "resource": "" }
q19022
Backend.convert
train
def convert(self, expr): #pylint:disable=R0201
    """
    Resolves a claripy.ast.Base into something usable by the backend.

    Implemented as an explicit-stack, post-order traversal of the AST (rather
    than recursion) to avoid hitting Python's recursion limit on deep trees.

    :param expr: The expression.
    :param save: Save the result in the expression's object cache
    :return: A backend object.
    """
    # ast_queue holds per-node child lists still to visit; arg_queue holds
    # converted children awaiting their parent; op_queue holds parents whose
    # children are being processed.
    ast_queue = [[expr]]
    arg_queue = []
    op_queue = []

    try:
        while ast_queue:
            args_list = ast_queue[-1]

            if args_list:
                ast = args_list.pop(0)

                if type(ast) in {bool, int, str, float} or not isinstance(ast, Base):
                    # leaf primitive: convert directly
                    converted = self._convert(ast)
                    arg_queue.append(converted)
                    continue

                if self in ast._errored:
                    # this subtree already failed conversion for this backend
                    raise BackendError("%s can't handle operation %s (%s) due to a failed "
                                      "conversion on a child node" % (self, ast.op, ast.__class__.__name__))

                if self._cache_objects:
                    cached_obj = self._object_cache.get(ast._cache_key, None)
                    if cached_obj is not None:
                        arg_queue.append(cached_obj)
                        continue

                op_queue.append(ast)
                if ast.op in self._op_expr:
                    # whole-expression handler: no need to convert children
                    ast_queue.append(None)
                else:
                    ast_queue.append(list(ast.args))
            else:
                # finished this node's children; pop it and combine results
                ast_queue.pop()

                if op_queue:
                    ast = op_queue.pop()

                    op = self._op_expr.get(ast.op, None)
                    if op is not None:
                        r = op(ast)
                    else:
                        # consume the converted children from the tail of arg_queue
                        args = arg_queue[-len(ast.args):]
                        del arg_queue[-len(ast.args):]
                        try:
                            r = self._call(ast.op, args)
                        except BackendUnsupportedError:
                            r = self.default_op(ast)

                    for a in ast.annotations:
                        r = self.apply_annotation(r, a)

                    if self._cache_objects:
                        self._object_cache[ast._cache_key] = r

                    arg_queue.append(r)

    except (RuntimeError, ctypes.ArgumentError) as e:
        raise ClaripyRecursionError("Recursion limit reached. Sorry about that.") from e

    except BackendError:
        # mark every in-flight node (and the root) as failed for this backend
        for ast in op_queue:
            ast._errored.add(self)
        if isinstance(expr, Base):
            expr._errored.add(self)
        raise

    # Note: Uncomment the following assertions if you are touching the above implementation
    # assert len(op_queue) == 0, "op_queue is not empty"
    # assert len(ast_queue) == 0, "ast_queue is not empty"
    # assert len(arg_queue) == 1, ("arg_queue has unexpected length", len(arg_queue))

    return arg_queue.pop()
python
{ "resource": "" }
q19023
Backend.call
train
def call(self, op, args):
    """
    Calls operation `op` on args `args` with this backend.

    :return: A backend object representing the result.
    """
    # Convert the claripy ASTs to backend objects, then dispatch in one step.
    return self._call(op, self.convert_list(args))
python
{ "resource": "" }
q19024
Backend.is_true
train
def is_true(self, e, extra_constraints=(), solver=None, model_callback=None): #pylint:disable=unused-argument
    """
    Should return True if `e` can be easily found to be True.

    :param e: The AST.
    :param extra_constraints: Extra constraints (as ASTs) to add to the solver for this solve.
    :param solver: A solver, for backends that require it.
    :param model_callback: a function that will be executed with recovered models (if any)
    :returns: A boolean.
    """
    #if self._solver_required and solver is None:
    # raise BackendError("%s requires a solver for evaluation" % self.__class__.__name__)

    if not isinstance(e, Base):
        # non-AST input: convert and evaluate directly, no caching
        return self._is_true(self.convert(e), extra_constraints=extra_constraints, solver=solver, model_callback=model_callback)

    try:
        # EAFP: hit the per-expression cache first
        return self._true_cache[e.cache_key]
    except KeyError:
        t = self._is_true(self.convert(e), extra_constraints=extra_constraints, solver=solver, model_callback=model_callback)
        self._true_cache[e.cache_key] = t
        if t is True:
            # definitely true implies definitely not false; populate the other cache
            self._false_cache[e.cache_key] = False
        return t
python
{ "resource": "" }
q19025
Backend.is_false
train
def is_false(self, e, extra_constraints=(), solver=None, model_callback=None): #pylint:disable=unused-argument
    """
    Should return True if e can be easily found to be False.

    :param e: The AST
    :param extra_constraints: Extra constraints (as ASTs) to add to the solver for this solve.
    :param solver: A solver, for backends that require it
    :param model_callback: a function that will be executed with recovered models (if any)
    :return: A boolean.
    """
    #if self._solver_required and solver is None:
    # raise BackendError("%s requires a solver for evaluation" % self.__class__.__name__)

    if not isinstance(e, Base):
        # non-AST input: convert and evaluate directly, no caching
        return self._is_false(self.convert(e), extra_constraints=extra_constraints, solver=solver, model_callback=model_callback)

    try:
        # EAFP: hit the per-expression cache first
        return self._false_cache[e.cache_key]
    except KeyError:
        f = self._is_false(self.convert(e), extra_constraints=extra_constraints, solver=solver, model_callback=model_callback)
        self._false_cache[e.cache_key] = f
        if f is True:
            # definitely false implies definitely not true; populate the other cache
            self._true_cache[e.cache_key] = False
        return f
python
{ "resource": "" }
q19026
Backend.has_true
train
def has_true(self, e, extra_constraints=(), solver=None, model_callback=None): #pylint:disable=unused-argument
    """
    Should return True if `e` can possibly be True.

    :param e: The AST.
    :param extra_constraints: Extra constraints (as ASTs) to add to the solver for this solve.
    :param solver: A solver, for backends that require it.
    :param model_callback: a function that will be executed with recovered models (if any)
    :return: A boolean
    """
    # Convert first, then delegate to the backend-specific check.
    converted = self.convert(e)
    return self._has_true(converted, extra_constraints=extra_constraints, solver=solver, model_callback=model_callback)
python
{ "resource": "" }
q19027
Backend.has_false
train
def has_false(self, e, extra_constraints=(), solver=None, model_callback=None): #pylint:disable=unused-argument
    """
    Should return False if `e` can possibly be False.

    :param e: The AST.
    :param extra_constraints: Extra constraints (as ASTs) to add to the solver for this solve.
    :param solver: A solver, for backends that require it.
    :param model_callback: a function that will be executed with recovered models (if any)
    :return: A boolean.
    """
    # Convert first, then delegate to the backend-specific check.
    converted = self.convert(e)
    return self._has_false(converted, extra_constraints=extra_constraints, solver=solver, model_callback=model_callback)
python
{ "resource": "" }
q19028
Backend.add
train
def add(self, s, c, track=False):
    """
    This function adds constraints to the backend solver.

    :param s: A backend solver object
    :param c: A sequence of ASTs
    :param bool track: True to enable constraint tracking, which is used in unsat_core()
    """
    # Convert the constraint ASTs, then hand them to the solver.
    converted = self.convert_list(c)
    return self._add(s, converted, track=track)
python
{ "resource": "" }
q19029
Backend.batch_eval
train
def batch_eval(self, exprs, n, extra_constraints=(), solver=None, model_callback=None):
    """
    Evaluate one or multiple expressions.

    :param exprs: A list of expressions to evaluate.
    :param n: Number of different solutions to return.
    :param extra_constraints: Extra constraints (as ASTs) to add to the solver for this solve.
    :param solver: A solver object, native to the backend, to assist in the evaluation.
    :param model_callback: a function that will be executed with recovered models (if any)
    :return: A list of up to n tuples, where each tuple is a solution for all expressions.
    """
    if self._solver_required and solver is None:
        raise BackendError("%s requires a solver for batch evaluation" % self.__class__.__name__)

    # Convert each expression up front, then dispatch the whole batch.
    converted = list(map(self.convert, exprs))
    return self._batch_eval(
        converted, n,
        extra_constraints=self.convert_list(extra_constraints),
        solver=solver,
        model_callback=model_callback,
    )
python
{ "resource": "" }
q19030
Backend.min
train
def min(self, expr, extra_constraints=(), solver=None, model_callback=None):
    """
    Return the minimum value of `expr`.

    :param expr: expression (an AST) to evaluate
    :param solver: a solver object, native to the backend, to assist in the evaluation
                   (for example, a z3.Solver)
    :param extra_constraints: extra constraints (as ASTs) to add to the solver for this solve
    :param model_callback: a function that will be executed with recovered models (if any)
    :return: the minimum possible value of expr (backend object)
    """
    if self._solver_required and solver is None:
        raise BackendError("%s requires a solver for evaluation" % self.__class__.__name__)

    # Convert the expression and the extra constraints, then dispatch.
    converted_expr = self.convert(expr)
    converted_extra = self.convert_list(extra_constraints)
    return self._min(converted_expr, extra_constraints=converted_extra, solver=solver, model_callback=model_callback)
python
{ "resource": "" }
q19031
Backend.max
train
def max(self, expr, extra_constraints=(), solver=None, model_callback=None):
    """
    Return the maximum value of expr.

    :param expr: expression (an AST) to evaluate
    :param solver: a solver object, native to the backend, to assist in the evaluation
                   (for example, a z3.Solver)
    :param extra_constraints: extra constraints (as ASTs) to add to the solver for this solve
    :param model_callback: a function that will be executed with recovered models (if any)
    :return: the maximum possible value of expr (backend object)
    """
    if self._solver_required and solver is None:
        raise BackendError("%s requires a solver for evaluation" % self.__class__.__name__)

    # Convert the expression and the extra constraints, then dispatch.
    converted_expr = self.convert(expr)
    converted_extra = self.convert_list(extra_constraints)
    return self._max(converted_expr, extra_constraints=converted_extra, solver=solver, model_callback=model_callback)
python
{ "resource": "" }
q19032
Backend.identical
train
def identical(self, a, b):
    """
    This should return whether `a` is identical to `b`. Of course, this isn't always
    clear. True should mean that it is definitely identical. False means that,
    conservatively, it might not be.

    :param a: an AST
    :param b: another AST
    """
    # Convert both sides to backend objects before comparing.
    converted_a = self.convert(a)
    converted_b = self.convert(b)
    return self._identical(converted_a, converted_b)
python
{ "resource": "" }
q19033
WarrenMethods.min_or
train
def min_or(a, b, c, d, w):
    """
    Lower bound of the OR of two intervals [a, b] and [c, d] (Warren's minOR,
    Hacker's Delight section 4-3).

    :param a: Lower bound of first interval
    :param b: Upper bound of first interval
    :param c: Lower bound of second interval
    :param d: Upper bound of second interval
    :param w: bit width
    :return: Lower bound of ORing 2-intervals
    """
    # Scan from the most significant bit down, looking for a bit where one
    # side is 0 and the other is 1; raising the 0 side to that bit (and
    # clearing everything below) may produce a smaller OR.
    mask = 1 << (w - 1)
    while mask:
        if (~a & c & mask) != 0:
            candidate = (a | mask) & -mask
            if candidate <= b:
                a = candidate
                break
        elif (a & ~c & mask) != 0:
            candidate = (c | mask) & -mask
            if candidate <= d:
                c = candidate
                break
        mask >>= 1
    return a | c
python
{ "resource": "" }
q19034
WarrenMethods.max_or
train
def max_or(a, b, c, d, w):
    """
    Upper bound of the OR of two intervals [a, b] and [c, d] (Warren's maxOR,
    Hacker's Delight section 4-3).

    :param a: Lower bound of first interval
    :param b: Upper bound of first interval
    :param c: Lower bound of second interval
    :param d: Upper bound of second interval
    :param w: bit width
    :return: Upper bound of ORing 2-intervals
    """
    # Scan from the most significant bit down for a bit set in both upper
    # bounds; lowering one of them to all-ones below that bit may raise the OR.
    mask = 1 << (w - 1)
    while mask:
        if (b & d & mask) != 0:
            candidate = (b - mask) | (mask - 1)
            if candidate >= a:
                b = candidate
                break
            candidate = (d - mask) | (mask - 1)
            if candidate >= c:
                d = candidate
                break
        mask >>= 1
    return b | d
python
{ "resource": "" }
q19035
WarrenMethods.min_and
train
def min_and(a, b, c, d, w):
    """
    Lower bound of the AND of two intervals [a, b] and [c, d] (Warren's minAND,
    Hacker's Delight section 4-3).

    :param a: Lower bound of first interval
    :param b: Upper bound of first interval
    :param c: Lower bound of second interval
    :param d: Upper bound of second interval
    :param w: bit width
    :return: Lower bound of ANDing 2-intervals
    """
    # Scan from the most significant bit down for a bit clear in both lower
    # bounds; raising one of them to that bit (clearing below) may lower the AND.
    mask = 1 << (w - 1)
    while mask:
        if (~a & ~c & mask) != 0:
            candidate = (a | mask) & -mask
            if candidate <= b:
                a = candidate
                break
            candidate = (c | mask) & -mask
            if candidate <= d:
                c = candidate
                break
        mask >>= 1
    return a & c
python
{ "resource": "" }
q19036
WarrenMethods.max_and
train
def max_and(a, b, c, d, w):
    """
    Upper bound of the AND of two intervals [a, b] and [c, d] (Warren's maxAND,
    Hacker's Delight section 4-3).

    :param a: Lower bound of first interval
    :param b: Upper bound of first interval
    :param c: Lower bound of second interval
    :param d: Upper bound of second interval
    :param w: bit width
    :return: Upper bound of ANDing 2-intervals
    """
    # Scan from the most significant bit down, looking for a bit set in one
    # upper bound but not the other; clearing it (setting everything below)
    # may raise the AND.
    mask = 1 << (w - 1)
    while mask:
        if (~d & b & mask) != 0:
            candidate = (b & ~mask) | (mask - 1)
            if candidate >= a:
                b = candidate
                break
        elif (d & ~b & mask) != 0:
            candidate = (d & ~mask) | (mask - 1)
            if candidate >= c:
                d = candidate
                break
        mask >>= 1
    return b & d
python
{ "resource": "" }
q19037
WarrenMethods.min_xor
train
def min_xor(a, b, c, d, w):
    """
    Lower bound of the XOR of two intervals [a, b] and [c, d] (Warren's minXOR,
    Hacker's Delight section 4-3).

    :param a: Lower bound of first interval
    :param b: Upper bound of first interval
    :param c: Lower bound of second interval
    :param d: Upper bound of second interval
    :param w: bit width
    :return: Lower bound of XORing 2-intervals
    """
    # Scan every bit from the most significant down; where exactly one lower
    # bound has the bit set, try raising the other side to match (no early
    # exit — each bit position is considered independently).
    mask = 1 << (w - 1)
    while mask:
        if (~a & c & mask) != 0:
            candidate = (a | mask) & -mask
            if candidate <= b:
                a = candidate
        elif (a & ~c & mask) != 0:
            candidate = (c | mask) & -mask
            if candidate <= d:
                c = candidate
        mask >>= 1
    return a ^ c
python
{ "resource": "" }
q19038
WarrenMethods.max_xor
train
def max_xor(a, b, c, d, w):
    """
    Upper bound of the XOR of two intervals [a, b] and [c, d] (Warren's maxXOR,
    Hacker's Delight section 4-3).

    :param a: Lower bound of first interval
    :param b: Upper bound of first interval
    :param c: Lower bound of second interval
    :param d: Upper bound of second interval
    :param w: bit width
    :return: Upper bound of XORing 2-intervals
    """
    # Scan every bit from the most significant down; where both upper bounds
    # share a set bit, lower one of them so the XOR can keep that bit set.
    mask = 1 << (w - 1)
    while mask:
        if (b & d & mask) != 0:
            candidate = (b - mask) | (mask - 1)
            if candidate >= a:
                b = candidate
            else:
                candidate = (d - mask) | (mask - 1)
                if candidate >= c:
                    d = candidate
        mask >>= 1
    return b ^ d
python
{ "resource": "" }
q19039
StridedInterval.eval
train
def eval(self, n, signed=False):
    """
    Evaluate this StridedInterval to obtain a list of concrete integers.

    :param n: Upper bound for the number of concrete integers
    :param signed: Treat this StridedInterval as signed or unsigned
    :return: A list of at most `n` concrete integers
    """
    if self.is_empty:
        # no value is available
        return [ ]

    if self._reversed:
        # evaluate the un-reversed form instead
        return self._reverse().eval(n, signed=signed)

    results = [ ]

    if self.stride == 0 and n > 0:
        # a single concrete value
        results.append(self.lower_bound)
    else:
        if signed:
            # View it as a signed integer
            bounds = self._signed_bounds()
        else:
            # View it as an unsigned integer
            bounds = self._unsigned_bounds()

        # walk each contiguous range in stride-sized steps, up to n values
        for lb, ub in bounds:
            while len(results) < n and lb <= ub:
                results.append(lb)
                lb += self.stride # It will not overflow

    return results
python
{ "resource": "" }
q19040
StridedInterval._nsplit
train
def _nsplit(self):
    """
    Split `self` at the north pole, which is the same as in signed arithmetic.

    :return: A list of split StridedIntervals
    """
    north_pole_left = self.max_int(self.bits - 1) # 01111...1
    north_pole_right = 2 ** (self.bits - 1) # 1000...0

    # Is `self` straddling the north pole?
    straddling = False
    if self.upper_bound >= north_pole_right:
        if self.lower_bound > self.upper_bound:
            # Yes it does!
            straddling = True
        elif self.lower_bound <= north_pole_left:
            straddling = True
    else:
        # wrapped-around interval that starts in the lower hemisphere
        if self.lower_bound > self.upper_bound and self.lower_bound <= north_pole_left:
            straddling = True

    if straddling:
        # cut at the largest stride-aligned value not past the north pole
        a_upper_bound = north_pole_left - ((north_pole_left - self.lower_bound) % self.stride)
        a = StridedInterval(bits=self.bits, stride=self.stride, lower_bound=self.lower_bound,
                            upper_bound=a_upper_bound, uninitialized=self.uninitialized)

        b_lower_bound = a_upper_bound + self.stride
        b = StridedInterval(bits=self.bits, stride=self.stride, lower_bound=b_lower_bound,
                            upper_bound=self.upper_bound, uninitialized=self.uninitialized)

        return [ a, b ]
    else:
        return [ self.copy() ]
python
{ "resource": "" }
q19041
StridedInterval._psplit
train
def _psplit(self): """ Split `self` at both north and south poles. :return: A list of split StridedIntervals """ nsplit_list = self._nsplit() psplit_list = [ ] for si in nsplit_list: psplit_list.extend(si._ssplit()) return psplit_list
python
{ "resource": "" }
q19042
StridedInterval._signed_bounds
train
def _signed_bounds(self):
    """
    Get lower bound and upper bound for `self` in signed arithmetic.

    :return: a list of (lower_bound, upper_bound) tuples
    """
    nsplit = self._nsplit()
    if len(nsplit) == 1:
        # does not straddle the north pole: convert both endpoints
        lb = nsplit[0].lower_bound
        ub = nsplit[0].upper_bound

        lb = self._unsigned_to_signed(lb, self.bits)
        ub = self._unsigned_to_signed(ub, self.bits)

        return [(lb, ub)]

    elif len(nsplit) == 2:
        # nsplit[0] is on the left hemisphere, and nsplit[1] is on the right hemisphere

        # The left one
        lb_1 = nsplit[0].lower_bound
        ub_1 = nsplit[0].upper_bound

        # The right one
        lb_2 = nsplit[1].lower_bound
        ub_2 = nsplit[1].upper_bound
        # Then convert them to negative numbers
        lb_2 = self._unsigned_to_signed(lb_2, self.bits)
        ub_2 = self._unsigned_to_signed(ub_2, self.bits)

        return [ (lb_1, ub_1), (lb_2, ub_2) ]

    else:
        raise Exception('WTF')
python
{ "resource": "" }
q19043
StridedInterval._unsigned_bounds
train
def _unsigned_bounds(self): """ Get lower bound and upper bound for `self` in unsigned arithmetic. :return: a list of (lower_bound, upper_bound) tuples. """ ssplit = self._ssplit() if len(ssplit) == 1: lb = ssplit[0].lower_bound ub = ssplit[0].upper_bound return [ (lb, ub) ] elif len(ssplit) == 2: # ssplit[0] is on the left hemisphere, and ssplit[1] is on the right hemisphere lb_1 = ssplit[0].lower_bound ub_1 = ssplit[0].upper_bound lb_2 = ssplit[1].lower_bound ub_2 = ssplit[1].upper_bound return [ (lb_1, ub_1), (lb_2, ub_2) ] else: raise Exception('WTF')
python
{ "resource": "" }
q19044
StridedInterval._rshift_logical
train
def _rshift_logical(self, shift_amount):
    """
    Logical shift right with a concrete shift amount

    :param int shift_amount: Number of bits to shift right.
    :return: The new StridedInterval after right shifting
    :rtype: StridedInterval
    """
    if self.is_empty:
        return self

    # If straddling the south pole, we'll have to split it into two, perform logical right shift on them
    # individually, then union the result back together for better precision. Note that it's an improvement from
    # the original WrappedIntervals paper.
    ssplit = self._ssplit()
    if len(ssplit) == 1:
        l = self.lower_bound >> shift_amount
        u = self.upper_bound >> shift_amount
        # the stride shrinks with the shift but can never drop below 1
        stride = max(self.stride >> shift_amount, 1)

        return StridedInterval(bits=self.bits,
                               lower_bound=l,
                               upper_bound=u,
                               stride=stride,
                               uninitialized=self.uninitialized
                               )
    else:
        # shift each hemisphere separately and union the results
        a = ssplit[0]._rshift_logical(shift_amount)
        b = ssplit[1]._rshift_logical(shift_amount)

        return a.union(b)
python
{ "resource": "" }
q19045
StridedInterval._rshift_arithmetic
train
def _rshift_arithmetic(self, shift_amount):
    """
    Arithmetic shift right with a concrete shift amount

    :param int shift_amount: Number of bits to shift right.
    :return: The new StridedInterval after right shifting
    :rtype: StridedInterval
    """
    if self.is_empty:
        return self

    # If straddling the north pole, we'll have to split it into two, perform arithmetic right shift on them
    # individually, then union the result back together for better precision. Note that it's an improvement from
    # the original WrappedIntervals paper.
    nsplit = self._nsplit()
    if len(nsplit) == 1:
        # preserve the highest bit :-)
        highest_bit_set = self.lower_bound > StridedInterval.signed_max_int(nsplit[0].bits)

        l = self.lower_bound >> shift_amount
        u = self.upper_bound >> shift_amount
        # the stride shrinks with the shift but can never drop below 1
        stride = max(self.stride >> shift_amount, 1)

        # mask of the sign-extension bits that the arithmetic shift fills in
        mask = ((2 ** shift_amount - 1) << (self.bits - shift_amount))

        if highest_bit_set:
            l = l | mask
            u = u | mask

        if l == u:
            # degenerate single-value interval
            stride = 0

        return StridedInterval(bits=self.bits,
                               lower_bound=l,
                               upper_bound=u,
                               stride=stride,
                               uninitialized=self.uninitialized
                               )
    else:
        # shift each hemisphere separately and union the results
        a = nsplit[0]._rshift_arithmetic(shift_amount)
        b = nsplit[1]._rshift_arithmetic(shift_amount)

        return a.union(b)
python
{ "resource": "" }
q19046
StridedInterval.identical
train
def identical(self, o):
    """
    Used to make exact comparisons between two StridedIntervals. Usually it is
    only used in test cases.

    :param o: The other StridedInterval to compare with.
    :return: True if they are exactly same, False otherwise.
    """
    # Compare all four defining fields at once.
    return (self.bits, self.stride, self.lower_bound, self.upper_bound) == \
           (o.bits, o.stride, o.lower_bound, o.upper_bound)
python
{ "resource": "" }
q19047
StridedInterval.SLT
train
def SLT(self, o):
    """
    Signed less-than comparison.

    :param o: The other operand
    :return: TrueResult(), FalseResult(), or MaybeResult()
    """
    outcomes = []
    for lb_1, ub_1 in self._signed_bounds():
        for lb_2, ub_2 in o._signed_bounds():
            if ub_1 < lb_2:
                # This sub-range lies entirely below the other one.
                outcomes.append(TrueResult())
            elif lb_1 >= ub_2:
                # This sub-range lies entirely at or above the other one.
                outcomes.append(FalseResult())
            else:
                outcomes.append(MaybeResult())

    if all(r.identical(TrueResult()) for r in outcomes):
        return TrueResult()
    if all(r.identical(FalseResult()) for r in outcomes):
        return FalseResult()
    return MaybeResult()
python
{ "resource": "" }
q19048
StridedInterval.ULT
train
def ULT(self, o):
    """
    Unsigned less-than comparison.

    :param o: The other operand
    :return: TrueResult(), FalseResult(), or MaybeResult()
    """
    outcomes = []
    for lb_1, ub_1 in self._unsigned_bounds():
        for lb_2, ub_2 in o._unsigned_bounds():
            if ub_1 < lb_2:
                # This sub-range lies entirely below the other one.
                outcomes.append(TrueResult())
            elif lb_1 >= ub_2:
                # This sub-range lies entirely at or above the other one.
                outcomes.append(FalseResult())
            else:
                outcomes.append(MaybeResult())

    if all(r.identical(TrueResult()) for r in outcomes):
        return TrueResult()
    if all(r.identical(FalseResult()) for r in outcomes):
        return FalseResult()
    return MaybeResult()
python
{ "resource": "" }
q19049
StridedInterval.complement
train
def complement(self): """ Return the complement of the interval Refer section 3.1 augmented for managing strides :return: """ # case 1 if self.is_empty: return StridedInterval.top(self.bits) # case 2 if self.is_top: return StridedInterval.empty(self.bits) # case 3 y_plus_1 = StridedInterval._modular_add(self.upper_bound, 1, self.bits) x_minus_1 = StridedInterval._modular_sub(self.lower_bound, 1, self.bits) # the new stride has to be the GCD between the old stride and the distance # between the new lower bound and the new upper bound. This assure that in # the new interval the boundaries are valid solution when the SI is # evaluated. dist = StridedInterval._wrapped_cardinality(y_plus_1, x_minus_1, self.bits) - 1 # the new SI is an integer if dist < 0: new_stride = 0 elif self._stride == 0: new_stride = 1 else: new_stride = fractions.gcd(self._stride, dist) return StridedInterval(lower_bound=y_plus_1, upper_bound=x_minus_1, bits=self.bits, stride=new_stride, uninitialized=self.uninitialized)
python
{ "resource": "" }
q19050
StridedInterval.is_top
train
def is_top(self):
    """
    Whether this StridedInterval is TOP: stride 1 and the upper bound's modular
    successor equals the lower bound (i.e. the interval wraps over every value).

    :return: True if this is a TOP
    """
    wrapped_successor = self._modular_add(self.upper_bound, 1, self.bits)
    return self.stride == 1 and self.lower_bound == wrapped_successor
python
{ "resource": "" }
q19051
StridedInterval._gap
train
def _gap(src_interval, tar_interval):
    """
    Refer section 3.1; gap function.

    :param src_interval: first argument or interval 1
    :param tar_interval: second argument or interval 2
    :return: Interval representing gap between two intervals
    """
    assert src_interval.bits == tar_interval.bits, "Number of bits should be same for operands"

    # use the same variable names as in paper
    s = src_interval
    t = tar_interval
    (_, b) = (s.lower_bound, s.upper_bound)
    (c, _) = (t.lower_bound, t.upper_bound)

    w = s.bits

    # case 1: s's upper bound is outside t and t's lower bound is outside s,
    # so a proper gap [c, b]'s complement exists between them.
    if (not t._surrounds_member(b)) and (not s._surrounds_member(c)):
        #FIXME: maybe we can do better here and to not fix the stride to 1
        #FIXME: found the first common integer for more precision
        # NOTE(review): `complement` is accessed without parentheses here; this is
        # only correct if `complement` is a property on StridedInterval — confirm
        # against the class definition.
        return StridedInterval(lower_bound=c, upper_bound=b, bits=w, stride=1).complement

    # otherwise: the intervals touch or overlap, so there is no gap
    return StridedInterval.empty(w)
python
{ "resource": "" }
q19052
StridedInterval.top
train
def top(bits, name=None, uninitialized=False):
    """
    Build the TOP StridedInterval of the given width: stride 1 covering the
    full unsigned range [0, 2**bits - 1].

    :param bits:          Width of the interval in bits.
    :param name:          Optional name for the new interval.
    :param uninitialized: Whether the value is marked uninitialized.
    :return:              The TOP StridedInterval.
    """
    full_range_max = StridedInterval.max_int(bits)
    return StridedInterval(name=name, bits=bits, stride=1,
                           lower_bound=0, upper_bound=full_range_max,
                           uninitialized=uninitialized)
python
{ "resource": "" }
q19053
StridedInterval._unsigned_to_signed
train
def _unsigned_to_signed(v, bits):
    """
    Convert an unsigned integer to a signed integer (two's complement).

    :param v:    The unsigned integer
    :param bits: How many bits this integer should be
    :return:     The converted signed integer
    """
    if StridedInterval._is_msb_zero(v, bits):
        # Sign bit clear: the value is non-negative and unchanged.
        return v
    # Sign bit set: subtract the modulus (equivalent to -(2 ** bits - v)).
    return v - (1 << bits)
python
{ "resource": "" }
q19054
StridedInterval._wrapped_overflow_add
train
def _wrapped_overflow_add(a, b):
    """
    Determines if an overflow happens during the addition of `a` and `b`.

    :param a: The first operand (StridedInterval)
    :param b: The other operand (StridedInterval)
    :return: True if overflows, False otherwise
    """
    if a.is_integer and a.lower_bound == 0:
        # Special case: if `a` or `b` is a zero
        # (the single value 0 contributes no wrap-around potential)
        card_self = 0
    else:
        card_self = StridedInterval._wrapped_cardinality(a.lower_bound, a.upper_bound, a.bits)
    if b.is_integer and b.lower_bound == 0:
        # Special case: if `a` or `b` is a zero
        card_b = 0
    else:
        card_b = StridedInterval._wrapped_cardinality(b.lower_bound, b.upper_bound, b.bits)
    # Overflow iff the combined cardinality exceeds the number of representable
    # values, i.e. max_int(bits) + 1 == 2 ** bits.
    return (card_self + card_b) > (StridedInterval.max_int(a.bits) + 1)
python
{ "resource": "" }
q19055
StridedInterval._wrapped_unsigned_mul
train
def _wrapped_unsigned_mul(a, b):
    """
    Perform wrapped unsigned multiplication on two StridedIntervals.

    :param a: The first operand (StridedInterval)
    :param b: The second operand (StridedInterval)
    :return: The multiplication result
    """
    if a.bits != b.bits:
        # Fixed: this is the unsigned variant; the old message wrongly said "Signed mul".
        logger.warning("Unsigned mul: two parameters have different bit length")

    bits = max(a.bits, b.bits)
    lb = a.lower_bound * b.lower_bound
    ub = a.upper_bound * b.upper_bound
    uninit_flag = a.uninitialized | b.uninitialized

    if (ub - lb) < (2 ** bits):
        # The result range fits in `bits` bits, so no wrap-around occurs.
        if b.is_integer:
            # Multiplication with an integer, and it does not overflow!
            stride = abs(a.stride * b.lower_bound)
        elif a.is_integer:
            stride = abs(a.lower_bound * b.stride)
        else:
            stride = fractions.gcd(a.stride, b.stride)
        return StridedInterval(bits=bits, stride=stride, lower_bound=lb, upper_bound=ub,
                               uninitialized=uninit_flag)
    else:
        # Overflow occurred; give up precision and return TOP.
        # NOTE(review): uninit_flag is dropped here (uninitialized=False) — confirm intended.
        return StridedInterval.top(bits, uninitialized=False)
python
{ "resource": "" }
q19056
StridedInterval._is_surrounded
train
def _is_surrounded(self, b):
    """
    Perform a wrapped LTE comparison only considering the SI bounds
    (i.e. whether `self` is contained in `b`).

    :param b: The second operand
    :return: True if self <= b, False otherwise
    """
    # The paper's `a` is `self`.
    a = self
    # The empty interval is contained in everything.
    if a.is_empty:
        return True
    if a.is_top and b.is_top:
        return True
    elif a.is_top:
        return False
    elif b.is_top:
        return True
    # Both of a's endpoints must fall inside b ...
    if b._surrounds_member(a.lower_bound) and b._surrounds_member(a.upper_bound):
        # ... and either the bounds coincide exactly, or b's endpoints must not
        # both fall inside a (which would mean the two intervals wrap around
        # each other rather than one containing the other).
        if ((b.lower_bound == a.lower_bound and b.upper_bound == a.upper_bound) or
                not a._surrounds_member(b.lower_bound) or
                not a._surrounds_member(b.upper_bound)):
            return True
    return False
python
{ "resource": "" }
q19057
StridedInterval.rshift_logical
train
def rshift_logical(self, shift_amount):
    """
    Logical shift right.

    :param StridedInterval shift_amount: The amount of shifting
    :return: The shifted StridedInterval
    :rtype: StridedInterval
    """
    lower, upper = self._pre_shift(shift_amount)

    # Shift by every feasible concrete amount and union all possible results.
    result = None
    for amount in xrange(lower, upper + 1):
        shifted = self._rshift_logical(amount)
        result = shifted if result is None else result.union(shifted)

    result.normalize()
    result.uninitialized = self.uninitialized
    return result
python
{ "resource": "" }
q19058
StridedInterval.rshift_arithmetic
train
def rshift_arithmetic(self, shift_amount):
    """
    Arithmetic shift right.

    :param StridedInterval shift_amount: The amount of shifting
    :return: The shifted StridedInterval
    :rtype: StridedInterval
    """
    lower, upper = self._pre_shift(shift_amount)

    # Shift by every feasible concrete amount and union all possible results.
    result = None
    for amount in xrange(lower, upper + 1):
        shifted = self._rshift_arithmetic(amount)
        result = shifted if result is None else result.union(shifted)

    result.normalize()
    result.uninitialized = self.uninitialized
    return result
python
{ "resource": "" }
q19059
StridedInterval.union
train
def union(self, b):
    """
    The union operation. It might return a DiscreteStridedIntervalSet to allow for better precision in analysis.

    :param b: Operand
    :return: A new DiscreteStridedIntervalSet, or a new StridedInterval.
    """
    # `allow_dsis` is a module-level switch; when disabled we always fall back
    # to the plain (less precise) strided-interval join.
    if not allow_dsis:
        return StridedInterval.least_upper_bound(self, b)
    else:
        # Only keep a discrete set while both operands are small enough;
        # otherwise collapse into a single interval to bound cost.
        if self.cardinality > discrete_strided_interval_set.DEFAULT_MAX_CARDINALITY_WITHOUT_COLLAPSING or \
                b.cardinality > discrete_strided_interval_set.DEFAULT_MAX_CARDINALITY_WITHOUT_COLLAPSING:
            return StridedInterval.least_upper_bound(self, b)

        else:
            dsis = DiscreteStridedIntervalSet(bits=self._bits, si_set={ self })
            return dsis.union(b)
python
{ "resource": "" }
q19060
StridedInterval._bigger
train
def _bigger(interval1, interval2): """ Return interval with bigger cardinality Refer Section 3.1 :param interval1: first interval :param interval2: second interval :return: Interval or interval2 whichever has greater cardinality """ if interval2.cardinality > interval1.cardinality: return interval2.copy() return interval1.copy()
python
{ "resource": "" }
q19061
StridedInterval.least_upper_bound
train
def least_upper_bound(*intervals_to_join):
    """
    Pseudo least upper bound.
    Join the given set of intervals into a big interval. The resulting strided interval is the one which, among
    all the possible joins of the presented SIs, contains the least number of values.

    The number of joins to compute is linear with the number of intervals to join.

    Draft of proof:
    Considering three generic SI (a, b, and c) ordered from their lower bounds, such that
    a.lower_bound <= b.lower_bound <= c.lower_bound, where <= is the lexicographic less or equal,
    the only joins which make sense to compute are:
    * a U b U c
    * b U c U a
    * c U a U b

    All the other combinations fall in either one of these cases. For example: b U a U c does not make sense
    to be calculated. In fact, if one draws this union the result is exactly either (b U c U a) or (a U b U c)
    or (c U a U b).

    :param intervals_to_join: Intervals to join
    :return: Interval that contains all intervals
    """
    assert len(intervals_to_join) > 0, "No intervals to join"
    # Check if all intervals are of same width
    all_same = all(x.bits == intervals_to_join[0].bits for x in intervals_to_join)
    assert all_same, "All intervals to join should be same"

    # Optimization: If we have only one interval, then return that interval as result
    if len(intervals_to_join) == 1:
        return intervals_to_join[0].copy()
    # Optimization: If we have only two intervals, the pseudo-join is fine and more precise
    if len(intervals_to_join) == 2:
        return StridedInterval.pseudo_join(intervals_to_join[0], intervals_to_join[1])

    # sort the intervals in increasing left bound
    sorted_intervals = sorted(intervals_to_join, key=lambda x: x.lower_bound)
    # Fig 3 of the paper
    ret = None

    # we try all possible joins (linear with the number of SI to join)
    # and we return the one with the least number of values.
    for i in xrange(len(sorted_intervals)):
        # let's join all of them, rotating the starting interval each iteration
        si = reduce(lambda x, y: StridedInterval.pseudo_join(x, y, False), sorted_intervals[i:] + sorted_intervals[0:i])
        if ret is None or ret.n_values > si.n_values:
            ret = si

    # propagate the uninitialized flag if any of the inputs carried it
    if any([x for x in intervals_to_join if x.uninitialized]):
        ret.uninitialized = True
    return ret
python
{ "resource": "" }
q19062
StridedInterval._minimal_common_integer_splitted
train
def _minimal_common_integer_splitted(si_0, si_1):
    """
    Calculates the minimal integer that appears in both StridedIntervals.
    It's equivalent to finding an integral solution for equation `ax + b = cy + d` that makes `ax + b` minimal
    si_0.stride, si_1.stride being a and c, and si_0.lower_bound, si_1.lower_bound being b and d, respectively.
    Upper bounds are used to check whether the minimal common integer exceeds the bound or not. None is returned
    if no minimal common integers can be found within the range.

    Some assumptions:
    # - None of the StridedIntervals straddles the south pole. Consequently, we have x <= max_int(si.bits) and y <=
    #   max_int(si.bits)
    # - a, b, c, d are all positive integers
    # - x >= 0, y >= 0

    :param StridedInterval si_0: the first StridedInterval
    :param StridedInterval si_1: the second StrideInterval
    :return: the minimal common integer, or None if there is no common integer
    """
    a, c = si_0.stride, si_1.stride
    b, d = si_0.lower_bound, si_1.lower_bound

    # if any of them is an integer
    if si_0.is_integer:
        if si_1.is_integer:
            return None if si_0.lower_bound != si_1.lower_bound else si_0.lower_bound
        elif si_0.lower_bound >= si_1.lower_bound and \
                si_0.lower_bound <= si_1.upper_bound and \
                (si_0.lower_bound - si_1.lower_bound) % si_1.stride == 0:
            return si_0.lower_bound
        else:
            return None
    elif si_1.is_integer:
        # Swap the operands so the integer case above handles it.
        return StridedInterval._minimal_common_integer_splitted(si_1, si_0)

    # shortcut
    if si_0.upper_bound < si_1.lower_bound or si_1.upper_bound < si_0.lower_bound:
        # They don't overlap at all
        return None

    if (d - b) % StridedInterval.gcd(a, c) != 0:
        # They don't overlap
        return None

    # Given two strided intervals a = sa[lba, uba] and b = sb[lbb, ubb], the first integer
    # shared by them is found by finding the minimum values of ka and kb which solve the
    # equation: ka * sa + lba = kb * sb + lbb.
    # In particular one can solve the above diophantine equation and find the parameterized
    # solutions of ka and kb, with respect to a parameter t. The minimum natural value of the
    # parameter t which gives two positive natural values of ka and kb is used to resolve
    # ka and kb, and finally to solve the above equation and get the minimum shared integer.
    x, y = StridedInterval.diop_natural_solution_linear(-(b - d), a, -c)
    # BUGFIX: the old code tested `a`/`b` (the stride and lower bound, which are
    # never None) instead of the solver's results `x`/`y`, so the no-solution case
    # fell through and crashed on arithmetic with None.
    if x is None or y is None:
        return None

    first_integer = x * a + b
    assert first_integer == y * c + d
    if first_integer >= si_0.lower_bound and first_integer <= si_0.upper_bound and \
            first_integer >= si_1.lower_bound and first_integer <= si_1.upper_bound:
        return first_integer
    else:
        return None
python
{ "resource": "" }
q19063
StridedInterval.reverse
train
def reverse(self):
    """
    Delayed byte-reversal: all it really does is flip the `_reversed` flag on a
    copy of this StridedInterval; the actual reversal happens lazily elsewhere.

    :return: A copy with the `_reversed` flag inverted (or self for 8-bit values).
    """
    if self.bits == 8:
        # A single byte has nothing to reverse.
        return self

    copied = self.copy()
    copied._reversed = not copied._reversed
    return copied
python
{ "resource": "" }
q19064
SMTLibScriptDumperMixin.get_smtlib_script_satisfiability
train
def get_smtlib_script_satisfiability(self, extra_constraints=(), extra_variables=()):
    """
    Return an smt-lib script that check the satisfiability of the current constraints

    :param extra_constraints: Extra constraints (claripy ASTs) appended to self.constraints.
    :param extra_variables:   Extra variables to declare in the script.
    :return string: smt-lib script
    """
    try:
        # Convert the combined constraints and the extra variables into backend terms.
        e_csts = self._solver_backend.convert_list(extra_constraints + tuple(self.constraints))
        e_variables = self._solver_backend.convert_list(extra_variables)
        variables, csts = self._solver_backend._get_all_vars_and_constraints(e_c=e_csts, e_v=e_variables)
        return self._solver_backend._get_satisfiability_smt_script(csts, variables)
    except BackendError as e:
        # Surface backend failures as a frontend error, preserving the cause chain.
        raise ClaripyFrontendError("Backend error during smtlib script generation") from e
python
{ "resource": "" }
q19065
BackendVSA.apply_annotation
train
def apply_annotation(self, bo, annotation):
    """
    Apply an annotation on the backend object.

    :param BackendObject bo: The backend object.
    :param Annotation annotation: The annotation to be applied
    :return: A new BackendObject
    :rtype: BackendObject
    """
    # Currently only RegionAnnotation is supported; anything else leaves the
    # object untouched.
    if isinstance(annotation, RegionAnnotation):
        # Region annotations live on ValueSets. If needed, start from an empty
        # ValueSet of the same width — the original value is not carried over.
        # To convert a StridedInterval to a ValueSet while keeping its value,
        # call AST.annotate() from outside instead.
        target = bo if isinstance(bo, ValueSet) else ValueSet.empty(bo.bits)
        return target.apply_annotation(annotation)
    return bo
python
{ "resource": "" }
q19066
BackendZ3._generic_model
train
def _generic_model(self, z3_model):
    """
    Converts a Z3 model to a name->primitive dict.

    :param z3_model: A Z3 model object (iterable of function declarations).
    :return: dict mapping declaration names to native Python primitive values.
    """
    model = { }
    for m_f in z3_model:
        # m_f is a declaration; recover its name through the low-level C API.
        n = _z3_decl_name_str(m_f.ctx.ctx, m_f.ast).decode()
        # Apply the declaration to obtain the corresponding constant expression,
        # then evaluate it under the model.
        m = m_f()
        me = z3_model.eval(m)
        # Convert the evaluated Z3 AST into a native Python value.
        model[n] = self._abstract_to_primitive(me.ctx.ctx, me.ast)
    return model
python
{ "resource": "" }
q19067
Base._calc_hash
train
def _calc_hash(op, args, keywords):
    """
    Calculates the hash of an AST, given the operation, args, and kwargs.

    :param op:       The operation.
    :param args:     The arguments to the operation.
    :param keywords: A dict including the 'symbolic', 'variables', and 'length' items.
    :returns:        a hash.

    We do it using md5 to avoid hash collisions. (hash(-1) == hash(-2), for example)

    NOTE: do not change the tuple layout below without auditing every HASHCONS
    site — hash stability keys the AST cache.
    """
    # Numeric leaves hash as their own value; everything else contributes its Python hash.
    args_tup = tuple(a if type(a) in (int, float) else hash(a) for a in args)
    # HASHCONS: these attributes key the cache
    # BEFORE CHANGING THIS, SEE ALL OTHER INSTANCES OF "HASHCONS" IN THIS FILE
    to_hash = (
        op, args_tup,
        str(keywords.get('length', None)),
        hash(keywords['variables']),
        keywords['symbolic'],
        hash(keywords.get('annotations', None)),
    )

    # Why do we use md5 when it's broken? Because speed is more important
    # than cryptographic integrity here. Then again, look at all those
    # allocations we're doing here... fast python is painful.
    hd = hashlib.md5(pickle.dumps(to_hash, -1)).digest()
    # Take the first 8 bytes of the digest as a 64-bit integer.
    return md5_unpacker.unpack(hd)[0]
python
{ "resource": "" }
q19068
Base.remove_annotation
train
def remove_annotation(self, a):
    """
    Removes an annotation from this AST.

    :param a: the annotation to remove
    :returns: a new AST, with the annotation removed
    """
    def _strip(annotations):
        # Keep every annotation except the one being removed.
        return tuple(existing for existing in annotations if existing != a)

    return self._apply_to_annotations(_strip)
python
{ "resource": "" }
q19069
Base.remove_annotations
train
def remove_annotations(self, remove_sequence):
    """
    Removes several annotations from this AST.

    :param remove_sequence: a sequence/set of the annotations to remove
    :returns: a new AST, with the annotations removed
    """
    def _strip(annotations):
        # Keep every annotation not listed for removal.
        return tuple(existing for existing in annotations if existing not in remove_sequence)

    return self._apply_to_annotations(_strip)
python
{ "resource": "" }
q19070
Base.shallow_repr
train
def shallow_repr(self, max_depth=8, explicit_length=False, details=LITE_REPR):
    """
    Returns a string representation of this AST, but with a maximum depth to
    prevent floods of text being printed.

    :param max_depth:       The maximum depth to print.
    :param explicit_length: Print lengths of BVV arguments.
    :param details:         An integer value specifying how detailed the output should be:
                              LITE_REPR - print short repr for both operations and BVs,
                              MID_REPR - print full repr for operations and short for BVs,
                              FULL_REPR - print full repr of both operations and BVs.
    :return:                A string representing the AST
    """
    # Iterative post-order traversal: ast_queue holds (depth, iterator-of-args),
    # arg_queue accumulates rendered children, op_queue holds pending operations.
    ast_queue = [(0, iter([self]))]
    arg_queue = []
    op_queue = []

    while ast_queue:
        try:
            depth, args_iter = ast_queue[-1]
            arg = next(args_iter)

            if not isinstance(arg, Base):
                # Non-AST leaf: rendered as-is later.
                arg_queue.append(arg)
                continue

            if max_depth is not None:
                if depth >= max_depth:
                    # Truncate subtrees deeper than max_depth.
                    arg_queue.append('<...>')
                    continue

            if arg.op in operations.reversed_ops:
                # Print reversed ops under their canonical name, with args reversed.
                op_queue.append((depth + 1, operations.reversed_ops[arg.op], len(arg.args), arg.length))
                ast_queue.append((depth + 1, reversed(arg.args)))
            else:
                op_queue.append((depth + 1, arg.op, len(arg.args), arg.length))
                ast_queue.append((depth + 1, iter(arg.args)))

        except StopIteration:
            # All children of the current node are rendered: combine them.
            ast_queue.pop()

            if op_queue:
                depth, op, num_args, length = op_queue.pop()

                args_repr = arg_queue[-num_args:]
                del arg_queue[-num_args:]

                length = length if explicit_length else None
                inner_repr = self._op_repr(op, args_repr, depth > 1, length, details)
                arg_queue.append(inner_repr)

    assert len(op_queue) == 0, "op_queue is not empty"
    assert len(ast_queue) == 0, "arg_queue is not empty"
    assert len(arg_queue) == 1, ("repr_queue has unexpected length", len(arg_queue))

    return "<{} {}>".format(self._type_name(), arg_queue.pop())
python
{ "resource": "" }
q19071
Base.children_asts
train
def children_asts(self):
    """
    Return an iterator over the nested children ASTs.
    """
    # Depth-first walk using a stack of argument iterators.
    ast_queue = deque([iter(self.args)])
    while ast_queue:
        try:
            ast = next(ast_queue[-1])
        except StopIteration:
            # Current argument iterator exhausted; go back up one level.
            ast_queue.pop()
            continue

        if isinstance(ast, Base):
            # Descend into this child's own arguments, and yield the child itself.
            ast_queue.append(iter(ast.args))

            l.debug("Yielding AST %s with hash %s with %d children", ast, hash(ast), len(ast.args))
            yield ast
python
{ "resource": "" }
q19072
Base.leaf_asts
train
def leaf_asts(self):
    """
    Return an iterator over the leaf ASTs (nodes of depth 1), with duplicates
    (by cache-key identity) yielded only once.
    """
    seen = set()
    ast_queue = deque([self])
    while ast_queue:
        ast = ast_queue.pop()
        if isinstance(ast, Base) and id(ast.cache_key) not in seen:
            # Deduplicate by the identity of the interned cache key.
            seen.add(id(ast.cache_key))

            if ast.depth == 1:
                yield ast
                continue

            # Not a leaf: walk into its arguments.
            ast_queue.extend(ast.args)
            continue
python
{ "resource": "" }
q19073
Base.swap_args
train
def swap_args(self, new_args, new_length=None):
    """
    This returns the same AST, with the arguments swapped out for new_args.

    :param new_args:   Replacement arguments (same arity as self.args).
    :param new_length: Optional new bit-length; defaults to the current length.
    :return:           A new AST, or self if every argument is identical.
    """
    # Identity shortcut: if every argument is the very same object, reuse self.
    if len(self.args) == len(new_args) and all(a is b for a,b in zip(self.args, new_args)):
        return self

    #symbolic = any(a.symbolic for a in new_args if isinstance(a, Base))
    #variables = frozenset.union(frozenset(), *(a.variables for a in new_args if isinstance(a, Base)))
    length = self.length if new_length is None else new_length
    a = self.__class__(self.op, new_args, length=length)
    #if a.op != self.op or a.symbolic != self.symbolic or a.variables != self.variables:
    #    raise ClaripyOperationError("major bug in swap_args()")
    return a
python
{ "resource": "" }
q19074
Base.replace_dict
train
def replace_dict(self, replacements, variable_set=None, leaf_operation=None):
    """
    Returns this AST with subexpressions replaced by those that can be found in `replacements`
    dict.

    :param variable_set:   For optimization, ast's without these variables are not checked
                           for replacing.
    :param replacements:   A dictionary of hashes to their replacements.
    :param leaf_operation: An operation that should be applied to the leaf nodes.
    :return:               An AST with all instances of ast's in replacements.
    """
    if variable_set is None:
        variable_set = set()

    if leaf_operation is None:
        leaf_operation = lambda x: x

    # Iterative post-order rewrite: arg_queue holds iterators of pending children,
    # rep_queue holds already-rewritten nodes, ast_queue holds parents awaiting
    # their rewritten children.
    arg_queue = [iter([self])]
    rep_queue = []
    ast_queue = []

    while arg_queue:
        try:
            ast = next(arg_queue[-1])
            repl = ast

            if not isinstance(ast, Base):
                # Non-AST values pass through unchanged.
                rep_queue.append(repl)
                continue

            elif ast.cache_key in replacements:
                # An explicit replacement exists for this exact node.
                repl = replacements[ast.cache_key]

            elif ast.variables >= variable_set:
                # Only rewrite subtrees that can contain the target variables.
                if ast.op in operations.leaf_operations:
                    repl = leaf_operation(ast)
                    if repl is not ast:
                        # Memoize so identical leaves rewrite consistently.
                        replacements[ast.cache_key] = repl

                elif ast.depth > 1:
                    # Process this node's children first; revisit it afterwards.
                    arg_queue.append(iter(ast.args))
                    ast_queue.append(ast)
                    continue

            rep_queue.append(repl)
            continue

        except StopIteration:
            # All children of the top-most pending node have been rewritten.
            arg_queue.pop()

            if ast_queue:
                ast = ast_queue.pop()
                repl = ast

                args = rep_queue[-len(ast.args):]
                del rep_queue[-len(ast.args):]

                # Check if replacement occurred.
                if any((a is not b for a, b in zip(ast.args, args))):
                    repl = ast.make_like(ast.op, tuple(args))
                    replacements[ast.cache_key] = repl

                rep_queue.append(repl)

    assert len(arg_queue) == 0, "arg_queue is not empty"
    assert len(ast_queue) == 0, "ast_queue is not empty"
    assert len(rep_queue) == 1, ("rep_queue has unexpected length", len(rep_queue))

    return rep_queue.pop()
python
{ "resource": "" }
q19075
Base.replace
train
def replace(self, old, new, variable_set=None, leaf_operation=None):  # pylint:disable=unused-argument
    """
    Returns this AST but with the AST 'old' replaced with AST 'new' in its subexpressions.

    :param old: The subexpression to replace.
    :param new: Its replacement.
    :return:    A new AST with the replacement applied.
    """
    self._check_replaceability(old, new)
    return self.replace_dict({old.cache_key: new}, variable_set=old.variables)
python
{ "resource": "" }
q19076
Base.ite_burrowed
train
def ite_burrowed(self):
    """
    Returns an equivalent AST that "burrows" the ITE expressions as deep as possible into the
    ast, for simpler printing.
    """
    if self._burrowed is None:
        # Compute once and cache; the burrowed form is its own fixed point, so
        # mark it as already burrowed to stop re-processing.
        self._burrowed = self._burrow_ite()  # pylint:disable=attribute-defined-outside-init
        self._burrowed._burrowed = self._burrowed  # pylint:disable=attribute-defined-outside-init
    return self._burrowed
python
{ "resource": "" }
q19077
Base.ite_excavated
train
def ite_excavated(self):
    """
    Returns an equivalent AST that "excavates" the ITE expressions out as far as possible
    toward the root of the AST, for processing in static analyses.
    """
    if self._excavated is None:
        self._excavated = self._excavate_ite()  # pylint:disable=attribute-defined-outside-init

        # we set the flag for the children so that we avoid re-excavating during
        # VSA backend evaluation (since the backend evaluation recursively works on
        # the excavated ASTs)
        self._excavated._excavated = self._excavated
    return self._excavated
python
{ "resource": "" }
q19078
FPS
train
def FPS(name, sort, explicit_name=None):
    """
    Creates a floating-point symbol.

    :param name:          The name of the symbol
    :param sort:          The sort of the floating point
    :param explicit_name: If False, an identifier is appended to the name to ensure uniqueness.
    :return:              An FP AST.
    """
    explicit = False if explicit_name is None else explicit_name
    symbol_name = _make_name(name, sort.length, explicit, prefix='FP_')
    return FP('FPS', (symbol_name, sort), variables={symbol_name}, symbolic=True, length=sort.length)
python
{ "resource": "" }
q19079
FP.to_fp
train
def to_fp(self, sort, rm=None):
    """
    Convert this float to a different sort.

    :param sort: The sort to convert to
    :param rm:   Optional: The rounding mode to use
    :return:     An FP AST
    """
    rounding = fp.RM.default() if rm is None else rm
    return fpToFP(rounding, self, sort)
python
{ "resource": "" }
q19080
FP.val_to_bv
train
def val_to_bv(self, size, signed=True, rm=None):
    """
    Convert this floating point value to an integer bitvector.

    :param size:   The size of the bitvector to return
    :param signed: Optional: Whether the target integer is signed
    :param rm:     Optional: The rounding mode to use
    :return:       A bitvector whose value is the rounded version of this FP's value
    """
    rounding = fp.RM.default() if rm is None else rm
    if signed:
        return fpToSBV(rounding, self, size)
    return fpToUBV(rounding, self, size)
python
{ "resource": "" }
q19081
DiscreteStridedIntervalSet.cardinality
train
def cardinality(self):
    """
    Over-approximate the cardinality of this DSIS as the sum of the
    cardinalities of its member intervals.

    :return: The (over-approximated) cardinality.
    """
    return sum(si.cardinality for si in self._si_set)
python
{ "resource": "" }
q19082
DiscreteStridedIntervalSet.collapse
train
def collapse(self):
    """
    Collapse this set into a single StridedInterval instance by unioning
    all members together.

    :return: A new StridedInterval instance.
    """
    if not self.cardinality:
        # This is an empty StridedInterval...
        return StridedInterval.empty(self._bits)

    merged = None
    for si in self._si_set:
        merged = si if merged is None else merged._union(si)
    return merged
python
{ "resource": "" }
q19083
DiscreteStridedIntervalSet._union_with_si
train
def _union_with_si(self, si):
    """
    Union this set with a single StridedInterval.

    :param si: The StridedInterval to add.
    :return:   The resulting set (normalized when a new member was added).
    """
    merged = self.copy()
    # If an equivalent interval is already present, nothing to add.
    for member in merged._si_set:
        if BoolResult.is_true(member == si):
            return merged

    merged._si_set.add(si)
    merged._update_bounds(si)
    return merged.normalize()
python
{ "resource": "" }
q19084
DiscreteStridedIntervalSet._union_with_dsis
train
def _union_with_dsis(self, dsis):
    """
    Union this set with another DiscreteStridedIntervalSet.

    :param dsis: The other set.
    :return:     The normalized union.
    """
    result = self.copy()
    # Fold every member of the other set in one at a time; `union` may collapse
    # the result into a plain StridedInterval along the way.
    for member in dsis._si_set:
        result = result.union(member)

    if isinstance(result, DiscreteStridedIntervalSet):
        result._update_bounds(dsis)

    return result.normalize()
python
{ "resource": "" }
q19085
_expr_to_smtlib
train
def _expr_to_smtlib(e, daggify=True): """ Dump the symbol in its smt-format depending on its type :param e: symbol to dump :param daggify: The daggify parameter can be used to switch from a linear-size representation that uses ‘let’ operators to represent the formula as a dag or a simpler (but possibly exponential) representation that expands the formula as a tree :return string: smt-lib representation of the symbol """ if e.is_symbol(): return "(declare-fun %s %s)" % (e.symbol_name(), e.symbol_type().as_smtlib()) else: return "(assert %s)" % e.to_smtlib(daggify=daggify)
python
{ "resource": "" }
q19086
String.raw_to_bv
train
def raw_to_bv(self):
    """
    A counterpart to FP.raw_to_bv - does nothing and returns itself (as a BV).
    """
    if not self.symbolic:
        # Concrete single-character string: encode its code point as a BVV.
        return BVV(ord(self.args[0]), self.length)

    # Symbolic: derive a BVS whose name mirrors the string variable's name.
    var_name = next(iter(self.variables))
    bv_name = var_name.replace(self.STRING_TYPE_IDENTIFIER, self.GENERATED_BVS_IDENTIFIER)
    return BVS(bv_name, self.length)
python
{ "resource": "" }
q19087
StrPrefixOf
train
def StrPrefixOf(prefix, input_string):
    """
    Return True if the concrete value of the input_string starts with prefix,
    otherwise False.

    :param prefix:       prefix we want to check
    :param input_string: the string we want to check
    :return:             True if the input_string starts with prefix else False
    """
    # Use a literal prefix comparison. The previous implementation built a regex
    # from the raw prefix (re.match(r'^' + prefix.value, ...)), which
    # misinterpreted regex metacharacters such as '.', '*', or '(' in the
    # prefix (and could crash on an invalid pattern).
    return input_string.value.startswith(prefix.value)
python
{ "resource": "" }
q19088
CompositeFrontend._shared_solvers
train
def _shared_solvers(self, others): """ Returns a sequence of the solvers that self and others share. """ solvers_by_id = { id(s): s for s in self._solver_list } common_solvers = set(solvers_by_id.keys()) other_sets = [ { id(s) for s in cs._solver_list } for cs in others ] for o in other_sets: common_solvers &= o return [ solvers_by_id[s] for s in common_solvers ]
python
{ "resource": "" }
q19089
BV.chop
train
def chop(self, bits=1):
    """
    Chops a BV into consecutive sub-slices. Obviously, the length of this BV must be
    a multiple of bits.

    :returns: A list of smaller bitvectors, each ``bits`` in length. The first one will
              be the left-most (i.e. most significant) bits.
    """
    size = len(self)
    if size % bits != 0:
        raise ValueError("expression length (%d) should be a multiple of 'bits' (%d)" % (len(self), bits))
    if size == bits:
        return [ self ]
    # Emit the most significant chunk first by walking down from the top bit.
    return [ self[size - n * bits - 1 : size - (n + 1) * bits] for n in range(size // bits) ]
python
{ "resource": "" }
q19090
BV.get_byte
train
def get_byte(self, index):
    """
    Extracts a byte from a BV, where the index refers to the byte in a big-endian order.

    :param index: the byte to extract
    :return:      An 8-bit BV
    """
    # Convert the big-endian byte index into a position counted from the low end.
    byte_pos = self.size() // 8 - 1 - index
    low_bit = byte_pos * 8
    return self[low_bit + 7 : low_bit]
python
{ "resource": "" }
q19091
BV.get_bytes
train
def get_bytes(self, index, size):
    """
    Extracts several bytes from a bitvector, where the index refers to the byte in a
    big-endian order.

    :param index: the byte index at which to start extracting
    :param size:  the number of bytes to extract
    :return:      A BV of size ``size * 8``
    """
    # Convert the big-endian byte index into bit positions counted from the low end.
    start = self.size() // 8 - 1 - index
    high_bit = start * 8 + 7
    low_bit = (start - size + 1) * 8
    return self[high_bit:low_bit]
python
{ "resource": "" }
q19092
BV.val_to_fp
train
def val_to_fp(self, sort, signed=True, rm=None):
    """
    Interpret this bitvector as an integer, and return the floating-point
    representation of that integer.

    :param sort:   The sort of floating point value to return (defaults to one
                   matching this BV's width)
    :param signed: Optional: whether this value is a signed integer
    :param rm:     Optional: the rounding mode to use
    :return:       An FP AST whose value is the same as this BV
    """
    rounding = fp.fp.RM.default() if rm is None else rm
    target_sort = fp.fp.FSort.from_size(self.length) if sort is None else sort
    if signed:
        return fp.fpToFP(rounding, self, target_sort)
    return fp.fpToFPUnsigned(rounding, self, target_sort)
python
{ "resource": "" }
q19093
BV.raw_to_fp
train
def raw_to_fp(self):
    """
    Interpret the bits of this bitvector as an IEEE754 floating point number.
    The inverse of this function is raw_to_bv.

    :return: An FP AST whose bit-pattern is the same as this BV
    """
    matching_sort = fp.fp.FSort.from_size(self.length)
    return fp.fpToFP(self, matching_sort)
python
{ "resource": "" }
q19094
ModelCache.eval_ast
train
def eval_ast(self, ast):
    """
    Evaluate the ast, replacing symbols by their last value in the model.
    Symbols with no cached value were unconstrained, so any value works for them.
    """
    substituted = ast.replace_dict(self.replacements, leaf_operation=self._leaf_op)
    return backends.concrete.eval(substituted, 1)[0]
python
{ "resource": "" }
q19095
ModelCache.eval_constraints
train
def eval_constraints(self, constraints):
    """
    Returns whether the constraints are satisfied trivially by using the last model.
    """
    # eval_ast concretizes symbols and evaluates them; this can raise
    # (e.g. on a division by zero under the concretized values).
    try:
        for constraint in constraints:
            if not self.eval_ast(constraint):
                return False
    except errors.ClaripyZeroDivisionError:
        return False
    return True
python
{ "resource": "" }
q19096
ModelCacheMixin.update
train
def update(self, other):
    """
    Updates this cache mixin with results discovered by the other split off one.

    :param other: The other ModelCacheMixin whose cached results get merged in.
    """
    # Only adopt models that constrain exactly our variable set.
    compatible = [ m for m in other._models if set(m.model.keys()) == self.variables ]
    self._models.update(compatible)

    for attr in ('_eval_exhausted', '_max_exhausted', '_min_exhausted'):
        getattr(self, attr).update(getattr(other, attr))
python
{ "resource": "" }
q19097
SeekpathKpath.kpath_from_seekpath
train
def kpath_from_seekpath(cls, seekpath, point_coords):
    r"""Convert a seekpath-formatted k-point path to sumo's format.

    Any ``'GAMMA'`` label is renamed to ``'\Gamma'``.

    Args:
        seekpath (list): 2-tuples giving the labels at each end of every
            segment, e.g. ``[(A, B), (B, C), (D, E)]``; a non-repeating
            label (here between ``C`` and ``D``) marks a break.
        point_coords (dict): Mapping from k-point label to fractional
            coordinates, e.g. ``{'GAMMA': [0., 0., 0.], ...}``.

    Returns:
        dict: ``{'path': [[l1, l2, l3], [l4, l5], ...],
        'kpoints': {l1: [a1, b1, c1], ...}}``
    """
    # Merge consecutive segments: [(A, B), (B, C), (D, E)] becomes
    # [[A, B, C], [D, E]]; a mismatched join starts a new sub-path.
    path = [[seekpath[0][0]]]
    for start, end in seekpath:
        current = path[-1]
        if current and current[-1] == start:
            current.append(end)
        else:
            path.append([start, end])

    # Keep coordinates only for labels that actually appear on the path.
    labels_on_path = set(chain.from_iterable(path))
    kpoints = {label: point_coords[label] for label in labels_on_path}

    # Every path should include the Gamma point; relabel it for LaTeX.
    assert 'GAMMA' in kpoints
    kpoints[r'\Gamma'] = kpoints.pop('GAMMA')
    path = [[label.replace('GAMMA', r'\Gamma') for label in segment]
            for segment in path]

    return {'kpoints': kpoints, 'path': path}
python
{ "resource": "" }
q19098
BradCrackKpath._get_bravais_lattice
train
def _get_bravais_lattice(spg_symbol, lattice_type, a, b, c, unique): """Get Bravais lattice symbol from symmetry data""" if lattice_type == 'triclinic': return('triclinic') elif lattice_type == 'monoclinic': if 'P' in spg_symbol: if unique == 0: return('mon_p_a') elif unique == 1: return('mon_p_b') elif unique == 2: return('mon_p_c') elif 'C' in spg_symbol: if unique == 0: return('mon_c_a') elif unique == 1: return('mon_c_b') elif unique == 2: return('mon_c_c') elif lattice_type == 'orthorhombic': if 'P' in spg_symbol: return('orth_p') elif 'A' in spg_symbol or 'C' in spg_symbol: if a > b: return('orth_c_a') elif b > a: return('orth_c_b') elif 'F' in spg_symbol: if (1/a**2 < 1/b**2 + 1/c**2 and 1/b**2 < 1/c**2 + 1/a**2 and 1/c**2 < 1/a**2 + 1/b**2): return('orth_f_1') elif 1/c**2 > 1/a**2 + 1/b**2: return('orth_f_2') elif 1/b**2 > 1/a**2 + 1/c**2: return('orth_f_3') elif 1/a**2 > 1/c**2 + 1/b**2: return('orth_f_4') elif 'I' in spg_symbol: if a > b and a > c: return('orth_i_a') elif b > a and b > c: return('orth_i_b') elif c > a and c > b: return('orth_i_c') elif lattice_type == 'tetragonal': if 'P' in spg_symbol: return('tet_p') elif 'I' in spg_symbol: if a > c: return('tet_i_a') else: return('tet_i_c') elif (lattice_type == 'trigonal' or lattice_type == 'hexagonal' or lattice_type == 'rhombohedral'): if 'R' in spg_symbol: if a > np.sqrt(2) * c: return('trig_r_a') else: return('trig_r_c') elif 'P' in spg_symbol: if unique == 0: return('trig_p_a') elif unique == 2: return('trig_p_c') elif lattice_type == "cubic": if 'P' in spg_symbol: return('cubic_p') elif 'I' in spg_symbol: return('cubic_i') elif 'F' in spg_symbol: return('cubic_f')
python
{ "resource": "" }
q19099
get_cached_colour
train
def get_cached_colour(element, orbital, colours=None, cache=None):
    """Get a colour for a particular elemental and orbital combination.

    If the element is not specified in the colours dictionary, the cache
    is checked. If this element-orbital combination has not been cached
    before, a new colour is drawn from the current matplotlib colour cycle
    and cached.

    The default cache is sumo.plotting.colour_cache. To reset this cache,
    use ``sumo.plotting.colour_cache.clear()``.

    Args:
        element (:obj:`str`): The element.
        orbital (:obj:`str`): The orbital.
        colours (:obj:`dict`, optional): Use custom colours for specific
            element and orbital combinations. Specified as a :obj:`dict`
            of :obj:`dict` of the colours. For example::

                {
                    'Sn': {'s': 'r', 'p': 'b'},
                    'O': {'s': '#000000'}
                }

            The colour can be a hex code, series of rgb value, or any
            other format supported by matplotlib.
        cache (:obj:`dict`, optional): Cache of colour values already
            assigned. The format is the same as the custom colours dict.
            If None, the module-level cache ``sumo.plotting.colour_cache``
            is used.

    Returns:
        tuple: (colour, cache)

    Raises:
        TypeError: If ``colours`` is not a dict, ConfigParser or None.
    """
    if cache is None:
        cache = colour_cache

    def _get_colour_with_cache(element, orbital, cache, colour_series):
        """Return cached colour if available, or fetch and cache from cycle."""
        from itertools import chain
        if element in cache and orbital in cache[element]:
            return cache[element][orbital], cache
        # Colours already handed out anywhere in the cache; kept as a list
        # (not a set) because colour specs may be unhashable (rgb lists).
        used_colours = list(chain.from_iterable(
            orb_colours.values() for orb_colours in cache.values()))
        # Take the first colour of the cycle that is not already in use.
        for colour in colour_series:
            if colour not in used_colours:
                break
        else:
            raise Exception('Not enough colours available for orbitals! '
                            'Try a different theme.')
        cache.setdefault(element, {})[orbital] = colour
        return colour, cache

    colour_series = matplotlib.rcParams['axes.prop_cycle'].by_key()['color']

    if isinstance(colours, configparser.ConfigParser):
        try:
            return colours.get(element, orbital), cache
        except (configparser.NoSectionError, configparser.NoOptionError):
            return _get_colour_with_cache(element, orbital, cache,
                                          colour_series)
    elif isinstance(colours, dict):
        try:
            # Bug fix: previously this branch returned only the colour,
            # breaking the documented (colour, cache) return contract that
            # every other branch honours.
            return colours[element][orbital], cache
        except KeyError:
            return _get_colour_with_cache(element, orbital, cache,
                                          colour_series)
    elif colours is None:
        return _get_colour_with_cache(element, orbital, cache, colour_series)
    else:
        raise TypeError('Argument "colours" should be dict, '
                        'ConfigParser or None.')
python
{ "resource": "" }