Dataset columns:
  query      - string, lengths 9 to 9.05k
  document   - string, lengths 10 to 222k
  negatives  - list, lengths 19 to 20
  metadata   - dict
Takes in a list of column headers and the Data object and returns a list of the mean values for each column. Use the builtin numpy functions to execute this calculation.
def mean(headers, data):
    column_matrix = data.get_data(headers)
    mean_values = column_matrix.mean(0)
    return mean_values
[ "def colAvg(table):\r\n \r\n # get the dimensions of the table\r\n rows = len(table)\r\n cols = len(table[0])\r\n \r\n # initialize list for column averages\r\n col_avg = []\r\n \r\n # for each columns...\r\n for col in range(cols):\r\n \r\n # ...initialize the sum\r\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
stdev: Takes in a list of column headers and the Data object and returns a list of the standard deviations for each specified column. Use the built-in numpy functions to execute this calculation.
def stdev(headers, data):
    column_matrix = data.get_data(headers)
    std_values = column_matrix.std(0)  # per-column standard deviation
    return std_values.tolist()
[ "def get_std_dev(self, data):\n mean = 0\n data_arr = []\n for i in data:\n data_arr.append(i[1])\n return statistics.stdev(data_arr)", "def column_stdev(column_values, mean):\n\n try:\n stdev = math.sqrt(\n sum([(mean-x)**2 for x in column_values]) / le...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Takes in a list of column headers and the Data object and returns a matrix with each column normalized so its minimum value is mapped to zero and its maximum value is mapped to 1.
def normalize_columns_separately(headers, data):
    column_matrix = data.get_data(headers)
    column_max = column_matrix.max(1)
    column_min = column_matrix.min(1)
    range = column_max - column_min
    normalized = (column_matrix - column_min) / range
    return normalized
[ "def normalize_columns_together(headers, data):\n\tcolumn_matrix=data.get_data(headers)\n\tmax=column_matrix.max()\n\tprint \"The maximum:\t \", max\n\tmin=column_matrix.min()\n\tprint \"The minimum:\t \", min\n\trange=max-min\n\tprint \"range: \", range\n\tcolumn_matrix=column_matrix-min\n\tnormalized=column_matri...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Takes in a list of column headers and the Data object and returns a matrix with each entry normalized so that the minimum value (of all the data in this set of columns) is mapped to zero and its maximum value is mapped to 1.
def normalize_columns_together(headers, data):
    column_matrix = data.get_data(headers)
    max = column_matrix.max()
    print("The maximum: ", max)
    min = column_matrix.min()
    print("The minimum: ", min)
    range = max - min
    print("range: ", range)
    column_matrix = column_matrix - min
    normalized = column_matrix / range
    return normalized
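A minimal standalone sketch (assuming plain numpy arrays rather than the Data object used above) that contrasts per-column scaling with scaling over the whole matrix:

import numpy as np

data = np.array([[1.0, 10.0],
                 [2.0, 20.0],
                 [3.0, 30.0]])

# Per-column: each column's min maps to 0 and its max maps to 1 independently.
per_column = (data - data.min(axis=0)) / (data.max(axis=0) - data.min(axis=0))

# Together: a single global min/max over all entries.
together = (data - data.min()) / (data.max() - data.min())

print(per_column)  # every column spans [0, 1]
print(together)    # only the global extremes hit 0 and 1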
[ "def normalize_columns_separately(headers, data):\n\tcolumn_matrix=data.get_data(headers)\n\tcolumn_max=column_matrix.max(1)\n\tcolumn_min=column_matrix.min(1)\n\trange=column_max-column_min\n\tnomalized=(column_matrix-column_min)/range\n\treturn nomalized", "def normalize01(data):\n maxi = np.amax(data, a...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the numeric matrix with each column sorted.
def sort(headers, data): # extension column_matrix=data.get_data(headers) # get raw matrix data for numeric values print "\n before sorting \n " print column_matrix column_matrix=column_matrix.tolist() column_array=np.asarray(column_matrix) column_array.sort(axis=0) print "\n \n done sorting here is your m...
[ "def sort_mat( m ):\n print(\"m\")\n print(m)\n m = sort_rows(m)\n print(m)\n m = sort_rows(m.T)\n print(m)\n return m", "def sort_matrix(matrix):\n tuples = zip(matrix.col, matrix.data)\n return sorted(tuples, key=lambda x: (x[1], x[0]), reverse=True)", "def _sort_rows(matrix, num_ro...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
register_attr(attr, editor, clazz = None) Registers EDITOR as the editor for attribute ATTR of class CLAZZ, or for any class if CLAZZ is None. EDITOR can be either a Tk widget subclass of editobj.editor.Editor, or None to hide the attribute. MRO is used in order to allow subclasses to use the editor registered for thei...
def register_attr(attr, editor, clazz = None):
    for_attr = _attr_editors.get(attr)
    if for_attr:
        for_attr[clazz] = editor
    else:
        _attr_editors[attr] = { clazz : editor }
[ "def register_children_attr(attr, insert = \"insert\", del_ = \"__delitem__\", clazz = None):\n \n if clazz: _children_attrs[clazz] = (attr, insert, del_)\n else: _children_attrs[None].append((attr, insert, del_))", "def register_on_edit(func, clazz):\n \n _on_edit[clazz] = func", "def attr(self, attr:...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
register_children_attr(attr, insert = "insert", del_ = "__delitem__", clazz = None) Registers ATTR as an attribute that can act as the "content" or the "children" of an object of class CLAZZ (or any class if None). If ATTR is None, the object is used as its own list of children (automatically done for list / dict subcl...
def register_children_attr(attr, insert = "insert", del_ = "__delitem__", clazz = None): if clazz: _children_attrs[clazz] = (attr, insert, del_) else: _children_attrs[None].append((attr, insert, del_))
[ "def addChildren(self,**kwargs):\n self.__children.update(**kwargs)\n self.childrenParamsUpdate()", "def insert_child(self, obj, index, child):\n getattr(obj, self.children_member)[index:index] = [child]", "def register_attr(attr, editor, clazz = None):\n \n for_attr = _attr_editors.get(a...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
register_method(method, clazz, args_editor) Registers METHOD as a method that must be displayed in EditObj for instances of CLAZZ. METHOD can be either a method name (a string), or a function (in this case, it is not a method, strictly speaking). ARGS_EDITOR are the editors used for entering the arguments, e.g. use edito...
def register_method(method, clazz, *args_editor):
    methods = _methods.get(clazz)
    if methods:
        methods.append((method, args_editor))
    else:
        _methods[clazz] = [(method, args_editor)]
[ "def Register_Method(self, method_name, method, ClassOrReg, **options):\n self.method_name.append(method_name)\n self.method.append(method)\n self.options.append(options)\n self.ClassOrReg.append(ClassOrReg)", "def register_method(self, method: str, cb: InvokeMethodCallable) -> None:\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
register_available_children(children_codes, clazz) Register the CHILDREN_CODES that are proposed for addition in an instance of CLAZZ. If CHILDREN_CODES is a list of strings (Python code), EditObj will display a dialog box. If CHILDREN_CODES is a single string, no dialog box will be displayed, and this code will automa...
def register_available_children(children_codes, clazz):
    if isinstance(children_codes, list):
        try:
            _available_children[clazz].extend(children_codes)
        except:
            _available_children[clazz] = children_codes
    else:
        _available_children[clazz] = children_codes
[ "def addChildren(self,**kwargs):\n self.__children.update(**kwargs)\n self.childrenParamsUpdate()", "def _children(self, children):\n existing = self._immed_raw_children()\n if (existing is None) or (existing != children):\n for fn in self.__ch_cbs.values():\n self.__zk._run_async(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
register_values(attr, code_expressions) Registers CODE_EXPRESSIONS as proposed values for ATTR.
def register_values(attr, code_expressions):
    code_expressions = map(unicodify, code_expressions)
    try:
        _values[attr].extend(code_expressions)
    except KeyError:
        _values[attr] = list(code_expressions)
[ "def add_expression_parser_magic_values(self, names: List[str], values: List[int]) -> None:\n\t\tif len(names) == 0 or len(values) == 0 or (not len(names) == len(values)):\n\t\t\treturn\n\n\t\tnames_buf = (ctypes.c_char_p * len(names))()\n\t\tfor i in range(0, len(names)):\n\t\t\tnames_buf[i] = names[i].encode('cha...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
register_on_edit(func, clazz) Register FUNC as an "on_edit" event for CLAZZ. When an instance of CLAZZ is edited, FUNC is called with the instance and the editor Tkinter window as arguments.
def register_on_edit(func, clazz): _on_edit[clazz] = func
[ "def addEdit( self, cCtrlName, nPositionX, nPositionY, nWidth, nHeight,\n cText=None,\n textListenerProc=None,\n cReadOnly=None,\n cMultiline=None,\n cAutoVScroll=None):\n self.addControl( \"com...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
register_on_children_visible(func, clazz) Register FUNC as an "on_children_visible" event for CLAZZ. When the children of an instance of CLAZZ are shown or hidden, FUNC is called with the instance and the new visibility status (0 or 1) as arguments.
def register_on_children_visible(func, clazz): _on_children_visible[clazz] = func
[ "def dock_visibilty_changed_callback(self, visible):\n self.hidden = not visible", "def on_children_changed(self, callback, remove=False):\n self._children_changed_handlers.register_callback(callback, remove=remove)", "def GroupVisible(self, Group, Visible):", "def register_available_children(childr...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This method checks whether the food name is in our database. name is the name of the food supplied by the user. Returns true if the food is in the database, false otherwise.
def findFood(self, name):
    name = name.lower()
    return name in dictfood
[ "def check_name(self, name): \n self.cursor.execute(\"SELECT name FROM users WHERE name = ?\", (name.title(),))\n if len(self.cursor.fetchall()) > 0:\n return True\n return False", "def check_name(name):\n if name == \"\":\n print(\"Nom invalide: entrée vide\")\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
IFieldWidget factory for LocationWidget.
def LocationFieldWidget(field, request): return FieldWidget(field, LocationWidget(request))
[ "def MapLocationFieldWidget(field, request):\n return FieldWidget(field, MapLocationWidget(request))", "def create_location(self, location):\n \"Does nothing\"", "def newLocation(self, **attrlinks):\n return Location(self, **attrlinks)", "def add_simple_widget(self, name, widget, label=None, valu...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Save files like the dataset, mask and settings as pickle files so they can be loaded in the ``Aggregator``
def save_attributes_for_aggregator(self, paths): # These functions save the objects we will later access using the aggregator. They are saved via the `pickle` # module in Python, which serializes the data on to the hard-disk. with open(f"{paths.pickle_path}/dataset.pickle", "wb") as f: ...
[ "def save_datasets(self):\n file_prefix = self._load_datasets_from\n\n self._train_set.save_images(file_prefix + \"_training.pkl\")\n self._test_set.save_images(file_prefix + \"_testing.pkl\")", "def save(self):\n self.save_checkpoint()\n self.save_reconstructions()\n self.save_sampl...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Registers new managers with the component manager. Managers are configured and setup before components.
def add_managers(self, managers: Union[List[Any], Tuple[Any]]):
    for m in self._flatten(managers):
        self.apply_configuration_defaults(m)
        self._managers.add(m)
[ "def _configure_manager(self):\n self._network_manager = BaseManager(\n self, resource_class=Network, uri_base=\"cloud_networks\",\n )\n self._load_balancer_pool_manager = LoadBalancerPoolManager(\n self, resource_class=LoadBalancerPool,\n uri_base=\"load_balanc...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get all components that are an instance of ``component_type``.
def get_components_by_type( self, component_type: Union[type, Tuple[type, ...]] ) -> List[Any]: return [c for c in self._components if isinstance(c, component_type)]
[ "def get_components_by_type(\n self, component_type: Union[type, Tuple[type, ...]]\n ) -> List[Any]:\n return self._manager.get_components_by_type(component_type)", "def get_components(self, component_type):\n return self._components[component_type]", "def get_entities_by_component_type(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the component with name ``name``. Names are guaranteed to be unique.
def get_component(self, name: str) -> Any:
    for c in self._components:
        if c.name == name:
            return c
    raise ValueError(f"No component found with name {name}")
[ "def lookupComponent(self, name):\n for component in self.components:\n if component.name == name:\n return component\n\n return None", "def get(self, name):\n try:\n return self.__components[name]\n except KeyError:\n return None", "def get_component(self, ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get a mapping of component names to components held by the manager. Returns: Dict[str, Any], a mapping of component names to components.
def list_components(self) -> Dict[str, Any]: return {c.name: c for c in self._components}
[ "def list_components(self) -> Dict[str, Any]:\n return self._manager.list_components()", "def registered_components():\n return list(_components.keys())", "def _getComponentsInfo(self):\n result = {}\n et = ElementTree()\n components = self.agentCompleteConfig.listComponents_() + ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Separately configure and set up the managers and components held by the component manager, in that order. The setup process involves applying default configurations and then calling the manager or component's setup method. This can result in new components as a side effect of setup because components themselves have ac...
def setup_components(self, builder: "Builder"): self._setup_components(builder, self._managers + self._components)
[ "def setup_component(self):\n self.conf, self.context = self._init_component()\n self.initialize()", "def init_component(self):\n\n pass", "def configureComponents(self, context):\n\n myComponents = self.components(context)\n\n aliases = {}\n for component in myComponen...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get all components that are an instance of ``component_type``.
def get_components_by_type( self, component_type: Union[type, Tuple[type, ...]] ) -> List[Any]: return self._manager.get_components_by_type(component_type)
[ "def get_components_by_type(\n self, component_type: Union[type, Tuple[type, ...]]\n ) -> List[Any]:\n return [c for c in self._components if isinstance(c, component_type)]", "def get_components(self, component_type):\n return self._components[component_type]", "def get_entities_by_compo...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get a mapping of component names to components held by the manager. Returns: Dict[str, Any], a dictionary mapping component names to components.
def list_components(self) -> Dict[str, Any]: return self._manager.list_components()
[ "def list_components(self) -> Dict[str, Any]:\n return {c.name: c for c in self._components}", "def _getComponentsInfo(self):\n result = {}\n et = ElementTree()\n components = self.agentCompleteConfig.listComponents_() + \\\n self.agentCompleteConfig.listWebapps_()\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates a Gaussian kernel with side length l and a sigma of sig.
def gkern(l=5, sig=1.):
    ax = np.linspace(-(l - 1) / 2., (l - 1) / 2., l)
    xx, yy = np.meshgrid(ax, ax)
    kernel = np.exp(-0.5 * (np.square(xx) + np.square(yy)) / np.square(sig))
    return kernel
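Note that the kernel returned above is not normalized; if it is meant to be used as a smoothing filter, it is common to divide by its sum so the weights add to one. A small usage sketch, assuming numpy and (for the convolution) scipy are available:

import numpy as np
from scipy.signal import convolve2d

k = gkern(l=5, sig=1.0)
k = k / k.sum()                     # normalize so the weights sum to 1

image = np.random.rand(32, 32)      # toy image, just for illustration
smoothed = convolve2d(image, k, mode="same", boundary="symm")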
[ "def gaussian_kernel(l=5, sig=1.):\n\n ax = np.linspace(-(l - 1) / 2., (l - 1) / 2., l)\n\n kernel = np.exp(-0.5 * np.square(ax) / np.square(sig))\n\n return kernel / np.sum(kernel)", "def gaussian2d(l, sigma=1.0):\n\n ax = np.arange(-l//2 + 1.0, l//2 + 1.0)\n xx, yy = np.meshgrid(ax, ax)\n\n ke...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Computes the histogram of the input image
def compute_histogram(self, image):
    hist = [0] * 256
    x, y = image.shape[:2]
    #print(image.shape)
    for i in range(x):
        for j in range(y):
            hist[image[i, j]] += 1
    return hist
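For reference, the same 256-bin histogram can be computed without the explicit double loop; a sketch assuming an 8-bit (integer) single-channel numpy image:

import numpy as np

def compute_histogram_fast(image):
    # Count occurrences of each intensity value 0..255 in one vectorized call.
    return np.bincount(image.ravel(), minlength=256).tolist()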
[ "def histogram(img):\n\n return np.histogram(img, bins=64, range=[0,255])[0]", "def hist(img):\n bottom_half = img[img.shape[0]//2:,:] # 0:img.shape[0]//2 is the top half\n histogram = bottom_half.sum(axis=0) \n \n return histogram", "def histogram() -> None:\n\n\tcol_h, gray_h = analyser.histogr...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Computes the binary image of the input image based on histogram analysis and thresholding, taking the threshold as input.
def binarize(self, image, threshold):
    bin_img = image.copy()
    for i in range(image.shape[0]):
        for j in range(image.shape[1]):
            if image[i, j] >= threshold:
                bin_img[i, j] = 0
            else:
                bin_img[i, j] = 255
    return bin_img
[ "def binarize(self, image, threshold):\n\n bin_img = image.copy()\n [h, w] = bin_img.shape\n opt_threshold = threshold\n print(opt_threshold)\n for row in range(h):\n for col in range(w):\n if bin_img[row, col] > opt_threshold: #greater than threshld whit...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Appends a new animation. If \p _widget already exists in the animations, its target will be changed.
def _addLinearAnimation(self, _widget, _target): self._linear_animations[_widget] = _target
[ "def _addPulseAnimation(self, _widget, _target):\n self._pulse_animations[_widget] = _target", "def add_animation(self, animation, key):\n\t\tif animation.from_value == animation.to_value:\n\t\t\treturn\n\t\tanimation.attribute = key\n\t\tanimation.layer = self\n\t\tself.animations[key] = animation", "de...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Appends a new animation. If \p _widget already exists in the animations, its target will be changed.
def _addPulseAnimation(self, _widget, _target): self._pulse_animations[_widget] = _target
[ "def _addLinearAnimation(self, _widget, _target):\n self._linear_animations[_widget] = _target", "def add_animation(self, animation, key):\n\t\tif animation.from_value == animation.to_value:\n\t\t\treturn\n\t\tanimation.attribute = key\n\t\tanimation.layer = self\n\t\tself.animations[key] = animation", "...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Updates input icons relative to mouse state
def _updateOnMouseState(self, state): x = state.X.abs y = state.Y.abs mscale = self.mouse_icon.getScale() if (x + mscale[0] + self.mouse_offset) > render_engine.Window.width: x = x - mscale[0] - 10 else: x += self.mouse_offset ...
[ "def update_button_hover_status(self):\n for button in self.playing_buttons:\n button.update(self.mousePos)", "def update_mouse_click(mouse_pos):\n global cur_slider_ix\n global cur_control_ix\n global mouse_pressed\n x = (mouse_pos[0] - sliders_x)\n y = (mouse_pos[1] - sliders_y)...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This function should perform the job of projecting the input pointcloud onto the frame of an image captured by a camera with camera matrix as given, of dimensions as given, in pixels. points is a 3 x N array where the ith entry is an (x, y, z) point in 3D space, in the reference frame of the depth camera. This correspo...
def project_points(points, cam_matrix, trans, rot): # STEP 1: Transform pointcloud into new reference frame. points = np.dot(rot, points) + trans[:, None] # STEP 2: Project new pointcloud onto image frame using K matrix. # gives a 3 x N array of image plane coordinates in homogenous coordinates. h...
[ "def project(self, point_cloud, round_px=True):\n if not isinstance(point_cloud, PointCloud) and not (isinstance(point_cloud, Point) and point_cloud.dim == 3):\n raise ValueError('Must provide PointCloud or 3D Point object for projection')\n if point_cloud.frame != self._frame:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Attempts to purchase all goods listed in the dict order, depositing them in the Player's cargo holds. Does not care about max cargo. dryRun simply checks if the purchase is possible. remaining controls if the order should be 100% purchased (True), or only purchase goods the player lacks Returns False if some goods are ...
def buyCargo(self, order, dryRun=False, remaining=True): ply = self.window.playerShip shop = self.planet.goods toBuy = order.copy() for mat in toBuy: if remaining and mat in ply.cargo: toBuy[mat] -= ply.cargo[mat].quantity if toBuy[mat] > 0: if mat not in shop: return False if shop[mat]*toBuy[...
[ "def purchase(game, player, purchase_order, end_game=False):\n ensure_action(game, 'purchase', player)\n if sum(purchase_order.values()) > 3:\n raise GamePlayNotAllowedError('can only purchase at most three shares')\n purchases = {}\n subtotal = 0\n for hotel_name, shares in purchase_order.ite...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initialize with a QPrinter object and a list of pages. pageList may be a list of two-tuples (num, page). Otherwise, the pages are numbered from 1 in the progress message. The pages are copied.
def __init__(self, printer, pageList, parent=None):
    super().__init__(parent)
    self.printer = printer
    self.setPageList(pageList)
[ "def setPageList(self, pageList):\n self.pageList = []\n for n, page in enumerate(pageList, 1):\n if isinstance(page, tuple):\n pageNum, page = page\n else:\n pageNum = n\n page = page.copy()\n # set zoom to 1.0 so computations ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set the page list to print. pageList may be a list of two-tuples (num, page). Otherwise, the pages are numbered from 1 in the progress message. The pages are copied.
def setPageList(self, pageList): self.pageList = [] for n, page in enumerate(pageList, 1): if isinstance(page, tuple): pageNum, page = page else: pageNum = n page = page.copy() # set zoom to 1.0 so computations based on geom...
[ "def setPageSequence(self, pageSequenceList):\r\n\r\n for index in range(self.pageCount() - 1, -1, -1):\r\n page = self.page(index)\r\n if page:\r\n self.removePage(page)\r\n\r\n count = 0\r\n for pageTitle in pageSequenceList:\r\n self.insertPage...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Paint the pages to the printer in the background.
def work(self): p = self.printer p.setFullPage(True) painter = QPainter(p) for n, (num, page) in enumerate(self.pageList): if self.isInterruptionRequested(): self.aborted = True return p.abort() self.progress.emit(num, n+1, len(self...
[ "def PaintPageBreak():\n pass", "def draw(self):\n while not self.quitFlag:\n self.populateLines()\n Page.draw(self)", "def draw_page(page, stream):\n bleed = {\n side: page.style[f'bleed_{side}'].value\n for side in ('top', 'right', 'bottom', 'left')}\n marks = page.style['m...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initializes ourselves with the print job and optional parent widget.
def __init__(self, job, parent=None): super().__init__(parent) self._job = job job.progress.connect(self.showProgress) job.finished.connect(self.jobFinished) self.canceled.connect(job.requestInterruption) self.setMinimumDuration(0) self.setRange(0, len(job.pageLis...
[ "def __init__(self, printer, pageList, parent=None):\n super().__init__(parent)\n self.printer = printer\n self.setPageList(pageList)", "def __init__(self, *args, **kwargs):\n _richtext.RichTextPrintout_swiginit(self,_richtext.new_RichTextPrintout(*args, **kwargs))", "def __init__(se...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This method will enable delivery confirmations and schedule the first message to be sent to RabbitMQ
def start_publishing(self):
    print(f"{self._connection_param}: Issuing consumer related RPC commands")
    # self._channel.confirm_delivery(self.on_delivery_confirmation)
    self.schedule_next_message(self.SLOW_SEND)
[ "def schedule_next_message(self):\r\n if self._stopping:\r\n return\r\n LOGGER.info('Scheduling next message for %0.1f seconds',\r\n self.PUBLISH_INTERVAL)\r\n self._connection.add_timeout(self.PUBLISH_INTERVAL,\r\n self.publish_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
r""" Computes the chisquare value of the sample data Notes
def _chisquare_value(self):
    x2 = np.sum((np.absolute(self.observed - self.expected) -
                 (0.5 * self.continuity_correction)) ** 2 / self.expected)
    return x2
[ "def compute_chisq(self):\n import scipy.stats as stats\n\n flux = self.table[\"flux\"]\n yexp = np.mean(flux)\n yobs = flux.data\n chi2, pval = stats.chisquare(yobs, yexp)\n return chi2, pval", "def calculate_chi_squared(self):\n chi = 0\n obsVals, expVals ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
r""" Finds the pvalue of the chisquare statistic. Notes
def _p_value(self):
    pval = chi2.sf(self.chi_square, self.degrees_of_freedom)
    return pval
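A small self-contained sketch of the same computation (assuming scipy and numpy, with the Yates continuity correction applied as in the methods above; the observed/expected counts are made-up illustration values):

import numpy as np
from scipy.stats import chi2

observed = np.array([48, 52])
expected = np.array([50, 50])
correction = 1  # Yates continuity correction, as in _chisquare_value above

x2 = np.sum((np.absolute(observed - expected) - 0.5 * correction) ** 2 / expected)
pval = chi2.sf(x2, df=len(observed) - 1)
print(x2, pval)  # chi-square statistic and its survival-function p-value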
[ "def _chisquare_value(self):\n x2 = np.sum((np.absolute(self.observed - self.expected) - (0.5 * self.continuity_correction)) ** 2 /\n self.expected)\n\n return x2", "def compute_chisq(self):\n import scipy.stats as stats\n\n flux = self.table[\"flux\"]\n yexp ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
BibTeX comment explaining error
def bibtex(self):
    return "@comment{%(id)s: %(message)s}" % \
        {'id': self.id, 'message': self.message}
[ "def DocComment(self) -> bool:", "def Comment(text=None): # real signature unknown; restored from __doc__\n pass", "def test_doc_with_comments():\n doc = CoNLL.conll2doc(input_str=RUSSIAN_SAMPLE)\n check_russian_doc(doc)", "def comment(self, content):\n pass", "def Comment(self) -> CodeComme...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Corrects the BibTeX key because the MR API cannot get its act together
def correct_key(goodkey, code):
    db = pybtex.database.parse_string(code, "bibtex")
    keys = [key for key in db.entries.keys()]
    badkey = keys[0]
    return code.replace(badkey, goodkey)
[ "def bibtex_key(paper):\n\n # combine the first few capitalized words of the title\n nr_words = 4\n trunc_len = 5\n min_len = 4\n title = [\n word[0:trunc_len]\n for word in rm_special_char(paper[\"title\"]).split()\n if len(word) >= min_len\n ][0:nr_words]\n\n # conf name ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Fetches citations for keys in key_list into a dictionary indexed by key
def mr2bib_dict(key_list): keys = [] d = {} # validate keys for key in key_list: if is_valid(key): keys.append(key) else: d[key] = ReferenceErrorInfo("Invalid Mathematical Reviews identifier", key) if len(keys) == 0: return d # make the api call entries = {} for key in keys: ...
[ "def get_titles_dict(name_url_list):\n\tbib_dict = {}\n\t\n\tfor name_url in name_url_list:\n\t\ttitles = get_author_titles(name_url[1])\n\t\tbib_dict[name_url] = titles\n\n\treturn bib_dict", "def _set_of_course_keys(course_list, key_attribute_name='id'):\n return {getattr(c, key_attribute_name) for c...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given the rates, add noise based on numreg
def add_white_noise(rates, numreg):
    rtemp = rates.copy().getA()
    sdrates = np.sqrt(rtemp * (1 - rtemp) / numreg) + 1e-10
    noise = np.random.normal(0, sdrates)
    rtemp += noise
    return np.matrix(rtemp)
[ "def noise(self, freq: int, /) -> None:", "def add_uniform_noise(rates, percent):\n raise 0 < percent < 1 or AssertionError\n rtemp = rates.copy().getA()\n noise = np.random.uniform(1 - percent, 1 + percent, np.shape(rtemp))\n rtemp = rtemp * noise\n return np.matrix(rtemp)", "def add_noise(self,...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given the rates, sample new rates uniformly between ((1-percent)*rates, (1+percent)*rates).
def add_uniform_noise(rates, percent):
    assert 0 < percent < 1
    rtemp = rates.copy().getA()
    noise = np.random.uniform(1 - percent, 1 + percent, np.shape(rtemp))
    rtemp = rtemp * noise
    return np.matrix(rtemp)
[ "def create_same_sample_rate(data1, sample_rate1, data2, sample_rate2):\n\n if sample_rate1 < sample_rate2:\n data1 = resampling(data1, sample_rate2, sample_rate1)\n sample_rate1 = sample_rate2\n elif sample_rate1 > sample_rate2:\n data2 = resampling(data2, sample_rate1, sample_rate2)\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This function runs the estimation procedure for the first time slice for a given number of demes and repeats the process reps number of times. The values of mean pop size and mig rates are preset but will be changed in future versions. The third parameter here controls the noise amount in the estimates of coalescent inten...
def run_Over_Grid(numdemes = 2, reps = 10, numreg = 100, t = 1000): Nmean = 2000 Nsd = 100 migMean = 0.0001 migsd = 1e-06 ndc2 = numdemes * (numdemes - 1) / 2 rows = ndc2 + numdemes + 1 I = np.matrix(np.eye(rows)) Ck = I[0:rows - 1, :] Dk = I[rows - 1, :] output = [] for r in...
[ "def multi_run(replications: int, iters: List, n: int):\n global call_count\n kwargs = {\n # 'alpha': 0.75,\n # 'rho': 'VaR',\n 'alpha': 0.75,\n 'rho': 'CVaR',\n 'x0': 2,\n 'n0': n,\n 'mu_1': -15,\n 'mu_2': 10,\n 'sigma_1': 4,\n 'sigma_2': ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given the true and the estimated parameter values, this function computes the error in the parameter estimates. The order controls the norm used; by default it is the maximum, i.e., the sup norm.
def compute_error(true, estimate, order = np.inf): print true print estimate errs = [] for i in xrange(len(true)): estError = abs(true[i] - estimate[i]) for j in xrange(len(true[i])): if true[i][j] != 0: estError[j] = estError[j] / true[i][j] errs.app...
[ "def convergence_order(N, err):\n import numpy as np\n\n if len(N) != len(err):\n raise ValueError('Convergence order args do not have same length')\n\n A = np.ones([len(err), 2])\n B = np.ones([len(err), 1])\n # ERR = A*N + B\n for i in range( len(N) ) :\n A[i,0] = np.log(N[i])\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This function processes the time string from PSMC and converts it to a list of time slice lengths.
def process_time_string(timestr): timestr = timestr.strip() toks = timestr.split('+') timeslices = [] for t in toks: tm = t.strip() mobj = re.search('\\*', tm) if mobj == None: timeslices += [int(tm)] else: tms = tm.split('*') timeslice...
[ "def rttm2simple(rttm:list) -> list:\n output = list()\n for line in rttm:\n _, _, _, start, duration, _, _, label, _, _ = line.split()\n end = float(start)+float(duration)\n output.append((f\"{label}\", float(start), end))\n return output", "def _parse_ps_output(string):\n t = st...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Takes the coalescence matrix C as a vectorization of the upper triangular matrix, and npop, the number of demes.
def mkCoalMatrix(C, npop):
    C = np.array(C).flatten()
    M = np.zeros((npop, npop))
    cnt = 0
    for i in range(npop):
        for j in range(i, npop):
            M[i, j] = C[cnt]
            if i != j:
                M[j, i] = M[i, j]
            cnt += 1
    return M
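A quick usage sketch (with hypothetical values) showing how the flattened upper triangle is unpacked into a symmetric matrix:

import numpy as np

# For npop = 2 the upper triangle is (C[0,0], C[0,1], C[1,1]).
C_vec = [0.10, 0.02, 0.08]
M = mkCoalMatrix(C_vec, npop=2)
print(M)
# -> [[0.1, 0.02], [0.02, 0.08]]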
[ "def reduce_C(self, C_on_basis_vecs):\n self.C_reduced = np.mat(np.array(C_on_basis_vecs, ndmin=2).T)\n return self.C_reduced", "def reduce_C(self, C_on_basis_vecs):\n self.C_reduced = np.mat(np.array(C_on_basis_vecs, ndmin=2))\n return self.C_reduced", "def c_matrix(x1,x2,x3):\n\tC ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The rates obtained from PSMC are the prob of coal in that timeslice, not the prob of coal in that timeslice AND not coalescing in any other timeslice. We need the conditional probability of coal in that timeslice given lines have not coalesced in any of the previous timeslices. This function converts the PSMC values in...
def modify_rates(self): if self.modified: print 'Already Modified Probabilities' elif self.varGiven: print 'You must enter the conditional coalescent probabilties if you want to supply variance of' print 'the coalescent probabilities. Required since we cannot compute ...
[ "def CSM2CRPEps(CSM, eps):\n CRP = np.zeros(CSM.shape)\n CRP[CSM <= eps] = 1\n return CRP", "def cumprob(self):\r\n return self.probabilities.cumsum(-1)", "def pc_nproduced_avg(self):\n return _mack_sdr_rossi_swig.BCH_decoder_ATSC_sptr_pc_nproduced_avg(self)", "def _get_precip_probabili...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This function collapses the time slices and the coalescent probabilities using the time string.
def collapse_using_timeStr(self): if self.modified == True: raise Exception('Probabilities already modified.\nCollapsing after modification will lead to incorrect results.') timeUnits = np.array(process_time_string(self.timeStr)) if len(self.timeslices) + 1 == np.sum(timeUnits): ...
[ "def clean_prep_break(dataframe):\n times = dataframe['test_time']\n drop_labels = ['Mode', 'Rest', 'Charge CC', 'Charge C-Rate',\n 'Discharge CC', 'Discharge C-Rate', 'TestTime']\n drop_index = [i for i, time_0 in enumerate(times) if time_0 in drop_labels]\n index = [i for i, time_0 i...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Authenticate a request. Returns a `User` if a valid token has been supplied using HTTP Basic authentication. Otherwise returns `None`.
def authenticate(self, request): auth = get_authorization_header(request).split() if not auth or auth[0].lower() != b"basic": return None if len(auth) == 1: raise AuthenticationFailed( "Invalid Basic authorization header. No credentials provided." ...
[ "def authenticateRequest(request, storeSessionCookie=False):\n if SESSION_KEY in request.session:\n user = ezidapp.models.getUserById(request.session[SESSION_KEY])\n if user != None and user.loginEnabled:\n return user\n else:\n return None\n elif \"HTTP_AUTHORIZATIO...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Do not enforce CSRF.
def enforce_csrf(self, request): return # To not perform the csrf check previously happening
[ "def enforce_csrf(self, request):\n return", "def csrf_protect():\n if request.method == 'POST':\n token = session.pop('_csrf_token', None)\n if not token or token != request.form.get('_csrf_token'):\n abort(403)", "def process_request(self, req):\n attr = \"_dont_enfor...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Lists all files below the given folder that match the pattern.
def _list_files(folder, pattern):
    for root, folders, files in os.walk(folder):
        for filename in files:
            if fnmatch.fnmatch(filename, pattern):
                yield os.path.join(root, filename)
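A brief usage sketch for the generator above; the folder name and pattern are hypothetical, and the imports are the ones _list_files itself relies on:

import fnmatch
import os

for path in _list_files("samples", "*.py"):
    print(path)  # every .py file found anywhere under samples/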
[ "def list_files(folder, pattern, full_path=False):\n if not folder:\n folder = \".\"\n folder = os.path.abspath(folder)\n fpaths = os.listdir(folder)\n fpattern = re.compile(pattern)\n file_list = list(filter(fpattern.search, fpaths))\n if full_path:\n file_list = [folder + \"/\" + f...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a list of files changed for this pull request / push. If running on a public CI like Travis or Circle this is used to only run tests/lint for changed files.
def _get_changed_files(): if not ci_diff_helper: return None try: config = ci_diff_helper.get_config() except OSError: # Not on CI. return None changed_files = ci_diff_helper.get_changed_files('HEAD', config.base) changed_files = set([ './{}'.format(filename) for ...
[ "def changed_files(self):\n commits = ['-r {}'.format(c) for c in self.commits]\n command = [self.vcs, 'diff', '--stat'] + commits\n result = _execute(' '.join(command))\n lines = result.strip().split('\\n')[:-1]\n files = [\n line.split('|')[0].strip()\n for...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Filters the list of sample directories to only include directories that contain files in the list of changed files.
def _filter_samples(sample_dirs, changed_files):
    result = []
    for sample_dir in sample_dirs:
        for changed_file in changed_files:
            if changed_file.startswith(sample_dir):
                result.append(sample_dir)
    return list(set(result))
[ "def get_list_of_copied_sample_files(self, repo, changeset_revision, dir):\n deleted_sample_files = []\n sample_files = []\n for changeset in hg_util.reversed_upper_bounded_changelog(repo, changeset_revision):\n changeset_ctx = repo.changectx(changeset)\n for ctx_file in c...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Determines all import names that should be considered "local". This is used when running the linter to ensure that import order is properly checked.
def _determine_local_import_names(start_dir): file_ext_pairs = [os.path.splitext(path) for path in os.listdir(start_dir)] return [ basename for basename, extension in file_ext_pairs if extension == '.py' or os.path.isdir( os.path.join(start_dir, basename)) and...
[ "def _patch_import_nolocal(name, globals=None, locals=None, fromlist=(), level=0):\n if name in ('sascfg_personal'):\n raise ImportError\n else:\n return _real_import(name, globals=globals, locals=locals, fromlist=fromlist, level=level)", "def test_localImportStar(self):\r\n self.flakes...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Installs the App Engine SDK, if needed.
def _setup_appengine_sdk(session):
    session.env['GAE_SDK_PATH'] = os.path.join(_GAE_ROOT, 'google_appengine')
    session.run('gcp-devrel-py-tools', 'download-appengine-sdk', _GAE_ROOT)
[ "def load_sdk():\n # Try to import the appengine code from the system path.\n try:\n from google.appengine.api import apiproxy_stub_map\n except ImportError, e:\n # Hack to fix reports of import errors on Ubuntu 9.10.\n if 'google' in sys.modules:\n del sys.modules['google']\n # Not on the syste...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Lists all sample directories that do not have tests.
def missing_tests(session):
    print('The following samples do not have tests:')
    for sample in set(ALL_SAMPLE_DIRECTORIES) - set(ALL_TESTED_SAMPLES):
        print('* {}'.format(sample))
[ "def file_list_emptydirs(load):\n # TODO - implement this\n _init()\n\n return []", "def emptydirs(ctx, checkdir, relpath):\n if relpath:\n basepath = os.getcwd()\n for i in emptydirs_yield(checkdir):\n click.echo(os.path.relpath(i, basepath))\n else:\n for i in empt...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
(Re)generates the readme for a sample.
def readmegen(session, sample): session.install('jinja2', 'pyyaml') if os.path.exists(os.path.join(sample, 'requirements.txt')): session.install('-r', os.path.join(sample, 'requirements.txt')) in_file = os.path.join(sample, 'README.rst.in') session.run('python', 'scripts/readme-gen/readme_gen....
[ "def readme_md(cls):\n\n template = Helpers.File(Settings.readme_me_template).read()\n\n template = Helpers.Regex(\n template, r\"%%version%%\", replace_with=Settings.version\n ).replace()\n template = Helpers.Regex(\n template, r\"%%lenHosts%%\", replace_with=forma...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a paranoid_pb2.TestResultsEntry protobuf ready for the checks. The created paranoid_pb2.TestResultsEntry is appropriate to be used on tests and have the paranoid_pb2.TestResultsEntry.result filled by the Check function (i.e., set as weak or not).
def _CreateTestResult(self) -> paranoid_pb2.TestResultsEntry:
    if self.severity is None:
        raise KeyError("Please specify self.severity for %s." % self.check_name)
    return paranoid_pb2.TestResultsEntry(
        severity=self.severity, test_name=self.check_name, result=False)
[ "def SetTestResult(test_info: paranoid_pb2.TestInfo,\n test_result: paranoid_pb2.TestResultsEntry):\n if not test_info.paranoid_lib_version:\n # Stores version value in test_info. As checks can be updated and become\n # stronger, this attribute can be useful to know when it makes sense to\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Run the 'program'. 'program' The path to the program to run. 'arguments' A list of the arguments to the program. This list must contain a first argument corresponding to 'argv[0]'. 'stdin' Content of standard input for the program. 'context' A 'Context' giving runtime parameters to the test. 'result' A 'Result' object....
def RunProgram(self, program, arguments, stdin, context, result): # Construct the environment. environment = self.MakeEnvironment(context) e_stdin = stdin c = {} for pair in context.items(): c[pair[0]] = pair[1] for substitution in c.keys(): ...
[ "def RunProgram(self, program, arguments, context, result):\n\n # Construct the environment.\n environment = self.MakeEnvironment(context)\n e_stdin = self.stdin\n c = {}\n for pair in context.items():\n c[pair[0]] = pair[1]\n for substitution in c.keys():\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Restore a database from a backup file. 'database' A database specification. 'backupfile' A backup file name. 'arguments' A list of the arguments to the GBAK without backup file name and database location. 'result' A 'Result' object. The outcome will be 'Result.PASS' when this method is called. The 'result' may be modif...
def RestoreDatabase(self, database, backupfile, arguments, result): self.RunProgram("\""+self.__context["gbak_path"]+"\"", [ self.__context["gbak_path"] ] + [ "-C ", backupfile ] + arguments + [ database ], "", self.__context, result)
[ "def restore_database_snapshot(*args):\n return _ida_kernwin.restore_database_snapshot(*args)", "def restore(context, backup_id, restore_location):\n return AGENT.execute_restore(context, backup_id, restore_location)", "def restore(self, filename, dbname=None, username=None):\n\n # Set default argume...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Run an ISQL script. 'database' A database specification. 'script' An ISQL script. 'arguments' A list of the arguments to the ISQL without database location. 'result' A 'Result' object. The outcome will be 'Result.PASS' when this method is called. The 'result' may be modified by this method to indicate outcomes other th...
def RunScript(self, database, script, arguments, result): self.RunProgram("\""+self.__context["isql_path"]+"\"", [ self.__context["isql_path"] ] + [ database ] + arguments, script, self.__context, result)
[ "def execute(self, script):\n if not isinstance(script, Script):\n raise ValueError(\"Expected script to be an instance of Script\")\n scriptString = script.scriptString\n if script.scriptType == \"dml\":\n if scriptString.endswith(\".dml\"):\n if scriptStri...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Run an ISQL script. 'script' An (optional) GSEC script. 'arguments' A list of the arguments to the GSEC without ISC4 database location and sysdba username and password. 'result' A 'Result' object. The outcome will be 'Result.PASS' when this method is called. The 'result' may be modified by this method to indicate outco...
def RunGsec(self, script, arguments, result): try: self.RunProgram("\""+self.__context["gsec_path"]+"\"", [ self.__context["gsec_path"], "-database", self.__context["server_location"]+ self.__context["isc4_path"], "-user", "SYSDBA", "-password", "masterkey" ]+arguments, ...
[ "def RunScript(self, database, script, arguments, result):\n\n self.RunProgram(\"\\\"\"+self.__context[\"isql_path\"]+\"\\\"\",\n [ self.__context[\"isql_path\"] ] + [ database ] + arguments,\n script, self.__context, result)", "def execute_script(self, script,...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Run the 'program'. 'program' The path to the program to run. 'arguments' A list of the arguments to the program. This list must contain a first argument corresponding to 'argv[0]'. 'context' A 'Context' giving runtime parameters to the test. 'result' A 'Result' object. The outcome will be 'Result.PASS' when this method...
def RunProgram(self, program, arguments, context, result): # Construct the environment. environment = self.MakeEnvironment(context) e_stdin = self.stdin c = {} for pair in context.items(): c[pair[0]] = pair[1] for substitution in c.keys(): ...
[ "def RunProgram(self, program, arguments, stdin, context, result):\n\n # Construct the environment.\n environment = self.MakeEnvironment(context)\n e_stdin = stdin\n c = {}\n for pair in context.items():\n c[pair[0]] = pair[1]\n for substitution in c.keys():\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Perform substitutions on a body of text. Returns: the string 'text', processed with the substitutions configured for this test instance.
def __PerformSubstitutions(self, text):
    for substitution in self.substitutions:
        pattern, replacement = self.SplitValue(substitution)
        text = re.compile(pattern, re.M).sub(replacement, text)
    return text
[ "def process_text(self):\n emit(latex_substitutions.process_text(self.text, self.language, \"TEXT\", packages))", "def transform(self, text):\n return re.sub(self.regex, self.repl, text)", "def postprocess(\n self,\n text: 'str',\n ) -> 'str':", "def translate_text(target, text):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
For level 3 you will be able to use the previous methods, 'level 1 stay' and 'level 2 switch'. The method 'level 3 monte carlo' will be called for 2000 rounds (4000 times). The following conditions must be
def level_3_monte_carlo(self, revealed):
    # Write your code here
    if self.seed < 1000:
        return self.level_1_stay(revealed)
    return self.level_2_switch(revealed)
[ "def method3_1(automaton, level):\r\n\r\n old_bad_twin = automaton\r\n i = 1\r\n first_sync = True\r\n last_sync_level = 1\r\n while i <= level:\r\n new_bad_twin = generate_bad_twin(old_bad_twin, i)\r\n c2 = condition_C2(new_bad_twin)\r\n c3 = condition_C3(new_bad_twin)\r\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get articles for a given news source.
def for_source(source, articles=None):
    if not articles:
        articles = load_articles(nl.read_data())
    source_arts = [a for a in articles if a.source == source]
    for art in source_arts:
        yield art
[ "def fetch_news(self):\n\n articles = []\n\n # the classifier for prediction of political attributes\n\n for source in self.sources:\n\n try:\n url = None\n\n if source is 'nachrichtenleicht':\n url = 'http://www.nachrichtenleicht.de/n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Huber function. An analytic function that is quadratic around its minimum and linear in its tails. Its minimum is at offset. Quadratic between offset - delta and offset + delta, and linear outside.
def huber(x, offset, delta):
    i = np.abs(x - offset) < delta
    return (x - offset)**2 / 2 * i + (1 - i) * delta * (np.abs(x - offset) - delta / 2)
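A short worked check (assuming numpy), evaluating the function in both regimes with offset=0 and delta=1:

import numpy as np

x = np.array([0.5, 2.0])
print(huber(x, offset=0.0, delta=1.0))
# |0.5| < 1  -> quadratic branch: 0.5**2 / 2        = 0.125
# |2.0| >= 1 -> linear branch:    1.0 * (2.0 - 0.5) = 1.5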
[ "def bernstein(u,n,i):\n b = np.zeros(len(u)) \n\n for j in range(0,len(u)):\n b[j] = (u[j]**i)*(1-u[j])**(n-i)\n \n return b", "def basis(n, potential):\n x0 = potential[0]\n x1 = potential[1]\n\n def func(x):\n if x0 < x < x1:\n return(np.sin(np.pi*(x-x0)*n/(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Adds founder to the project model form, saves the project in the database, calls the generate_matches() function to find & save projectuser matches, and redirects to the newly created project.
def form_valid(self, form):
    form.instance.founder = self.request.user
    print('Project Create user:', self.request.user)
    form.save()
    tc_lib.generate_user_matches(form)
    return super(ProjectCreate, self).form_valid(form)
[ "def post_project(request):\n if request.method == \"POST\":\n form = AddProjectForm(request.POST, request.FILES)\n if form.is_valid():\n project = form.save(commit=False)\n project.profile = request.user\n project.save()\n\n return redirect(\"index\")\n e...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create Glue Dev Endpoint
def create_dev_endpoint(self): self.dev_endpoint = self.glue_engine.create_dev_endpoint( EndpointName=self.dev_endpoint_name, RoleArn=self.dev_endpoint_role, PublicKey=self.dev_endpoint_pub_rsa, NumberOfNodes=2, ExtraPythonLibsS3Path=self.python_libra...
[ "def create_endpoint(self, context, endpoint_values):", "def create_endpoint(EndpointName=None, EndpointConfigName=None, Tags=None):\n pass", "def create_dev_endpoint(self, EndpointName: str, RoleArn: str, SecurityGroupIds: List = None, SubnetId: str = None, PublicKey: str = None, PublicKeys: List = None, Nu...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Delete Glue Dev Endpoint
def delete_dev_endpoint(self): self.glue_engine.delete_dev_endpoint(EndpointName=self.dev_endpoint_name)
[ "def delete_endpoint(EndpointName=None):\n pass", "def delete_endpoint_and_config():\n env.client().delete_endpoint(EndpointName=env.setting('model_name'))\n env.client().delete_endpoint_config(EndpointConfigName=env.setting('model_name'))", "def delete_dev_endpoint(self, EndpointName: str) -> Dict:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Connect to Glue Dev Endpoint
def connect_dev_endpoint(self): done = False while not done: endpoint = self.glue_engine.get_dev_endpoint(EndpointName=self.dev_endpoint_name) status = endpoint["DevEndpoint"]["Status"] done = status == "READY" if status == "PROVISIONING": ...
[ "def create_dev_endpoint(self):\n\n self.dev_endpoint = self.glue_engine.create_dev_endpoint(\n EndpointName=self.dev_endpoint_name,\n RoleArn=self.dev_endpoint_role,\n PublicKey=self.dev_endpoint_pub_rsa,\n NumberOfNodes=2,\n ExtraPythonLibsS3Path=self....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This method supports configuring the advanced options of the ZD syslog feature.
def _set_advance_syslog(zd, **kwargs): xlocs = LOCATOR_CFG_SYSTEM_NETWORKMGMT adv_opt = ['zd_facility_name', 'zd_priority_level', 'ap_facility_name', 'ap_priority_level'] adv_cfg = {'pause': 1} adv_cfg.update(kwargs) if zd.s.is_element_present(xlocs['syslog_advanced_setting_collapse']): ...
[ "def get_enable_syslog(self) -> Union[bool, None]:\n # read the original value passed by the command\n enable_syslog = self.raw_param.get(\"enable_syslog\")\n\n # this parameter does not need dynamic completion\n # this parameter does not need validation\n return enable_syslog", ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Configure the country code and related options.
def set_country_code(zd, option, **kwargs): cfg_option = {'country_code': '', 'channel_optimization': '', 'channel_mode':''} cfg_option.update(option) xloc = LOCATOR_CFG_SYSTEM_COUNTRY_CODE xloc_map = { 'country_code': xloc['country_code_listbox'], ...
[ "def domain_settings_set_country(self, country):\n return self._request('domain/settings/set_country', inspect_args_func(inspect.currentframe()))", "def country_code(self, country_code):\n \n self._country_code = country_code", "def country(self, country):\n self._country = country",...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Clear and reload the menu with a new set of options. valueList: list of new options. value: initial value to set the optionmenu's menubutton to.
def SetMenu(self, valueList, value=None):
    self['menu'].delete(0, 'end')
    for item in valueList:
        self['menu'].add_command(label=item,
            command=_setit(self.variable, item, self.command))
    if value:
        self.variable.set(value)
[ "def UpdateDictDropDown(self,NewList) :\n\t\tself.AvailDictsMenu['menu'].delete(0,'end')\n\t\tif (NewList is not None) and (len(NewList) > 0) :\n\t\t\tself.AvailDictsVar.set(str(NewList[0][0]))\n\t\t\tfor choice,readable in NewList :\n\t\t\t\tchoicestr = '%s: %s'%(choice,readable)\n\t\t\t\tself.AvailDictsMenu['menu...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Encodings for "Embarked" column 2 == "S" == Southampton == 644 people 0 == "C" == Cherbourg == 168 people 1 == "Q" == Queenstown == 77 people 3 == "Unknown" == 2 people 177 records missing age values set to the average age Missing embark_towns are set to "Other" Encodings for "Class" First class == 0 Second class == 1 ...
def prepare_titanic_data(df): df.embark_town.fillna('Other', inplace=True) # Drop deck and embarked_town df.drop(columns=['deck', 'embark_town'], inplace=True) # Encoding: Objects (Categorical Variables) to Numeric # Use sklearn's LabelEncoder encoder = LabelEncoder() # Set Unknown and e...
[ "def clean_data(df):\n \n # Put in code here to execute all main cleaning steps:\n # convert missing value codes into NaNs, ...\n for i in range(len(feat_info)):\n missing = feat_info.iloc[i]['missing_or_unknown']\n missing = missing.replace('[','')\n missing = missing.replace(']','...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
0 == 'setosa' 1 == 'versicolor' 2 == 'virginica' This function will encode the species by default, but can optionally show the species name as a string when the second argument is False. prepare_iris_data(df) returns encoded species name prepare_iris_data(df, False) returns species name
def prepare_iris_data(df, encode=True): # Drop primary/foreign keys df = df.drop(columns=["measurement_id", "species_id"]) # Rename "species_name" to species df = df.rename(columns={"species_name": "species"}) if(encode): encoder = LabelEncoder() encoder.fit(df.species) ...
[ "def iris(species=None):\n # get subset or full data\n if species is not None:\n data = DATA.loc[DATA.species == species, :]\n else:\n data = DATA\n\n # convert to JSON and return to endpoint\n sdata = data.to_json(orient=\"index\")\n jdata = json.loads(sdata)\n return jdata", "...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the snapchat IDs that have already been downloaded and returns them in a set.
def get_downloaded():
    result = set()
    for name in os.listdir(PATH):
        filename, ext = name.split('.')
        if ext not in EXTENSIONS:
            continue
        ts, username, id = filename.split('+')
        result.add(id)
    return result
[ "def ids(self):\n return set(self._ids)", "def get_id_set(self):\n s = set()\n for player in Player.select(Player.player_id):\n s.add(player.player_id)\n return s", "def find_new_uids(cfg, imap):\n uids = check_imap_response(imap.uid(\"SEARCH\", None,\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Download a specific snap, given output from s.get_snaps().
def download(s, snap): id = snap['id'] name = snap['sender'] ts = str(snap['sent']).replace(':', '-') result = s.get_media(id) if not result: return False ext = s.is_media(result) filename = '{}+{}+{}.{}'.format(ts, name, id, ext) path = PATH + filename with open(path, 'w...
[ "def download_snaps(s):\n\n existing = get_downloaded()\n\n snaps = s.get_snaps()\n for snap in snaps:\n id = snap['id']\n if id[-1] == 's' or id in existing:\n print 'Skipping:', id\n continue\n\n result = download(s, snap)\n\n if not result:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Download all snaps that haven't already been downloaded.
def download_snaps(s):
    existing = get_downloaded()
    snaps = s.get_snaps()
    for snap in snaps:
        id = snap['id']
        if id[-1] == 's' or id in existing:
            print 'Skipping:', id
            continue
        result = download(s, snap)
        if not result:
            print 'FAILED:', id...
[ "def download_all_imgs(self):\n # get all images that haven't been downloaded\n imgs = self.db.images.find({'downloaded': False})\n for img in imgs:\n success = self.download_img(img['url'])\n if success:\n self.db.images.update({'_id': img['_id']},\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Encodes a native Python value in a way that the API expects. Encodes lists and dicts to JSON and boolean values to 'true' or 'false'.
def api_encode(value):
    if type(value) in (dict, list):
        return json_encode(value)
    elif type(value) == bool:
        return str(value).lower()
    return value
[ "def encode(self, value: Any) -> Any:", "def JsonEncode(py_value):\n return JSON_ENCODER.encode(py_value)", "def encode(self, value):\r\n pass", "def encode(self, value):\r\n # type: (Union[Tuple, List, OrderedDict, Dict, bool, int, str, bytes]) -> bytes\r\n return self.encoder.encode(va...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Low-level method for making API calls. It handles encoding the parameters, constructing authentication headers, decoding the response, and converting API error responses into Python exceptions.
def call(self, api_call, **kwargs):
    # Encode values for the API (JSON, bools, nulls)
    params = dict((key, api_encode(value)) for key, value in kwargs.iteritems() if value is not None)
    params.update(self.defaults)
    if api_call[0] != "/":
        api_call = "/" + api_call
    ...
[ "def _execApiCall(headers, params, method_name,\r\n domain='ma.gnolia.com',\r\n urlhead='/api/rest/1/'):\r\n \r\n if 'api_key' not in params and method_name not in ['echo', 'get_key']:\r\n raise MagnoliaException('Required API Key parameter missing')\r\n conn = httplib....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parse the response from the API, decoding the JSON and converting errors into exceptions.
def parse_response(self, response):
    data = json_decode(response)
    if data['stat'] == 'error':
        self.logger.debug("Response:\n" + json_encode(data, indent=4))
        try:
            message = data['error_description']
        except KeyError:
            message = data['messa...
[ "def _handle_response(self, resp):\n\n try:\n resp.raise_for_status()\n results = json.loads(resp.text)\n except requests.RequestException:\n raise Exception(resp.text)\n except JSONDecodeError:\n raise Exception(\"Error in parsing: {}\".format(resp.t...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sign the API call by generating an "Authentication" header. This method will add headers to the request object and remove auth_token, client_id, and client_secret from the parameters if they exist.
def sign_request(self, request, api_call, params):
    for key, value in params.items():
        params[key] = value.encode('utf-8')
    # Do not POST authentication parameters. Use them to create an
    # authentication header instead.
    access_token = params.pop('access_token', None)
    cl...
[ "def sign_request(self, url, method, body, headers):\n headers['Authorization'] = self.auth_header()", "def _addAuthenticationToRequestHeader(request, client):\n request.addAuthorization(client.id, client.secret)", "def _add_auth_headers(self, request):\n if not self.apisecret:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculates softmax across a desired axis. Arguments
def softmax(x: jnp.DeviceArray, *, axis: int = 0) -> jnp.DeviceArray:
    return jnp.exp(x) / jnp.expand_dims(jnp.sum(jnp.exp(x), axis=axis), axis)
[ "def Softmax(x):\r\n return np.exp(x) / np.sum(np.exp(x), axis=0)", "def softmax(input, dim=None):\n return F.softmax(input, dim)", "def softmax4(x):\n ndim = K.ndim(x)\n if ndim == 2:\n return K.softmax(x)\n elif ndim == 3:\n e = K.exp(x - K.max(x, axis=-1, keepdims=True))\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculates logsoftmax across a desired axis. Arguments
def log_softmax(x: jnp.DeviceArray, *, axis: int = 0) -> jnp.DeviceArray:
    return x - jnp.expand_dims(jnp.log(jnp.sum(jnp.exp(x), axis=axis)), axis)
[ "def logsoftmax(input_tensor):\n return input_tensor - reduce_logsumexp(input_tensor, keep_dims=True)", "def logsoftmax(input_tensor):\n return input_tensor - reduce_logsumexp(input_tensor, reduction_indices=0, keep_dims=True)", "def _log_softmax(x):\n return x - logsumexp(x)", "def log_softmax_nd(logits...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Copy contents of one stream into another.
def copyStreamToStream(streamFrom, streamTo, input_length=sys.maxint, offset=0, buffer=2 ** 2 ** 2 ** 2):
    streamFrom.seek(offset, 0)
    nbytes = 0
    while nbytes < input_length:
        chunk = streamFrom.read(min(input_length - nbytes, buffer))
        if not chunk:
            ...
[ "def stream_copy(source, destination, chunk_size=512*1024):\r\n br = 0\r\n while True:\r\n chunk = source.read(chunk_size)\r\n destination.write(chunk)\r\n br += len(chunk)\r\n if len(chunk) < chunk_size:\r\n break\r\n # END reading output stream\r\n return br", ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Print out memory usage statistics.
def print_memory_stats(location_tag="undef"):
    try:
        import psutil
        p = psutil.Process(os.getpid())
        rm, vm = p.get_memory_info()
        print "MEM_STAT (%s) rm=%s, vm=%s" % (location_tag, rm, vm)
    except ImportError:
        print "psutil module not available"
[ "def print_memory_diags(disable_print=False):\n process = psutil.Process(os.getpid())\n memory = process.memory_info().rss/1000000000.0\n if not disable_print:\n logging.info('\\tMemory usage: {:.3f} GB'.format(memory))\n return memory", "def print_current_mem_usage():\n mem = get_current_me...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Change the firing power.
def cambiar_potencia(self, potencia):
    self.potencia += potencia
    self.partida.actualizar_marcador()
[ "def dolzina_poti(pot):", "def mover_bm_derecha(self):\n self.nueva_posicion_posible_parte_superior = self.mapa.consultar_casilla_por_movimiento([self.casilla[0] + 1,\n self.casilla[1]],\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check whether the missile has collided with any enemy.
def comprobar_enemigos(self):
    for enemigo in self.partida.jugadores:
        if enemigo != self.partida.jugador_actual:
            if self.distance(enemigo.xcor(), enemigo.ycor()) <= 35:
                return True
    return False
[ "def checkeo_e(jugador, entrenador):\n aliado = jugador.lista_equipo[0] #Se asigna el aliado\n enemigo = entrenador.lista_equipo[0] #Se asigna el enemigo\n subir_nivel = False #Variable para saber si ha subido de nivel\n\n\n # Los dos tiene vida\n if aliado.HP > 0 and enemigo.HP > 0:\n\n # El...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Walk Animation
walk_images = os.path.join(_RESFOLDERS, 'Walk', '.gif')
walk_list = glob.glob(walk_images)
def iniciar_sprites(self):
    res_gifs = os.path.join(_RESFOLDERS, '**', '*.gif')
    gifs_list = glob.glob(res_gifs, recursive=True)
    for gif in gifs_list:
        self.guardar_sprite(gif)
[ "def load_images(self):\r\n self.standing_frame = [load_image(\"cat1.png\")]\r\n self.walk_frames_r = [load_image(\"cat2.png\"), load_image(\"cat3.png\"),\r\n load_image(\"cat4.png\")]", "def load_images(self, folder):\n cwd = os.getcwd()\n dir = cwd + '/' ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Populate choices using installed apps' names.
def _get_target_choices():
    apps = [('public', _("Public website"))]
    for model, entity in registry.registry.items():
        if entity.menu:
            appname = model._meta.app_label.lower()
            apps.append((appname, unicode(entity.label)))
    return tuple(apps)
[ "def _load_installed_applications(self):\n for application in self.settings.get('apps', None) or []:\n path = None\n if isinstance(application, six.string_types):\n application_name = application\n if application.startswith('gordon.contrib.'):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Computes the difference between nuclear luminosity and stellar luminosity.

Arguments
    radius (scaled units)
    mass (scaled units)
    delta_m, eta, xi
        convergence parameters
    mue
        mean molecular weight
    pp_factor
        multiplicative factor for rate

Returns
    Lnuc(R) - 4*pi*R**2*sigma*Teff**4
def lum_difference(radius,mass,delta_m,eta,xi,mue,pp_factor):
    m,r,p,Lnuc = integrate(mass,radius,delta_m,eta,xi,mue,pp_factor,max_steps=10000)
    return Lnuc[-1]-surface_luminosity(Teff_for_main(m[-1]),r[-1])
[ "def find_radius(mass,delta_m,eta,xi,mue,pp_factor):\n\n #range of radii; reason in detail under step 9 of report\n r_low = 0.01*Rsun # MKS\n r_high = 3*Rsun # MKS\n \n radius = brentq(lum_difference, r_low, r_high, xtol=1.0e-4, args = (mass,delta_m,eta,xi,mue,pp_factor))\n return radius", "def ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
For a given mass, calls rootfind over some range of radii and integrates over the function until the difference in luminosity is zero (nuclear luminosity = surface luminosity).

Arguments
    mass (scaled units)
    delta_m, eta, xi
        convergence parameters
    mue
        mean molecular weight
    pp_factor
        multiplicative factor for rate

Returns
    radi...
def find_radius(mass,delta_m,eta,xi,mue,pp_factor):
    #range of radii; reason in detail under step 9 of report
    r_low = 0.01*Rsun   # MKS
    r_high = 3*Rsun     # MKS

    radius = brentq(lum_difference, r_low, r_high, xtol=1.0e-4, args = (mass,delta_m,eta,xi,mue,pp_factor))
    return radius
[ "def lum_difference(radius,mass,delta_m,eta,xi,mue,pp_factor):\n m,r,p,Lnuc = integrate(mass,radius,delta_m,eta,xi,mue,pp_factor,max_steps=10000)\n return Lnuc[-1]-surface_luminosity(Teff_for_main(m[-1]),r[-1])", "def _newton_solver(manom, ecc, tolerance=1e-9, max_iter=100, eanom0=None):\n # Ensure manom...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Simple function to create or load an existing label encoder. If mode is train, always create a new label_encoder.
def get_or_make_label_encoder(params, problem, mode, label_list=None, zero_class=None):
    problem_path = params.ckpt_dir
    create_path(problem_path)
    le_path = os.path.join(problem_path, '%s_label_encoder.pkl' % problem)
    if mode == 'train' and not os.path.exists(le_path):
        label_encoder = LabelEncode...
[ "def load_encoder(checkpoint, encoder_cls,\n HIDDEN_SIZE, embedding, ENCODER_N_LAYERS, DROPOUT, encoder_name, bidirectional):\n model = encoder_cls(HIDDEN_SIZE, embedding, ENCODER_N_LAYERS, DROPOUT,\n gate=encoder_name, bidirectional=bidirectional)\n model.load_state_dic...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given an input string, returns it as the body of an HTML document. Adapted from example.py included in the docutils distribution.
def rst_to_html(input_string, source_path=None, destination_path=None,
                input_encoding='unicode', doctitle=1, initial_header_level=1):
    overrides = {'input_encoding': input_encoding,
                 'doctitle_xform': doctitle,
                 'initial_header_level': initial_header_level,
                 ...
[ "def simple_page_content():\n return \"\"\"<html xmlns=\"http://www.w3.org/1999/xhtml\"><head></head>\n <body>\n <div id=\"content\">\n <p>\n Some <strong>text</strong>\n </p>\n </div>\n <textarea id=\"textarea\">area text</textarea>\n </body>\n</ht...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Runs this transform over the given content. We return a new string that is the result of this transform.
def run(self, content):
    parts = []
    offset = 0
    for match in self.regexp.finditer(content):
        parts.append(content[offset:match.start(0)])
        parts.append(self.replace(match))
        offset = match.end(0)
    parts.append(content[offset:])
    return ''.join(parts)
[ "def pre(self, content):\n return content", "def transform(self, data, input_content_type, output_content_type):\n return self.transform_fn(data, input_content_type, output_content_type)", "def transform(self, stdout):\n return stdout", "def transform(self, text):\n return re.sub(s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a list of roots of a linear polynomial.
def roots_linear(f):
    r = -f.nth(0)/f.nth(1)
    dom = f.get_domain()

    if not dom.is_Numerical:
        if dom.is_Composite:
            r = factor(r)
        else:
            from sympy.simplify.simplify import simplify
            r = simplify(r)

    return [r]
[ "def roots(self):\n return self.poly.roots()", "def getRoots(self):\n # This part is for exercise 11\n # return []\n \n # This part is for exercise 12\n if self.getDegree() == 0:\n return []\n if self.getDegree() == 1:\n return LinearPolynomia...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
r""" Returns a list of roots of a quartic polynomial. There are many references for solving quartic expressions available [15]. This reviewer has found that many of them require one to select from among 2 or more possible sets of solutions and that some solutions work when one is searching for real roots but do not wor...
def roots_quartic(f):
    _, a, b, c, d = f.monic().all_coeffs()

    if not d:
        return [S.Zero] + roots([1, a, b, c], multiple=True)
    elif (c/a)**2 == d:
        x, m = f.gen, c/a
        g = Poly(x**2 + a*x + b - 2*m, x)
        z1, z2 = roots_quadratic(g)
        h1 = Poly(x**2 - z1*x + m, x)
        h2...
[ "def roots_quintic(f):\n result = []\n\n coeff_5, coeff_4, p_, q_, r_, s_ = f.all_coeffs()\n\n if not all(coeff.is_Rational for coeff in (coeff_5, coeff_4, p_, q_, r_, s_)):\n return result\n\n if coeff_5 != 1:\n f = Poly(f / coeff_5)\n _, coeff_4, p_, q_, r_, s_ = f.all_coeffs()\n\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a list of roots of a binomial polynomial. If the domain is ZZ then the roots will be sorted with negatives coming before positives. The ordering will be the same for any numerical coefficients as long as the assumptions tested are correct, otherwise the ordering will not be sorted (but will be canonical).
def roots_binomial(f):
    n = f.degree()

    a, b = f.nth(n), f.nth(0)
    base = -cancel(b/a)
    alpha = root(base, n)

    if alpha.is_number:
        alpha = alpha.expand(complex=True)

    # define some parameters that will allow us to order the roots.
    # If the domain is ZZ this is guaranteed to return roots...
[ "def find_roots(p):\n\n return sorted(p.nroots(n=100))", "def almost_positive_roots(self):\n assert self.cartan_type().is_finite()\n return sorted([ -beta for beta in self.simple_roots() ] + list(self.positive_roots()))", "def real_roots(coeffs):\n all_roots = np.roots(coeffs)\n fil...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Find ``(L, U)`` such that ``L <= phi^-1(m) <= U``.

Examples
========

>>> from sympy.polys.polyroots import _inv_totient_estimate
>>> _inv_totient_estimate(192)
(192, 840)
>>> _inv_totient_estimate(400)
(400, 1750)
def _inv_totient_estimate(m):
    primes = [ d + 1 for d in divisors(m) if isprime(d + 1) ]

    a, b = 1, 1

    for p in primes:
        a *= p
        b *= p - 1

    L = m
    U = int(math.ceil(m*(float(a)/b)))

    P = p = 2
    primes = []

    while P <= U:
        p = nextprime(p)
        primes.append(p)
        ...
[ "def findPotential(L, boundaryConditions, Minv = None):\n\tX = findStableState(L, boundaryConditions, Minv)\n\treturn np.trace(X.T.dot(L).dot(X))", "def get_estimated_inverse_slope(temp, temp_err, visc, visc_err, rad, rad_error):\n inv_slope = (1/get_slope(temp, visc, rad))\n rel_error = get_relative_error(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }