query
stringlengths
9
9.05k
document
stringlengths
10
222k
negatives
listlengths
19
20
metadata
dict
objectiin querysetiig avna. Tuhain querysetiin date_time uy deh datag excel export hiine
def export_to_excel(self, worksheet, row_start, col_start, queryset, date_time=None):
    """Export the history data of ``queryset`` at ``date_time`` to an Excel worksheet.

    (Translated from the original Mongolian note: takes a queryset of objects
    and exports each object's data as of ``date_time`` to Excel.)

    BUG FIX: the original signature used ``date_time=timezone.now()``, which is
    evaluated ONCE at import time, so every later call silently reused a stale
    timestamp.  ``None`` is now the sentinel and the current time is computed
    per call.

    worksheet  -- xlsxwriter-style worksheet to write into.
    row_start, col_start -- top-left cell of the export area.
    queryset   -- objects to export; if empty, a "no data" message is written.
    date_time  -- point in time for the history snapshot; defaults to now.
    """
    if date_time is None:
        date_time = timezone.now()
    if queryset:
        [row_write, col_write] = self.excel_write_header_and_format(worksheet, row_start, col_start)
        for q in queryset:
            # object_excel_write converts the object's history at date_time into Excel rows
            [row_write, col_write] = q.object_excel_write(worksheet, row_write, col_write, date_time=date_time)
    else:
        worksheet.write_string(row_start, col_start, u'Мэдээлэл байхгүй')
[ "def export_excel(self, request, queryset, export_name=''):\n if not export_name:\n export_name = 'contacts_edn_%s' % datetime.now().strftime('%d-%m-%Y')\n return ExcelResponse(queryset, export_name)", "def export_any_queryset(request, queryset, filename, excluded_fields=[], included_fiel...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Durations are stored as 'dict string keys'. The keys need to be converted to floats, then ordered, and the scenes returned with their calculated durations.
def parse_scene_order(self, data, timesigniture):
    """Order scene entries by their (float/timecode) keys and resolve durations.

    data -- dict mapping string keys (floats or timecodes) to scene-item dicts;
        mutated in place: each item gains a 'key' entry and a resolved float
        'duration'.
    timesigniture -- passed through to timecode_to_beat for timecode keys.

    Returns the scene items as a list sorted by parsed key, or () for no data.
    Duration aliases: 'match_next' / 'match_prev' copy a neighbour's duration,
    'match <key>' copies the duration of the scene at that key, and 'auto' /
    missing uses the gap to the next key (or self.DEFAULT_DURATION).
    """
    if not data:
        return ()
    num_scenes = len(data)

    def attempt_parse_key_timecode(value):
        # Best-effort conversion: float first, then timecode; on failure the
        # original value is returned unchanged (callers check the type).
        if not value:
            return value
        try:
            return float(value)
        except (ValueError, TypeError):
            pass
        try:
            return timecode_to_beat(value, timesigniture)
        except (AssertionError, ValueError, AttributeError):
            pass
        return value

    # Surface the original key value in the dict (useful for debugging)
    for key, value in data.items():
        if value:
            value['key'] = key
    data_float_indexed = {attempt_parse_key_timecode(k): v for k, v in data.items()}
    # Key collisions after parsing would silently drop scenes — assert against it.
    assert len(data_float_indexed) == num_scenes
    sorted_keys = sorted(data_float_indexed.keys())
    assert len(sorted_keys) == num_scenes

    def normalise_duration(index):
        """
        Convert any time code or alias to a linear float value.
        e.g.
          '1.2' parses to -> 1.5
          'match_next' resolves to -> 4.0
        May recurse into neighbours; writes the resolved float back into the
        item so repeated calls are cheap (memoised via the item dict).
        """
        key = sorted_keys[index]
        item = data_float_indexed[key]
        if not item:
            # Empty/None scene payload: synthesise one so a duration can be stored.
            item = {'duration': 'auto'}
            data_float_indexed[key] = item
        duration = attempt_parse_key_timecode(item.get('duration'))
        if duration == 'match_next':
            duration = normalise_duration(index+1)
        if duration == 'match_prev':
            duration = normalise_duration(index-1)
        if isinstance(duration, str) and duration.startswith('match '):
            # NOTE(review): str.strip('match ') strips a character set, not the
            # prefix — works for numeric remainders but is fragile; confirm.
            duration = normalise_duration(sorted_keys.index(float(duration.strip('match '))))
        if (not duration or duration == 'auto') and index < len(sorted_keys)-1:
            # Default duration: the gap until the next scene's key.
            duration = sorted_keys[index+1] - key
        if not isinstance(duration, float):
            #log.info('Unparsed duration: {0}'.format(duration))
            duration = self.DEFAULT_DURATION
        if duration != item.get('duration'):
            item['duration'] = duration
        return duration

    for index in range(len(sorted_keys)):
        normalise_duration(index)
    scene_items = []
    for key in sorted_keys:
        scene_item = data_float_indexed[key]
        assert scene_item and scene_item.get('duration') >= 0, "All scene must have durations. Something has failed in parsing. {0}:{1}".format(key, scene_item)
        scene_items.append(scene_item)
    return scene_items
[ "def breakdict(self, rawseconds):\n qt = abs(rawseconds)\n divtime = OrderedDict()\n for plc, (kt, vt) in enumerate(self.timeunits.viewitems()):\n qt, leftover = divmod(qt, vt)\n if qt:\n divtime[kt] = int(qt)\n if leftover < 1:\n i...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Once the order of the items is known, we can iterate over the scenes, calculating/prerendering the dmx state for each section. This makes seeking much faster.
def pre_render_scene_item(self, current_scene_item, previous_scene_item):
    """Pre-compute the DMX state dicts for one scene item (for fast seeking).

    Stores two DMX arrays under current_scene_item[Scene.SCENE_ITEM_DMX_STATE_KEY]:
    'previous' (the state the scene starts from) and 'target' (the state it
    renders towards).  current_scene_item is mutated in place.

    previous_scene_item -- the already pre-rendered preceding item, or None
        for the first scene (a fresh DMX array is used instead).
    """
    assert current_scene_item
    current_scene_dmx = current_scene_item.setdefault(Scene.SCENE_ITEM_DMX_STATE_KEY, {})
    # Aquire a reference to the previous DMX state
    # NOTE(review): if previous_scene_item exists but has no 'target' under its
    # DMX-state key, the ['target'] lookup on the .get(..., {}) default raises
    # KeyError — presumably items are always pre-rendered in order; confirm.
    current_scene_dmx['previous'] = copy.copy(previous_scene_item.get(Scene.SCENE_ITEM_DMX_STATE_KEY, {})['target']) if previous_scene_item else AbstractDMXRenderer.new_dmx_array()
    # The target state is a copy of the previous state
    current_scene_dmx['target'] = copy.copy(current_scene_dmx['previous'])
    # Modify the starting/previous state based on any overrides in this scene
    # (this is a shortcut feature as the author kept requiring it)
    self.render_state_dict(current_scene_item.get('state_start'), current_scene_dmx['previous'])
    # Modify the target state based on this scene item
    self.render_state_dict(current_scene_item.get('state'), current_scene_dmx['target'])
[ "def updateItemControls(self):\n\t\tsuper(EMSliceInspector, self).updateItemControls()\n\t\t# Anything that needs to be updated when the scene is rendered goes here.....\n\t\tself.use_3d_texture_checkbox.setChecked(self.item3d().use_3d_texture)\n\t\tdata = self.item3d().getParent().getData()\n\t\tmin = data[\"minim...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given a list of parsed scene_items (a plain list of dicts), provide methods for rendering that data. timesigniture is only used for debug printing.
def __init__(self, scene_items, timesigniture=DEFAULT_TIMESIGNITURE_):
    """Hold a list of parsed scene_items (plain dicts) and pre-total their beats.

    scene_items  -- list of dicts, each with a numeric 'duration' entry.
    timesigniture -- kept only for debug printing.
    """
    self.scene_items = scene_items
    self.timesigniture = timesigniture
    total = 0
    for item in self.scene_items:
        total += item['duration']
    self.total_beats = total
[ "def create_stac_items(\n scenes_list: str, grid_geom: str, collection: int = 1, level: int = 1\n):\n # Read WRS2 Grid geometries\n with open(grid_geom, \"r\") as f:\n wrs_grid_list = [json.loads(line) for line in f.readlines()]\n pr = [x[\"properties\"][\"PR\"] for x in wrs_grid_list]\n\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a list of all live Python objects, not including the list itself.
def get_all_objects():
    """Return a list of all live Python objects.

    The bookkeeping containers used during the scan (the result list, gc's
    own object list, and the 'seen' map) are excluded from the result.
    """
    gc.collect()
    root_objects = gc.get_objects()
    collected = []
    visited = {}
    # Pre-mark our own bookkeeping structures so they are never reported.
    for bookkeeping in (root_objects, collected, visited):
        visited[id(bookkeeping)] = None
    # _getr does the real recursive walk.
    _getr(root_objects, collected, visited)
    return collected
[ "def get_all_objects():\n gc.collect()\n gcl = gc.get_objects()\n olist = {}\n _getr(gcl, olist)\n \n del olist[id(olist)]\n del olist[id(gcl)]\n del olist[id(sys._getframe())]\n return olist", "def get_all_objects():\n gcl = gc.get_objects()\n olist = []\n seen = {}\n # Jus...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Method that returns the rest energy of the particle.
def RestEnergy(self):
    """Return the particle's rest energy, E0 = m * c**2."""
    light_speed = const.speed_of_light
    return (self.restMass * light_speed) * light_speed
[ "def getEnergy(self):\n if not hasattr(self,\"energy\"):\n self.energy = self.calcEnergy()\n return self.energy", "def energy(self):\n return self.elstate.energy(self.vsig)", "def energy(self):\n return self.mc.energy(self.chain)", "def energy(self):\n return 0.5*(self.u**2 + s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Method that returns Beta (velocity/speed of light) as a float
def BetaVelocity(self):
    """Return beta = |velocity| / c as a float."""
    speed = np.linalg.norm(self.velocity)
    return speed / const.speed_of_light
[ "def beta(self):\n eTheta = self.eTheta()\n cosOmg = np.cos(self.omega())\n return self.a1()/c.c*(1-eTheta**2)**0.5*cosOmg", "def beta(vector):\n return mass(vector) / time_component(vector)", "def calc_beta(self):\n if self._angle_beta == 0:\n if self._angle_alpha != 0...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Method that returns the Lorentz Factor of the particle.
def LorentzFactor(self):
    """Return the Lorentz factor, gamma = 1 / sqrt(1 - beta**2).

    Use of abs() and x ** 0.5 provides a more stable calculation of the
    Lorentz factor than math.sqrt() at high velocities.
    """
    beta = Particle.BetaVelocity(self)
    return 1 / abs(1 - beta * beta) ** 0.5
[ "def calc_lumin(self):\r\n return -1./self.tau*self.c", "def lorentz(x, gamma):\n return 1 / cs.pi * 0.5 * gamma / ((0.5 * gamma**2) + x**2)", "def lorentzian(self, params):\n height, width, c_freq = params\n return height / (1.0+ (4.0 / width**2)*(self.freqs - c_freq)**2)", "def Lumin...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Method that returns the relativistic momentum of the particle
def Momentum(self):
    """Return the relativistic momentum vector, p = gamma * m * v (numpy array)."""
    gamma = Particle.LorentzFactor(self)
    velocity_vec = np.array(self.velocity, dtype=float)
    return np.multiply(gamma, velocity_vec) * self.restMass
[ "def momentum(self):\n return self.mass * self.velocity", "def getMomentum(self):\n return self.p", "def calcMomentumFromVelocity(self):\n if self.mass is None:\n raise CoordinateVector(\"The particle mass needs to be specified to calculate the particle momentum from velocity.\")\n values =...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Method that returns the electric field from the particle that affects another particle.
def GenerateElectricField(self, affectedParticle):
    """Return the electric field from this particle acting on affectedParticle."""
    field_source = self.electricField
    return field_source.GenerateField(affectedParticle)
[ "def compute_electric_field(self):\n self.set_grid()\n rho = self.grid.distribute(self.bunch.positions)\n rho *= self.bunch.line_charge_density * 4 # unknown origin\n phi = self.solver.get_potential(rho, self.bunch.line_charge_density)\n Ex, Ey = self.grid.gradient(-phi)\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Method that returns the magnetic field from the particle that affects another particle.
def GenerateMagneticField(self, affectedParticle):
    """Return the magnetic field from this particle acting on affectedParticle."""
    field_source = self.magneticField
    return field_source.GenerateField(affectedParticle)
[ "def magnetisation(field):\n # TODO: Valid volume\n return field.mean() / field.orientation.norm.mean()", "def magnetisation(field):\n norm_field = df.Field(field.mesh, dim=1, value=(field.norm.array != 0))\n volume = df.integral(norm_field * df.dV, direction='xyz')\n return df.integral(field * df....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the initialized component manager. This is used as FastAPI dependency and called for every request.
def get_component_manager(
    token: str = Depends(get_api_token),
) -> ComponentOperations:
    """Return an initialized component manager.

    Used as a FastAPI dependency, so this is invoked for every request;
    the bearer token is attached to the HTTP session's headers.
    """
    http_session = BaseUrlSession(base_url=CONTAXY_API_ENDPOINT)
    http_session.headers = {"Authorization": f"Bearer {token}"}
    return ComponentClient(http_session)
[ "def __get_manager(self):\r\n if self.__manager is not None:\r\n return self.__manager\r\n x = IBSManager()\r\n self.__manager = x\r\n return x", "def get_manager():\n\n return multiprocessing.Manager()", "def getManager():\n global __manager\n if __manager is...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get a string for the status overview of the pool and nodes.
def get_pool_overview_string(self, mission):
    """Return a human-readable status overview of the pool and its nodes."""
    pool_status, allocation_status, node_status = self.get_pool_status(mission)
    lines = [
        "Pool status: {}".format(pool_status),
        "Allocation status: {}".format(allocation_status),
    ]
    if pool_status != "N/A":
        # Everything that is not idle/running/unusable is lumped into "other".
        accounted = (node_status["idle"] + node_status["running"]
                     + node_status["unusable"])
        other = sum(node_status.values()) - accounted
        lines.append(
            "Node status: {} idle; {} running; {} unusable; {} other;".format(
                node_status["idle"], node_status["running"],
                node_status["unusable"], other))
    return "\n".join(lines)
[ "def status(ctx):\n return show_network_status()", "def get_pool_status():\n pools_status = split_status_pools(fork_and_get_output(\"zpool status\".split()))\n pools = []\n for p in pools_status:\n pools.append(status.PoolStatus(p))\n return pools", "def printStatus(self):\n output ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get a string for the status overview of the job and tasks.
def get_job_overview_string(self, mission):
    """Return a human-readable status overview of the job and its tasks."""
    job_status, task_status = self.get_job_status(mission)
    lines = ["Job status: {}".format(job_status)]
    if job_status != "N/A":
        lines.append(
            "Tasks status: {} active; {} running; {} succeeded; {} failed;".format(
                task_status["active"], task_status["running"],
                task_status["succeeded"], task_status["failed"]))
    return "\n".join(lines)
[ "def _get_job_status(self):\n total_hits = session.query(BoxHit).filter_by(training_job_id=self.id).count()\n num_hits_left = session.query(BoxHit).filter_by(training_job_id=self.id, outstanding=True).count()\n total_urls = self.num_urls\n num_urls_left = session.query(VideoTrainingURL)....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the status of a mission's storage container.
def get_storage_container_status(self, mission):
    """Return 'available' if the mission's storage container exists, else 'N/A'."""
    container_exists = self.storage_client.exists(container_name=mission.container_name)
    if container_exists:
        # TODO: calculate space used in the container
        return "available"
    return "N/A"
[ "def get_storage_container_overview_string(self, mission):\n\n status = self.get_storage_container_status(mission)\n s = \"Storage container status: {}\".format(status)\n return s", "def container_status(self) -> str:\n return pulumi.get(self, \"container_status\")", "def container_a...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get a string for the status of the storage container.
def get_storage_container_overview_string(self, mission):
    """Return a one-line status string for the mission's storage container."""
    return "Storage container status: {}".format(
        self.get_storage_container_status(mission))
[ "def container_status(self) -> str:\n return pulumi.get(self, \"container_status\")", "def storage_bytes_status(self) -> str:\n return pulumi.get(self, \"storage_bytes_status\")", "def get_storage_container_status(self, mission):\n\n if self.storage_client.exists(container_name=mission.cont...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Shows a simple scatterplot of X, colored by the classes in y. Technically, this shows the 1st three principal components of X if X has more than 3 dimensions. If X only has 2 dimensions, then just a 2-dimensional scatterplot is returned. This will not produce a plot for 1-dimensional data.
def plot_data(X, y): x_dim = X.shape[1] # Ignore 1 dimensional data if x_dim == 1: print("plot_data not gonna bother with 1 dimensional data") return # For 2 dimensional data, just plot it if x_dim == 2: plt.scatter(X[:,0], X[:,1], c=y) plt.show() return # For at least 4 dimensions, do PCA if x_dim >= 4: pca = PCA(n_components=3) pca.fit(X) plot_x = pca.transform(X) else: plot_x = X # Assumes y is either 1 or 0 pos_idxs = np.where(y == 1)[0] neg_idxs = np.where(y == 0)[0] # Plot the now 3 dimensional data fig = plt.figure() ax = fig.add_subplot(111, projection='3d') Xs = plot_x[neg_idxs, :] ax.scatter(Xs[:,0], Xs[:,1], Xs[:,2], color='orange') Xs = plot_x[pos_idxs, :] ax.scatter(Xs[:,0], Xs[:,1], Xs[:,2], color='purple') # Label plot if x_dim >= 4: ax.set_title("PCA of Generated Data") ax.set_xlabel("1st Principal Component") ax.set_ylabel("2nd Principal Component") ax.set_zlabel("3rd Principal Component") else: ax.set_xticklabels([]) ax.set_yticklabels([]) ax.set_zticklabels([]) # Display! plt.show()
[ "def scatter_plot(self):\n\n X = self.reduce_dimension(n_components=2)\n\n plt.figure()\n plt.scatter(X[:,0], X[:,1])\n\n return plt", "def plot_data_cluster(X, y, classes, *, save = False):\n # Create figure.\n fig = plt.figure()\n ax = fig.add_subplot(111)\n\n # Scatter data ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Log and assert based on condition. If condition True, log message as PASS to testcase log file. If condition False, Assert and Print message with status FAIL.
def logfile_assert_message(s, condition, message):
    """Log and assert based on condition.

    When condition is truthy, log message with status PASS to the testcase
    log.  Otherwise append a timestamped FAIL line to s.log_to_file and raise
    AssertionError carrying the FAIL message.
    """
    if condition:
        log_message(s, message + ": PASS")
        return
    s.log_to_file += now_short() + message + ": FAIL\r\n"
    assert 0, message + ": FAIL\r\n"
[ "def assert_true(self, condition, message=\"\"):\n if not condition:\n if message:\n print(message)\n self.failed = True", "def assertTrue(self, condition):\n self.failIf(not condition)", "def _assert(self, condition: bool, err_message: str, context: CX = None):\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Write detailed log file for given test.
def write_test_log(t, output_dir):
    """Write a detailed, timestamped log file for test t into output_dir.

    Does nothing unless the test has buffered log text (t.log_to_file) and a
    stop_time attribute (i.e. it actually ran to completion).
    """
    if t.log_to_file is None or not hasattr(t, "stop_time"):
        return
    filename = type(t).__name__ + "-" + time.strftime("%Y%m%d-%H%M%S") + ".txt"
    testtime = t.stop_time - t.start_time
    separator = "\t======================================================="
    with open(os.path.join(output_dir, filename), "w") as log:
        log.write(separator)
        log.write(f"\n\tTest case ID: {type(t).__name__}")
        log.write(f"\n\tTest case Description: {type(t).__doc__}")
        log.write("\n" + separator + "\n")
        log.write(t.log_to_file)
        log.write("\n" + separator)
        log.write(f"\n\t{type(t).__name__} test result: {t.result_grade}")
        log.write(f"\n\tTotal test time: {testtime} seconds")
        log.write("\n" + separator)
[ "def write_test_log(t, output_dir):\n if t.log_to_file is not None and hasattr(t, \"stop_time\"):\n filename = type(t).__name__ + \"-\" + time.strftime(\"%Y%m%d-%H%M%S\") + \".txt\"\n testtime = t.stop_time - t.start_time\n with open(os.path.join(output_dir, filename), \"w\") as log:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Factory for subfield items.
def subfieldFactory(name):
    """Factory for subfield items: build a pyre inventory facility for name."""
    from pythia.pyre.inventory import facility
    subfield_facility = facility(name, family="subfield", factory=Subfield)
    return subfield_facility
[ "def subfield():\n return Subfield()", "def create_subspecialty(sub_data):\n return get_or_create_object(sub_data, Subspecialty)", "def test_customWidgetFactory(self):\n\n value_type = TextLine(__name__='bar')\n self.field = List(__name__='foo', value_type=value_type)\n request = Test...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Factory associated with Subfield.
def subfield():
    """Factory associated with Subfield: return a fresh Subfield instance."""
    instance = Subfield()
    return instance
[ "def subfieldFactory(name):\n from pythia.pyre.inventory import facility\n return facility(name, family=\"subfield\", factory=Subfield)", "def add_sub_factories(self) -> None:\n for field in get_model_fields(self.model, base=False, foreign=True, m2m=False):\n if not hasattr(self.factory, f...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the short path name of a given long path.
def get_short_path_name(long_name: str):
    """Return the short path form of long_name via _GetShortPathNameW, or None.

    Uses the two-call pattern: the first call reports the required buffer
    size, the second call fills the allocated buffer.
    """
    required_size = _GetShortPathNameW(long_name, None, 0)
    if required_size <= 0:
        return None
    name_buffer = ctypes.create_unicode_buffer(required_size)
    written = _GetShortPathNameW(long_name, name_buffer, required_size)
    assert 0 < written < required_size
    return name_buffer.value
[ "def get_short_path_name(long_name):\n output_buf_size = 0\n while True:\n output_buf = ctypes.create_unicode_buffer(output_buf_size)\n needed = _GetShortPathNameW(long_name, output_buf, output_buf_size)\n if output_buf_size >= needed:\n return output_buf.value\n else:\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
open jpg file or merge several jpg file then open it
def execute_file(self, event=None):
    """Open the selected jpg file(s); merge several files into one image, then show it.

    event -- optional Tk event (this method is bound as a callback); unused.
    """
    file_list = self.get_path_list()
    print(file_list)
    if not file_list:
        return
    # merge image
    # Fix for a memory-leak bug: previously opened photos were not released,
    # so opening a second time still showed the first image.
    # (Comment translated from Chinese.)
    try:
        self.photos.destroy()
    except:
        pass
    self.photos.imgs = file_list
    merged_photo = self.photos.merge_photos()
    # show image
    # NOTE(review): 'window' is a module-level global, not an attribute of self.
    try:
        window.destroy()
    except:
        import traceback
        traceback.print_exc()
    window.build_img_canvas()
    window.show_img_in_canvas(merged_photo)
[ "def open_img(img):\n\n img.open()", "def open_frame(path,number):\n num=str(number).zfill(3) #Zero filling\n name = glob.glob(path+\"/*\"+num+\"*\")\n if len(name)==0:\n name = glob.glob(path+\"/\"+str(number)+\".png\")\n if len(name)>1:\n print \"too many matches \",len(name),\" f...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Scroll canvas horizontally and redraw the image
def __scroll_x(self, *args, **kwargs):
    """Scroll the canvas horizontally, then redraw the displayed image.

    *args is forwarded to Canvas.xview; **kwargs is accepted but ignored.
    """
    canvas = self.canvas_image
    canvas.xview(*args)
    self.__show_image()
[ "def __scroll_x(self, *args, **kwargs):\n self.canvas.xview(*args) # scroll horizontally\n self.__show_image() # redraw the image", "def scroll(self, dx, dy):\n cam_next = self.camera_rect\n cam_next[0] += dx\n cam_next[1] += dy", "def scrollDown(self):\r\n\r\n if sel...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Scroll canvas vertically and redraw the image
def __scroll_y(self, *args, **kwargs):
    """Scroll the canvas vertically, then redraw the displayed image.

    *args is forwarded to Canvas.yview; **kwargs is accepted but ignored.
    """
    canvas = self.canvas_image
    canvas.yview(*args)
    self.__show_image()
[ "def __scroll_y(self, *args, **kwargs):\n self.canvas.yview(*args) # scroll vertically\n self.__show_image() # redraw the image", "def scrollDown(self):\r\n\r\n if self.z_stack<self.img.shape[0]-1:\r\n self.z_stack+=1\r\n \r\n #self.pixmap=QtGui.QPixmap.from...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Checks if the point (x,y) is outside the image area
def outside(self, x, y):
    """Return True when the point (x, y) lies outside the image area."""
    bbox = self.canvas_image.coords(self.container)  # image area rectangle
    inside = (bbox[0] < x < bbox[2]) and (bbox[1] < y < bbox[3])
    return not inside
[ "def outside(self, x, y):\n bbox = self.canvas.coords(self.container) # get image area\n if bbox[0] < x < bbox[2] and bbox[1] < y < bbox[3]:\n return False # point (x,y) is inside the image area\n else:\n return True # point (x,y) is outside the image area", "def _out...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Dummy function to redraw figures in the children classes
def redraw_figures(self):
    """Hook for subclasses to redraw their figures; base implementation is a no-op."""
    pass
[ "def redraw(self, **kwargs):\n #src_dict = self.data_sources\n #self.remove_sources(src_dict.keys())\n self.renderers = {}\n #self.renderers = {}\n self.figure = self.draw_figure(**kwargs)\n #self.add_sources(src_dict)\n # todo does the old figure linger on?\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Repeat the retrieval of the metrics of a metrics context until at least one of the specified metric group names has data. Returns the MetricGroupValues object for the metric group that has data.
def wait_for_metrics(metric_context, metric_groups):
    """Poll a metrics context until one of the named metric groups has data.

    metric_context -- zhmcclient metrics context to poll via get_metrics().
    metric_groups -- collection of metric group names to wait for.

    Returns the MetricGroupValues object of the group that produced data, or
    None after more than GET_METRICS_MAX_RETRIES unsuccessful polls.  Sleeps
    GET_METRICS_RETRY_TIME seconds between polls.
    """
    retries = 0
    got_data = False
    while not got_data:
        mr_str = metric_context.get_metrics()
        mr = zhmcclient.MetricsResponse(metric_context, mr_str)
        for mg_values in mr.metric_group_values:
            if mg_values.name in metric_groups:
                got_data = True
                if DEBUG_METRICS_RESPONSE:
                    print("Debug: MetricsResponse:")
                    print(mr_str)
                # Stop at the first matching group; the loop variable keeps
                # referencing it for the return below.
                break
        if not got_data:
            if retries > GET_METRICS_MAX_RETRIES:
                return None
            time.sleep(GET_METRICS_RETRY_TIME)  # avoid hot spin loop
            retries += 1
    return mg_values
[ "def get_metric_group(self, group_name):\n return self.metric_groups.to_map(key_attribute=\"name\").get(group_name)", "def result_group(group_id, failures=False, wait=0, count=None, cached=Conf.CACHED):\n if cached:\n return result_group_cached(group_id, failures, wait, count)\n start = time.t...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Retrieve and print metric groups.
def print_metric_groups(cmd_ctx, client, metric_groups, resource_filter):
    """Retrieve metrics for the given metric group(s), filter by resource, print them.

    cmd_ctx -- CLI command context (spinner, output_format, transpose).
    client -- zhmcclient.Client used to create the metrics context.
    metric_groups -- a metric group name or a list/tuple of names.
    resource_filter -- ordered list of (resource-class, resource-name) pairs;
        an empty/None name at a level matches every resource at that level.
    """
    if not isinstance(metric_groups, (list, tuple)):
        metric_groups = [metric_groups]
    properties = {
        'anticipated-frequency-seconds': MIN_ANTICIPATED_FREQUENCY,
        'metric-groups': metric_groups,
    }
    mc = client.metrics_contexts.create(properties)
    mg_values = wait_for_metrics(mc, metric_groups)
    filtered_object_values = list()  # of MetricObjectValues
    if not mg_values:
        # No data arrived: synthesise an empty metric group definition so an
        # empty table can still be printed.
        mg_name = metric_groups[0]  # just pick any
        # NOTE(review): relies on a private zhmcclient helper; may break on upgrade.
        res_class = zhmcclient._metrics._resource_class_from_group(mg_name)
        mg_def = zhmcclient.MetricGroupDefinition(
            name=mg_name, resource_class=res_class, metric_definitions=[])
    else:
        mg_def = mc.metric_group_definitions[mg_values.name]
        # Resolve the resource filter names into resource objects.
        filter_cpc = None
        filter_partition = None
        filter_lpar = None
        filter_adapter = None
        filter_nic = None
        for r_class, r_name in resource_filter:
            if r_class == 'cpc' and r_name:
                filter_cpc = client.cpcs.find(name=r_name)
            elif r_class == 'partition' and r_name:
                # Child filters require the parent filter to be set first.
                assert filter_cpc
                filter_partition = filter_cpc.partitions.find(name=r_name)
            elif r_class == 'logical-partition' and r_name:
                assert filter_cpc
                filter_lpar = filter_cpc.lpars.find(name=r_name)
            elif r_class == 'adapter' and r_name:
                assert filter_cpc
                filter_adapter = filter_cpc.adapters.find(name=r_name)
            elif r_class == 'nic' and r_name:
                assert filter_partition
                filter_nic = filter_partition.nics.find(name=r_name)
        resource_class = mg_def.resource_class
        # Keep only the object values whose resource matches the filter chain;
        # an unset filter at any level matches everything below it.
        for ov in mg_values.object_values:
            included = False
            if resource_class == 'cpc':
                if not filter_cpc:
                    included = True
                elif ov.resource_uri == filter_cpc.uri:
                    included = True
            elif resource_class == 'partition':
                if not filter_cpc:
                    included = True
                elif ov.resource.manager.cpc.uri == filter_cpc.uri:
                    if not filter_partition:
                        included = True
                    elif ov.resource_uri == filter_partition.uri:
                        included = True
            elif resource_class == 'logical-partition':
                if not filter_cpc:
                    included = True
                elif ov.resource.manager.cpc.uri == filter_cpc.uri:
                    if not filter_lpar:
                        included = True
                    elif ov.resource_uri == filter_lpar.uri:
                        included = True
            elif resource_class == 'adapter':
                if not filter_cpc:
                    included = True
                elif ov.resource.manager.cpc.uri == filter_cpc.uri:
                    if not filter_adapter:
                        included = True
                    elif ov.resource_uri == filter_adapter.uri:
                        included = True
            elif resource_class == 'nic':
                if not filter_cpc:
                    included = True
                elif ov.resource.manager.partition.manager.cpc.uri == \
                        filter_cpc.uri:
                    if not filter_partition:
                        included = True
                    elif ov.resource.manager.partition.uri == \
                            filter_partition.uri:
                        if not filter_nic:
                            included = True
                        elif ov.resource_uri == filter_nic.uri:
                            included = True
            else:
                raise ValueError(
                    "Invalid resource class: {}".format(resource_class))
            if included:
                filtered_object_values.append(ov)
    resource_classes = [f[0] for f in resource_filter]
    cmd_ctx.spinner.stop()
    print_object_values(filtered_object_values, mg_def, resource_classes,
                        cmd_ctx.output_format, cmd_ctx.transpose)
    mc.delete()
[ "def print_groups():", "def list_groups(self):\n pass", "def list_groups(args):\n\n for group in get_groups(args):\n print(group)", "def get_all_groups_formatted():\n return '\\n'.join(f\"{g['groupId']}. {g['groupName']}\" for g in cur.execute('SELECT * FROM groups').fetchall())", "def l...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Report usage overview metrics for CPCs. In addition to the command-specific options shown in this help text, the general options (see 'zhmc help') can also be specified right after the 'zhmc' command name.
def metrics_cpc(cmd_ctx, cpc, **options):
    """Report usage overview metrics for CPCs (CLI entry point)."""
    def run():
        return cmd_metrics_cpc(cmd_ctx, cpc, options)
    cmd_ctx.execute_cmd(run)
[ "def metrics_channel(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_channel(cmd_ctx, cpc, options))", "def do_hostinfo(self, args):\n host = opts = None\n if args:\n args = args.split()\n host = args.pop()\n\n if not host:\n print('Usag...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Report usage metrics for active adapters of CPCs in DPM mode. In addition to the command-specific options shown in this help text, the general options (see 'zhmc help') can also be specified right after the 'zhmc' command name.
def metrics_adapter(cmd_ctx, cpc, adapter, **options):
    """Report usage metrics for active adapters of CPCs in DPM mode (CLI entry point)."""
    def run():
        return cmd_metrics_adapter(cmd_ctx, cpc, adapter, options)
    cmd_ctx.execute_cmd(run)
[ "def metrics_channel(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_channel(cmd_ctx, cpc, options))", "def metrics_proc(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_proc(cmd_ctx, cpc, options))", "def metrics_networkport(cmd_ctx, cpc, adapter, **options):\n c...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Report usage metrics for all channels of CPCs in classic mode. In addition to the command-specific options shown in this help text, the general options (see 'zhmc help') can also be specified right after the 'zhmc' command name.
def metrics_channel(cmd_ctx, cpc, **options):
    """Report usage metrics for all channels of CPCs in classic mode (CLI entry point)."""
    def run():
        return cmd_metrics_channel(cmd_ctx, cpc, options)
    cmd_ctx.execute_cmd(run)
[ "def metrics_cpc(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_cpc(cmd_ctx, cpc, options))", "def metrics_proc(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_proc(cmd_ctx, cpc, options))", "async def channel_stats(self, ctx, channel: discord.TextChannel = None):\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Report environmental and power consumption metrics for CPCs. In addition to the command-specific options shown in this help text, the general options (see 'zhmc help') can also be specified right after the 'zhmc' command name.
def metrics_env(cmd_ctx, cpc, **options):
    """Report environmental and power consumption metrics for CPCs (CLI entry point)."""
    def run():
        return cmd_metrics_env(cmd_ctx, cpc, options)
    cmd_ctx.execute_cmd(run)
[ "def metrics_cpc(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_cpc(cmd_ctx, cpc, options))", "def metrics_channel(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_channel(cmd_ctx, cpc, options))", "def metrics_proc(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Report processor usage metrics for CPCs. In addition to the command-specific options shown in this help text, the general options (see 'zhmc help') can also be specified right after the 'zhmc' command name.
def metrics_proc(cmd_ctx, cpc, **options):
    """Report processor usage metrics for CPCs (CLI entry point)."""
    def run():
        return cmd_metrics_proc(cmd_ctx, cpc, options)
    cmd_ctx.execute_cmd(run)
[ "def metrics_cpc(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_cpc(cmd_ctx, cpc, options))", "def metrics_channel(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_channel(cmd_ctx, cpc, options))", "def metrics_crypto(cmd_ctx, cpc, **options):\n cmd_ctx.execute_c...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Report usage metrics for all active Crypto Express adapters of CPCs. In addition to the command-specific options shown in this help text, the general options (see 'zhmc help') can also be specified right after the 'zhmc' command name.
def metrics_crypto(cmd_ctx, cpc, **options):
    """Report usage metrics for active Crypto Express adapters of CPCs (CLI entry point)."""
    def run():
        return cmd_metrics_crypto(cmd_ctx, cpc, options)
    cmd_ctx.execute_cmd(run)
[ "def metrics_channel(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_channel(cmd_ctx, cpc, options))", "def metrics_adapter(cmd_ctx, cpc, adapter, **options):\n cmd_ctx.execute_cmd(\n lambda: cmd_metrics_adapter(cmd_ctx, cpc, adapter, options))", "def do_hostinfo(self, args):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Report usage metrics for the ports of network adapters of CPCs in DPM mode. In addition to the command-specific options shown in this help text, the general options (see 'zhmc help') can also be specified right after the 'zhmc' command name.
def metrics_networkport(cmd_ctx, cpc, adapter, **options):
    """Report usage metrics for network adapter ports of CPCs in DPM mode (CLI entry point)."""
    def run():
        return cmd_metrics_networkport(cmd_ctx, cpc, adapter, options)
    cmd_ctx.execute_cmd(run)
[ "def metrics_channel(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_channel(cmd_ctx, cpc, options))", "def metrics_adapter(cmd_ctx, cpc, adapter, **options):\n cmd_ctx.execute_cmd(\n lambda: cmd_metrics_adapter(cmd_ctx, cpc, adapter, options))", "def do_hostinfo(self, args):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Report usage metrics for the NICs of partitions of CPCs in DPM mode. In addition to the command-specific options shown in this help text, the general options (see 'zhmc help') can also be specified right after the 'zhmc' command name.
def metrics_nic(cmd_ctx, cpc, partition, nic, **options): cmd_ctx.execute_cmd( lambda: cmd_metrics_nic(cmd_ctx, cpc, partition, nic, options))
[ "def metrics_cpc(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_cpc(cmd_ctx, cpc, options))", "def metrics_networkport(cmd_ctx, cpc, adapter, **options):\n cmd_ctx.execute_cmd(\n lambda: cmd_metrics_networkport(cmd_ctx, cpc, adapter, options))", "def metrics_channel(cmd_ctx, cp...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Computes the Modulation SpectrumBased ECG Quality Index (MSQI) for one or many ECG signals defined in x, sampled with a sampling frequency fs
def msqi_ama(x, fs): # test ecg shape try: x.shape[1] except IndexError: x = x[:, np.newaxis] # Empirical values for the STFFT transformation win_size_sec = 0.125 #seconds win_over_sec = 0.09375 #seconds nfft_factor_1 = 16 nfft_factor_2 = 4 win_size_smp = int(win_size_sec * fs) #samples win_over_smp = int(win_over_sec * fs) #samples win_shft_smp = win_size_smp - win_over_smp # Computes Modulation Spectrogram modulation_spectrogram = ama.strfft_modulation_spectrogram(x, fs, win_size_smp, win_shft_smp, nfft_factor_1, 'cosine', nfft_factor_2, 'cosine' ) # Find fundamental frequency (HR) # f = (0, 40)Hz ix_f_00 = (np.abs(modulation_spectrogram['freq_axis'] - 0)).argmin(0) ix_f_40 = (np.abs(modulation_spectrogram['freq_axis'] - 40)).argmin(0) + 1 # Look for the maximum only from 0.6 to 3 Hz (36 to 180 bpm) valid_f_ix = np.logical_or(modulation_spectrogram['freq_mod_axis'] < 0.66 , modulation_spectrogram['freq_mod_axis'] > 3) # number of epochs n_epochs = modulation_spectrogram['power_modulation_spectrogram'].shape[2] msqi_vals = np.zeros(n_epochs) hr_vals = np.zeros(n_epochs) for ix_epoch in range(n_epochs): B = np.sqrt(modulation_spectrogram['power_modulation_spectrogram'][:, :, ix_epoch]) # Scale to maximun of B B = B / np.max(B) # Add B in the conventional frequency axis from 0 to 40 Hz tmp = np.sum(B[ix_f_00:ix_f_40, :], axis=0) # Look for the maximum only from 0.6 to 3 Hz (36 to 180 bpm) tmp[valid_f_ix] = 0 ix_max = np.argmax(tmp) freq_funda = modulation_spectrogram['freq_mod_axis'][ix_max] # TME tme = np.sum(B) eme = 0 for ix_harm in range(1, 5): ix_fm = (np.abs(modulation_spectrogram['freq_mod_axis'] - (ix_harm * freq_funda) )).argmin(0) ix_b = int(round(.3125 / modulation_spectrogram['freq_mod_delta'] )) # 0.3125Hz, half lobe # EME eme = eme + np.sum(B[ 0 : ix_f_40, ix_fm - ix_b : ix_fm + ix_b + 1 ]) # RME rme = tme - eme # MS-QI msqi_vals[ix_epoch] = eme / rme # HR hr_vals[ix_epoch] = freq_funda * 60 return (msqi_vals, hr_vals, modulation_spectrogram)
[ "def batch_analysis(x,fs,CHUNK_SIZE):\n\n\n\tfundamental_frequency_in_blocks = alysis.pitch_detect(x,fs,CHUNK_SIZE)\n\trms = alysis.root_mean_square(x,CHUNK_SIZE,fs)\n\tvoiced_unvoiced_starting_info_object = alysis.starting_info(x,fundamental_frequency_in_blocks,fs,CHUNK_SIZE)\n\tvoiced_samples = voiced_unvoiced_st...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Load the image on initial load of the application
def OnInit(self): self.imageID = self.loadImage()
[ "def OnInit( self ):\n self.imageID = self.loadImage ()", "def load_image(self):\n self.image = pygame.image.load(\"images/hurdle.png\")", "def LoadImage(self):\n filename = self.GetFullFileName()\n img, size, alpha = self._imagehandler.Load(filename)\n self._image = img\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Draw a cube with texture coordinates
def drawCube(self): glBegin(GL_QUADS); glTexCoord2f(0.0, 0.0); glVertex3f(-1.0, -1.0, 1.0); glTexCoord2f(1.0, 0.0); glVertex3f(1.0, -1.0, 1.0); glTexCoord2f(1.0, 1.0); glVertex3f(1.0, 1.0, 1.0); glTexCoord2f(0.0, 1.0); glVertex3f(-1.0, 1.0, 1.0); glTexCoord2f(1.0, 0.0); glVertex3f(-1.0, -1.0, -1.0); glTexCoord2f(1.0, 1.0); glVertex3f(-1.0, 1.0, -1.0); glTexCoord2f(0.0, 1.0); glVertex3f(1.0, 1.0, -1.0); glTexCoord2f(0.0, 0.0); glVertex3f(1.0, -1.0, -1.0); glTexCoord2f(0.0, 1.0); glVertex3f(-1.0, 1.0, -1.0); glTexCoord2f(0.0, 0.0); glVertex3f(-1.0, 1.0, 1.0); glTexCoord2f(1.0, 0.0); glVertex3f(1.0, 1.0, 1.0); glTexCoord2f(1.0, 1.0); glVertex3f(1.0, 1.0, -1.0); glTexCoord2f(1.0, 1.0); glVertex3f(-1.0, -1.0, -1.0); glTexCoord2f(0.0, 1.0); glVertex3f(1.0, -1.0, -1.0); glTexCoord2f(0.0, 0.0); glVertex3f(1.0, -1.0, 1.0); glTexCoord2f(1.0, 0.0); glVertex3f(-1.0, -1.0, 1.0); glTexCoord2f(1.0, 0.0); glVertex3f(1.0, -1.0, -1.0); glTexCoord2f(1.0, 1.0); glVertex3f(1.0, 1.0, -1.0); glTexCoord2f(0.0, 1.0); glVertex3f(1.0, 1.0, 1.0); glTexCoord2f(0.0, 0.0); glVertex3f(1.0, -1.0, 1.0); glTexCoord2f(0.0, 0.0); glVertex3f(-1.0, -1.0, -1.0); glTexCoord2f(1.0, 0.0); glVertex3f(-1.0, -1.0, 1.0); glTexCoord2f(1.0, 1.0); glVertex3f(-1.0, 1.0, 1.0); glTexCoord2f(0.0, 1.0); glVertex3f(-1.0, 1.0, -1.0); glEnd()
[ "def DrawCube(x, y, z):\n glBegin(GL_QUADS) \n \n #Cube\n # Front Face (note that the texture's corners have to match the quad's corners)\n glTexCoord2f(0.0, 0.0); glVertex3f(-x, -2 * y, z) # Bottom Left Of The Texture and Quad\n glTexCoord2f(1.0, 0.0); glVertex3f( x, -2 * y, z) # Bo...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Adds a message to the chat and scrolls down.
def add_message_to_chat(self, message: str): scroll_length = (len(message) // Client.TEXTBOX_CHARACTER_LENGTH) + 1 self.chat_text.config(state=NORMAL) self.chat_text.insert(END, message + '\n') self.chat_text.yview_scroll(scroll_length, "units") self.chat_text.config(state=DISABLED)
[ "def addMessage(self, msg: str) -> None:\n self.messages.moveCursor(QTextCursor.End)\n self.messages.ensureCursorVisible()\n self.messages.appendPlainText(msg)\n self.messages.ensureCursorVisible()\n self.repaint() # Update/refresh the message window", "def chatbuffer_add(self, ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates a data folder containing a 100class subset of ImageNet, then creates a zipped copy of it
def zip_imagenet100c(): #First make sure the directory we are given is correct! if not os.path.isdir(DATA_SRC_ROOT): raise Exception("Bad filepath given") #create the destiantion directories if they don't exist if not os.path.isdir(IMAGENET100_DIR): os.mkdir(IMAGENET100_DIR) #grab the subset wnids for the 100 class-subset with open(IMAGENET100_CLASSES) as f: subset_wnids = f.readlines() subset_wnids = [x.strip() for x in subset_wnids] #list of the 100 WNIDs we grab #Grab the names of all of the folders inside the root data source #Structure is distortion/sub_distortion/level/wnids for distortion in os.listdir(DATA_SRC_ROOT): if distortion != "meta.bin": print(distortion) folder_path = os.path.join(DATA_SRC_ROOT, distortion) if not os.path.isdir(folder_path): continue for sub_distortion in os.listdir(folder_path): print(sub_distortion) subfolder_path = os.path.join(folder_path, sub_distortion) if not os.path.isdir(subfolder_path): continue for level in os.listdir(subfolder_path): print(level) level_path = os.path.join(subfolder_path, level) #grab the correcrt validation d9recotires for wnid in os.listdir(level_path): wnid_path = os.path.join(level_path, wnid) if not os.path.isdir(wnid_path): continue if wnid in subset_wnids: dest_path = os.path.join(IMAGENET100_DIR, distortion, sub_distortion, level, wnid) shutil.copytree(wnid_path, dest_path) #copy the metadata bin file meta_file = os.path.join(DATA_SRC_ROOT, 'meta.bin') meta_dest = os.path.join(IMAGENET100_DIR, 'meta.bin') shutil.copy(meta_file, meta_dest) #Zip the destinatio file shutil.make_archive(ZIP_PATH + '/ImageNet100C', 'tar', IMAGENET100_DIR)
[ "def create_test_dataset(data_folder):\n dataset = datasets.CIFAR100('data/', False, download=True)\n Path(data_folder).mkdir()\n for i in range(100):\n img, label = random.choice(dataset)\n img.save(data_folder + str(i) +\n '_' + dataset.classes[label] + '.jpg')", "def crea...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Show the popup and return True if accepted, False if canceled.
def popup(self): return self.exec_() == QDialog.Accepted
[ "def confirm_dialog_box():\n alert = world.browser.switch_to.alert\n alert.accept()", "def verify_popup(self, type):", "def _show_popup(self) -> None:\n\n top = tk.Toplevel()\n email_list_len = len(self.get_recipients())\n msg = tk.messagebox.askquestion('Confirm send emails', 'Are yo...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Fill the heavy metal unit labels with the selected unit.
def set_hm_unit_display(self): units = str(self.entries['units'].combobox.currentText()) self.ui.is_unitL1.setText(units) self.ui.is_unitL2.setText(units) self.ui.is_unitL3.setText(units) self.ui.is_unitL4.setText(units)
[ "def unit_label(self, unit_label):\n\n self._unit_label = unit_label", "def _update_units(self):\n self.options['gds_unit'] = 1.0 / self.design.parse_value('1 meter')", "def set_unit(self,unit):\n self.unit = unit", "def units(self):\n self.__content = 'unit'", "def unitUpdate(se...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check if a task exists on the server
def exists(self, server): try: server.get( 'task', replacements={ 'slug': self.__challenge__.slug, 'identifier': self.identifier}) except Exception: return False return True
[ "def exists_task(self, task):\n assert task, \"Must input a valid task name.\"\n return any(self.get_by_task(task))", "def isTasksExists(request):\n task_status = {}\n task_result = 0\n flag = None\n for task in request.data['tasks']:\n task_obj = Tafv2Task.obj...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Find the pooled sample variance for two samples.
def pooled_sample_variance(sample1, sample2): deg_freedom = len(sample1) + len(sample2) - 2 mean1 = statistics.mean(sample1) squares1 = ((x - mean1) ** 2 for x in sample1) mean2 = statistics.mean(sample2) squares2 = ((x - mean2) ** 2 for x in sample2) return (math.fsum(squares1) + math.fsum(squares2)) / float(deg_freedom)
[ "def _pooled_sample_variance(sample1, sample2):\n deg_freedom = len(sample1) + len(sample2) - 2\n mean1 = statistics.mean(sample1)\n squares1 = ((x - mean1) ** 2 for x in sample1)\n mean2 = statistics.mean(sample2)\n squares2 = ((x - mean2) ** 2 for x in sample2)\n\n return (math.fsum(squares1) + ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculate a ttest score for the difference between two samples.
def tscore(sample1, sample2): if len(sample1) != len(sample2): raise ValueError("different number of values") error = pooled_sample_variance(sample1, sample2) / len(sample1) diff = statistics.mean(sample1) - statistics.mean(sample2) return diff / math.sqrt(error * 2)
[ "def t_test_statistic(sampleA, sampleB):\r\n difference = compare_means(sampleA, sampleB)\r\n # Store lengths of samples\r\n n = len(sampleA)\r\n m = len(sampleB)\r\n stdev = (np.var(sampleA)/n + np.var(sampleB)/m)**0.5\r\n t_stat = difference / stdev\r\n return t_stat", "def ttest_rel(a, b):...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
return autsizeable field names in idfobject
def autosize_fieldname(idfobject): # undocumented stuff in this code return [ fname for (fname, dct) in zip(idfobject.objls, idfobject["objidd"]) if "autosizable" in dct ]
[ "def field_names(self):\n ...", "def arcpy_get_field_objects(self):\r\n\t\tif __thou_shalt__.do_a_dry_run:\r\n\t\t\treturn []\r\n\t\treturn thou_shalt(\"Fetch field information from {}\".format(self.shortened_name_with_context()),\r\n\t\t\tlambda:arcpy.ListFields(str(self))\r\n\t\t)", "def _fields_names(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Checks whether the given ISBN10 code is valid. >>> isISBN10('9971502100') True >>> isISBN10('9971502108') False
def isISBN10(code): # helper function for computing ISBN-10 check digit def check_digit(code): # compute check digit check = sum((i + 1) * int(code[i]) for i in range(9)) % 11 # convert check digit into its string representation return 'X' if check == 10 else str(check) # check whether given code is a string if not isinstance(code, str): return False # check whether given code contains 10 characters if len(code) != 10: return False # check whether first nine characters of given code are digits if not code[:9].isdigit(): return False # check the check digit return check_digit(code) == code[-1]
[ "def isISBN(code):\n if not (\n isinstance(code, str) and # code must be a string\n len(code) == 10 and # code must contain 10 characters\n code[:9].isdigit() # first nine characters must be digits\n ):\n return False\n\n # check the check digit\n return check...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Checks whether the given ISBN13 code is valid. >>> isISBN13('9789743159664') True >>> isISBN13('9787954527409') False >>> isISBN13('8799743159665') False
def isISBN13(code): # helper function for computing ISBN-10 check digit def check_digit(code): # compute check digit check = sum((3 if i % 2 else 1) * int(code[i]) for i in range(12)) # convert check digit into a single digit return str((10 - check) % 10) # check whether given code is a string if not isinstance(code, str): return False # check whether given code contains 10 characters if len(code) != 13: return False # check whether first nine characters of given code are digits if not code[:12].isdigit(): return False # check the check digit return check_digit(code) == code[-1]
[ "def is_isbn_13(isbn13):\r\n isbn13 = re.sub(r'[^0-9X]', '', isbn13.replace('x', 'X'))\r\n if len(isbn13) != 13: return False\r\n if isbn13[0:3] not in ('978', '979'): return False\r\n return False if isbn_13_check_digit(isbn13[:-1]) != isbn13[-1] else True", "def isbn_13_check_structure(isbn13):\r\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
>>> codes = ['0012345678', '0012345679', '9971502100', '080442957X', 5, True, 'The Practice of Computing Using Python', '9789027439642', '5486948320146'] >>> areISBN(codes) [False, True, True, True, False, False, False, True, False] >>> areISBN(codes, True) [False, False, False, False, False, False, False, True, False] >>> areISBN(codes, False) [False, True, True, True, False, False, False, False, False]
def areISBN(codes, isbn13=None): # initialize list of checks checks = [] # construct list of checks for code in codes: if isinstance(code, str): if isbn13 is None: checks.append(isISBN(code, len(code) == 13)) else: checks.append(isISBN(code, isbn13)) else: checks.append(False) # return list of checks return checks
[ "def isISBN(code):\n if not (\n isinstance(code, str) and # code must be a string\n len(code) == 10 and # code must contain 10 characters\n code[:9].isdigit() # first nine characters must be digits\n ):\n return False\n\n # check the check digit\n return check...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculates a two's complement integer from the given input value's bits
def twos_complement(input_value, num_bits=16): mask = 2 ** (num_bits - 1) return -(input_value & mask) + (input_value & ~mask)
[ "def twos_complement_8bit(b: int) -> int:\n if b >= 256:\n raise ValueError(\"b must fit inside 8 bits\")\n if b & (1 << 7):\n # Negative number, calculate its value using two's-complement.\n return b - (1 << 8)\n else:\n # Positive number, do not touch.\n return b", "def __num_to_two_complement...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Transfer models to target port
def transfer(self, target_port: Port, evaluator: Evaluator, config_uids: List[int] = None) -> None: if target_port.name not in self.transfer_defs: print(f"No transfer definition found for target port '{target_port.name}'") return # transfer definitions for specified target port tds = self.transfer_defs[target_port.name] output_dir = os.path.join(script_dir, os.pardir, "output") training_type = "transfer" print(f"TRANSFERRING MODELS TO TARGET PORT '{target_port.name}'") if config_uids is not None: print(f"Transferring configs -> {config_uids} <-") window_width = 50 num_epochs = 25 train_lr = 0.01 fine_num_epochs = 20 fine_tune_lr = 1e-5 batch_size = 1024 # skip port if fully transferred num_not_transferred = 0 for td in tds: for config in self.transfer_configs: if not self._is_transferred(target_port.name, td.base_port_name, config.uid): # print(f"Not transferred: {td.base_port_name} -> {target_port.name} ({config.uid})") num_not_transferred += 1 num_transfers = len(tds) * len(self.transfer_configs) print(f"Transferred count {num_transfers - num_not_transferred}/{num_transfers}") if num_not_transferred == 0: print(f"All transfers done for target port '{target_port.name}': Skipping") return X_ts, y_ts = load_data(target_port, window_width) baseline = mean_absolute_error(y_ts, np.full_like(y_ts, np.mean(y_ts))) evaluator.set_naive_baseline(target_port, baseline) print(f"Naive baseline: {baseline}") # X_train_orig, X_test_orig, y_train_orig, y_test_orig = train_test_split(X_ts, y_ts, test_size=0.2, # random_state=42, shuffle=False) # train_optimizer = Adam(learning_rate=train_lr) # fine_tune_optimizer = Adam(learning_rate=fine_tune_lr) for td in tds: print(f".:'`!`':. 
TRANSFERRING PORT {td.base_port_name} TO {td.target_port_name} .:'`!`':.") print(f"- - Epochs {num_epochs} </> </> Learning rate {train_lr} - -") print(f"- - Window width {window_width} </> Batch size {batch_size} - -") # print(f"- - Number of model's parameters {num_total_trainable_parameters(model)} device {device} - -") base_port = self.pm.find_port(td.base_port_name) if base_port is None: raise ValueError(f"Unable to associate port with port name '{td.base_port_name}'") # model = inception_time(input_shape=(window_width, 37)) # print(model.summary()) # apply transfer config for config in self.transfer_configs: if config_uids is not None and config.uid not in config_uids: continue if self._is_transferred(target_port.name, td.base_port_name, config.uid): print(f"Skipping config {config.uid}") continue print(f"\n.:'':. APPLYING CONFIG {config.uid} ::'':.") print(f"-> -> {config.desc} <- <-") print(f"-> -> nth_subset: {config.nth_subset} <- <-") print(f"-> -> trainable layers: {config.train_layers} <- <-") _, _, start_time, _, _ = decode_keras_model(os.path.split(td.base_model_path)[1]) model_file_name = encode_keras_model(td.target_port_name, start_time, td.base_port_name, config.uid) file_path = os.path.join(output_dir, "model", td.target_port_name, model_file_name) X_train_orig, X_test_orig, y_train_orig, y_test_orig = train_test_split(X_ts, y_ts, test_size=0.2, random_state=42, shuffle=False) train_optimizer = Adam(learning_rate=train_lr) fine_tune_optimizer = Adam(learning_rate=fine_tune_lr) checkpoint = ModelCheckpoint(file_path, monitor='val_mae', mode='min', verbose=2, save_best_only=True) early = EarlyStopping(monitor="val_mae", mode="min", patience=10, verbose=2) redonplat = ReduceLROnPlateau(monitor="val_mae", mode="min", patience=3, verbose=2) callbacks_list = [checkpoint, early, redonplat] # optimizer = Adam(learning_rate=lr) # # # configure model # model.compile(optimizer=optimizer, loss="mse", metrics=["mae"]) # load base model model = 
load_model(td.base_model_path) # if config.uid == 0: # print(model.summary()) # else: # print(model.summary()) # del model X_train = X_train_orig X_test = X_test_orig y_train = y_train_orig y_test = y_test_orig # apply transfer configuration if config.nth_subset > 1: if X_train.shape[0] < config.nth_subset: print(f"Unable to apply nth-subset. Not enough data") X_train = X_train_orig[0::config.nth_subset] X_test = X_test_orig[0::config.nth_subset] y_train = y_train_orig[0::config.nth_subset] y_test = y_test_orig[0::config.nth_subset] print(f"Orig shape: {X_train_orig.shape} {config.nth_subset} th-subset shape: {X_train.shape}") print(f"Orig shape: {X_test_orig.shape} {config.nth_subset} th-subset shape: {X_test.shape}") print(f"Orig shape: {y_train_orig.shape} {config.nth_subset} th-subset shape: {y_train.shape}") print(f"Orig shape: {y_test_orig.shape} {config.nth_subset} th-subset shape: {y_test.shape}") modified = False # freeze certain layers for layer in model.layers: if layer.name not in config.train_layers: modified = True print(f"setting layer {layer.name} to False") layer.trainable = False else: print(f"layer {layer.name} stays True") if modified: print(f"modified. 
compiling") # re-compile model.compile(optimizer=train_optimizer, loss="mse", metrics=["mae"]) # trainable_count = int(np.sum([K.count_params(p) for p in set(model.trainable_weights)])) # non_trainable_count = int(np.sum([K.count_params(p) for p in set(model.non_trainable_weights)])) trainable_count = count_params(model.trainable_weights) non_trainable_count = count_params(model.non_trainable_weights) print(f"Total params: {trainable_count + non_trainable_count}") print(f"Trainable params: {trainable_count}") print(f"Non trainable params: {non_trainable_count}") # transfer model result = model.fit(X_train, y_train, epochs=num_epochs, batch_size=batch_size, verbose=2, validation_data=(X_test, y_test), callbacks=callbacks_list) train_mae = result.history["mae"] val_mae = result.history["val_mae"] gc.collect() tune_result = None tune_train_mae = None tune_val_mae = None if config.tune: print(f"Fine-Tuning transferred model") # apply fine-tuning: unfreeze all but batch-normalization layers! 
for layer in model.layers: if not layer.name.startswith("batch_normalization"): layer.trainable = True model.compile(optimizer=fine_tune_optimizer, loss="mse", metrics=["mae"]) # print(f"model for fine tuning") # print(model.summary()) tune_result = model.fit(X_train, y_train, epochs=fine_num_epochs, batch_size=batch_size, verbose=2, validation_data=(X_test, y_test), callbacks=callbacks_list) tune_train_mae = tune_result.history["mae"] tune_val_mae = tune_result.history["val_mae"] model.load_weights(file_path) # set evaluation def _compute_mae(_val_mae: List[float], _tune_val_mae: List[float]) -> float: if _tune_val_mae is not None: _val_mae = _val_mae + _tune_val_mae return min(val_mae) evaluator.set_mae(target_port, start_time, _compute_mae(val_mae, tune_val_mae), base_port, config.uid) y_pred = model.predict(X_test) grouped_mae = evaluator.group_mae(y_test, y_pred) evaluator.set_mae(target_port, start_time, grouped_mae, base_port, config.uid) # save history history_file_name = encode_history_file(training_type, target_port.name, start_time, td.base_port_name, config.uid) history_path = os.path.join(output_dir, "data", target_port.name, history_file_name) np.save(history_path, [result.history, tune_result.history if tune_result else None]) # plot history plot_dir = os.path.join(output_dir, "plot") plot_history(train_mae, val_mae, plot_dir, target_port.name, start_time, training_type, td.base_port_name, config.uid, tune_train_mae, tune_val_mae) # evaluator.plot_grouped_mae(target_port, training_type, start_time, config.uid) plot_predictions(y_pred, y_test, plot_dir, target_port.name, start_time, training_type, td.base_port_name, config.uid) self.set_transfer(target_port.name, td.base_port_name, config.uid) del checkpoint, early, redonplat del X_train_orig, X_test_orig, y_train_orig, y_test_orig, model, X_train, y_train, X_test, y_test gc.collect() tf.keras.backend.clear_session() gc.collect() del X_ts, y_ts
[ "def _models_to_device(self):\n for mc in self.model_configs:\n mc.model = mc.model.to(self.device)", "def deploy_to_device(self):\n if self.device_ids is not None and len(self.device_ids) > 1:\n if not isinstance(self.model, torch.nn.DataParallel):\n self.model ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generate TransferDefinitions based on transferconfig.json, containing those ports that have a base training for transferring to another port
def _generate_transfers(self) -> Dict[str, List[TransferDefinition]]: config = read_json(self.config_path) transfer_defs = {} ports = list(config["ports"]) permutations = list(itertools.permutations(ports, r=2)) # for pair in _permute(config["ports"]): for pair in permutations: base_port, target_port = self.pm.find_port(pair[0]), self.pm.find_port(pair[1]) if target_port is None: raise ValueError(f"No port found: Unable to transfer from base-port with name '{base_port.name}'") if target_port is None: raise ValueError(f"No port found: Unable to transfer to target-port with name '{pair[1]}'") trainings = self.pm.load_trainings(base_port, self.output_dir, self.routes_dir, training_type="base") # print(f"loaded trainings. base port {base_port.name}:\n{trainings.keys()}") if len(trainings.keys()) < 1: print(f"No base-training found for port '{base_port.name}'. Skipping") continue training = list(trainings.values())[-1][0] # print(f"training ({len(trainings.values())}): {training}") # print(f"Pair {base_port.name} ({len(trainings)} base-trains) -> {target_port.name}. " # f"Using latest at '{training.start_time}'") verify_output_dir(self.output_dir, target_port.name) td = TransferDefinition(base_port_name=base_port.name, base_model_path=training.model_path, target_port_name=target_port.name, target_routes_dir=os.path.join(self.routes_dir, target_port.name), target_model_dir=os.path.join(self.output_dir, "model", target_port.name), target_output_data_dir=os.path.join(self.output_dir, "data", target_port.name), target_plot_dir=os.path.join(self.output_dir, "plot", target_port.name), target_log_dir=os.path.join(self.output_dir, "log", target_port.name)) name = target_port.name if name in transfer_defs: transfer_defs[target_port.name].append(td) else: transfer_defs[target_port.name] = [td] return transfer_defs
[ "def port_configs():\n from abstract_open_traffic_generator.config import Config\n from abstract_open_traffic_generator.device import Device, Ethernet, Ipv4\n from abstract_open_traffic_generator.layer1 import FlowControl, Ieee8021qbb, Layer1, OneHundredGbe\n from abstract_open_traffic_generator.port im...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compares each curve with the next to verify continuity. Note that this function treats curves as directed, thus two curves that start at the same point will return `False` when compared.
def assert_continuous(*curves: CubicBezierCurve) -> bool: if not curves: raise ValueError("CurveChecker.assert_continuous() cannot be called on an empty list") previous_curve = curves[0] for curve in curves[1:]: if previous_curve.p1 != curve.p0: return False previous_curve = curve return True
[ "def assert_differentiable(*curves: CubicBezierCurve) -> bool:\n if not curves:\n raise ValueError(\"CurveChecker.assert_differentiable() cannot be called on an empty list\")\n\n if not assert_continuous(*curves):\n return False\n\n for curve0, curve1 in zip(curves, curves[1:]):\n if n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Verifies that the adjacent slopes between points are within specified tolerance of one another. Note that assert_collinear assumes ordered points; three actually collinear points passed with the middle point as the first or last argument will return `False`
def assert_collinear(*points: Point, tolerance: float = 1e-2) -> bool: if len(points) < 3: raise ValueError("CurveChecker.assert_collinear() must be called with at least three points") thetas = [np.arctan2(p0[1] - p1[1], p0[0] - p1[0]) for p0, p1 in zip(points, points[1:])] for t0, t1 in zip(thetas, thetas[1:]): if abs(t0 - t1) > tolerance: return False return True
[ "def are_collinear(p1, p2, p3, tolerance=0.5):\n x1, y1 = p1[0], p1[1]\n x2, y2 = p2[0], p2[1]\n x3, y3 = p3[0], p3[1]\n res = x1 * (y2 - y3) + x2 * (y3 - y1) + x3 * (y1 - y2)\n if -tolerance <= res <= tolerance:\n return True", "def isCollinear(a,b,c):\r\n #return slope(a, b) == slope(b,...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Verifies differentiability of curves by checking collinearity of adjacent curves' control points
def assert_differentiable(*curves: CubicBezierCurve) -> bool: if not curves: raise ValueError("CurveChecker.assert_differentiable() cannot be called on an empty list") if not assert_continuous(*curves): return False for curve0, curve1 in zip(curves, curves[1:]): if not assert_collinear(curve0.c1, curve1.p0, curve1.c0): return False return True
[ "def isCollinear(a,b,c):\r\n #return slope(a, b) == slope(b, c) == slope(c, a) #DOES NOT WORK\r\n #return (b[0] - a[0]) * (c[1] - a[1]) == (c[0] - a[0]) * (b[1] - a[1]) \r\n #return distance(a,b) + distance(b,c) == distance(a,c)\r\n x1 = a[0]\r\n y1 = a[1]\r\n x2 = b[0]\r\n y2 = b[1]\r\n x3 ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Converts a path to a string representation for inclusion in an SVG file as
def path_to_string(path: Path) -> str: assert_continuous(path) pieces = ["M {} {}".format(path[0].p0[0], path[0].p0[1])] for curve in iter(path): # iter cast not strictly necessary piece = "C {} {} {} {} {} {}".format( int(round(curve.c0[0])), int(round(curve.c0[1])), int(round(curve.c1[0])), int(round(curve.c1[1])), int(round(curve.p1[0])), int(round(curve.p1[1])) ) pieces.append(piece) return " ".join(pieces)
[ "def path_to_str(path):\n if hasattr(path, '__fspath__'):\n path = as_str_any(path.__fspath__())\n return path", "def _path_to_string(path):\n\n return \"/\".join(str(item) for item in path)", "def inline_static(path):\n prefix = 'data:image/svg+xml;utf8,'\n data = ''\n full_path = ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests the TurbiniaSetup method.
def testTurbiniaSetup(self, _mock_read_config): _mock_read_config.return_value = {"OUTPUT_DIR": "/tmp"} self.turbinia_processor.TurbiniaSetUp( project="turbinia-project", turbinia_auth=False, turbinia_recipe=None, turbinia_zone="us-central1f", turbinia_api="http://localhost:8001", incident_id="123456789", sketch_id="12345", ) self.assertEqual(self.turbinia_processor.project, "turbinia-project") self.assertEqual(self.turbinia_processor.turbinia_zone, "us-central1f") self.assertEqual( self.turbinia_processor.turbinia_api, "http://localhost:8001") self.assertEqual(self.turbinia_processor.incident_id, "123456789") self.assertEqual(self.turbinia_processor.sketch_id, "12345") self.assertEqual(self.turbinia_processor.output_path, "/tmp") self.assertEqual(self.turbinia_processor.turbinia_recipe, None)
[ "def test_setup(self):\n assert self.tac_handler.setup() is None\n self.assert_quantity_in_outbox(0)", "def setUp(\n self,\n _,\n __,\n ): #pylint: disable=arguments-differ\n config.LoadConfig()\n config.TASK_MANAGER = 'PSQ'\n config.STATE_MANAGER = 'Datastore'\n importl...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests the _isInterestingPath method.
def testIsInterestingPath(self): # pylint: disable=protected-access self.assertTrue(self.turbinia_processor._isInterestingPath(TEST_TASK_PATH))
[ "def test_pathEntriesOnPath(self):\n for n in ['os',\n 'twisted',\n 'twisted.python',\n 'twisted.python.reflect']:\n self.failUnlessIn(\n modules.getModule(n).pathEntry.filePath.path,\n sys.path)", "def is_path(self...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests the RefreshClientCredentials method.
def testRefreshClientCredentials(self, mock_get_credentials, mock_initialize_client): # Set an expired token. self.turbinia_processor.credentials = mock.MagicMock( expiry = FAKE_CREDENTIALS['expiry'], expired = True) self.turbinia_processor.RefreshClientCredentials() mock_get_credentials.assert_called_once() mock_initialize_client.assert_called_once()
[ "def test_refresh(client):\n responses.add(responses.POST,\n '%s/oauth/token' % settings.API_BASE_URL,\n body = ('{\"access_token\": '\n '\"tail\", \"refresh_token\": '\n '\"wagging\", \"expires_in\": 3600}'\n ),\n status=200,\n content_type='appli...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests the InitializeTurbiniaApiClient method.
def testInitializeTurbiniaApiClientNoCreds(self, mock_get_credentials): self.turbinia_processor.turbinia_api = 'http://127.0.0.1:8000' self.turbinia_processor.turbinia_auth = True mock_credentials = mock.MagicMock(spec=Credentials, id_token = FAKE_CREDENTIALS['token']) mock_credentials.id_token = mock.MagicMock() mock_credentials.id_token.return_value = FAKE_CREDENTIALS['token'] self.turbinia_processor.credentials = mock_credentials mock_get_credentials.return_value = mock_credentials result = self.turbinia_processor.InitializeTurbiniaApiClient(None) mock_get_credentials.assert_called_once() self.assertIsInstance(result, turbinia_api_lib.ApiClient)
[ "def testInitializeTurbiniaApiClient(self, mock_get_credentials):\n self.turbinia_processor.turbinia_api = 'http://127.0.0.1:8000'\n self.turbinia_processor.turbinia_auth = True\n mock_credentials = mock.MagicMock(spec=Credentials, id_token = FAKE_CREDENTIALS['token'])\n mock_credentials.id_token = mock...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests the InitializeTurbiniaApiClient method.
def testInitializeTurbiniaApiClient(self, mock_get_credentials): self.turbinia_processor.turbinia_api = 'http://127.0.0.1:8000' self.turbinia_processor.turbinia_auth = True mock_credentials = mock.MagicMock(spec=Credentials, id_token = FAKE_CREDENTIALS['token']) mock_credentials.id_token = mock.MagicMock() mock_credentials.id_token.return_value = FAKE_CREDENTIALS['token'] self.turbinia_processor.credentials = mock_credentials mock_get_credentials.return_value = mock_credentials result = self.turbinia_processor.InitializeTurbiniaApiClient(mock_credentials) mock_get_credentials.assert_not_called() self.assertIsInstance(result, turbinia_api_lib.ApiClient)
[ "def testInitializeTurbiniaApiClientNoCreds(self, mock_get_credentials):\n self.turbinia_processor.turbinia_api = 'http://127.0.0.1:8000'\n self.turbinia_processor.turbinia_auth = True\n mock_credentials = mock.MagicMock(spec=Credentials, id_token = FAKE_CREDENTIALS['token'])\n mock_credentials.id_token...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parse OpenSSLstyle foo.0, foo.1, ... subscripted options. Returns a list of values matching the specified option name.
def multiget(self, option, section = None): matches = [] if section is None: section = self.default_section if self.cfg.has_option(section, option): matches.append((-1, self.get(option, section = section))) for key, value in self.cfg.items(section): s = key.rsplit(".", 1) if len(s) == 2 and s[0] == option and s[1].isdigit(): matches.append((int(s[1]), self.get(option, section = section))) matches.sort() return [match[1] for match in matches]
[ "def get_doxygen_option(doxyfile: str, option: str) -> List[str]:\n\n option_re = re.compile(r\"^\\s*([A-Z0-9_]+)\\s*=\\s*(.*)$\")\n multiline_re = re.compile(r\"^\\s*(.*)$\")\n\n values = []\n found = False\n finished = False\n for line in doxyfile.splitlines():\n if not found:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get an integer option, perhaps with a default value.
def getint(self, option, default = None, section = None): return int(self.get(option, default, section))
[ "def int_option(options, name, default):\n\n if name in options:\n value = options[name]\n try:\n return int(value)\n except ValueError:\n print(f\"ERROR: option '{name}' needs to be an integer number.\")\n exit(1)\n else:\n return default", "def ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get a long integer option, perhaps with a default value.
def getlong(self, option, default = None, section = None): return long(self.get(option, default, section))
[ "def getint(self, option, default = None, section = None):\n return int(self.get(option, default, section))", "def to_long(value: Any) -> int:\n return LongConverter.to_long_with_default(value, 0)", "def to_long(self, value):\n if value is not None:\n return long(value)", "def getL...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Consolidated control for all the little global control flags scattered through the libraries. This isn't a particularly good place for this function to live, but it has to live somewhere and making it a method of the config parser from which it gets all of its data is less silly than the available alternatives.
def set_global_flags(self): import rpki.http, rpki.x509, rpki.sql, rpki.async, rpki.log try: rpki.http.debug_http = self.getboolean("debug_http") except ConfigParser.NoOptionError: pass try: rpki.http.want_persistent_client = self.getboolean("want_persistent_client") except ConfigParser.NoOptionError: pass try: rpki.http.want_persistent_server = self.getboolean("want_persistent_server") except ConfigParser.NoOptionError: pass try: rpki.http.use_adns = self.getboolean("use_adns") except ConfigParser.NoOptionError: pass try: rpki.http.enable_ipv6_clients = self.getboolean("enable_ipv6_clients") except ConfigParser.NoOptionError: pass try: rpki.http.enable_ipv6_servers = self.getboolean("enable_ipv6_servers") except ConfigParser.NoOptionError: pass try: rpki.x509.CMS_object.debug_cms_certs = self.getboolean("debug_cms_certs") except ConfigParser.NoOptionError: pass try: rpki.sql.sql_persistent.sql_debug = self.getboolean("sql_debug") except ConfigParser.NoOptionError: pass try: rpki.async.timer.gc_debug = self.getboolean("gc_debug") except ConfigParser.NoOptionError: pass try: rpki.async.timer.run_debug = self.getboolean("timer_debug") except ConfigParser.NoOptionError: pass try: rpki.x509.XML_CMS_object.dump_outbound_cms = rpki.x509.DeadDrop(self.get("dump_outbound_cms")) except ConfigParser.NoOptionError: pass try: rpki.x509.XML_CMS_object.dump_inbound_cms = rpki.x509.DeadDrop(self.get("dump_inbound_cms")) except ConfigParser.NoOptionError: pass try: rpki.async.gc_summary(self.getint("gc_summary"), self.getint("gc_summary_threshold", 0)) except ConfigParser.NoOptionError: pass try: rpki.log.enable_tracebacks = self.getboolean("enable_tracebacks") except ConfigParser.NoOptionError: pass
[ "def _GclStyleSettings(self):\n settings = {\n 'port': self.GetCodeReviewSetting('TRYSERVER_HTTP_PORT'),\n 'host': self.GetCodeReviewSetting('TRYSERVER_HTTP_HOST'),\n 'svn_repo': self.GetCodeReviewSetting('TRYSERVER_SVN_URL'),\n 'gerrit_url': self.GetCodeReviewSetting('TRYSERVER_GERRIT_URL'),...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Checks for vertices' degrees >>> vertices_degree([[1, 0], [1, 1]], [[0, 1], [1, 0]]) (False, []) >>> vertices_degree([[1, 1], [0, 1]], [[1, 0], [1, 1]]) (True, [2, 1], [1, 2])
def vertices_degree(graph1: list, graph2: list): check1 = [] check2 = [] for row, _ in enumerate(graph1): degree1 = 0 degree2 = 0 for column, _ in enumerate(graph1[row]): if graph1[row][column] == 1: degree1 += 1 if graph2[row][column] == 1: degree2 += 1 check1.append(degree1) check2.append(degree2) if sorted(check1) == sorted(check2): return True, check1, check2 return False, []
[ "def _has_degree(\n self,\n degree: int,\n vertex: Vertex,\n ) -> bool:\n\n return vertex.get_id() in self._vertices_of_degree[degree]", "def vertice_degree(self):\r\n if(self.is_empty()):\r\n raise ValueError(\"Graph is empty.\")\r\n else:\r\n if...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
r""" Wait for the user to type a character (and hit Enter). If the user enters one of the characters in letters, return that character. If the user hits Enter without entering a character, and default is specified, returns `default`, Otherwise, asks the user to enter a character again.
def _prompt(letters='yn', default=None): import sys while True: try: inputstr = sys.stdin.readline().strip() except KeyboardInterrupt: sys.exit(0) if inputstr and inputstr in letters: return inputstr if default is not None and inputstr == '': return default print 'Come again?'
[ "def _prompt(letters='yn', default=None):\n while True:\n try:\n input_text = sys.stdin.readline().strip()\n except KeyboardInterrupt:\n sys.exit(0)\n if input_text and input_text in letters:\n return input_text\n if default is not None and input_text ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function to remove test results and confirmations older than 10 blocks
async def cleanTestResults(CURRENT_HEIGHT): LAST_GOOD_HEIGHT = int(CURRENT_HEIGHT) - 10 for testId in list(testResults): if int(testId) <= LAST_GOOD_HEIGHT: del testResults[testId] for testId in list(testConfirmations): if int(testId) <= LAST_GOOD_HEIGHT: del testConfirmations[testId]
[ "def remaining_batch_tests(loaded_batch_tests):\n remaining_tests = batch_test_set - set(loaded_batch_tests)\n with open('remaining_tests.txt', mode='w') as outfile:\n for batch_test in remaining_tests:\n outfile.write(\"%s\\n\" % batch_test)", "def remove_totally_failed_tests(df):\n al...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Instance data use_wsdl if True try to construct XML Instance from information in WSDL.
def __init__(self, wsdl, service=None, port=None, tracefile=None, typesmodule=None, nsdict=None, soapAction=None, ns=None, op_ns=None, use_wsdl=False): if not hasattr(wsdl, 'targetNamespace'): wsdl = wstools.WSDLTools.WSDLReader().loadFromURL(wsdl) # for item in wsdl.types.items(): # self._serializer.loadSchema(item) self._service = wsdl.services[service or 0] self.__doc__ = self._service.documentation self._port = self._service.ports[port or 0] self._name = self._service.name self._wsdl = wsdl self._tracefile = tracefile self._typesmodule = typesmodule self._nsdict = nsdict or {} self._soapAction = soapAction self._ns = ns self._op_ns = op_ns self._use_wsdl = use_wsdl binding = self._port.getBinding() portType = binding.getPortType() for item in portType.operations: callinfo = wstools.WSDLTools.callInfoFromWSDL(self._port, item.name) method = MethodProxy(self, callinfo) setattr(self, item.name, method)
[ "def _prepare_wsdl_objects(self):\r\n # This holds some optional options for the request..\r\n self.AddressValidationOptions = self.client.factory.create('AddressValidationOptions')\r\n \r\n # This is good to review if you'd like to see what the data structure\r\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns typecodes representing input and output messages, if request and/or response fails to be generated return None for either or both. callinfo WSDLTools.SOAPCallInfo instance describing an operation.
def _getTypeCodes(self, callinfo): prefix = None self._resetPrefixDict() if callinfo.use == 'encoded': prefix = self._getPrefix(callinfo.namespace) try: requestTC = self._getTypeCode(parameters=callinfo.getInParameters(), literal=(callinfo.use=='literal')) except EvaluateException, ex: print "DEBUG: Request Failed to generate --", ex requestTC = None self._resetPrefixDict() try: replyTC = self._getTypeCode(parameters=callinfo.getOutParameters(), literal=(callinfo.use=='literal')) except EvaluateException, ex: print "DEBUG: Response Failed to generate --", ex replyTC = None request = response = None if callinfo.style == 'rpc': if requestTC: request = TC.Struct(pyclass=None, ofwhat=requestTC, pname=callinfo.methodName) if replyTC: response = TC.Struct(pyclass=None, ofwhat=replyTC, pname='%sResponse' %callinfo.methodName) else: if requestTC: request = requestTC[0] if replyTC: response = replyTC[0] #THIS IS FOR RPC/ENCODED, DOC/ENCODED Wrapper if request and prefix and callinfo.use == 'encoded': request.oname = '%(prefix)s:%(name)s xmlns:%(prefix)s="%(namespaceURI)s"' \ %{'prefix':prefix, 'name':request.oname, 'namespaceURI':callinfo.namespace} return request, response
[ "def generate_operation_input(self, service_id, operation_id, input_data,\n mapping_type):\n\n param_info_map = \\\n self._metadata.service_map[service_id][operation_id].param_info_map\n\n self.mapping_type = mapping_type\n try:\n fields = {...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
namespaces typecodes representing global elements with literal encoding. typeCode typecode representing an element. namespaceURI namespace literal True/False
def _globalElement(self, typeCode, namespaceURI, literal): if literal: typeCode.oname = '%(prefix)s:%(name)s xmlns:%(prefix)s="%(namespaceURI)s"' \ %{'prefix':self._getPrefix(namespaceURI), 'name':typeCode.oname, 'namespaceURI':namespaceURI}
[ "def XmlTypeNamespace(self) -> str:", "def xmpns_tagtype(xmp_namespace):\n tagtype = xmp_namespace # default is the full namespace\n if xmp_namespace == 'http://www.w3.org/1999/02/22-rdf-syntax-ns#':\n tagtype = 'XMP-RDF'\n elif xmp_namespace == 'http://ns.adobe.com/tiff/1.0/':\n tagtype = ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Retrieves a prefix/namespace mapping. namespaceURI namespace
def _getPrefix(self, namespaceURI): prefixDict = self._getPrefixDict() if prefixDict.has_key(namespaceURI): prefix = prefixDict[namespaceURI] else: prefix = 'ns1' while prefix in prefixDict.values(): prefix = 'ns%d' %int(prefix[-1]) + 1 prefixDict[namespaceURI] = prefix return prefix
[ "def prefix_to_ns(self, prefix):\n defin = self.module.i_ctx.get_module(\n self.module.i_prefixes[prefix][0])\n return defin.search_one(\"namespace\").arg", "def test_getPrefix_with_namespace(self):\n ns_map = NamespaceMap()\n namespace = 'namespace'\n result = ns_map...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Clears the prefix dictionary, this needs to be done before creating a new typecode for a message (ie. before, and after creating a new message typecode)
def _resetPrefixDict(self): self._getPrefixDict().clear()
[ "def clear(self):\n for k in filter(lambda x: x.startswith(self._prefix), self._storage):\n del self._storage[k]", "def reset(self):\n self.footnotes = OrderedDict()\n self.unique_prefix += 1", "async def clear_prefixes(self, ctx: Context) -> None:\n\n assert ctx.guild is ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a typecode instance representing the passed in element. element XMLSchema.ElementDeclaration instance literal literal encoding? local is locally defined? namespaceURI namespace
def _getElement(self, element, literal=False, local=False, namespaceURI=None): if not element.isElement(): raise TypeError, 'Expecting an ElementDeclaration' tc = None elementName = element.getAttribute('name') tp = element.getTypeDefinition('type') typeObj = None if not (tp or element.content): nsuriType,localName = element.getAttribute('type') typeClass = self._getTypeClass(nsuriType,localName) typeObj = typeClass(elementName) elif not tp: tp = element.content if not typeObj: typeObj = self._getType(tp, elementName, literal, local, namespaceURI) minOccurs = int(element.getAttribute('minOccurs')) typeObj.optional = not minOccurs maxOccurs = element.getAttribute('maxOccurs') typeObj.repeatable = (maxOccurs == 'unbounded') or (int(maxOccurs) > 1) return typeObj
[ "def element_type(self) -> global___Type:", "def __init__(self, element):\n self._element = to_type(element)", "def create_class_instance(element, element_id, doc_id):\n xsi_type = get_xsi_type(element)\n element_class = XSI_TYPE_CLASSES[xsi_type]\n return element_class.from_etree(element)", "def ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a typecode class representing the type we are looking for. localName name of the type we are looking for. namespaceURI defining XMLSchema targetNamespace.
def _getTypeClass(self, namespaceURI, localName): bti = BaseTypeInterpreter() simpleTypeClass = bti.get_typeclass(localName, namespaceURI) return simpleTypeClass
[ "def get_type(self, fqn):\n t = self.type_registry.get_type(fqn, nothrow = True)\n if not t:\n # TODO: if the fqn is actually NOT fully qualified, then\n # see if this matches any of the ones in the import decls\n\n # Try with the namespace as well\n n,ns,fq...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
extracts the features used to calculate neural style cost gram_style_features a list of gram matrices calculated from the style layer outputs of the style image content_feature the content layer output of the content image
def generate_features(self): content_input = self.content_image * 255 style_input = self.style_image * 255 preprocessed_content = tf.keras.applications.vgg19.preprocess_input( content_input) preprocessed_style = tf.keras.applications.vgg19.preprocess_input( style_input) outputs_content = self.model(preprocessed_content) outputs_style = self.model(preprocessed_style) num_style_layers = tf.size(self.style_layers) style_outputs, content_outputs = ( outputs_style[:num_style_layers], outputs_content[num_style_layers:]) style_outputs = [self.gram_matrix( style_output)for style_output in style_outputs] self.gram_style_features = style_outputs self.content_feature = content_outputs
[ "def get_style_image_features(image):\n ### START CODE HERE ###\n # preprocess the image using the given preprocessing function\n preprocessed_style_image = preprocess_image(image)\n\n # get the outputs from the inception model that you created using inception_model()\n outputs = inception(preprocessed_style_i...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Downsamples spike data to include only the top 1% of frames
def downsample_spikes(S, thres=150, verbose=1): sum_S = np.sum(S, axis=0) if verbose > 0: print( 'Downsampling spike data to {} frames using threshold {}' .format(np.sum(np.greater(sum_S, thres)), thres)) return S[:, np.greater(sum_S, thres)]
[ "def data_down_sampling(data, n, mode):\r\n \"\"\" mode = -1: min, mode = 1: max, mode = 0: average\"\"\"\r\n result = np.zeros( (data.shape[0], n, n) )\r\n for i in range(data.shape[0]):\r\n result[i] = down_sampling(data[i], n, mode)\r\n if ((i + 1) % 5000 == 0):\r\n print(\"Down...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Downsamples fluorescence data to include approximately the top 1% of frames based on total increase in activity. Currently the threshold is set for 1000 neurons. Original code from
def downsample_fluorescence(F, thres=20, verbose=1): diff_F = np.diff(F, axis=1) sum_F = np.sum(diff_F, axis=0) F = F[:,:-1] if verbose > 0: print( 'Downsampling fluorescence data to {} frames using threshold {}' .format(np.sum(np.greater(sum_F, thres)))) return F[:, np.greater(sum_F, thres)]
[ "def no_overfitting(self):\n\n # Instance with minimun length should be the maximum length\n train_len = []\n [train_len.append(st['Nevents']) for st in self.stats]\n train_len = np.array(train_len)\n max_len = train_len[train_len != 0].min()\n\n # CROPS FEATURE SAMPLES\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generates a balanced set of training examples from one or more datasets.
def generate_dataset( datasets, networks, parents, mode='train', mean=None, verbose=1, **params): # Parameters classes = params.setdefault('classes', [-1,0,1]) data_type = params.setdefault('data_type', 'spikes') thres = params.setdefault('thres', 150.0) target = params.setdefault('target', int(1.2e6)) valid_split = params.setdefault('valid_split', 0.1) slice_len = params.setdefault('slice_len', 330) assert len(datasets) == len(networks) == len(parents) examples = np.zeros((target, 5, slice_len, 1)) labels = np.zeros((target, len(classes))) ex_per_netw = target//len(datasets) params['target'] = ex_per_netw for i in range(len(datasets)): if verbose > 0: print('Network {} of {}'.format(i+1, len(datasets))) data = datasets[i] network = networks[i] parents_ = parents[i] if data_type == 'spikes': ds_data = downsample_spikes(data, thres=thres, verbose=verbose) elif data_type == 'fluorescence': ds_data = downsample_fluorescence( data, thres=thres, verbose=verbose) else: raise ValueError('Invalid data type') start = i*ex_per_netw end = (i+1)*ex_per_netw examples[start:end], labels[start:end] = get_examples( ds_data, network, parents_, verbose=verbose, **params) shuffle_idx = np.random.permutation(np.arange(examples.shape[0])) examples = examples[shuffle_idx] labels = labels[shuffle_idx] if mode == 'train': idx = int(examples.shape[0]*valid_split) ex_valid, ex_train = np.split(examples, [idx], axis=0) lbl_valid, lbl_train = np.split(labels, [idx], axis=0) mean = np.mean(ex_train, axis=0) ex_train -= mean ex_valid -= mean return ex_train, ex_valid, lbl_train, lbl_valid, mean elif mode == 'test': assert mean != None examples -= mean return examples, labels else: raise ValueError('Invalid mode')
[ "def _make_train_datasets(self):\n # Draw data from a random generator with a fixed seed to always get the\n # same data.\n rng = np.random.RandomState(42)\n train_x = rng.normal(0.0, self._noise_level, self._train_size)\n train_y = rng.normal(0.0, self._noise_level, self._train_s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Submit a metric as a rate, additional tags provided will be added to the ones from the label provided via the metrics object.
def _submit_rate(self, metric_name, val, metric, custom_tags=None, hostname=None): _tags = self._metric_tags(metric_name, val, metric, custom_tags, hostname) self.check.rate('{}.{}'.format(self.NAMESPACE, metric_name), val, _tags, hostname=hostname)
[ "def _process_pod_rate(self, metric_name, metric, scraper_config, labels=None):\n if labels is None:\n labels = []\n\n if metric.type not in METRIC_TYPES:\n self.log.error(\"Metric type %s unsupported for metric %s\", metric.type, metric.name)\n return\n\n sampl...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Submit a metric as a gauge, additional tags provided will be added to the ones from the label provided via the metrics object.
def _submit_gauge(self, metric_name, val, metric, custom_tags=None, hostname=None): _tags = self._metric_tags(metric_name, val, metric, custom_tags, hostname) self.check.gauge('{}.{}'.format(self.NAMESPACE, metric_name), val, _tags, hostname=hostname)
[ "def define_gauge_metric(registry, metric_obj):\n labels_map = metric_obj.get(\"labels\", {})\n labels = labels_map.keys()\n gauge = Gauge(\n name=metric_obj.get(\"metric_name\"),\n documentation=metric_obj.get(\"description\"),\n registry=registry,\n labelnames=labels,\n )\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Submit a metric as a monotonic count, additional tags provided will be added to the ones from the label provided via the metrics object.
def _submit_monotonic_count(self, metric_name, val, metric, custom_tags=None, hostname=None): _tags = self._metric_tags(metric_name, val, metric, custom_tags, hostname) self.check.monotonic_count('{}.{}'.format(self.NAMESPACE, metric_name), val, _tags, hostname=hostname)
[ "def inc_count(self, metric, value, tags):\n self.increment(metric, value, tags=tags)\n self.increment('%s.count' % metric, tags=tags)", "def post_save_metrics(sender, **kwargs):\r\n action = 'created' if kwargs.pop('created', False) else 'updated'\r\n\r\n tags = _database_tags(action, sender, kwargs)...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Visit assignment node whose targets are all simple.
def visit_simple_assign(self, node): temp = gensym() temp_target = to_name(temp, ast.Store()) stmts = [ ast.Assign([temp_target], node.value) ] stmts += [ ast.Assign([target], to_name(temp)) for target in node.targets ] return stmts
[ "def visit_Assign(self, node):\r\n self.visit(node.node)\r\n self.visit(node.target)", "def assignment_node():\n return RedBaron('a = 1')[0]", "def visit_Assign(self, node):\n self.generic_visit(node)\n target = get_single_target(node)\n if isinstance(target, ast.Attribute)...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Visit assignment node with at least one compound target.
def visit_compound_assign(self, node): # Determine number of values (arity) of compound assignment. nvalues = { len(target.elts) for target in node.targets if is_sequence_node(target) } if len(nvalues) > 1: # A multiple, compound assignment with different arities, e.g., # `x,y = a,b,c = ...` is not a syntax error in Python, though it # probably should be because it's guaranteed to cause a runtime # error. Raise the error here, since we cannot proceed. raise SyntaxError("Multiple assignment with different arities") nvalues = nvalues.pop() # Assign temporary variables. temps = [ gensym() for i in range(nvalues) ] stmts = [] if is_sequence_node(node.value) and len(node.value.elts) == nvalues: # Special case: RHS is sequence literal of correct length. for i in range(nvalues): temp_target = to_name(temps[i], ast.Store()) stmts.append(ast.Assign([temp_target], node.value.elts[i])) else: # General case. temp_target = to_tuple( (to_name(temp, ast.Store()) for temp in temps), ast.Store()) stmts.append(ast.Assign([temp_target], node.value)) # Rewrite assignments as sequence of assignments. for target in reversed(node.targets): if is_sequence_node(target): stmts.extend(ast.Assign([target.elts[i]], to_name(temps[i])) for i in range(nvalues)) else: temp_tuple = to_tuple(to_name(temp) for temp in temps) stmts.append(ast.Assign([target], temp_tuple)) return stmts
[ "def visit_Assign(self, node):\r\n self.visit(node.node)\r\n self.visit(node.target)", "def visit_simple_assign(self, node):\n temp = gensym()\n temp_target = to_name(temp, ast.Store())\n stmts = [ ast.Assign([temp_target], node.value) ]\n stmts += [ ast.Assign([target], ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert assignment to attributes to `setattr` call.
def visit_Assign(self, node): self.generic_visit(node) target = get_single_target(node) if isinstance(target, ast.Attribute): args = [ target.value, ast.Str(target.attr), node.value ] return ast.Expr(to_call(to_name('setattr'), args)) return node
[ "def assign_attr(obj, name, val, oper=None):\n if oper:\n setattr(obj, name, oper(getattr(obj, name), val))\n else:\n setattr(obj, name, val)\n return obj", "def set_attributes(object, attributes):\n for name, attribute in attributes.items():\n setattr(object, name, attribute)", ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert index (slice) to functional expression.
def index_to_expr(self, index): if isinstance(index, ast.Index): return index.value elif isinstance(index, ast.Slice): if index.lower is None and index.step is None: args = [ index.upper ] elif index.step is None: args = [ index.lower, index.upper ] else: args = [ index.lower, index.upper, index.step ] args = [ to_name_constant(None) if arg is None else arg for arg in args ] return to_call(to_name('slice'), args) elif isinstance(index, ast.ExtSlice): indexes = list(map(self.index_to_expr, index.dims)) return ast.Tuple(elts=indexes, ctx=ast.Load()) elif isinstance(index, ast.Tuple): elts = list(map(self.index_to_expr, index.elts)) return ast.Tuple(elts=elts, ctx=ast.Load()) else: return index
[ "def __getitem__(self, item):\n if isinstance(item, int):\n names = self.get_column_names()\n item = item % len(self)\n return [self.evaluate(name, item, item+1, array_type='python')[0] for name in names]\n elif isinstance(item, six.string_types):\n if hasat...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert indexed `del` operation to `delitem` call.
def visit_Delete(self, node): self.generic_visit(node) target = get_single_target(node) if isinstance(target, ast.Subscript): fun = to_attribute(self.operator, 'delitem') args = [ target.value, self.index_to_expr(target.slice) ] return ast.Expr(to_call(fun, args)) return node
[ "def delitem(obj, index):\n del obj[index]\n return obj", "def __delitem__(self, i):\r\n key = self._main._sequence[i]\r\n if isinstance(i, types.SliceType):\r\n for k in key:\r\n # FIXME: efficiency?\r\n del self._main[k]\r\n else:\r\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Whether the AST node can be safely evaluated twice.
def can_reevaluate(self, node): return isinstance(node, (ast.Name, ast.Num, ast.Str)) or \ (six.PY3 and isinstance(node, ast.Bytes)) or \ (ast_has_name_constant and isinstance(node, ast.NameConstant))
[ "def evil_hack(self, other):\n if isinstance(other, FExpr):\n return other == self\n return isinstance(other, self.__class__) and self.id == other.id", "def _is_consistent(self, node: Node):\n return self._get_g(node.key) == self._get_rhs(node.key)", "def can_reuse(self):\n return sel...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert AST operator to function in operator module.
def op_to_function(self, op): name = op.__class__.__name__.lower() return to_attribute(self.operator, inplace_operator_table[name])
[ "def op_to_function(self, op):\n name = op.__class__.__name__.lower()\n name = operator_table.get(name, name)\n return to_attribute(self.operator, name)", "def _convert_operator(\n self, op_name, node_name, inputs, attrs, identity_list=None, convert_map=None\n ):\n identity_l...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert augmented assignment to assignment plus function call.
def visit_AugAssign(self, node): # FIXME: Gensym the LHS to avoid two evaluations. self.generic_visit(node) rhs = to_call(self.op_to_function(node.op), [set_ctx(node.target), node.value]) return ast.Assign([node.target], rhs)
[ "def assign(lvalue, rvalue):\n return AssignOp(lvalue, rvalue)", "def get_assign_op(self): # TODO delete the other one\n return self.raw_w.assign(self.dynamics)", "def handle_assignment(stmt):\n\n identifier = ast.Name(id=stmt[0][1], ctx=ast.Store())\n value = Parser.handle_arithmetic(s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert AST operator to function in operator module.
def op_to_function(self, op): name = op.__class__.__name__.lower() name = operator_table.get(name, name) return to_attribute(self.operator, name)
[ "def op_to_function(self, op):\n name = op.__class__.__name__.lower()\n return to_attribute(self.operator, inplace_operator_table[name])", "def _convert_operator(\n self, op_name, node_name, inputs, attrs, identity_list=None, convert_map=None\n ):\n identity_list = identity_list if ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert list literal to function call.
def visit_List(self, node): self.generic_visit(node) if isinstance(node.ctx, ast.Load): return to_call(to_attribute(self.operator, '__list__'), node.elts) return node
[ "def _maplist_vm(vm, f, xs):\n def f_(*args):\n return vm.call(f, args)\n return list(map(f_, xs))", "def listify(arg):\n if not isinstance(arg, list):\n arg = [arg, ]\n return arg", "def listify(arg):\n if isinstance(arg, list):\n return arg\n else:\n return [arg]"...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert tuple literal to function call.
def visit_Tuple(self, node): self.generic_visit(node) if isinstance(node.ctx, ast.Load): return to_call(to_attribute(self.operator, '__tuple__'), node.elts) return node
[ "def eval_func_tuple(f_args):\n return f_args[0](*f_args[1:])", "def tuple(x):\n pass", "def parse_tuple(value):\n match = re.match(r'(\\w+)=(\\w+)\\((.*?)\\)', value)\n assert match, \"could not parse '%s'\" % value\n return match.group(1), eval(match.group(2))(match.group(3))", "def TupleConver...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert set literal to function call.
def visit_Set(self, node): self.generic_visit(node) return to_call(to_attribute(self.operator, '__set__'), node.elts)
[ "def _gen_set_cmd(dev_set_func, node_path: str):\n def set_cmd(val):\n return dev_set_func(node_path, val)\n return set_cmd", "def set(x):\n pass", "def parse_set_cmd(self, line):\n _, set_type, var_name, _, set_name = line.split()\n if set_type not in SET_TYPES:\n self.pr...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This takes a string as an input parameter and treats it as a zip code, looks up the weather for that zipcode, and returns the current temperature at that zipcode in Fahrenheit.
def weather(zipcode): URL = 'http://api.openweathermap.org/data/2.5/weather?zip=' + zipcode + ',us&appid=' + '7d7a3cf9902ef14f54f49f160fc8a550' + '&units=imperial' webpage = urllib.request.urlopen(URL) contents = webpage.read() contents = contents.decode('ascii') weather = eval(contents) #this line turns it from a string into dictionaries and lists temperature = weather['main']['temp'] return temperature
[ "def weather(zipcode):\n URL = 'http://api.openweathermap.org/data/2.5/weather?zip='\\\n + zipcode \\\n +',us&appid=4bd44e422bc37d9761411c9efe4c1112&units=imperial'\n webpage = urllib.request.urlopen(URL) \n contents = webpage.read()\n contents = contents.decode('ascii')\n d=eval(contents)\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Finds the token where the value is stored.
def _value_token_index(self): # TODO: memoize this value for i, token in enumerate(self.tokens): if not token.type.is_metadata: return i raise RuntimeError('could not find a value token')
[ "def get_token_value(token):\n return token[0]", "def match_value(self, token_type, token_value):\n if isinstance(self.cursor(), token_type) and self.cursor().token == token_value:\n token = self.cursor()\n self.pos += 1\n else:\n raise ParseError(\"Expected {!s}....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
>>> import shutil >>> import os.path >>> import core.docprocessor >>> basepath = 'core/test_output' >>> f = open('core/test/cv_1.doc', 'r') >>> cv1 = core.docprocessor.Processor(f, 'cv_1.doc', basepath) >>> cv1.result True >>> os.path.isfile(os.path.join(cv1.markdown_path, ... cv1.name.md)) True >>> cv1.deleteconvert() >>> os.path.isfile(os.path.join(cv1.markdown_path, ... cv1.name.md)) False >>> f.close() >>> shutil.rmtree(basepath)
def deleteconvert(self): filename = os.path.join(self.docx_path, self.name.docx) if os.path.isfile(filename): os.remove(filename) filename = os.path.join(self.html_path, self.name.html) if os.path.isfile(filename): os.remove(filename) filename = os.path.join(self.docbook_path, self.name.xml) if os.path.isfile(filename): os.remove(filename) filename = os.path.join(self.markdown_path, self.name.md) if os.path.isfile(filename): os.remove(filename)
[ "def clean_docs(c):\n c.run(f\"rm -fr {DOCS_BUILD_DIR}\")", "def clean(self):\n if os.path.exists(self.paths['build_dir']):\n shutil.rmtree(self.paths['build_dir'])\n if os.path.exists(os.path.join(self.base_dir, 'docs')):\n shutil.rmtree(os.path.join(self.base_dir, 'docs'))...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Read ascii file to get weather info
def read_weather(self): print "Reading weather data from file",self.datafile tab = ascii.read(self.datafile) # Fix 'T' values in precipitation column, which represent tiny # amounts of rain (not measurable) TINY_VALUE = '.005' # 0.005 is half the smallest measurable value rain = tab['PrecipitationIn'] wbad = (rain == 'T') rain[wbad] = TINY_VALUE rain = numpy.array(rain).astype("float") # Replace string version of precip with float version tab['PrecipIn'] = rain tab.remove_column('PrecipitationIn') self.table = tab
[ "def read_weather_datafile(filename):\n metadata = {'filename': filename,\n 'Station Name': '',\n 'Station ID': '',\n 'Location': '',\n 'Latitude': 0,\n 'Longitude': 0,\n 'Elevation': 0}\n\n # Read the file.\n root, e...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }