Dataset columns:
    query      string (length 9 to 9.05k)
    document   string (length 10 to 222k)
    negatives  list (19 to 20 items)
    metadata   dict
Gets the custom_fields of this Workitems. User-defined fields.
def custom_fields(self):
    return self._custom_fields
[ "def custom_fields(self):\n if (self._custom_fields is None):\n self._custom_fields = list(CustomField.objects.filter(object_id = self.id))\n return self._custom_fields", "def getCustomFields(self):\n text = self.generateRequest('/v2.1/CustomFields', 'GET', '')\n return self...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert a time-domain array `E` to the frequency domain via 2D FFT. `dx` and `dy` are sample spacing in x (left-right, 1st axis) and y (up-down, 0th axis) directions. An optional `upsample > 1` will zero-pad `E` to obtain an upsampled spectrum. Returns `(spectrum, xf, yf)` where `spectrum` contains the 2D FFT of `E`. If `N...
def makeSpectrum(E, dx, dy, upsample=10):
    zeropadded = np.array(E.shape) * upsample
    F = fft.fftshift(fft.fft2(E, zeropadded)) / E.size
    xf = fft.fftshift(fft.fftfreq(zeropadded[1], d=dx))
    yf = fft.fftshift(fft.fftfreq(zeropadded[0], d=dy))
    return (F, xf, yf)
[ "def fft2(d_A, econ = False, batch_size = 8):\n if type(d_A) is parray.PitchArray:\n return _fft2_parray(d_A, econ = econ, batch_size = batch_size)\n elif type(d_A) is gpuarray.GPUArray:\n return _fft2_gpuarray(d_A, econ = econ, batch_size = batch_size)\n else:\n raise TypeError(\"FFT2...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Output a string to the html file with a trailing newline
def outputHtml(s):
    htmlFile.write(s + "\n")
[ "def write(self, string):\n self.html.write(string)\n self.html.write('\\r\\n')", "def writeHTML(text):\n\n open(\"index.html\", 'a+').write(\"\\n\" + text)", "def write_html(outdir, path, html):\n with open(os.path.join(outdir, path), 'w') as buf:\n buf.write(html)", "def writeHtml...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Transpose a hash of hashes so that the inner keys are now outer
def transpose(h):
    res = {}
    for i in list(h.keys()):
        v = h[i]
        for j in list(v.keys()):
            if not res.get(j, None):
                res[j] = {}
            res[j][i] = v[j]
    return res
[ "def transpose_dict_of_dicts(d):\n all_y = set(y for _, di in d.iteritems() for y, _ in di.iteritems())\n return {y: {x: val for x, di in d.iteritems() for y1, val in di.iteritems() if y1 == y} for y in all_y}", "def transpose_2d_table(dicts_within_dict_table):\n transposed_table = {}\n for x2...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set up the X axis, including scaling, labels and max/min values
def setupXAxis(plot, minVal, maxVal, label, logarithmic):
    plot.set_xlabel(label)
    if logarithmic:
        plot.set_xscale("log")
    plot.set_xlim(minVal, maxVal)
    # plot.set_xscale('log', basex=2)
    # tickLabels = [1]
    # labelValue = minVal
    # while labelValue <= maxVal:
    #     ...
[ "def _scale_x_axis(self):\n pass", "def __draw_xaxis(self):\n self.ax.set_xlim(self.xlims)\n # put x ticks on top\n xticks = [1]\n xticks.extend(range(5, self.xmax+5, 5))\n fs = self.settings.rcParams[\"axes.labelsize\"] if self.settings.otherParams[\n \"xlabel...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add the legend to the plot, either shrinking the plot slightly to make room (since we add the legend outside the plot, to the right), or leaving the plot full-sized and allowing matplotlib to choose a good placement.
def addLegend(ax, lines, impls, legendPos):
    # If there's only one piece of data being plotted, there's no need for a legend
    # since all the parameters will be in the title.
    # Compute the length (in characters) of the longest implementation.
    legendLen = max(list(map(len, impls)))
    if legendLen == 0:
        ...
[ "def _plot_legend(fig, ax):\n box = ax.get_position()\n ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])\n ax.legend(loc='upper left', bbox_to_anchor=(1.02, 1), framealpha=0)", "def add_legend(self):\n self.ax.legend(loc=\"upper left\", bbox_to_anchor=(1,1.1))\n # TODO: maybe ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generate a single plot, which has various options: the maximum y-axis value can be set, grid lines can be plotted across the graph, and error bars can be plotted.
def generatePlot(
    bmName,
    yAxisName,
    npl,
    sizeValues,
    deviations=None,
    yMax=None,
    yMin=None,
    yLines=(),
    fileSuffix="",
    xMin=None,
    xLabel="",
    logarithmic=False,
    legendPos="best",
    sortKeyFn=lambda x: x,
    timeUnit=None,
):
    print("Plot: '" + bmName + "'")
    f...
[ "def create_plots(self):\n x_values = nmp.array(range(self.button_start_var.get(), self.button_end_var.get()))\n # the 'step' below ensures that the list of points will not be too long\n # to help graphs load faster\n step = 2 * int( nmp.size(x_values) / 100) + 1\n x_values = x_va...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Output a MediaWiki-formatted table
def mediaWikiTable(leftmostTitle, array, formatFn=lambda x: str(x)):
    columnKeys = extractColumnKeys(array)
    print("{|")
    # Build the header row from the leftmost title plus the column keys.
    titles = [leftmostTitle] + [str(k) for k in columnKeys]
    print("!" + " !! ".join(titles))
    for k in sorted(array.keys(), key=cmp_to_key(compareFn)):
        print("|-")
        pri...
[ "def print_as_wiki_hybrid_table(rows, table_header):\n\n # Print as Wiki Table\n print('{| style=\"valign:top;\"')\n\n h_num, h_signature_date, h_subject, h_fedreg_page = table_header\n\n header = \"\"\"! Proc.&nbsp;No.\n! &nbsp;\n! align=\"left\" | Subject\n! <small>Signature Date</small>\n! align=\"ri...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Extract a time from a string of the form "%dm %4.2fs" which is what "time" generates.
def extractTime(s):
    msRe = r"([0-9]+)m +([0-9]+\.[0-9]+)s"
    matched = re.match(msRe, s)
    if matched:
        return 60 * int(matched.group(1)) + float(matched.group(2))
    # Maybe we don't have any minutes
    sRe = r"([0-9]+\.[0-9]+)s"
    matched = re.match(sRe, s)
    if matched:
        return float(matc...
[ "def parse_time(s: str):\n return utils.parsers.parse_eng_unit(s, base_unit='s', default=1e-12)", "def parse_time(s):\n return time.gmtime(float(s))", "def read_time(time_string):\n factors = {\n \"n\": 1e-9,\n \"u\": 1e-6,\n \"m\": 1e-3,\n \"s\": 1\n }\n \n # Check...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Format a number in engineering format, where the exponent is a multiple of 3
def engFormat(f):
    if f == 0.0:
        value = 0.0
        exponent = 0
    else:
        exponent = math.log10(-f if f < 0 else f)
        if exponent < 0:
            exponent = -int(math.ceil(-exponent))
        else:
            exponent = int(math.floor(exponent))
        for i in range(3):
            if (exp...
[ "def latex_float(input_number):\n float_str = \"{0:.2g}\".format(input_number)\n if \"e\" in float_str:\n base, exponent = float_str.split(\"e\")\n return r\"${0} \\times 10^{{{1}}}$\".format(base, int(exponent))\n else:\n return float_str", "def write_num_exp(fmt, val):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
If a measurement has Min and Max, we can convert them into a notional error bar by replacing the name_SD field with a [value - minName, maxName - value] pair
def convertMinMaxIntoError(m, name, minName, maxName):
    minVal = m.__dict__.get(minName, None)
    maxVal = m.__dict__.get(maxName, None)
    if maxVal is None or minVal is None:
        return None
    value = m.__dict__[name]
    return [[value - minVal], [maxVal - value]]
[ "def plot_MinMaxAvg(data, column_names, figsize=(12,4)):\n average = data.mean(axis=0)\n st_deviation = data.std(axis=0)\n min_values = data.min(axis=0)\n max_values = data.max(axis=0)\n plt.figure()\n plt.errorbar(np.arange(average.shape[0]), average, st_deviation, fmt='ok',\n lw=...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add an overall mean for the given field
def addOverallMeans(results, fieldNames, fields):
    # Work out what the values we already have look like
    meanValues = ["Overall Mean"]
    geoMeanValues = ["Overall Geometric Mean"]
    for name in fieldNames[1:]:
        if name in fields:
            values = [r.__dict__[name] for r in results]
            geoM...
[ "def _count_mean(self) -> None:\n self._mean = self._sumArea / len(self._areaFeat)", "def returnMean(self,fieldname):\n field = self.getFieldByName(fieldname)\n permitted_types = [\"float\",\"int\"]\n if field.type not in permitted_types:\n raise MyDataException(\"Trying to calculate a mean o...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Find each set of results with the same first parameter, compute the min of their means and then scale all their results by that. The aim here is to scale results for specific processors in a way that is internally consistent. Scaling each run by its own min can be misleading, since then results which were smaller can l...
def normalizeResults(results, independentVariable, basis):
    normValues = {}
    if basis == "min":
        reduction = min
    elif basis == "mean":
        reduction = mean
    elif basis == "max":
        reduction = max
    print("Normalising by " + basis)
    for k in results.keys():
        ik = k.split(",")[0...
[ "def min_max_normalization_multivariate(dataset) :\r\n\r\n scaled_data = [] #The data obtained afte scaling the given dataset\r\n \r\n #Getting the max and min values of the features\r\n max_vals = []\r\n min_vals = []\r\n for feature_index in range(0, len(dataset[0])) :\r\n min_vals.append...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculates word frequency for a given text. We don't consider stop words when calculating frequency.
def word_frequency(text):
    tokenizer = RegexpTokenizer(r'\w+')
    tokens = tokenizer.tokenize(text)
    stop = set(stopwords.words('english'))
    tokens_without_stop = list(filter(lambda word: word.lower() not in stop, tokens))
    counts = Counter(tokens_without_stop)
    return counts
[ "def word_frequency(text, word):\n wordCount = text.count(word)\n frequency = wordCount / len(text) * 100\n \n return frequency", "def calculate_word_counts(text : Text)->Counter:\n return Counter(tokenized_text(text))", "def getFrequencies(text):\r\n\r\n return Counter(text)", "def complexity...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check if a mol has 2D coordinates and if not, calculate them.
def check_2d_coords(mol, force=False):
    if not force:
        try:
            mol.GetConformer()
        except ValueError:
            force = True  # no 2D coords... calculate them
    if force:
        if USE_AVALON_2D:
            pyAv.Generate2DCoords(mol)
        else:
            mol.Compute2DCoords()
[ "def check_2d_coords(mol):\n try:\n mol.GetConformer()\n except ValueError: # no 2D coords... calculate them\n mol.Compute2DCoords()", "def test_coord_promotion(self):\n result = check_cube_coordinates(self.cube, self.squeezed_cube)\n self.assertEqual(result.dim_coords, self.cub...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns True if x is a number (i.e. can be converted to float).
def isnumber(x):
    try:
        float(x)
        return True
    except ValueError:
        return False
[ "def isnumber(x):\n try:\n float(x)\n return True\n except:\n return False", "def is_number(x):\n return isinstance(x,numbers.Number)", "def isNumber(x):\n return isinstance(x, (int, float))", "def _is_number(obj):\n try:\n float(obj)\n return True\n except...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
For a given cluster_id calculate the distance from each point to the centroid/medoid.
def calculate_distances_for_cluster(self, cluster_id):
    cluster_of_interest = self.embedding_df[self.embedding_df['cluster'] == cluster_id].copy()
    if cluster_of_interest.empty:
        raise ValueError(f'Cluster id {cluster_id} not found')
    # Don't calculate distances for the...
[ "def _calc_distance(self, data: np.ndarray):\r\n distances = []\r\n for c in self.centroid:\r\n distance = np.sum((data - c) * (data - c), axis=1)\r\n distances.append(distance)\r\n\r\n distances = np.array(distances)\r\n distances = distances.T\r\n return di...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
For a given cluster return a pandas dataframe of points ranked by distance to the cluster centroid/medoid
def rank_cluster_points_by_distance(self, cluster_id):
    cluster_of_interest = self.embedding_df[self.embedding_df['cluster'] == cluster_id].copy()
    if cluster_of_interest.empty:
        raise ValueError(f'Cluster id {cluster_id} not found')
    if 'dist_to_rep_point' not in s...
[ "def get_all_cluster_rankings(self):\n if 'dist_to_rep_point' not in self.embedding_df.columns:\n self.calculate_all_distances_to_center()\n\n self.embedding_df['rank_in_cluster'] = self.embedding_df.groupby('cluster')['dist_to_rep_point'].rank(method='min')", "def assign_clusters(df: Dat...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculate the rank of each point within a cluster
def get_all_cluster_rankings(self):
    if 'dist_to_rep_point' not in self.embedding_df.columns:
        self.calculate_all_distances_to_center()
    self.embedding_df['rank_in_cluster'] = self.embedding_df.groupby('cluster')['dist_to_rep_point'].rank(method='min')
[ "def rankNeighbors(Data):\r\n strokeDist = []\r\n for i in range(len(Data)):\r\n strokeDist.append([])\r\n index = 0\r\n for point1 in Data:\r\n dist = []\r\n index1=0\r\n for point2 in Data:\r\n #dist.append(math.sqrt((center1[0]-center2[0])**2+(center1[1]-center2...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the object located at given coordinates.
def get_object_at_location(self, x, y):
    object_map_at_target_location = self.maps.get((x, y))
    if not object_map_at_target_location:
        return None
    return object_map_at_target_location.get_real_object()
[ "def get_obj(self, x, y):\n if self.inworld(x, y):\n return self.objs[x][y]\n return None", "def get_object_at(self, position, ignore=set()):\n for o in self.objects:\n if o in ignore:\n continue\n if position in o.cells:\n return...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates a food object randomly somewhere in this world.
def generate_food(self):
    x = random.randint(0, self.width)
    y = random.randint(0, self.height)
    new_food = Food(self.id, x, y)
    food_created = self.add_object(new_food)
    if not food_created:
        existing_object = self.get_object_at_location(x, y)
        if isinstance(exi...
[ "def create_food(self):\n # @4.1 random.randint(a,b) returns a random integer between a and b,\n # (including both end points). Use this to generate a random grid\n # square and set it's contents to self.FOOD using self.set_cell() \n pass", "def spawn_food(self):\n if not self.f...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return True if the choice's value is empty string or None.
def _choice_has_empty_value(choice):
    value, _, crige = choice
    return value is None or value == ''
[ "def is_none_or_empty(value):\n\n if value is None:\n return True\n\n if type(value) is str:\n return len(value.strip()) == 0\n\n return False", "def is_str_none_or_empty(val):\n if val is None:\n return True\n if isinstance(val, string_types):\n val = val.strip()\n i...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a list of optgroups for this widget.
def optgroups(self, name, value, attrs=None):
    groups = []
    has_selected = False
    for index, (option_value, option_label, option_crige) in enumerate(self.choices):
        if option_value is None:
            option_value = ''
        subgroup = []
        if isinstance(option_lab...
[ "def groups(self):\r\n return set(opt.group_name for opt in self._options.values())", "def get_option_groups(product):\n return product.options_groups.all()", "def list_groups(self):\n pass", "def get_groups(self):\n return [g for g in self._descriptions]", "def get_groups(self):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets up the cairo context and pango layout
def set_up_pangocairo(self, widget, event):
    # Create the cairo context
    self.cr = self.window.cairo_create()
    # Create a pango layout
    self.pg = self.cr.create_layout()
    # Restrict Cairo to the exposed area; avoid extra work
    self.cr.rectangle(event.area.x, event.ar...
[ "def create_layout(self, size = None):\r\n if not self.context:\r\n # TODO - this is rather sloppy as far as exception goes\r\n # should explain better\r\n raise \"Can not create layout without existing context!\"\r\n\r\n layout = pangocairo.create_layout(self.c...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Handles expose event. Sets up cairo and calls draw() to draw the text
def do_expose_event(self, widget, event):
    self.set_up_pangocairo(widget, event)
    self.draw(*self.window.get_size())
[ "def do_expose_event(self, event):\n self.do_general_event(event.window, event)\n cr = self.window.cairo_create()\n cr.rectangle(event.area.x, event.area.y,\n event.area.width, event.area.height)\n cr.clip()\n\n self.draw(cr, *self.window.get_size())", "def m...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Invalidates the canvas to allow cairo to redraw
def invalidate_canvas(self):
    if self.window:
        x, y, w, h = self.get_allocation()
        self.window.invalidate_rect((0,0,w,h), False)
        self.cr = self.window.cairo_create()
        self.cr.update_layout(self.pg)
[ "def undraw(self):\n \n if not self.canvas: return\n if not self.canvas.isClosed():\n #self.canvas.delete(self.id)\n _tkExec(self.canvas.delete, self.id)\n if self.canvas.autoflush:\n #_root.update()\n _tkCall(_root.update)\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calls continuous_scroll every 38 ms until drag stops and the gobject.source is removed
def start_refresh(self, widget, context):
    self.source_id = gobject.timeout_add(38, self.continuous_scroll, context)
[ "def stopScrolling(self):\n self.__timer.stop()", "def __stopScrolling(self):\n self.__view.inputWidget().releaseMouse()\n QApplication.restoreOverrideCursor()\n \n self.__indicator.hide()\n self.__indicator.setParent(None)\n self.__scroller.stopScrolling()", "de...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
CPU kernel for 3d mesh to particles quantity interpolation
def mesh_to_particles_CPU_3d(mesh, mesh_quantity, indices, weights):
    ip, jp, kp = indices
    stridex = mesh.nx
    stridey = mesh.ny
    mq = np.ravel(mesh_quantity)

    @np.vectorize
    def check_outside(ip, jp, kp):
        outside_idx = (jp < 0 or jp >= mesh.nx - 1 or ip < 0 or ip >= me...
[ "def accel_particle(p, particle, mass, index, particle_in_cell, smooth):\r\n G = 4.452*10**(-7) #in unit of kpc^3/10^5 solar masses/Myr^2\r\n #smooth = 1.0 #The smoothen scale is 100 pc which is bigger than the size of globular cluster (around 0.01 kpc, smallest possible\r\n #mass) and the size of a dwarf...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Make a short score with a pickup and two voices.
def makeScoreWithPickup(self):
    sc = stream.Score()
    num_voices = 2
    pitches = ['C', 'A-']
    for i in range(num_voices):
        part = stream.Part()
        part.id = 'part %d' % i
        time_sig = meter.TimeSignature('4/4')
        key_sig = key.Key('c')

        # Add pickup measure.
        pickup = stream.Measure...
[ "def makeScore(self):\n sc = stream.Score()\n num_voices = 2\n pitches = ['C', 'A-']\n for i in range(num_voices):\n part = stream.Part()\n part.id = 'part %d' % i\n time_sig = meter.TimeSignature('4/4')\n key_sig = key.Key('c')\n\n # Make a note.\n n1 = music21_note.Note(p...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Make a short score with a pickup and two voices.
def makeScore(self):
    sc = stream.Score()
    num_voices = 2
    pitches = ['C', 'A-']
    for i in range(num_voices):
        part = stream.Part()
        part.id = 'part %d' % i
        time_sig = meter.TimeSignature('4/4')
        key_sig = key.Key('c')

        # Make a note.
        n1 = music21_note.Note(pitches[i])
        ...
[ "def makeScoreWithPickup(self):\n sc = stream.Score()\n num_voices = 2\n pitches = ['C', 'A-']\n for i in range(num_voices):\n part = stream.Part()\n part.id = 'part %d' % i\n time_sig = meter.TimeSignature('4/4')\n key_sig = key.Key('c')\n\n # Add pickup measure.\n pickup ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check the key, mode, tonic pitch class extraction from key signature.
def testExtractionOfKeySignatureAttributes(self):
    num_to_major_key = {0: 'C', 1: 'G', 2: 'D', 3: 'A', 4: 'E', 5: 'B',
                        6: 'F#', 7: 'C#', ...
[ "def keyCheker(key):\r\n match = re.search(r\"\\.jpg|\\.png\", key.key)\r\n if match:\r\n return True\r\n else:\r\n return False", "def __getKeyInformation( self , flaglist ):\n\t\tkeyinfo = 0\n\t\tif 'HMAC_MD5_RC4' in flaglist:\n\t\t\tkeyinfo = setBit( keyinfo , 0 )\n\t\tif 'HMAC_SHA1_AES'...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test pretty_music21 score by comparing to music21 score.
def testCompareScores(self):
    for score_type, source in self.sources.iteritems():
        simple_score = self.simple_scores[score_type]

        # Check overall length.
        self.assertAlmostEqual(source.duration.quarterLength / 2.0,
                               simple_score.total_time)

        # Check number of parts.
        ...
[ "def to_music21(music: \"Music\") -> Score:\n # Create a new score\n score = Score()\n\n # Metadata\n if music.metadata:\n score.append(to_music21_metadata(music.metadata))\n\n # Tracks\n for track in music.tracks:\n # Create a new part\n part = Part()\n part.partName =...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test if notes are sorted by start time.
def testSortedNotes(self):
    for simple_score in self.simple_scores.values():
        notes = simple_score.sorted_notes
        assert all(notes[i].start_time <= notes[i + 1].start_time
                   for i in range(len(notes) - 1))
[ "def test_sort_by_start(self):\r\n self.factory.create(name=\"Run 1\", start=date(2012, 1, 5))\r\n self.factory.create(name=\"Run 2\", start=date(2012, 1, 10))\r\n\r\n res = self.get(\r\n params={\"sortfield\": \"start\", \"sortdirection\": \"desc\"})\r\n\r\n self.assertOrderI...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Runs the given command and gathers the output. If a callback is provided, then the output is sent to it, otherwise it is just returned. Optionally, the output of the command can be "watched" and whenever new output is detected, it will be sent to the given `callback`.
def run_cmd(cmd, callback=None, watch=False, background=False, shell=False):
    if watch and not callback:
        raise RuntimeError(
            "You must provide a callback when watching a process."
        )
    output = None
    if shell:
        proc = subprocess.Popen(cmd, shell=True, stdout=subpro...
[ "def exec_cmd_callback(self, cmd, callback):\n self._queue_cmd(cmd, callback)", "def callback_command_(self, cmd, visit_args, cbdata):\n self.callback_command(cmd, visit_args, cbdata)\n return", "def run_command(command, command_out_path=None):\n command_output = subprocess.getstatusoutp...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check that proportions in composition file sum to 1
def check_proportions(self):
    proportions = [
        v['proportion'] for k, v in self.composition.items()
    ]
    if sum(proportions) < 1.0:
        raise ValueError('Sum of proportions between host and pathogen must be 1.0.')
    elif sum(proportions) > 1.0:
        raise V...
[ "def verify():\n p = sum(map(lambda f: f.p, FileStore.files))\n mean_size = sum(map(lambda f: f.size, FileStore.files)) / len(FileStore.files)\n\n logger.debug(f\"Sum probabilties: {p}\")\n logger.debug(f\"Mean file size: {mean_size}\")", "def check_compositionality(cls, fraction_total...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Clean up the Fastq index files from Pyfastx
def clean(self):
    for _, data in self.composition.items():
        index_file = Path(data['file'] + '.fxi')
        if index_file.exists():
            index_file.unlink()
[ "def _cleanupHtslibsMess(indexDir):\n if os.path.exists(indexDir):\n shutil.rmtree(indexDir)", "def delete_index_files(file_name: str) -> None:\n delete_file(f'{file_name}.gzi')\n delete_file(f'{file_name}.fai')\n delete_file(f'{file_name}.tbi')", "def delete_index(self):\n if self.ind...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Rename read headers from the Pyfastx reads (read-only)
def rename_headers(reads: list, organism: str):
    i = 0
    read_strings = []
    for read in reads:
        read_str = read.raw.splitlines()
        read_str[0] = f'@{organism}_{i}'
        read_str = '\n'.join(read_str)
        read_strings.append(read_str)
        i += 1
    ...
[ "def renameHeaderSDF(pfilin):\n namesdf = pfilin.split(\"/\")[-1].split(\".\")[0]\n filin = open(pfilin, \"r\")\n llines = filin.readlines()\n filin.close()\n llines[0] = str(namesdf) + \"\\n\"\n\n filout = open(pfilin, \"w\")\n filout.write(\"\".join(llines))\n filout.close()", "def _rese...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sample a list of Fastq reads / read names
def sample(fastq: list, reads: int = None, replacement: bool = False):
    if replacement:
        sampled_reads = random.choices(fastq, k=reads)
    else:
        sampled_reads = random.sample(fastq, k=reads)
    return sampled_reads
[ "def sample_name_from_fastq_paths(fastqs: List[str]) -> str:\n grouped_fastqs = group_fastqs(fastqs)\n for fastq_paths, sample_name in grouped_fastqs:\n return sample_name", "def create_read_list(samfile):\n read_sampler = ReadSampler()\n for line in samfile:\n line = sam_utils.SamAlignm...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set item in nested dictionary
def set_nested_item(data_dict: dict, key_list: tuple or list, value):
    reduce(getitem, key_list[:-1], data_dict)[key_list[-1]] = value
    return data_dict
[ "def __setitem__(self, key, value): \n key, leaf = self.__build_sequence(self, key) \n super(NestedDict, leaf).__setitem__(key, value)", "def _set_nested(self, d, keys, value):\n if len(keys) > 1 and isinstance(d, dict):\n if dict(d).__contains__(keys[0]) and isinstance(d[key...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
returns a list of axes of a variable mv
def allAxes( mv ):
    if mv is None: return None
    return mv.getAxisList()
[ "def _getaxes(self):\n try:\n return [getattr(self.nxgroup,name) for name in _readaxes(self.axes)]\n except KeyError:\n return None", "def _axes(self):\n try:\n return [getattr(self,name) for name in _readaxes(self.nxsignal.axes)]\n except KeyError:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sometimes we get time units which aren't compatible with cdtime. This function will (try to) fix them. The input argument is a string, e.g. "months since Jan 1979" and the return value is another string, e.g.
def fix_time_units( timeunits ):
    imon = timeunits.find("months since ")
    if imon==0:
        since="months since "
    else:
        iday = timeunits.find("days since ")
        if iday==0:
            since="days since "
        else:
            ihour = timeunits.find("hours since ")
            if ihour==0:
                ...
[ "def normalize_time_unit(s):\n s = s.lower().strip()\n if s in units:\n return s\n if s in unit_aliases:\n return unit_aliases[s]\n if s[-1] == 's':\n return normalize_time_unit(s.rstrip('s'))\n\n raise ValueError(\"Do not understand time unit %s\" % s)", "def standardize_time_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Input is a variable which depends on latitude. This function will copy it to a new variable, except that the new variable's latitude axis will be restricted to latmin<=lat<=latmax; and of course the data will be restricted to correspond.
def restrict_lat( mv, latmin, latmax ):
    if latmin==-90: latmin = -91  # just to make sure
    if latmax==90: latmax = 91
    # axes
    latax,idx = latAxis2(mv)
    if latax is None: return None
    imin = min( [i for i in range(len(latax)) if latax[i]>=latmin and latax[i]<=latmax ] )
    imax = max( [i for i in ...
[ "def standardize_latlon(var):\n # Ensure the variable order is lat,lon, time.\n retvar = var.transpose('lat', 'lon', 'time')\n \n if retvar.lat[1] - var.lat[0] < 0:\n data = retvar[::-1,:,:]\n lats = retvar.lat[::-1]\n else:\n data = retvar[:,:,:]\n lats = retvar.lat\n\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
returns the mean of the variable over the supplied latitude range (in degrees, based on values of lat, not lat_bnds). The computed quantity is a scalar but is returned as a cdms2 variable, i.e. a MV. The input mv is a cdms2 variable, assumed to be indexed as is usual for CF-compliant variables, i.e. mv(time,lat,lon). At ...
def reduce2scalar_zonal_old( mv, latmin=-90, latmax=90, vid=None ):
    # For now, I'm assuming that the only axes are time,lat,lon - so that zm is a scalar.
    # And I'm assuming equal spacing in lon (so all longitudes contribute equally to the average)
    # If they aren't, it's best to use area from cell_measures a...
[ "def reduce2scalar_zonal( mv, latmin=-90, latmax=90, vid=None ):\n if vid==None:\n vid = 'reduced_'+mv.id\n axes = allAxes( mv )\n ilat = None\n for i,ax in enumerate(axes):\n if ax.id=='lat': ilat = i\n # reduce size of lat axis to (latmin,latmax)\n # Let's home a direct search will...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
returns the mean of the variable over the supplied latitude range (in degrees, based on values of lat, not lat_bnds). The computed quantity is a scalar but is returned as a cdms2 variable, i.e. a MV. The input mv is a cdms2 variable too. This function uses the cdms2 averager() function to handle weights and do averages
def reduce2scalar_zonal( mv, latmin=-90, latmax=90, vid=None ):
    if vid==None:
        vid = 'reduced_'+mv.id
    axes = allAxes( mv )
    ilat = None
    for i,ax in enumerate(axes):
        if ax.id=='lat': ilat = i
    # reduce size of lat axis to (latmin,latmax)
    # Let's hope a direct search will be fast enou...
[ "def reduce2scalar_zonal_old( mv, latmin=-90, latmax=90, vid=None ):\n # For now, I'm assuming that the only axes are time,lat,lon - so that zm is a scalar.\n # And I'm assuming equal spacing in lon (so all longitudes contribute equally to the average)\n # If they aren't, it's best to use area from cell_me...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
averages mv over the full range of all axes, down to a single scalar. Uses the averager module for greater capabilities
def reduce2scalar( mv, vid=None ):
    if vid==None:   # Note that the averager function returns a variable with meaningless id.
        vid = 'reduced_'+mv.id
    axes = allAxes( mv )
    axis_names = [ a.id for a in axes ]
    axes_string = '('+')('.join(axis_names)+')'
    avmv = averager( mv, axis=axes_string )
    ...
[ "def mavg(arr, n=2, axis=-1):\n return np.mean(rolling_window(arr, n), axis=axis)", "def manual_mean(arr):\r\n sum=0\r\n for i in xrange(0,arr.shape[0]):\r\n for j in xrange(0,arr.shape[1]):\r\n sum=sum + arr[i,j]\r\n return sum / arr.size", "def mean(self, array):\n n = flo...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
returns the mean of the variable over all axes but latitude, as a cdms2 variable, i.e. a MV. The input mv is also a cdms2 variable, assumed to be indexed as is usual for CF-compliant variables, i.e. mv(time,lat,lon). At present, no other axes (e.g. level) are supported. At present mv must depend on all three axes.
def reduce2lat_old( mv, vid=None ):
    # >>> For now, I'm assuming that the only axes are time,lat,lon
    # And I'm assuming equal spacing in lon (so all longitudes contribute equally to the average)
    # If they aren't, it's best to use area from cell_measures attribute if available; otherwise
    # compute it with...
[ "def reduce2lat( mv, vid=None ):\n if vid==None: # Note that the averager function returns a variable with meaningless id.\n vid = 'reduced_'+mv.id\n axes = allAxes( mv )\n axis_names = [ a.id for a in axes if a.id!='lat' ]\n axes_string = '('+')('.join(axis_names)+')'\n\n avmv = averager( m...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
as reduce2lat_old, but uses the averager module for greater capabilities
def reduce2lat( mv, vid=None ):
    if vid==None:   # Note that the averager function returns a variable with meaningless id.
        vid = 'reduced_'+mv.id
    axes = allAxes( mv )
    axis_names = [ a.id for a in axes if a.id!='lat' ]
    axes_string = '('+')('.join(axis_names)+')'
    avmv = averager( mv, axis=axes...
[ "def reduce2lat_old( mv, vid=None ):\n # >>> For now, I'm assuming that the only axes are time,lat,lon\n # And I'm assuming equal spacing in lon (so all longitudes contribute equally to the average)\n # If they aren't, it's best to use area from cell_measures attribute if available; otherwise\n # comput...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
as reduce2lat, but averaging reduces coordinates to (lev,lat)
def reduce2levlat( mv, vid=None ):
    if vid==None:   # Note that the averager function returns a variable with meaningless id.
        vid = 'reduced_'+mv.id
    if levAxis(mv) is None: return None
    if latAxis(mv) is None: return None
    axes = allAxes( mv )
    timeax = timeAxis(mv)
    if timeax.getBounds()==No...
[ "def reduce2lat( mv, vid=None ):\n if vid==None: # Note that the averager function returns a variable with meaningless id.\n vid = 'reduced_'+mv.id\n axes = allAxes( mv )\n axis_names = [ a.id for a in axes if a.id!='lat' ]\n axes_string = '('+')('.join(axis_names)+')'\n\n avmv = averager( m...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
as reduce2levlat, but data is averaged only for time restricted to the specified season; as in reduce2lat_seasonal.
def reduce2levlat_seasonal( mv, seasons=seasonsyr, vid=None ):
    if vid==None:   # Note that the averager function returns a variable with meaningless id.
        vid = 'reduced_'+mv.id
    if levAxis(mv) is None: return None
    if latAxis(mv) is None: return None
    axes = allAxes( mv )
    timeax = timeAxis(mv)
    ...
[ "def reduce2latlon_seasonal( mv, seasons=seasonsyr, vid=None ):\n # This differs from reduce2lat_seasonal only in the line \"axis_names =\"....\n # I need to think about how to structure the code so there's less cut-and-paste!\n if vid==None:\n vid = 'reduced_'+mv.id\n # Note that the averager fu...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
as reduce2lat, but data is used only for time restricted to the specified season. The season is specified as an object of type cdutil.times.Seasons, and defaults to the whole year. The returned variable will still have a time axis, with one value per season specified.
def reduce2lat_seasonal( mv, seasons=seasonsyr, vid=None ):
    if vid==None:
        vid = 'reduced_'+mv.id
    # Note that the averager function returns a variable with meaningless id.
    # The climatology function returns the same id as mv, which we also don't want.

    # The slicers in time.py require getBounds()...
[ "def reduce2latlon_seasonal( mv, seasons=seasonsyr, vid=None ):\n # This differs from reduce2lat_seasonal only in the line \"axis_names =\"....\n # I need to think about how to structure the code so there's less cut-and-paste!\n if vid==None:\n vid = 'reduced_'+mv.id\n # Note that the averager fu...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
as reduce2lat_seasonal, but both lat and lon axes are retained.
def reduce2latlon_seasonal( mv, seasons=seasonsyr, vid=None ):
    # This differs from reduce2lat_seasonal only in the line "axis_names ="....
    # I need to think about how to structure the code so there's less cut-and-paste!
    if vid==None:
        vid = 'reduced_'+mv.id
    # Note that the averager function retur...
[ "def reduce2lat_seasonal( mv, seasons=seasonsyr, vid=None ):\n if vid==None:\n vid = 'reduced_'+mv.id\n # Note that the averager function returns a variable with meaningless id.\n # The climatology function returns the same id as mv, which we also don't want.\n\n # The slicers in time.py require ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Input is a level-dependent variable mv and a level slev to select. slev is an instance of udunits, thus it has a value and a units attribute. This function will create and return a new variable mvs without a level axis. The values of mvs correspond to the values of mv with level set to slev. Interpolation isn't done yet,...
def select_lev( mv, slev ):
    levax = levAxis(mv)
    # Get ig, the first index for which levax[ig]>slev
    # Assume that levax values are monotonic.
    dummy,slev = reconcile_units( levax, slev )  # new slev has same units as levax
    if levax[0]<=levax[-1]:
        ids = numpy.where( levax[:]>=slev.value )  # ...
[ "def levvar( mv ):\n # First get the axis. This is probably not as general as we'll need...\n if mv is None: return None\n lev_axis = levAxis(mv)\n #levmv = mv.clone() # good if mv has only a lev axis\n #levmv[:] = lev_axis[:]\n levmv = cdms2.createVariable( lev_axis[:], axes=[lev_axis], id='lev...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
returns a transient variable which is dimensioned along the lat axis but whose values are the latitudes
def latvar( mv ):
    # First get the axis.  This is probably not as general as we'll need...
    if mv is None: return None
    lat_axis = latAxis(mv)
    #latmv = mv.clone()  # good if mv has only a lat axis
    #latmv[:] = lat_axis[:]
    latmv = cdms2.createVariable( lat_axis[:], axes=[lat_axis], id='lat',
    ...
[ "def lat_lons(self):", "def get_galactic_latitude(self, transient):\r\n\t\tcoordinates = SkyCoord(transient['ra'], transient['dec'], unit='deg')\r\n\t\tb = coordinates.galactic.b.deg\r\n\t\treturn b", "def lonvar( mv ):\n # First get the axis. This is probably not as general as we'll need...\n if mv is N...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
returns a transient variable which is dimensioned along the lon axis but whose values are the longitudes
def lonvar( mv ):
    # First get the axis.  This is probably not as general as we'll need...
    if mv is None: return None
    lon_axis = lonAxis(mv)
    latmv = cdms2.createVariable( lon_axis[:], axes=[lon_axis], id='lon',
                                 attributes={'units':lon_axis.units},
    ...
[ "def latvar( mv ):\n # First get the axis. This is probably not as general as we'll need...\n if mv is None: return None\n lat_axis = latAxis(mv)\n #latmv = mv.clone() # good if mv has only a lat axis\n #latmv[:] = lat_axis[:]\n latmv = cdms2.createVariable( lat_axis[:], axes=[lat_axis], id='lat...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
returns a transient variable which is dimensioned along the lev (level) axis but whose values are the levels
def levvar( mv ):
    # First get the axis.  This is probably not as general as we'll need...
    if mv is None: return None
    lev_axis = levAxis(mv)
    #levmv = mv.clone()  # good if mv has only a lev axis
    #levmv[:] = lev_axis[:]
    levmv = cdms2.createVariable( lev_axis[:], axes=[lev_axis], id='lev',
    ...
[ "def select_lev( mv, slev ):\n levax = levAxis(mv)\n # Get ig, the first index for which levax[ig]>slev\n # Assume that levax values are monotonic.\n dummy,slev = reconcile_units( levax, slev ) # new slev has same units as levax\n if levax[0]<=levax[-1]:\n ids = numpy.where( levax[:]>=slev.va...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
From a variable or axis of pressures, this function converts to millibars, and returns the result as a numpy array.
def pressures_in_mb( pressures ):
    if not hasattr( pressures, 'units' ): return None
    if pressures.units=='mb':
        pressures.units = 'mbar'  # udunits uses mb for something else
        return pressures[:]
    tmp = udunits(1.0,pressures.units)
    s,i = tmp.how('mbar')
    pressmb = s*pressures[:] + i
    re...
[ "def _convert_bar_width(x, width=1, ncols=1):\n # WARNING: This will fail for non-numeric non-datetime64 singleton\n # datatypes but this is good enough for vast majority of cases.\n x_test = np.atleast_1d(_to_ndarray(x))\n if len(x_test) >= 2:\n x_step = x_test[1:] - x_test[:-1]\n x_step ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
returns a transient variable which is dimensioned along the lev (level) axis and whose values are the heights corresponding to the pressure levels found as the lev axis of mv. Levels will be converted to millibars. heights are returned in km
def heightvar( mv ):
    if mv is None: return None
    lev_axis = levAxis(mv)
    heights = 0.001 * press2alt.press2alt( pressures_in_mb(lev_axis) )  # 1000 m = 1 km
    heightmv = cdms2.createVariable( heights, axes=[lev_axis], id=mv.id,
                                    attributes={'units':"km"} )
    return heig...
[ "def levvar( mv ):\n # First get the axis. This is probably not as general as we'll need...\n if mv is None: return None\n lev_axis = levAxis(mv)\n #levmv = mv.clone() # good if mv has only a lev axis\n #levmv[:] = lev_axis[:]\n levmv = cdms2.createVariable( lev_axis[:], axes=[lev_axis], id='lev...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
returns a transient variable which is dimensioned as whichever of mv1, mv2 has the fewest latitude points but whose values are the latitudes
def latvar_min( mv1, mv2 ):
    if mv1 is None: return None
    if mv2 is None: return None
    lat_axis1 = latAxis(mv1)
    lat_axis2 = latAxis(mv2)
    if len(lat_axis1)<=len(lat_axis2):
        lat_axis = lat_axis1
        mv = mv1
    else:
        lat_axis = lat_axis2
        mv = mv2
    latmv = cdms2.createVaria...
[ "def lonvar_min( mv1, mv2 ):\n if mv1 is None: return None\n if mv2 is None: return None\n lon_axis1 = lonAxis(mv1)\n lon_axis2 = lonAxis(mv2)\n if len(lon_axis1)<=len(lon_axis2):\n lon_axis = lon_axis1\n mv = mv1\n else:\n lon_axis = lon_axis2\n mv = mv2\n lonmv = c...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
returns a transient variable which is dimensioned as whichever of mv1, mv2 has the fewest longitude points but whose values are the longitudes
def lonvar_min( mv1, mv2 ):
    if mv1 is None: return None
    if mv2 is None: return None
    lon_axis1 = lonAxis(mv1)
    lon_axis2 = lonAxis(mv2)
    if len(lon_axis1)<=len(lon_axis2):
        lon_axis = lon_axis1
        mv = mv1
    else:
        lon_axis = lon_axis2
        mv = mv2
    lonmv = cdms2.createVaria...
[ "def latvar_min( mv1, mv2 ):\n if mv1 is None: return None\n if mv2 is None: return None\n lat_axis1 = latAxis(mv1)\n lat_axis2 = latAxis(mv2)\n if len(lat_axis1)<=len(lat_axis2):\n lat_axis = lat_axis1\n mv = mv1\n else:\n lat_axis = lat_axis2\n mv = mv2\n latmv = c...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
returns a transient variable which is dimensioned as whichever of mv1, mv2 has the fewest level points but whose values are the levels
def levvar_min( mv1, mv2 ):
    if mv1 is None: return None
    if mv2 is None: return None
    lev_axis1 = levAxis(mv1)
    lev_axis2 = levAxis(mv2)
    if len(lev_axis1)<=len(lev_axis2):
        lev_axis = lev_axis1
        mv = mv1
    else:
        lev_axis = lev_axis2
        mv = mv2
    levmv = cdms2.createVaria...
[ "def select_lev( mv, slev ):\n levax = levAxis(mv)\n # Get ig, the first index for which levax[ig]>slev\n # Assume that levax values are monotonic.\n dummy,slev = reconcile_units( levax, slev ) # new slev has same units as levax\n if levax[0]<=levax[-1]:\n ids = numpy.where( levax[:]>=slev.va...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
interpolates a variable mv along its second axis, normally latitude, so as to match the new axis (which should be coarser, i.e. fewer points), and returns a numpy array of the interpolated values. The first axis is normally levels, and isn't expected to be very large (usually <20; surely <50) There shall be no more tha...
def interp2( newaxis1, mv ):
    missing = mv.get_fill_value()
    axes = allAxes(mv)
    if len(newaxis1[:])>len(axes[1][:]): return mv
    new_vals = numpy.ma.masked_all( ( len(axes[0]), len(newaxis1[:]) ) )
    for i in range(len( axes[0] )):
        new_vals[i,:] = numpy.interp( newaxis1[:], axes[1][:], mv[i,:], l...
[ "def reduce2lat_old( mv, vid=None ):\n # >>> For now, I'm assuming that the only axes are time,lat,lon\n # And I'm assuming equal spacing in lon (so all longitudes contribute equally to the average)\n # If they aren't, it's best to use area from cell_measures attribute if available; otherwise\n # comput...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
returns mv1[0,] - mv2[0,]; they should be dimensioned alike. Attributes will be fixed up where I know how.
def aminusb0( mv1, mv2 ):
    mv = mv1[0,] - mv2[0,]
    if hasattr(mv,'long_name'):
        if mv.long_name==mv1.long_name:  # They're different, shouldn't have the same long_name
            mv.long_name = ''
    return mv
[ "def _attribute_2d(self, attribute):\n ans = np.zeros((self.num_rows(), self.num_cols()))\n\n for row in range(self.num_rows()):\n for col in range(self.num_cols()):\n ans[row][col] = attribute(self[row, col])\n\n return ans", "def get_molecular_matrix_and_vector(sin...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
returns a transient variable representing mv1 - mv2, where mv1 and mv2 are variables, normally transient variables, which are required to depend on only one axis. To perform the subtraction, one of the variables is linearly interpolated to the axis of the other. The axis used will be the coarsest (fewest points) of the two a...
def aminusb_1ax( mv1, mv2 ):
    mv1, mv2 = reconcile_units( mv1, mv2 )
    if hasattr(mv1,'units') and hasattr(mv2,'units') and mv1.units!=mv2.units:
        print "WARNING: aminusb_1ax1 is subtracting variables with different units!",mv1,mv2
    if mv1 is None or mv2 is None: return None
    missing = mv1.get_fill_va...
[ "def basic_sub(mv1, mv2):\n obj = expand(mv1.obj - mv2.obj)\n return MV(obj)", "def var_sub ( v1 , v2 , name = '' , title = '' ) :\n f1 = isinstance ( v1 , num_types )\n f2 = isinstance ( v2 , num_types ) \n if f1 and f2 :\n r = float ( v1 ) - float ( v2 ) \n return ROOT.Ro...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns time averages of the cdms2 variable mv. The average is computed only over times which lie in the specified season(s). The returned variable has the same number of dimensions as mv, but the time axis has been reduced to the number of seasons requested. The seasons are specified as an object of type cdutil.times.S...
def timeave_seasonal( mv, seasons=seasonsyr ):
    return seasons.climatology(mv)
[ "def reduce2lat_seasonal( mv, seasons=seasonsyr, vid=None ):\n if vid==None:\n vid = 'reduced_'+mv.id\n # Note that the averager function returns a variable with meaningless id.\n # The climatology function returns the same id as mv, which we also don't want.\n\n # The slicers in time.py require ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a time average of the cdms2 variable mv. mv is a cdms2 variable, assumed to be time-dependent and indexed as is usual for CF-compliant variables, i.e. mv(time,...). What's returned is a numpy array, not a cdms2 variable. (I may change this in the future).
def timeave_old( mv ):
    # I haven't thought yet about how missing values would work with this...
    # If time intervals be unequal, this will have to be changed...
    sh = mv.shape  # e.g. [312,90,144] for t,lat,lon
    n = sh[0]
    # BTW, this is the size of everything else:
    # n2 = reduce( operator.mul, sh...
[ "def reduce2scalar( mv, vid=None ):\n if vid==None: # Note that the averager function returns a variable with meaningless id.\n vid = 'reduced_'+mv.id\n axes = allAxes( mv )\n axis_names = [ a.id for a in axes ]\n axes_string = '('+')('.join(axis_names)+')'\n\n avmv = averager( mv, axis=axes...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
returns a TransientVariable containing the minimum and maximum values of all the variables provided as arguments
def minmin_maxmax( *args ):
    rmin = min( [ mv.min() for mv in args ] )
    rmax = max( [ mv.max() for mv in args ] )
    rmv = cdms2.createVariable( [rmin,rmax] )
    return rmv
[ "def _rrv_minmax_ ( s ) :\n return s.getMin(),s.getMax()", "def variable_range(examples, var):\n if var[1] == 'd':\n range = set()\n for datum in examples:\n range.add(datum[var[0]])\n return range\n else:\n range_min, range_max = 0, 0\n for datum in examples...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
If mv depends on an axis with just one value, create a copy of mv without that axis, and without the corresponding data dimension. Normally this happens when time has been averaged out, but there is still a one-valued time axis left (thus one would normally use id='time'). You can specify the axis id if there might be m...
def delete_singleton_axis( mv, vid=None ):
    axes = allAxes(mv)
    saxis = None
    si = None
    for i in range(len(axes)):
        if len(axes[i])==1 and (vid==None or axes[i].id==vid):
            saxis = axes[i]
            si = i
            del axes[si]
            break
    if saxis==None: return mv
    data ...
[ "def convert_axis( mv, axisold, axisindnew ):\n (axisnew, indexina3) = axisindnew\n axes = allAxes(mv)\n kold = None\n for k in range(len(axes)):\n if axes[k]==axisold: kold=k\n if kold==None:\n print \"ERROR. convert_axis cannot find axis\",axisold,\" in variable\",mv\n if len(axiso...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Not much tested. I decided against doing overlapping line plots this way. The input arguments are two variables (cdms2 MVs, normally TransientVariables), with whatever compatibility is needed for this function to work. New axes are computed which can be used for both variables. These axes are returned as a list of tuple...
def common_axes( mv1, mv2 ):
    axes1 = [a[0] for a in mv1.getDomain()]
    axes2 = [a[0] for a in mv2.getDomain()]
    if len(axes1)!=len(axes2):
        print "ERROR. common_axes requires same number of axes in",mv1," and",mv2
        return None
    axes3 = []
    for i in range(len(axes1)):
        axes3.append(c...
[ "def plot_var_2_axis(var_list, ax_list, dates=None, linestyle='-', color=None,\n label=None, zorder=None, share_axes=False, lw=None,\n minter=3):\n\n if (color == 'k'):\n if lw is None:\n lw = 1.25\n\n else:\n if lw is None:\n lw = 1.\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Not much tested. I decided against doing overlapping line plots this way. The input arguments are two axes (AbstractAxis class), as compatible as necessary for the following to be sensible. This function has 3 return values. It returns a TransientAxis which includes all the points of the input axes. It may be one of the...
def common_axis( axis1, axis2 ):
    if hasattr( axis1, 'units' ):
        units1 = axis1.units.lower().replace(' ','_')
        if axis1.isTime():
            axis1.toRelativeTime( units1 )  # probably will change input argument
    else:
        units1 = None
    if hasattr( axis2, 'units' ):
        units2 = axis2.un...
[ "def common_axes( mv1, mv2 ):\n axes1 = [a[0] for a in mv1.getDomain()]\n axes2 = [a[0] for a in mv2.getDomain()]\n if len(axes1)!=len(axes2):\n print \"ERROR. common_axes requires same number of axes in\",mv1,\" and\",mv2\n return None\n axes3 = []\n for i in range(len(axes1)):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Not much tested. I decided against doing overlapping line plots this way. Returns a TransientVariable made by replacing an axis axisold of a TransientVariable mv with a new axis. The new axis will have all points of the old axis, but may have more, thus requiring the new variable to have more missing data. The variable ...
def convert_axis( mv, axisold, axisindnew ):
    (axisnew, indexina3) = axisindnew
    axes = allAxes(mv)
    kold = None
    for k in range(len(axes)):
        if axes[k]==axisold: kold=k
    if kold==None:
        print "ERROR. convert_axis cannot find axis",axisold," in variable",mv
    if len(axisold)==len(axisnew)...
[ "def moveaxes(a, old, new):\n\t# The final moves will happen in left-to-right order.\n\t# Hence, the first moves must be in the reverse of\n\t# this order.\n\tn = len(old)\n\told = np.asarray(old)\n\torder = np.argsort(new)\n\trold = old[order[::-1]]\n\tfor i in range(n):\n\t\ta = moveaxis(a, rold[i], -1)\n\t\t#...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
From a filename, extracts the first part of the filename as the possible name of a family of files; e.g. from 'ts_Amon_bcc-csm1-1_amip_r1i1p1_197901-200812.nc' extract and return 'ts_Amon_bcc-csm1-1_amip_r1i1p1'. To distinguish between the end of a file family name and the beginning of the file-specific part of the filename,...
def extract_filefamilyname( self, filename ):
    matchobject = re.search( r"^.*_\d\d", filename )
    if matchobject is None:
        return filename
    else:
        familyname = filename[0:(matchobject.end()-3)]
        return familyname
[ "def extract_name(filename):\n name = os.path.splitext(os.path.basename(filename))[0]\n pattern = \"([0-9a-zA-Z_\\-\\.]+)_[0-9]+_[0-9]+$\"\n g = re.search(pattern, name)\n if g is not None:\n name = g.groups()[0]\n return name", "def reFileName(str_):\n rv = 'None', str_\n m = re.match...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Finds and opens the files containing data required for the variable, applies the reduction function to the data, and returns an MV. When completed, this will treat missing data as such. At present only CF-compliant files are supported.
def reduce( self, vid=None ):
    if vid is None:
        vid = self._vid
    rows = self._filetable.find_files( self.variableid, time_range=self.timerange,
                                       lat_range=self.latrange, lon_range=self.lonrange,
                                       level_range=sel...
[ "def cmorize(self):\n logger.info(\"Starting cmorization for Tier%s OBS files: %s\",\n self.cfg['attributes']['tier'],\n self.cfg['attributes']['dataset_id'])\n logger.info(\"Input data from: %s\", self.in_dir)\n logger.info(\"Output will be written to: %s\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Prepare the file locker. Specify the file to lock and optionally the maximum timeout and the delay between each attempt to lock.
def __init__(self, file_name, timeout=10, delay=.05):
    self.is_locked = False
    #self.lockfile = os.path.join(os.getcwd(), "%s.lock" % file_name)
    self.lockfile = file_name + '.lock'
    self.file_name = file_name
    self.timeout = timeout
    self.delay = delay
[ "def __init__(self, file_name, timeout=10, delay=.05):\n self.is_locked = False\n self.lockfile = os.path.abspath(file_name)\n self.file_name = file_name\n self.timeout = timeout\n self.delay = delay\n self.fd = None", "def __init__(self, file_name, timeout=10, delay=.05)...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the view model for the cards in the deck
def get_cards(self):
    return [card.view_model() for card in self._deck.loc]
[ "def cards(self):\r\n return self._request('cards')", "def get_cards(request, deck_id):\n deck = Deck.objects.get_object_or_404(pk=deck_id, owner=request.user,\n active=True)\n cards = deck.deck_cards.filter(active=True).values();\n deck = deck.values()\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
validate rpy2 can load correctly
def test_rpy2_integration():
    ## Try to import rpy (test R_HOME path) ##
    import rpy2.robjects as robjects
    import rpy2
    from rpy2.robjects.packages import importr

    req_filepath = path.join(ROOT, R_REQUIREMENTS_FILE)
    with open(req_filepath, 'r') as req_fh:
        raw_req = req_fh.read().splitlines(...
[ "def test_valid_python():\n from decisionengine.framework.util import reaper # noqa: F401\n\n pass", "def rpn_version_check(self):", "def test_LPyModelDriver_nolpy(): # pragma: no lpy\n assert_raises(RuntimeError, LPyModelDriver.LPyModelDriver,\n 'test', scripts['lpy'])", "def __lo...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Do not return anything, modify board in-place instead. Approach: scan the border once; add every 'O' found there to the queue for BFS and mark it with a different value, e.g. 'D'. Finally sweep the whole board: any remaining 'O' is marked 'X', and any 'D' is marked back to 'O'. The first for loop checks the first row and the last row for any 'O'; the BFS helper returns early if a cell is out of range or != 'O', otherwise it adds the cell to the queue. The second loop is from the sec...
def solve(self, board: List[List[str]]) -> None: def bfs(x,y): if x<0 or x>row-1 or y<0 or y>column-1 or board[x][y]!='O': return queue.append((x,y)) board[x][y]='D' while queue: i, j = queue.popleft() bfs(i+1,j) # c...
[ "def bfs(maze, initial_state, goal_state):\n\n # Initialize fringe and closed_set.\n # Fringe is a collection.deque and closed_set is a set()\n fringe, closed_set = setup_fringe_and_closedset(initial_state)\n\n # Object that gathers metrics about the search algorithm on this maze\n metrics = Metric...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a generator of packets. This is the sync version of packets_from_tshark. It waits for the completion of each coroutine and reimplements reading packets in a sync way, yielding each packet as it arrives.
def _packets_from_tshark_sync(self, tshark_process, packet_count=None, timeout:float=3.0, max_data_length:int=10000): # NOTE: This has code duplication with the async version, think about how to solve this psml_structure, data = self.eventloop.run_until_complete(self...
[ "def pkt_gen(self):\n for i in range(self.num_pkts):\n # create the test packets\n pkt = Ether()/IP()/TCP()/'hello there pretty world!!!'\n rank = random.sample(range(0, 100), 1)[0]\n pkt_id = i\n tuser = Tuser(len(pkt), 0b00000001, 0b00000100, rank, pkt...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests the matrix_vector_product code.
def test_matrix_product(self, use_cache): key = jrandom.PRNGKey(0) dim = 50 max_power = 25 matrix = jrandom.normal(key, (dim, dim)) / 10 vector = jnp.ones((dim,), dtype=jnp.float32) if use_cache: mpstate = model_utils.CachedMatrixPowerState.precompute(matrix, max_power) else: ...
[ "def matrix_vector_prod(m,u):\n each_product = []\n for v in m:\n each_product.append(dot_prod(v, u))\n return each_product", "def test_matrixmult(self, A, B, expected):\n result = pyCGM.matrixmult(A, B)\n np.testing.assert_almost_equal(result, expected, rounding_precision)", "def ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests the matrix_power_cached code.
def test_matrix_power(self, use_cache): key = jrandom.PRNGKey(0) dim = 50 max_power = 25 matrix = jrandom.normal(key, (dim, dim)) / 10 if use_cache: mpstate = model_utils.CachedMatrixPowerState.precompute(matrix, max_power) else: mpstate = model_utils.LazyMatrixPowerState(matrix) ...
[ "def test_matrix_product(self, use_cache):\n\n key = jrandom.PRNGKey(0)\n dim = 50\n max_power = 25\n\n matrix = jrandom.normal(key, (dim, dim)) / 10\n vector = jnp.ones((dim,), dtype=jnp.float32)\n\n if use_cache:\n mpstate = model_utils.CachedMatrixPowerState.precompute(matrix, max_power)\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This function returns the stations with the N highest relative water levels.
def stations_highest_rel_level(stations, N): relative_water_level = [] # Create dictionary of relevant stations with relative water levels for station in stations: if type(station.relative_water_level()) != float: continue else: relative_water_level.append((station.na...
[ "def stations_highest_rel_level(stations, N):\r\n\r\n stations_with_threshold = []\r\n update_water_levels(stations)\r\n for station in stations:\r\n if station.relative_water_level() == None:\r\n pass\r\n else:\r\n \r\n stations_with_threshold.append((station...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
returns the number of vertices of a graph
def num_vertices(self): return len(self.__graph_dict.keys())
[ "def vertex_count(self) -> int:\n return len(self.graph_dict.keys())", "def vertex_count(self) -> int:\n return self.graph.vertex_count()", "def get_num_vertices(self):\n\n return self._graph_state.get_num_vertices()", "def get_number_of_vertices(self):\n return len(self.vertices)"...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
assumes that edge is of type set, tuple or list; there can be multiple edges between two vertices!
def add_edge(self, edge): edge = set(edge) (vertex1, vertex2) = tuple(edge) if vertex1 in self.__graph_dict.keys() and vertex2 in self.__graph_dict.keys(): if vertex2 in self.__graph_dict[vertex1] and vertex1 in self.__graph_dict[vertex2]: return ...
[ "def edges(self):", "def edges(g):\r\n return set([tuple([i, j]) for i, v in g.items() for j in v])", "def edge_vertices(edge):\n return [edge.vertex1, edge.vertex2]", "def connect(self, edge):\n edge = set(edge)\n obj1, obj2 = tuple(edge)\n for x, y in [(obj1, obj2), (obj2, obj...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Slim down the options list if more than n remain.
def slim_down_options(options, count_func, n=25, v=''): if len(options) > 100: options_slim = [] c = count_func(base) for obj in options: if c == count_func(obj): options_slim.append(obj) if len(options_slim) > n: ...
[ "def keep_n(self, n=100):\n before = self.item_count()\n\n item_count = self.item_count()\n if item_count > n: self.filter(self.sample(n))\n\n after = self.item_count()\n with msg(f'Keeping (at most) {n} items: {after} of {before}', done=False, enabled=self.output):pass", "def l...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Print the words in the vocabulary sorted according to their embedding-distance to the given word. Different metrics can be used, e.g. 'cosine' or 'euclidean'.
def print_sorted_words(word, metric='cosine'): # Get the token (i.e. integer ID) for the given word. token = tokenizer.word_index[word] # Get the embedding for the given word. Note that the # embedding-weight-matrix is indexed by the word-tokens # which are integer IDs. embedding = weights_emb...
[ "def display_nearest_words(self, word, k=10):\n \n if word not in self.vocab:\n print('Word \"{}\" not in vocabulary.'.format(word))\n return\n\n # Compute distance to every other word.\n idx = self.vocab.index(word)\n word_rep = self.params.word_embedding_we...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculate the FDR curve for arrays of target scores and decoy scores.
def calc_fdr_arr(target_arr, decoy_arr, ascending=False): n, m = len(target_arr), len(decoy_arr) if n != m: raise TypeError('target should be same length as decoy {} {}'.format(n, m)) ordering = 1 if ascending else -1 # reversed sorting if score is not ascending combined = np.concatenate((targe...
[ "def false_discovery_rate(y_true: np.array, y_score: np.array) -> float:\n fp = false_positive(y_true, y_score)\n tp = true_positive(y_true, y_score)\n fdr = fp / (fp + tp)\n return fdr", "def _cost_function_derivative(self, y_pred, y, X, m):\n\n derivatives= np.zeros((X.shape[0],1))\n f...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Find the index of the point before the rightmost crossing point between an FDR curve and an FDR target value. Formally speaking, given an array fdr_curve and a number fdr_target, find the smallest index i such that fdr_curve[j] >= fdr_target for all j > i.
def find_crossing(fdr_curve, fdr_target): #if not is_fdr_curve(fdr_curve): # raise ValueError("Not a valid FDR curve") #ADP - need to review is_fdr_curve criteria +noise means can start above 0 if not 0 < fdr_target < 1: return -1 less_zero_indices = np.where(fdr_curve <= fdr_target)[0] ...
[ "def find_closest(A, target):\n idx = A.searchsorted(target)\n idx = np.clip(idx, 1, len(A)-1)\n left = A[idx-1]\n right = A[idx]\n idx -= target - left < right - target\n return idx", "def __find_r_corr_in_points(arr):\n n = len(arr)\n th = arr[n // 2] * exp(-1.0)\n for i i...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculate a file hash using MD5
def calc_file_md5(file_path): hash_md5 = str() method = hashlib.md5() if not os.path.exists(file_path): logger.error("File(%s) don not exist, can not calculation file hash" % file_path) return hash_md5 with open(file_path, 'rb') as f: for chunk in read_chunks(f, 1024 * 1024): ...
[ "def md5calc(filename):\n hashsum = md5()\n with open(filename, \"rb\") as f:\n for chunk in iter(lambda: f.read(4096), b''):\n hashsum.update(chunk)\n return hashsum.hexdigest()", "def CalcMD5(filepath):\n with open(filepath,'rb') as f:\n md5obj = hashlib....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Fetch the CPU and memory info of a process
def get_cpu_memory_info(process_name): info_dict = dict() try: process_list = get_process_info(process_name) for process in process_list: cmdline = process.cmdline() name = os.path.basename(cmdline[2]) if len(cmdline) > 3 else process_name + "_" + str(process.pid) ...
[ "def cpuinfo(self):\n \n command = 'cat /proc/cpuinfo'\n\tpipe = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n stdout, stderr = pipe.communicate()\n\tinfo = stdout.strip()\n cpu_type = None\n\tn_proc = 0\n\tfor line in info.split('\\n'):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check whether `obj` inherits from Boost.Python.enum.
def is_boost_enum(obj: Any) -> bool: for cls in type(obj).__bases__: if "Boost.Python.enum" in str(cls): return True return False
[ "def valid_class(obj):\n is_valid = type(obj) is not EnumMeta and isclass(obj)\n return is_valid", "def IsEnum(self) -> bool:", "def is_enum(schema_obj):\n\n return (isinstance(schema_obj, schema.Enum) or\n (isinstance(schema_obj, schema.Field) and schema_obj.enum_type))", "def isEnumType(type...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check whether `obj` is an IceCube-specific class.
def is_icecube_class(obj: Any) -> bool: classname = str(type(obj)) return "icecube." in classname
[ "def isclass(obj):\n return isinstance(obj, type)", "def is_class(obj) -> bool:\n return inspect.isclass(obj)", "def isclass(object):\r\n return isinstance(object, (type, types.ClassType))", "def is_pigalle_instance(obj: Any) -> bool:\n return isinstance(obj, PygalleBaseClass)", "def is_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check whether `obj` is a method.
def is_method(obj: Any) -> bool: return inspect.ismethod(obj) or "Boost.Python.function" in str(type(obj))
[ "def ismethod(obj):\n return isinstance(obj, types.MethodType)", "def hasmethod(obj, method_name):\n obj_method = getattr(obj, method_name, None)\n return callable(obj_method) if obj_method else False", "def isMethod(self):\r\n return self._wrap(inspect.ismethod(self.obj))", "def is_method_of(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a list of valid member variables, ignoring mangled (__) variables, types, methods, and Boost enums.
def get_member_variables( obj: Any, return_discarded: bool = False ) -> Union[List[str], Tuple[List[str], Dict[str, List[str]]]]: valid_member_variables = [] discarded_member_variables: Dict[str, List[str]] = { "mangled": [], "is_type": [], "invalid_attr": [], "is_method": []...
[ "def unusedVars(self):\n fullcode = self.code_cfg\n variables = set([x[1:] for x in codeconfig_getvars(fullcode)])\n exceptions = set(['complexity', 'code_cfg'])\n clsvars = set(vars(self).keys())\n nones = set(filter(lambda x: self.__dict__[x] is None, clsvars))\n nones = ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Cast `obj`, and any members/elements, to pure-python classes. The function takes any object `obj` and tries to cast it to a pure python class. This is mainly relevant for IceCube-specific classes (I3) that cannot be cast trivially. For IceCube-specific classes, we check whether the object has any member variables, and if ...
def cast_object_to_pure_python(obj: Any) -> Any: logger = Logger() logger.debug(f"Value: {obj}") logger.debug(f"Type: {str(type(obj))}") if not is_icecube_class(obj): logger.debug("Found non-I3 class. Exiting.") if isinstance(obj, (list, tuple, set)): return [cast_object_to_...
[ "def _class_for(self, obj):\n if isinstance(obj, type):\n return obj\n\n return obj.__class__", "def ns_from_py(pyobj):\n\n if isinstance(pyobj, enum.Enum):\n pyobj = pyobj.value\n\n # Many Objective-C method calls here use the convert_result=False kwarg to\n # disable aut...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Ensure values are <= max brightness and != stop byte
def _check_values(self, rgb_array): for i, value in enumerate(rgb_array): if value > self.brightness_limit: rgb_array[i] = self.brightness_limit if value == self.STOP_BYTE: rgb_array[i] -= 1
[ "def _assert_is_brightness(value):\n if not 0 <= value <= 1:\n raise ValueError('Brightness must be between 0 and 1.')", "def validate_channel_value(value: int) -> None:\n if 0 <= value <= 255:\n pass\n else:\n raise ValueError(\"Color channel has to be in range [0; 255]\")",...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Install Emacs with some features in a Python 2.7 environment
def install_p2k(): if 'pkgs' not in env: env.pkgs = [] pkgs = [ 'python2', 'git', 'mercurial', 'emacs', # For flymake 'xmlstarlet', #'csslint-git', ] require.arch.packages(pkgs) python_cmd = 'python2.7' virtualenv = '.virtualenvs/...
[ "def setup(c, version=None):\n version = version or '3.8'\n suffix = '' if version == '3.8' else version.replace('.', '')\n env_name = f'sk-eval{suffix}'\n\n c.run(f'conda create --name {env_name} python={version} --yes')\n c.run('eval \"$(conda shell.bash hook)\" '\n f'&& conda activate {en...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create creates a set session
async def create( self, *, header: Optional[headers.RequestHeader] = None ) -> CreateResponse: request = CreateRequest() if header is not None: request.header = header return await self._unary_unary( "/atomix.set.SetService/Create", request, CreateResponse, ...
[ "def create_set(self, node, name):\r\n return self._send({'name': 'createSet', 'args': [node, name]})", "def create(self):\r\n sessId = Session.generateId()\r\n return Session(sessId)", "def create(data):\n \n return Setlist(\n list_id = data['id'],\n nam...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Close closes a set
async def close( self, *, header: Optional[headers.RequestHeader] = None, delete: bool = False ) -> CloseResponse: request = CloseRequest() if header is not None: request.header = header request.delete = delete return await self._unary_unary( "/atomi...
[ "def close_changeset(self):\n pass", "def _close_result_set(self):\n if self._result_set:\n self._result_set.close(self.session)\n self._result_set = None", "def remove_set(self, set_id):\n pass", "def testFilesetClosing2(self):\n testOutputFileset1 = Fileset(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Loads 10 seconds of 8000 Hz music ('dataset/wind_lq.wav'), applies the algorithm on windows of size alg.N, and outputs the result to a .wav file ('wind_lq_predicted.wav').
def test_real_song(alg): alg.input_func = None alg.input_func_args = 'dataset/wind_lq.wav',True alg.predict_long_wav_data(fs=8000, outname='wind_lq_predicted.wav')
[ "def generate_pickle_dataset(threshold=10):\n\n threshold = threshold * 3600\n mapped_audio = map_audio_transcripts_generic()\n audioInput = []\n timing = 0\n pickle_file_index = 0\n for path, transcription in mapped_audio.items():\n print(path + \"====>\" + transcription)\n\n # Call...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Ensure class methods' signatures match the base class.
def test_class_method() -> None: assert inspect.signature(lmp.tknzr._bpe.BPETknzr.add_CLI_args) == inspect.signature(BaseTknzr.add_CLI_args)
[ "def test_method_signatures(self):\n pass", "def check_implemented_functions(_class):\n mandatory_functions_to_implement = [('generate', 2), ('__init__', 6)]\n implemented_class_function_names = get_implemented_class_functions(_class)\n for function in mandatory_functions_to_implement:\n fu...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Instantiate a StartFunction task.
def __init__(self, func=None, **kwargs): self.func = func if func is not None else self.start_func_default super(StartFunction, self).__init__(**kwargs)
[ "def start_tasklet(self, func, args):\r\n return stackless.tasklet(func)(*args)", "def start_thread(function):\n function.start_thread = True\n return function", "def make_task(self, function, *args):\r\n return self.make_task_with_deps(function, [], *args)", "def __init__(self, func, task...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }