Dataset schema: query (string, length 9 to 9.05k), document (string, length 10 to 222k), negatives (list, 19 to 20 items), metadata (dict).
Reverse the list of regions in place.
def reverse(self): self.regions.reverse()
[ "def removeRegions(self):\n for i in range(len(self.verticalStripes)):\n for j in range(len(self.horizontalStripes)):\n region = Region(self.verticalStripes[i], self.horizontalStripes[j])\n self.regions.append(region)\n region.removeRegion(self)", "de...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Remove and return the region at index.
def pop(self, index=-1): return self.regions.pop(index)
[ "def removeIndex(self, index):\r\n item = self.queue.pop(index)\r\n return item", "def remove_map(self, index):\n return self._maps.pop(index)", "def remove(self, idx):\n\n assert idx > 0\n assert idx < len(self.points) - 1\n del_p = self.points[idx]\n self.point...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the registered I/O formats as a Table.
def get_formats(cls): return RegionsRegistry.get_formats(cls)
[ "def get_supported_output_formats():\n return exporter", "def listFormats():\n return None", "def output_formats(self) -> List[DataFormat]:\n return self._output_formats", "def output_formats(self) -> List[DataFormat]:\n pass", "def getDocFormats(conn=None):\n\n query = cdrdb.Query(\"fo...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Write the regions to a region file in the specified format. This method allows writing a file in many supported data formats.
def write(self, filename, format=None, overwrite=False, **kwargs): return RegionsRegistry.write(self.regions, filename, self.__class__, format=format, overwrite=overwrite, **kwargs)
[ "def write_data_to_file(self, regions, filename):\n raise Exception(\"Unimplemented Function\")", "def tofile(self, *args, **kwargs):\n return _regionmanager.regionmanager_tofile(self, *args, **kwargs)", "def writef13(self, filename, write_regions=True):\n # The whitespace is important if t...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Serialize the regions to a region string or table. This method allows serializing regions in many supported data formats.
def serialize(self, format=None, **kwargs): return RegionsRegistry.serialize(self.regions, self.__class__, format=format, **kwargs)
[ "def regions(self):\n url = self.base_url + 'nodes?hierarchy=%2A'\n headers = {'Authorization': 'Bearer {}'.format(self.auth())}\n r = requests.get(url, headers=headers)\n df = pd.read_json(r.content, orient='records')\n return pd.Series(df['name'].unique(), name='region')", "de...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the survival label corresponding to each patient in the gene file.
def getSurvivalLabels(genePeople, peopleDict):
    labels = []
    for i in range(len(genePeople)):
        label = peopleDict[genePeople[i]][24]
        labels.append(label)
    return labels
[ "def extract_labels(self):\r\n self.ECGlabel = {}\r\n self.ECGsampling = []\r\n for i in range(self.signalnum):\r\n label = self.HeaderFile[256+(16*i):272+(16*i)] # extract the lead type i's label\r\n self.ECGlabel[i] = label.strip()\r\n sampling = int(self.Head...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Verify that survival labels correctly correspond to each patient in the gene file.
def verifySurvivalLabels(genePeople, peopleDict, newSurvival):
    wrong = 0  # initialize before the loop so the count is not reset each iteration
    for i in range(len(genePeople)):
        a = peopleDict[genePeople[i]][24]
        b = newSurvival[i]
        if a != b:
            wrong += 1
    return wrong
[ "def test_label():\n label_path = pjoin(data_path, \"label\", \"lh.BA1.label\")\n label = read_label(label_path)\n # XXX : test more\n assert_true(np.all(label > 0))\n\n labels, scalars = read_label(label_path, True)\n assert_true(np.all(labels == label))\n assert_true(len(labels) == len(scalar...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Find the indices of selected genes so we can adjust the data.
def findGeneIndex(featureGenes, genes):
    indices = []
    missing = []
    for gene in featureGenes:
        if gene in genes:
            indices.append(genes.index(gene))
        else:
            missing.append(gene)
    return indices
[ "def get_gene_index(self, query):\n if len(query) > 4 and query[:4] == 'ENSG':\n return np.nonzero(query == self.gene_ids)[0]\n else:\n return np.nonzero(query == self.gene_names)[0]", "def get_first_genotype_index(self):\n return self.first_genotype_idx", "def gen2ind(gen...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Find patients in survival data that are absent in gene data
def findMissingPatients(genePatients, survivalPatients):
    missing = []
    for patient in survivalPatients:
        if patient not in genePatients:
            missing.append(patient)
    return missing
[ "def _get_non_mesons(PDGIDs):\n return [pid for pid in PDGIDs if pid not in _get_mesons(PDGIDs)]", "def filter_no_fragments(data_frame):\n clearance = data_frame.groupby(\"Patient ID\").sum() == 0\n return data_frame[\n ~clearance[data_frame.index.get_level_values(\"Patient ID\")].values\n ]", ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Select 50 features using step forward selection
def selectFeatures50(X, Y):
    #Build RF classifier to use in feature selection
    clf = RandomForestClassifier(n_estimators=100, n_jobs=-1)

    # Build step forward feature selection
    sfs1 = sfs(clf,
               k_features=50,
               forward=True,
               floating=False,
               verbos...
[ "def selectFeatures30(X, Y):\n # Build RF classifier to use in feature selection\n clf = RandomForestClassifier(n_estimators=100, n_jobs=-1)\n\n # Build step forward feature selection\n sfs1 = sfs(clf,\n k_features=30,\n forward=True,\n floating=False,\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Select 30 features using step forward selection
def selectFeatures30(X, Y):
    # Build RF classifier to use in feature selection
    clf = RandomForestClassifier(n_estimators=100, n_jobs=-1)

    # Build step forward feature selection
    sfs1 = sfs(clf,
               k_features=30,
               forward=True,
               floating=False,
               verbose...
[ "def selectFeatures50(X, Y):\n #Build RF classifier to use in feature selection\n clf = RandomForestClassifier(n_estimators=100, n_jobs=-1)\n\n # Build step forward feature selection\n sfs1 = sfs(clf,\n k_features=50,\n forward=True,\n floating=False,\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates dendrogram from randomly selected genes
def makeRandomDendrogram(patientData, patients):
    linked = linkage(patientData, 'ward')
    plt.figure(figsize=(100, 100))
    dendrogram(linked, orientation='top', labels=patients, distance_sort='descending', show_leaf_counts=True)
    plt.title("Randomly Selected Gene Dendrogram")
    plt.xlabel("Patients")
    plt....
[ "def makeNewDendrogram(patientData, patients):\n linked = linkage(patientData, 'ward')\n print(linked)\n plt.figure(figsize=(100, 100))\n dendrogram(linked, orientation='top',labels=patients,distance_sort='descending',show_leaf_counts=True)\n plt.title(\"Selected Gene Dendrogram\")\n plt.xlabel(\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates new dendrogram for smaller subset of genes
def makeNewDendrogram(patientData, patients):
    linked = linkage(patientData, 'ward')
    print(linked)
    plt.figure(figsize=(100, 100))
    dendrogram(linked, orientation='top', labels=patients, distance_sort='descending', show_leaf_counts=True)
    plt.title("Selected Gene Dendrogram")
    plt.xlabel("Patients")
    ...
[ "def plot_dendrogram(G, partitions):\n\n num_of_nodes = G.number_of_nodes()\n dist = np.ones( shape=(num_of_nodes, num_of_nodes), dtype=np.float )*num_of_nodes\n d = num_of_nodes-1\n for partition in partitions:\n for subset in partition:\n for i in range(len(subset)):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns an array containing each cluster's survival rate ratio.
def findSurvivalRatio(cluster, patients, survivalData, survival):
    #print(survivalData)
    print(cluster)
    cluster0 = []
    survival0 = []
    cluster1 = []
    survival1 = []
    for i in range(len(cluster)):
        if cluster[i] == 0:
            cluster0.append(patients[i])
            survival0.append(surv...
[ "def clustering_coefficient(self):\n res = [] # list for storing ratios for each vertex\n edges = set(edge for sublist in self.edges() for edge in sublist)\n for v in self.vertices(): # iterate through all vertices\n out = self.out_vertices(v) # store the out vertices\n k_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Open the existing tape if available, returning (boolean, fileno)
def init_tape(self):
    print('ATTEMPT BIOS_TAPE', self.uuid_radix_name, 'with 32914')
    fileno = self.open(self.uuid_radix_name, 32914)
    print('TAPE fileno', fileno)
    # return if BIOS Tape file exists at the given location,
    # this should load for instructions
    return filen...
[ "def open(self, tape_id, access_id):\r\n\r\n # Open bios tape at current source location (ugly.)\r\n # and read the input strategy.\r\n try:\r\n puts('Opening', tape_id, 'with access', access_id)\r\n bios_tape_stream = os.open(tape_id, access_id)\r\n return bios...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Open a file descriptor to the HOST RAM given a tape_id to assist compilation of the data asset
def open(self, tape_id, access_id):
    # Open bios tape at current source location (ugly.)
    # and read the input strategy.
    try:
        puts('Opening', tape_id, 'with access', access_id)
        bios_tape_stream = os.open(tape_id, access_id)
        return bios_tape_stream
    ...
[ "def _open_tablet(self, cell_id, cgroup, mode='r'):\n\n\t\tif mode == 'r':\n\t\t\tfn_r = self._tablet_file(cell_id, cgroup)\n\t\t # --- hack: preload the entire file to have it appear in filesystem cache\n\t\t # this will speed up subsequent random reads within the file\n\t\t\twith open(fn_r) as f...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Produce a new BIOS RAM tape using the given ID. The mandatory BIOS instructions are measured and set. A new tape is saved. Return the tape fileno. This is a byte array for file access loadout.
def new_tape(self):
    puts('Creating new bios tape with 34193')
    try:
        vv = os.O_RDWR|os.O_RANDOM|os.O_BINARY|os.O_CREAT
        fileno = os.open(self.uuid_radix_name, vv)  # mode='wb')
    except Exception as e:
        puts('Error with open', str(e))
        fileno = -2
    ...
[ "def write_new_tape(self, fileno):\r\n\r\n puts('Writing new tape')\r\n write = os.write\r\n lines = ()\r\n # vol phase state.\r\n lines += (b'0', )\r\n # Address pointer of the the ram file.\r\n # Booting in the same env this should be the same every time.\r\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Read in a bios tape and configure
def read_tape(self, fileno):
    os.lseek(fileno, 0, 0)
    # Read as much as needed to capture the first
    # word within the string.
    first = os.read(fileno, 10)
    if len(first) == 0:
        puts('Corrupt tape. Delete => Renew')
        self.write_new_tape(fileno)
    ...
[ "def init_tape(self):\r\n print( 'ATTEMPT BIOS_TAPE', self.uuid_radix_name, 'with 32914')\r\n fileno = self.open(self.uuid_radix_name, 32914)\r\n print('TAPE fileno', fileno)\r\n # return if BIOS Tape file exists at the gven location,\r\n # this should load for instructions\r\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Write a new tape into the given tape fileno.
def write_new_tape(self, fileno):
    puts('Writing new tape')
    write = os.write
    lines = ()
    # vol phase state.
    lines += (b'0', )
    # Address pointer of the ram file.
    # Booting in the same env this should be the same every time.
    lines += (bytes(str(f...
[ "def new_tape(self):\r\n puts('Creating new bios tape with 34193')\r\n try:\r\n vv = os.O_RDWR|os.O_RANDOM|os.O_BINARY|os.O_CREAT\r\n\r\n fileno = os.open(self.uuid_radix_name, vv)# mode='wb')\r\n except Exception as e:\r\n puts('Error with open', str(e))\r\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Find the path to the xgboost dynamic library files. Returns lib_path, a list of all found library paths to xgboost.
def find_lib_path() -> List[str]:
    curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
    dll_path = [
        # normal, after installation `lib` is copied into Python package tree.
        os.path.join(curr_path, 'lib'),
        # editable installation, no copying is performed.
        os.pa...
[ "def find_lib_path():\n curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))\n dll_path = [curr_path, os.path.join(curr_path, '../../lib/'),\n os.path.join(curr_path, './lib/'),\n os.path.join(sys.prefix, 'dlr'),\n os.path.join(sys.prefix, 'lo...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Plots a hyperspy signal and draws an interactive ROI on it on the top left tenth of the image. Can take a list of [x1, y1, x2, y2, linewidth] to set a known initial ROI. Returns a tuple of (roi, roi_signal). Use hide=True to not show the plot.
def extract_ROI_line(s, lineROI=[0, 0, 512, 512, 1], hide=False, color="red"):
    import hyperspy.api as hs
    if s.axes_manager.navigation_dimension < 2:
        x_axis = s.axes_manager[s.axes_manager.signal_indices_in_array[1]]
        y_axis = s.axes_manager[s.axes_manager.signal_indices_in_array[0]]
    else:
        x_axis = s.axes_manag...
[ "def draw_signal_ROIs(bin_mask, img, show_bbox=False, show_quad=False, show_graphical=True, imagepath=None):\n # draw signal ROIs\n # get centre of phantom and definte 5 ROIs from there\n label_img, num = label(bin_mask, connectivity=img.ndim, return_num=True) # labels the mask\n\n props = regionprops(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
get the value of property _LayerState
def LayerState(self):
    if self.force_auto_sync:
        self.get('LayerState')
    return self._LayerState
[ "def state(self):\n return self._state.value", "def state(self):\n if not self.sublayers:\n return self._state\n else:\n return tuple(layer.state if s is None else s\n for (layer, s) in zip(self.sublayers, self._state))", "def state(self):\n return self._def[ATTR_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
get the value of property _AddressMode
def AddressMode(self):
    if self.force_auto_sync:
        self.get('AddressMode')
    return self._AddressMode
[ "def billing_address_handling_mode(self):\n return self._billing_address_handling_mode", "def AddressType(self):\n if self.force_auto_sync:\n self.get('AddressType')\n return self._AddressType", "def shipping_address_handling_mode(self):\n return self._shipping_address_han...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates a public version of a task_dict. task_id is replaced with an appropriate URI and task_complete is coerced from a number to a Python boolean.
def make_public_task(task_dict):
    new_task = {}
    for key, value in task_dict.items():
        # Switch task_id to task_uri
        if key == 'task_id':
            new_task['task_uri'] = url_for(
                '.get_task', task_id=task_dict['task_id'], _external=True)
        # Switch task_complete from numeric 0/1 to bool True/False
        el...
[ "def generate_task_dictionary(self, tid, state=\"done\", completed=True):\n\n known_states = [\"done\", \"error\", \"started\", \"received\", \"waiting\"]\n\n now = int(time.time())\n\n completed_ts = None\n if completed:\n completed_ts = now\n\n # I want know about dod...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Balanced strings are those that have an equal quantity of 'L' and 'R' characters. Given a balanced string s, split it into the maximum number of balanced strings. Return the maximum number of split balanced strings. >>> balancedStringSplit("RLRRLLRLRL") 4 >>> balancedStringSplit("RLLLLRRRLR") 3 >>> balancedStringSplit("LLL...
def balancedStringSplit(s: str) -> int:
    c = 0
    res = 0
    for char in s:
        if char == 'R':
            c += 1
        if char == 'L':
            c -= 1
        if c == 0:
            res += 1
    return res
[ "def balanced_strings(s: str) -> StaticArray:\n a, b, c, index_1, index_2, count, array_size = 0, 0, 0, 0, 0, 0, 0\n\n # sets temp array size\n for _ in s:\n array_size += 1\n\n # divided by three to save memory space since minimum length of string is 3.\n temp_arr = StaticArray(array_size//3)...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Change the mutability value to `mutable` on this options and children.
def _set_mutable(self, mutable):
    # pylint: disable=protected-access
    object.__setattr__(self, "_mutable", mutable)
[ "def _set_mutable(self, mutable):\n # pylint: disable=protected-access\n object.__setattr__(self, \"_mutable\", mutable)\n self.autotune._set_mutable(mutable)\n self.experimental_distribute._set_mutable(mutable)\n self.experimental_optimization._set_mutable(mutable)\n self.threading._set_mutable(m...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Change the mutability value to `mutable` on this options and children.
def _set_mutable(self, mutable):
    # pylint: disable=protected-access
    object.__setattr__(self, "_mutable", mutable)
    self.autotune._set_mutable(mutable)
    self.experimental_distribute._set_mutable(mutable)
    self.experimental_optimization._set_mutable(mutable)
    self.threading._set_mutable(mutable)
[ "def is_mutable(self):\n return False", "def is_mutable(self):\n return self._is_mutable", "def is_immutable(self):\n return not self._is_mutable", "def setValueMutable(self, boolean: bool) -> None:\n ...", "def update_mutables(self, *, save_yaml: bool = False) -> None:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Merges itself with the given `tf.data.Options`. If this object and the `options` to merge set an option differently, a warning is generated and this object's value is updated with the `options` object's value.
def merge(self, options): return options_lib.merge_options(self, options)
[ "def merge(self, dataset):\n self.__dataset.update(**dataset)", "def update_if_not_none(self, other: \"Options\") -> \"Options\":\n merged = self._asdict()\n other_dict = other._asdict()\n for k, v in other_dict.items():\n if k in merged and v is not None and v:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Takes a 2D input tensor and re-sorts it so that zeros in the tensor move to the end.
def resort_zero(inputs, max_batch_size):
    with tf.name_scope("resort_zero"):
        batch_size, max_length = tf.shape(inputs)[0], tf.shape(inputs)[1]
        odd_indices = tf.matmul(tf.reshape(tf.range(1, batch_size*2, 2), [-1,1]), tf.fill([1,max_length], 1))
        even_indices = tf.matmul(tf.reshape(tf.range...
[ "def the_last1(tensor: torch.Tensor, out_len: int) ->torch.Tensor:\n return tensor[:, -1, :].unsqueeze(0).permute(1, 0, 2).repeat(1, out_len, 1)", "def rshift_time(tensor_2d, fill=misc.BF_EOS_INT):\n dim_0 = tf.shape(tensor_2d)[0]\n fill_tensor = tf.fill([dim_0, 1], fill)\n return tf.concat([fill_tensor, te...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Computes top1 and top5 accuracies of neural networks found in network_outputs and of ensembles found in ensemble_outputs. These accuracies are stored into nets.csv and combins.csv respectively. Also computes accuracies of ensembles on subsets of testing data, where these subsets are formed according to correctness of c...
def evaluate_results():
    parser = argparse.ArgumentParser()
    parser.add_argument('-network_outputs', type=str, required=True, help='path to networks test outputs folder')
    parser.add_argument('-ensemble_outputs', type=str, required=True, help='path to ensembles test outputs folder')
    parser.add_argument('-e...
[ "def ensemble_models_and_evaluate_accuracy(train_probas, val_probas, test_probas, y_train, y_val, y_test):\n train_eq_ensemble_pred = equally_ensemble_results(train_probas)\n val_eq_ensemble_pred = equally_ensemble_results(val_probas)\n test_eq_ensemble_pred = equally_ensemble_results(test_probas)\n\n p...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the cosine and sine for the given angle in degrees, with special-case handling of multiples of 90 for perfect right angles.
def cos_sin_deg(deg):
    deg = deg % 360.0
    if deg == 90.0:
        return 0.0, 1.0
    elif deg == 180.0:
        return -1.0, 0
    elif deg == 270.0:
        return 0, -1.0
    rad = math.radians(deg)
    return math.cos(rad), math.sin(rad)
[ "def cos(angle):\n\n return str(math.cos(parse_operand(angle)))", "def sin(x):\n return math.sin(math.pi*x/180)", "def sin(angle):\n\n return str(math.sin(parse_operand(angle)))", "def sin_degrees(theta):\n return sin(radians_from_degrees(theta))", "def cos(x):\n return math.cos(math.pi*x/180...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a vector from polar coordinates. Angle should be in radians.
def polar(cls, angle, length=1.0):
    x, y = math.cos(angle), math.sin(angle)
    return Vector(x * length, y * length)
[ "def polar_deg(cls, angle, length=1.0):\n return Vector.polar(math.radians(angle), length)", "def polar2vect(delta, phi):\n import numpy as np\n x = np.cos(phi)\n y = np.sin(phi)\n z = np.sin(delta)\n vector = np.array([x,y,z])\n vector = vector / np.linalg.norm(vector)\n return vector...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a vector from polar coordinates. Angle should be in degrees.
def polar_deg(cls, angle, length=1.0): return Vector.polar(math.radians(angle), length)
[ "def polar(cls, angle, length=1.0):\n x, y = math.cos(angle), math.sin(angle)\n return Vector(x * length, y * length)", "def polar2vect(delta, phi):\n import numpy as np\n x = np.cos(phi)\n y = np.sin(phi)\n z = np.sin(delta)\n vector = np.array([x,y,z])\n vector = vector / np.lina...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test the set thread dataset command.
async def test_set_thread_dataset(
    hass: HomeAssistant,
    hass_ws_client: WebSocketGenerator,
    matter_client: MagicMock,
    integration: MockConfigEntry,
) -> None:
    ws_client = await hass_ws_client(hass)
    await ws_client.send_json(
        {
            ID: 1,
            TYPE: "matter/set_thread",
            ...
[ "def run(self, datasets, **kwargs):\n return None", "def _set_dataset_var(**context):\n dataset_id = context['task_instance'].xcom_pull(\n task_ids='create_dataset_task', key='dataset_id')\n models.Variable.set(blockbuster_constants.BLOCKBUSTER_RECENT_DATASET_VAL,\n dataset_id)", ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test the set WiFi credentials command.
async def test_set_wifi_credentials(
    hass: HomeAssistant,
    hass_ws_client: WebSocketGenerator,
    matter_client: MagicMock,
    integration: MockConfigEntry,
) -> None:
    ws_client = await hass_ws_client(hass)
    await ws_client.send_json(
        {
            ID: 1,
            TYPE: "matter/set_wifi_cred...
[ "def setUp(self):\n\n self.new_credentials = Credentials(\"Swift\",\"Gracegee\",\"grcee\")", "def connectToWifiSpot(wifiObject, password) :\n print(\"not yet implemented\")", "def test_wpas_config_file_set_cred(dev):\n config = \"/tmp/test_wpas_config_file.conf\"\n if os.path.exists(config):...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a list of classrooms.
def get_classrooms(char):
    if char.level < 2:
        return []
    classrooms = []
    page = char.visit(province_url).read()
    for m in re.finditer("(textePage\[2\]\[1\]\[)(\d+)(\]\[\'Texte\'\] = \')", page, re.IGNORECASE):
        classroom = Classroom(int(m.group(2)))
        start = m.end(0)
        end...
[ "def get_all_classrooms() -> List[classroom_config_domain.Classroom]:\n backend_classroom_models = classroom_models.ClassroomModel.get_all()\n classrooms: List[classroom_config_domain.Classroom] = [\n get_classroom_from_classroom_model(model)\n for model in backend_classroom_models\n ]\n r...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Synch repo on master
def synch(type="develop"):
    cluster_name = 'spiderdev'
    if type == "deploy":
        cluster_name = 'spiderdeploy'
    cmd = "starcluster sshmaster " + cluster_name + \
          " 'cd /home/spideradmin/spiderengine; git pull origin'"
    p = subprocess.call(cmd, shell=True)
    if p != 0:
        print 'Command...
[ "def sync():\n run( \"pull\", \"--rebase\", \"origin\", \"master\" )\n if run( \"push\", \"origin\", \"master\" ) != 0:\n echo(click.style('could not push to remote master - repo out of sync?', fg=\"red\"))\n return\n echo(click.style('repo synced', fg=\"green\"))", "def deploy():\n loca...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The goal is the state that we wish to reach.
def goal(self): return self.goal
[ "def is_goal(self, state):\n return state == self.goal", "def goal_test(self, current):\n\n if current.state == self.goal_state:\n return True\n else:\n return False", "def set_goal(self, goal):\n self.goal = goal", "def is_goal(state):\n return sum(sum(sta...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Checks if the current state is the goal
def is_goal(self, state): return state == self.goal
[ "def goal_test(self, current):\n\n if current.state == self.goal_state:\n return True\n else:\n return False", "def checkGoalState(self): \n #check if the place is AI Lab\n return self.place == \"AI Lab\"", "def is_goal(state):\n return sum(sum(state, [])) ==...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the successors of the current state (node)
def successors(self, new_state): return self.graph[new_state]
[ "def successors(self, tttnode):\n for i in range(self.n**2):\n if tttnode.board[i] == 0:\n lstate = list(tttnode.board) # create a list to manipulate\n lstate[i] = tttnode.nextplayer # fill an empty space\n \n # before we yield the succe...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
A function that generates reliability diagrams.
def reliability_diagram(fig, fcst, verif, **kwargs):
    levs = kwargs.get('levs', np.arange(0, 101, 5))
    lev_int = kwargs.get('lev_int', .5*(levs[1]-levs[0]))
    bootstrap = kwargs.get('bootstrap', False)
    n_bootstraps = kwargs.get('n_bootstraps', 1000)
    n_blocks = kwargs.get('n_blocks', 1000)
    block_length = k...
[ "def reliability(self) -> str:", "def display_sampler_diags(fit):\n rhat_worst,n_eff_int_site=get_sampler_diags(fit)\n if (rhat_worst>1.1)|(rhat_worst<0.9):\n rhatlabel.button_style='danger'\n else:\n rhatlabel.button_style='success'\n if n_eff_int_site<1000:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Register a local utility. factory: the factory that creates the local utility. provides: the interface the utility should be looked up with. name: the name of the utility. setup: a callable that receives the utility as its single argument; it is called after the utility has been created and stored. public: if False, the utility...
def local_utility(factory, provides=None, name='', setup=None, public=False, name_in_container=None):
[ "def register_utility(self, name, price):\n if name not in bst.stream_utility_prices:\n docname = name.lower()\n methname = docname.replace(' ', '_').replace('-', '_')\n flowmethname = f\"get_{methname}_flow\"\n costmethname = f\"get_{methname}_cost\"\n repname = repr(name)\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the nearest enclosing `grok.Application`.
def getApplication():
[ "def _get_parent(self) -> \"adsk::core::Ptr< adsk::core::Application >\" :\n return _core.Document__get_parent(self)", "def _get_main_window(self):\n return self._window_ref()", "def getApplication(self):\r\n return self.app", "def current_app(self):\n return self.app", "def get_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initialization method to kick start the optimization process. It is a combination of points passed by the user, and randomly sampled ones.
def init(self, init_points=5):
    # Generate random points
    l = [np.random.uniform(x[0], x[1], size=init_points) for x in self.bounds]

    # Concatenate new random points to possible existing
    # points from self.explore method.
    self.init_points += list(map(list, zip(*l)))

    # Cre...
[ "def init(self, init_points):\n # Concatenate new random points to possible existing\n # points from self.explore method.\n rand_points = self.space.random_points(init_points)\n self.init_points.extend(rand_points)\n\n # Evaluate target function at all initialization points\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
From the new_sw_collections.csv, extract the attributes of each sidewalk and add them onto the related edges.
def join_attributes_to_node(G):
    for idx, row in sw.iterrows():
        coordinates = row["v_coordinates"][1:-1].split(',')
        xv = "%.7f" % float(coordinates[0])
        yv = "%.7f" % float(coordinates[1])
        v = '(' + str(xv) + ', ' + str(yv) + ')'  # end node
        coordinates = row["u_coordi...
[ "def add_multi_link_attributes(self): \n for (u, v) in self.G.edges(): \n self.G.add_edge(u, v, w = self.weight_edge_list[(u,v)])", "def edge_features(self):", "def add_edges_from(self, ebunch):\n for (source, target, new_attr) in ebunch:\n self.ad...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Store the start point to a geojson file.
def start_pt_to_geojson(start_node, filename):
    node = start_node.strip('()').split(', ')
    for i in range(len(node)):
        node[i] = float(node[i])
    vis_point_d = dict()
    vis_point_d['type'] = "FeatureCollection"
    vis_point_d['features'] = []
    geometry = dict()
    geometry['geometry'] = dict()
    ...
[ "def get_default_location(self):\n address = input(\"Enter Default Location: \").replace(\" \", \"+\")\n if os.path.exists(self.user_data):\n with open(self.user_data) as infile:\n self.data = json.load(infile)\n self.data['add'] = str(address)\n with open(self....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
identifies features prior to preprocessing. These are saved in the dictionary under the tag 'weightFeatures', as they were weighted features when this was implemented for Naive Bayes. This returns the tweet dictionary with the features.
def preFeatures(tweetData, upper, repitition):
    newData = {}
    newData['tweets'] = {}
    tweetIDs = tweetData['tweets'].keys()
    for tweetID in tweetIDs:
        weightFeatures = []
        answers = tweetData['tweets'][tweetID]['answers']
        words = tweetData['tweets'][tweetID]['words']
        tags = tw...
[ "def get_ts_features_to_preprocess(self):", "def preProcessing(tweetData, conflate, lower, negate, hashtag):\n\n newData = {}\n newData['tweets'] = {}\n tweetIDs = tweetData['tweets'].keys()\n for tweetID in tweetIDs:\n wfeatures = tweetData['tweets'][tweetID].get('weightFeatures')\n ans...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
identifies words that are completely uppercase and are greater than 3 characters in length. It returns the feature 'UPPER', giving us a count of all uppercase words
def isUpper(words):
    wFeatures = []
    for i in range(len(words)):
        if words[i].isupper() and len(words[i]) > 3:
            # append the whole feature string; += with a string would add it
            # to the list character by character
            wFeatures.append("UPPER")
    return wFeatures
[ "def upper_case_words(token_list):\n return [x.isupper() and len(x[0]) > 1 for x in token_list].count(True)", "def count_upper(sentence):\n upper = 0\n for c in sentence:\n if(c.isupper()):\n upper += 1\n\n return upper", "def get_is_capital_count(self):\n with open(self.fil...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
identifies words that have 2 or more repeated characters (such as the word greeeeat). It returns the feature 'REPEATED', giving us a count of all words with repeated characters
def repeatedChars(words):
    wFeatures = []
    for i in range(len(words)):
        rgx = re.compile(r"(\w)\1{2,}")  # matches same char, of same case
        if rgx.search(words[i]):
            m = rgx.search(words[i]).group()[1:]
            feat = re.sub(m, '', words[i])
            while rgx.search(feat):
                ...
[ "def duplicate_count(text):\n return len([x for x in set(text.lower()) if text.lower().count(x) > 1])", "def word_count(self):\n\n # Split by non-alphanumerical boundaires\n split_text = re.split('\\W',self.text.lower())\n\n # Count occurences\n counts = {}\n for word in split_text:\n if wo...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
preprocess tweetData by modifying both 'words' and 'answers'. conflate: objectiveORneutral becomes neutral. lower: casefolds words. negate: negates words occurring after a negative word and before punctuation. hashtag: removes '' in hashtags.
def preProcessing(tweetData, conflate, lower, negate, hashtag):
    newData = {}
    newData['tweets'] = {}
    tweetIDs = tweetData['tweets'].keys()
    for tweetID in tweetIDs:
        wfeatures = tweetData['tweets'][tweetID].get('weightFeatures')
        answers = tweetData['tweets'][tweetID]['answers']
        wor...
[ "def preprocessing(company, lang):\n\n # get tweets\n tweets = np.array(execute(\"SELECT * FROM tweet WHERE searchterm = '@\" + company + \"'\"))\n tweets = tweets[:,2]\n\n # count retweets\n pattern = re.compile(\"^RT \")\n rt_tweets = [ tweet for tweet in tweets if pattern.match(tweet) ]\n\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
conflates answer set, such that objectiveORneutral becomes neutral. returns the conflated answers
def conflateAnswers(answers):
    if 'objective' in answers or 'neutral' in answers:
        answers = ['neutral']
    return answers
[ "def any_answered(self) -> Set[str]:\n return reduce(set.union, self.answers, initial=set())", "def all_answered(self) -> Set[str]:\n return reduce(set.intersection, self.answers, initial=self.any_answered)", "def doCorrectSetSubsumption(self):\r\n subsumer = None\r\n for ref in self...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
lowers words in our word list. returns word list in all lowercase
def lowerWords(wordList):
    newWords = []
    for i in range(len(wordList)):
        newWords.append(wordList[i].lower())
    return newWords
[ "def to_lowercase(self, words):\n new_words = []\n for word in words:\n new_word = word.lower()\n new_words.append(new_word)\n return new_words", "def problem4_1(wordlist):\n print(wordlist)\n wordlist.sort(key = str.lower)\n print(wordlist)", "def lower_list(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
negates words found between a negative word in our negateWordList (or words ending in n't) and a punctuation mark
def negateWords(words, negateWordList):
    newWords = []
    negation = False
    for word in words:
        # append words
        if negation:
            newWords.append('NOT_' + word)
        else:
            newWords.append(word)
        # set negation to True or False
        if word.lower() in nega...
[ "def replaceNegation(tokens):\n count = 0\n for i in range(len(tokens)):\n word = tokens[i].lower().strip(specialChar)\n if (word == \"no\" or word == \"not\" or word.count(\"n't\") > 0):\n # tokens[i] = 'negation'\n count += 1\n return tokens, count", "def isNegative(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
removes the most common words in all tweets. It removes up to num words. It returns the dictionary with the common words
def removeStopWords(tweetData, num):
    commonWords = findMostCommonWords(tweetData, num)
    tweetIDs = tweetData["tweets"].keys()
    for tweetID in tweetIDs:
        words = tweetData["tweets"][tweetID]["words"]
        newWords = []
        newTags = []
        for word in words:
            if not word in common...
[ "def findMostCommonWords(tweetData, num):\n \n count = defaultdict(int)\n tweetIDs = tweetData[\"tweets\"].keys()\n for tweetID in tweetIDs:\n words = tweetData[\"tweets\"][tweetID][\"words\"]\n for w in words: #accumulate count of words\n count[w] += 1\n\n wordCountPairs = c...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
finds the most common words in all tweets and returns them. this is used as a helper function for removeStopWords
def findMostCommonWords(tweetData, num):
    count = defaultdict(int)
    tweetIDs = tweetData["tweets"].keys()
    for tweetID in tweetIDs:
        words = tweetData["tweets"][tweetID]["words"]
        for w in words:  # accumulate count of words
            count[w] += 1
    wordCountPairs = count.items()
    # s...
[ "def get_top_words(ce_object, num_words=100):\n all_flattened = list(chain.from_iterable(ce_object.spacy_contexts))\n\n all_counter = Counter(all_flattened)\n\n if num_words > len(all_counter):\n print(\n f\"{num_words} requested, but there are only {len(aggregated_counters)} words.\"\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
removes other words. Specifically, if url is true, it removes urls. If tweetHandle is true, it removes twitter handles (tag '@')
def removeOtherWords(tweetData, url, tweetHandle):
    tweetIDs = tweetData["tweets"].keys()
    for tweetID in tweetIDs:
        words = tweetData["tweets"][tweetID]["words"]
        tags = tweetData["tweets"][tweetID].get("tags")
        removeIndices = []
        for i in range(len(words)):
            word = words...
[ "def clean_tweet(tweet):\n\ttweet = re.sub(r\"\\bhttps?:\\/\\/\\S+\\b\", '<LINK>', tweet) # Replace links with <LINK> tag\n\ttweet = re.sub(r\"@\\w+\", '<USER> ', tweet) # Replace @user with <USER> tag\n\ttweet = re.sub(r\"😺✏ — ((?s).*?)<LINK>\", r\"<CATASK> \\1\", tweet) # Add a tag to CuriousCat answers\n\ttweet...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
adds more emoticons for each one seen and returns them as features
def isEmoticon(words, tags):
    wFeatures = []
    for i in range(len(words)):
        if tags[i] == 'E':
            if words[i] in [':)','(:',':-)',':))',';)',';-)','=)',
                            '=))',';]','<3','^_^', '^.^',':)))', '(;', '<33', '<333', '<3333']:
                feat = 'POSITIVE'
                ...
[ "def demoji(tokens):\n emoji_description = []\n for token in tokens:\n detect = emoji.demojize(token)\n emoji_description.append(detect)\n return emoji_description", "def extract_emoji(text):\n the_emoji = None\n for emoji_type, code_point, emoji_list, name, parent in EMOJIS:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
takes a list of words and segments it into discontiguous ngrams
def discontiguousNgrams(ngramList, n):
    newGrams = []
    for ngram in ngramList:
        for i in range(1, (n-1)):
            temp = ngram.split()
            temp[i] = '*'
            temp = ' '.join(temp)
            newGrams.append(temp)
    return newGrams
[ "def ngrams(tokens):\n n_tokens = len(tokens)\n for i in xrange(n_tokens):\n for j in xrange(i+3, min(n_tokens, i+3)+1):\n yield tokens[i:j]", "def create_ngrams(doc, n=2):\n ngrams = [[[doc[j] for j in range(i, i + n - 1)], doc[i + n - 1]] for i in\n range(0, len(doc) - n)...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Defines and validates params for add_tags
def add_tags_args(): return {"id": fields.UUID(required=True, location="view_args"), "tags": fields.List(fields.String(), missing=[])}
[ "def add_tags(self, *tags):\n\n try:\n tag_list = self.data[\"tags\"]\n except KeyError:\n tag_list = []\n\n tag_list.extend(tags)\n \n self.data[\"tags\"] = list(set(tag_list))", "def _add_tag(prev_tags, tag_list):\n params = {}\n logger.info...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Defines and validates suggested tags params
def suggested_tags_args(): return { "team_id": fields.UUID(required=True), "tags": fields.List(fields.String(), missing=[]), "min_support": fields.Number(missing=0.25, validate=lambda val: val <= 1), "limit": fields.Integer(missing=3), }
[ "def _prepare_tags_for_tuning(self):\n\n # Add tags from Estimator class\n estimator = self.estimator or self.estimator_dict[sorted(self.estimator_dict.keys())[0]]\n\n estimator_tags = getattr(estimator, \"tags\", []) or []\n\n if self.tags is None and len(estimator_tags) > 0:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This algorithm will start out with aggressive blocking and then turn it down as the number of moves in the game increases. The thought is that as the game progresses there will be fewer legal moves and therefore the blocking effect should become smaller and smaller.
def decrease_blocking_improved_score(game, player):
    own_moves = game.get_legal_moves(player)
    opp_moves = game.get_legal_moves(game.get_opponent(player))
    move_count_factor = 0.5 * game.move_count
    blocking_factor = (2 * len(opp_moves) - move_count_factor)
    return float((len(own_moves) - blocking_fa...
[ "def CollectBlocks():\n\n\tint blocksPicked = 0\n\n\twhile (VisibleBlocks() && blocksPicked < Constants.BLOCK_CAPACITY):\n\t\tGoToBlocks()\n\t\tExtendFlaps()\n\t\tPushBlocks()\n\t\t# blocksPicked should be updated during one of these... \n\t\t# need vision to count and push blocks", "def walk_and_avoid(self):\n\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tars a group of files together into a tarball
def tarball_files(work_dir, tar_name, uuid=None, files=None):
    with tarfile.open(os.path.join(work_dir, tar_name), 'w:gz') as f_out:
        for fname in files:
            if uuid:
                f_out.add(os.path.join(work_dir, fname), arcname=uuid + '.' + fname)
            else:
                f_out.add(os.pat...
[ "def __package(self, files, dc, prefix=\"\"):\n\t\tname = dc[\"module\"] + \"-\" + dc[\"version\"] + \".tar\"\n\t\ttar = tarfile.open(name=os.path.join(prefix, name), mode=\"w\")\n\t\tfor file in files:\n\t\t\ttar.add(file, arcname=file.replace(Config().RootDirectory, \"\"))\n\t\ttar.close()", "def compress_all_d...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Downloads a sample from CGHub via GeneTorrent.
def download_from_genetorrent(job, input_args, analysis_id):
    work_dir = job.fileStore.getLocalTempDir()
    folder_path = os.path.join(work_dir, os.path.basename(analysis_id))
    sudo = input_args['sudo']
    shutil.copy(input_args['genetorrent_key'], os.path.join(work_dir, 'cghub.key'))
    parameters = ['-vv', '...
[ "def inception_v3_download():", "def test_ensembl_genome_download_links():\n p = genomepy.provider.ProviderBase.create(\"Ensembl\")\n\n for genome in [\"GRCz11\", \"GRCh38.p13\"]:\n p.get_genome_download_link(genome)", "def test_ensemblgenomes_genome_download_links():\n p = genomepy.provider.Pro...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
>>> list(partitions([], 10)) [] >>> list(partitions([1,2,3,4,5], 1)) [[1], [2], [3], [4], [5]] >>> list(partitions([1,2,3,4,5], 2)) [[1, 2], [3, 4], [5]] >>> list(partitions([1,2,3,4,5], 5)) [[1, 2, 3, 4, 5]]
def partitions(l, partition_size):
    for i in xrange(0, len(l), partition_size):
        yield l[i:i+partition_size]
[ "def partition(n):\n if n == 0:\n return [[]]\n\n partitions = []\n for k in [1, 2, 3]:\n if k > n:\n break\n for subpartition in partition(n - k):\n partitions.append([k] + subpartition)\n return partitions", "def _partition(n, top):\n\n\tif top == 1:\n\t\tr...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parses the various input formats then launches the job batcher
def parse_input_samples(job, input_args):
    config = input_args['config']
    sample_dir = input_args['dir']
    sample_urls = input_args['sample_urls']
    genetorrent = input_args['genetorrent']
    samples = None
    if config:
        samples = parse_config(config)
    elif sample_dir:
        files = os.listdir(...
[ "def batch_main(args):\n command = args[\"command\"]\n with BatchClient(get_connection_settings(args), from_cli=True) as batch_client:\n if command == \"transcribe\":\n for filename in args[\"files\"]:\n print(f\"Processing {filename}\\n==========\")\n job_id = ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Converts sample.tar(.gz) into two fastq files.
def process_sample_tar(job, job_vars):
    # Unpack variables
    input_args, ids = job_vars
    work_dir = job.fileStore.getLocalTempDir()
    ids['R.fastq'] = None
    # I/O
    read_from_filestore(job, work_dir, ids, 'sample.tar')
    sample_tar = os.path.join(work_dir, 'sample.tar')
    # Untar File and concat
    ...
[ "def process_sample(job, inputs, tar_id):\n job.fileStore.logToMaster('Processing sample into read pairs: {}'.format(inputs.uuid))\n work_dir = job.fileStore.getLocalTempDir()\n # I/O\n tar_path = job.fileStore.readGlobalFile(tar_id, os.path.join(work_dir, 'sample.tar'))\n # Untar File and concat\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculates the calories for the cookie of the given proportion.
def calculate_calories(ingredients, proportion): return sum([x[0] * x[1] for x in zip(proportion, [j[-1] for j in ingredients])])
[ "def calculate_calories_proportions(self):\n carbs_calories = self.carb_content * 4\n protein_calories = self.protein_content * 4\n fat_calories = self.fat_content * 9\n alcohol_calories = self.alcohol_content * 7\n total_calories = carbs_calories + protein_calories + fat_calories...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns matrix of possible ingredient proportions.
def ingredient_proportions(ingredient_count, max_tblsp):
    return it.ifilter(lambda x: sum(x) == max_tblsp,
                      it.product(range(max_tblsp + 1), repeat=ingredient_count))
[ "def proportion_vector(proportions):\n\n vec = []\n for c in string.ascii_lowercase:\n try:\n vec.append(proportions[c])\n except KeyError:\n vec.append(0)\n return numpy.array(vec)", "def score_properties_for_pro...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns list of scores for ingredients in the given proportion.
def score_properties_for_proportion(ingredients, proportion):
    scores = []
    # for each ingredient multiply its properties by the individual
    # proportion for that ingredient; accumulate them into a list
    # which is then returned
    for index in range(len(proportion)):
        scores.append([proportion[inde...
[ "def score_proportion(ingredients, proportion):\n # Wrap intermediate functions\n return multiply_properties_scores(\n sum_properties_scores(\n score_properties_for_proportion(ingredients, proportion)))", "def calculate_calories(ingredients, proportion):\n return sum([x[0] * x[1] for x ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sum the scores for each property, i.e. sum the columns of the scores array.
def sum_properties_scores(scores): return [sum(t) for t in [x for x in zip(*scores)]]
[ "def score_aggregation(self, word_scores):\n score = np.sum(word_scores)\n score *= len(word_scores)**(-self.normalisation_index)\n return score", "def calculate_score(self, *args):\n\n score = 0\n for p in args:\n if p not in self.bitmap:\n raise Prope...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the score for the given proportion of ingredients.
def score_proportion(ingredients, proportion):
    # Wrap intermediate functions
    return multiply_properties_scores(
        sum_properties_scores(
            score_properties_for_proportion(ingredients, proportion)))
[ "def score_properties_for_proportion(ingredients, proportion):\n scores = []\n # for each ingredient multiply its properties by the individual\n # proportion for that ingredient; accumulate them into a list\n # which is then returned\n for index in range(len(proportion)):\n scores.append([prop...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Converts provided ascii string into an array of ICS flags
def ics_converter(ascii_string):
    ics_flags = []
    convert_ascii = ascii_string.upper()
    for letter in convert_ascii:
        if letter in ICS:
            ics_flags.append('Flag/' + ICS.get(letter))
        else:
            print("WARNING: \'{}\' in \"{}\" is not convertable!".format(letter, ascii_string))
    return ics_flags
[ "def __get_bin_list(string):\n return [1 if str(c).isupper() else 0 for c in string]", "def str_to_bits(text: str) -> np.ndarray:\n msg_bytes = text.encode('utf-8')\n bits = []\n for byte in msg_bytes:\n bits.extend([(byte >> i) & 3 for i in range(6, -1, -2)])\n bits....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates an ICS image either from user input on stdin or from a file.
def user_decision(user_input):
    if user_input:
        ascii_string = input("Input to convert to ICS Flags: ")
        image_creator(ics_converter(ascii_string), ascii_string)
    else:
        while True:
            try:
                input_file = input("File path of txt file to convert: ")
                include_plain = input("Include plain text? (y/n): ")
                with...
[ "def create_output_image(img, instances):\n pass", "def new_image():\n names = list(task.argv)\n if not names:\n if Project.prompt and task.arg_prompt is not None:\n name = prompt(task.arg_prompt)\n if name is not None and name.strip():\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Synchronizes the info between the new client and the other clients in the meeting.
def sync_info(self, new_par: Participant):
    with self.participants_lock:
        pars = self.participants[new_par.meeting_id]
        # send the other clients info to the new client
        msg = (Info.CLIENTS_INFO,
               [par.client_info for client_id, par in pars.items()
                ...
[ "def _sync_clients_with(self, client_data_list, server):\n if client_data_list is None:\n self._log.error('syncing the client list with server %s failed',\n server)\n return\n\n client_data_by_username = {}\n\n for client_data in client_data_list:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Broadcasts a given info msg to all the participants, except the one who sends it.
def broadcast_info_msg(self, sender_par: Participant, msg: tuple):
    packet = create_packet(pickle.dumps(msg))
    self.broadcast(sender_par, packet)
[ "def broadcast(self, text, sender=None):\n for actor in self.players:\n if actor==sender:continue\n actor.send(text)", "def broadcast(self, msg, exclude_bootstrap=True, except_set: set = None, except_type: set = None):\n for other in self._get_connections(exclude_bootstrap, exc...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Handles new data received from a given participant.
def handle_new_data(self, par: Participant, data: bytes):
    msg_name, msg_data = pickle.loads(data)
    if msg_name == Info.TOGGLE_AUDIO:
        par.client_info.is_audio_on = not par.client_info.is_audio_on
    elif msg_name == Info.TOGGLE_VIDEO:
        par.client_info.is_video_on = not par.clie...
[ "def dataReceived(data):", "def _data_handler(self, msg):\n\n if len(msg) != 2:\n self.logger.info('skipping malformed message: %s' % str(msg))\n else:\n\n # When a message arrives, increase the corresponding received_count\n in_id = msg[0]\n out_id, data ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Checks if a message received from sender_par is an opposite message (if it's in Info.OPPOSITE_MSGS). If it is, it handles the message, otherwise does nothing.
def handle_opposite_msg(self, sender_par: Participant, msg_name, msg_data):
    for start_msg, stop_msg in Info.OPPOSITE_MSGS.items():
        if msg_name == start_msg:
            if sender_par.meeting_id not in self.last_status_msgs:
                self.last_status_msgs[sender_par.meeting_id] = []
            ...
[ "def handleMessage(self, protocol, msg):\n return False", "def handle_prvmsg(self, msg):\n if self.nick is not None:\n regex = re.compile(\"^PRVMSG (#\\w{0,199}) (.*)$\")\n match = regex.match(msg)\n\n if match:\n #Mulitplex the message to all clients ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Receives a participant who has disconnected, and informs all the other participants.
def par_disconnected(self, par: Participant):
    super(InfoServer, self).par_disconnected(par)
    full_par_id = par.meeting_id + par.client_id
    msgs_to_remove = []
    for msg in self.last_status_msgs.get(par.meeting_id, []):
        msg_name, msg_data = msg
        # if the client who lef...
[ "def on_peer_disconnected(peer, peer_count):", "async def on_disconnect(data: dict):\n logger.info(f\"Disconnected: {data}\")\n room_id = data.get(\"room_id\")\n session_id = data.get(\"session_id\")\n r = Room(id=room_id)\n await redis.read_model(r)\n r.participants.pop(session_id, None)\n a...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
serialize message with numpy array types into buffer
def serialize_numpy(self, buff, numpy):
    try:
        pass
    except struct.error as se:
        self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(_x))))
    except TypeError as te:
        self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(_x))))
[ "def serialize_numpy(self, buff, numpy):\n try:\n length = len(self.messages)\n buff.write(_struct_I.pack(length))\n for val1 in self.messages:\n _x = val1.type\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Override model iterator. It should yield the model field name and its value as a tuple.
def __iter__(self):
    for n in self._meta.fields.keys():
        yield (n, getattr(self, n, None))
[ "def __iter__(self):\n for name, field in self._fields.items():\n value = getattr(self, name)\n yield name, value", "def field_values_gen(self):\n fvals = FieldValue.objects.filter(event_id=self)\n lut = self.datasheet_id.internal_fieldname_lookup\n for fval in fv...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets behavioral responses or stimulus classes from the trial_info file and encodes them as onehot.
def get_targets(decode_for, raw_path, elec_type, n_chans, only_correct_trials=True, onehot=True):
    # Trial info holds behavioral responses and stimulus classes
    tinfo_path = raw_path + 'trial_info.mat'

    # Get behavioral responses or stimulus classes, depending on user input
    if decode_...
[ "def get_classes(self):\n\n with open('imagenet_class_index.json') as f:\n class_dict = json.load(f)\n self.classes = [class_dict[str(i)][1] for i in range(len(class_dict))]", "def infer(self):\r\n counter = 0\r\n output = {}\r\n while True:\r\n batch = sel...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets sample image classes for all trials in a given session.
def get_samples(tinfo_path):
    with h5py.File(tinfo_path, 'r') as f:
        # 'trial_info.mat' holds only 1 structure
        tinfo = f['trial_info']
        # Extract and return sample classes. subtract 1 to have classes range
        # from 0 to 4 instead of 1 to 5
        return np.array([k-1 for k in t...
[ "def get_images(cls):\n # Get all images\n images = ImageManager.get_created_images()\n\n # Instantiate None class_image array\n class_images = None\n # If there is images\n if images:\n # Get all class images\n class_images = [image for image in image...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns array of unique area names for recordings of a given session.
def get_area_names(rinfo_path):
    with h5py.File(rinfo_path, 'r') as f:
        info = f.get('recording_info')
        area = info['area']
        area_names = []
        for i in range(area.shape[0]):
            for j in range(area.shape[1]):
                curr_idx = area[i][j]
                curr_area = info[c...
[ "def get_all_areas():\n con = connect('measures.sqlite')\n cur = con.cursor()\n cur.execute(\"select * from area\")\n results = []\n for row in cur.fetchall():\n results.append(row)\n\n con.close()\n\n return results", "def determine_global_occupied_region(self):\r\n\r\n occupie...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Find an image with the given id. Returns either an svg file or a 404 error.
def get_image(id):
    path = None
    for root, dirs, files in os.walk(IMAGE_DIR):
        results = [file for file in files if file.startswith('{0:04d}'.format(id))]
        if results:
            path = results[0]
            break
    if path:
        return static_file(path, root=IMAGE_DIR)
    else:
        abort(404)
[ "def _get_image_by_id(self, image_id: \"std::uuid\"):\n try:\n return self._glance.images.get(image_id)\n except glance_exceptions.HTTPNotFound:\n return None", "def find(self, id):\r\n try:\r\n detailsDict = self.imageDetails(id)\r\n except CloudServer...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert a config dictionary, codex dictionary, and parameter vector to a pupil.
def config_codex_params_to_pupil(config, codex, params, defocus=0):
    s = config
    pupil_pass_zernikes = {key: value for (key, value) in zip(codex.values(), params)}
    pupil_pass_zernikes['Z4'] += defocus
    return FringeZernike(**pupil_pass_zernikes, base=1, epd...
[ "def from_config(cls, config: Dict[str, Any]) -> \"ImgPilToPatchesAndImage\":\n return cls(**config)", "def from_config(cls, config: Dict[str, Any]) -> \"ImgPilRandomPhotometric\":\n p = config.get(\"p\", 0.66)\n return cls(p=p)", "def image_processing_parameters(recipe_config):\n params...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Cost function that compares a measured or simulated T/S MTF to a simulated one.
def mtf_cost_core_main(true_tan, true_sag, sim_tan, sim_sag):
    difference_t = true_tan - sim_tan
    difference_s = true_sag - sim_sag
    return difference_t, difference_s
[ "def test_cost_function(self):\n\n np.random.seed(42)\n\n def check(D, N, mu=None, Lambda=None, rho=None, A=None):\n if mu is None:\n mu = np.zeros(D)\n if Lambda is None:\n Lambda = np.identity(D)\n if rho is None:\n rho = ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add the T, S differences between MTF curves.
def _mtf_cost_core_addreduce(difference_t, difference_s):
    return difference_t + difference_s
[ "def mtf_cost_core_main(true_tan, true_sag, sim_tan, sim_sag):\n difference_t = true_tan - sim_tan\n difference_s = true_sag - sim_sag\n return difference_t, difference_s", "def eRates(self, Gp, Gm, eDiffp, eDiffm, corrDiffpm, tp, tm):\r\n #For now we take the derivative of the function fp and fm...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set number of variables available in self.df pandas.DataFrame
def _set_vars(self):
    if self.vars == []:
        for v in self.df.columns:
            if v in VARIABLES.keys():
                self.vars.append(VARIABLES[v])
[ "def variables_num(self):\n raise NotImplementedError()", "def initialize_dataframe(self):\n # TODO: check if the set of columns in dataframe after initialiation is exactly\n # the set of base features.\n raise NotImplementedError", "def set_feature_number(self):\r\n self.n_fe...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Loads data from a device file into self.df as a pandas.DataFrame. Loads burst-averaged data into self.df_avg as a pandas.DataFrame.
def _load_data(self):
    self.df = None
    self.df_avg = None
    if self.format == "h5":
        data_path = self.get_H5_path()
        self.df = pd.read_hdf(data_path, "df")
        try:
            self.df_avg = pd.read_hdf(self.get_H5_avg_path(), "df")
        except FileNotFoundEr...
[ "def set_df_avg(self, save=False):\n self.set_ssc()\n self.df_avg = self.clean_df(self.df)\n self.df_avg[\"ssc_sd\"] = self.df.ssc.resample(\"%ss\" % self.i).std()\n if self.dtype == \"bedframe\":\n self._calc_bursts()\n self.save_H5(avg=save)", "def stage_data(self,d...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculates U, T and H for each valid burst.
def _calc_bursts(self):
    # Orbital speed, sig wave height and period
    if self.dtype != "bedframe":
        raise NotImplementedError("Not available in current class")
    self.df_avg["u"] = np.NaN
    self.df_avg["T"] = np.NaN
    self.df_avg["H"] = np.NaN
    df = self.clean_df(self.d...
[ "def get_Hu():\n \n ue = np.zeros((nx+1,ny)) \n uw = np.zeros((nx+1,ny))\n un = np.zeros((nx+1,ny))\n us = np.zeros((nx+1,ny))\n vn = np.zeros((nx+1,ny))\n vs = np.zeros((nx+1,ny))\n τxxe = np.zeros((nx+1,ny))\n τxxw = np.zeros((nx+1,ny))\n τxyn = np.zeros((nx+1,ny))\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Classify the tide trend as Ebb or Flood depending on the next row's depth.
def set_tide(self):
    if "Tide" not in self.df_avg.columns:
        self.df_avg["Tide"] = np.where(
            self.df_avg["depth_00"] > self.df_avg["depth_00"].shift(-1),
            "Ebb", "Flood")
        # last row doesn't have a next, compare with previous row
        last_rows =...
[ "def depth(self) -> float:", "def _norm_depth(self):\n self.df.depth = self.df.depth / self.df.depth.mean()", "def compute_depth_errors(gt, pred):\n thresh = np.maximum((gt / pred), (pred / gt))\n a1 = np.mean((thresh < 1.25 ).astype(np.float))\n a2 = np.mean((thresh < 1.25 ** ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get burst data between the start and end dates.
def get_burst(self, start=None, end=None, method="fourier", df=None):
    if df is None:
        df = self.df
    bvars = ["salinity_00", "temperature_00", "seapressure_00", "depth_00"]
    dfburst = df[(df.index >= start) & (df.index < end)][:self.sr][bv...
[ "def getBookingBrains(self, start_date=None, end_date=None, **kwargs):\n\n # Initialize\n ctool = getToolByName(self, 'portal_catalog')\n btool = getToolByName(self, 'portal_booking')\n center_obj = self.getBookingCenter()\n center_path = '/'.join(center_obj.getPhysicalPath())\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculates SSC, clean data, average and save pandas.DataFrame in self.data_path_avg file
def set_df_avg(self, save=False):
    self.set_ssc()
    self.df_avg = self.clean_df(self.df)
    self.df_avg["ssc_sd"] = self.df.ssc.resample("%ss" % self.i).std()
    if self.dtype == "bedframe":
        self._calc_bursts()
    self.save_H5(avg=save)
[ "def _load_data(self):\n self.df = None\n self.df_avg = None\n if self.format == \"h5\":\n data_path = self.get_H5_path()\n self.df = pd.read_hdf(data_path, \"df\")\n try:\n self.df_avg = pd.read_hdf(self.get_H5_avg_path(), \"df\")\n ex...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get % of time in the water (depth available)
def get_time_stats(self):
    perc = (len(self.df_avg[self.df_avg.depth_00 > 0.025])
            / len(self.df_avg)) * 100
    self.logger.info("Time in the water %f", perc)
    return round(perc, 2)
[ "def fraction_time_using_disk(self): \n total_disk_write_time = 0\n total_runtime = 0\n for id, stage in self.stages.iteritems():\n stage_disk_write_time = 0\n stage_total_runtime = 0\n for task in stage.tasks:\n stage_disk_write_time += task.disk_time()\n stage_total_runtime +...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Plot SSC and U values per tidal cycle defined by given intervals.
def plot_tidal_ssc(self, intervals=None):
    if intervals is None:
        intervals = STORM_INTERVALS[self.__str__()]
    else:
        intervals = intervals[self.__str__()]
    tidal_vars = ["hours", "u", "ssc", "H", "depth_00", "T"]
    if self.df_tidal is None:
        tdelta = pd.Time...
[ "def sn_plot(self):\n import matplotlib.pyplot as plt\n\n # Plot of the basic SN curve according to GL2010\n sigma_1 = self.Rp * (1 - self.R) / self.gamma_M\n # Number of load cycles at upper fatigue limit\n N_1 = self.N_D * (2 * self.sigma_D / sigma_1) ** self.m1\n N_e = 1...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get signed difference, in hours, between the row index (name) and a given mid_value timestamp.
def get_hours(row, mid_value):
    if row.name < mid_value:
        return -(mid_value - row.name).total_seconds() / 3600
    else:
        return (row.name - mid_value).total_seconds() / 3600
[ "def test_seconds_df_timestamp_delta(seconds_df):\n deltas = (seconds_df['timestamp'] - seconds_df['timestamp'].shift())[1:]\n assert np.abs(1.0 - deltas.min()) <= np.finfo(np.float64).eps\n assert np.abs(1.0 - deltas.max()) <= np.finfo(np.float64).eps", "def getseconds(self, key) -> float:\n\t\tvalue = ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Converts a hex encoded string to raw bytes. >>> hexDecode('49276d206b696c6c696e6720796f757220627261696e206c696b65206120706f69736f6e6f7573206d757368726f6f6d') "I'm killing your brain like a poisonous mushroom"
def hexDecode(hex_str):
    # Python 2 only; on Python 3, use bytes.fromhex(hex_str) instead
    return hex_str.decode('hex')
[ "def decode_hex(_hex):\n return binascii.a2b_hex(_hex)", "def _dehex(self, hex_string: str) -> str:\n try:\n return bytearray.fromhex(hex_string).decode()\n except ValueError:\n return hex_string", "def hex_decode(s):\n if not s:\n return \"\"\n ret = \"\"\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Converts raw bytes to base64. >>> convertToBase64("I'm killing your brain like a poisonous mushroom") 'SSdtIGtpbGxpbmcgeW91ciBicmFpbiBsaWtlIGEgcG9pc29ub3VzIG11c2hyb29t\\n'
def convertToBase64(raw_bytes):
    # Python 2 only; on Python 3, use base64.b64encode(raw_bytes) instead
    return raw_bytes.encode('base64')
[ "def encode_base64(_bytes):\n return binascii.b2a_base64(_bytes, newline=False)", "def encode_base64(s):\n bencode = base64.b64encode(s.encode(\"utf-8\"))\n return str(bencode, \"utf-8\")", "def hex_to_base64(s):\n return codecs.encode(codecs.decode(s, \"hex\"), \"base64\").decode()", "def hex_to_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Guard to ensure that an event kind is valid.
def _guard_kind(kind):
    if kind.startswith("numba:") and kind not in _builtin_kinds:
        msg = (f"{kind} is not a valid event kind, "
               "it starts with the reserved prefix 'numba:'")
        raise ValueError(msg)
    return kind
[ "def __CheckExtensionKind(self, kind):\n\n if kind not in base.extension_kinds:\n raise qm.cmdline.CommandError, \\\n qm.error(\"invalid extension kind\",\n kind = kind)", "def is_valid_input(kind):\n if kind not in [\"bit\", \"phase\", \"both\"]:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Is it a START event? Returns True if the event status is START, False otherwise.
def is_start(self):
    return self._status == EventStatus.START
[ "def is_started(self) -> bool:\n return self.__timer is not None", "def shouldStart(self):\n return self.startButton.pressed()", "async def is_start_in_required(self):\n runlevel = await self.create_and_send_command(STARTLEVEL)\n return not bool(int(runlevel))", "def started(self) ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }