query (string, lengths 9 to 9.05k) | document (string, lengths 10 to 222k) | negatives (list, lengths 19 to 20) | metadata (dict) |
|---|---|---|---|
Backtest the portfolio and return a DataFrame containing the equity curve and the percentage returns. | def backtest_portfolio(self):
portfolio = pd.DataFrame(index=self.positions.index)
pos_diff = self.positions.diff()
portfolio['price_diff'] = self.bars['Close_Out']-self.bars['Open_Out']
portfolio['price_diff'][0:5] = 0.0
portfolio['profit'] = self.positions[... | [
"def backtest_portfolio(self):\r\n\r\n # Set the portfolio object to have the same time period\r\n # as the positions DataFrame\r\n portfolio = pd.DataFrame(index=self.positions.index)\r\n pos_diff = self.positions.diff()\r\n\r\n # Work out the intraday profit of the difference\r\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Function that prints the helpers of a certain command | def print_helpers(parser, key):
if len(parser.helpers[key]) != 0:
msg = "Las opciones para el comando " + key + " son: -"
for opcion in parser.helpers[key]:
msg += opcion + " "
else:
msg = "El comando no tiene opciones"
return msg | [
"def command_help():\n for command_name, command in commands.items():\n print('{}: {}'.format(command_name, command.__doc__))",
"def commands():\r\n print(\"HELP: Display the commands list\"\r\n \"\\nGO (area): Move to the inputted area\"\r\n \"\\nLOOK AROUND: Look around yo... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Modify the walking turtle program so that, rather than a 90 degree left or right turn, the angle of the turn is determined randomly at each step. Modify the turtle walk program so that you have two turtles, each with a random starting location. Keep the turtles moving until one of them leaves the screen. Modify the previo... | def wandering_turtle():
u = turtle.Turtle()
u.shape("turtle")
u.color("green")
t.color("red")
for i in [t, u]:
i.penup()
i.setpos(random.randrange(-300, 300), random.randrange(-300, 300))
i.pendown()
while True:
for t1, t2 in [(t, u), (u, t)]:
coin = ... | [
"def main():\n\n\n atlas = turtle.Turtle()\n atlas.hideturtle()\n atlas.speed(10)\n drawCircle(atlas)\n\n n = 1000 #limiting the number of steps\n drunkWalk(n,atlas)\n\n screen=atlas.getscreen()\n screen.exitonclick()",
"def pickDirection():\n turtle.right(random.... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Picks points from a Binomial(n,p) and plots their histogram | def binomial_histogram(p: float, n: int, num_points: int) -> None:
data = [binomial(n,p) for _ in range(num_points)]
#print(data)
histogram= Counter(data)
#print(histogram)
plt.bar([x - 0.4 for x in histogram.keys()],
[v / num_points for v in histogram.values()],
0.8, color='0.75... | [
"def binomial_histogram(p: float, n: int, num_points: int) -> None:\n data = [binomial(n, p) for _ in range(num_points)]\n\n # use a bar chart to show the actual binomial samples\n histogram = Counter(data)\n plt.bar([x - 0.4 for x in histogram.keys()],\n [v / num_points for v in histogram.va... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
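The row above omits its imports and the `binomial` sampler it calls; the sketch below supplies plausible versions of both (the sampler is an assumption reconstructed from the call site, not code from the row) and drives the plot.

```python
# Assumed supporting pieces for binomial_histogram; binomial(n, p) is a
# hypothetical reconstruction that sums n Bernoulli(p) trials.
import random
from collections import Counter
import matplotlib.pyplot as plt

def binomial(n: int, p: float) -> int:
    return sum(1 for _ in range(n) if random.random() < p)

binomial_histogram(p=0.5, n=100, num_points=10_000)
plt.show()
```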
Loads wavdata into torch array | def load_wav_to_torch(full_path):
sampling_rate, data = read(full_path)
return torch.from_numpy(data).float(), sampling_rate | [
"def load_wav_to_torch(scp_path):\r\n data = read_matrix(scp_path).numpy().reshape(-1)\r\n data = data / MAX_WAV_VALUE\r\n return torch.from_numpy(data).float()",
"def wav_data(mono_wav):\n the_data = fft.data_from_file(mono_wav)\n return the_data",
"def load_wav_to_array(full_path):\n samplin... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
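`read` in the snippet is most likely `scipy.io.wavfile.read`, which returns `(rate, data)` in exactly the order being unpacked; a hedged usage sketch (the file path is made up):

```python
import torch
from scipy.io.wavfile import read  # returns (sampling_rate, numpy array)

audio, sr = load_wav_to_torch("example.wav")  # hypothetical path
print(audio.dtype, audio.shape, sr)           # torch.float32, (num_samples,), e.g. 44100
```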
Wipes data from a model | def model_clear():
Iris.objects.all().delete()
# Raw SQL is needed to update the system table that tracks the row number/pk id
# without resetting to 0 on a clear, the numbering will continue after objects are deleted
from django.db import connection
with connection.cursor() as cursor:
cu... | [
"def clear(self):\n self.models = {}\n self.model_ids = []",
"def wipe_data(self):\n GitController.wipe_project_data(self.git_project)",
"def on_clear_clicked(self, obj):\n self.book_model.clear()\n self.book.clear()",
"def remove_data(self, obj):\n del obj.data[self.... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
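The raw-SQL step is truncated in the row and cannot be recovered from it; purely as an illustration of what the comments describe, resetting the auto-increment counter on a SQLite backend could look like this (the table name is a guess, since Django names tables `<app>_<model>`):

```python
from django.db import connection

# Hypothetical: clear SQLite's auto-increment bookkeeping for the Iris table
# so new rows start numbering from 1 again. "demo_iris" is a placeholder name.
with connection.cursor() as cursor:
    cursor.execute("DELETE FROM sqlite_sequence WHERE name = %s", ["demo_iris"])
```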
Change the renewal date of the requested subscription. | def change_renewal_date(auth, subscription_id, date,
base_url='https://api.cratejoy.com/v1/'):
payload = json.dumps({u'end_date': date})
subscriptions_endpoint = '{}subscriptions/{}/'.format(
base_url, subscription_id)
resp = requests.put(
subscriptions_endpoint,
... | [
"def renew_subscriptions(self, cr, uid, context=None):\n today = date.today()\n\n #We renew all our subscriptions on the 14th\n #TODO SWITCH BACK TO 14\n renewal_day = 1\n\n if today.day == renewal_day:\n sale_order_object = self.pool.get('sale.order')\n\n #G... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets both arms back into a neutral pose. | def set_neutral(self):
print("Moving to neutral pose...")
self._left_arm.move_to_neutral()
self._right_arm.move_to_neutral() | [
"def set_neutral(self):\n\t\tprint (\"Moving to neutral pose...\")\n\t\tself.left_arm.move_to_neutral()\n\t\tself.right_arm.move_to_neutral()",
"def move_right_to_neutral(self):\n \n pose_target = self.create_pose_target(0.0194490701404,\t\t # Ww\n\t 0.053... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Checks a DataFrame for null values and returns the number of missing values | def null_count(df):
return df.isna().sum().sum() | [
"def null_count_alt(df):\n x = [test_df[col].isna().sum() for col in test_df.columns]\n y = 0\n for _ in x:\n y += _\n return y",
"def nnull(df: DataFrame, axis=0) -> DataFrame:\n n_missing = df.isnull().sum(axis=axis)\n p_missing = n_missing / df.shape[axis]\n return pd.DataFrame(dict... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
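A quick sanity check of the one-liner; `isna().sum().sum()` first counts per column, then totals across columns:

```python
import pandas as pd

df = pd.DataFrame({"a": [1, None, 3], "b": [None, None, 6]})
print(null_count(df))  # 3: one missing value in "a", two in "b"
```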
Checks a DataFrame for null values and returns the number of missing values | def null_count_alt(df):
x = [df[col].isna().sum() for col in df.columns]
y = 0
for _ in x:
y += _
return y | [
"def nnull(df: DataFrame, axis=0) -> DataFrame:\n n_missing = df.isnull().sum(axis=axis)\n p_missing = n_missing / df.shape[axis]\n return pd.DataFrame(dict(n_missing=n_missing, p_missing=p_missing))",
"def check_for_null(df, columns=None):\n if not columns:\n columns = df.schema.names\n \... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create a train/test split function for a data frame that returns both the training and test sets. 'frac' refers to the percent of data you would like to set aside for training | def train_test_split(df, frac):
cutoff = df.index < int(df.shape[0] * frac)
df_train = df.loc[cutoff]
df_test = df.loc[~cutoff]
return df_train, df_test | [
"def train_val_test_split(dataset, train_frac=0, val_frac=0,\n test_frac=0):\n\n DATASET_LENGTH = len(list(dataset.as_numpy_iterator()))\n\n assert(train_frac or test_frac or val_frac),\\\n \"specify at least one of the fractions\"\n assert(train_frac + test_frac + val_frac <... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
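Note that the split compares index labels against a row count, so it implicitly assumes a default 0-based RangeIndex; a minimal sketch under that assumption:

```python
import pandas as pd

df = pd.DataFrame({"x": range(10)})           # default RangeIndex 0..9
train, test = train_test_split(df, frac=0.8)
print(len(train), len(test))                  # 8 2
```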
Develop a randomization function that randomizes all of a dataframe's cells then returns that randomized dataframe. This function also accepts a random seed for reproducible randomization | def randomize(df, seed=None):
df = df.copy()
columns = df.columns
df = shuffle(df[columns], random_state=seed)
return df | [
"def get_sample(df, col_name, n=100, seed=42):\n import numpy as np\n \n \n np.random.seed(seed) \n random_idx = np.random.choice(\n df[col_name].dropna().index\n , size=n\n , replace=False\n )\n \n return df.loc[random_idx, col_name]",
"def sample_df(df, n_rows,... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
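`shuffle` here is presumably `sklearn.utils.shuffle`, which permutes whole rows rather than individual cells (despite the query's wording); with a fixed seed the permutation is reproducible:

```python
import pandas as pd
from sklearn.utils import shuffle  # the helper the snippet appears to rely on

df = pd.DataFrame({"x": [1, 2, 3, 4]})
print(randomize(df, seed=42).equals(randomize(df, seed=42)))  # True: same seed, same order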
Entry point for creating sensors for this station. | def create_sensors(self):
self._add_sensor('temp')
self._add_sensor('skytemp')
self._add_sensor('windspeed')
self._add_sensor('rain') | [
"def create_sensor(self):\n\n sensor = Sensor()\n self.sensors.append(sensor)\n return sensor",
"async def async_create(self, attributes: dict) -> dict:\n resp: dict = await self._request(\n \"post\", \"sensors\", json={\"sensors\": attributes}\n )\n return res... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Entry point for updating sensor values for this station. This method connects to the Mt. Locke weather server via telnet and extracts the latest values. | def update(self):
log.info('Updating McDonald Locke telnet%s...' % self._station.code)
# read data
with Telnet(self._host, self._port) as tn:
res = tn.read_all().strip().decode('utf-8')
# split lines
lines = res.split('\n')
# get time: there is a timestamp ... | [
"def _sensor_update(self):\n while True:\n with self._lock:\n self._temperature = self._read_temp()\n self._light = self._read_light()\n time.sleep(2.0)",
"def run(self):\n time.sleep(5)\n while(1):\n time.sleep(5)\n te... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sampling the genotypes with a specific method. | def sample(self, sample_type: Optional[str] = Allele.DEFAULT_SAMPLE_TYPE) -> Genotype:
genotype = list()
for a in self._genome:
if SGxRandom.random() < self._mutation_rate:
genotype.append(a.sample(sample_type='uniform'))
else:
genotype.append(a.sa... | [
"def sampleIndividuals(pop, param):\n (ssize, mutation, popsize, sim_id, num_loci) = param\n popID = pop.dvars().rep\n gen = pop.dvars().gen\n sample = drawRandomSample(pop, sizes=ssize)\n samplelist = []\n\n for idx in range(ssize):\n genotype_list = list(sample.individual(idx).genotype())... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Updates the winner and the loser Genotype, so as to modify the new Learning Rate. | def update(self, winner: Genotype, loser: Genotype):
for a, w, l in zip(self._genome, winner, loser):
a.update(winner=w, loser=l) | [
"def update_race_winner(self):\n self.data_source.update_race_winner(self.race)",
"def update_genres(self, genre, score):\n print(genre, score)\n self.genres_scores[genre] += score\n return",
"def rate_1vs1(self, opponent, opponent_won=False):\n rating2 = opponent.rating\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Evaluates a genotype according to a specific Fitness Function. | def evaluate(self, genotype: Genotype) -> Fitness:
return self._fitness_function(genotype) | [
"def fitness_function(self, individual):\n\n raise NotImplementedError",
"def eval_genome_fitness(self, genome) -> float:\n raise NotImplementedError(\"Should implement eval_genome_fitness()\")",
"def fitness(cls: Type, chromosome: ChromosomeT) -> float:\n ...",
"def generalFitness(self):... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Write a value to a register, read it back, and validate the result. Also read back several registers. | def verify_value_for_register(instr: minimalmodbus.Instrument, value: int) -> None:
START_READ_ADDR = 0x1000
NUMBER_OF_REGISTERS = 8
assert NUMBER_OF_REGISTERS > ADDRESS_SETPOINT - START_READ_ADDR
instr.write_register(ADDRESS_SETPOINT, value)
assert value == instr.read_register(ADDRESS_SETPOINT)
... | [
"def write(self, value):\n if self.register_type == Register.HOLDING_REGISTER or self.register_type == Register.INPUT_REGISTER:\n return self.modbus_client.write_single_register(self.register_address, value)\n\n else:\n raise Exception(\"invalid register type. Only use Register c... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
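Wiring this check up requires a configured `minimalmodbus.Instrument`; a hypothetical setup (port, slave address, and baudrate are placeholders, and `ADDRESS_SETPOINT` is a module-level constant not shown in the row):

```python
import minimalmodbus

# Hypothetical connection parameters; adjust to the actual slave device.
instr = minimalmodbus.Instrument("/dev/ttyUSB0", slaveaddress=1)
instr.serial.baudrate = 19200
verify_value_for_register(instr, value=1234)
```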
Verify that we detect the slave-reported error when we write to a read-only register. | def verify_readonly_register(instr: minimalmodbus.Instrument) -> None:
ADDRESS_FIRMWARE_VERSION = 0x102F
NEW_FIRMWARE_VERSION = 300
print("Verify detecting a READONLY register (detect slave error)")
did_report_error = False
try:
instr.write_register(ADDRESS_FIRMWARE_VERSION, NEW_FIRMWARE_VE... | [
"def test_fail_write_register(self, debug_session):\n debug_session.connect()\n\n with pytest.raises(Exception):\n debug_session.write_register(\"INVALIDREG\", 0xBEEF)",
"def test_fail_read_register(self, debug_session):\n debug_session.connect()\n\n with pytest.raises(Excep... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Given two tuples of integer data points, return a new tuple interpolated the given percentage of the way between them. >>> interpolate((1, 4, 9), (5, 12, 11), .5) (3, 8, 10) | def interpolate(current, next, percentage):
new = []
for i in range(len(current)):
step = int((next[i] - current[i]) * percentage)
new.append(current[i] + step)
return tuple(new) | [
"def interpolate(x, y, x1):\r\n\tfor item in x:\r\n\t\titem = float(item)\r\n\tfor item in y:\r\n\t\titem = float(item)\r\n\tx1 = float(x1)\r\n\t \r\n\ty1 = y[0] + (x1 - x[0]) / (x[1] - x[0]) * (y[1] - y[0])\r\n\t\r\n\treturn y1",
"def interpolate_position(progress, p1, p2):\n return Point(p2[0]*progress + p1... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Adjust a number to fit on a scale. E.g. for numbers 0 to 100 with a scale of 800: 100 should be 800, 0 should be 0, and 50 should be 400. Just realized that processing includes a "map" function. >>> normalize_r(0) 0 >>> normalize_r(40) 240 >>> normalize_r(1930, min=1900, max=1950) 360 | def normalize_r(val, min=0, max=100, scale=600, flip=False):
val = val - min # adjust for starting point
val = val / float(max-min) # get as percentage of max-min range
val = int(val * scale) # return as percentage of scale
if flip:
val = scale - val
return val | [
"def normalize(num, min, max):\r\n return (float(num) - float(min)) / (float(max) - float(min))",
"def scale(value, min_val, max_val):\n return (value - min_val) / (max_val - min_val)",
"def flatscale(value, min_val, max_val):\n result = scale(value, min_val, max_val)\n if result < 0:\n resul... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This updates the visualisation environment. Often this can be slower than the cythonised force calculation, so use it wisely. | def update(self, system):
update_cellview(self.ax[0, 0], system)
update_rdfview(self.ax[1, 1], system, self.average_rdf, self.r)
update_energyview(self.ax[0, 1], system)
update_msdview(self.ax[1, 0], system)
self.fig.canvas.draw() | [
"def _update_np_visualizer(self) -> None:\n # Refresh visualizer\n self.nb_visualizer.update_geometry(self.pcd_scene)\n self.nb_visualizer.update_geometry(self.mesh_stones)\n if not self.nb_visualizer.poll_events(): sys.exit() #TODO: or exit thread\n self.nb_visualizer.update_rend... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This updates the visualisation environment. Often this can be slower than the cythonised force calculation, so use it wisely. | def update(self, system):
update_cellview(self.ax[0, 0], system)
update_forceview(self.ax[1, 1], system)
update_tempview(self.ax[0, 1], system)
update_pressureview(self.ax[1, 0], system)
self.fig.canvas.draw() | [
"def update(self, system):\n update_cellview(self.ax[0, 0], system)\n update_rdfview(self.ax[1, 1], system, self.average_rdf, self.r)\n update_energyview(self.ax[0, 1], system)\n update_msdview(self.ax[1, 0], system)\n self.fig.canvas.draw()",
"def _update_np_visualizer(self) ->... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The visualisation environment consists of a series of panes (1, 2, or 4 are allowed). This function allows the number of panes in the visualisation to be defined. | def environment(panes, size='medium'): # pragma: no cover
scale = 1
if size == 'small':
scale = 2
elif size == 'large':
scale = 0.5
if panes == 1:
fig, ax = plt.subplots(figsize=(4/scale, 4/scale))
elif panes == 2:
fig, ax = plt.subplots(1, 2, figsize=(8/scale, 4/sca... | [
"def panes(self):\n return self._panes[:]",
"def draw_panes(self, master_screen):\n\t\tfor p in self.panes:\n\t\t\tmaster_screen.blit(p.draw_pane_image(), (p.x_off, p.y_off))",
"def pane_example():\n m = PanedWindow(orient=VERTICAL)\n m.pack(fill=BOTH, expand=1)\n top = Label(m, text=\"top pane\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Function that dispenses the beverage type that is given by the user selection and requests payment | def dispense_beverage(type_of_beverage):
# if espresso
if type_of_beverage == 1:
# check resources espresso and if true -> request payment from the user, otherwise we say the coffee machine
# ran out of resources.
if check_resources(espresso_ingredients['water'], espresso_ingredients['mi... | [
"def request_payment(cost_of_beverage, beverage_type):\n\n # collect all the coins and determine monetary value\n quarters = int(input(\"How many quarters? \"))\n dimes = int(input(\"How many dimes? \"))\n nickels = int(input(\"How many nickels? \"))\n pennies = int(input(\"How many pennies? \"))\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This function requests coins from the user and if provided sufficient amount, dispenses coffee | def request_payment(cost_of_beverage, beverage_type):
# collect all the coins and determine monetary value
quarters = int(input("How many quarters? "))
dimes = int(input("How many dimes? "))
nickels = int(input("How many nickels? "))
pennies = int(input("How many pennies? "))
quarters_value = q... | [
"def take_coins(choice):\r\n coin_amounts = []\r\n coin_values = {\"quarters\": 0.25, \"dimes\": 0.10, \"nickels\": 0.05, \"pennies\": 0.01}\r\n price = MENU[choice][\"cost\"]\r\n\r\n print(\"Please insert coins.\")\r\n for coin in coin_values:\r\n valid_response = False\r\n while not v... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Build `display_address` value by appending non-None (in order) values of sample attributes | def build_display_address(sample_attributes):
display_address = ''
try:
transient_town_name = sample_attributes['transientTownName']
transient_accommodation_type = sample_attributes['transientAccommodationType']
if sample_attributes['language'] == 'cy':
... | [
"def get_full_address(self):\r\n\t\tfull_address = f\"{self.address.title()}, {self.city.title()}, \"\r\n\t\tfull_address = full_address + f\"{self.state.upper()}\" # get upper case\r\n\t\treturn full_address",
"def create_property_address(self, listing):\n property_address = '{address}, {city}, {state} ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Connects vertices together. Creates an Edge from source vertex to destination vertex. Vertices will be created if not found in graph | def add_edge(
self, source_vertex: T, destination_vertex: T
) -> GraphAdjacencyList[T]:
if not self.directed: # For undirected graphs
# if both source vertex and destination vertex are both present in the
# adjacency list, add destination vertex to source vertex list of adj... | [
"def _connect(self, v1, v2):\n v1.neighbours.append(v2)\n v2.neighbours.append(v1)",
"def add_edge(self, vertex_id1, vertex_id2):\n vertex1 = self.get_vertex(vertex_id1)\n vertex2 = self.get_vertex(vertex_id2)\n vertex1.add_neighbor(vertex2)\n if self.__is_directed == Fal... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
ctrlGroup( object,type,curve,upObject ) > ctrlGroup( 'group','cv','curve1','locator1',5,'True') > ctrlGroup( 'locator','cv','curve15754','locator1',1,'' ) | def ctrlGroup(object, type, bCurve, bUpLoc, bCtrlNum_s, level):
newGrpList = []
cmds.select(cl=True)
bCtrlNum = 0
if type == 'ep':
bCtrlNum = bCtrlNum_s + 1
if type == 'cv':
bCtrlNum = bCtrlNum_s + 3
for i in range(bCtrlNum):
cmds.refresh()
pos_t = cmds.xform(bCurve... | [
"def attrNavigationControlGrp(groupName, unignore=\"string\", docTag=\"string\", extraButton=bool, delete=\"string\", columnWidth4=int, createNew=\"string\", popupMenuArray=bool, numberOfPopupMenus=bool, noBackground=bool, defineTemplate=\"string\", connectAttrToDropped=\"string\", label=\"string\", highlightColor=... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
ctrlGroupMake('locator',listJnts,2,'') listJnts = cmds.ls(sl=True) len 2%2 13%2 23/3 9/2 | def ctrlGroupMake(object, listJnts, ctrlNum, level):
clen = len(listJnts)
steps = clen / ctrlNum
groups = ''
groupsList = []
if steps == clen:
for i in range(2):
groups = nullObj(object)
cmds.delete(cmds.parentConstraint(listJnts[i], groups))
groupsList.ap... | [
"def create_locators(self):\n mc.spaceLocator(n='cn_headroot_jnt_L')\n mc.spaceLocator(n='cn_low_jaw_jnt_L')\n mc.move(0, 2, 0)\n mc.spaceLocator(n='cn_low_jaw_tip_jnt_L')\n mc.move(0, 4, 0)",
"def setup_locators(self):\n for i in range(0, 9):\n locator = cmd.s... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
listJnts = cmds.ls(sl=True,dag=True,type='joint') intensionYAttr = 'intensionY' offsetYAttr = 'offsetY' intensionZAttr = 'intensionZ' offsetZAttr = 'offsetZ' groups = [] | def createRollIK(listJnts, groups, intensionYAttr, offsetYAttr, intensionZAttr, offsetZAttr):
jntSize = len(listJnts)
node = {}
attr = [[intensionYAttr,
'double',
'',
''],
[offsetYAttr,
'double',
',,1',
''],
... | [
"def show_joint_orient(value=True):\n for each in cmds.ls(type=\"joint\"):\n for plug in [each + \".jo\" + x for x in \"xyz\"]:\n cmds.setAttr(plug, channelBox=value)",
"def create_single_line_nodelist(model,coord_start_seq,coord_end_seq,N,\n nodesetname=None,\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Wind-based addition of atmospheric states. | def add_wind(self, other):
if isinstance(other, State):
assert self.grid is other.grid
u = self.u + other.u
v = self.v + other.v
else:
u, v = other
assert self.grid.shape == u.shape
assert self.grid.shape == v.shape
return S... | [
"def wind(self) -> WindData:\n pass",
"def wind(self) -> ObservationsSummaryWind:\n return ObservationsSummaryWind(self.summary[\"wind\"])",
"def generate_wind():\n# Taken by converting UTM Zone 11 coordinates on\n# https://www.engineeringtoolbox.com/utm-latitude-longitude-d_1370.html\n# These val... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Relative vorticity-based addition of atmospheric states. | def add_vorticity(self, other):
if isinstance(other, State):
assert self.grid is other.grid
# Add vorticity to PV so that planetary vorticity is not doubled
pv = self.pv + other.vorticity
else:
assert self.grid.shape == other.shape
pv = self.pv... | [
"def augment_state(self, state, reference):\n augmented_state = [state[x] for x in self.ac_states] + [state[self.tracked_state] - reference[self.tracked_state]]\n return torch.tensor(augmented_state, requires_grad=True)",
"def update_voltage(self, step_number, timestep, artificial_stimulus):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Perform PV inversion to obtain wind components. | def _invert_pv(self):
# Compute wind from vorticity using div = 0
vorticity = self.vorticity_spectral
self._u, self._v = self.grid.wind(vorticity, np.zeros_like(vorticity)) | [
"def get_volume(self, vtu):\r\n\r\n self.warped_mesh(vtu)\r\n self.left_ventricle_volume = 0\r\n for e in self.left_ventricle_cavity.cells():\r\n x1, y1, z1 = self.left_ventricle_cavity.coordinates()[e][0]\r\n x2, y2, z2 = self.left_ventricle_cavity.coordinates()[e][1]\r\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Enstrophy after removal of the zonal-mean PV. | def eddy_enstrophy(self):
return 0.5 * (self.pv - np.mean(self.pv, axis=ZONAL, keepdims=True))**2 | [
"def Enstrophy(zeta):\n\tEns = np.sum(zeta*zeta)\n\treturn Ens",
"def unplugged_mean(self) -> float:\n return self._unplugged_mean",
"def _compute_det_variance(self):",
"def demean(data):\n return data - data.mean()",
"def remove_piston(self):\n self.phase -= mean(self.phase)\n retur... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
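A standalone NumPy illustration of the same formula, assuming `ZONAL` names the longitude axis (here the last axis) of a `(lat, lon)` PV field:

```python
import numpy as np

pv = np.random.default_rng(0).normal(size=(5, 8))        # toy (lat, lon) PV field
eddy = 0.5 * (pv - pv.mean(axis=-1, keepdims=True)) ** 2
print(eddy.shape)  # (5, 8): pointwise eddy enstrophy
# The zonal mean of the eddy PV itself vanishes by construction:
print(np.allclose((pv - pv.mean(axis=-1, keepdims=True)).mean(axis=-1), 0))  # True
```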
Zonalized PV profile on the regular latitude grid. | def pv_zonalized(self):
return self.grid.zonalize(self.pv, interpolate=True) | [
"def get_pz(table, norm=True, zmin=0., zmax=10.):\n\tz_ = np.linspace(zmin, zmax, 1000)\n\tpz = np.zeros(1000)\n\tfor row in table:\n\t\tpz += get_pz_spline(row['p_z'], row['z_i'])(z_)\n\tif norm:\n\t\tpz /= integrate.simps(pz, x=z_)\n\n\treturn pz",
"def velocidad_promedio(self): \n u_x = 0\n u_y... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Finite-Amplitude Local Wave Activity, phase-filtered based on v. The FALWA field is filtered based on the doubled dominant wavenumber of the meridional wind obtained from Fourier analysis at each latitude, as in Ghinassi et al. (2020). | def falwa_filtered(self):
dominant_wavenumber = diagnostics.dominant_wavenumber_fourier(self.v, self.grid)
return diagnostics.filter_by_wavenumber(self.falwa, 2*dominant_wavenumber) | [
"def filter(self, v):\n log.info(\"Applying windowing scheme\")\n return self._window(v, self.filter_obj.filter(v))",
"def hanning_filt(self,t,tw,fc):\n hanningfilt = 1/2*(1+cos(2*pi*t/tw))*sinc(2*pi*fc*t)\n return hanningfilt",
"def get_vocalization_status(time_frame, np_freqs):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Yields file paths listed in a file. | def get_file_paths(file_name):
with open(path / file_name, 'r') as f:
for line in f:
line = line.strip()
if not line or line.startswith('#'):
continue
yield (path / line) | [
"def file_iter(path, encoding='utf-8', errors='strict'):\n with open(path, 'r', encoding=encoding, errors=errors) as f:\n for line in f:\n yield line",
"def iterate_file( file ):\n while 1:\n chunk = file.read( CHUNK_SIZE )\n if not chunk:\n break\n yield ch... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Anonymize graph by replacing nodes of certain named types with tokens like "named0". | def anonymize_graph(g):
replacements = []
id_counters = {}
carg_triples = g.attributes(relation='carg')
# anonymize each instance that has a cargs value, storing the mapping from value to token
for carg_triple in carg_triples:
named_triple = g.triples(
relation='instance', source... | [
"def replace_node_names_with_node_objects(self):\n for node in self.nodes:\n for key, node_to in node.edges_to.items():\n node.edges_to[key] = self.get_node_by_name(node_to)\n\n node.parent = self.get_node_by_name(node.parent)",
"def apply_renamer(tree, renamer):\n c... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Group all attribute nodes into one. Attribute list is normalized by uppercasing the value and sorting the list by attribute name. Concatenated attributes are appended to the instance (predicate) target value so OpenNMT will interpret them as word features. Note that OpenNMT expects all tokens to have the same number of... | def combine_attributes(g):
for variable in g.variables():
old_attributes = [
attr for attr in g.attributes(source=variable) if attr.relation != 'instance'
]
new_targets = []
for old_attr in old_attributes:
old_relation = old_attr.relation
old_targe... | [
"def groupby_attribute(\n features_list: List[Features], attributes: Optional[Iterable[Text]] = None\n ) -> Dict[Text, List[Features]]:\n # ensure all requested attributes are present in the output - regardless\n # of whether we find features later\n extracted: Dict[Text, List[Feature... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Read serialized graphs from a file. Stores concatenated comment lines (lines starting with "#") as the graph label. Returns list of (label, serialized_graph) tuples. | def load_serialized_from_file(infilename):
serialized = []
with open(infilename) as infile:
heading = ''
partial = []
for line in infile:
line = line.strip()
if not line:
continue
if line.startswith('#'):
if partial:
... | [
"def graphs_from_file(file_path: str) -> Iterator[Graph]:\n content = read_gzip_txt_file(file_path)\n\n graph_header_sep_re = re.compile(\n r'(<graph center=[^ ]+ title=\"[^\"]+\">\\n)')\n graph_header_re = re.compile(\n r'<graph center=([^ ]+) title=\"([^\"]+)\">\\n')\n parts = graph_header_sep_re.sp... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Given a Penman-serialized graph, simplify, anonymize, and linearize it. Anonymization replaces nodes of specific classes with placeholders like named0, named1 and stores a mapping that can be used to recover original values. Returns tuple of (preprocessed_graph, anonymization_mapping) | def preprocess_penman(serialized):
codec = preprocess_penman.codec
g = codec.decode(serialized)
anon_map = anonymize_graph(g)
combine_attributes(g)
linearized = codec.encode(g)
return linearized, anon_map | [
"def anonymize_graph(g):\n replacements = []\n id_counters = {}\n carg_triples = g.attributes(relation='carg')\n # anonymize each instance that has a cargs value, storing the mapping from value to token\n for carg_triple in carg_triples:\n named_triple = g.triples(\n relation='insta... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Shrinks span boundary so it doesn't include punctuation. | def _adjust_span_boundaries(sentence, anon_dict):
start, end = anon_dict['span']
# adjust beginning of replacement span to exclude punc
while start < end and sentence[start] in {'(', '[', '"', '`', "'"}:
if sentence[start] == "'": # allow single quote but not double
if end - start > 1 a... | [
"def _Truncate(self, tokens, overflow):\n self._truncated = True\n marker_string = '...'\n marker_width = len(marker_string)\n marker_token = (Token.Markdown.Truncated, marker_string)\n if tokens and overflow:\n word, available = overflow # pylint: disable=unpacking-non-sequence\n if marke... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Convert Penman-serialized graphs to a format that can be used for training. Reads Penman-serialized graphs from infilename, where infile was created by running mrstopenman.py (or has the same format.) Writes linear serializations to {outfile_prefix}src.txt, corresponding tokenized sentences to {outfile_prefix}tgt.txt, and... | def create_parallel_files(infilename, outfile_prefix, output_blank_for_failure=False):
data = load_serialized_from_file(infilename)
sys.stderr.write('Deserializing and processing {} graphs.'.format(len(data)))
sys.stderr.write('Using Moses tokenization from the nltk package.\n')
with io.open(get_src_fil... | [
"def preprocess_penman(serialized):\n codec = preprocess_penman.codec\n g = codec.decode(serialized)\n anon_map = anonymize_graph(g)\n combine_attributes(g)\n linearized = codec.encode(g)\n return linearized, anon_map",
"def from_graphML(self, in_file):\n pass",
"def convert_confnet(fil... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Helper function to save dvh_data into JSON file | def save_dvh_json(dvh_data_dict, file_path_name):
with open(file_path_name, "w", encoding="utf-8") as json_file:
json.dump(dvh_data_dict, json_file, ensure_ascii=False) | [
"def to_json_file(self, file_path_name):\n save_dvh_json(self.dvh_data, file_path_name)",
"def save_json(data, file_name: str = 'hsweep'):\n # if results directory does not exist, create it!\n results_path = check_results_path()\n\n file_path = results_path / Path(file_name + '.json')\n\n with ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
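A round-trip sketch for the helper (payload and filename are made up):

```python
import json

save_dvh_json({"PTV": {"volume_cc": 120.5}}, "plan_dvh.jdvh")  # hypothetical data
with open("plan_dvh.jdvh", encoding="utf-8") as json_file:
    print(json.load(json_file)["PTV"]["volume_cc"])  # 120.5
```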
Class to encapsulate IO methods for DVH data storage. It receives a PyPlanScoring DVH data dictionary | def __init__(self, dvh_data_dict=None, header_info=None):
if dvh_data_dict is None:
dvh_data_dict = {}
self._header = None
self._dvh_data = {}
# setters
self.dvh_data = dvh_data_dict
self.header = header_info | [
"def __init__(self):\r\n self.filename = None\r\n self.data_dict = {}",
"def wrap_DHVSequence_in_dict(self, rtss_file:str) -> dict:\n\n instance_rtss_object = Instance_RTSS(rtss_file)\n dictionnary = {}\n for i in range(len(self.dicomData.DVHSequence)) : \n referenced... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Save pickle .dvh file | def to_dvh_file(self, file_path_name):
save(self.dvh_data, file_path_name) | [
"def save_pvd(self, filename):\n if filename[-4:] != '.pvd':\n filename += '.pvd'\n pvd_file = df.File(filename)\n pvd_file << self.f",
"def save(self):\n pickle.dump(self.keyValue, open(\"brain.dump\", \"w+\"))\n print \"Successfully saved file\"",
"def dump(self):... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Loads pickle .dvh file | def read_dvh_file(self, file_path_name):
self.dvh_data = load(file_path_name)
return self.dvh_data | [
"def load_obj(load_dir):\r\n return pickle.load(open(load_dir, 'rb'))",
"def load(path=None):\n \n if path is None:\n path = get_path('hwdetect/data/data_sets/1_pixel_labels/ariel_26-10_5959.pkl')\n with open(path, 'rb') as f:\n ret = pickle.load(f)\n return ret",
"def load_obj(name... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Saves serialized dvh data into .jdvh json file | def to_json_file(self, file_path_name):
save_dvh_json(self.dvh_data, file_path_name) | [
"def save_dvh_json(dvh_data_dict, file_path_name):\n\n with open(file_path_name, \"w\", encoding=\"utf-8\") as json_file:\n json.dump(dvh_data_dict, json_file, ensure_ascii=False)",
"def save(self):\r\n try:\r\n with open(self.json_name(), \"w\") as json_file:\r\n json_s... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Only load the module if WP-CLI is installed; used by most of the methods | def __virtual__():
# REFINE Fix the formatting in the above docstring
if 'wordpress.check_cli_installed' in __salt__ and __salt__['wordpress.check_cli_installed']():
return __virtualname__
# REFINE Can this message be different from the 'main' module file?
return False, 'The wordpress execution ... | [
"def _patch_import_cli():\n try:\n return importlib.import_module(\"music_bot.cli\")\n except ModuleNotFoundError:\n pass\n\n from pathlib import Path\n sys.path.append(str(Path(__file__).parent.parent))\n\n return importlib.import_module(\"music_bot.cli\")",
"def is_installed():\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
X is a data matrix, size N x D Y is a data matrix, size N x R kern, likelihood, mean_function are appropriate GPflow objects Z is a matrix of pseudo inputs, size M x D num_latent is the number of latent process to use, default to Y.shape[1] q_diag is a boolean. If True, the covariance is approximated by a diagonal matr... | def __init__(self, X, Y, kern, likelihood, Z,
mean_function=None,
num_latent=None,
q_diag=False,
whiten=True,
minibatch_size=None,
**kwargs):
# sort out the X, Y into MiniBatch objects if required.
self... | [
"def __init__(self, X, Y, kern, likelihood, feat=None,\n mean_function=None,\n num_latent=None,\n q_diag=False,\n whiten=True,\n minibatch_size=None,\n Z=None,\n num_data=None,\n q_mu=None... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Generate the 'since' api index files. | def OutputSinceIndexes():
for version in set(Since.values()):
logging.info("Since : [%s]", version)
index = {x: IndexEntriesSince[x] for x in IndexEntriesSince.keys() if Since[x] == version}
OutputIndex("api-index-" + version, index) | [
"def api_index():\n return render_template('api_docs.html')",
"def get_index_files(self):\n return",
"def write_api_docs(self, indent=4):\n # Welcome message\n if self.verbose > 0:\n print(\"[info] Generating documentation index in {0}.\".format(\n self.generate... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Writes a glossary of the used annotation terms. The glossary file can be included into the main document. | def OutputAnnotationGlossary():
# if there are no annotations used return
if not AnnotationsUsed:
return
old_glossary = os.path.join(DB_OUTPUT_DIR, "annotation-glossary.xml")
new_glossary = os.path.join(DB_OUTPUT_DIR, "annotation-glossary.new")
lastletter = " "
divopen = False
# ad... | [
"def write_glossary(domain='opendata.cityofnewyork.us', resource_filename=None, glossary_filename=None,\n use_cache=True, timeout=60):\n\n # Load the glossarization to-do list.\n resource_list, glossary = load_glossary_todo(resource_filename, glossary_filename, use_cache)\n\n # Generate t... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the Since and StabilityLevel paragraphs for a symbol. | def OutputSymbolTraits(symbol):
desc = ''
if symbol in Since:
link_id = "api-index-" + Since[symbol]
desc += "<para role=\"since\">Since: <link linkend=\"%s\">%s</link></para>" % (link_id, Since[symbol])
if symbol in StabilityLevel:
stability = StabilityLevel[symbol]
if st... | [
"def OutputSymbolsWithoutSince():\n new_nosince_file = os.path.join(ROOT_DIR, MODULE + \"-nosince.txt\")\n with open(new_nosince_file, 'w', encoding='utf-8') as out:\n for symbol in sorted(SourceSymbolDocs.keys()):\n if symbol in Since:\n out.write(symbol + \"\\n\")",
"def M... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns extralinks for the symbol (if enabled). | def OutputSymbolExtraLinks(symbol):
desc = ''
if False: # NEW FEATURE: needs configurability
sstr = uri_escape(symbol)
mstr = uri_escape(MODULE)
desc += '''<ulink role="extralinks" url="http://www.google.com/codesearch?q=%s">code search</ulink>
<ulink role="extralinks" url="http://lib... | [
"def ExtractSymbols(self, native_heaps, sym_paths):\n raise NotImplementedError()",
"def symbols(self):\r\n return [symbolData.symbol for symbolData in self.symbolData]",
"def symbols(self):\n url = MARKET_URL + '/v1/common/symbols'\n params = {}\n return self._get(url, params)",
... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the synopsis and detailed description of a typedef. | def OutputTypedef(symbol, declaration):
sid = common.CreateValidSGMLID(symbol)
condition = MakeConditionDescription(symbol)
desc = "<refsect2 id=\"%s\" role=\"typedef\"%s>\n<title>%s</title>\n" % (sid, condition, symbol)
synop = "<row><entry role=\"typedef_keyword\">typedef</entry><entry role=\"function... | [
"def description(self):\n return type_get_description(self)",
"def CreateHelpText(synopsis, description):\n return SYNOPSIS_PREFIX + synopsis + DESCRIPTION_PREFIX + description",
"def get_description(cls):\n if cls.__doc__ is None:\n return \"\"\n return cls.__doc__.strip().spli... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the synopsis and detailed description of a union. | def OutputUnion(symbol, declaration):
is_gtype = False
if CheckIsObject(symbol):
logging.info("Found union gtype: %s", symbol)
is_gtype = True
sid = None
condition = None
if is_gtype:
sid = common.CreateValidSGMLID(symbol + "_union")
condition = MakeConditionDescript... | [
"def _description_string(self) -> str:",
"def describe(self):\n #Interfaces are tricky because we can have an arbitrary number of embedded procedures\n #that each have different calling interfaces and return types. We are trying to \n #summarize that information in a single interface. Interfa... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the DocBook output describing the parameters of a symbol. This can be used for functions, macros or signal handlers. | def OutputParamDescriptions(symbol_type, symbol, fields):
output = ''
num_params = 0
field_descrs = None
if fields:
field_descrs = [f for f in fields if f not in ['void', 'Returns']]
else:
field_descrs = []
params = SymbolParams.get(symbol)
logging.info("param_desc(%s, %s) ... | [
"def doc(self):\r\n return '%s\\n\\n%s' % (self.get_call_signature(), self.docstr)",
"def help(cls):\n param_docs = inspect.getdoc(cls._validate_parameters)\n param_start_ind = param_docs.find('parameters:\\n') + 12\n param_end_ind = param_docs.find(':type parameters:') - 1\n\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Parses a stability level and outputs a warning if it isn't valid. | def ParseStabilityLevel(stability, file, line, message):
stability = stability.strip()
sl = stability.strip().lower()
if sl == 'stable':
stability = "Stable"
elif sl == 'unstable':
stability = "Unstable"
elif sl == 'private':
stability = "Private"
else:
common.Log... | [
"def get_severity(level):\n try:\n return Level.levels[level]\n except:\n return False",
"def parse_level(notice):\n prefix, _, _ = notice.partition(':')\n if prefix in ['PANIC', 'FATAL']:\n return logging.CRITICAL\n if prefix in ['ERROR']:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Outputs the final DocBook file for one program. | def OutputProgramDBFile(program, section_id):
logging.info("Output program docbook for %s", program)
short_desc = SourceSymbolDocs.get(program + ":Short_Description")
if not short_desc or short_desc.strip() == '':
short_desc = ''
else:
# Don't use ConvertMarkDown here for now since we d... | [
"def generate_documentation(self):\n pages = []\n for output in self.outputs:\n title = output.get_documentation_title()\n pages.append({\n 'title': title,\n 'filename': self.create_filename(title),\n 'content': output.get_documentatio... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Copies an "extra" DocBook file into the output directory, expanding abbreviations. | def OutputExtraFile(file):
basename = os.path.basename(file)
old_db_file = os.path.join(DB_OUTPUT_DIR, basename)
new_db_file = os.path.join(DB_OUTPUT_DIR, basename + ".new")
contents = open(file, 'r', encoding='utf-8').read()
with open(new_db_file, 'w', encoding='utf-8') as out:
out.writ... | [
"def include_other_documents(self):\n patterns = ['readme', 'license', 'changes']\n entries = os.listdir('.')\n entries = filter(os.path.isfile, entries)\n matches = filter(lambda e: any(p in e.lower() for p in patterns), entries)\n for match in matches:\n copy_file(mat... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Expand the shortcut notation for symbol references. This turns the abbreviations function(), macro(), @param, %constant, and #symbol into appropriate DocBook markup. CDATA sections and <programlisting> parts are skipped. | def ExpandAbbreviations(symbol, text):
# Note: This is a fallback and normally done in the markdown parser
logging.debug('expand abbreviations for "%s", text: [%s]', symbol, text)
m = re.search(r'\|\[[^\n]*\n(.*)\]\|', text, flags=re.M | re.S)
if m:
logging.debug('replaced entities in code bloc... | [
"def OutputSymbolExtraLinks(symbol):\n desc = ''\n\n if False: # NEW FEATURE: needs configurability\n sstr = uri_escape(symbol)\n mstr = uri_escape(MODULE)\n desc += '''<ulink role=\"extralinks\" url=\"http://www.google.com/codesearch?q=%s\">code search</ulink>\n<ulink role=\"extralinks... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Rewrite XML blocks. Looks for given XML element tags within the text, and calls the callback on pieces of text inside & outside those elements. Used for special handling of text inside things like CDATA sections and <programlisting> elements. | def ModifyXMLElements(text, symbol, start_tag_regexp, end_tag_func, callback):
before_tag = start_tag = end_tag_regexp = end_tag = None
result = ''
logging.debug('modify xml for symbol: %s, regex: %s, text: [%s]', symbol, start_tag_regexp, text)
m = re.search(start_tag_regexp, text, flags=re.S)
wh... | [
"def _replaceNestedElementText(self, names, text, escapeAmpLtGt=False):\n openingTagsPattern = r\"\"\n closingTagsPattern = r\"\"\n firstLevel = True\n while names:\n nextName = names.pop(0)\n if not firstLevel:\n openingTagsPattern = openingTagsPatte... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Builds a docbook header for the given tag. | def MakeDocHeader(tag):
header = re.sub(r'<!DOCTYPE \w+', r'<!DOCTYPE ' + tag, doctype_header)
# fix the path for book since this is one level up
if tag == 'book':
header = re.sub(
r'<!ENTITY % gtkdocentities SYSTEM "../([a-zA-Z./]+)">', r'<!ENTITY % gtkdocentities SYSTEM "\1">', header)... | [
"def _make_header(metadata):\n # All headers are astropy headers until we update fitsio\n # if use_fitsio:\n # hdr = fitsio.FITSHDR(metadata)\n if metadata is None:\n hdr = fits.Header()\n else:\n hdr = fits.Header(metadata)\n\n return hdr",
"def make_header(args):\n header ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This returns a cross-reference link to the given symbol. Though it doesn't try to do this for a few standard C types that it knows won't be in the documentation. | def MakeXRef(symbol, text=None):
symbol = symbol.strip()
if not text:
text = symbol
# Get rid of special suffixes ('-struct','-enum').
text = re.sub(r'-struct$', '', text)
text = re.sub(r'-enum$', '', text)
if ' ' in symbol:
return text
logging.info("Getting ty... | [
"def symbol_reference(name: str, typ: IntType) -> FixedValueReference:\n return FixedValueReference(SymbolValue(name, typ.width), typ)",
"def get_xrefs_symbol(self, species, symbol):\n self.endpoint = '/xrefs/symbol/'\n url = self.server + self.endpoint + species + '/' + symbol + '?'\n\n r... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This returns a deprecation warning for the given symbol. | def MakeDeprecationNote(symbol):
desc = ''
if symbol in Deprecated:
desc += "<warning><para><literal>%s</literal> " % symbol
note = Deprecated[symbol]
m = re.search(r'^\s*([0-9\.]+)\s*:?', note)
if m:
desc += "has been deprecated since version %s and should not be us... | [
"def _getWarningString(self, attr):\n return _getDeprecationWarningString(\n deprecatedattributes.__name__ + \".\" + attr,\n deprecatedattributes.version,\n DEPRECATION_WARNING_FORMAT + \": \" + deprecatedattributes.message,\n )",
"def manual_warn(message, stacklevel... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Generate the object inheritance graph. Returns the DocBook output describing the ancestors and immediate children of a GObject subclass. It uses the global Objects and ObjectLevels arrays to walk the tree. | def GetHierarchy(gobject, hierarchy):
# Find object in the objects array.
found = False
children = []
level = 0
j = 0
for i in range(len(Objects)):
if found:
if ObjectLevels[i] <= level:
break
elif ObjectLevels[i] == level + 1:
chi... | [
"def ReadObjectHierarchy(ifile):\n\n Objects[:] = []\n ObjectLevels[:] = []\n\n if not os.path.isfile(ifile):\n logging.debug('no *-hierarchy.tx')\n return\n\n INPUT = open(ifile, 'r', encoding='utf-8')\n\n # Only emit objects if they are supposed to be documented, or if\n # they hav... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Generate interface usage graph. Returns the DocBook output describing the implementations of an interface. It uses the global Interfaces hash. | def GetImplementations(gobject):
text = ''
impls = []
for key in Interfaces:
if re.search(r'\b%s\b' % gobject, Interfaces[key]):
impls.append(key)
count = len(impls)
if count > 0:
impls.sort()
text = '''<para>
%s is implemented by
''' % gobject
for i in r... | [
"def __generate_source(self):\n self._output = ''\n\n # We add the current date and time\n self._output += '#\\n'\n self._output += '# Generated: {}\\n'.format(datetime.datetime.now())\n self._output += '#\\n'\n\n # We add the imports, used functions and the header of the c... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Generates interface requirements. Returns the DocBook output describing the prerequisites of an interface. It uses the global Prerequisites hash. | def GetPrerequisites(iface):
text = ''
if iface in Prerequisites:
text = '''<para>
%s requires
''' % iface
prereqs = Prerequisites[iface].split()
count = len(prereqs)
for i in range(count):
sid = common.CreateValidSGMLID(prereqs[i])
text += " <link linken... | [
"def GetDerived(iface):\n text = ''\n derived = []\n for key in Prerequisites:\n if re.search(r'\\b%s\\b' % iface, Prerequisites[key]):\n derived.append(key)\n\n count = len(derived)\n if count > 0:\n derived.sort()\n text = '''<para>\n%s is required by\n''' % iface\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the DocBook output describing the derived interfaces of an interface. It uses the global %Prerequisites hash. | def GetDerived(iface):
text = ''
derived = []
for key in Prerequisites:
if re.search(r'\b%s\b' % iface, Prerequisites[key]):
derived.append(key)
count = len(derived)
if count > 0:
derived.sort()
text = '''<para>
%s is required by
''' % iface
for i in rang... | [
"def GetPrerequisites(iface):\n\n text = ''\n if iface in Prerequisites:\n text = '''<para>\n%s requires\n''' % iface\n prereqs = Prerequisites[iface].split()\n count = len(prereqs)\n for i in range(count):\n sid = common.CreateValidSGMLID(prereqs[i])\n text +... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Generate signal docs. Returns the synopsis and detailed description DocBook output for the signal handlers of a given GObject subclass. | def GetSignals(gobject):
synop = ''
desc = ''
for i in range(len(SignalObjects)):
if SignalObjects[i] == gobject:
logging.info("Found signal: %s", SignalNames[i])
name = SignalNames[i]
symbol = '%s::%s' % (gobject, name)
sid = common.CreateValidSGMLID... | [
"def docstring(self):\n docs = []\n for key, func in self.items():\n sig = getattr(key, 'sig', '')\n doc = func.__doc__ or ''\n docs.append(f'{func.__name__}{sig}\\n {doc}')\n return '\\n\\n'.join(docs)",
"def help_generate_events(self):\n _generate_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Read the documentation embedded in comment blocks in the source code. It recursively descends the source directory looking for source files and scans them looking for specially-formatted comment blocks. | def ReadSourceDocumentation(source_dir, suffix_list, source_dirs, ignore_files):
if IgnorePath(source_dir, source_dirs, ignore_files):
return
logging.info("Scanning source directory: %s", source_dir)
# This array holds any subdirectories found.
subdirs = []
for ifile in sorted(os.listdir(... | [
"def scanFolder(self,name):\n\t\tfor root, dirs, files in os.walk( os.path.join( Config().RootDirectory, name )):\n\t\t\tfor file in files:\n\t\t\t\tif file.endswith(\".py\"):\n\t\t\t\t\tpath = os.path.join(root, file)\n\t\t\t\t\tmodule = self.importFile(path)\n\t\t\t\t\tdoc = module.__doc__\n\t\t\t\t\tif doc != No... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Scans one source file looking for specially-formatted comment blocks. MergeSourceDocumentation() later copies over the doc blobs that are not suppressed/ignored. | def ScanSourceFile(ifile, ignore_files):
m = re.search(r'^.*[\/\\]([^\/\\]*)$', ifile)
if m:
basename = m.group(1)
else:
common.LogWarning(ifile, 1, "Can't find basename for this filename.")
basename = ifile
# Check if the basename is in the list of files to ignore.
if re.se... | [
"def ingest_comments(self, raw_lines):\n # First get a dictionary with every existing line of code. That way\n # we know whether to look for an inline comment or a full line comment\n for file in self.output_files:\n all_lines_dict = {}\n for cfunction in file.functions.va... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Reports undeclared symbols. Outputs symbols that are listed in the section file, but have no declaration in the sources. | def OutputUndeclaredSymbols():
old_undeclared_file = os.path.join(ROOT_DIR, MODULE + "-undeclared.txt")
new_undeclared_file = os.path.join(ROOT_DIR, MODULE + "-undeclared.new")
with open(new_undeclared_file, 'w', encoding='utf-8') as out:
if UndeclaredSymbols:
out.write("\n".join(sorted... | [
"def OutputUnusedSymbols():\n num_unused = 0\n old_unused_file = os.path.join(ROOT_DIR, MODULE + \"-unused.txt\")\n new_unused_file = os.path.join(ROOT_DIR, MODULE + \"-unused.new\")\n\n with open(new_unused_file, 'w', encoding='utf-8') as out:\n\n for symbol in sorted(Declarations.keys()):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Reports unused documentation. Outputs symbols that are documented in comments, but not declared in the sources. | def OutputUnusedSymbols():
num_unused = 0
old_unused_file = os.path.join(ROOT_DIR, MODULE + "-unused.txt")
new_unused_file = os.path.join(ROOT_DIR, MODULE + "-unused.new")
with open(new_unused_file, 'w', encoding='utf-8') as out:
for symbol in sorted(Declarations.keys()):
if symbol... | [
"def report_unused(self) -> None:\n unused = set(self._dict) - self._used\n if unused:\n print('The following documentation entries were not used in code'\n ' generation:')\n for k in sorted(unused):\n print(' ', k)",
"def OutputUndeclaredSymbo... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Outputs list of all symbols to a file. | def OutputAllSymbols():
new_symbols_file = os.path.join(ROOT_DIR, MODULE + "-symbols.txt")
with open(new_symbols_file, 'w', encoding='utf-8') as out:
for symbol in sorted(AllSymbols.keys()):
out.write(symbol + "\n") | [
"def futures_write_symbols_to_file(self):\n symbol_list = self.futures_get_all_symbols()\n with open(\"binance_futures_symbols.txt\", \"w+\") as file:\n for symbol in symbol_list:\n file.write(symbol+\"\\n\")",
"def dump_symbol_set(fp, ss):\n first = True\n fp... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Outputs list of all symbols without a since tag to a file. | def OutputSymbolsWithoutSince():
new_nosince_file = os.path.join(ROOT_DIR, MODULE + "-nosince.txt")
with open(new_nosince_file, 'w', encoding='utf-8') as out:
for symbol in sorted(SourceSymbolDocs.keys()):
if symbol not in Since:
out.write(symbol + "\n") | [
"def OutputUndeclaredSymbols():\n old_undeclared_file = os.path.join(ROOT_DIR, MODULE + \"-undeclared.txt\")\n new_undeclared_file = os.path.join(ROOT_DIR, MODULE + \"-undeclared.new\")\n\n with open(new_undeclared_file, 'w', encoding='utf-8') as out:\n if UndeclaredSymbols:\n out.write(\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Merges documentation read from a source file. Parameter descriptions override any in the template files. Function descriptions are placed before any description from the template files. | def MergeSourceDocumentation():
# add whats found in the source
symbols = set(SourceSymbolDocs.keys())
# and add known symbols from -sections.txt
for symbol in KnownSymbols.keys():
if KnownSymbols[symbol] == 1:
symbols.add(symbol)
logging.info("num source entries: %d", len(sym... | [
"def create_file_documentation(\n source_file: Path, output_path: Path, args: argparse.Namespace\n) -> str:\n name = source_file.name\n normalized_name = name.replace(\".\", \"_\")\n doc_name = f\"{normalized_name}.rst\"\n doc_file = output_path / doc_name\n if not args.force and doc_file.exists()... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check if a docstring is empty. It is also regarded as empty if it only consists of whitespace or, e.g., FIXME. | def IsEmptyDoc(doc):
if re.search(r'^\s*$', doc):
return True
if re.search(r'^\s*<para>\s*(FIXME)?\s*<\/para>\s*$', doc):
return True
return False | [
"def has_docstring(self):\n return type(self.docstring) != type(None)",
"def test_any_docstring(self):\n self.assertTrue(len(Base.__doc__) >= 1)",
"def has_docstring(func):\n return func.__doc__ is not None",
"def _is_valid_docstring(cls, cursor: Cursor, raw: str, doc_extent: SourceRange) -> ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
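A quick sanity check of the emptiness heuristic above (a minimal sketch, reusing the two regexes exactly as shown; the sample strings are hypothetical):

```python
import re

def IsEmptyDoc(doc):
    # Whitespace-only docs count as empty.
    if re.search(r'^\s*$', doc):
        return True
    # So does a lone <para>FIXME</para> (or an empty <para></para>).
    if re.search(r'^\s*<para>\s*(FIXME)?\s*<\/para>\s*$', doc):
        return True
    return False

assert IsEmptyDoc("   \n ")
assert IsEmptyDoc("<para>FIXME</para>")
assert not IsEmptyDoc("<para>Frees the widget.</para>")
```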
Reads in a file containing the function/macro/enum etc. declarations. Note that in some cases there are several declarations with the same name, e.g. for conditional macros. In this case we set a flag in the DeclarationConditional hash so the declaration is not shown in the docs. If a macro and a function have the same... | def ReadDeclarationsFile(ifile, override):
if override == 0:
Declarations.clear()
DeclarationTypes.clear()
DeclarationConditional.clear()
DeclarationOutput.clear()
INPUT = open(ifile, 'r', encoding='utf-8')
declaration_type = ''
declaration_name = None
declaration = ... | [
"def get_prototypes_from_file(self, include_file_name):\n\n # read file\n include_file=file(include_file_name,\"r\")\n contents=include_file.read()\n include_file.close()\n\n # skip begin and end\n decl_only_pattern=re.compile(\".*__BEGIN_DECLS(.*)__END_DECLS.*\",re.MULTILI... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Reads information about object signals. It creates the arrays containing details about the signals. The first line of the SignalPrototype is the return type of the signal handler. The remaining lines are the parameters passed to it. The last parameter, "gpointer user_data", is always the same and so is not included. | def ReadSignalsFile(ifile):
in_signal = 0
signal_object = None
signal_name = None
signal_returns = None
signal_flags = None
signal_prototype = None
# Reset the signal info.
SignalObjects[:] = []
SignalNames[:] = []
SignalReturns[:] = []
SignalFlags[:] = []
SignalPrototyp... | [
"def GetSignals(gobject):\n synop = ''\n desc = ''\n\n for i in range(len(SignalObjects)):\n if SignalObjects[i] == gobject:\n logging.info(\"Found signal: %s\", SignalNames[i])\n name = SignalNames[i]\n symbol = '%s::%s' % (gobject, name)\n sid = common.C... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Reads the $MODULE-hierarchy.txt file. This contains all the GObject subclasses described in this module (and their ancestors). It places them in the Objects array, and places their level in the object hierarchy in the ObjectLevels array, at the same index. GObject, the root object, has a level of 1. This also generates ... | def ReadObjectHierarchy(ifile):
Objects[:] = []
ObjectLevels[:] = []
if not os.path.isfile(ifile):
        logging.debug('no *-hierarchy.txt')
return
INPUT = open(ifile, 'r', encoding='utf-8')
# Only emit objects if they are supposed to be documented, or if
# they have documented chil... | [
"def GetHierarchy(gobject, hierarchy):\n # Find object in the objects array.\n found = False\n children = []\n level = 0\n j = 0\n for i in range(len(Objects)):\n if found:\n if ObjectLevels[i] <= level:\n break\n\n elif ObjectLevels[i] == level + 1:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Reads the $MODULE.interfaces file. | def ReadInterfaces(ifile):
Interfaces.clear()
if not os.path.isfile(ifile):
return
INPUT = open(ifile, 'r', encoding='utf-8')
for line in INPUT:
line = line.strip()
ifaces = line.split()
gobject = ifaces.pop(0)
if gobject in KnownSymbols and KnownSymbols[gobje... | [
"def _load_interfaces(self):\n self._load_devices()\n try:\n r = self.call_api(endpoint=\"/ports?columns=port_id,device_id,ifName\")\n self.interfaces = json.loads(r.text, object_pairs_hook=AttrDict)\n except requests.exceptions.HTTPError as err:\n raise Librenm... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This reads in the $MODULE.prerequisites file. | def ReadPrerequisites(ifile):
Prerequisites.clear()
if not os.path.isfile(ifile):
return
INPUT = open(ifile, 'r', encoding='utf-8')
for line in INPUT:
line = line.strip()
prereqs = line.split()
iface = prereqs.pop(0)
if iface in KnownSymbols and KnownSymbols[if... | [
"def read_requirements() -> List[str]:\n\n with open_local(REQUIREMENTS_TXT) as req_file:\n return [line.strip() for line in req_file.readlines() if line.strip()]",
"def _load_requirements(path):\n with open(path) as fp:\n deps = []\n for line in fp:\n if _is_ignored_requirem... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Generate a line art tree. Adds unicode line art to a pre-indented string array and returns it as a multiline string. | def AddTreeLineArt(tree):
# iterate bottom up over the tree
for i in range(len(tree) - 1, -1, -1):
# count leading spaces
m = re.search(r'^([^<A-Za-z]*)', tree[i])
indent = len(m.group(1))
# replace with ╰───, if place of ╰ is not space insert ├
if indent > 4:
... | [
"def indented_tree_line_generator(el, max_lines=None):\n gen = tree_line_generator(el, max_lines)\n for start_ref, end_ref, indentation_level, line in gen:\n # Escape line\n if line.startswith(\">\"):\n line = \"\\\\\" + line\n yield start_ref, end_ref, \"> \" * indentation_lev... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
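The regex above counts the leading non-letter characters of each tree line to find its indent; a quick illustration (the sample line is hypothetical):

```python
import re

line = '      <GInitiallyUnowned>GtkWidget'
m = re.search(r'^([^<A-Za-z]*)', line)
print(len(m.group(1)))  # 6: leading characters before the symbol starts
```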
Check if a symbol is an object. It uses the global Objects array. Note that the Objects array only contains classes in the current module and their ancestors, not all GObject classes. | def CheckIsObject(name):
root = ObjectRoots.get(name)
# Let GBoxed pass as an object here to get -struct appended to the id
# and prevent conflicts with sections.
return root and root != 'GEnum' and root != 'GFlags' | [
"def _is_object_type(attribute_info):\n lsa_type, get_type, cmo_type = _get_attribute_types(attribute_info)\n return lsa_type == alias_constants.OBJECT or \\\n (_is_type_an_unknown_type(lsa_type) and\n (get_type == alias_constants.OBJECT or (cmo_type == alias_constants.OBJECT)))",
"def ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the file line where the symbol docs were taken from. | def GetSymbolSourceLine(symbol):
return SourceSymbolSourceLine.get(symbol, 0) | [
"def get_doc_start():\n start = \"*** START OF THIS PROJECT GUTENBERG EBOOK THE ADVENTURES OF SHERLOCK HOLMES ***\"\n with open(filename, \"r\") as f:\n for num, line in enumerate(f, 1):\n if start in line:\n x = num\n start_line = 1 + x\n f.close... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns a Domain object that is the union of the original object with domain (also a Domain object). | def union(self, domain): | [
"def __create_domain_objs(self, domain):\n\n new_objs = {'URI': None,\n 'Whois': None,\n 'DNSQueryV4': None,\n 'DNSResultV4': None,\n 'ipv4': None,\n 'DNSQueryV6': None,\n 'DNSResultV6': None,\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Computes diameter, volume and uniformity parameter v of the domain for later use | def compute_parameters(self):
diameter = np.sqrt(np.sum([(self.bounds[i][1] - self.bounds[i][0])**2 for i in range(self.n)]))
volume = np.array([self.bounds[i][1] - self.bounds[i][0] for i in range(self.n)]).prod()
return diameter, volume, 1.0 | [
"def di_dv(self, v: float) -> float:\n raise NotImplementedError",
"def test_compute_domain_volume(p):\n Ω = (-2, 3)\n mesh, master, dofh, _map = computational_domain(p=p, n_elm=4, Ω=Ω)\n u, detJ = np.ones_like(dofh.dgnodes), _map._detJ[0]\n domain_volume = np.sum(np.dot(master.M, detJ*u))\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
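The two formulas above are easy to check by hand; a minimal sketch (the bounds value below is hypothetical, in the same [(low, high), ...] format) for a 3x4 rectangle, whose diagonal is 5 and area is 12:

```python
import numpy as np

# Hypothetical bounds for a 3-by-4 rectangle.
bounds = [(0.0, 3.0), (1.0, 5.0)]

diameter = np.sqrt(np.sum([(hi - lo) ** 2 for lo, hi in bounds]))
volume = np.prod([hi - lo for lo, hi in bounds])
print(diameter, volume)  # 5.0 12.0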
Returns the bounding nBox | def bbox(self):
lower = np.array([[self.nboxes[i].bounds[j][0] for j in range(self.n)]
for i in range(len(self.nboxes))]).min(axis=0)
upper = np.array([[self.nboxes[i].bounds[j][1] for j in range(self.n)]
for i in range(len(self.nboxes))]).max(axis=0... | [
"def boundingBox(self):\r\n\t\tfrom blur3d.lib.cartesian import BoundingBox, Point\r\n\t\tp1, p2 = mxs.nodeGetBoundingBox(self.nativePointer(), mxs.matrix3(1))\r\n\t\treturn BoundingBox(Point.newFromMaxPoint(p1), Point.newFromMaxPoint(p2))",
"def boundingBox(self):\n xpos = self.xpos\n\n minXY = np.... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Draws N samples uniformly from the union of disjoint nBoxes | def sample_uniform(self, N):
volumes = [nbox.volume for nbox in self.nboxes]
weights = volumes/np.sum(volumes)
np.random.seed()
select = np.random.choice(np.arange(len(volumes)), p=weights, size=N)
samples = np.array([nbox.sample_uniform(N) for nbox in self.nboxes])
retur... | [
"def draw_uniform_sample(choices: List[T], n: int) -> List[T]:\n return random.default_rng().choice(a=choices, size=n)",
"def draw_sample(sample_size, n):\n sample = set()\n i = 0\n while i < sample_size:\n sample.add(draw_element(n))\n if len(sample) < 1 + 1:\n continue\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Projects the points in 'points' onto the domain (i.e. returns x = argmin_{s \in domain} ||s - x||_2^2). For a union of disjoint nBoxes we can just project on each nBox and take the point with the minimum distance. | def project(self, points):
proj_points = np.array([nbox.project(points) for nbox in self.nboxes])
dists = np.array([np.linalg.norm(points - proj, axis=1) for proj in proj_points])
return proj_points[np.argmin(dists, 0), np.arange(proj_points.shape[1])] | [
"def gen_project(self, points, mats):\n proj_points = np.array([nbox.project(points) for nbox in self.nboxes])\n dists = np.array([np.einsum('ij,ij->i',np.einsum('ijk...,ik...->ij...', mats, proj-points), proj-points) for proj in proj_points]) \n return proj_points[np.argmin(dists, 0), np.... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
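The per-box projection is not shown here; for an axis-aligned box it presumably reduces to clipping each coordinate to its interval. A minimal sketch under that assumption (`project_onto_box` is a hypothetical stand-in for `nBox.project`):

```python
import numpy as np

def project_onto_box(points, bounds):
    # Euclidean projection onto an axis-aligned box is a
    # coordinate-wise clip to [low, high] in each dimension.
    low = np.array([b[0] for b in bounds])
    high = np.array([b[1] for b in bounds])
    return np.clip(points, low, high)

points = np.array([[2.0, -1.0], [0.5, 0.5]])
print(project_onto_box(points, [(0, 1), (0, 1)]))
# [[1.  0. ]
#  [0.5 0.5]]
```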
Performs a generalized projection of the points in 'points' onto the domain, returning x = argmin_{s \in domain} (s - x)^T A (s - x). For a union of disjoint nBoxes we can just project on each nBox and take the point with the minimum distance. | def gen_project(self, points, mats):
proj_points = np.array([nbox.project(points) for nbox in self.nboxes])
dists = np.array([np.einsum('ij,ij->i',np.einsum('ijk...,ik...->ij...', mats, proj-points), proj-points) for proj in proj_points])
return proj_points[np.argmin(dists, 0), np.arange(p... | [
"def project(self, points):\n proj_points = np.array([nbox.project(points) for nbox in self.nboxes])\n dists = np.array([np.linalg.norm(points - proj, axis=1) for proj in proj_points])\n return proj_points[np.argmin(dists, 0), np.arange(proj_points.shape[1])]",
"def get_nearest_mesh_value(sel... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
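The nested einsum above evaluates the quadratic form (s - x)^T A (s - x) for each candidate projection in a batch. A minimal check on a single 2-D point (toy values; the einsum signatures are simplified to the non-ellipsis case):

```python
import numpy as np

x = np.array([[0.0, 0.0]])                # original point
s = np.array([[1.0, 2.0]])                # candidate projection
A = np.array([[[2.0, 0.0], [0.0, 1.0]]])  # one metric matrix per point

d = s - x
# Batched (s - x)^T A (s - x): first A @ d per point, then dot with d.
quad = np.einsum('ij,ij->i', np.einsum('ijk,ik->ij', A, d), d)
print(quad)  # [6.] == 2*1**2 + 1*2**2
```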
Returns an array with the vertices of the DifferenceOfnBoxes. This is just the union of the set of vertices from the individual nBoxes. | def vertices(self):
return np.vstack([self.outer.vertices(), np.vstack([nbox.vertices() for nbox in self.inner])]) | [
"def box_vertices(cls, nodes):\n q = [[[1,1,0],[0,1,0],[0,1,1],[1,1,1]], \\\n [[1,0,1],[0,0,1],[0,0,0],[1,0,0]], \\\n [[1,1,1],[0,1,1],[0,0,1],[1,0,1]], \\\n [[1,0,0],[0,0,0],[0,1,0],[1,1,0]], \\\n [[0,1,1],[0,1,0],[0,0,0],[0,0,1]], \\\n [[1,1,0],[1... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Draws N samples uniformly from the unit k-simplex. This is just sampling from a Dirichlet distribution with all parameters equal to 1. | def sample_uniform(self, N):
np.random.seed()
return np.random.dirichlet([1]*self.k, N) | [
"def draw_uniform_sample(choices: List[T], n: int) -> List[T]:\n return random.default_rng().choice(a=choices, size=n)",
"def draw_weighted_sample(\n choices: List[T],\n probabilities: List[float],\n n: int,\n) -> List[T]:\n return random.default_rng().choice(\n a=choices,\n size=n,\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
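Dirichlet(1, ..., 1) is the flat distribution on the simplex, so the samples are nonnegative and sum to 1 by construction; a quick check (toy sizes):

```python
import numpy as np

k, N = 3, 5
samples = np.random.dirichlet([1] * k, N)
print(samples.shape)         # (5, 3)
print(samples.sum(axis=1))   # each row sums to 1 (up to float rounding)
print((samples >= 0).all())  # True
```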
Returns a UnionOfDisjointnBoxes with specified v | def vboxes(n, v):
a, b = (1-v)**(1/n), v**(1/n)
return UnionOfDisjointnBoxes([nBox([(0,a),]*n), nBox([(-b,0),]*n)]) | [
"def vL(v, Npath=None, epsilon=0):\n if v == 1:\n L = nBox([(0,1),(0,1)])\n else: \n a = (1+v, 1-v)\n b = (v, 1)\n L = UnionOfDisjointnBoxes([nBox([(0,b[0]), (0,b[1])]), nBox([(b[0], a[0]), (0,a[1])])])\n L.v = v\n if Npath is not None:\n if v == 1:\n p... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
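Since a**n = 1 - v and b**n = v, the two boxes have volumes 1 - v and v, so the union always has total volume 1; a tiny numeric check (toy values):

```python
n, v = 3, 0.25
a, b = (1 - v) ** (1 / n), v ** (1 / n)
print(a ** n, b ** n, a ** n + b ** n)  # ~0.75 ~0.25 ~1.0 (up to rounding)
```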
Returns an L-shaped UnionOfDisjointnBoxes with specified v in dimension 2 | def vL(v, Npath=None, epsilon=0):
if v == 1:
L = nBox([(0,1),(0,1)])
else:
a = (1+v, 1-v)
b = (v, 1)
L = UnionOfDisjointnBoxes([nBox([(0,b[0]), (0,b[1])]), nBox([(b[0], a[0]), (0,a[1])])])
L.v = v
if Npath is not None:
if v == 1:
path = (1-epsilo... | [
"def vboxes(n, v):\n a, b = (1-v)**(1/n), v**(1/n)\n return UnionOfDisjointnBoxes([nBox([(0,a),]*n), nBox([(-b,0),]*n)])",
"def lattice(u, v, dtype=np.float32):\n L = np.zeros((len(u), len(v), 2), dtype=dtype)\n for i in range(len(u)): L[i,:,1] = v\n for j in range(len(v)): L[:,j,0] = u\n return... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Deletes a finished or unfinished corpus from the database and removes all downloaded files. | def removeCorpus(request):
corpusid = request.GET["corpusid"] if request.method == "GET" else request.POST["corpusid"]
session = Session.objects.all().filter(id=corpusid).first()
folder = os.path.join(settings.BASE_PROJECT_DIR, session.folder)
manager = TweetIO.getManager()
fetcher = manager.get(co... | [
"def delete_corpus_directory(dirname=CORPUS_DIR):\n shutil.rmtree(dirname, ignore_errors=True)",
"def clean_documents():\n write_message(\"\"\"CLEANING OF OBSOLETED DELETED DOCUMENTS STARTED\"\"\")\n write_message(\"select id from bibdoc where status='DELETED' and NOW()>ADDTIME(modification_date, '%s 0:0... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Resumes a corpus creation process (by restarting its subprocess). | def resumeCorpus(request):
sid = request.GET["id"]
session = Session.objects.all().filter(id=sid).first()
session.working = True
session.completed = False
session.save()
folderPath = os.path.join(settings.BASE_PROJECT_DIR, session.folder)
csvFile = open(os.path.join(folderPath, "tweets.csv"... | [
"def resume(self):\n self._call(\"resume\")",
"def resume(self, pid):\n pass",
"def prep_resume(self) -> None:\n pass",
"def resume(self):\n assert self.running\n\n self._paused = False\n\n for process in self.processes:\n process.resume()",
"def resume(s... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Computes the Ncontrast loss. | def Ncontrast(x_dis, adj_label, tau = 1):
x_dis = torch.exp( tau * x_dis)
x_dis_sum = torch.sum(x_dis, 1)
x_dis_sum_pos = torch.sum(x_dis*adj_label, 1)
loss = -torch.log(x_dis_sum_pos * (x_dis_sum**(-1))+1e-8).mean()
return loss | [
"def contrastive_loss(y_true, y_pred):\n margin = 1\n return K.mean(y_true * K.square(y_pred) +\n (1 - y_true) * K.square(K.relu(margin - y_pred)))",
"def contrastive_loss(y_true, y_pred):\n margin = 1\n return K.mean(y_true * K.square(y_pred) +\n (1 - y_true) * K.squ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
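A minimal usage sketch of the loss above (toy tensors; the 3-node similarity matrix and 0/1 adjacency mask below are hypothetical):

```python
import torch

def Ncontrast(x_dis, adj_label, tau=1):
    # Exponentiate scaled similarities, then contrast the mass on
    # labeled neighbors against the total mass for each node.
    x_dis = torch.exp(tau * x_dis)
    x_dis_sum = torch.sum(x_dis, 1)
    x_dis_sum_pos = torch.sum(x_dis * adj_label, 1)
    return -torch.log(x_dis_sum_pos * (x_dis_sum ** (-1)) + 1e-8).mean()

x_dis = torch.rand(3, 3)                  # pairwise similarities
adj_label = torch.tensor([[1., 1., 0.],
                          [1., 1., 0.],
                          [0., 0., 1.]])  # neighborhood mask
print(Ncontrast(x_dis, adj_label, tau=1))  # scalar loss
```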