| query (string) | document (string) | negatives (list) | metadata (dict) |
|---|---|---|---|
Return a prettytable styled for use in the shell. field_names is a list of table header strings. | def get_default_table(field_names, widths=None):
table = prettytable.PrettyTable(field_names)
table.border = True
table.hrules = prettytable.ALL
table.vertical_char = ' '
table.junction_char = '-'
def left_align(name):
# Try different ways of setting the alignment to support older versi... | [
"def restructured_table(column_names, column_ids, object_list, truncate_len=13):\r\n single_cell_border = \"+\" + (truncate_len+2) * \"-\"\r\n border = len(column_names) * single_cell_border + \"+\"\r\n table = \"\\n\" + border + \"\\n\"\r\n # Column Headers first\r\n for column in column_names:\r\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Render device state in a table. | def dstate():
field_names = ["Name", "Parameters"]
table = get_default_table(field_names)
for name, device in _current_instances(Device):
values = _get_param_value_table(device)
table.add_row([name, values])
print(table.get_string()) | [
"def render(self, output):\n self._table._output = output\n\n return self._table.render()",
"def circuit_data_table(circuit: QuantumCircuit) -> wid.HTML:\n\n circuit = circuit.decompose()\n ops = circuit.count_ops()\n num_nl = circuit.num_nonlocal_gates()\n\n html = \"<table>\"\n html... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Abort all actions related to parameters on all devices and lock the devices. | def abort():
from concert.devices.base import Device
tuples = _current_instances(Device)
return device_abort(zip(*tuples)[1]) | [
"def commsUnkillAll(self):\n\n for dev in indigo.devices.itervalues(\"self\"):\n indigo.device.enable(dev, value=True)",
"def commsKillAll(self):\n\n for dev in indigo.devices.itervalues(\"self\"):\n indigo.device.enable(dev, value=False)",
"def cancel_all_others(self):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Handles get provider logo request. Returns logo if one exists. | def getProviderLogo(request):
if request.method != 'GET':
return
provider_id = request.GET.get('id', None)
provider = None
print provider_id
if provider_id:
provider = Provider.get_by_id(int(provider_id))
if provider is None:
if not check_session(request):
re... | [
"def get_share_logo(ticker):\n stock_logo_url = \"/stock/\" + ticker + \"/logo\"\n ticker_logo = requests.get(IEX_BASE+stock_logo_url)\n logo = ticker_logo.json()\n return logo['url']",
"def find_image(\n provider: \"openstack::Provider\", os: \"std::OS\", name: \"string\" = None\n) -> \"string\":\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Updates the provider's logo | def updateLogo(request):
if not check_session(request):
return HttpResponseRedirect('/login')
if not request.session['is_provider']:
return HttpResponseRedirect('/login')
provider = Provider.get_by_id(request.session['user_id'])
provider.logo = request.body
if request.body:
... | [
"def logo():",
"def logo_image(self, logo_image):\n self._logo_image = logo_image",
"def branding_logo(self, branding_logo):\n\n self._branding_logo = branding_logo",
"def company_logo(self, company_logo):\n\n self._company_logo = company_logo",
"def updateImage(self):\n self.ima... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Updates the provider's document for dwolla verification | def updateDoc(request):
if not check_session(request):
return HttpResponseRedirect('/login')
if not request.session['is_provider']:
return HttpResponseRedirect('/login')
provider = Provider.get_by_id(request.session['user_id'])
if 'file' in request.FILES and request.POST.get('docType'... | [
"def put(self):\n if not self.validate_agreement_document('update'):\n return\n document = upload_file(self.request)\n self.request.validated['agreement'].documents.append(document)\n if save_tender(self.request):\n self.LOGGER.info('Updated tender agreement documen... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Given a list of nodes, this tool will create a 2 x 2 grid containing 4 windows of equal size. We will iterate over each node, looking at its x & y values. We will apply floor division (//) to the x & y values and observe the quotients, which will represent the row and column in which the node should be added. Then we ... | def window_tool(directory, nodes_raw, lowerT, upperT, layout):
print timestamp(), 'Windowing starting...'
sys.stdout.flush()
# Define the iterative function for drawing the adaptive grid
def window_level(minX, minY, windowWidth, gridId):
"""
This collects node names belonging to a 2 x... | [
"def window_level(minX, minY, windowWidth, gridId):\n W = windowWidth\n\n # Write the grid for this level to a file\n midX = minX + W\n midY = minY + W\n row = [minX, midY, minX + W * 2, midY]\n f.writerow(row)\n row = [midX, minY, midX, minY + W * 2]\n f.writ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
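The floor-division bucketing the docstring describes is easy to show in isolation. A minimal sketch with hypothetical inputs (`nodes`, `min_x`, `min_y`, `window_width` are illustrative stand-ins, not the tool's real data structures):

```python
# Assign each (x, y) node to a cell of a 2 x 2 grid using floor division:
# the quotients give the column and row indices of the target window.
def assign_quadrants(nodes, min_x, min_y, window_width):
    grid = {(r, c): [] for r in (0, 1) for c in (0, 1)}
    for name, x, y in nodes:
        col = min(int((x - min_x) // window_width), 1)
        row = min(int((y - min_y) // window_width), 1)
        grid[(row, col)].append(name)
    return grid

# assign_quadrants([('a', 0.2, 0.9), ('b', 1.5, 0.1)], 0, 0, 1.0)
# -> 'a' lands in cell (0, 0), 'b' in cell (0, 1)
```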
This collects node names belonging to a 2 x 2 grid of 4 windows, with window lengths of W. minX and minY are the minimum coordinates of the grid. Note that gridId is just for debugging. | def window_level(minX, minY, windowWidth, gridId):
W = windowWidth
# Write the grid for this level to a file
midX = minX + W
midY = minY + W
row = [minX, midY, minX + W * 2, midY]
f.writerow(row)
row = [midX, minY, midX, minY + W * 2]
f.writerow(row)
... | [
"def window_tool(directory, nodes_raw, lowerT, upperT, layout):\n\n print timestamp(), 'Windowing starting...'\n sys.stdout.flush()\n\n\n # Define the iterative function for drawing the adaptive grid\n def window_level(minX, minY, windowWidth, gridId):\n \"\"\"\n This collects node names b... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Evaluates the cost of a given exchange map. | def evaluate_exchange_map_cost(exchanges, distances,
supply_site_code, sku_code) -> float:
exchange_grid = exchanges[exchanges['Supply Site Code']
== supply_site_code]
exchange_grid = exchange_grid[exchange_grid['SKU'] == sku_code]
cost = 0
o... | [
"def estimate_cost(self, board):\n pass",
"def calculate_cost_of_powerplant_dict(load_dict):\n total_cost = 0\n for powerplant, load_value in load_dict.items():\n total_cost = total_cost + powerplant.cost_euros_per_load(load_value)\n return total_cost",
"def setCostMap(sel... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Plots two histograms of the percentage of balanced stocks in a grid. The balance points are max stock and reorder stock. | def plot_stock_balance(data, data_name, balance=False) -> None:
supply_sites = data['Supply Site Code'].unique()
max_metric = []
reorder_metric = []
for i in range(len(supply_sites)):
supply_site = supply_sites[i]
supply_site_rows = (data['Supply Site Code'] == supply_site)
suppl... | [
"def plot_balance_trace(self):\n\n reward = pd.DataFrame(self.chess.balance_trace[80:]) # exclude first game (distort graph)\n reward.rolling(window = 500, min_periods = 0).mean().plot(figsize = (16, 9), title = \"Average Balance\")\n plt.show()",
"def plot_SHAP_charts():\n plt.rcParams.up... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
sky shade this block. | def sky_shade(self, brightness=0xf):
if not brightness:
return 0x0
return brightness | [
"def spice_bloom(self, x=-1, y=-1):\n w, h = self.surface.get_size()\n if x < 0 or x >= w or y < 0 or y >= h:\n x = random.randint(1, w-2)\n y = random.randint(1, h-2)\n r, g, b, a = self.surface.get_at((x, y))\n while (r, g, b) != self.sand_colour and (r, g... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
creates chunks out of 4 mapblocks. gets the height of each block, determines where it starts and where it ends. creates higher chunks first. returns chunks, bitmask, add_bitmask | def _chunkify(mb1, mb2, mb3, mb4, continuous=False):
chunks = []
bitmask = 0x0
add_bitmask = 0x0
# first create the heightmap
height_map = numpy.zeros((16, 16), dtype=int)
# now create the tile map
tile_map = numpy.zeros((16, 16), dtype=int)
#####
if mb1 is not None:
height_m... | [
"def regenerate_heightmap(self):\n\n for x, z in product(xrange(16), repeat=2):\n for y in range(127, -1, -1):\n if self.blocks[x, z, y]:\n break\n\n self.heightmap[x, z] = y",
"def calculate_chunk_bounds(index):\n # Remember: z increments and wrap... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
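The `bitmask` return value appears to follow the common convention of one bit per 16-block-high section that actually contains data; a hedged, self-contained sketch of just that part (an assumption about the convention, not the original implementation):

```python
# Set bit i for every 16-block-high section that holds any blocks.
def section_bitmask(section_has_data):
    mask = 0x0
    for i, present in enumerate(section_has_data):
        if present:
            mask |= 1 << i
    return mask

assert section_bitmask([True, False, True]) == 0b101
```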
Get all input contents from DDM. | def get_input_contents(self):
try:
ret_files = []
return ret_files
except Exception as ex:
self.logger.error(ex)
self.logger.error(traceback.format_exc())
raise exceptions.IDDSException('%s: %s' % (str(ex), traceback.format_exc())) | [
"def readAll(input: 'SoInput') -> \"SoSeparator *\":\n return _coin.SoDB_readAll(input)",
"def _get_contents(self, dikt):\n content_dict = dikt.get('content')\n if content_dict is None:\n return []\n\n contents = []\n for _format, info in content_dict.items():\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
XOR of two strings. This function processes only the shorter of the two strings, ignoring the remainder of the longer one. | def strxor(a, b):
mlen = min(len(a), len(b))
a, b, xor = bytearray(a), bytearray(b), bytearray(mlen)
for i in xrange(mlen):
xor[i] = a[i] ^ b[i]
return bytes(xor) | [
"def xor_str(a, b):\n return b''.join(map(int2byte, [x ^ y for x, y in zip_longest(iterbytes(a), iterbytes(b), fillvalue=0)]))",
"def xor(b1, b2):\n answer = \"\"\n for i in range(len(b1)):\n x, y = b1[i], b2[i]\n assert(x in \"01\" and y in \"01\")\n if x == y:\n answer +... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
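The function above is Python 2 (`xrange`); its truncating behaviour is the same as this small Python 3 sketch:

```python
# XOR truncates to the shorter input: only the first 3 bytes participate here.
a, b = b'\x01\x02\x03\xff', b'\x01\x00\x02'
mlen = min(len(a), len(b))
assert bytes(x ^ y for x, y in zip(a[:mlen], b[:mlen])) == b'\x00\x02\x01'
```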
Calculate the required pad size to fill up the blocksize | def pad_size(data_size, blocksize):
if data_size < blocksize:
return blocksize - data_size
if data_size % blocksize == 0:
return 0
return blocksize - data_size % blocksize | [
"def _get_total_same_padding(interval_size, kernel_size, stride, dilation):\n effective_kernel_size = (kernel_size - 1) * dilation + 1\n required_total_padding = (interval_size - 1) * \\\n stride + effective_kernel_size - interval_size\n return required_total_padding",
"def get_siz... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
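A few worked values make the three branches concrete (the definition is repeated from the row above so the snippet runs on its own):

```python
def pad_size(data_size, blocksize):
    if data_size < blocksize:
        return blocksize - data_size
    if data_size % blocksize == 0:
        return 0
    return blocksize - data_size % blocksize

assert pad_size(5, 16) == 11   # data smaller than one block
assert pad_size(16, 16) == 0   # already block-aligned
assert pad_size(20, 16) == 12  # pad up to the next block boundary
```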
Select CTC states according to best ids | def index_select_state(self, state, best_ids):
r, s, f_min, f_max, scoring_idmap = state
# convert ids to BHO space
n_bh = len(s)
n_hyps = n_bh // self.batch
vidx = tf.reshape(best_ids + tf.reshape(self.idx_b * (n_hyps * self.odim), [-1, 1]), [-1])
# select hypothesis sco... | [
"def getState(id):",
"def getStates():",
"def allStates():",
"def selectXClassifierT(self):\r\n \r\n actionSetSize = len(self.clSet) \r\n tSize = int(actionSetSize*cons.theta_Select) # sets the number of items in the action set to be included in the tournament selection\r\n posList... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Initialize the Catalog instance with dictionary data. | def __init__(self, data=None):
super(Catalog, self).__init__(data) | [
"def __init__(self, meta_root=None, sign=True):\n\n self.__data = {}\n CatalogPartBase.__init__(self, name=\"catalog.attrs\",\n meta_root=meta_root, sign=sign)\n\n if self.loaded:\n # If the data is already seen as 'loaded' durin... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Display the Catalog as HTML for a rich display in IPython. | def _repr_html_(self): # pragma: no cover
return Utils.render_html('catalog.html', catalog=self) | [
"def show(self):\r\n self.render_html()\r\n from IPython.display import display, HTML\r\n\r\n display(HTML(self.html))",
"def showCatalog():\n state = generateState(login_session, 'state')\n categories = session.query(Category).all()\n return render_template('allCategories.html', cat... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
given the content of a recent job mail, return the link to the sfbi website | def __extract_link(self, content):
flag = 0
link = ''
for line in content.splitlines():
# some mails contain corrupted changed links because of some outlook shit that happened
# during the second half of october 2017. Detect and skip them.
if re.search('safeli... | [
"def display_job_listings_with_apply_link():\r\n result_elements = get_job_listings_from_website()\r\n relevant_jobs = result_elements.find_all('h2',string=re.compile(\"Data Scien*\")) \r\n # print(relevant_jobs)\r\n #print(results.prettify())\r\n for job in relevant_jobs:\r\n link = job.find... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns one charset and payload from a mail, multipart or not. Takes the first charset that is defined in a multipart message, and returns this part's payload. Charset can be None in some weird case that should be dismissed. | def get_one_charset_payload(msg):
if msg.is_multipart():
for part in msg.walk():
if part.get_content_charset() is not None:
return part.get_content_charset(), part.get_payload(decode=True)
else:
if msg.get_content_charset() is None:
... | [
"def get_email_content(self, email):\r\n\r\n if email.is_multipart():\r\n self.log('Extracting email contents from multipart message')\r\n\r\n magic_type = 'multipart/alternative'\r\n payload_types = dict((p.get_content_type(), i) for i, p in enumerate(email.get_payload()))\r... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Import a solved scenario, i.e. the manoeuvre plan, from pickle file 'filename.pickle' | def import_solved_scenario(self, filename):
# Actual path
abs_path = os.path.dirname(os.path.abspath(__file__))
scenario_path = os.path.join(abs_path, '../example/' + filename + '.pickle')
# Try to import the file
try:
with open(scenario_path, 'rb') as file:
... | [
"def importStep(fileName):\n #Now read and return the shape\n try:\n rshape = Part.read(fileName)\n\n # Extract all solids and surfaces\n geometry = []\n for solid in rshape.Solids:\n geometry.append(Shape.cast(solid))\n\n for shell in rshape.Shells:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Export a solved scenario into pickle file 'scenario_name_date.pickle' in the /example folder. | def export_solved_scenario(self, manoeuvre_plan):
# Actual path
abs_path = os.path.dirname(os.path.abspath(__file__))
# Check if "/example" folder exists
if not os.path.exists(os.path.join(abs_path, '../example')):
os.makedirs(os.path.join(abs_path, '../example'))
... | [
"def save_to_file(the_experiment, filename):\n #Pickle dumps\n datas = dumps(the_experiment)\n f = open(filename, 'w')\n f.write(datas)\n f.close()",
"def save_experiment(self, descr='_experiment'):\n st = datetime.datetime.fromtimestamp(t.time()).strftime('%Y-%m-%d-%H')\n name = st+d... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Parse scenario and import initial conditions from .yaml files in the /cfg folder. | def import_yaml_scenario(self, filename, ic_name='std_ic'):
# Opening scenario file
abs_path = os.path.dirname(os.path.abspath(__file__))
scenario_path = os.path.join(abs_path, '../cfg/' + filename + '.yaml')
scenario_file = file(scenario_path, 'r')
scenario = yaml.load(scenario... | [
"def test_classification(init_env, config_file):\n run_all_steps(init_env, config_file)",
"def parse_feature(basedir: str, filename: str, encoding: str = \"utf-8\") -> Feature:\n __tracebackhide__ = True\n abs_filename = os.path.abspath(os.path.join(basedir, filename))\n rel_filename = os.path.join(os... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test parsing of mzml file to spectrum objects | def test_load_from_mzml():
module_root = os.path.join(os.path.dirname(__file__), "..")
mzml_file = os.path.join(module_root, "testdata", "testdata.mzml")
spectrums = list(load_from_mzml(mzml_file))
assert len(spectrums) == 10, "Expected 10 spectrums."
assert int(spectrums[5].get("precursor_mz")) =... | [
"def test_mtz_parser_1(self):\n input_mtz = os.path.join(EXAMPLE_DIR, \"toxd\", \"toxd.mtz\")\n mp = MtzParser(input_mtz)\n mp.parse()\n self.assertEqual(np.round(mp.resolution, 1), 2.3)\n self.assertEqual(np.round(mp.cell.a, 4), 73.5820)\n self.assertEqual(np.round(mp.cell... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
separate projects in incomplete_assignments or not. 1. For projects in incomplete_assignments, show the resume button and disable the select button. 2. Otherwise, show the select button only. | def get_user_incomplete_assignments(self, worker, projects):
worker_hits = HIT.objects.filter(Q(project__in=projects), Q(user_id_list__icontains=worker.id))
now = datetime.datetime.now()
assignments = Assignment.objects.filter(Q(hit__in=worker_hits), Q(complete_indicator=False),
... | [
"def filter_projects(self):\n text = self.text_search.text().lower()\n for i in range(self.list_projects.count()):\n item = self.list_projects.item(i)\n item.setHidden(text not in item.text().lower())\n self.toogle_dev_tools(visible=self.dev_tool_visible)",
"def updatePr... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Calculate the pitch using haversine formula | def _calculate_pitch(self, lat_sat, long_sat, alt_sat, lat_drone, long_drone, alt_drone):
R = 6371000
lat_sat = math.radians(lat_sat)
lat_drone = math.radians(lat_drone)
long_sat = math.radians(long_sat)
long_drone = math.radians(long_drone)
delta_long = long_drone - lo... | [
"def pitch(eulers):\n return eulers[1]",
"def pitch_to_freq(pitch):\n return 440.*2**((pitch-69)/12)",
"def get_pyin_pitchtrack(audio, fs, tonic=440.0):\n params = {\"outputunvoiced\": 2.0}\n data = vamp.collect(audio, fs, \"pyin:pyin\", output='smoothedpitchtrack', parameters=params)\n hop, ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
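The body above is cut off; a plausible completion, assuming pitch means the elevation angle from the drone to the satellite, i.e. `atan2` of the altitude difference over the haversine ground distance (an assumption about the elided code, not a reconstruction of it):

```python
import math

def calculate_pitch(lat_sat, long_sat, alt_sat, lat_drone, long_drone, alt_drone):
    R = 6371000  # Earth radius in metres
    phi1, phi2 = math.radians(lat_drone), math.radians(lat_sat)
    d_phi = phi2 - phi1
    d_lambda = math.radians(long_sat - long_drone)
    a = (math.sin(d_phi / 2) ** 2
         + math.cos(phi1) * math.cos(phi2) * math.sin(d_lambda / 2) ** 2)
    ground = 2 * R * math.asin(math.sqrt(a))  # haversine ground distance
    return math.degrees(math.atan2(alt_sat - alt_drone, ground))
```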
Calculate the bearing based on antenna and uav gps coordinates | def _calculate_bearing(self, lat_sat, long_sat, lat_drone, long_drone):
lat_sat = math.radians(lat_sat)
lat_drone = math.radians(lat_drone)
long_sat = math.radians(long_sat)
long_drone = math.radians(long_drone)
delta_long = long_drone - long_sat
delta_lat = lat_drone - ... | [
"def get_bearing(aLocation1, aLocation2): \n off_x = aLocation2.lon - aLocation1.lon\n off_y = aLocation2.lat - aLocation1.lat\n bearing = 90.00 + math.atan2(-off_y, off_x) * 57.2957795\n if bearing < 0:\n bearing += 360.00\n return bearing",
"def get_bearing(self, aLocation1, aLocation2):\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
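Again truncated; the standard initial-bearing (forward azimuth) formula between two GPS points, offered as a sketch of what the elided body likely computes:

```python
import math

def calculate_bearing(lat_sat, long_sat, lat_drone, long_drone):
    phi1, phi2 = math.radians(lat_sat), math.radians(lat_drone)
    d_lambda = math.radians(long_drone - long_sat)
    x = math.sin(d_lambda) * math.cos(phi2)
    y = (math.cos(phi1) * math.sin(phi2)
         - math.sin(phi1) * math.cos(phi2) * math.cos(d_lambda))
    return (math.degrees(math.atan2(x, y)) + 360) % 360  # normalise to [0, 360)
```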
Test pagination header is documented | def test_pagination_header_documentation(self, app):
api = Api(app)
class CustomBlueprint(Blueprint):
PAGINATION_HEADER_FIELD_NAME = 'X-Custom-Pagination-Header'
blp = CustomBlueprint('test', __name__, url_prefix='/test')
@blp.route('/')
@blp.response(200)
... | [
"def test_pagination_header_documentation(self, app, openapi_version):\n app.config[\"OPENAPI_VERSION\"] = openapi_version\n api = Api(app)\n\n class CustomBlueprint(Blueprint):\n PAGINATION_HEADER_NAME = \"X-Custom-Pagination-Header\"\n\n blp = CustomBlueprint(\"test\", __nam... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Read in an ASCII map and return a graph representation of the map. Also return the starting vertex of the robot and the goal vertex. Return a map from vertex keys to (row, column) tuples of the vertex coordinates in the map. | def map_to_graph(filename):
arr = np.loadtxt(filename, dtype=object, comments=None, delimiter='\n')
nrows = len(arr) # Number rows in map
ncols = len(arr[0]) # Number columns in map
g = Graph() # Undirected graph
vert... | [
"def load_map(mapFilename):\n # TODO\n print \"Loading map from file...\"\n #建立图\n G = WeightedDigraph()\n #遍历每一行,建立对应的node还有edge\n with open(r'mit_map.txt') as f:\n for line in f:\n params = map(int,line.split())\n src = Node(params[0])\n dest = Node(params... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Compute the shortest-path tree rooted at the start vertex. Return the tree as a map of vertices v (excluding start) to edges e=(u, v). The vertex u is the preceding node along the shortest path to v. The edge is specified as an incoming edge in the case of a directed graph. The cloud map from Dijkstra's algorithm is used to det... | def shortest_path_tree(graph, start, cloud):
tree = Map() # Map vertices to parent edges
for vertex, _ in cloud:
if vertex is not start:
for edge in graph.incident_edges(vertex, out=False):
u = edge.opposite(vertex)
weight = edge.elemen... | [
"def shortest_path_tree(self, graph, start):\n\n distance_map, predecessor_map = \\\n self._create_distance_and_previous_maps(graph)\n\n distance_map[start] = 0\n\n queue = []\n queue.append(start)\n\n while len(queue) > 0:\n node = queu... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
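The `cloud` argument comes from a prior run of Dijkstra's algorithm (pairs of vertex and shortest distance). A self-contained sketch of producing such a cloud over a plain adjacency dict; the project's own `Graph` class is not shown here:

```python
import heapq

# Dijkstra on {vertex: [(neighbour, weight), ...]}; returns each reachable
# vertex mapped to its shortest distance from start.
def dijkstra_cloud(adj, start):
    cloud, pq = {}, [(0, start)]
    while pq:
        d, v = heapq.heappop(pq)
        if v in cloud:
            continue
        cloud[v] = d
        for u, w in adj.get(v, []):
            if u not in cloud:
                heapq.heappush(pq, (d + w, u))
    return cloud

assert dijkstra_cloud({'a': [('b', 2)], 'b': [('c', 3)]}, 'a') == {'a': 0, 'b': 2, 'c': 5}
```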
Iterate through edges backwards from goal to start. Return a list of tuples representing the (row, column) position of the characters in the ASCII map along the shortest path. | def calculate_shortest_path_coords(start, goal, tree, vert_map):
coords = []
vertex = tree[goal].opposite(goal) # Vertex directly preceding goal
while vertex is not start:
coord = vert_map[vertex] # (row, column) tuple of vertex in ASCII map
coords.append(coord)
edge = tree[vertex]... | [
"def bfs_paths(self, start, goal):\n queue = [(start, [start])]\n while queue:\n (vertex, path) = queue.pop(0)\n for next in self.get_adj(vertex) - set(path):\n if next == goal:\n yield path + [next]\n else:\n qu... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Solution to shortest path from the robot location to goal location. Return filename of solution file written to disk. | def robot_solution(filename):
filename = Path(filename) # Ensure filename is a Path object
# Read in ASCII map and return graph representation
graph, start, goal, vert_map = map_to_graph(filename)
    # Run Dijkstra's algorithm to calculate the shortest path length
cloud = shortest_path_length(graph, s... | [
"def write_solution(self):\n filename = self.create_output_name()\n v = self.get_solution()\n print('writing file:', filename)\n if v is not None:\n np.savetxt(filename, v)",
"def write_solutions(self):\n # take the name of the short read file and add _solution.txt\n title = self.fi... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Records the outcome of a single match between two players. If only one player ID is given, this indicates that a bye is to be given to that player. A null is recorded in the loser_pid column of the matches table. | def reportMatch(winner, loser=None):
with connect_to_db() as database:
if loser is None:
# So a bye is to be given to player `winner`.
            # Check to see whether player `winner` has been given a bye before.
query = "SELECT had_bye FROM players WHERE id=%s"
parameter ... | [
"def reportMatch(winner, loser):\n\n db = connect()\n c = db.cursor()\n\n # these following lines will retrieve\n query = \"SELECT score FROM players WHERE id = %s\"\n\n data = (winner, )\n c.execute(query, data)\n w_score = c.fetchone()\n w_score = int(w_score[0])\n\n data = (loser, )\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns a player's name based on a given ID number. | def id_to_name(player_id):
query = "SELECT name FROM players WHERE id=%s"
parameter = (player_id,)
with connect_to_db() as database:
database['cursor'].execute(query, parameter)
player_name = database['cursor'].fetchone()[0]
return player_name | [
"def get_player_name(self, player_number):\n p, q = self.players\n return p if self.__piece_type__(p) == player_number else q",
"def findPlayerName(dataPlayersLeague, playerId):\n for each in dataPlayersLeague:\n if each[\"personId\"] == playerId:\n return each[\"firstName\"] + ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Generates pairings given player IDs sorted into win groups. | def generate_pairings(win_groups):
# For each win group, try each combination of matches, checking for
# rematches.
pairings = []
for idx, win_group in enumerate(win_groups):
win_group_success = False
# Go through each pair in the win group, checking for rematches.
for pairs in a... | [
"def swissPairings():\n # get the current playerStandings and pairup and make a list\n # as the tuples are already sorted by the number of wins a\n # sequential pairing will pair players with equal wins or nearly\n # equal wins\n results = playerStandings()\n pairings = []\n i = 0\n while i ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Takes a list of lists and moves one item to the list specified from the next list. This function works in place upon the list of lists. | def move_item_to_list(list_of_lists, target_list_idx):
# Check to see if the next list exists
if target_list_idx + 2 > len(list_of_lists):
raise IndexError("No list to move an item from exists.")
# Add an element from the next group to the group specified in the arguments
list_of_lists[target_l... | [
"def list_replace(lst: list, old, new) -> None:\n idx = -1\n try:\n while True:\n i = lst.index(old, idx + 1)\n lst[i] = new\n except ValueError:\n pass",
"def move_to_next_startpoint(startpoint, list_size):\n return startpoint + list_size + 1",
"def replace_i... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns a player id of a player that hasn't already taken a bye. The query to the database makes sure to return the player with the fewest number of wins, to avoid a situation where one of the top players wins the tournament with a bye. | def select_player_for_bye():
query = """SELECT players.id
FROM players, num_wins
WHERE players.had_bye=FALSE and players.id = num_wins.id
ORDER BY num_wins.wins;"""
with connect_to_db() as database:
database['cursor'].execute(query)
non_bye_player_id... | [
"def getUnusedPointsPlayerID():\n global _unusedPointsPlayer, unusedPointsPlayerName\n if _unusedPointsPlayer:\n return _unusedPointsPlayer\n with getCur() as cur:\n cur.execute(\"SELECT Id from Players WHERE Name = ? AND\"\n \" MeetupName IS NULL\",\n (u... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create random pairings for before any matches have been played. | def random_pairing(standings, pairings):
# Randomly shuffle the standings in place.
shuffle(standings)
# Check to see if there are an odd number of players.
have_odd_players = False
if len(standings) % 2 != 0:
have_odd_players = True
# If we have an odd number of players, we need to de... | [
"def _initialisation_matching(self):\n\n\t\t# Order players, better ranked first\n\t\torderedPlayers = sorted(self.players)\n\t\t\n\t\tfirstPlayers = random.sample(orderedPlayers[:32], \n\t\t\t\t\t\t\t\t\t len(orderedPlayers[:32]))\n\t\totherPlayers = random.sample(orderedPlayers[32:], \n\t\t\t\t\t\t\t\t\t len(orde... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
For each question, a matching column in the tools sheet must be found | def validate_questions(data):
question_ids = [x[0] for x in data['questions'][1:]]
tool_columns = data["tools"][0]
for question in question_ids:
if question not in tool_columns:
sys.exit("Question %s is missing from tools, should be a column." % question) | [
"def errorCheckSubmission( self, answer):\n \n for colName in [\"Code\", \"Convention\", \"GroupOrder\"]:\n assert colName in answer.columns, \"We need a %s column in the master spreadsheet\" % colName",
"def validate_columns(data):\n validate_column_names(\n data[\"questions\"]... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Ensure that each row has the same number of columns, and that the required names are provided for each. For the tools file, there should be a column for each question_ defined in questions. | def validate_columns(data):
validate_column_names(
data["questions"][0],
["unique_id", "question", "options", "include", "order", "tooltip"],
"questions",
)
# The resource column names also need to include question ids
columns_required = [
"unique_id",
"name",
... | [
"def validate_questions(data):\n question_ids = [x[0] for x in data['questions'][1:]]\n tool_columns = data[\"tools\"][0]\n for question in question_ids:\n if question not in tool_columns:\n sys.exit(\"Question %s is missing from tools, should be a column.\" % question)",
"def errorChec... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
For each entry in tools, since we allow one or more answers, we must ensure that they correspond exactly to valid answers for questions. To do this, we generate a lookup for questions, and then iterate through tools to check each. If any resource is invalid, the user is notified and we exit with the error. | def validate_question_answers(data):
questions = generate_tsv_lookup(data["questions"])
tools = generate_tsv_lookup(data["tools"])
for resource_id, metadata in tools.items():
for question_id, answers in metadata.items():
if not question_id.startswith("question_"):
continu... | [
"def validate_questions(data):\n question_ids = [x[0] for x in data['questions'][1:]]\n tool_columns = data[\"tools\"][0]\n for question in question_ids:\n if question not in tool_columns:\n sys.exit(\"Question %s is missing from tools, should be a column.\" % question)",
"def test_mult... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Generate a tsv lookup of questions or tools, i.e. a dictionary where keys correspond to unique ids for the question or tool. | def generate_tsv_lookup(rows, included_only=False):
# Create a question columns lookup
columns = rows[0]
lookup = {columns.index(x): x for x in columns}
entries = {}
for row in rows[1:]:
entry = {}
for idx, key in lookup.items():
# For comma separated, we want a list
... | [
"def tqa_map(ex):\n return {\n 'inputs':\n tf.strings.join(\n [prefix, 'question:', ex['question']], separator=' '),\n 'targets': ex['answer']['value'],\n 'answers': ex['answer']['aliases'],\n }",
"def create_tool_from_suggestion():\n pass",
"def make_qid_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
apply time lag to return columns selected according to delta. Days to lag are contained in the lags list passed as argument. Returns a NaN-free dataset obtained by cutting the lagged dataset at head and tail | def applyTimeLag(dataset, lags, delta, back, target):
if target == 'CLASSIFICATION':
maxLag = max(lags)
columns = dataset.columns[::(2*max(delta)-1)]
for column in columns:
for lag in lags:
newcolumn = column + str(lag)
dataset[newcolumn] = d... | [
"def applyTimeLag(dataset, lags, delta):\n\n dataset.Return_Out = dataset.Return_Out.shift(-1)\n maxLag = max(lags)\n\n columns = dataset.columns[::(2*max(delta)-1)]\n for column in columns:\n for lag in lags:\n newcolumn = column + str(lag)\n dataset[newcolumn] = dataset[co... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
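A minimal pandas illustration of the lag-then-trim idea (hypothetical column names, not the original function):

```python
import pandas as pd

df = pd.DataFrame({'ret': [0.1, -0.2, 0.05, 0.3, -0.1]})
lags = (1, 2)
for lag in lags:
    df['ret' + str(lag)] = df['ret'].shift(lag)  # lagged copies of the column
df = df.iloc[max(lags):]  # cut the NaN head introduced by the largest lag
```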
Accepts a list of MARCXML records and invokes the parser for each. If an error occurs, None is returned and filter() removes it from the list | def parseMARC(records, marcRels):
logger.info('Transforming MARCXML records into SFR objects')
return list(filter(None, (transformMARC(r, marcRels) for r in records))) | [
"def clean (self, recs):\n\t\t## Main:\n\t\t# storage for output\n\t\terrors = []\n\t\tproc_recs = []\n\n\t\t# for each record\n\t\tfor i, r in enumerate (recs):\n\t\t\trec_id = self.id_fxn (r)\n\t\t\tself.message (\"Processing record index %s (id '%s')\" % (i, rec_id))\n\t\t\tnew_rec, errs = self.clean_rec (r)\n\t... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Accepts a marcalyx object and transforms the MARC record into an SFR data object. | def transformMARC(record, marcRels):
doabID = record[0]
dateIssued = record[1]
marcRecord = record[2]
logger.info('Transforming record {} into a SFR object'.format(doabID))
work = WorkRecord()
instance = InstanceRecord()
item = Format(source='doab', contentType='ebook')
# Add issued da... | [
"def parseMARC(records, marcRels):\n logger.info('Transforming MARCXML records into SFR objects')\n return list(filter(None, (transformMARC(r, marcRels) for r in records)))",
"def cli_fix_marc(options, explicit_recid_set=None, interactive=True):\n ffts = {}\n if explicit_recid_set is not None:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Extracts agent names and roles from the relevant MARC fields and appends SFR Agent objects to the current record. | def extractAgentValue(data, rec, field, marcRels):
for agentField in data[field]:
if len(agentField['a']) == 0: continue
name = agentField['a'][0].value
roleCode = agentField['4'][0].value if len(agentField['4']) > 0 else 'aut'
agentType = 'corporate' if field not in ['100', '700'] e... | [
"def transformMARC(record, marcRels):\n doabID = record[0]\n dateIssued = record[1]\n marcRecord = record[2]\n logger.info('Transforming record {} into a SFR object'.format(doabID))\n\n work = WorkRecord()\n instance = InstanceRecord()\n item = Format(source='doab', contentType='ebook')\n\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Extracts holdings data from MARC and adds it to the current SFR object as links. | def extractHoldingsLinks(holdings, instance, item):
itemURIs = set()
for holding in holdings:
if holding.ind1 != '4':
continue
try:
uri = holding.subfield('u')[0].value
itemURIs.add(parseHoldingURI(uri))
except (IndexError, DataError):
logg... | [
"def transformMARC(record, marcRels):\n doabID = record[0]\n dateIssued = record[1]\n marcRecord = record[2]\n logger.info('Transforming record {} into a SFR object'.format(doabID))\n\n work = WorkRecord()\n instance = InstanceRecord()\n item = Format(source='doab', contentType='ebook')\n\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Extracts subject fields from the MARC record and assigns them to the current SFR Work record. | def extractSubjects(data, rec, field):
subjectFields = ['a']
for subj in data[field]:
subject = {
'subject': [],
'subdivision': [],
'authority': None,
'uri': None
}
# Extract subject text from MARC, will add additional fields as it
... | [
"def get_subjects(marc_record,record):\n subject_fields = [] # gets all 65X fields\n for tag in ['600', '610', '611', '630', '648', '650',\n '651', '653', '654', '655', '656', '657',\n '658', '662', '690',\n '691', '696', '697', '698', '699']:\n all_fields ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Determine the most recent file from a list of files. | def getmostrecent(files):
if not isinstance(files, list):
files = [files]
if len(files) > 1:
whichout = files[0]
dt1 = dt.strptime(gettimestamp(whichout)[4:], "%b %d %H:%M:%S %Y")
for o in files[1:]:
dt2 = dt.strptime(gettimestamp(o)[4:], "%b %d %H:%M:%S %Y")
... | [
"def _find_latest_file(path):\n files = glob.glob(path)\n if files:\n return max(files, key=os.path.getctime)\n return None",
"def find_latest_modified_file(list_of_files):\n\n if not list_of_files:\n logger.warning('there were no files to check mod time for')\n re... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Execute slurm seff command on each outfile's slurm_job_id to ensure it ran without error. Exit otherwise. | def check_seff(outs):
print('checking seff')
jobid = os.environ['SLURM_JOB_ID']
for i,f in enumerate(outs):
pid = f.split("_")[-1].replace(".out", "")
if not pid == jobid:
seff, seffcount = '', 0
while isinstance(seff, list) is False:
# sometimes slurm... | [
"def end_slurm_script(opts, run_dir, output_dir):\n script = []\n \n script.append('# terminate slurm script')\n script.append('[[ -s {}.err ]] || rm {}.err'.format(*[opts['prog_name']]*2))\n script.append('')\n script.append('exit 0')\n script.append('')\n \n return script",
"def submi... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Using a list of sample names, find the realigned bamfiles. | def get_bamfiles(samps, pooldir):
print('getting bamfiles')
found = fs(op.join(pooldir, '04_realign'))
files = dict((samp, f.replace(".bai", ".bam")) for samp in samps for f in found if samp in f and f.endswith('.bai'))
if not len(files) == len(samps):
print('len(files) != len(samps)')
p... | [
"def map_trimmed_reads(fastqs, bam_file, sample_id):\n fq1=fastqs[0]\n fq2=fastqs[1]\n fq1u=fastqs[2]\n fq2u=fastqs[3]\n\n read_groups = ['@RG\\tID:{rgid}\\tSM:{rgid}\\tLB:{lb}'.format(rgid=sample_id, lb=sample_id),\n '@RG\\tID:{rgid}\\tSM:{rgid}\\tLB:{lb}'.format(rgid=sample_id, lb=sample_id+... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get samtools commands to reduce a bamfile to intervals in the bedfile. | def get_small_bam_cmds(bamfiles, bednum, bedfile):
smallbams = []
cmds = '''module load java\nmodule load samtools/1.9\n'''
for bam in bamfiles:
pool = op.basename(bam).split("_realigned")[0]
smallbam = f'$SLURM_TMPDIR/{pool}_realigned_{bednum}.bam'
cmd = f'''samtools view -b -L {bed... | [
"def parse_bam(args, bam, break_dict):\n seq_list = []\n with pysam.AlignmentFile(bam, 'rb') as in_bam:\n cutoff = False\n seq_ord = []\n seq_end_dict = {}\n for seq_desc in in_bam.header['SQ']:\n seq = seq_desc['SN']\n seq_end = seq_desc['LN']-1 # 0-based\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create command to call varscan. | def get_varscan_cmd(bamfiles, bedfile, bednum, vcf, ref, pooldir, program):
smallbams, smallcmds = get_small_bam_cmds(bamfiles, bednum, bedfile)
smallbams = ' '.join(smallbams)
ploidy = pklload(op.join(parentdir, 'ploidy.pkl'))[pool]
# if single-sample then set minfreq to 0, else use min possible allele... | [
"def do_nfv(self, cmd):\n\n # remove unwanted spaces to avoid invalid command error\n tmparg = self.clean_cmd(cmd)\n cmds = tmparg.split(';')\n if len(cmds) < 2:\n print(\"Required an ID and ';' before the command.\")\n elif str.isdigit(cmds[0]):\n if self._i... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create sh file for varscan command. | def make_sh(bamfiles, bedfile, shdir, pool, pooldir, program, parentdir):
num, ref, vcf = get_prereqs(bedfile, parentdir, pool, program)
cmd, finalvcf = get_varscan_cmd(bamfiles, bedfile, num,
vcf, ref, pooldir, program)
fields = '''-F ADP -F WT -F HET -F HOM -F NC -GF ... | [
"def write_VarScan_script(self, out, exe, ref, file1, file2):\n output_dir = os.path.join(out, self.out)\n script_file = os.path.join(output_dir, self.script_filename)\n output_snp = os.path.join(output_dir, 'output.snp')\n output_indel = os.path.join(output_dir, 'output.indel')\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get a list of paths to all of the bed files for ref.fa. | def get_bedfiles(parentdir, pool):
ref = pklload(op.join(parentdir, 'poolref.pkl'))[pool]
beddir = op.join(op.dirname(ref), 'bedfiles_%s' % op.basename(ref).split(".fa")[0])
return [f for f in fs(beddir) if f.endswith('.bed')] | [
"def get_reference_files( self ):\n files_pattern = \"<ReferenceFiles>.*</ReferenceFiles>\"\n path_pattern = \"<Path>(.*)</Path>\"\n results = re.search( files_pattern, self.__file_contents, re.S )\n results = re.findall( path_pattern, results.group() )\n reffiles = []\n # ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create and sbatch shfiles, record pid to use as dependency for combine job. | def create_sh(bamfiles, shdir, pool, pooldir, program, parentdir):
bedfiles = get_bedfiles(parentdir, pool)
pids = []
for bedfile in bedfiles:
file = make_sh(bamfiles, bedfile, shdir, pool, pooldir, program, parentdir)
pids.append(sbatch(file))
return pids | [
"def make_sh(bamfiles, bedfile, shdir, pool, pooldir, program, parentdir):\n\n num, ref, vcf = get_prereqs(bedfile, parentdir, pool, program)\n\n cmd, finalvcf = get_varscan_cmd(bamfiles, bedfile, num,\n vcf, ref, pooldir, program)\n fields = '''-F ADP -F WT -F HET -F HOM... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Executed to upload records. | def upload_records(self, records, initial=False):
raise NotImplementedError # pragma: no cover | [
"def upload(self, arr): # real signature unknown; restored from __doc__\n pass",
"def _finalize_upload_after_mcc(self):\n # Upload waveforms and command tables\n for command_list in self.post_sequencer_code_upload.values():\n for upload_func, parameter_dict in command_list:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Called to programmatically update the rendered spikes by replotting after changing their visibility/opacity/position/etc | def update_spikes(self):
# full rebuild (to be safe):
historical_spikes_pdata, historical_spikes_pc = build_active_spikes_plot_data_df(self.spikes_df, spike_geom=SpikeRenderingPyVistaMixin.spike_geom_cone.copy(), enable_debug_print=self.debug_logging)
self.plots_data['spikes_pf_active'] = {'hist... | [
"def update_rasters(self):\n # Update preview_overview_scatter_plot\n self.plots.preview_overview_scatter_plot.setData(self.plots_data.all_spots)\n if self.Includes2DActiveWindowScatter:\n self.plots.scatter_plot.setData(self.plots_data.all_spots)",
"def plotSpikes(self):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The additional_render_opacity_modifier optionally allows implementors to provide an additional column that will be added to the render_opacity prior to clipping. Must be either None or an array the same length as a column of self.spikes_df. | def additional_render_opacity_modifier(self):
return None | [
"def opacity(\n self,\n default: Optional[Union[float, Undefined]] = UNDEF,\n unselected: Optional[Union[float, Undefined]] = UNDEF,\n by: Optional[Union[str, List[float], np.ndarray, Undefined]] = UNDEF,\n map: Optional[Union[Auto, dict, List[float], Tuple[float, float, int], Und... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
change_spike_rows_included presents an IDX- vs. ID-agnostic interface to self.spikes_df to allow the bulk of the code to work for both cases. | def change_spike_rows_included(self, row_specifier_mask, are_included):
if are_included:
self.update_active_spikes(row_specifier_mask, is_additive=True)
else:
# in remove mode, make the passed values negative and again specify is_additive=True mode:
remove_opacity_spe... | [
"def change_unit_spikes_included(self, neuron_IDXs=None, cell_IDs=None, are_included=True):\n assert (neuron_IDXs is not None) or (cell_IDs is not None), \"You must specify either neuron_IDXs or cell_IDs, but not both\"\n if neuron_IDXs is not None:\n # IDXs mode, preferred.\n if... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Called to update the set of visible spikes for specified cell indicies or IDs | def change_unit_spikes_included(self, neuron_IDXs=None, cell_IDs=None, are_included=True):
assert (neuron_IDXs is not None) or (cell_IDs is not None), "You must specify either neuron_IDXs or cell_IDs, but not both"
if neuron_IDXs is not None:
# IDXs mode, preferred.
if self.debug... | [
"def update_spikes(self):\n # full rebuild (to be safe):\n historical_spikes_pdata, historical_spikes_pc = build_active_spikes_plot_data_df(self.spikes_df, spike_geom=SpikeRenderingPyVistaMixin.spike_geom_cone.copy(), enable_debug_print=self.debug_logging)\n self.plots_data['spikes_pf_active'] ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create a new Tile object. Input is a list with 4 boolean values indicating the presence of a wall in the north, east, south and west directions respectively. | def __init__(self, walls = [0,0,0,0], rotations=0):
if len(walls) != 4 :
raise ValueError('The walls argument must contain 4 booleans, not {:d}'.format(len(walls)))
super(Tile, self).__init__()
self.walls = [bool(x) for x in walls]
self.rotate(rotations) | [
"def __tiles_between(self, source, destination, create_walls=False, validate_hallway=False, layout=[]):\n source_y, source_x = source\n dest_y, dest_x = destination\n\n left = min(source_x, dest_x)\n top = min(source_y, dest_y)\n\n right = max(source_x, dest_x) + 1\n bot = ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check whether this Tile object has a wall in the given direction. Returns True if there is a wall or False otherwise | def has_wall(self, direction):
if not isinstance(direction, int):
raise ValueError('direction must be an integer, not {:s}'.format(type(direction)))
if direction >3 or direction < 0:
raise ValueError('direction must be 0, 1, 2 or 3, not {:d}'.format(direction))
return ... | [
"def is_wall(self, x, y):\n return self.get_tile(x, y) == Tile.wall",
"def is_wall(self, x, y):\n\t\treturn self.get_bool(x, y, 'wall')",
"def is_wall(self, row, col):\n \n return self.maze[row][col] == WALL",
"def wall_in_walls(self, wall):\n for w in self.walls:\n if w... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check whether this Tile object is open in the given direction, i.e. has no wall. Returns False if there is a wall or True otherwise | def is_open(self, direction):
if not isinstance(direction, int):
raise ValueError('direction must be an integer, not {:s}'.format(type(direction)))
if direction >3 or direction < 0:
raise ValueError('direction must be 0, 1, 2 or 3, not {:d}'.format(direction))
return n... | [
"def __direction_is_allowed(self, direction):\n\n current_cell = self.game.level.get_cell(self.grid_position)\n # Cannot move in the direction if there is a wall\n if current_cell.get_edge(direction).type == level_cell.CellEdgeType.wall:\n return False\n elif direction == self... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Add a link to a workspace document (by UID) to a GEVER doc. | def link_workspace_document(workspace_doc_uid): | [
"def link_gever_document(gever_doc_uid):",
"def add_document ( self, doc_id ):\n\n self.size_ += 1\n self.linked_list_.append( doc_id )",
"def add_link_dialog(self):\n myLink = GKLink(None, None)\n my_dlg = GKUILinkDialog(self.m_display, self.m_node_manager, myLink)\n if my_dl... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Add a link to a GEVER document (by uid) to a workspace doc. | def link_gever_document(gever_doc_uid): | [
"def link_workspace_document(workspace_doc_uid):",
"def add_document ( self, doc_id ):\n\n self.size_ += 1\n self.linked_list_.append( doc_id )",
"def add_reference(self, uri, text):\n el = SubElement(self.get_element_person(), 'ref')\n el.set('target', uri)\n el.text = text\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Set a single character or string into the grid starting at key | def __setitem__(self,key,val):
self.__checkkey(key)
if not isinstance(val,str):
raise Exception("Val must be 'str', got %r"%val)
if len(val) < 1:
raise Exception("Val must be at least 1 character long, got %d (%s)"%(len(val),val))
cOffset = 0
for c in val... | [
"def set_points_in_grid(grid, points, char):\n for point in points:\n grid[point] = char",
"def change_char_g(self, x, y):\n\n self.game_grid[y][x] = '*'",
"def setChar(*args, **kwargs):\n \n pass",
"def change_grid(x: int, y: int, new: str):\r\n grid[y][x] = new",
"def set_let... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
add a new row above 'row' (shifting existing rows down) | def insertRowsAbove(self,row,num):
keys = filter(lambda k: k[0] >= row,self.keys())
self.__moveCells(keys,(num,0)) | [
"def append_row(self, row):\n self.rows.append(row)",
"def addRow(self, row, rowNum):\n col = 0\n width = 1\n last = row[0]\n bound = self.numCols - 1\n while col < bound:\n nxt = row[col+1]\n if nxt == last:\n width += 1\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
add a new column to the left of 'col' (shifting existing cols right) | def insertColsToLeft(self,col,num):
keys = filter(lambda k: k[1] >= col,self.keys())
self.__moveCells(keys,(0,num)) | [
"def shift(self, col, row):\n self.start_col += col\n self.end_col += col\n self.start_row += row\n self.end_row += row\n\n assert self.start_col >= 0\n assert self.start_row >= 0\n\n return self",
"def add_new_column(self, col_name):\n self.board.append(Col... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Called by insertRowsAbove... Moves all cells in 'keys' in 'direction'. Each key in 'keys' is specified by (row,col). Direction is specified by (rowOffset,colOffset). | def __moveCells(self,keys,direction):
while len(keys) > 0:
keys = self.__moveCell(keys[0],keys,direction) | [
"def __moveCell(self,srcKey,keys,direction):\n self.__checkkey(srcKey)\n self.__checkkey(direction)\n destKey = (srcKey[0]+direction[0],srcKey[1]+direction[1])\n # If destination already exists\n if destKey in self:\n keys = self.__moveCell(destKey,keys,direction)\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Called by __moveCells; recursively moves cells in order to move the srcKey cell | def __moveCell(self,srcKey,keys,direction):
self.__checkkey(srcKey)
self.__checkkey(direction)
destKey = (srcKey[0]+direction[0],srcKey[1]+direction[1])
# If destination already exists
if destKey in self:
keys = self.__moveCell(destKey,keys,direction)
# copy c... | [
"def __moveCells(self,keys,direction):\n while len(keys) > 0:\n keys = self.__moveCell(keys[0],keys,direction)",
"def move_cell(self, x1, y1, x2, y2):\n self.grid[x1][y1] = self.grid[x2][y2]\n self.grid[x2][y2] = \"0\"",
"def move(self, key):\n \n #Checks if input i... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the name of the key used in memcached to identify the session record with the given session_token | def _get_session_record_id(session_token: str) -> str:
return MEMCACHED_PREFIX + "sessions." + session_token | [
"def redis_session_key(self):\n return RedisKeys.session_info.format(session_id=self.session_id)",
"def get_session_key(self):\n pass",
"def get_session_key(self):\n return self.model['session_key']",
"def cache_token_key_for_record(record):\n klass = record.__class__\n return \":\"... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the session record for the given session token. | def get_session_record(session_token: str) -> dict:
mc = get_memcache()
session_record = mc.get(_get_session_record_id(session_token))
return _deserialize_session_record(session_record) | [
"def get_session_by_token(token: str) -> Session:\n conn = get_connection()\n cur = conn.cursor()\n\n cur.execute(\"SELECT * FROM sessions WHERE token = ?\", (token,))\n result = cur.fetchone()\n conn.close()\n\n if result is None:\n raise(SessionNotFoundException)\n\n return Session(res... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Inserts a new session record for the party. Also pins the session to the client's IP to help mitigate MITM attacks on session tokens. | def new(party_id: int, party_type: PartyTypes=PartyTypes.DataClient, ttl: int=1800) -> Optional[str]:
session_token = os.urandom(g._config['AUTH_SESSION_TOKEN_LENGTH']).hex()
while is_valid(session_token):
session_token = os.urandom(g._config['AUTH_SESSION_TOKEN_LENGTH']).hex()
assert type(ttl) == ... | [
"def add(self, session):\n uuid = session.uuid\n timestamp = time.mktime(session.timestamp.timetuple())\n pickled_session = sqlite3.Binary(pickle.dumps(session, -1))\n\n query = \"INSERT INTO sessions VALUES (?, ?, ?);\"\n params = (uuid, timestamp, pickled_session)\n\n c =... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Invalidates a session token by removing it from memcached. | def invalidate(session_token: str) -> bool:
mc = get_memcache()
return mc.delete(_get_session_record_id(session_token)) != 0 | [
"def invalidate_cache(self):\n self._access_token = None",
"def invalidate_request_token(self, client_key, request_token, request):",
"def invalidate_token():\n authenticate_request(request)\n content = request.get_json()\n \n parsed_data = parse_message(content, TokenSchema())\n\n return ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns a bool indicating whether the given session token is valid. | def is_valid(session_token: str) -> bool:
if type(session_token) != str:
raise TypeError
session_record = get_session_record(session_token)
if session_record is None:
debug_print('Auth: No session record found in MC.')
return False
if time.time() - session_record['last_seen'] ... | [
"def is_token_valid(self):\r\n if not self.auth_token:\r\n return False\r\n\r\n if not self.auth_token_expires:\r\n return False\r\n\r\n expires = self.auth_token_expires - \\\r\n datetime.timedelta(seconds=AUTH_TOKEN_EXPIRES_GRACE_SECONDS)\r\n\r\n time_t... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the Party uuid for the given session_token. | def who_is(session_token: str) -> Optional[int]:
if not is_valid(session_token):
return None
session_record = get_session_record(session_token)
return session_record['uuid'] | [
"def get_user_id(token):\n auth = AstakosClient(auth_url, token)\n try:\n logging.info(' Get the uuid')\n uuid = auth.user_info['id']\n return uuid\n except ClientError:\n msg = 'Failed to get uuid from identity server'\n raise ClientError(msg)",
"def _get_session_recor... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Logs out the current user, raises UserNotLoggedInException if the session is already invalid. | def logout():
if g._current_user is None:
raise UserNotLoggedInException()
if not invalidate(g._current_session_token):
raise UserNotLoggedInException() | [
"def logout():\n if (model.validateSession(session)):\n print(\"Logging out a user...\")\n # Remove the session object from the list of permitted sessions\n model.invalidateSession(session['SESSION_ID'])\n \n # Invalidate the current session\n session.clear()\n\n retu... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the created_date of this RewardSetResource. The date/time this resource was created in seconds since unix epoch | def created_date(self, created_date):
self._created_date = created_date | [
"def created_since(self, created_since):\n\n self._created_since = created_since",
"def last_created(self, last_created):\n\n self._last_created = last_created",
"def SetCommentCreatedOn(self, _date):\n self.comment_created_on = _date",
"def _set_dates(self):\n if self.id is None o... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Gets the currency_rewards of this RewardSetResource. The currency to give as rewards | def currency_rewards(self):
return self._currency_rewards | [
"def get_rewards(self):\n\n pass",
"def cumulativeReward(self, timestep, rewards):\n ret = 0\n\n for t in range(timestep, min(len(rewards), len(rewards) + self.horizon)):\n ret += (self.discountFactor ** (t - timestep)) * rewards[t]\n \n return ret",
"def get_cumula... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the currency_rewards of this RewardSetResource. The currency to give as rewards | def currency_rewards(self, currency_rewards):
self._currency_rewards = currency_rewards | [
"def _reset_rewards(self):\n self.rewards = [0, 0, 0, 0]\n self.nstep = self.game.active_player",
"def get_rewards(self):\n\n pass",
"def add_rewards(self, team, reward):\n bots = self.botdb.query(Bot).filter_by(team_name=str(team)).all()\n for bot in bots:\n bot.to... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Gets the item_rewards of this RewardSetResource. The items to give as rewards | def item_rewards(self):
return self._item_rewards | [
"def get_rewards(self):\n\n pass",
"def episodes_rewards(self):\n return self.logs['rewards']",
"def get_rewards(self, context):\n if (context == self.contexts[self.cur_idx]).all():\n return self.rewards[self.cur_idx], self.rewards[self.cur_idx]\n\n for i in range(self.n_c... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the item_rewards of this RewardSetResource. The items to give as rewards | def item_rewards(self, item_rewards):
self._item_rewards = item_rewards | [
"def _reset_rewards(self):\n self.rewards = [0, 0, 0, 0]\n self.nstep = self.game.active_player",
"def refund_items(self, refund_items):\n\n self._refund_items = refund_items",
"def get_rewards(self):\n\n pass",
"def update_reward(self, reward, force=False):\n if force or reward... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the long_description of this RewardSetResource. A longer description of the reward set, usually included in details | def long_description(self, long_description):
self._long_description = long_description | [
"def set_description(description):",
"def _set_description(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=six.text_type, is_leaf=True, yang_name=\"description\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Gets the max_placing of this RewardSetResource. The maximum placing that will receive a reward | def max_placing(self):
return self._max_placing | [
"def max_placing(self, max_placing):\n\n self._max_placing = max_placing",
"def rebounds_max(self):\n if self._games is None:\n raise TypeError('games has not been set')\n return self._games['rebounds'].max()",
"def best_reward(self):\n return max(self.means)",
"def get_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the max_placing of this RewardSetResource. The maximum placing that will receive a reward | def max_placing(self, max_placing):
self._max_placing = max_placing | [
"def max_placing(self):\n return self._max_placing",
"def setMaxForwards(self, maxForwards):\n self._maxForwards = maxForwards",
"def maximum(self, maximum):\n\n self._maximum = maximum",
"def property_max_rate(self, property_max_rate):\n\n self._property_max_rate = property_max_ra... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Gets the short_description of this RewardSetResource. A short paragraph to describe the reward set, usually included in listings. Max 255 characters | def short_description(self):
return self._short_description | [
"def question_description(self) -> str:\n return self._question_description",
"def description(self, value):\r\n if self.description is not None:\r\n if isinstance(value, basestring):\r\n output = self._update(\r\n backupset_name=self.backupset_name,\r\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the short_description of this RewardSetResource. A short paragraph to describe the reward set, usually included in listings. Max 255 characters | def short_description(self, short_description):
self._short_description = short_description | [
"def description(self, value):\r\n if self.description is not None:\r\n if isinstance(value, basestring):\r\n output = self._update(\r\n backupset_name=self.backupset_name,\r\n backupset_description=value,\r\n default_backupse... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the unique_key of this RewardSetResource. A provided unique key for this reward set | def unique_key(self, unique_key):
self._unique_key = unique_key | [
"def _set_unique_keys(self):\n\n self.unique_keys = IAMRole.UNIQUE_KEYS",
"def set_unique_id(self, unique_id):\n pass",
"def setId(self,uniqueId):\n self.id = uniqueId",
"def setCacheKey(self, key):\n\t\tself.cacheKey = key",
"def _set_key(self, key, hexkey=False):\n self.key = s... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Gets the updated_date of this RewardSetResource. The date/time this resource was last updated in seconds since unix epoch | def updated_date(self):
return self._updated_date | [
"def lastmodifieddate(self):\n return datetime.utcfromtimestamp(\n self.st_mtime)",
"def refresh_update_date(self):\n self.last_updated = datetime.datetime.now()",
"def item_updateddate(self, item: Series) -> datetime:\n return item.modified",
"def last_modified_at(self) -> \"d... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the updated_date of this RewardSetResource. The date/time this resource was last updated in seconds since unix epoch | def updated_date(self, updated_date):
self._updated_date = updated_date | [
"def updated_at(self, updated_at): # noqa: E501\n self._updated_at = updated_at",
"def refresh_update_date(self):\n self.last_updated = datetime.datetime.now()",
"def set_updated_at(self):\n self.record['updated_at'] = datetime.utcnow()",
"def updated_at_lte(self, updated_at_lte):\n\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Title displayed as a map header | def get_title(self):
return _("Map") if self.title is None else self.title | [
"def header(self):\r\n # Logo\r\n self.image(self.logo, 5, 0, 27, 18)\r\n \r\n # Arial bold 15\r\n self.set_font('Arial', 'BU', 15)\r\n # Title\r\n self.cell(w=0, h=0, txt =self.title, align = 'C')\r\n # Line break\r\n self.ln(10)",
"def display_tit... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Lat/Lon tuple or None (if unset) | def get_lat_lng(self):
    if self.lat is not None and self.lng is not None:  # 0.0 is a valid coordinate
return self.lat, self.lng
return None | [
"def get_lat_lon(self) -> tuple[float, float]:\n return (self.lat, self.lon)",
"def getlatlon():\n pass",
"def safe_parse_lat_lng_string(s: str) -> Optional[Tuple[float, float]]:\n try:\n return parse_lat_lng_string(s)\n except Exception: # noqa\n return None",
"def get_lat_lon(... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that all clients can access public endpoints. | async def test_public(administrator, authenticated, privilege_routes, spawn_client):
client = await spawn_client(
authorize=authenticated,
administrator=administrator,
addon_route_table=privilege_routes(PublicRoutePolicy),
)
for url in ("/view", "/func"):
for method in ["get... | [
"def test_private_to_public(self):\r\n pass",
"def test_public_to_private(self):\r\n pass",
"def test_list_cloud_access(self):\n pass",
"def test_get_common_endpoints(self):\n # Create a few common endpoints and one not, test results.\n ezdiscovery.register_common_endpoint('... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
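The loop body of `test_public` is truncated. The toy below shows the likely `getattr`-based verb dispatch in a self-contained, runnable form; `FakeClient` and the two-verb list are stand-ins, since the real client and full method list are not visible.

```python
import asyncio

class FakeClient:
    """Stub standing in for the spawned test client (assumption)."""
    async def get(self, url): return 200
    async def post(self, url): return 200

async def check_public(client):
    # Mirrors the truncated loop: every verb on every public URL succeeds,
    # regardless of whether the caller is authenticated or an administrator.
    for url in ("/view", "/func"):
        for method in ("get", "post"):   # the full verb list is cut off above
            assert await getattr(client, method)(url) == 200

asyncio.run(check_public(FakeClient()))
```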
Test that only authenticated, administrator clients can access admin endpoints. | async def test_administrator(
administrator, authenticated, spawn_client, privilege_routes
):
client = await spawn_client(
authorize=authenticated,
administrator=administrator,
addon_route_table=privilege_routes(
AdministratorRoutePolicy(AdministratorRole.BASE)
),
... | [
"def test_otoroshi_controllers_adminapi_users_controller_web_authn_admins(self):\n pass",
"def test_admin_index(self):\n response = self.client.get('/admin/')\n self.failUnlessEqual(response.status_code, 200)\n response = self.client.login(username=self.user.username,\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that attempting to load more than one policy on a function-based route leads to a ``PolicyError``. | async def test_more_than_one_function():
routes = Routes()
with pytest.raises(PolicyError):
@routes.get("/func")
@policy(AdministratorRoutePolicy(AdministratorRole.BASE))
@policy(PublicRoutePolicy)
async def get(_):
"""An example public route."""
return ... | [
"async def test_more_than_one_view(spawn_client):\n routes = Routes()\n\n with pytest.raises(PolicyError):\n\n @routes.view(\"/view\")\n class TooManyPolicies(PydanticView):\n @policy(AdministratorRoutePolicy(AdministratorRole.BASE))\n @policy(PublicRoutePolicy)\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that attempting to load more than one policy on a view-based route leads to a ``PolicyError``. | async def test_more_than_one_view(spawn_client):
routes = Routes()
with pytest.raises(PolicyError):
@routes.view("/view")
class TooManyPolicies(PydanticView):
@policy(AdministratorRoutePolicy(AdministratorRole.BASE))
@policy(PublicRoutePolicy)
async def get(... | [
"async def test_more_than_one_function():\n routes = Routes()\n\n with pytest.raises(PolicyError):\n\n @routes.get(\"/func\")\n @policy(AdministratorRoutePolicy(AdministratorRole.BASE))\n @policy(PublicRoutePolicy)\n async def get(_):\n \"\"\"An example public route.\"\"... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
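Neither record shows the `policy` decorator itself. A minimal sketch consistent with both tests follows: it assumes the decorator stores the policy on the handler and raises `PolicyError` when a second one is applied, which is one plausible way to get the decoration-time failure the tests expect.

```python
class PolicyError(Exception):
    """Raised when a handler is given more than one policy."""

def policy(route_policy):
    """Attach a single access policy to a route handler (sketch)."""
    def decorator(handler):
        if getattr(handler, "policy", None) is not None:
            # A second @policy on the same handler is a programming error.
            raise PolicyError("handler already has a policy")
        handler.policy = route_policy
        return handler
    return decorator

# Stacking two policies raises at decoration time, as both tests expect:
# the inner @policy("public") runs first, so the outer one sees it and fails.
try:
    @policy("admin")
    @policy("public")
    def get(_):
        return None
except PolicyError as err:
    print("caught:", err)
```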
Makes parameters for propellers. | def MakeParams():
propellers = [None] * system_types.kNumPropVersions
# The propellers follow the sign convention where kPositiveX means
# that the propeller is rotating in a positive direction (i.e. right
# hand rule) about the propeller axis, which is predominately in the
# same direction as the body x-axi... | [
"def process_epidemic_parameters(self):",
"def generative_parameters(self):\n pass",
"def create_planners():\n\tompl.initializePlannerLists()\n\t# Create the geometric planners\n\tplanners = ompl.PlanningAlgorithms(og)\n\tparams_dict = planners.getPlanners()\n\n\t# TODO: Por que? Is this accidentally lef... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Adds validation to check that no two links have the same anchor or URL and that all links have both an anchor and URL. | def clean(self):
if any(self.errors):
return
anchors = []
urls = []
duplicates = False
for form in self.forms:
if form.cleaned_data:
anchor = form.cleaned_data['anchor']
url = form.cleaned_data['url']
# Ch... | [
"def check_inputs(source_link, target_link):\n if not is_url_valid(DEFAULT_URL + source_link):\n raise ValueError('RESULT: The source link \"{}\" is invalid page'.format(unquote(source_link)))\n if not is_url_valid(DEFAULT_URL + target_link):\n raise ValueError('RESULT: The target link \"{}\" is... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
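The `clean()` document breaks off inside the duplicate-checking loop. The framework-free sketch below completes the logic described by the docstring; the real code presumably raises `forms.ValidationError` inside a Django formset rather than returning a list, so the return-errors style here is purely illustrative.

```python
def clean_links(forms_cleaned_data):
    """Framework-free sketch of the truncated formset clean() logic."""
    anchors, urls, errors = [], [], []
    for data in forms_cleaned_data:
        if not data:
            continue
        anchor, url = data.get('anchor'), data.get('url')
        # No two links may share an anchor or a URL.
        if anchor in anchors or url in urls:
            errors.append('Links must have unique anchors and URLs.')
        anchors.append(anchor)
        urls.append(url)
        # Every link needs both fields.
        if not anchor or not url:
            errors.append('All links must have both an anchor and a URL.')
    return errors

assert clean_links([{'anchor': 'a', 'url': 'u'},
                    {'anchor': 'a', 'url': 'v'}]) \
       == ['Links must have unique anchors and URLs.']
```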
Normalizes the timestamp, throwing away fractional seconds and timezone data. Args: timestamp: the timestamp to normalize. Returns: the normalized timestamp. | def normalize_timestamp(timestamp):
return datetime(
timestamp.year,
timestamp.month,
timestamp.day,
timestamp.hour,
timestamp.minute,
timestamp.second,
) | [
"def _GetNormalizedTimestamp(self):\n if self._normalized_timestamp is None:\n if self._timestamp is not None:\n self._normalized_timestamp = (\n decimal.Decimal(self._timestamp) -\n self._OLE_AUTOMATION_DATE_TO_POSIX_BASE)\n self._normalized_timestamp *= definitions.SECO... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
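A quick usage check for `normalize_timestamp`, with the function restated so the snippet runs on its own: both the microsecond field and the timezone are dropped, matching the docstring.

```python
from datetime import datetime, timezone

def normalize_timestamp(timestamp):
    # Same as the record above: rebuild without microsecond or tzinfo.
    return datetime(timestamp.year, timestamp.month, timestamp.day,
                    timestamp.hour, timestamp.minute, timestamp.second)

ts = datetime(2021, 6, 1, 12, 30, 45, 123456, tzinfo=timezone.utc)
assert normalize_timestamp(ts) == datetime(2021, 6, 1, 12, 30, 45)
assert normalize_timestamp(ts).tzinfo is None
```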