| query (string, 9-9.05k chars) | document (string, 10-222k chars) | negatives (list, 19-20 items) | metadata (dict) |
|---|---|---|---|
Initialize a player at Python Casino; we give new players 100 chips to play | def __init__(self, name="Player"):
self.name = name
self.chips = 100
self.hand1 = []
self.hand2 = []
self.bet = 0
self.lastbet = 0 | [
"def init_player(self, player: Player) -> None:\r\n pass",
"def initialize_players():\n for idx_player in range(len(player_list)):\n load_pawns(board_size_input, idx_player)\n gen_playing_field(board_size_input, idx_player)",
"def __init__(self):\n\n self.name = 'KuhnPoker'\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Receive a card dealt during the deal round | def dealt_card(self, card):
self.hand1.append(card)
print(f"{self.name} was dealt a {card}") | [
"def deal(self):\n dealt_card = self.deck_of_cards.pop()\n print(\"You have been dealt the {} \".format(dealt_card.value) \\\n + \"of {}.\".format(dealt_card.suit) + \"\\n\")",
"def deal_card(self):\n return self._deal(1)[0]",
"def deal(self):\n\n if self.dealer... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return the value of the player's hand. Still need to handle split hands somehow | def hand_value(self):
return deck.bj_hand_value(self.hand1) | [
"def player_hand_value(self, hand_idx=0):\n return self._get_hand_value(self.players[hand_idx]['hand'])",
"def get_hand(self):\n return self.player_hand",
"def hand_value(hand):\n val = 0 \n for card in hand:\n val += card.value\n\n return val",
"def calculate_value(self, hand):\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Lost hand, lost bet | def lose(self, dlr):
print(f"Sorry {self.name}, your total of {sum(self.hand1)} didn't beat the dealers {dlr}")
self.lastbet = self.bet
self.bet = 0 | [
"def win_bet(self):\n self.total += self.bet*2",
"def lose(self, bet):\n self.stake -= bet.loseAmount()",
"def lose_bet(self):\n print(\"Sorry you lose\")",
"def win(self, bet):\n self.stake += bet.winAmount()",
"def big_blind(self):\n player = self.players[1] #Player to l... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Checks if it creates a data collection. | def test_creates_data_collection(self):
data_collection = create_data_collection(read_config_file("test/data_collection.yaml"))
self.assertIsInstance(data_collection, DataCollection) | [
"def test_create_collection(self):\n pass",
"def test_collections_create_collection_hit_for_collection(self):\n pass",
"def test_collections_create_collection(self):\n pass",
"def check_for_new_data(self):\n return",
"def _validate_create_data(self, data):\n return",
"de... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Retrieves the current IAM policy data for listing example ```python import pulumi import pulumi_gcp as gcp policy = gcp.bigqueryanalyticshub.get_listing_iam_policy(project=google_bigquery_analytics_hub_listing["listing"]["project"], location=google_bigquery_analytics_hub_listing["listing"]["location"], data_exchange_id... | def get_listing_iam_policy(data_exchange_id: Optional[str] = None,
listing_id: Optional[str] = None,
location: Optional[str] = None,
project: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> ... | [
"def get_listing_iam_policy_output(data_exchange_id: Optional[pulumi.Input[str]] = None,\n listing_id: Optional[pulumi.Input[str]] = None,\n location: Optional[pulumi.Input[Optional[str]]] = None,\n project: Optional[... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Retrieves the current IAM policy data for listing example ```python import pulumi import pulumi_gcp as gcp policy = gcp.bigqueryanalyticshub.get_listing_iam_policy(project=google_bigquery_analytics_hub_listing["listing"]["project"], location=google_bigquery_analytics_hub_listing["listing"]["location"], data_exchange_id... | def get_listing_iam_policy_output(data_exchange_id: Optional[pulumi.Input[str]] = None,
listing_id: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[Optional[str]]] = None,
project: Optional[pulumi.I... | [
"def get_listing_iam_policy(data_exchange_id: Optional[str] = None,\n listing_id: Optional[str] = None,\n location: Optional[str] = None,\n project: Optional[str] = None,\n opts: Optional[pulumi.InvokeOptions] = ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Initialize with the location of my articles and the outdir | def __init__(self, workdir = "archived_links", outdir = "tmp"):
self.workdir = workdir
self.outdir = outdir
self.bigdf = ""
self.ArticlesLoaded = False
self.clf = "" | [
"def __init__(self):\n if exists(link):\n return\n if not exists('docs'):\n\n # we cannot raise here, we can't dissallow being started from elsewhere user\n # won't have the link but he'll read the docs hopefully when mkdocs fails\n # with custom dir not fou... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that view renders data from model | def test_the_view_render_Contact_instance(self):
my_info = self.response.context_data['info']
self.assertIsInstance(my_info, Contact)
model_instance = Contact.objects.first()
self.assertIn(model_instance.name, self.response.content)
self.assertIn(model_instance.surname, self.re... | [
"def test_render__view(self):\n renderer = Renderer()\n\n view = Simple()\n actual = renderer.render(view)\n self.assertEqual('Hi pizza!', actual)",
"def test_modelview_instanceloader_view(self) -> None:\n doc = ViewDocument(name='test1', title=\"Test\")\n self.session.ad... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test for form date field validation | def test_form_date_validation(self):
form = My_add_data_form(data={'date': date(1800, 05, 03)})
self.assertEqual(form.errors['date'], ['You already dead now'])
form = My_add_data_form(data={'date': date(2200, 05, 03)})
self.assertEqual(form.errors['date'], ['You not born yet']) | [
"def test_date_valid_data(self):\n date_form = DateForm(data={\n 'date': date.today(),\n })\n\n self.assertTrue(date_form.is_valid())",
"def test_date_invalid_data(self):\n date_form = DateForm(data={\n 'date': 'invalid date',\n })\n\n self.assertFal... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that the view returns errors in JSON format | def test_that_view_return_errors_in_json(self):
self.client.login(username='admin', password='admin')
url = reverse("to_form", args=str(self.my_instance.id))
response = self.client.post(url, data={'name': 'Oleg'}, format='json')
self.assertEqual(response.status_code, 200)
for c ... | [
"def test_non_json_response() -> None:\n with raises(TypeError):\n _view_function_response_failure()",
"def test_view_400(request):\n return HttpResponseBadRequest(\"400 view\")",
"def test_bad_request_response(self):\n response = self.client.post(\n '/json_bad_request/',\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that the view saves data if the form is valid | def test_that_view_saves_data_if_form_valid(self):
self.client.login(username='admin', password='admin')
url = reverse("to_form", args=str(self.my_instance.id))
response = self.client.post(url, data={'name': 'Oleg', 'surname': 'Senyshyn', 'date': date(1995, 05, 03),
... | [
"def test_submit_form_using_valid_data():",
"def test_check_form_is_validated_when_there_are_no_errors():",
"def test_success_form_validation(self):\n\n form = QuestionForm(self.params, current_user=self.user)\n self.assertTrue(form.is_valid())",
"def form_valid(self, form):\n # This meth... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Loads patient Procedure observations | def load(cls):
# Loop through procedures and build patient procedure lists:
procs = csv.reader(file(PROCEDURES_FILE,'U'),dialect='excel-tab')
header = procs.next()
for proc in procs:
cls(dict(zip(header,proc))) # Create a procedure instance | [
"def load(cls):\n\n # Loop through procedures and build patient procedure lists:\n items = csv.reader(open(PROCEDURES_FILE, 'U'), dialect='excel-tab')\n header = next(items)\n for item in items:\n cls(dict(zip(header, item))) # Create a procedure instance",
"def load_patient... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns a tab-separated string representation of a procedure | def asTabString(self):
dl = [self.pid, self.date, self.snomed, self.name[:20]]
s = ""
for v in dl:
s += "%s\t"%v
return s[0:-1] # Throw away the last tab | [
"def print_para_table(s):\n if MODE == 1:\n t = [['Parameter', 'Value', 'Unit'],\n ['Number of bends', NBENDS, '/'], \n ['Width', WIDTH, 'm'],\n ['Depth', DEPTH, 'm'],\n ['Length', LAMBDA*(NBENDS+1), 'm'],\n ['Arc wavelength', LAMBDA, 'm'],\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Initiate the root XML, parse it, and return a dataframe | def process_data(self):
structure_data = self.parse_root(self.root)
dict_data = {}
for d in structure_data:
dict_data = {**dict_data, **d}
df = pd.DataFrame(data=list(dict_data.values()), index=dict_data.keys()).T
return df | [
"def _xmlRoot2Dfs(cls,root):\r\n\r\n logStr = \"{0:s}.{1:s}: \".format(__class__.__name__, sys._getframe().f_code.co_name)\r\n logger.debug(\"{0:s}{1:s}\".format(logStr,'Start.')) \r\n \r\n try: \r\n \r\n logger.debug(\"{0:s}parse Xml ...\".format(logStr)) ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Generate the Doxygen XML. We do not have to remove any old XML or similar, since we use the index.xml file to parse the rest. So if some stale information is in the output folder it is OK; we will not use it anyway | def generate(self):
# Write Doxyfile
doxyfile_content = DOXYFILE_TEMPLATE.format(
name="wurfapi",
output_path=self.output_path,
source_path=" ".join(self.source_paths),
recursive="YES" if self.recursive else "NO",
extra="",
)
... | [
"def generate_doxygen_xml(app):\n subprocess.call('doxygen')",
"def set_doxygen_xml(app):\n err = ExtensionError(\n '[sphinxcontrib-autodoc_doxygen] No doxygen '\n 'xml output found in doxygen_xml=\"%s\"' % app.config.doxygen_xml)\n\n if not os.path.isdir(app.config.doxygen_xml):\n r... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sidebar widget to select your data view | def select_data_view() -> str:
st.sidebar.markdown('### Select your data view:')
view_select = st.sidebar.selectbox('', DATA_VIEWS, index=0). \
replace(' (NEW)', '')
return view_select | [
"def sidebar():\n return render_template('sidebar.html')",
"def sidebar_toggled(self, active):\n pass",
"def main_layout_sidebar():\n return html.Div(\n [\n dbc.Container(\n fluid=True,\n children=dbc.Row(\n [\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sidebar widget to select fiscal year | def select_fiscal_year(view_select) -> str:
if 'Wage Growth' in view_select:
working_fy_list = FY_LIST[:-1]
else:
working_fy_list = FY_LIST
st.sidebar.markdown('### Select fiscal year:')
fy_select = st.sidebar.selectbox('', working_fy_list, index=0).split(' ')[0]
return fy_select | [
"def search_year():\n date_editor.find_element_by_xpath(\".//*[contains(@id, 'DateEditorLayoutY') and text()={year}]\"\n .format(year=date_obj.year)).click()",
"def YearFieldWidget(field, request):\n return FieldWidget(field, YearWidget(request))... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sidebar widget to select pay rate conversion (hourly/annual) | def select_pay_conversion(fy_select, pay_norm, view_select) -> int:
st.sidebar.markdown('### Select pay rate conversion:')
conversion_select = st.sidebar.selectbox('', PAY_CONVERSION, index=0)
if conversion_select == 'Hourly':
if view_select != 'Trends':
pay_norm = FISCAL_HOURS[fy_selec... | [
"def select_rates_tab(self):\n self.select_static_tab(self.rates_tab_locator, True)",
"def render_investip():\n\tlinewidth = 2\n\n\tst.sidebar.markdown('# Dashboard')\n\tstock = st.sidebar.selectbox('Stock:', stocks)\n\n\tstartdd = datetime.datetime(2020, 3, 1)\n\tstartdd = st.sidebar.date_input('start-dat... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sidebar widget to select trends for Trends page | def select_trends() -> str:
trends_checkbox = st.sidebar.checkbox(f'Show all trends', True)
if trends_checkbox:
trends_select = TRENDS_LIST
else:
trends_select = st.sidebar.multiselect('Select your trends', TRENDS_LIST)
return trends_select | [
"def get_trends():\n return api.trends_available()",
"def trending(request):\n\n # TODO: Fix query filter for trending recipes\n recipe_list = Recipe.objects.all()\n # Recipe.objects.raw(\n # RawQueries.trending_select,\n # [settings.TRENDING['time_window'], settings.TRENDING['review... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sidebar widget to select minimum salary for Highest Earners page | def select_minimum_salary(df, step, college_select: str = ''):
st.sidebar.markdown('### Enter minimum FTE salary:')
sal_describe = df[SALARY_COLUMN].describe()
number_input_settings = {
'min_value': 100000,
'max_value': int(sal_describe['max']),
'value': 500000,
'step': ste... | [
"def __showMaxMin(self):\n\t\thigh=tk.Label(self.book,text=self.max_price,fg='gray',bg='black')\n\t\thigh.config(font=(\"Helvetica\",20))\n\t\thigh.place(x=840,y=20,width=120,height=30)\n\n\t\tlow=tk.Label(self.book,text=self.min_price,fg='gray',bg='black')\n\t\tlow.config(font=(\"Helvetica\",20))\n\t\tlow.place(x=... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Wrapper around k8s.load_and_create_resource to create a SageMaker resource | def create_sagemaker_resource(
resource_plural, resource_name, spec_file, replacements, namespace="default"
):
reference, spec, resource = k8s.load_and_create_resource(
resource_directory,
CRD_GROUP,
CRD_VERSION,
resource_plural,
resource_name,
spec_file,
... | [
"def resource_creator():\n create_ec2_resource()\n s3 = create_s3_resource()\n create_iam_access()\n launch_redshift_cluster()",
"def create_resource(\n service_name: str, config_name: str = None, **resource_args\n):\n session = get_session(config_name)\n return session.resour... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Wrapper around k8s.load_and_create_resource to create an Adopted resource | def create_adopted_resource(replacements, namespace="default"):
reference, spec, resource = k8s.load_and_create_resource(
resource_directory,
ADOPTED_RESOURCE_CRD_GROUP,
CRD_VERSION,
"adoptedresources",
replacements["ADOPTED_RESOURCE_NAME"],
"adopted_resource_base",
... | [
"def create_resource(\n service_name: str, config_name: str = None, **resource_args\n):\n session = get_session(config_name)\n return session.resource(service_name, **resource_args)",
"def create_namespaced_resource(namespace, body):\n api = get_api(body[\"apiVersion\"], body[\"kind\"])\n return ap... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the scale for a unit | def get_scale(units, compartmentId, volume, extracellularVolume):
if compartmentId == 'c':
V = volume
else:
V = extracellularVolume
if units == 'uM':
return 1. / N_AVOGADRO / V * 1e6
elif units == 'mM':
return 1. / N_AVOGADRO / V * 1e3
elif units == 'molecu... | [
"def getScale(self):\n return _libsbml.Unit_getScale(self)",
"def get_scale(resolution, units=\"degrees\"):\r\n return resolution * INCHES_PER_UNIT[units] * DOTS_PER_INCH",
"def unit_scale(quantity):\n scales = {\n 'rate': 1.0,\n 'dt': 1.0,\n 'fluence': 1e39,\n 'peak': 1... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Euclidean distance between vector and matrix. | def euclid_dist(vec, mat):
return np.linalg.norm(mat - vec, axis=1) | [
"def calculate_euclidean_distance(self, matrix, input, output_neuron):\n result = 0\n\n # Loop over all input data.\n diff = input - matrix[output_neuron]\n return np.sqrt(sum(diff*diff))",
"def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut:\n return np.sqrt(np... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Generate a description for the combination of well, tile, channel and, optionally, depth and/or time | def generate_tile_description(tile, time = None, depth = None):
desc = "s"+ str(tile)
if depth is not None:
desc = desc + "_z" + str(depth)
if time is not None:
desc = desc + "_t" + str(time)
return desc | [
"def generate_basic(self, time):\n output_text = ''\n try:\n output_text = 'for x=1 to ' + str(int(time)//4) + ':\\n'\n except:\n return\n for i, layer in enumerate(self.c_layers):\n c = ['0'] * 8\n b = ['0'] * 8\n d = ['0'] * 8\n\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Generate a name for a file using the description and channel | def generate_file_name(well, channel, desc):
return "bPLATE_w" + well + "_" + desc + "_c" + channel + ".png" | [
"def _generate_raw_file_name(self, well, channel, desc):\n \n return \"bPLATE_w\" + well + \"_\" + desc + \"_c\" + channel + \".png\"",
"def gen_channel_name_v2(shotnr: int, channel_rg: str):\n return f\"{shotnr:05d}_ch{channel_rg:s}\"",
"def __responseName__(channelNum):\n baseName = 'D... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Constructor that takes the config and the well we are generating images for. | def __init__(self, config, well, directory):
self.config = config
self.well = well
self.directory = directory | [
"def __init__(self, generate_image_pyramid: bool = True):\n if not os.path.exists(MANIFEST_OUTPUT_DIR):\n os.makedirs(MANIFEST_OUTPUT_DIR)\n self._manifest_factory = ManifestFactory()\n self._manifest_factory.set_base_prezi_uri(MANIFEST_BASE_URL)\n self._manifest_factory.set_b... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Generate a name for a file using the description and channel | def _generate_raw_file_name(self, well, channel, desc):
return "bPLATE_w" + well + "_" + desc + "_c" + channel + ".png" | [
"def generate_file_name(well, channel, desc):\n \n return \"bPLATE_w\" + well + \"_\" + desc + \"_c\" + channel + \".png\"",
"def gen_channel_name_v2(shotnr: int, channel_rg: str):\n return f\"{shotnr:05d}_ch{channel_rg:s}\"",
"def __responseName__(channelNum):\n baseName = 'DanteDataS_1_'\n exte... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The function generates and returns a combined table with stock names in the columns and dates in the index. Profit tables for individual stocks are taken from the stock_retrunrs function. | def Generating_stock_daily_return_table():
#Getting Names list
Profitfile='pap//CombProfit.csv'
path='D://Doktorat Marek//dane//'
ProfitsFilePath=path+Profitfile
quarterly_profit=pd.read_csv(ProfitsFilePath,index_col=0,header=0,parse_dates=True)
Names_list=quarterly_profit.columns.tolist()... | [
"def setupStockTable(self):\n # Get the date\n # NOTE: This is probably un\n date = datetime.date()\n dateStr = date.month() + \"/\" + date.day() + \"/\" + date.year()\n\n stocks = (\"INTC\", \"AAPL\", \"GOOG\", \"YHOO\", \"SYK\", \"VZ\")\n\n for stock in stocks:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
General method for costing belt filter press. Capital cost is a function of flow in gal/hr. | def cost_filter_press(blk):
t0 = blk.flowsheet().time.first()
# Add cost variable and constraint
blk.capital_cost = pyo.Var(
initialize=1,
units=blk.config.flowsheet_costing_block.base_currency,
bounds=(0, None),
doc="Capital cost of unit operation... | [
"def determine_cost(self):\n pass",
"def calc_rc_model_demand_heating_cooling(bpr, tsd, t, gv):\n\n # following the procedure in 2.3.2 in SIA 2044 / Korrigenda C1 zum Merkblatt SIA 2044:2011\n # / Korrigenda C2 zum Mekblatt SIA 2044:2011\n\n # ++++++++++++++++++++++++++++++\n # CASE 0 - NO HEA... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test case for create_symlink_file | def test_create_symlink_file(self):
pass | [
"def testSymlink(self):\n \n self.tempDirs.append(tempfile.mkdtemp())\n symTarget = os.path.basename(self.tempDirs[1])\n symTargetFile = file(os.path.join(self.tempDirs[1], \"foo.txt\"), \"w\") \n symTargetPath = os.path.join(self.tempDirs[0], symTarget)\n os.symlink(self.t... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test case for get_meta_range | def test_get_meta_range(self):
pass | [
"def test_get_range(self):\n pass",
"def get_range(self, start, end):",
"def getRange(self):\n \n pass",
"def __call__(self, *args):\n return self.range_",
"def test_get_range_empty(self):\n\n queryset = mock.Mock()\n queryset.aggregate.return_value = None\n\n di... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test case for get_range | def test_get_range(self):
pass | [
"def get_range(self, start, end):",
"def getRange(self):\n \n pass",
"def test_get_meta_range(self):\n pass",
"def get_range(lst):\n pass",
"def __call__(self, *args):\n return self.range_",
"def get_range(min_val, max_val):\n return range(min_val, max_val)",
"def _in_range... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Compiles network for training | def compile_network(model, optimizer):
compile_network_model(model, optimizer, categorical_crossentropy) | [
"def compile_model(net):\n # Prepare Theano variables for inputs and targets\n target_var = T.ivector('targets')\n # target_var = T.vector('targets') # Theano requires an ivector\n\n # Create a loss expression for training, i.e., a scalar objective we want\n # to minimize (for our multi-class problem, it is th... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Randomly rotate the point clouds to augment the dataset; rotation is per shape, based along the up direction | def rotate_point_cloud(batch_data):
rotated_data = np.zeros(batch_data.shape, dtype=np.float32)
for k in np.arange(batch_data.shape[0]):
rotation_angle = np.random.uniform() * 2 * np.pi
cosval = np.cos(rotation_angle)
sinval = np.sin(rotation_angle)
rotation_matrix = np.array([[c... | [
"def rotate_point_cloud(data):\n rotated_data = np.zeros(data.shape, dtype=np.float32)\n for k in xrange(data.shape[0]):\n rotation_angle = np.random.uniform() * 2 * np.pi\n cosval = np.cos(rotation_angle)\n sinval = np.sin(rotation_angle)\n rotation_matrix ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get maximum depth of given tree by BFS | def max_depth(root):
# basic case
if root is None:
return 0
# breadth-first traversal
queue = collections.deque([root])
depth = 0
while queue:
queue_size = len(queue)
for i in range(queue_size):
curr = queue.popleft()
if curr.left is not None:
... | [
"def get_max_depth(self):\n depth = 1\n for node in self.nodes:\n depth = max(depth,1+self.nodes[node].get_max_depth())\n return depth",
"def get_max_depth(clf):\n tree =clf.tree_\n def get_node_depths_(current_node, current_depth, l, r, depths):\n depths += [current_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Crawls each author's pages starting from the allauthors main page stored in the authors report | def start_requests(self):
authors_pandas = conf.read_from_data('authors.json')
author_link_list = list(
map(lambda obj: (obj['keyUrl'], conf.gd_base_url + obj['article_url'], obj['article_url']),
authors_pandas))
for link in author_link_list:
yield Request... | [
"def scrape_page_authors(soup):\n authors = [re.sub('\\n', '', a.get_text()) for a in soup.find_all('span', itemprop=\"name\")]\n authors = ' & '.join([str(a) for a in authors])\n return authors",
"def scrape_author(self, author_name, min_len=0, max_len=9999):\n search = sc.search_author(author_na... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
construct_network establishes all weight matrices and biases and connects them. The outputs may include parameters of the flow | def construct_network(self, n_units, n_samples=1, noise_dim=0,
keep_p=1., nonlinearity=True, init_params=None, name=""):
print "constructing network, n_units: ",n_units
# TODO use kwargs for more elagant solutions to being called by this
# base class
assert keep_p ==1. and n... | [
"def buildNetwork(inputs,hidden,outputs):\n\t# initialise the network\n\tnetwork = pb.structure.FeedForwardNetwork()\n\n\t# create the layers for the network \n\tinLayer = pb.structure.LinearLayer(inputs)\n\thiddenLayer = pb.structure.SigmoidLayer(hidden)\n\toutLayer = pb.structure.LinearLayer(outputs)\n\n\t# add l... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Add transaction to the history. | def add_history(self):
# add separator, if there already are history entries
if self.parentApp.History != '':
self.parentApp.History += (
'\n\n--- --- --- --- --- --- --- --- --- --- --- ---\n\n'
)
# add the transaction to it
self.parentApp.Histor... | [
"def add_transaction(self, transaction):\n\n self.transactions.append(transaction)",
"def add_transaction(self, tx: Transaction) -> None:\n self.txs.append(tx)\n self.id = None",
"def add_history(self, history_entry, details=()) :\n \n self.history.add(history_entry, details=d... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Press cancel to go back. | def on_cancel(self, keypress=None):
self.parentApp.switchFormPrevious() | [
"def go_back(self):",
"def press_back_button(self):\n self.driver.back()",
"def skip(self):\n self.click_back_button()",
"def back(self):\n self.parent.back()",
"def cancelButton(self):\n \n self.answer=-1\n self.top.destroy()",
"def press_back(self, num=1):",
"def ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Parse or generate classification (e.g. public health, education, etc). | def _parse_classification(self, item):
full_name = item.css('td[headers=Name]::text').extract_first()
if "Metra" in full_name and "Board Meeting" in full_name:
return BOARD
elif "Citizens Advisory" in full_name:
return ADVISORY_COMMITTEE
elif "Committee Meeting" ... | [
"def _parse_classification(self, name):\n if \"committee\" in name.lower():\n return COMMITTEE\n if \"hearing\" in name.lower():\n return FORUM\n return BOARD",
"def classification(self) -> 'outputs.CaseClassificationResponse':\n return pulumi.get(self, \"classifi... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Cache the response if this request qualifies and has not been cached yet, or, for rest-based and rest&time-based strategies, evict the record from the cache if the request method is POST/PATCH/PUT or DELETE | def process_response(self, req, resp, resource, req_succeeded):
# Step 1: for 'rest-based' and 'rest&time-based' eviction strategies the
# POST/PATCH/PUT/DELETE calls are never cached and even more they
# invalidate the record cached by the GET method
if self.cache_config['CACHE_EVICTIO... | [
"def cached(self, vary=None):\n if self.has_param('no_cache'):\n return\n cached = memcache.get(\n key=self.cache_key(vary=vary),\n namespace=\"handler-cache\")\n if cached:\n # update the response\n self.response_dict(**cached)\n return True",
"def cache():\n is_conditio... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Generate the cache key from the request using the path and the method | def generate_cache_key(req, method: str = None) -> str:
path = req.path
if path.endswith('/'):
path = path[:-1]
if not method:
method = req.method
return f'{path}:{method.upper()}' | [
"def _generate_view_response_cache_key( # pylint: disable=unused-argument\n handler: Callable[..., Awaitable[StreamResponse]],\n request: Request,\n *args,\n **kwargs,\n) -> str:\n get_params = request.query\n\n hash_ = sha1(request.path.encode('utf-8'))\n\n for param in sorted(get_params):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Serializes the response, so it can be cached. If CACHE_CONTENT_TYPE_JSON_ONLY = False (default), then we need to keep the response ContentType header, so we need to serialize the response body with the content type with msgpack, which takes away performance. For this reason the user can set CACHE_CONTENT_TYPE_JSON_ONLY... | def serialize(self, req, resp, resource) -> bytes:
if self.cache_config['CACHE_CONTENT_TYPE_JSON_ONLY']:
if FALCONVERSION_MAIN < 3:
return resp.body
else:
return resp.text
else:
if FALCONVERSION_MAIN < 3:
return msgpack.... | [
"def render_response(self, content, response_type='json'):\n if response_type == 'json':\n response = HttpResponse(content_type=\"application/json; charset=UTF-8\")\n response.write(\n json.dumps(content, cls=JSONEncoder, ensure_ascii=False))\n return response\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Deserializes the cached record into the response Body or the ContentType and Body | def deserialize(self, data: bytes) -> Tuple[str, Any]:
if self.cache_config['CACHE_CONTENT_TYPE_JSON_ONLY']:
return data
else:
return msgpack.unpackb(data, raw=False) | [
"def deserialize(self, resp):\r\n return self.serializer.deserialize(resp.content, format=resp['Content-Type'])",
"def deserialize(self, response):\n return",
"def decode(self, response):\n\n if response.status_code < 200 or response.status_code >= 300:\n raise BeanBagException(r... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Takes an incoming socket and either stores the command to the command queue, or performs another action based on the command. | def receive_and_store(self, socket, addr):
# Create the incoming connection
conn = IncomingConnection(addr, socket)
# Receive the data from the connection
data = conn.recv_data()
if not data:
logger.warning("Invalid data received")
return
# Get t... | [
"def handle(self):\n socket = self.request[1]\n data = self.request[0].strip()\n logger.info(\"Address {} at {} wrote: '{}'\".format(self.client_address[1], self.client_address[0], data))\n cmd_strn, ret = self.command_service(data)\n print(ret)\n self.command_response(cmd_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get a base64 string repr of an object or np image | def getbase64(nparr,):
if type(nparr) == type({}):
nparr = nparr['img']
im = Image.fromarray(nparr)
buf = BytesIO()
im.save(buf,format="JPEG")
return base64.b64encode(buf.getvalue()).decode('ascii') | [
"def base64(self):\n image = self.png.getvalue()\n return base64.encodestring(image).decode('utf-8')",
"def tostring(self):\r\n return self.img.tostring()",
"def data64(self) -> str:\n return Image.encode64(self.data)",
"def convert_numpy_to_base64_image_string(image_np):\n \n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Make a plot of each object and put the image next to it. func defines the type of plot and anything that is done to each (image, obj) pair | def _dump_plotly(objs, images, func):
l = len(objs)
#print(l)
titles = []
for i,x in enumerate(objs):
if 'id' in x:
titles.append('shape id %d' % x.id)
else:
titles.append('item %d' % i)
fig = tools.make_subplots(rows=l, cols=1, subplot_titles = titles,print_g... | [
"def plot_single(potential_func, obstacles, filename, xlim=(-400, 400), ylim=(-400, 400)):\n print \"Generating\", filename\n fig = plt.figure()\n plot = plt.subplot(111)\n show_arrows(plot, potential_func, xlim=xlim, ylim=ylim)\n for obstacle in obstacles:\n show_obstacle(plot, obstacle)\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Parses Pidgin's HTML-formatted logfiles. HTML within messages is converted to normal text, so messages about HTML code will get lost | def parse_html(root, filename):
root_filename = os.path.join(root, filename)
match_date = regex_date.findall(filename)
if not match_date:
raise Exception(root_filename, 'r')
year = int(match_date[0][0])
month = int(match_date[0][1])
day = int(match_date[0][2])
file = open(root_fi... | [
"def _process_html(self):\n pass",
"def process_log(self, log):\n for line in log.splitlines():\n if line and line != \".\":\n text = BeautifulSoup(line, \"html.parser\").text\n\n if \"text-danger\" in line:\n self.log.warning(text)\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Asks user for own nicks after listing all encountered ones | def names_interaction():
already_printed = []
for protocol in protocols:
for account in protocol.accounts:
for contact in account.contacts:
for message in contact.messages:
if message.name not in already_printed:
already_printed.app... | [
"def take_sticks_ai(self, sticks):\n print(\"\\nThere are {} sticks on the board\".format(sticks))\n sticks_taken = random.choice(self.hats[sticks]['content'])\n self.hats[sticks]['choice'] = sticks_taken\n sticks -= sticks_taken\n return sticks",
"def user_picks():\r\n print... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Implement the check_unused_args from the superclass. | def check_unused_args(self, used_args, args, kwargs):
for k, v in kwargs.items():
if k in used_args:
self._used_kwargs.update({k: v})
else:
self._unused_kwargs.update({k: v}) | [
"def _check_unused_parameters(self):\n all_params = set(self.parameters.keys())\n processed_params = set(self.processed_parameters)\n unused_params = all_params - processed_params - RESERVED_ARGS\n\n if unused_params:\n self.log.warning(\"The following parameters were ignored:... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
format a string by a map | def format_map(self, format_string, mapping):
return self.vformat(format_string, args=None, kwargs=mapping) | [
"def _reprOfStringToValueMap (stringMap : Map) -> String:\n\n entrySeparator = u\"§\"\n entryTemplate = \"%s: %s\"\n keyList = sorted(list(stringMap.keys()))\n result = \"\"\n \n for key in keyList:\n value = stringMap[key] \n result += (iif(result == \"\", \"\", entrySeparator)\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get used kwargs after formatting. | def get_used_kwargs(self):
return self._used_kwargs | [
"def get_kwargs_for_plotting(self):\n return self.plot_kwargs",
"def get_form_kwargs(self, **kwargs):\n return kwargs",
"def get_unused_kwargs(self):\n return self._unused_kwargs",
"def get_form_kwargs(self, prefix, **kwargs):\n return kwargs",
"def interpolator_kwargs(self):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get unused kwargs after formatting. | def get_unused_kwargs(self):
return self._unused_kwargs | [
"def _get_clean_parameters(kwargs):\n return dict((k, v) for k, v in kwargs.items() if v is not None)",
"def get_used_kwargs(self):\n return self._used_kwargs",
"def _get_clean_parameters(kwargs):\n\t\treturn dict((k, v) for k, v in kwargs.items() if v is not None)",
"def _scrub_kwargs(kwargs: D... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Add element_by alias and extension methods (if_exists/or_none). | def add_element_extension_method(Klass):
def add_element_method(Klass, using):
locator = using.name.lower()
find_element_name = "element_by_" + locator
find_element_if_exists_name = "element_by_" + locator + "_if_exists"
find_element_or_none_name = "element_by_" + locator + "_or_none... | [
"def contains(self, element):",
"def element_exists(*, driver, locator, by=By.CSS_SELECTOR):\n\n by = check_if_by_should_be_xpath(by=by, locator=locator)\n return bool(driver.find_elements(by=by, value=locator))",
"def iselement(element): # real signature unknown; restored from __doc__\n pass",
"def ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Fluent interface decorator to return self if the method returns None. | def fluent(func):
@wraps(func)
def fluent_interface(instance, *args, **kwargs):
ret = func(instance, *args, **kwargs)
if ret is not None:
return ret
return instance
return fluent_interface | [
"def self_if_blank_arg(func):\n\n @wraps(func)\n def wrapped(instance, *args, **kwargs):\n if any(args) or any(kwargs.values()):\n return func(instance, *args, **kwargs)\n else:\n return instance\n return wrapped",
"def return_none() -> None:\n pass",
"def extract... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Convert value to a list of key strokes >>> value_to_key_strokes(123) ['1', '2', '3'] >>> value_to_key_strokes('123') ['1', '2', '3'] >>> value_to_key_strokes([1, 2, 3]) ['1', '2', '3'] >>> value_to_key_strokes(['1', '2', '3']) ['1', '2', '3'] | def value_to_key_strokes(value):
result = []
if isinstance(value, Integral):
value = str(value)
for v in value:
if isinstance(v, Keys):
result.append(v.value)
elif isinstance(v, Integral):
result.append(str(v))
else:
result.append(v)
r... | [
"def _force_key_as_list(self, key):\r\n return [key] if isinstance(key, (str, unicode)) else key",
"def strokes(self):\n\n # Expecting 8-byte chords (4 bytes of steno, 4 of timestamp.)\n assert self.data_length % 8 == 0\n # Steno should only be present on ACTION_READ packets\n a... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Augment image and key points, bounding boxes !! | def img_and_key_point_augmentation(augmentation, img, bbox, key_points):
# img_copy = img.copy()
image_shape = img.shape
h, w = image_shape[0:2]
# Convert the stochastic sequence of augmenters to a deterministic one.
# The deterministic sequence will always apply the exactly same effects to the im... | [
"def _augment(self, img, hulls_key_points):\n # Make sequence deterministic\n myseq = self.simple_transformations.to_deterministic()\n\n # Augment the image\n img_aug = myseq.augment_images([img])[0]\n\n # Add the Card Key Point tp the hulls\n list_kps = [self.card_key_poin... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Augment image and bounding boxes !! | def img_augmentation(augmentation, img, bbox):
# img_copy = img.copy()
image_shape = img.shape
h, w = image_shape[0:2]
# Convert the stochastic sequence of augmenters to a deterministic one.
# The deterministic sequence will always apply the exactly same effects to the images.
det = augmentati... | [
"def augment_bounding_boxes(self, bounding_boxes_on_images, hooks=None):\n kps_ois = []\n for bbs_oi in bounding_boxes_on_images:\n kps = []\n for bb in bbs_oi.bounding_boxes:\n kps.extend(bb.to_keypoints())\n kps_ois.append(ia.KeypointsOnImage(kps, shap... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Add optimization to the store by inspecting the model field type. | def _optimize_field_by_name(self, store: QueryOptimizerStore, model, selection, field_def) -> bool:
name = self._get_name_from_field_dev(field_def)
if not (model_field := self._get_model_field_from_name(model, name)):
return False
_logger.info('_optimize_field_by_name %r %r', name, m... | [
"def optimise(self, time_step: TimeStep, environment_model: EnvironmentModel):\n pass",
"def _determine_field_storage(self, data_object, **kwargs):\n raise NotImplementedError",
"def prepare_field_type(self, obj):\n if hasattr(obj, 'metadata'):\n if isinstance(obj.metadata, Geogr... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Obtain an upload ticket from the API | def get_upload_ticket(self):
r = HTTPClient().fetch(self.config['apiroot'] + self.ticket_path, method="POST",
body=urlencode({'type': 'streaming'}), headers = self.standard_headers,
validate_cert=not self.config['dev'])
response = json.loads(r.body)
return respons... | [
"async def get_ticket(self, request):\n pass",
"def get_ticket(k):\n\n LOGGER.debug('Received call to get_ticket for %s', k)\n\n # make request url\n url = '%s/rest/api/2/issue/%s' % (jira_config['url'], k)\n\n # get ticket details\n r = s.get(url)\n r.raise_for_status()\n\n LOGGER.deb... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Upload a piece of a video file to Vimeo Makes a PUT request to the given URL with the given binary data The _range parameter indicates the first byte to send. The first time you attempt an upload, this will be 0. The next time, it will be the number returned from get_last_uploaded_byte, if that number is less than the ... | def upload_segment(self, upload_uri, _range, data, filetype):
content_range = '%d-%d/%d' % (_range, len(data), len(data))
upload_headers = {'Content-Type': 'video/%s' % filetype,
'Content-Length': len(data),
'Content-Range': 'bytes: %s' % content_range}
... | [
"def upload_range( # type: ignore\n self, data, # type: bytes\n start_range, # type: int\n end_range, # type: int\n validate_content=False, # type: Optional[bool]\n timeout=None, # type: Optional[int]\n encoding='UTF-8',\n **kwargs\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the last byte index of the file successfully uploaded. Performs a PUT to the given url, which returns a Range header indicating how much of the video file was successfully uploaded. If less than the total file size, this number is used in subsequent calls to upload_segment | def get_last_uploaded_byte(self, check_uri):
upload_check_headers = {'Content-Range': 'bytes */*'}
request_headers = dict(upload_check_headers.items() + self.standard_headers.items())
try:
HTTPClient().fetch(check_uri, method="PUT", body='', headers=request_headers)
except HT... | [
"def check_upload_status(self):\n headers = {'authorization': 'Bearer {0}'.format(session.get('access_token')),\n 'content-range': 'bytes */{0}'.format(self.chunk_info.get('total_size')),\n 'content-length': 0}\n r = req.Request(url=session['url'], headers=headers, ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Delete the upload ticket (to be used once get_last_uploaded_byte() == total file size). Makes a DELETE request to the given URI, removing the upload ticket and setting the upload status to "processing" | def delete_upload_ticket(self, complete_uri):
url = self.config['apiroot'] + complete_uri
log.info("Requesting %s" % url)
r = HTTPClient().fetch(url, method="DELETE", headers=self.standard_headers,
validate_cert=not self.config['dev'])
log.info("Upload comp... | [
"def delete(self):\n try:\n flash_message = request.json[\"flash_message\"]\n folder_path = \"{0}/user_uploads/{1}/{2}/\".format(self.__APP_PATH__, current_user.net_id, request.json[\"folder_name\"])\n request_submitted = path.exists(\"{0}request.submitted\".format(folder_pat... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return the world state / HC state co-occurrence matrix. | def get_co_occ_mat(s_hc_ml, n_s_real, n_s_hc):
co_occs = np.zeros((n_s_hc, n_s_real))
for idx, n in s_hc_ml.items():
co_occs[idx] = n
return co_occs | [
"def CTMCtoStormpy(h):\n\tstate_labelling = _buildStateLabeling(h)\n\ttransition_matrix = deepcopy(h.matrix)\n\te = array([h.e(s) for s in range(h.nb_states)])\n\ttransition_matrix /= e[:,newaxis]\n\ttransition_matrix = st.build_sparse_matrix(transition_matrix)\n\tcomponents = st.SparseModelComponents(transition_m... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return the learned HC state order from the co-occurrence matrix by taking the best-matching world location / HC state pairs. | def get_s_order(co_occs, s_hc=None):
# Greedy approach: just go through items from max to min.
free_rows, free_cols = [list(range(n)) for n in co_occs.shape]
s_ord = -np.ones(co_occs.shape[0], dtype=int)
co_normed = norm_co_occ_matrix(co_occs)
isrtd = np.unravel_index(co_normed.argsort(axis=None)[... | [
"def calc_nearest_state(self): # TODO: Check if we need here state, instead of self.state\n self.stateC = self.toConceptual(self.state)\n CTP, winners = self.find_winner()\n\n state_name = self.find_TPname(filleridx=winners)\n binding = self.find_symBinding(filleridx=winners)\n s... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Link a Spotify account to the bot. | async def link(self, ctx):
if not is_linked(ctx.author.id):
token = str(uuid.uuid4())
valid_until = int((datetime.utcnow() + timedelta(days=1)).timestamp())
add_token(ctx.author.display_name, ctx.author.id, token, valid_until, str(ctx.author.avatar_url))
web_base_... | [
"async def link(ctx, bot: typing.Union[discord.Member, discord.User]):\n if not bot.bot:\n return await r(ctx, \"Not a bot.\")\n await r(ctx, f'<https://www.motiondevelopment.top/bots/{bot.id}>')",
"async def info(self, ctx):\n if ctx.guild is not None:\n await ctx.reply(\"This comm... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Unlink a Spotify account from the bot. | async def unlink(self, ctx):
# Remove all link tokens and spotify details for this user
remove_tokens(ctx.author.id)
remove_spotify_details(ctx.author.id)
await ctx.reply("All your linked accounts were removed, if you had any!") | [
"async def unlink_profile(self, context: Context):\n cursor = self.client.connection.cursor(buffered=True)\n author = context.message.author\n discord_uid = str(author.id)\n embed = discord.Embed(title=f\"Disconnecting '{author.display_name}'\", colour=0x00ff00)\n \n query = \"... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Displays basic info about your linked Spotify account (name, avatar) | async def info(self, ctx):
if ctx.guild is not None:
await ctx.reply("This command can only be used in DMs, because of privacy reasons.")
raise commands.CommandError("Invoker not in DMs.")
if not is_linked(ctx.author.id):
await ctx.reply(f"You don't have a Spotify ac... | [
"async def info(self,ctx):\n avatar=self.bot.user.avatar_url_as(format=None,static_format='png',size=1024)\n repo=discord.Embed(color=embedColour)\n repo.set_author(name=self.bot.user.name,icon_url=avatar)\n repo.set_thumbnail(url=avatar)\n repo.add_field(name=\"Hva?\",value=\"Ein... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Makes the bot leave your voice channel | async def leave(self, ctx):
if ctx.guild is None:
await ctx.reply("This command can only be used in a server, not in DMs.")
raise commands.CommandError("Invoker not in a guild.")
if ctx.author.voice is None or ctx.author.voice.channel is None:
await ctx.reply("You ne... | [
"async def leave(ctx):\n channel = ctx.message.author.voice.channel\n voice = get(bot.voice_clients, guild=ctx.guild)\n\n if voice and voice.is_connected():\n await voice.disconnect()\n print(f\"The bot has left {channel}\")\n await ctx.send(f\"Bai Bai !!, SongBird Left {channel}\")\n\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This function will get the whole dictionary of ytm. Here we set the low bound to 0.001, the high bound to 0.1, and the epsilon to 1e-10. | def get_ytm_dict(self):
ytm=self.ytm
for term in self.Rmn.keys():
ytm = Bootstrapping.bisection(self,0.001, 0.1, 1e-10, 2 * term, self.Rmn, ytm)
return ytm | [
"def yvals(self):\n return self.germs",
"def get_clinical_Y_range(): \n return (-100.0, 100.0) # in mm",
"def get_stig_y(self):\n raise NotImplementedError",
"def get_hyperparameter_bounds():\n minf = float(\"-inf\")\n inf = float(\"inf\")\n params = dict(mu=(minf,... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This function gets the dataframe of yield to maturity and discount rate values. | def get_ytm_discount_data(self):
ytm=Bootstrapping.get_ytm_dict(self)
data = pd.DataFrame()
for i in ytm.keys():
data.loc[i / 2, 'Yield to maturity'] = ytm[i]
data.loc[i / 2, 'discount_rate'] = Bootstrapping.P_Tn(self,ytm[i], i)
return data | [
"def dividend_yield(self) -> pd.DataFrame:\n div_yield_assets = self._list.dividend_yield\n currencies_dict = self._list.currencies\n if \"asset list\" in currencies_dict:\n del currencies_dict[\"asset list\"]\n currencies_list = list(set(currencies_dict.values()))\n di... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Because get_ytm_discount_data returns a dataframe but we want a function where we can input the term and get back the discount rate, this is that function. parameter | def get_discount_rate_function(self,x: float):
discount_rate = Bootstrapping.P_Tn(self,Bootstrapping.get_ytm_function(self,x), x)
return discount_rate | [
"def get_ytm_discount_data(self):\n ytm=Bootstrapping.get_ytm_dict(self)\n data = pd.DataFrame()\n for i in ytm.keys():\n data.loc[i / 2, 'Yield to maturity'] = ytm[i]\n data.loc[i / 2, 'discount_rate'] = Bootstrapping.P_Tn(self,ytm[i], i)\n return data",
"def dis... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This function is used to draw the zero-coupon bond yield curve. | def draw_yield_curve(self):
data = Bootstrapping.get_ytm_discount_data(self)
fig = plt.figure(figsize=[10, 6])
ax = fig.add_subplot(1, 1, 1)
ax.plot(data['Yield to maturity'])
ax.set_xlabel('year')
ax.set_ylabel('rate')
ax.set_title('Zero-coupon yield curve')
... | [
"def draw_discount_curve(self):\n data=Bootstrapping.get_ytm_discount_data(self)\n fig = plt.figure(figsize=[10, 6])\n ax = fig.add_subplot(1, 1, 1)\n ax.plot(data['discount_rate'])\n ax.set_xlabel('Term')\n ax.set_ylabel('value')\n ax.set_title('Discount Curves')\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This function is used to draw the zero-coupon bond discount curve. | def draw_discount_curve(self):
data=Bootstrapping.get_ytm_discount_data(self)
fig = plt.figure(figsize=[10, 6])
ax = fig.add_subplot(1, 1, 1)
ax.plot(data['discount_rate'])
ax.set_xlabel('Term')
ax.set_ylabel('value')
ax.set_title('Discount Curves')
plt.sh... | [
"def draw_yield_curve(self):\n data = Bootstrapping.get_ytm_discount_data(self)\n fig = plt.figure(figsize=[10, 6])\n ax = fig.add_subplot(1, 1, 1)\n ax.plot(data['Yield to maturity'])\n ax.set_xlabel('year')\n ax.set_ylabel('rate')\n ax.set_title('Zero-coupon yield ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test create maze with properties. | def test_create_maze(self):
maze = Maze(4, 4)
self.assertEqual(maze.row_count, 4)
self.assertEqual(maze.col_count, 4)
self.assertEqual(maze.size, 16)
self.assertTrue(isinstance(maze.entrance, list))
self.assertTrue(isinstance(maze.exit, list)) | [
"def testMazeExists(self):\n pass",
"def testMazeExists(self):\n\n pass",
"def test_maze_created_can_be_traversed(self):\n maze = Maze(100, 100)\n\n self.assertTrue(maze._Maze__verify_exit_path())",
"def mazeTest():\r\n\tmyMaze = Maze()\r\n\tmyMaze.addCoordinate(1,0,0)\r\n\tmyMaze.... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test create maze gets type error with non-integer. | def test_create_maze_non_integer(self):
try:
_ = Maze('I am not an integer', 4)
self.assertEqual(True, False, 'should not have got here: '
'maze created with non-integer index.')
except TypeError:
self.assertEqual(True, True) | [
"def test_create_maze(self):\n maze = Maze(4, 4)\n self.assertEqual(maze.row_count, 4)\n self.assertEqual(maze.col_count, 4)\n self.assertEqual(maze.size, 16)\n self.assertTrue(isinstance(maze.entrance, list))\n self.assertTrue(isinstance(maze.exit, list))",
"def testMaze... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test create maze gets type error with float. | def test_create_maze_with_float(self):
try:
_ = Maze(4.0, 4)
self.assertEqual(True, False, 'should not have got here: '
'maze created with float index.')
except TypeError:
self.assertEqual(True, True) | [
"def test_create_maze(self):\n maze = Maze(4, 4)\n self.assertEqual(maze.row_count, 4)\n self.assertEqual(maze.col_count, 4)\n self.assertEqual(maze.size, 16)\n self.assertTrue(isinstance(maze.entrance, list))\n self.assertTrue(isinstance(maze.exit, list))",
"def test_cre... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test maze creates entrance as list of two integers. | def test_create_entrance_is_list(self):
maze = Maze(4, 4)
self.assertTrue(isinstance(maze.entrance[0], int))
self.assertTrue(isinstance(maze.entrance[1], int)) | [
"def test_create_entrance_is_list(self):\n maze = Maze(4, 4)\n self.assertTrue(isinstance(maze.exit[0], int))\n self.assertTrue(isinstance(maze.exit[1], int))",
"def init_maze(width: int, height: int) -> list[int]:\n return [0] * width * height",
"def mazeTest():\r\n\tmyMaze = Maze()\r\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test maze creates exit as list of two integers | def test_create_entrance_is_list(self):
maze = Maze(4, 4)
self.assertTrue(isinstance(maze.exit[0], int))
self.assertTrue(isinstance(maze.exit[1], int)) | [
"def test_get_exit_points(self, cyl_generator):\n particle = Particle(particle_id='nu_e', energy=1e9,\n vertex=(0, 0, -1000), direction=(0, 0, 1))\n with np.errstate(divide='ignore'):\n points = cyl_generator.get_exit_points(particle)\n assert np.array_equa... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test maze creates random indices between 0 and len(row) and 0 and len(column). | def test_get_random_indices_in_range(self):
maze = Maze(10, 10)
for test in range(1000):
position = maze._Maze__get_random_indices()
self.assertTrue(-1 < position[0] < 10)
self.assertTrue(-1 < position[1] < 10) | [
"def test_maze_created_traversed_from_indices(self):\n maze = Maze(100, 100)\n\n for test in range(20):\n self.assertTrue(maze.can_reach_exit([random.randint(0, 99),\n random.randint(0, 99)]))",
"def testMazeExists(self):\n\n pass",
... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that a maze exit can be found at creation using private method __verify_exit_path. | def test_maze_created_can_be_traversed(self):
maze = Maze(100, 100)
self.assertTrue(maze._Maze__verify_exit_path()) | [
"def testMazeExists(self):\n pass",
"def testMazeExists(self):\n\n pass",
"def test_maze_exit_pointers_are_none(self):\n maze = Maze(100, 100)\n\n row, col = maze.exit\n if maze.grid[row][col].up:\n maze.grid[row][col].up.down = None\n if maze.grid[row][col].... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that a maze exit can be found at creation from random positions. | def test_maze_created_traversed_from_indices(self):
maze = Maze(100, 100)
for test in range(20):
self.assertTrue(maze.can_reach_exit([random.randint(0, 99),
random.randint(0, 99)])) | [
"def testMazeExists(self):\n pass",
"def testMazeExists(self):\n\n pass",
"def test_maze_created_can_be_traversed(self):\n maze = Maze(100, 100)\n\n self.assertTrue(maze._Maze__verify_exit_path())",
"def test_get_random_indices_in_range(self):\n maze = Maze(10, 10)\n\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that a maze cannot be exited when entrance pointers are set to None. | def test_maze_entrance_pointers_are_none(self):
maze = Maze(100, 100)
row, col = maze.entrance
maze.grid[row][col].up = None
maze.grid[row][col].right = None
maze.grid[row][col].down = None
maze.grid[row][col].left = None
self.assertFalse(maze.can_reach_exit([ro... | [
"def test_maze_exit_pointers_are_none(self):\n maze = Maze(100, 100)\n\n row, col = maze.exit\n if maze.grid[row][col].up:\n maze.grid[row][col].up.down = None\n if maze.grid[row][col].right:\n maze.grid[row][col].right.left = None\n if maze.grid[row][col].do... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that a maze cannot be exited when rooms adjacent to the exit have their pointers set to None. | def test_maze_exit_pointers_are_none(self):
maze = Maze(100, 100)
row, col = maze.exit
if maze.grid[row][col].up:
maze.grid[row][col].up.down = None
if maze.grid[row][col].right:
maze.grid[row][col].right.left = None
if maze.grid[row][col].down:
... | [
"def test_maze_entrance_pointers_are_none(self):\n maze = Maze(100, 100)\n\n row, col = maze.entrance\n maze.grid[row][col].up = None\n maze.grid[row][col].right = None\n maze.grid[row][col].down = None\n maze.grid[row][col].left = None\n\n self.assertFalse(maze.can_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that a maze cannot be exited when rooms adjacent to the entrance are blocked. | def test_maze_entrance_adjacent_are_blocked(self):
maze = Maze(100, 100)
row, col = maze.entrance
if row - 1 >= 0:
maze.grid[row - 1][col].blocked = True
if col + 1 < 100:
maze.grid[row][col + 1].blocked = True
if row + 1 < 100:
maze.grid[row ... | [
"def test_maze_exit_adjacent_are_blocked(self):\n maze = Maze(100, 100)\n\n row, col = maze.exit\n if row - 1 >= 0:\n maze.grid[row - 1][col].blocked = True\n if col + 1 < 100:\n maze.grid[row][col + 1].blocked = True\n if row + 1 < 100:\n maze.gri... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that a maze cannot be exited when rooms adjacent to the exit are blocked. | def test_maze_exit_adjacent_are_blocked(self):
maze = Maze(100, 100)
row, col = maze.exit
if row - 1 >= 0:
maze.grid[row - 1][col].blocked = True
if col + 1 < 100:
maze.grid[row][col + 1].blocked = True
if row + 1 < 100:
maze.grid[row + 1][col... | [
"def test_maze_entrance_adjacent_are_blocked(self):\n maze = Maze(100, 100)\n\n row, col = maze.entrance\n if row - 1 >= 0:\n maze.grid[row - 1][col].blocked = True\n if col + 1 < 100:\n maze.grid[row][col + 1].blocked = True\n if row + 1 < 100:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
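The four pointer/blocked tests above all depend on can_reach_exit honouring severed links (None pointers) and blocked rooms. A hedged sketch of such a traversal, written as a free function; the room attributes up/right/down/left and blocked are taken from the tests, everything else is an assumption:

def can_reach_exit(grid, start, exit_pos):
    """Sketch: iterative depth-first search over linked rooms."""
    exit_room = grid[exit_pos[0]][exit_pos[1]]
    stack = [grid[start[0]][start[1]]]
    visited = set()
    while stack:
        room = stack.pop()
        # skip severed links, blocked rooms and rooms already visited
        if room is None or room.blocked or id(room) in visited:
            continue
        visited.add(id(room))
        if room is exit_room:
            return True
        stack.extend((room.up, room.right, room.down, room.left))
    return False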
Returns a 3xn numpy array describing motion accelerations. Those accelerations are analytically calculated and aren't susceptible to errors. | def get_analytical_accelerations(self):
# create empty numpy array for accelerations
accelerations = np.zeros((3, len(self.times)))
        # radial acceleration is equal to angular velocity^2 * radius, but the radius is unitary in this trajectory
radial_acceleration = self.wz ** 2
# decompo... | [
"def calculate_acceleration(self) -> np.array:\n F = self.calculate_net_force()\n m = self.mass\n a = F / m\n\n return a",
"def target_acceleration(self, time):\n x_a = -self.w**2*self.r*sin(self.w*time)\n y_a = -self.w**2*self.r*cos(self.w*time)\n z_a = 0\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
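The decomposition step is cut off above. For unit-radius circular motion at angular speed wz, the centripetal acceleration has magnitude wz**2 and points toward the centre; a self-contained sketch of one plausible completion (the phase convention x = cos(wz*t), y = sin(wz*t) is an assumption):

import numpy as np

def analytical_accelerations(wz, times):
    """Sketch: accelerations for unit-radius circular motion at angular speed wz."""
    accelerations = np.zeros((3, len(times)))
    # second derivatives of x = cos(wz*t) and y = sin(wz*t)
    accelerations[0, :] = -wz ** 2 * np.cos(wz * times)
    accelerations[1, :] = -wz ** 2 * np.sin(wz * times)
    return accelerations  # the z row stays zero for planar motion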
Returns a 3xn numpy array describing motion velocities. Those velocities are analytically calculated and aren't susceptible to errors. | def get_analytical_velocities(self):
        # create empty numpy array for velocities
velocities = np.zeros((3, len(self.times)))
# tangential velocity is angular velocity multiplied by radius but radius is one
vt = self.wz
# decompose tangential velocity in x and y components
... | [
"def motor_velocities(self):\n return np.asarray(self._robot_state.velocity)",
"def get_velocities(self):\n\n return np.array([p.velocity for p in self.particles])",
"def compute_rotor_velocities( self ):\n self.compute_Y()\n \n # Calculate rotor velocities\n w = self.Y * self.param +\\\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
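The velocity decomposition is likewise truncated. Under the same assumed phase convention, the tangential speed is wz (radius one) and the components follow from the first derivatives:

import numpy as np

def analytical_velocities(wz, times):
    """Sketch: velocities for unit-radius circular motion at angular speed wz."""
    velocities = np.zeros((3, len(times)))
    # first derivatives of x = cos(wz*t) and y = sin(wz*t)
    velocities[0, :] = -wz * np.sin(wz * times)
    velocities[1, :] = wz * np.cos(wz * times)
    return velocities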
Check an externally generated trajectory against the internal one. | def check_trajectory(self, external_trajectory):
# Create empty array for error measure
error = np.zeros((3, external_trajectory.shape[1]))
# loop over external trajectory
for i, external_x in enumerate(external_trajectory.T):
# get trajectory coordinates at step i
... | [
"def validate_trajectory(self):\n raise NotImplementedError",
"def test_object_with_trajectory() -> None:\n system_name = \"Octanol2\"\n system = database.system(system_name)\n parser = FlatfileParser()\n simulation_data = parser.get_simulation_data(\n units=system.un... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
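The comparison loop above is truncated before the error is actually computed. A self-contained sketch of the same idea, measuring the per-step deviation between two 3xN trajectories (the plain difference metric is an assumption, since the original measure isn't visible):

import numpy as np

def trajectory_error(internal, external):
    """Sketch: per-axis deviation between two 3xN trajectories of equal shape."""
    internal = np.asarray(internal)
    external = np.asarray(external)
    if internal.shape != external.shape:
        raise ValueError('trajectories must have the same shape')
    error = external - internal          # 3xN per-axis deviation
    worst = float(np.abs(error).max())   # single worst-case figure
    return error, worst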
For a given liaison and set of DLCs, update all unsent EmailMessages associated with those DLCs to have that Liaison. We can't make this part of, e.g., the save() method on DLC, because the liaison.dlc_set.update() commands used in views.py go straight to SQL, bypassing the ORM; save() doesn't get hit, and neither do pr... | def update_emails_with_dlcs(dlcs, liaison=None):
for dlc in dlcs:
EmailMessage.objects.filter(
record__author__dlc=dlc,
date_sent__isnull=True).update(_liaison=liaison) | [
"def update_attributes_by_domains(etl, update_kwargs):\r\n import arcetl\r\n func = functools.partial(\r\n etl.transform, transformation=arcetl.attributes.update_by_domain_code,\r\n )\r\n tuple(func(**kwargs) for kwargs in update_kwargs)",
"def on_update(self):\n\t\tfor email_account in fra... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
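A hypothetical call site, mirroring the pattern the docstring describes: the bulk queryset update bypasses save() and signals, so the helper is invoked right after it (the model and variable names here are assumptions, not taken from the project):

# hypothetical view code
dlcs = DLC.objects.filter(pk__in=selected_ids)
dlcs.update(liaison=liaison)                    # straight SQL: no save(), no signals
update_emails_with_dlcs(dlcs, liaison=liaison)  # so sync the unsent emails explicitly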
Analytic expression for the normalized inverse cumulative mass function. The argument ms is normalized mass fraction [0,1] | def _icmf(self, ms):
return self._pot.a * numpy.sqrt(ms) / (1 - numpy.sqrt(ms)) | [
"def mass(query, ts):\n\n m = len(query)\n q_mean = np.mean(query)\n q_std = np.std(query)\n mean, std = mov_mean_std(ts, m)\n dot = sliding_dot_product(query, ts)\n return 2 * m * (1 - (dot - m * mean * q_mean) / (m * std * q_std))",
"def momentum(E,m):\n\treturn math.sqrt(E*E - m*m)",
"def c... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
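The returned expression is the closed-form inverse of a Hernquist-style cumulative mass profile with scale radius a (self._pot.a); assuming that profile, the algebra behind it is:

m(r) = \frac{M(<r)}{M_{\mathrm{tot}}} = \frac{r^{2}}{(r+a)^{2}}
\;\Longrightarrow\;
\sqrt{m} = \frac{r}{r+a}
\;\Longrightarrow\;
r(m) = \frac{a\sqrt{m}}{1-\sqrt{m}}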
Split up seq into pieces of the given size. | def split_seq(seq,size):
return [seq[i:i+size] for i in range(0, len(seq), size)] | [
"def splitseq(seq,size):\n return [seq[i:i+size] for i in range(0, len(seq), size)]",
"def _chunker(self, seq, size):\n return (seq[pos:pos + size] for pos in range(0, len(seq), size))",
"def get_chunks(sequence, chunk_size) :\n\n chunk_seq = []\n i=1\n while i*chunk_size < len(sequence) :\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
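A quick usage check of the chunking behaviour; the last piece is simply shorter when the length isn't a multiple of size, and any sliceable sequence works:

split_seq(list(range(7)), 3)   # -> [[0, 1, 2], [3, 4, 5], [6]]
split_seq("abcdef", 2)         # -> ['ab', 'cd', 'ef']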
Returns the parent/enclosing tag (instance of PythonTag()) from the specified tag list. If no such parent tag exists, returns None. | def getParentTag(self, tagsStack):
# DOC {{{
# }}}
# CODE {{{
# determine the parent tag {{{
if (len(tagsStack)):
parentTag = tagsStack[-1]
else:
parentTag = None
# }}}
# return the tag
return parentTag
# }}} | [
"def findTypeParent(element, tag):\n \n p = element\n while True:\n p = p.getparent()\n if p.tag == tag:\n return p\n \n # Not found\n return None",
"def findTypeParent(element, tag):\n \n p = element\n while True:\n p ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns instance of PythonTag() based on the specified data. | def getPythonTag(self, tagsStack, lineNumber, indentChars, tagName, tagTypeDecidingMethod):
# DOC {{{
# }}}
# CODE {{{
# compute the indentation level
indentLevel = self.computeIndentationLevel(indentChars)
# get the parent tag
parentTag = self.getParentTag(tagsS... | [
"def getNodeClassFromData(self, data: OrderedDict):\n return Node if self.node_class_selector is None else self.node_class_selector(data)",
"def getTag(self, tag, record = None, data_type = None):\n \n # Get the official record and tag numbers\n tag_num = None\n record_num = None\n try:\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns tag type of the current tag based on its previous tag (super tag) for classes. | def tagClassTypeDecidingMethod(self, parentTagType):
# DOC {{{
# }}}
# CODE {{{
# is always class no matter what
return PythonTag.TT_CLASS
# }}} | [
"def tag_type(self):\n return self._tag_type",
"def class_for_tag(self, tag) -> Optional[type]:\n qname = self.qname(tag)\n results = map(itemgetter(0), filter(lambda ct: ct[1] == qname, self._class_tag))\n try:\n return next(results)\n except StopIteration:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Initializes instances of VimReadlineBuffer(). | def __init__(self, vimBuffer):
# DOC {{{
# }}}
# CODE {{{
# remember the settings
self.vimBuffer = vimBuffer
# initialize instance attributes {{{
self.currentLine = -1
self.bufferLines = len(vimBuffer)
# }}}
# }}} | [
"def __init__(self, *args, **kwargs):\n _richtext.RichTextBuffer_swiginit(self,_richtext.new_RichTextBuffer(*args, **kwargs))",
"def init_readline():\n if g.command_line:\n return\n\n if has_readline:\n g.READLINE_FILE = os.path.join(get_config_dir(), \"input_history\")\n\n if os... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the index of line in 'tagLineNumbers' list that is nearest to the specified cursor row. | def getNearestLineIndex(row, tagLineNumbers):
# DOC {{{
# }}}
# CODE {{{
# initialize local auxiliary variables {{{
nearestLineNumber = -1
nearestLineIndex = -1
# }}}
# go through all tag line numbers and find the one nearest to the specified row {{{
for lineIndex, lineNumber ... | [
"def get_line_number(view, rg):\r\n return view.rowcol(rg.end())[0]",
"def line_pos(self) -> int:\n return self.source_position()[1]",
"def getLinescanPos(self):\n return self.handle.pos().toPoint()",
"def get_line_index(self):\n return self.line_index",
"def index_tag_in_lines(lines... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
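The search loop above is cut off. One plausible completion, sketched as a standalone helper, assumes tagLineNumbers is sorted ascending and that "nearest" means the last tag line at or above the cursor row; the original may differ on both points:

def nearest_line_index(row, tag_line_numbers):
    """Sketch: index of the last tag line at or above the cursor row, or -1."""
    nearest_index = -1
    for line_index, line_number in enumerate(tag_line_numbers):
        if line_number > row:
            break                    # past the cursor; later entries are too
        nearest_index = line_index   # still at or above the cursor, keep it
    return nearest_index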
Reads the tags for the specified buffer number. Returns a tuple (taglinenumber[buffer], tags[buffer],). | def getTags(bufferNumber, changedTick):
# DOC {{{
# }}}
# CODE {{{
# define global variables
global TAGLINENUMBERS, TAGS, BUFFERTICKS
# return immediately if there's no need to update the tags {{{
if (BUFFERTICKS.get(bufferNumber, None) == changedTick):
return (TAGLINENUMBERS[buffe... | [
"def findTag(bufferNumber, changedTick):\n # DOC {{{\n # }}}\n\n # CODE {{{\n # try to find the best tag {{{\n try:\n # get the tags data for the current buffer\n tagLineNumbers, tags = getTags(bufferNumber, changedTick)\n\n # link to vim's internal data {{{\n currentBuffe... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |