query (string, lengths 9–9.05k) | document (string, lengths 10–222k) | negatives (list, lengths 19–20) | metadata (dict)
|---|---|---|---|
Returns the approximated Hessian of the function at the point x. | def calc_hessian_at(self, x: np.ndarray) -> np.ndarray:
return hessian_approximation(self.f, x) | [
"def InvHessian(self,x):\n return linalg.inv(self.besthessian(x))",
"def evaluateHessian(fgradient,x,epsilon = 1e-5):\n h = zeros(shape(x))\n res = zeros((len(x),len(x)))\n for i in range(0,len(x)):\n # Define new gradient function which returns only the i:th element of \n # the grad... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the constraint function values at the point x. | def calc_constraints_at(self, x: np.ndarray) -> np.ndarray:
return np.array([c(x) for c in self.constraints]) | [
"def calc_constraint_at(self, i: int, x: np.ndarray) -> float:\n return self.constraints[i](x)",
"def calc_constraints_jacobian_at(self, x: np.ndarray) -> np.ndarray:\n return np.array([gradient_approximation(c.c, x) for c in self.constraints])",
"def constraint(self, x):\n return x[0]",
... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the constraint function value of constraint i at the point x. | def calc_constraint_at(self, i: int, x: np.ndarray) -> float:
return self.constraints[i](x) | [
"def constraint(self, x):\n return x[0]",
"def calc_constraint_gradient_at(self, i: int, x: np.ndarray) -> np.ndarray:\n return gradient_approximation(self.constraints[i], x)",
"def calc_constraints_jacobian_at(self, x: np.ndarray) -> np.ndarray:\n return np.array([gradient_approximation(c.... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the constraints' approximated Jacobian at the point x. | def calc_constraints_jacobian_at(self, x: np.ndarray) -> np.ndarray:
return np.array([gradient_approximation(c.c, x) for c in self.constraints]) | [
"def jacobian(self, x):\n return self.jnz",
"def jacobian(function, x):\n x = np.asarray(x)\n assert x.ndim == 1, \"x must be a vector\"\n x_ad = np.empty(x.shape, dtype=np.object)\n for i in range(x.size):\n der = np.zeros(x.size)\n der[i] = 1\n x_ad.flat[i] = AutoDiffXd(x... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the approximated gradient values of constraint i at the point x. | def calc_constraint_gradient_at(self, i: int, x: np.ndarray) -> np.ndarray:
return gradient_approximation(self.constraints[i], x) | [
"def calc_gradient_at(self, x: np.ndarray) -> np.ndarray:\n return gradient_approximation(self.f, x)",
"def _gradient_terms(self, x):\n # gradient of predictive variance of y\n dvar_dx = self.model.base_gp.kern.dKdiag_dx(x)\n dKxX_dx1 = self.model.base_gp.kern.dK_dx1(x, self.model.X)\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the lagrangian function value at the point x. | def calc_lagrangian_at(self, x, lambda_) -> float:
assert len(lambda_) == len(self.constraints)
result = self.calc_f_at(x)
for i, lambda_i in enumerate(lambda_):
result -= lambda_i * self.calc_constraint_at(i, x)
return result | [
"def calc_lagrangian_gradient_at(self, x, lambda_) -> np.ndarray:\n\n def lagrangian(x_):\n return self.calc_lagrangian_at(x_, lambda_)\n\n return gradient_approximation(lagrangian, x)",
"def calc_lagrangian_hessian_at(self, x, lambda_) -> np.ndarray:\n\n def lagrangian(x_):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the approximated lagrangian gradient with regard to x at the point x. | def calc_lagrangian_gradient_at(self, x, lambda_) -> np.ndarray:
def lagrangian(x_):
return self.calc_lagrangian_at(x_, lambda_)
return gradient_approximation(lagrangian, x) | [
"def calc_gradient_at(self, x: np.ndarray) -> np.ndarray:\n return gradient_approximation(self.f, x)",
"def GetGradient(self, x):\n return _handle.OperatorHandle_GetGradient(self, x)",
"def LMLgrad_X(self):\n return _core.CGPbase_LMLgrad_X(self)",
"def gradient(x):\n\t\tpass",
"def f_gr... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the approximated lagrangian Hessian with regard to x at the point x. | def calc_lagrangian_hessian_at(self, x, lambda_) -> np.ndarray:
def lagrangian(x_):
return self.calc_lagrangian_at(x_, lambda_)
return hessian_approximation(lagrangian, x) | [
"def calc_hessian_at(self, x: np.ndarray) -> np.ndarray:\n return hessian_approximation(self.f, x)",
"def InvHessian(self,x):\n return linalg.inv(self.besthessian(x))",
"def default_hessian(self, x, f):\r\n n = len(x)\r\n G = zeros((n,n))\r\n h = 1e-3\r\n \r\n for i i... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Destandardizes x based on the original problem. | def destandardize_x(self, x: np.ndarray) -> np.ndarray:
n = self.original_n
x_plus = x[:n] # take x_+ part
x_neg = x[n:n + len(self.indices_of_non_positive_constrained_vars)]
# subtract x_- from x_+ to get x
x_plus[self.indices_of_non_positive_constrained_vars] -= x_neg
... | [
"def transform_x(self, x):\n raise NotImplementedError()",
"def transform_x(self, x):\n if len(self.x_cols) == 0:\n return x\n self.logging('x shape: {}'.format(_shape(x)), level=logging.DEBUG)\n x_new = x.copy()\n if len(self.x_cols) > 0:\n x_new.drop(list... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Factory method to create standardizing meta info for a problem that is already standardized. This represents a default instance in the sense of nonstandardized meta info. | def from_pre_standardized(cls, problem: 'LinearConstraintsProblem') -> 'StandardizingMetaInfo':
return StandardizingMetaInfo(problem.n, np.empty(0, dtype=int), 0, problem.constraints) | [
"def _construct_metadata(self):\n if self.properties:\n return self._step_type_to_output_format_map[self.type]()\n return None",
"def _meta(self, field, **kwargs):\n try:\n return self.meta[field][0]\n except (KeyError, IndexError):\n if 'default' in kw... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the true application name from an inputted phrase. Designed to find the closest app, accounting for poor listening | def get_app_name(app_names_list, app):
most_similar = 0.0
app_to_open = None
for app_name in app_names_list:
app_name_trimmed = app_name.split('.app')[0].lower()
similarity = SequenceMatcher(None, app_name_trimmed, app.lower()).ratio()
if similarity > most_s... | [
"def get_closest_name(self, word):\n self.get_distances(word)\n name = min(self.distances, key=self.distances.get)\n return self.app_names[name] if self.distances[name] < 5 else \"\"",
"def get_app_name(app_id: str) -> str:\n return get_app_names([app_id]).get(app_id)",
"def get_app_name... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Open a given app. Must be within the applications folder. Append the opened process to the processes list | def start_app(self, app_to_open, new_instance_command=False):
true_app_name = self.get_app_name(self.appNames, app_to_open)
activity_monitor_app_name = true_app_name.split('.app')[0]
new_instance = new_instance_command or not self.is_running(activity_monitor_app_name)
if new_instance:
... | [
"def open_application(self):\n return os.startfile(os.getcwd()+\"/broken-hashserve/broken-hashserve_win.exe\")",
"def localapp(path, newinstance=False, hide=False):\n\t# Always create AEAddressDesc by process serial number; that way there's no confusion if multiple versions of the same app are running\n\ti... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check if an application is currently running | def is_running(app_name):
count = int(subprocess.check_output(["osascript",
"-e", "tell application \"System Events\"",
"-e", "count (every process whose name is \"" + app_name + "\")",
... | [
"def is_program_running(self):\n return self.rob.secmon.is_program_running()",
"def exe_stillRunning(self):\n return self.t_exe.isAlive()",
"def isRunning (self):\n\t\tif not self.job.pid:\n\t\t\treturn False\n\t\treturn ps.exists(int(self.job.pid))",
"def _is_running(process):\n with hide('o... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Kill the last opened process. Should be useful for bug testing in the future, i.e. "No, don't open that" | def kill_last(self):
killed = False
while len(self.processes) > 0 and not killed:
last_process_opened = self.processes.pop()
try:
self.kill(last_process_opened)
killed = True
except ProcessLookupError:
pass
retur... | [
"def kill_process(self):\r\n self._proc.kill()",
"def kill_process(process):\n while True:\n process.terminate()\n if process.is_alive() == False:\n break",
"def terminate_process(self, upid):",
"def close(self):\r\n \r\n logging.debug('Cleanup...')#Used for de... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that the shuffled intervals do not overlap each other. If there is a single overlap, discard this whole shuffle step and redo it (discarding only this interval would introduce a bias in the probability of the position and it would not be a purely random shuffle) | def test_shuffled_interval_overlap(intervals):
print "testing"
print intervals
results = {}
for interval in intervals.values()[0]:
try:
chromosome = interval[0]
if chromosome not in results:
results[chromosome] = {}
results[chromosome][interval... | [
"def test_shuffle_range(self):\n shuffle_range(self.to_test, 3, -3)\n self.assertEqual(self.to_test[:3],self.numbers)\n self.assertEqual(self.to_test[-3:], self.numbers)\n self.assertNotEqual(self.to_test[3:-3], 2*self.letters)\n self.assertEqualItems(self.to_test[3:-3], 2*self.le... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Initiate parallel threads for interval shuffling phase | def start_shuffle(output_file,np):
index=0
starttime = time.time()
individualIntervals = allIndividuals.items()
try:
print "starting parallel shuffle..."
pool = Pool(np)
results = pool.map(shuffle, individualIntervals)
print "pool finished\n"
print str(results)
pool.close()
p... | [
"def worker_init_fn(worker_id):\n np.random.seed(args.seed + worker_id)\n random.seed(args.seed + worker_id)",
"def initialize_workers(self):\n self.workers = []\n for j in range(self.n):\n # generate p according to spammer-hammer model\n p_j = np.random.c... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Slices of this volume with the specified voxel size. | def slices_in_scale(self, voxel_size: Cartesian) -> tuple:
minpt = tuple( p * s1 // s2 for p, s1, s2 in zip(
self.minpt, self.voxel_size, voxel_size
))
maxpt = tuple( p * s1 // s2 for p, s1, s2 in zip(
self.maxpt, self.voxel_size, voxel_size
))
bbox = Boun... | [
"def get_slice_from_volume(image, view, slice_id):\n if(view == 1):\n image = np.transpose(image, [2, 0, 1])\n elif(view == 2):\n image = np.transpose(image, [1, 0, 2])\n return image[slice_id]",
"def to_volume(slices):\n volume = np.stack([s.pixel_array for s in slices])\n volume = v... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Generate the ROITree from a single roi. This roi is not required to be aligned with the atomic block size. If it is not aligned, a roi will partially cover the volume. | def from_roi(cls, roi: RegionOfInterest, factor: Cartesian,
atomic_block_size: Cartesian, atomic_voxel_size: Cartesian):
pass
# assert roi.voxel_size % atomic_voxel_size == Cartesian(0, 0, 0)
# assert roi.voxel_size // atomic_voxel_size % factor == Cartesian(0, 0, 0)
... | [
"def set_roi(self, roi):\n with h5py.File(self.data_file, 'r+') as f:\n if 'roi' not in f:\n roigrp = f.create_group('roi')\n else:\n roigrp = f['roi']\n roigrp.create_dataset('roi{}'.format(self._next_roi_idx), data=np.asarray(roi), compression=... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return column letter for given column. | def col_letter(col):
return chr(ord("A") + col - 1) | [
"def _get_column_name(self, column):\n return column",
"def _get_header_column_letter(self, title):\n return self._to_letter(self._get_header_index(title))",
"def _series_col_letter(self, series):\n start_col_ascii = ord('A') + series.categories.depth\n return chr(start_col_ascii + s... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns a list of variants for a product. | def get_product_variants(variants, sku):
product_variants = [
variant for variant in variants
if variant["Product SKU"] == sku and variant["Variant Enabled"] == "Y"
]
product_variants.sort(key=lambda variant: variant["Variant Sort"])
return product_variants | [
"def get_variants(self):\n return self.variants or []",
"def get_product_list(self):\n product_list = ProductModel.objects.in_bulk(self.keys())\n return product_list.values()",
"def get_products():",
"def get_all_variants():\n clean_expired_sessions()\n\n # reads the session\n se... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Add row for each product. | def add_products(args, worksheet, row, cc_browser, products):
# Add header row.
set_cell(
worksheet,
row,
COL_ITEM_NO,
"Item No",
font_bold=True,
alignment_horizontal="right"
)
set_cell(worksheet, row, COL_DESCRIPTION, "Description", font_bold=True)
s... | [
"def insert_product(self, table):\n for i in self.products:\n # extract data\n name = i[\"name\"]\n quantity = i[\"quantity\"]\n brand = i[\"brand\"]\n description = i[\"description\"]\n url = i[\"url\"]\n rating = i[\"rating\"]\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create the Wholesale Line Sheet worksheet. | def add_line_sheet(args, config, cc_browser, products, worksheet):
# Prepare worksheet.
worksheet.title = "Wholesale Line Sheet"
# Add title.
row = add_title(args, config, worksheet)
# Blank row.
row += 1
# Add products.
add_products(args, worksheet, row, cc_browser, products) | [
"def create_sheet(self):\n workbook = xlwt.Workbook()\n borders = Borders()\n header_border = Borders()\n header_title_border = Borders()\n ware_or_loc_border = Borders()\n header_border.left, header_border.right, header_border.top, header_border.bottom = Borders.THIN, Bord... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This is an initialization routine for the app. App needs to know the range of purchase dates in order to distinguish between prehistory and days with zero sales. Sets a timeseries config dict (ts_config) to contain these min and max dates. | def db_get_ts_config():
db_connection = iopro.connect(**db_config)
db_cursor = db_connection.cursor()
db_cursor.execute("select * from dbo.vTransactionStats") # Application needs to know, minimally, first and last overall transaction dates
result = db_cursor.fetchone()
ts_config["minPurchas... | [
"def _create_init_time_series(self, ts_data):\n\n # Avoid changing ts_data outside function\n ts_data_used = ts_data.copy()\n\n if self.model_name == '1_region':\n expected_columns = {'demand', 'wind'}\n elif self.model_name == '6_region':\n expected_columns = {'dem... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Connects to an existing database view containing the distinct ProductIDs for a given client, and returns those IDs as a list. This is highly suboptimal but works as a proof of concept. | def db_get_productlist():
db_connection = iopro.connect(**db_config)
db_cursor = db_connection.cursor()
productIDs = []
db_cursor.execute("exec TimeSeriesQueueGet") # Expects a table or view containing distinct ProductIDs in a 'ProductID' int field
for row in db_cursor.fetchall():
... | [
"def _get_product_ids(prefix):\n from accelpy._application import Application\n return Application.list(prefix)",
"def get_affected_products_by_cve(self, cve):\n assert self.cursor is not None, 'DB connection not set!'\n LOGGER.debug('Looking for affected products: cve={}.'.format(cve))\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Accepts a single ProductID. Queries the profile database to get the DAILY sales counts for that single ProductID. This is then converted into a clean time series, bounded by the min and max sales dates, with all missing dates filled in with zero sales. Returns a Pandas timeseries object for further processing. | def db_get_trx_series(productID):
db_connection = iopro.connect(**db_config)
db_cursor = db_connection.cursor()
db_cursor.execute("select * from dbo.fxTransactionSeries(?)", productID)
result = db_cursor.fetchsarray()
db_connection.close()
ts_idx = pd.date_range(ts_config["minPurchaseDate"... | [
"def __get_product_ts(self, product_id):\n d_range_s = FROM\n d_range_e = TO\n resample = \"W\"\n new_ts = self._ts[self._ts[\"IDPRODUCTO\"] == product_id][\"#UNIDADES\"]\n ts = new_ts[d_range_s:d_range_e].resample(resample).mean().fillna(1)\n ts = np.log(ts)\n ts[ts... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Accepts a single ProductID as a paremeter. Retrieves a timeseries vector for that product, and creates several moving averages (e.g., ewma7) from that data to identify upward/downward trends. Plucks the last values from those moving averages and writes them to a ts_values dict. Attempts to separate seasonality from tre... | def timeseries(productID):
ts = db_get_trx_series(productID) # Get a Time-Series vector for a specific product #1587
ts_values = {}
# Compute exponentially weighted moving averages (EWMAs) for specific time periods
ewma7 = pd.Series(pd.ewma(ts, span=7, freq="D"))
ewma14 = pd.Series(pd.ewma(ts,... | [
"def predict_product(self, product_id):\n product_ts = self.__get_product_ts(product_id)\n\n model = SARIMAX(product_ts, order=(0,1,2),\n time_varying_regression=True,\n mle_regression=False,\n trend='n',\n sea... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Loads a set of weights to a timeseries weights table in the DB. Could benefit from some connection pooling all around. | def db_update_weights(productID, weights_dict):
db_connection = iopro.connect(**db_config)
db_cursor = db_connection.cursor()
for k, v in weights_dict.items():
db_cursor.execute("insert into dbo.TimeSeriesWeights_TMP values (?,?,?)", productID, k, v)
db_connection.commit()
d... | [
"def loadWeights():\r\n final = []\r\n f = open(\"data/weight.txt\", 'r')\r\n for line in f:\r\n final.append(float(line))\r\n f.close()\r\n return final",
"def _load_weights(self):\n\n if not self.load_weights_file_path:\n return\n beh_load_f... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Main program flow logic. Sets a db_config parameter to the desired database, gets required purchase-date parameters to apply to all ProductIDs, gets the list of all known ProductIDs, runs timeseries extraction for daily sales totals for each ProductID (serially), and writes the resulting weights to a database. | def main(db):
db_config["Database"] = db
# Load queue file
db_get_ts_config()
# Load Product Table on initialization
productIDs = db_get_productlist()
for productID in productIDs:
timeseries(productID)
print()
#print(ts_config["productIDList"][0:... | [
"def db_update_weights(productID, weights_dict):\n db_connection = iopro.connect(**db_config) \n db_cursor = db_connection.cursor()\n \n for k, v in weights_dict.items():\n db_cursor.execute(\"insert into dbo.TimeSeriesWeights_TMP values (?,?,?)\", productID, k, v)\n \n db_connection.... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create a set of CCD-level FITS headers according to the FITS template file, which is supposed to implement the FITS standard for sensors (LCA10140). | def fits_headers(template=template_file):
headers = OrderedDict()
hdr = fits.header.Header()
with open(template) as fd:
for line in fd:
# Skip comments and whitespace lines.
if line[0] == '#' or len(line.strip()) == 0:
continue
if line[:3] == 'END'... | [
"def _getFITSHeader(self, hdulist, options):\n JWInstrument._getFITSHeader(self,hdulist, options)\n\n hdulist[0].header.update('MODULE',self.module, 'NIRCam module: A or B')\n hdulist[0].header.update('CHANNEL', 'Short' if self.pixelscale == self._pixelscale_short else 'Long', 'NIRCam channel: ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check that the keywords in the specified FITS header template file are present. The default file is based on the FITS standard document for sensors, LCA10140. Returns a dictionary of missing keywords by header extension number. | def check_keywords(infile, template=template_file, verbose=True):
prototype_headers = fits_headers(template=template)
input = fits.open(infile)
report = []
missing_keys = {}
missing_headers = []
#
for i, extname in enumerate(prototype_headers):
prototype = prototype_headers[extname]
... | [
"def fits_checkkeyword(fitsfile, keyword, ext=0, silent=False):\n import astropy.io.fits as pf\n\n fh = pf.open(fitsfile)\n try:\n return fh[ext].header[keyword]\n except KeyError as e:\n if silent:\n return None\n else:\n print('The specified extension or keyw... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Accepts a dict from a schema version 1.0, 1.1 or 1.2 package containing a "platforms" key and converts it to a list of releases compatible with schema version 2.0. | def platforms_to_releases(info, debug):
output = []
temp_releases = {}
platforms = info.get('platforms')
for platform in platforms:
for release in platforms[platform]:
key = '%s-%s' % (release['version'], release['url'])
if key not in temp_releases:
tem... | [
"def get_packages(platform):\n with open('pkg-resolver/packages.json', encoding='utf-8', mode='r') as pkg_file:\n pkgs = json.loads(pkg_file.read())\n packages = []\n for platforms in filter(lambda x: x.get(platform) is not None, pkgs.values()):\n if isinstance(platforms.get(platform), list):... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Geocode a single location via maps API Returns a tuple of latitude and longitude | def geocode(location):
gmaps = googlemaps.Client(key=settings.GAPI_KEY)
loc = gmaps.geocode(location, region="UK")
if not loc:
raise RuntimeError(f"Could not find {location} on Google maps")
else:
return (loc[0]["geometry"]["location"]["lat"],
loc[0]["geometry"]["location... | [
"def address_to_latlng(address):\n location_geo = geocode(address)\n location = {}\n location['lat'] = location_geo['lon']\n location['lon'] = location_geo['lat']\n print location\n return tuple(location.values())",
"def location(locations):\r\n ctx = ssl.create_default_context(cafile=certif... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
return [layout_a, layout_b, layout_c] in the form of cutlass_lib definitions | def cutlass_lib_layouts():
import cutlass_lib
return [
cutlass_lib.library.LayoutType.RowMajor,
cutlass_lib.library.LayoutType.ColumnMajor,
cutlass_lib.library.LayoutType.RowMajor,
] | [
"def cutlass_lib_layouts():\n import cutlass_lib\n\n return [\n cutlass_lib.library.LayoutType.RowMajor,\n cutlass_lib.library.LayoutType.RowMajor,\n cutlass_lib.library.LayoutType.RowMajor,\n ]",
"def _get_layouts(self):\r\n pass",
"def __get_library... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
return [layout_a, layout_b, layout_c] in the form of cutlass_lib definitions | def cutlass_lib_layouts():
import cutlass_lib
return [
cutlass_lib.library.LayoutType.RowMajor,
cutlass_lib.library.LayoutType.RowMajor,
cutlass_lib.library.LayoutType.RowMajor,
] | [
"def cutlass_lib_layouts():\n import cutlass_lib\n\n return [\n cutlass_lib.library.LayoutType.RowMajor,\n cutlass_lib.library.LayoutType.ColumnMajor,\n cutlass_lib.library.LayoutType.RowMajor,\n ]",
"def _get_layouts(self):\r\n pass",
"def __get_libr... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Gate inputs, then update gate settings according to c. | def call(
self, c: nd.NumDict[feature], *inputs: nd.NumDict
) -> Tuple[nd.NumDict, ...]:
gs = [self.store.isolate(key=k) for k in self.flags]
self.update(c)
return (self.store, *(x.mul(g) for g, x in zip(gs, inputs))) | [
"def update_and_send(self,inputs):\n assert(self.running == True)\n\n # TODO: figure out a better way to get state indexes\n # float64 vt 0\n # float64 alpha 1\n # float64 beta 2\n # float64 phi 3\n # float64 theta 4\n # float64 psi 5\n # float64 p 6\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Process a book element into the database. | def process_book_element(book_element):
book, created = Book.objects.get_or_create(pk=book_element.get('id'))
book.title = book_element.findtext('title')
book.description = book_element.findtext('description')
aliases = {}
same_aliases = False
book_aliases = {}
for alias in book.aliases.val... | [
"def add_book_to_db(book: dict) -> None:\n if \"title\" in book:\n title = request.form['title']\n else:\n title = \"\"\n\n if \"authors\" in book:\n authors = \";\\n\".join(request.form['authors'].split(';'))\n else:\n authors = \"\"\n\n if \"publishedDate\" in book:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This provides a way for ajax calls to get the user's own picks | def get_picks(request):
response_data = {}
user = request.user
game_ids = []
#Only try this if we are asking at least one game. Otherwise, just return a normal response with no games.
if request.POST.get('games'):
try:
#I have no clue why, but the brackets are added t... | [
"def get_pick_list(self, pick_list_id):\r\n return self.get('picklists/{}'.format(pick_list_id)).json()",
"def user_profile():\n user_id = session[\"user_id\"]\n picks = Pick.query.filter_by(author=user_id).all()\n return render_template(\n \"profile.html\",\n picks=picks\n )",
... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Loads or computes calibration. When finalImageHeight is given, scales camera matrix to final height, which may be different from the imageHeight at which calibration is computed. | def LoadOrCompute(self,
squareWidth=None,
rows=None,
cols=None,
forceRecompute=False,
finalImageHeight=None):
if forceRecompute:
print('Forcing recomputation of calibration data.')
elif self.LoadFromFile(fi... | [
"def _load_calib(self):\n # We'll build the calibration parameters as a dictionary, then\n # convert it to a namedtuple to prevent it from being modified later\n data = {}\n\n # Load the calibration file\n calib_filepath = os.path.join(\n self.base_path, 'calib/{}.txt'.... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The client email registered for the integration service. | def client_email(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "client_email") | [
"def get_email(self):\r\n return self.email",
"def notification_email(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"notification_email\")",
"def thread_email(self):\n return self._thread_email",
"def provider_email(self) -> str:\n return pulumi.get(self, \"provider_email... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This resource allows you to create and manage third party log integrations for a CloudAMQP instance. Once configured, the logs produced will be forwarded to the corresponding integration. Only available for dedicated subscription plans. Argument Reference (cloudwatchlog) | def __init__(__self__,
resource_name: str,
args: IntegrationLogArgs,
opts: Optional[pulumi.ResourceOptions] = None):
... | [
"def __init__(__self__,\n resource_name: str,\n args: IntegrationLogCollectionArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...",
"def setup_logger():\n log = logging.getLogger('contrail_vrouter_provisioning')\n log.setLevel(logging.DEBUG)\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get an existing IntegrationLog resource's state with the given name, id, and optional extra properties used to qualify the lookup. | def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
access_key_id: Optional[pulumi.Input[str]] = None,
api_key: Optional[pulumi.Input[str]] = None,
client_email: Optional[pulumi.Input[str]] = None,
cred... | [
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'ResourceSpecificLogging':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = ResourceSpecificLoggingArgs.__new__(ResourceSpecificLogg... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The client email registered for the integration service. | def client_email(self) -> pulumi.Output[str]:
return pulumi.get(self, "client_email") | [
"def client_email(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"client_email\")",
"def get_email(self):\r\n return self.email",
"def notification_email(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"notification_email\")",
"def thread_email(self):\n re... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
returns a storm configuration given its name, assuming it exists. | def get_config(self, storm_name: str) -> Dict:
q = {"name": storm_name}
cols = {"config": 1}
r = list(self._storms.find(q, cols))
if len(r) == 0:
raise KeyError(f"{storm_name} not found, no configuration to load.")
else:
return r[0]["config"] | [
"def get_conf(self, name='global'):\n return self.cluster_configuration_manager.get_object(name)",
"def get(name):\n value = Configuration.settings.get(name, None)\n\n if value is None:\n raise ConfigurationNotFound(name)\n\n return value",
"def get(self, name: str, defaul... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
returns the run_record from last storm run under a given name | def get_last_run(self, storm_name: str) -> Dict:
q = {"storm_name": storm_name}
cols = {"_id": 0}
r = list(self._runs.find(q, cols))
if len(r) == 0:
return None
elif len(r) > 0:
max_run_idx = np.argmax(
np.array([dt.datetime.strptime(x["ru... | [
"def get_last_run(runfile):\n runfile = open(runfile, 'r')\n return int(runfile.readlines()[-1])",
"def get_last_run(self, rule_id):\n\n s = RuleRun.search()\n s = s.filter('term', rule_id=rule_id).sort('-timestamp')\n s = s[:1]\n response = s.execute()\n if response.hits.... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns all run records for a storm (and all fields) | def get_runs_by_storm(self, storm_name: str) -> List[Dict]:
q = {"storm_name": storm_name}
cols = {"config": 0}
r = list(self._runs.find(q, cols))
if len(r) == 0:
return None
else:
return r | [
"def get_runs(self):\n try:\n return self.__dict__['runs']\n except KeyError:\n json = self._connection._make_request('routes/%s/runs/' % self.id)\n obj_list = [BusRun(\n j[\"id\"],\n j['display_name'],\n j['direction_name']... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Adds new run record (for use after storm run) | def write_run_record(self, run_record: Dict) -> None:
q = {}
self._runs.insert_one(run_record) | [
"def add(self, record):\n return self._append_record(record, 'additions')",
"def _add_model_run(\n posts: List[YamlDict],\n run_id: str,\n open_timestamp: dt,\n inputs: List[str],\n outputs: List[YamlDict],\n model_config: YamlDict,\n submission_script: YamlDict,\n code_repo: YamlDict,\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Updates existing run record | def update_run_record(self, run_record: Dict) -> None:
q = {"_id": run_record["_id"]}
self._runs.update_one(q, {"$set": run_record}) | [
"def update(self):\n\n self.__check_update_ok()\n self.db.update_dataset_record(self.dataset_dict)",
"def write_run_record(self, run_record: Dict) -> None:\n\n q = {}\n self._runs.insert_one(run_record)",
"def update_student_records(self, students, test_runs):\n pass",
"def ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns a playlist's full current record (excluding changelog) | def get_playlist_current_info(self, playlist_id: str) -> Dict:
q = {"_id": playlist_id}
cols = {"changelog": 0}
r = list(self._playlists.find(q, cols))
if len(r) == 0:
raise Exception(f"{playlist_id} not found.")
else:
return r[0] | [
"def get_playlist():\n return _playlist",
"def playlist(self):\n return self.video.playlist",
"def get_playlog(self, flag='c'):\n return self._saver._fetch_playlog(flag=flag)",
"def curr_playlist(self):\n return self.curr_playlist_name",
"def fetch_playlists_metadata():\n sp = get... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns a playlist's changelog, a dictionary where each entry is a date. | def get_playlist_changelog(self, playlist_id: str) -> Dict:
q = {"_id": playlist_id}
cols = {"changelog": 1}
r = list(self._playlists.find(q, cols))
if len(r) == 0:
raise Exception(f"{playlist_id} not found.")
else:
if "changelog" in r[0].keys():
... | [
"def getWeblogEntriesDates(entries_dict):",
"def deb_changelogs(new_snap, pkg_changes):\n # type: (str, Dict[str, Tuple[str, str]]) -> Dict[str, str]\n changelogs = {} # type: Dict[str, str]\n with tmpdir() as tmp:\n unsquashfs(tmp, new_snap, \"/usr/share/doc/*\")\n for name in pkg_changes... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Gets a playlist's last collection date. | def get_playlist_collection_date(self, playlist_id: str) -> str:
q = {"_id": playlist_id}
cols = {"last_collected": 1}
r = list(self._playlists.find(q, cols))
# If not found print old date
if len(r) == 0:
return "2000-01-01" # Long ago
elif len(r) == 1:
... | [
"def get_last(collection):\n return list(DB.DATABASE[collection].find().sort([('created_at', -1)]).limit(1))[0]",
"def last(self) -> datetime.date:\n return self.__dates__[-1]",
"def last_day(self):\n return self._last_day",
"def last_played(self):\n if self._last_played is None:\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns all ids from the artists db. | def get_known_artist_ids(self) -> List[str]:
q = {}
cols = {"_id": 1}
r = list(self._artists.find(q, cols))
return [x["_id"] for x in r] | [
"def artists(self, artists):\n\n tlist = [self._get_id(\"artist\", a) for a in artists]\n return self._get(\"artists/?ids=\" + \",\".join(tlist))",
"def fetchAlbumIds(artist_id):\n url = \"https://api.spotify.com/v1/artists/\" + artist_id + \"/albums?album_type=album&market=US\"\n req = reques... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
returns all artists with album collection dates before max_date. | def get_artists_for_album_collection(self, max_date: str) -> List[str]:
q = {}
cols = {"_id": 1, "album_last_collected": 1}
r = list(self._artists.find(q, cols))
# Only append artists who need collection in result
result = []
for artist in r:
if "album_last_c... | [
"def db_annotater_get_latest_user_albums(album_date):\n\tstart_at\t= album_date['start_at']\n\tend_at\t\t= album_date['end_at']\n\t(hours, mins, secs) = get_time_diff_h_m_s(start_at, end_at)\n\twear_time \t= [{\"hours\":str(hours),\"minutes\":str(mins)}]\n\talbum_id \t= album_date['id']\n\tif album_date['annotation... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Gets a list of artists in the DB that have one or more of the genres | def get_artists_by_genres(self, genres: List[str]) -> List[str]:
q = {"genres": {"$all": genres}}
cols = {"_id": 1}
r = list(self._artists.find(q, cols))
return [x["_id"] for x in r] | [
"def get_genres():\n \n return Genre.query.order_by('genre_name').all()",
"def populate_artist_genres(artist_list, music_genre_dict):\n\tpopulated_list = []\n\tfor artist in artist_list:\n\t\tif artist in music_genre_dict.keys():\n\t\t\tpopulated_list.append(artist)\n\t\t\tpopulated_list.extend(music_genre_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Updates artist db with list of new artist info | def update_artists(self, artist_info_list: List[Dict]) -> None:
for artist in tqdm(artist_info_list):
q = {"_id": artist["id"]}
# Writing updates (formatting changes)
artist["last_updated"] = dt.datetime.now().strftime("%Y-%m-%d")
artist["total_followers"] = art... | [
"def update_artist(artist, new_name):\n conn = sqlite3.connect(\"mydatabase.db\")\n cursor = conn.cursor()\n \n sql = \"\"\"\n UPDATE albums\n SET artist = ?\n WHERE artist = ?\n \"\"\"\n cursor.execute(sql, (new_name, artist))\n conn.commit()\n cursor.close()\n conn.close()",
"de... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Updates a list of artists' album_collected date to today by default. | def update_artist_album_collected_date(self, artist_ids: List[str], date: str=None) -> None:
date = dt.datetime.now().strftime("%Y-%m-%d") if date is None else date
for artist_id in tqdm(artist_ids):
q = {"_id": artist_id}
self._artists.update_one(
q, {"$set": {"... | [
"def get_artists_for_album_collection(self, max_date: str) -> List[str]:\n q = {}\n cols = {\"_id\": 1, \"album_last_collected\": 1}\n r = list(self._artists.find(q, cols))\n\n # Only append artists who need collection in result\n result = []\n for artist in r:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns a full blacklist record by name (id) | def get_blacklist(self, name: str) -> List[str]:
q = {"_id": name}
cols = {"_id": 1, "blacklist": 1, "type": 1, "input_playlist": 1}
return list(self._blacklists.find(q, cols)) | [
"def blacklist_flush(name):\n engine = Engine(name).load()\n return engine.blacklist_flush()",
"def blacklist():\n # Get values used for pagination of the blacklist\n total = get_row_count('Blacklist')\n page, per_page, offset = get_page_args(\n page_parameter=\"page\", per_page_parameter=\"... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
updates a blacklist's artists given its name | def update_blacklist(self, blacklist_name: str, artists: List[str]) -> None:
q = {"_id": blacklist_name}
[
self._blacklists.update_one(q, {"$addToSet": {"blacklist": x}})
for x in artists
] | [
"def update_artist(artist, new_name):\n conn = sqlite3.connect(\"mydatabase.db\")\n cursor = conn.cursor()\n \n sql = \"\"\"\n UPDATE albums\n SET artist = ?\n WHERE artist = ?\n \"\"\"\n cursor.execute(sql, (new_name, artist))\n conn.commit()\n cursor.close()\n conn.close()",
"de... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get all albums that need tracks added. | def get_albums_for_track_collection(self) -> List[str]:
q = {}
cols = {"_id": 1, "tracks": 1}
r = list(self._albums.find(q, cols))
# Only append artists who need collection in result
result = []
for album in r:
if "tracks" not in album.keys():
... | [
"def getTracks(self, album):\n\n\t\talbumSock = self.opener.open(album['url'])\t\t#download the album page\n\t\talbumPage = albumSock.read()\n\t\talbumSock.close()\n\n\t\tp = albumParser()\n\t\tp.feed(albumPage)\n\t\tp.close()\n\n\t\talbum['tracks'] = p.tracks\n\t\talbum['tracks'].sort(lambda x, y: cmp( x['num'], y... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
update album info if needed. | def update_albums(self, album_info: List) -> None:
for album in album_info:
if isinstance(album, dict):
q = {"_id": album["id"]}
# Writing updates (formatting changes)
album["last_updated"] = dt.datetime.now().strftime("%Y-%m-%d")
del... | [
"def _syncPhoto(self, photo_entry, username, albumname=None, refresh=False):\n gphoto_id = photo_entry.gphoto_id.text\n #if self.cli_verbose:\n # print \"syncPhoto album %s id %s\" % (albumname, gphoto_id)\n # if we're refreshing this data, then delete the PicasaPhoto first...\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get all tracks that need audio analysis added. | def get_tracks_for_audio_analysis(self) -> List[str]:
l.debug("Finding Tracks without audio analysis, this can take some time.")
q = {}
cols = {"_id": 1, "audio_analysis_flag": 1}
r = list(self._tracks.find(q, cols))
# Only append artists who need collection in result
... | [
"def get_audio_analysis(self, track_id):\n url = \"https://api.spotify.com/v1/audio-analysis/\" + track_id\n headers = {'Authorization': \"Bearer \" + self.token}\n\n request = self.session.get(url, headers=headers)\n return request",
"def get_all_audio(self):\n return [x.file f... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
returns a track list based on an album list | def get_tracks_from_albums(self, albums: List[str]) -> List[str]:
q = {"album_id": {"$in": albums}}
cols = {"_id": 1}
r = list(self._tracks.find(q, cols))
return [x["_id"] for x in r] | [
"def get_album_tracklist(name, artist=None, token=None):\n if not token:\n token = get_token()\n album = get_spotify_api(\"https://api.spotify.com/v1/search\", get=True, data={\"q\": (artist + \" - \" if artist else \"\") + name, \"type\": \"album\", \"limit\": 1})\n if album[\"albums\"][\"items\"]:... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns a list of all tracks in the database. | def get_tracks(self) -> List[str]:
q = {}
cols = {"_id": 1}
r = list(self._tracks.find(q, cols))
return [x["_id"] for x in r] | [
"async def get_all_tracks(self) -> List[PlaylistTrack]:\n if isinstance(self._tracks, PartialTracks):\n return await self._tracks.build()\n\n _tracks = []\n offset = 0\n while len(self.tracks) < self.total_tracks:\n data = await self.__client.http.get_playlist_track... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns all available information for every track in track_ids. Done in batches as it is a large database. | def get_track_info(self, track_ids: List[str], fields: Dict={"artists": 0, "audio_analysis": 0}) -> List[Dict]:
# Check if needs to be done in batches
id_lim = 50000
batches = np.array_split(track_ids, int(np.ceil(len(track_ids) / id_lim)))
result = []
for batch in batches:
... | [
"def get_general_info_mult_tracks(track_ids):\n connect()\n url = 'https://api.spotify.com/v1/tracks'\n # Max that can be submitted to this endpoint is 50 at a time\n track_groups = make_chunks(track_ids, 50)\n track_details = []\n for group in track_groups:\n query_params = {'ids': ','.joi... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns all tracks in database from a list of artists and a date range for releases. | def get_tracks_from_artists(self, artists: List[str], start_date: str, end_date: str) -> List[str]:
albums = self.get_albums_from_artists_by_date(artists, start_date, end_date)
tracks = np.unique(self.get_tracks_from_albums(albums)).tolist()
return tracks | [
"def get_release_list(artist_str):\n username = 'Username'\n password = 'Password'\n \n musicbrainzngs.set_useragent(username, password)\n artist_list = musicbrainzngs.search_artists(artist=artist_str)['artist-list']\n artist = sorted(artist_list, reverse=True, key=lambda artist:int(artist['ext:sc... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Updates a track and its album from a list. | def update_tracks(self, track_info_list: List[Dict]) -> None:
for track in track_info_list:
# Add track to album record
q = {"_id": track["album_id"]}
self._albums.update_one(q, {"$push": {"tracks": track["id"]}}, upsert=True)
# Add track data to tracks
... | [
"def update_albums(self, album_info: List) -> None:\n\n for album in album_info:\n if isinstance(album, dict):\n q = {\"_id\": album[\"id\"]}\n\n # Writing updates (formatting changes)\n album[\"last_updated\"] = dt.datetime.now().strftime(\"%Y-%m-%d\")... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Updates a track's record with audio features | def update_track_features(self, tracks: List[Dict]) -> None:
for track in tracks:
q = {"_id": track["id"]}
# Writing updates (formatting changes)
track["audio_features"] = True
track["last_updated"] = dt.datetime.now().strftime("%Y-%m-%d")
del track["... | [
"def _update_audio_(course_id, audio_info):\n course = Course.objects.get(course_id=course_id)\n dir = audio_info[\"url\"].split(\"/\")\n if dir[-2] == \"audio_temp\":\n audio = AudioTemp.objects.get(pk=audio_info[\"id\"]).position\n course.audio_url = File(audio, dir[-1])\n audio.clos... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
If tracks that can't get features are identified, mark them here | def update_bad_track_features(self, bad_tracks: List[str]) -> None:
for track in tqdm(bad_tracks):
q = {"_id": track["id"]}
# Writing updates (formatting changes)
track["audio_features"] = False
track["last_updated"] = dt.datetime.now().strftime("%Y-%m-%d")
... | [
"def hastracks(self):\n return len(self._tracks) > 0",
"def isLayerTrack(self):\r\n\t\treturn None",
"def update_track_features(self, tracks: List[Dict]) -> None:\n for track in tracks:\n q = {\"_id\": track[\"id\"]}\n\n # Writing updates (formatting changes)\n tra... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Takes in a specific audio_filter format to get tracks with a filter | def filter_tracks_by_audio_feature(self, tracks: List[str], audio_filter: Dict) -> List[str]:
q = {"_id": {"$in": tracks}, **audio_filter}
cols = {"_id": 1}
r = list(self._tracks.find(q, cols))
return [x["_id"] for x in r] | [
"def apply_audio_filters(\n audio_filters: list[AudioMatch] | bool | None,\n original_tracks: list[Box],\n):\n if not audio_filters:\n return []\n\n original_tracks = deepcopy(original_tracks)\n\n tracks = []\n for audio_match in audio_filters:\n if audio_match.match_item == MatchIte... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Generates a unique track name based on the name and artists, avoiding the same track being counted multiple times | def gen_unique_track_id(self, track_name: str, artists: List[str]) -> str:
bad_chars = ",. "
for char in bad_chars:
track_name = track_name.replace(char, "")
artist_string = "A&A".join(artists)
return track_name + "T&A" + artist_string | [
"def get_track_identifier(self):\n return (self.name, ','.join(self.artists))",
"def _generate_track_filename(self, extention):\n track_filename = ''\n for char in self.title:\n if char in \" -,.;:(){}[]`~'\":\n track_filename += '_'\n else:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Prints an error payload, which can also be used for action responses | def print_error_payload( response ):
try:
print( get_error_messages( response ) )
except:
# No response body
if response.status >= 400:
print( "Failed" )
else:
print( "Success" ) | [
"def print_error(response):\n print 'Status code: {0}'.format(response.status_code)",
"def print_error(error):\n print json.dumps({'error': error})",
"def print_api_error(error):\n sys.stderr.write('\\nERROR: %s\\n' % error)",
"def error_print():\n print(\"ERROR: Invalid Entry!\")",
"def indicat... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
send body back to infos queue | def send_to_info_queue(body):
print("trying connection to publisher")
connection = pika.BlockingConnection(pika.ConnectionParameters(host='rabbitmq'))
channel = connection.channel()
channel.queue_declare(queue='infos')
data = body
channel.basic_publish(exchange='', routing_key='infos', body=(da... | [
"def _add_details(self, info):\r\n msg_dicts = info.pop(\"messages\", [])\r\n super(QueueClaim, self)._add_details(info)\r\n parsed = urlparse.urlparse(self.href)\r\n self.id = parsed.path.rsplit(\"/\", 1)[-1]\r\n self.messages = [QueueMessage(self.manager._message_manager, item)\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Tweets selected by year, month and day. Using the cleaned data from the getallweets.py module, this function loads the data and computes the frequency for days, months and years. It uses three dicts to store the output, pickled into local storage for makePicture.py. | def computeTime(inputData):
import pickle
data = None
with open(inputData, "rb") as f:
data = pickle.load(f)
years = {}
months = {}
days = {}
for tweet in data:
year = tweet[3].year
month = tweet[3].month
day = tweet[3].day
# Strings
... | [
"def scrape_month_weather(self, year: int, month: int) -> dict:\n try:\n print('Scraping data of year: {0}, month: {1}...'.format(year, month))\n days_of_current_month = calendar.monthrange(year, month)[1]\n # Get raw info from HTML parse\n url = (\"http://climate.... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Computes the frequency of words used. Then returns a dict as output and stores the result dict in local data. Tries to import the NLTK package to throw out stopwords, so we can get more interesting data, and uses NLTK to tokenize words and clean out short URLs and other unimportant tokens. | def computeFreqOfWords(inputData):
import pickle
data = None
result = {}
wordlist = []
with open(inputData,"rb") as w:
data = pickle.load(w)
for t in data:
sent = t[1]
words = sent.split(" ")
try:
import nltk
from nltk.tokenize import Regex... | [
"def set_freq(self):\n for site, tags in self.words_by_site.items():\n self.word_frequency[site] = defaultdict(int)\n words = tags.split(\" \")\n for word in words:\n # Save words containing no punctuation characters.\n match = [char in word for ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Computes the frequency of hashtags used. Then returns a dict as output and stores the result dict in local data. | def computeFreqOfHashtags(inputData):
import pickle
with open(inputData,"rb") as r:
data = pickle.load(r)
hashlist = []
result = {}
for t in data:
h = t[2]
hashlist.extend(h)
for h in hashlist:
if h in result:
atv = result[h]
result[h] = a... | [
"def hashtagCount(words):\n htc=words.map(lambda w:w.lower()).filter(lambda x:len(x)>2).filter(lambda x:x[0]=='#')\n htc=htc.map(lambda x:(x,1))\n htc_one=htc.reduceByKey(lambda x,y:x+y)\n htc_total=htc_one.updateStateByKey(lambda x,y:sum(x+(y or 0)))\n return htc_total",
"def hashtags_distribution... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Computes the frequency of client (e.g., iPad, web). Then returns a dict as output and stores the result dict in local data. | def computeFreqOfClient(inputData):
import pickle
with open(inputData,"rb") as f:
data = pickle.load(f)
result = {}
for tweet in data:
client = tweet[4]
if client in result.keys():
result[client] = result[client] + 1
else:
result[client] = 1
# w... | [
"def Histogram(self):\n\n hist = {}\n\n hunt = aff4.FACTORY.Open(\"aff4:/hunts/%s\" % self.session_id,\n age=aff4.ALL_TIMES, token=self.token)\n\n log = hunt.GetValuesForAttribute(hunt.Schema.LOG)\n\n client_ids = [l.client_id for l in log]\n\n to_read = []\n\n while cl... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the lambda function alias data | def get_function_alias_data(function_name, env):
lambda_client = _get_lambda()
function_name = function_name.format(ENV=f'{env}')
function_alias_data = {
'FunctionName': f'{function_name}',
'Name': f'{env}'
}
function_alias_data = lambda_client.get_alias(**function_alias_data)
re... | [
"def get_function_aliases(trend_type):\n return {\n \"trend_percentage()\": Alias(\n lambda aggregate_filter: [\n \"trend_percentage\",\n CORRESPONDENCE_MAP[aggregate_filter.operator]\n if trend_type == IMPROVED\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the lambda function configuration and package to clone and saved in a pickle file | def pull(function_name: str, env: str):
try:
lambda_client = _get_lambda()
function_name = function_name.format(ENV=f'{env}')
function_alias_data = get_function_alias_data(function_name, f'{env}')
function_data = lambda_client.get_function(
FunctionName=f"{function_alias_... | [
"def build_archive(mod, cache):\n\n mod_pathname = os.path.abspath(os.path.dirname(__file__) + \"/../lambdas/{}.py\".format(mod))\n awsflow_basedir = os.path.abspath(os.path.dirname(__file__) + \"/../../\")\n\n pkg_dir_suffix = \".lambda\"\n\n if cache:\n # Instead of generating a new temporary d... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Breaks it wordwise when going further than ``limit`` chars. | def text_wrap(*args, delimiter=' ', end='\n', limit=80):
output = delimiter.join(args)
lines = output.splitlines(keepends=True)
results = []
for line in lines:
curr_print = line
while len(curr_print.rstrip('\n')) > limit:
splitpos = curr_print[:limit].rfind(' ')
i... | [
"def max_num_words(original:str, limit:str):\r\n words = original.split()\r\n if len(words) <= int(limit):\r\n return True\r\n else:\r\n return False",
"def smart_split(text, limit=100):\n prefix = \"\"\n while text:\n chunk = text[:limit]\n text = te... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Remove columns which have a single label for the entire input dataset, i.e. categories that have only zeros or only ones. | def remove_non_informative_categories(categories_df):
columns_only_zeros = categories_df.columns[categories_df.sum(axis=0) == 0].values
columns_only_ones = categories_df.columns[categories_df.sum(axis=0) == len(categories_df)].values
categories_df = categories_df.drop(columns=np.concatenate((columns_only_on... | [
"def drop_constant_columns(df):\n\n cols=df.columns\n counts=[[],[]]\n for c in cols:\n typ = df[c].dtypes\n uniq = len(df[c].unique())\n if uniq == 2 and typ == np.float64:\n counts[1].append(c)\n elif uniq == 1:\n counts[0].append(c)\n print('Constant ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Clean our dataframe; this mainly means cleaning the categories column | def clean_data(df):
cleaned_categories = create_categories_columns(df["categories"])
# replace old categories with the cleaned one (which itself is a whole dataframe), then remove duplicates
df = df.drop(columns=["categories"], axis=1)
df = pd.concat([df, cleaned_categories], sort=False, axis=1)
df... | [
"def clean_data(df):\n # Resolve categories and expand them to actual columns.\n categories_df = _resolve_categories(df['categories'])\n df = df.drop(columns=['categories'])\n df = pd.concat([df, categories_df], axis=1)\n\n # drop duplicates\n df = _drop_duplicates(df)\n return df",
"def clea... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Run classification test with ExponentiatedGradient. | def run_expgrad_classification(estimator, moment):
X, Y, A = fetch_adult()
expgrad = ExponentiatedGradient(
estimator,
constraints=moment)
expgrad.fit(X, Y, sensitive_features=A)
assert expgrad.n_oracle_calls_ > 1
assert len(expgrad.predictors_) > 1 | [
"def experiment(self, x_train, x_test, y_train, y_test, **kwargs):\n\n print('\\n--------------------------')\n self.plot_model_complexity(x_train, y_train, **kwargs)\n self.plot_learning_curve(x_train, y_train, **kwargs)\n self.fit(x_train, y_train)\n self.evaluate(x_test, y_test... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Run classification test with ThresholdOptimizer. | def run_thresholdoptimizer_classification(estimator):
X, Y, A = fetch_adult()
to = ThresholdOptimizer(estimator=estimator, prefit=False)
to.fit(X, Y, sensitive_features=A)
results = to.predict(X, sensitive_features=A)
assert results is not None | [
"def test_override_threshold():\n vals = np.zeros((2, 2))\n vals[0, :] = 5\n vals[1, :] = 10\n # create classifier and assert threshold\n C = classifier.BinaryClassifier(vals, 7)\n assert C.threshold == 7\n assert np.all(C.data[0, :] == 5)\n assert np.all(C.data[1, :] == 10)\n # classify ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This function creates the table header based on the number of columns. | def _Header(numCols):
return "\\begin{center}\n\\begin{tabular}{" + "|c" * numCols + "|}\n" | [
"def tbl_header():\n header = ['REGION', 'DEL/DUP', 'CNV LENGTH', 'ZSCORE', 'MEAN DEPTH', 'NUMBER OF PROBES', 'TOTAL ALLELES',\n 'POP DEL COUNT', 'POP DEL AF', 'POP DUP COUNT', 'POP DUP AF', 'GENES']\n return header",
"def format_medical_table_headers(self):\n med_cols = ['B', 'C', 'D', ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This function creates the column header based on the list of strings that are passed in via the input strIn. | def _colHeader(strIn):
return " & ".join(strIn) + "\\\\\n" | [
"def format_medical_table_headers(self):\n med_cols = ['B', 'C', 'D', 'E']\n for col in med_cols:\n cell = f'{col}{self.title_final_row + 1}'\n self.format_cell_as_header(cell)",
"def _Header(numCols):\n return \"\\\\begin{center}\\n\\\\begin{tabular}{\" + \"|c\" * numCo... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This function transforms the 2D numpy array (arrIn) into latex tabular format. The "form" argument specifies the number format to be used in the tabular environment. The "rowHeader" argument is a list of strings that are used in the first column of each row in the tabular environment. The latex tabular environment is r... | def _Arr2Tab(arrIn, form="%.4E", rowHeader=None):
out = str()
if rowHeader is None:
if np.size(arrIn.shape) == 2:
numRows = arrIn.shape[0]
for k in range(numRows):
out += np.array2string(
arrIn[k, :], separator=" & "... | [
"def SimpleTable(arrIn, form=\"%.4E\", colHeader=None, rowHeader=None):\n\n if colHeader is None and rowHeader is None:\n return (\n table._Header(arrIn.shape[1])\n + \"\\\\hline\\n\"\n + table._Arr2Tab(arrIn, form=form)\n + \"\\n\"\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
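The `_Arr2Tab` body above is cut off mid-loop. A minimal sketch of turning a 2D NumPy array into LaTeX tabular rows (omitting the optional row-header handling), so the header and footer helpers in the neighbouring rows can be seen working with it:

```python
import numpy as np

def array_to_tabular_rows(arr: np.ndarray, form: str = "%.4E") -> str:
    """Format each row of a 2D array as 'a & b & c \\\\' lines for a LaTeX tabular."""
    lines = []
    for row in arr:
        lines.append(" & ".join(form % value for value in row) + r" \\")
    return "\n".join(lines) + "\n"

print(array_to_tabular_rows(np.arange(6.0).reshape(2, 3)))
```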
This function creates the footer for the latex table. | def _Footer():
return "\\end{tabular}\n\\end{center}" | [
"def print_table_footer():\n\n print('''\\\\bottomrule\n\\\\end{tabular}\n}\n\\\\end{center}\n\\\\end{table}\\n\\n''')",
"def print_latex_footer():\n print(\n \"\"\"\\\\bottomrule\n\\\\end{tabular}\n\\\\end{center}\n\\\\end{Large}\n\\\\end{document}\"\"\"\n )",
"def generate_footer_html(self):\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This function creates a simple LaTeX table for the 2D numpy array arrIn. The "form" argument specifies the number format to be used in the tabular environment. The "colHeader" argument is a list of strings that are used as the first row in the tabular environment. The "rowHeader" argument is a list of strings that are ... | def SimpleTable(arrIn, form="%.4E", colHeader=None, rowHeader=None):
if colHeader is None and rowHeader is None:
return (
table._Header(arrIn.shape[1])
+ "\\hline\n"
+ table._Arr2Tab(arrIn, form=form)
+ "\n"
+ table._Fo... | [
"def _Arr2Tab(arrIn, form=\"%.4E\", rowHeader=None):\n out = str()\n if rowHeader is None:\n if np.size(arrIn.shape) == 2:\n numRows = arrIn.shape[0]\n for k in range(numRows):\n out += np.array2string(\n arrIn[k, :], s... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
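Putting the truncated `SimpleTable` pieces together: a sketch that stitches the header, row body, and footer fragments from the rows above into one complete table string. The column-header branch is simplified and the row-header path is omitted, so this is an illustrative assembly rather than the original implementation.

```python
import numpy as np

def simple_table(arr, form="%.4E", col_header=None):
    """Build a complete LaTeX center/tabular block from a 2D array."""
    header = "\\begin{center}\n\\begin{tabular}{" + "|c" * arr.shape[1] + "|}\n"
    body = ""
    if col_header is not None:
        body += " & ".join(col_header) + " \\\\\n\\hline\n"
    for row in arr:
        body += " & ".join(form % v for v in row) + " \\\\\n"
    footer = "\\end{tabular}\n\\end{center}"
    return header + "\\hline\n" + body + footer

print(simple_table(np.eye(2), col_header=["a", "b"]))
```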
Take a source hdf5 file and a set of datasets and produce a dest hdf5 file that contains only those datasets and that has been repacked. | def convert_and_copy( src, dest, datasets, srsly=False ):
if not os.path.isfile(src):
return -1
temp = tempfile.NamedTemporaryFile()
for dset in datasets:
### copy only the relevant datasets
cmd_args = ["h5copy", "-i", src, "-o", temp.name, "-s", dset, "-d", dset, "-p"]
if a... | [
"def pack(name, f_name, img_size=(227,227),\n\t\tgreyscale=False, flatten=False, istest=False):\n\t \n\tdtype = \"Float64\" # Should be Float64\n\tdata_folder = \"DATA\"\n\thdfname = \"%s.hdf5\" % name\n\n\tf = h5py.File(\"%s/%s\" % (data_folder, hdfname), \"w\")\n\tif istest:\n\t\tX, paths = _load_testset(f_name, ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
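The `convert_and_copy` document above stops after building the first `h5copy` command. Below is a sketch of the whole copy-then-repack pipeline driven through `subprocess`; the `h5copy`/`h5repack` flags shown are the standard HDF5 command-line options, but error handling and the temporary-file layout here are simplifications.

```python
import os
import subprocess
import tempfile

def copy_datasets_and_repack(src: str, dest: str, datasets) -> int:
    """Copy only `datasets` from `src` into a temp file, then repack it into `dest`."""
    if not os.path.isfile(src):
        return -1
    with tempfile.TemporaryDirectory() as tmpdir:
        temp_path = os.path.join(tmpdir, "subset.h5")
        for dset in datasets:
            # -p creates intermediate groups in the output file as needed.
            subprocess.check_call(
                ["h5copy", "-i", src, "-o", temp_path, "-s", dset, "-d", dset, "-p"]
            )
        # h5repack rewrites the file, reclaiming space left by the omitted datasets.
        subprocess.check_call(["h5repack", temp_path, dest])
    return 0
```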
Suggest a new name for an h5lmt file. | def suggest_name( src ):
date = src.split(os.sep)[-2]
basename = os.path.basename(src).split('.', 2)[0]
if basename in hpcparse.FS_MAP_REV:
return hpcparse.FS_MAP_REV[basename] + "_" + date + ".hdf5"
else:
return basename + "_" + date + ".hdf5" | [
"def get_nameSimulation(self):\n self.path.name = self.input_file.name.split(\"_ky\")[0] if \"_ky\" in self.input_file.name else self.input_file.stem\n return",
"def setRawName(*args, **kwargs):\n \n pass",
"def setH5file(self, h5filepath):\n self.h5file = os.path.expanduser(h5filepath)",... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Many tag-related changes: add many-to-many relationships for added/removed tags to changes, add a composite primary key to Tag from name and is_default_language, and change the sticker_tag many-to-many relationship to the new primary key of tag. | def upgrade():
op.drop_constraint("sticker_tag_tag_name_fkey", "sticker_tag", type_="foreignkey")
op.drop_constraint("tag_pkey", "tag")
op.create_primary_key("tag_pkey", "tag", ["name", "is_default_language"])
# Change added tags many to many relationship
op.create_table(
"change_added_tags... | [
"def bind_tags(self, tags):\n current_map = dict((x.name, x) for x in self.tags)\n currently_attached = set(x.name for x in self.tags)\n new_tags = set(tags)\n\n def lookup_tag(name):\n tag = Tag.query.filter_by(locale=self.locale,\n name=... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
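Because the `upgrade()` migration above is truncated right after the first association table, here is a condensed Alembic sketch of one such many-to-many table keyed on the new composite Tag primary key. Any column or table names beyond those visible in the row above are assumptions, and the function would of course run inside a real Alembic migration context rather than standalone.

```python
import sqlalchemy as sa
from alembic import op

def upgrade_sketch():
    # Association table linking a Change to the tags it added (hypothetical column names).
    op.create_table(
        "change_added_tags",
        sa.Column("change_id", sa.Integer(), nullable=False),
        sa.Column("tag_name", sa.String(), nullable=False),
        sa.Column("tag_is_default_language", sa.Boolean(), nullable=False),
        sa.ForeignKeyConstraint(["change_id"], ["change.id"]),
        # Composite foreign key onto the new (name, is_default_language) primary key of tag.
        sa.ForeignKeyConstraint(
            ["tag_name", "tag_is_default_language"],
            ["tag.name", "tag.is_default_language"],
        ),
        sa.PrimaryKeyConstraint("change_id", "tag_name", "tag_is_default_language"),
    )
```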
Responsible for locking each test. | def run(self, messages):
if not self.args.lock:
return
format.print_line('~')
print('Locking tests')
print()
for test in self.assignment.test_map.values():
log.info('Locking {}'.format(test.name))
test.lock(self._hash_fn) | [
"def steal_test_lock(self, test_uuid):",
"def create_test_lock(self, test_uuid):",
"def test_multithreading():",
"def test_try_lock():\n with throttle(b\"[semaphores]\\nA=1\") as url:\n # We hold the lease, all following calls are going to block\n first = Peer.from_server_url(url)\n fi... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Context manager to temporarily redirect stderr output to another source. If given, 'new_stderr' should be a filelike object. | def redirect_stderr(new_stderr=None):
if new_stderr is None:
new_stderr = cStringIO.StringIO()
old_stderr = sys.stderr
sys.stderr = new_stderr
try:
yield new_stderr
finally:
sys.stderr = old_stderr | [
"def _redirect_stderr(to_fd):\n # Flush the C-level buffer stderr\n libc.fflush(c_stderr)\n # Flush and close sys.stderr - also closes the file descriptor (fd)\n sys.stderr.close()\n # Make original_stderr_fd point to the same file as to_fd\n os.dup2(to_fd, original_stderr_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
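The `redirect_stderr` generator above only works as a context manager once it is wrapped with `contextlib.contextmanager`; the decorator was presumably present in the source file but is not visible in this row. A complete, modernised sketch (using `io.StringIO` instead of the Python 2 `cStringIO`) plus a usage example:

```python
import contextlib
import io
import sys

@contextlib.contextmanager
def redirect_stderr(new_stderr=None):
    """Temporarily swap sys.stderr for `new_stderr` (defaults to an in-memory buffer)."""
    if new_stderr is None:
        new_stderr = io.StringIO()
    old_stderr = sys.stderr
    sys.stderr = new_stderr
    try:
        yield new_stderr
    finally:
        sys.stderr = old_stderr

with redirect_stderr() as captured:
    print("hidden warning", file=sys.stderr)
print("captured:", captured.getvalue().strip())
```

On Python 3.5+ the standard library already provides `contextlib.redirect_stderr(target)`, which is usually preferable to a hand-rolled version like this.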
Open the CSVs, read them, gather all data, and return a list of plant names and a list of already-analyzed genome names | def getInfo(filename1):
with open(filename1) as f1:
reader = csv.reader(f1) #opens csv file
data1 = [] #holds all information from rows in csv
#start for
for row in reader:
data1.append(row) #grabs the information from each row
#end for
... | [
"def read_kg_data(csv_file):\n print(f\"Started a model builder for data from: {csv_file}\")\n df = pd.read_csv(csv_file)\n df.columns = [\"h\", \"r\", \"t\"]\n entities = list(set(df[\"h\"].tolist() + df[\"t\"].tolist()))\n relations = list(set(df[\"r\"].tolist()))\n retur... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
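The `getInfo` body above is truncated before the plant-name and genome-name extraction. A small sketch of the same pattern with the csv module, assuming (hypothetically) that plant names sit in the first column and already-analyzed genome names in the second:

```python
import csv

def read_names(csv_path):
    """Return (plant_names, analyzed_genomes) read from a two-column CSV."""
    plant_names, analyzed_genomes = [], []
    with open(csv_path, newline="") as handle:
        for row in csv.reader(handle):
            if not row:
                continue
            plant_names.append(row[0])
            if len(row) > 1 and row[1]:
                analyzed_genomes.append(row[1])
    return plant_names, analyzed_genomes
```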
Searches the NCBI Assembly database using the NCBI E-utilities API; returns the assembly accession number, BioProject number, and assembly publication date | def getAssemblyinfo(speciesName):
#---------------Create e-search URL & send request to API-----------------------
base_url = "https://eutils.ncbi.nlm.nih.gov/entrez/eutils/"
url = base_url + "esearch.fcgi?db=assembly&term=(%s[All Fields])&usehistory=y&api_key=f1e800ad255b055a691c7cf57a576fe4da08" % speci... | [
"def download_assemblies(self):\n n = 0\n for name, barcode in self.__barcodes.items():\n # Put the assembly barcode into an URL for database search\n url = \"http://enterobase.warwick.ac.uk/api/v2.0/%s/assemblies?barcode=%s&limit=50\" % (self.__db, barcode)\n try:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
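The `getAssemblyinfo` code above is cut off right after building the esearch URL. Below is a hedged sketch of one way to follow up that call with `requests`: run esearch with history enabled, then esummary with the returned WebEnv/QueryKey. The endpoint names and `db`/`usehistory` parameters follow the public E-utilities documentation, but the exact summary field that holds the accession or BioProject varies by database and should be checked against a real response; no API key is included here.

```python
import requests

EUTILS = "https://eutils.ncbi.nlm.nih.gov/entrez/eutils"

def search_assembly(term: str) -> dict:
    """Run esearch (with history) then esummary against the NCBI Assembly database."""
    search = requests.get(
        f"{EUTILS}/esearch.fcgi",
        params={"db": "assembly", "term": term, "usehistory": "y", "retmode": "json"},
        timeout=30,
    ).json()["esearchresult"]

    summary = requests.get(
        f"{EUTILS}/esummary.fcgi",
        params={
            "db": "assembly",
            "query_key": search["querykey"],
            "WebEnv": search["webenv"],
            "retmode": "json",
        },
        timeout=30,
    ).json()
    # Accession, BioProject, and release date live inside summary["result"] per record.
    return summary
```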
Searches the NCBI PubMed database using the NCBI E-utilities API; returns the article title and PubMed ID | def getpubmedinfo(speciesName):
#---------------Create e-search URL & send request to API-----------------------
search_base_url = 'https://eutils.ncbi.nlm.nih.gov/entrez/eutils/'
search_url = search_base_url + "esearch.fcgi?db=assembly&term=(%s[All Fields])&usehistory=y&api_key=f1e800ad255b055a691c7cf57a5... | [
"def search_for(search, reldate=None, mindate=None, maxdate=None,\n batchsize=100, delay=2, callback_fn=None,\n start_id=0, max_ids=None):\n class ResultParser(sgmllib.SGMLParser):\n # Parse the ID's out of the XML-formatted page that PubMed\n # returns. The format of t... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Creates a dictionary keyed by plant name with the appropriate values | def appendPlantDict(plantDict, speciesName, Accession_num, bioproject_num, pubdate, title, pubmed_id):
key = speciesName #sets the dictionary key to the species name
values = [Accession_num, bioproject_num, pubdate, title, pubmed_id] #sets dictionary values to appropriate information
plantDi... | [
"def create_supervisor_to_projects_map():\n\n mode = MAX_SUPERVISOR_PROJECTS * 0.75\n\n supervisor_project_numbers = (\n np.random.triangular(\n left=1,\n mode=mode,\n right=MAX_SUPERVISOR_PROJECTS,\n size=len(supervisor_names),\n )\n .round()\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Takes plantDict and prints the dictionary to a .csv file | def printFile(plantDict):
Comma = ','
Results = open("plantInfo.csv", 'a') #creates or opens existing csv file, appends data to file
#Results.write("%s%c%s%c%s%c%s%c%s%c%s\n" % ("Species Name", Comma, "Accession Number", Comma,
#"Bioproject Number", Comma, "Publication Ye... | [
"def print_customers(self, dict):\n cust_dict = dict\n for customer_id, loc in cust_dict:\n result = self.data[self.data['customer_no']==customer_id]['timestamp']\n return pd.row_to_csv('result.csv', index=False)",
"def write_data(df_dict, gps_trips_dir):\n for key, value in df... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Simple helper to prepare the data generator factories used to train the model | def get_data_generators_factories(config):
categories = config["categories"]
indices_to_colors_map, void_color = net.data.get_colors_info(len(categories))
voc_train_config = {
"data_directory": config["voc"]["data_directory"],
"data_set_path": config["voc"]["train_set_path"],
}
h... | [
"def create_generators(args):\r\n common_args = {\r\n 'batch_size': args.batch_size,\r\n 'config': args.config,\r\n 'image_min_side': args.image_min_side,\r\n 'image_max_side': args.image_max_side,\r\n # 'preprocess_image': preprocess_image,\r\n }\r\n\r\n # create random ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Should return a render_template of event_list | def event_list():
return render_template("event_list.html", user=current_user) | [
"def event(request, index_id, event_id):\n context = {\"index_id\": index_id, \"event_id\": event_id}\n return render(request, 'event.html', context)",
"def news_and_events(request):\n return render(request, 'mysite/news_and_events.html')",
"def event_list(request, category_slug=None):\n category = ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create a directory item showing a particular season in a series. Seasons contain episodes, so this passes responsibility on to SeasonMenu() to construct that list. | def makeSeasonItem(season):
art = R(CRUNCHYROLL_ART)
if Dict['series'][str(season['seriesId'])]['tvdbId'] is not None:
artUrl = getSeasonThumb(Dict['series'][str(season['seriesId'])]['tvdbId'], season['seasonnum'])
#Log.Debug("arturl: %s"%artUrl)
if artUrl is not None:
art = Function(GetArt,url=artUrl)
seas... | [
"def SeasonMenu(sender,seriesId=None,season=None):\n\tdir = MediaContainer(disabledViewModes=[\"Coverflow\"], title1=sender.title1, title2=\"Series\")\n\tepList = getSeasonEpisodeListFromFeed(seriesId, season)\n\tfor episode in epList:\n\t\tdir.Append(makeEpisodeItem(episode))\n\treturn dir",
"def add_new_season(... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |