query (string) | document (string) | negatives (list) | metadata (dict)
|---|---|---|---|
Add average speeds to each time category. We need to bin the time categories because every half hour is too fine-grained and causes collinearity issues. To overcome this we bin each journey into 3 bigger ones. | def test(group):
median_dist = group.Distance.median()
median_time = group.loc[group['Distance'] >= median_dist].TravelTime.iloc[0]
    median_speed = median_dist / median_time
try:
avg_speeds[group.Day.tolist()[0]][group.TimeCategory.tolist()[0]] += ... | [
"def add_speeds_to_trajectories(ds):\n lats, lons, times = ds.lat.values, ds.lon.values, ds.time.values\n \n heading_starts, heading_ends, seg_speeds = [], [], []\n \n for i in range(len(lats)-1):\n geod = Geodesic.WGS84.Inverse(lats[i], lons[i], lats[i+1], lons[i+1])\n dtime = (times[i... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Prepare Google or RAWG URLs for asynchronous fetching. | async def get_api_urls(self):
if self.google_dev_query or self.google_games_query:
cx_broad1 = self.cx1
cx_broad2 = self.cx2
cx_broad3 = self.cx3
cx_broad4 = self.cx4
cx_broad5 = self.cx5
if self.google_dev_query or self.google_ga... | [
"def _get_all_url(cls) -> str:",
"def process_batch(self, urls, extra_headers=None):\n\n # cull out ones we've got\n n_before = len(urls)\n urls = [url for url in urls if not self.store.already_got(url)]\n logging.info(\"processing %d urls (%d are new)\", n_before, len(urls))\n\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Prepare RAWG URLs for fetching up to the maximum page of results. | async def get_api_urls_w_max_page(self):
max_pages = await self.http_response_async()
dev_slugs = await self.database_fetches()
dev_slug_list = []
for i in dev_slugs:
i = [i]
dev_slug_list.append(i)
max_pages_list_init = [range(x) for x in max_pages]
... | [
"def _prepare_url(self, paging=False):\n # If there is no min_tag_id, then this is likely the first poll and\n # we need to initialize the min_tag_id.\n if self.min_tag_id is None:\n self._initialize_min_tag_id()\n if not paging:\n # New query so save off the new mi... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Fetch RAWG results for developer games, popular games, and upcoming games. | def rawg_fetch(key, rawg_dev=False, rawg_pop=False, rawg_upcm=False):
try:
if rawg_dev:
rawg_db_query_results = asyncio.run(
AsyncFetchApi(rawg_key=key, fetch_dev_games=True, max_page=True).http_response_async_max_page())
elif rawg_pop:
rawg_db_query_results =... | [
"def get_games(url, includetwitter=False, rest=5):\n\n headers = {\n 'User-Agent':\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'\n }\n page = requests.get(url, headers=headers)\n time.sleep(rest)\n\n games = ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Send a command and return an array containing each line of output. | def cmd(self, cmd):
str_out = self.__get_stdout(cmd)
return [x.strip() for x in str_out.split('\n')] | [
"def send_command(session, cmd, host=''):\n\n logger.debug('Executing Command on %s: %s', host, cmd)\n results = session.send_command(cmd)\n return results.split('\\n')",
"def GetCommandOutput(command):\r\n\r\n f = os.popen(command, 'r')\r\n lines = [line.strip() for line in f.readlines()]\r\n f.close... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Send an email message containing the generated code. | def send_message(email, generated_code):
mail = EmailMessage(
'Confirm your email',
generated_code,
settings.EMAIL_HOST_USER,
[email, ]
)
try:
mail.send()
        result = f'message was sent to {email} with confirmation code.'
return result
except SMTPE... | [
"def SendMailVerificationCode(send_to):\n sent_from = settings.EMAIL_USER\n to = [send_to]\n subject = 'Verification code [Accommodating]'\n length = 6\n verify_sample = random.sample(init_chars, length)\n verification_code = ''.join(verify_sample)\n body = f\"Here is your verification code!\"\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Dummy decorator for code | def dummy(func):
def dummy_wrap(self, *args, **kwargs):
""" Decorates to a dummy function """
print("Calling dummy for %s" % func.__str__())
func(self, *args, **kwargs)
return dummy_wrap | [
"def dummy_wrap(self, *args, **kwargs):\n print(\"Calling dummy for %s\" % func.__str__())\n func(self, *args, **kwargs)",
"def dummy_function(*args, **kwargs):\n return",
"def test_decorated_nothing() -> None:\n print(\"This is a test that's decorated but the decorator does nothing.\")",
... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Decorates to a dummy function | def dummy_wrap(self, *args, **kwargs):
print("Calling dummy for %s" % func.__str__())
func(self, *args, **kwargs) | [
"def dummy(func):\n\n def dummy_wrap(self, *args, **kwargs):\n \"\"\" Decorates to a dummy function \"\"\"\n print(\"Calling dummy for %s\" % func.__str__())\n func(self, *args, **kwargs)\n return dummy_wrap",
"def dummy_function(*args, **kwargs):\n return",
"def dont_decorate(func... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Normalize the vectors of the matrix Y with respect to X so that Y.T @ X = I (identity). This is used to normalize the matrix of left eigenvectors. | def normalize(X, Y):
Yn = np.zeros_like(X)
YTX = Y.T @ X # normalize y so that Y.T @ X will return I
factors = [1/a for a in np.diag(YTX)]
# multiply each column in y by a factor in 'factors'
    for i, col in enumerate(Y.T):
        Yn[i] = col * factors[i]
Yn = Yn.T
return Yn | [
"def _normalize_rows(self, Y):\n return Y / la.norm(Y, axis=1)[:, np.newaxis]",
"def normalize_X(self,X):\r\n X_n = X.copy()\r\n for i in range(X_n.shape[1]):\r\n X_n[:, i] = (X_n[:, i] - self.lower_bound[i]) / (self.upper_bound[i] - self.lower_bound[i])\r\n return X_n",
"... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This function will return the natural frequencies (w), eigenvectors (P), mode shapes (S) and the modal transformation matrix S^-1 (takes x -> r, the modal coordinates) for an undamped system. | def modes_system_undamped(M, K):
L = la.cholesky(M)
Linv = la.inv(L)
lam, P = eigen(Linv @ K @ Linv.T)
w = np.real(np.sqrt(lam))
S = Linv @ P
Sinv = P.T @ Linv
return w, P, S, Sinv | [
"def modes_system(M, K, C=None):\n\n n = len(M)\n\n Z = np.zeros((n, n))\n I = np.eye(n)\n Minv = la.inv(M)\n\n if (C is None or np.all(C == 0) or # check if C has only zero entries\n la.norm(Minv @ C @ K - Minv @ K @ C, 2) <\n 1e-8*la.norm(Minv @ K @ C, 2)):\n w, P, S, ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Natural frequencies, damping ratios, and mode shapes of MDOF system. This function will return the natural frequencies (wn), the damped natural frequencies (wd), the damping ratios (zeta), the right eigenvectors (X) and the left eigenvectors (Y) for a system defined by M, K and C. If the damping matrix 'C' is None or i... | def modes_system(M, K, C=None):
n = len(M)
Z = np.zeros((n, n))
I = np.eye(n)
Minv = la.inv(M)
if (C is None or np.all(C == 0) or # check if C has only zero entries
la.norm(Minv @ C @ K - Minv @ K @ C, 2) <
1e-8*la.norm(Minv @ K @ C, 2)):
w, P, S, Sinv = modes_sys... | [
"def mck2modal(*args):\r\n\r\n if len(args) == 2: # Undamped case\r\n # Solve the undamped case for eigenfrequencies and mode shapes\r\n M = args[0]\r\n K = args[1]\r\n [V, D] = linalg.eig(linalg.solve(M,K))\r\n [D, I] = np.sort(np.diag(D)) # Sort eigenvalues/frequencie... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This function calculates the time response for an undamped system and returns the state-space vector X. The first n rows contain the displacement (x) and the last n rows contain the velocity (v) for each coordinate. Each column is related to a timestep. The time array is also returned. | def response_system_undamped(M, K, x0, v0, max_time):
t = np.linspace(0, max_time, int(250 * max_time))
dt = t[1] - t[0]
n = len(M)
Z = np.zeros((n, n))
I = np.eye(n, n)
# creates the state space matrix
A = np.vstack([np.hstack([Z, I]),
np.hstack([-la.pin... | [
"def __call__(self,X,t):\n xvals = X[:3]-self.locs\n rvals = numpy.sqrt( (xvals**2).sum(1) )\n \n dVdt = sum([ self.halos[i].accel(rvals[i])*xvals[i]/rvals[i] \\\n for i in range(self.N) ])\n return numpy.concatenate([X[3:] * 1E3 * yr/kpc,\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This function solves the system given the initial displacement vector 'X0', initial velocity vector 'V0', the mass matrix 'M', the stiffness matrix 'K', and the damping matrix 'C' and force 'F'. t is a row vector of evenly spaced times. F is a matrix of forces over time, each column corresponding to the corresponding c... | def response_system(M, C, K, F, x0, v0, t):
n = len(M)
Z = np.zeros((n, n))
I = np.eye(n)
# creates the state space matrix
A = np.vstack([np.hstack([Z, I]),
np.hstack([-la.pinv(M) @ K, -la.pinv(M) @ C])])
B = np.vstack([Z,
la.inv(M)])
C ... | [
"def solve(self, model, t):\n\n print t\n\n # Compute applied loads, this should be independent of deformation\n load, load_squared = model.domain.compute_load_vector(t)\n iteration = 0\n while True:\n if iteration > self.miter:\n print \"Max iterations a... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return True if the pull request belongs to the bugfix pull requests. | def _is_bugfix(self, pr: Dict) -> bool:
return any(
[label[self.key_name] == self.label_bug for label in pr[self.key_labels]]
) | [
"def link_issue_to_pr(issue, pr):\n if issue < app.config['JIRA_WATERMARK']:\n logging.info(\"Skipping linking issue {i} since before watermark.\")\n return\n if not app.config['UPDATE_ISSUES']:\n logging.info(\"Not linking issues due to system wide setting.\")\n return\n\n jira... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
build_gru_mut001_step returns a function, named gru_mut001_step, that executes one (1) GRU MUT1 step: gru_mut001_step(X_t, h_tm1) | def build_gru_mut001_step(self):
def gru_mut001_step(X_t, h_tm1, *args_for_params):
z_t = self._gates.z.connect_through(X_t)
r_t = self._gates.r.connect_through(X_t, h_tm1)
# h_t = self._gates.h.connect_through( r_t * h_tm1)
h_t = self._gates.h.connect_through( r_t)
h_t = h_t + self.psis.h[0]( s... | [
"def init_gru(rnn):\n\n def _concat_init(tensor, init_funcs):\n (length, fan_out) = tensor.shape\n fan_in = length // len(init_funcs)\n\n for (i, init_func) in enumerate(init_funcs):\n init_func(tensor[i * fan_in: (i + 1) * fan_in, :])\n\n def _inner_uniform(tensor):\n f... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
__get_state__ returns the parameters or "weights" that were used in this feedforward | def __get_state__(self):
## unroll all the parameters
gates = self._gates
Thetas = [theta for gate in gates for theta in gate.__get_state__()['Thetas']]
params = [weight for gate in gates for weight in gate.__get_state__()['params']]
print "Total number of parameters: %d " % len(params)
return dict(... | [
"def __getstate__(self):\n W_list = []\n bhid_list = []\n bvis_list = []\n for layer in self.dA_layers:\n W, bhid, bvis = layer.get_params()\n W_list.append(W.get_value(borrow=True))\n bhid_list.append(bhid.get_value(borrow=True))\n bvis_list.... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
build_J_L2norm builds the cost functional in the form of the L2 norm (i.e. the Euclidean distance norm) | def build_J_L2norm(self, lambda_val, y_sym=None):
if y_sym is not None:
self.y = y_sym
else:
y_sym = self.y
Thetas_only = self.GRU_model.__get_state__()['Thetas']
lambda_val = np.cast[theano.config.floatX]( lambda_val ) # regularization constant
J = build_cost_functional_L2norm( lambda_val,
... | [
"def build_cost_functional_L2norm(lambda_val,h,y_sym,Thetas):\n#\tm = y_sym.shape[0].astype(theano.config.floatX)\n\n\tJ_theta = np.cast[theano.config.floatX](0.5) * T.mean( T.sqr(h - y_sym ))\n\n#\treg_term = np.cast[theano.config.floatX](lambda_val/ (2. )) /m *T.sum( [ T.sum( Theta*Theta) for Theta in Thetas] )\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
prediction_fxn_givens: same as prediction_fxn, resulting in a theano function, but using givens | def prediction_fxn_givens(self,X_vals):
X=self.X
y_predicted = sandbox.cuda.basic_ops.gpu_from_host(
self.scan_res[0][-1]
)
predictions = theano.function([],
outputs = y_predicted,
givens = { X : X_vals.astype(theano.config.floatX) })
return predictions | [
"def gradient_boosting_predict(X, f0, models, nu):\n f_before=f0\n X=torch.tensor(X).float()\n ### BEGIN SOLUTION\n for model in models:\n model.eval()\n Tm=model(X)\n Tm=Tm.squeeze()\n Tm=Tm.detach().numpy()\n f_new=f_before+nu*Tm\n f_before=f_new\n\n y_hat=... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
predict_on_lst_givens: same as predict_on_lst, but using givens for the theano function through prediction_fxn_givens | def predict_on_lst_givens(self, test_data, verbose=False):
predictions = []
for i,o in test_data: # test_data is a list from j=0,1,...m-1, m total training data points
i = i.astype(theano.config.floatX) # Txd or (T,d) size dims. matrix
predictions_func = self.prediction_fxn_givens(i)
predicted_y = pr... | [
"def gradient_boosting_predict(X, f0, models, nu):\n f_before=f0\n X=torch.tensor(X).float()\n ### BEGIN SOLUTION\n for model in models:\n model.eval()\n Tm=model(X)\n Tm=Tm.squeeze()\n Tm=Tm.detach().numpy()\n f_new=f_before+nu*Tm\n f_before=f_new\n\n y_hat=... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
save_parameters (save the weights or parameters of this model as numpy arrays that are pickled) | def save_parameters(self, filename='objects.save'):
f = open(filename,'wb')
for param in self.GRU_model.__get_state__()['params']:
cPickle.dump( param.get_value(), f, protocol=cPickle.HIGHEST_PROTOCOL)
f.close() | [
"def save_params(self, model):\n torch.save(model.state_dict(), self.params_file)",
"def save(self):\n LOGGER.info('saving parameters: {} ...'.format(self._param_file))\n np.save(self._param_file, self._parameters, allow_pickle=True, fix_imports=True)",
"def dump_model(self, save_path):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
build_cost_functional (with regularization) J \equiv J_y(\Theta, b), but now with X, y being represented as theano symbolic variables first, before the actual numerical data values are given. INPUT/PARAMETERS ================ | def build_cost_functional(lambda_val, h, y_sym, Thetas):
m = y_sym.shape[0].astype(theano.config.floatX)
# logistic regression cost function J, with no regularization (yet)
J_theta = T.sum( T.nnet.categorical_crossentropy( h, y_sym ) )
reg_term = np.float32(lambda_val/ (2. )) /m *T.sum( [ T.sum( Theta*Theta) fo... | [
"def build_cost_functional_L2norm(lambda_val,h,y_sym,Thetas):\n#\tm = y_sym.shape[0].astype(theano.config.floatX)\n\n\tJ_theta = np.cast[theano.config.floatX](0.5) * T.mean( T.sqr(h - y_sym ))\n\n#\treg_term = np.cast[theano.config.floatX](lambda_val/ (2. )) /m *T.sum( [ T.sum( Theta*Theta) for Theta in Thetas] )\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
build_cost_functional_L2norm (with regularization) J \equiv J_y(\Theta, b), for the L2 norm, or Euclidean space norm, but now with X, y being represented as theano symbolic variables first, before the actual numerical data values are given. INPUT/PARAMETERS ================ | def build_cost_functional_L2norm(lambda_val,h,y_sym,Thetas):
# m = y_sym.shape[0].astype(theano.config.floatX)
J_theta = np.cast[theano.config.floatX](0.5) * T.mean( T.sqr(h - y_sym ))
# reg_term = np.cast[theano.config.floatX](lambda_val/ (2. )) /m *T.sum( [ T.sum( Theta*Theta) for Theta in Thetas] )
# reg_term = n... | [
"def build_J_L2norm(self, lambda_val, y_sym=None):\n\t\tif y_sym is not None:\n\t\t\tself.y = y_sym\n\t\telse:\n\t\t\ty_sym = self.y\n\t\t\n\t\tThetas_only = self.GRU_model.__get_state__()['Thetas']\n\t\t\n\t\tlambda_val = np.cast[theano.config.floatX]( lambda_val ) # regularization constant\n\t\tJ = build_cost_fu... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
build_gradDescent_step builds a gradient descent step (with momentum), using the J from build_cost_functional | def build_gradDescent_step( J, Thetabs, X_sym,y_sym, alpha =0.01, beta = 0.0):
updateThetabs = [ sandbox.cuda.basic_ops.gpu_from_host(
Theta - np.float32( alpha) * T.grad( J, Theta) + np.float32(beta)*Theta ) for Theta in Thetabs]
gradientDescent_step = theano.function(inputs = [X_sym, y_sym],
out... | [
"def nngradientDescent(X, y, nn_params, input_layer_size, hidden_layer_size, num_labels, alpha, lbd, num_iters):\n J_history = np.zeros(num_iters)\n for i in range(num_iters):\n J_history[i], grad = nnCostFunction(nn_params, input_layer_size, hidden_layer_size, num_labels, X, y, lbd)\n nn_params... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Launch the playbook execution | def launch(self):
endpoint = "%s/%s" % (PLAYBOOK_EXEC_URL, self.playbook)
response = self.rest_client.http_post(endpoint, self.params)
if response:
self.play_uuid = json.loads(response.text)["data"]["play_uuid"]
self.log.info("Playbook execution launched succesfuly")
... | [
"def _execute_playbook(playbook_name):\n install_dir = os.path.dirname(os.path.dirname(sys.executable))\n share_dir = os.path.join(install_dir, 'share', 'dws')\n playbook_path = os.path.join(share_dir, 'playbooks', playbook_name)\n if not os.path.exists(playbook_path):\n # When running directly f... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return the status of the execution | def get_status(self):
status_value = ExecutionStatusCode.NOT_LAUNCHED
if self.play_uuid == '-': # Initialized
status_value = ExecutionStatusCode.NOT_LAUNCHED
elif self.play_uuid == '': # Error launching playbook
status_value = ExecutionStatusCode.ERROR
else:
... | [
"def get_execution_status(self):\n return {\n u'timestamp': self.last_execution,\n u'error': self.error_occured,\n }",
"def status(self, result, config=None):\r\n return result['status']",
"def status(self) -> 'outputs.UpdateRunStatusResponse':\n return pulumi.g... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the data of the events filtered by a task pattern and an event filter | def get_result(self, event_filter=""):
if not self.result_task_pattern or not self.play_uuid:
result_events = {}
response = self.rest_client.http_get(PLAYBOOK_EVENTS % self.play_uuid)
if not response:
result_events = {}
else:
events = json.loads(res... | [
"def filter_task(self, task, feature, date_ranges):\n all_source_times = list()\n if self.filter_product is not None and self.filter_product != {}:\n for sr in task.sources:\n v = sr.data\n all_source_times = (all_source_times +\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Add a mail address to the mailing list. Validate the address and add it. | def add_mail_address(self, new_mail: str) -> None:
if new_mail.find("@") == -1 \
            or not 1 < len(new_mail) < 100:
exit("Bad mail address given")
self._destination_mail.append(new_mail) | [
"def add(self, email_address, expecting_reload=True):\n self.click('Add an email address')\n self.harness.css('input', self.element).fill(email_address)\n if expecting_reload:\n with self.harness.page_reload_afterwards():\n self.click('Send verification')\n else... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Smooth data using a lowpass frequency filter. Applies a lowpass Butterworth filter to `da.data` based on the sampling rate defined by `coord`. | def lowpass(
da: sc.DataArray, *, dim: str, N: int, Wn: sc.Variable, coord: Optional[str] = None
) -> sc.DataArray:
da = _ensure_no_variances(da)
coord = dim if coord is None else coord
if da.coords[coord].sizes[dim] == da.sizes[dim] + 1:
da = da.copy(deep=False)
da.coords[coord] = sc.m... | [
"def lowpass(a, cutoff, order, config):\n B, A = signal.butter(order, cutoff / (config[\"sample_rate\"] / 2), btype=\"lowpass\")\n return signal.lfilter(B, A, a, axis=0)",
"def applyLowPass(x, fs, fc=30, N=4):\n wc = fc / (fs / 2)\n b, a = scipy.signal.butter(N, wc)\n return scipy.signal.filtfilt(b... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return the indices of the board that are valid moves. | def get_valid_moves(self):
return [i for i in range(9) if self.is_valid_move(i)] | [
"def valid_moves(self):\n # get column indices where there is still room to add chips.\n cols = (self.col_heights < BOARD_H).nonzero()[0]\n return [self.next_pos_in_col(i) for i in cols]",
"def generate_legal_moves(self) -> list:\n board = self.board_list\n index = 0\n re... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Applies a move to the game board and updates the turn. | def apply_move(self, move):
if self.is_valid_move(move):
self.board[move] = self.turn
self.turn = 'X' if self.turn == 'O' else 'O' | [
"def applyMove(board,gameState, move, player = \"player\"):\n pass",
"def _update(self, moves, amount):\n for move in moves:\n self.board[move[0]][move[1]] += amount",
"def update_move(self, game_data, move_index, move):\n current_data = self.get_data_at_move(game_data, move_index)\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns true if the passed in board is the same as the current board. | def is_same(self, board):
return board == self.board | [
"def same_board_array(self, other):\n return (is_class_instance(other, 'ConnectFourBoard')\n and (self.board_array == other.board_array))",
"def current_board_is_visited(current_board, visited):\r\n for board in visited:\r\n if current_board.equals(board):\r\n return Tru... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Use factory.faker to generate a random file name which includes an uppercase character. | def random_filename():
filegen = faker.Faker()
return filegen.file_name().title() | [
"def _make_random_filename(base_dir='',suffix='',num_chars=20):\n all = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789'\n rand_region = ''.join([choice(all) for i in range(num_chars)])\n return path.join(base_dir,rand_region+suffix)",
"def create_file_name():\n # This generates a name... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Train wine model for testing other functions | def train_wine_model():
from keras.optimizers import SGD,Adam,Adagrad,RMSprop
from keras import losses
dataman = Datamanager.Datamanager(dataset="wine")
sgd= SGD(lr=0.01)
rmsprop = RMSprop(lr=0.001, rho=0.9, epsilon=None, decay=0.0)
adam = Adam(lr=0.01, beta_1=0.9, beta_2=0.999, epsilon=No... | [
"def test_train(self):\n trace.train(10)",
"def test_wine():\n test_path = tempfile.mkdtemp()\n x_train, metadata = wine(test_path)\n try:\n assert x_train.shape == (21, 5)\n except:\n shutil.rmtree(test_path)\n raise()",
"def train(self) -> None:\r\n\r\n self.training = True",
"def do_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
DefaultDataTypes() -> Dictionary[SecurityType, List[TickType]]. Hard code the set of default available data feeds | def DefaultDataTypes():
pass | [
"def addDefaults(cls):\n dic = cls.getAll()\n dic.update(cls.DEFAULT_HELIXTYPES)\n pymol.plugins.pref_set('BETAFAB_HELIXTYPES', dic)\n pymol.plugins.pref_save(quiet=True)",
"def list_multiple_data_types():\n return [93, 77, 'fiftyfive', 54, 44, 31, 26, 20, 17, 3]",
"def get_data_i... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Load an image from disk using the requested backend. | def load_image(filepath: Path | str, *, backend: ImageLoadingBackend = "opencv") -> RawImage:
if backend == "opencv":
if isinstance(filepath, Path):
# cv2 can only read string filepaths
filepath = str(filepath)
image = cv2.imread(filepath) # type: ignore
if image is ... | [
"def load() -> Image:\r\n image = load_image(choose_file())\r\n show(image)\r\n return image",
"def load(image_path, access='random'):\n\n return pyvips.Image.new_from_file(image_path, access=access)",
"def load_image(file):\n return Image.open(os.path.abspath(file))",
"def img_read(img_path):\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Infer which image-loading backend to use based on the type of the image transform. | def infer_il_backend(transform: ImageTform | None) -> ImageLoadingBackend:
    # Default to opencv if transform is None as numpy arrays are generally
# more tractable
if transform is None or isinstance(transform, get_args(AlbumentationsTform)):
return "opencv"
return "pillow" | [
"def _autodetect_backend(storage_path):\n if storage_path == '::inmem::':\n return 'inmem'\n elif storage_path.endswith('.npz'):\n return 'npz'\n elif storage_path.endswith(('.h5', '.hdf5')):\n return 'hdf5'\n if storage_path.endswith('.mat'):\n return 'mat'\n else:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Infer which audio-loading backend to use based on the operating system. | def infer_al_backend() -> AudioLoadingBackend:
soundfile: Final = "soundfile"
sox: Final = "sox_io"
return soundfile if platform.system() == "Windows" else sox | [
"def _autodetect_backend(storage_path):\n if storage_path == '::inmem::':\n return 'inmem'\n elif storage_path.endswith('.npz'):\n return 'npz'\n elif storage_path.endswith(('.h5', '.hdf5')):\n return 'hdf5'\n if storage_path.endswith('.mat'):\n return 'mat'\n else:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Add an artist to this library. | def add_artist(self, artist):
self.artists[artist.name] = artist | [
"def add(self, data):\n\n if not self.validate(data):\n raise Exception(\"itunes data isn't valid. Make sure \"\n \"it's not missing important data, and \"\n \"isn't duplicated\")\n\n return self.engine.add(JoinSongArtist(**data.dict()))... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Add a track to this library. | def add_track(self, track):
self.tracks[track.id] = track
self._add_genre(track.genre) | [
"def add_track(self):\n self.tracks.append(Track(self))",
"def add_track(self, track):\n self.tracks.add(track.id)\n self.artists.update(track.artists)",
"def addTrack(self, trackId):\n self.sonus.playlist_add_id(trackId)",
"def add_track(self, slack_event):\n track_id = sel... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Add the genre to the library | def _add_genre(self, genre):
self.genres.add(genre) | [
"def add_genre(self, genre):\n self.genres.add(genre)",
"def add_genres(self, dict_genre):\r\n raise NotImplementedError",
"def add(self, path: str, genre=None):\n song = MusicFile(path)\n song.load()\n if not genre or genre == song.genre:\n self.collection.append(s... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Merge duplicate tracks into one and remove the extraneous copies. Preference will be given to merging the duplicate track info onto the album with the most tracks, then the most recent. The updated track will have the sum of play counts and the average of ratings. If any of the duplicates are tagged loved, the merged track will retain that. | def remove_duplicates(self):
# { track_identifier : [track_id] }
identifier_to_index = {}
# { track_identifier }
duplicate_identifiers = set()
# { track_identifier : (track_id, plays, rating, loved) }
# the track we'll merge onto, and the merged plays/rating/loved
... | [
"def dedupe(self, spatial_iou_threshold=0.8, dt=5, tracks=True, activities=True, temporal_iou_threshold=0.8, verbose=True):\n if tracks:\n deleted = set([])\n for tj in sorted(self.tracklist(), key=lambda t: len(t), reverse=True): # longest to shortest\n for (s, ti) in s... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Associate a genre with this artist. | def add_genre(self, genre):
self.genres.add(genre) | [
"def _add_genre(self, genre):\n self.genres.add(genre)",
"def add_genres(self, dict_genre):\r\n raise NotImplementedError",
"def set_genre(self, genre=UNKNOWN_GENRE):\n self.genre = genre",
"def add_artist(self, artist):\n self.artists[artist.name] = artist",
"def genre_list(self... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Add a track to the music album, updating album artists as necessary. | def add_track(self, track):
self.tracks.add(track.id)
self.artists.update(track.artists) | [
"def addalbum(self, album):\n self.albums.append(album)",
"def _insert_track(\n self,\n *,\n album='Amazing Hits',\n albumartist='Pop Star',\n discnumber='1',\n media=None,\n discsubtitle=None,\n tracknumber='1',\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create a base music track. Sets the id, name, artists, rating as given. If there are multiple or featured artists they will be combined in a set. Defaults plays to 0 and genre to UNKNOWN_GENRE. | def __init__(self, id, name, artists, rating):
self.rating = RATING_MAPPING[int(rating)]
self.plays = 0
feat_artists = FEAT_GROUP_PATTERN.match(name)
artists = re.split(MULT_ARTIST_PATTERN, artists)
main_artist = artists[0]
artists = set(artists)
if feat_artist... | [
"def _insert_track(\n self,\n *,\n album='Amazing Hits',\n albumartist='Pop Star',\n discnumber='1',\n media=None,\n discsubtitle=None,\n tracknumber='1',\n title='Cool Song',\n artist='Pop Star',\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the track year. | def set_year(self, year):
self.year = int(year) if year else None | [
"def year(self, new_year_value):\n if new_year_value < 0:\n raise ActivityValidatorError(\"Year cannot be negative! (or could it!? xD)\\n\")\n self.__date[\"year\"] = new_year_value",
"def IncYear(self):\n self.year = self.year + 1\n self.set_day = None",
"def mod_year(sel... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets whether the track is 'loved' on iTunes. | def set_loved(self, is_loved):
self.loved = is_loved | [
"def set_spotlight_on(self):\n return self._set_spotlight_properties({\"enabled\": True})",
"def set_light_detection_on(self) -> bool:\n return self.set_light_detection(True)",
"def lamp_on(self, lamp_on):\n\n self._lamp_on = lamp_on",
"def on(self):\n self.transite_light_state(on_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Set the track genre. | def set_genre(self, genre=UNKNOWN_GENRE):
self.genre = genre | [
"def add_genre(self, genre):\n self.genres.add(genre)",
"def _add_genre(self, genre):\n self.genres.add(genre)",
"def _set_genres(self):\r\n try:\r\n genres = self.page.find('div', itemprop='genre')\r\n if genres:\r\n genres = genres.findAll('a')\r\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Set the track rating. | def set_rating(self, rating=0):
self.rating = rating | [
"def rating(self, rating: float):\n\n self._rating = rating",
"def set(self, state, rating, oldrating, initiator):\n rating_obj = state.obj()\n artwork = rating_obj.artwork\n if artwork:\n artwork.rating_sum = artwork.rating_sum - oldrating + rating\n recalc_wilso... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Set the track play count. | def set_plays(self, plays=0):
self.plays = int(plays) | [
"def presetCount(self, count_preset):\r\n previous_count = self.__count\r\n self.__count = count_preset\r\n return previous_count",
"def set_trigger_count(self, count):\n self.count = count",
"def countNumSuitsPerTrack(self, count):\n for suit in self.suitList:\n if cou... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Set the album id. | def set_album_id(self, album_id):
self.album_id = album_id | [
"def album_uri(self, uri):\r\n self.data['album_uri'] = uri",
"def set_id(self, a_id):\n raise QiitaAnalysisError(\"The id of an object can't be changed\")",
"def album(self, album_id, market=None):\n\n trid = self._get_id(\"album\", album_id)\n if market is not None:\n re... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Retrieves a track identifier in the form of its name and artists. Intended to be used for identifying duplicate tracks within the same album. | def get_track_identifier(self):
return (self.name, ','.join(self.artists)) | [
"def get_track_id(client, artist, title, album=None, cover_of=None):\n results = _get_track_search_results(client, artist, title)\n if not results and cover_of is not None:\n results = _get_track_search_results(client, cover_of, title)\n if not results:\n return None\n results = sorted(\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Creates a verbose string representation. | def print_verbose(self):
rstr = 'Track ID:\t{id}\n'.format(id=self.id)
rstr += 'Name:\t\t{name}\n'.format(name=self.name)
rstr += 'Artists:\t\t{artist}\n'.format(artist=','.join(self.artists))
rstr += 'Genre:\t\t{genre}\n'.format(genre=self.genre)
rstr += 'Rating:\t\t{rating}\n'.... | [
"def verbose(msg):\n message(msg, flag='v')",
"def verbose_value(self, value):\n\t\treturn self.render_value(value)",
"def verbose(self):\n \n self.options[\"verbose\"] = True",
"def verbose_print(msg):\n if VERBOSE:\n print(msg)",
"def verbose(message: str) -> None:\n if not V... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Calculate qestimators for tshift == 0 where the shot noise term is considered if arr0 == arr1. | def _sub_qestimators(self, arr0, arr1, rbf, pair):
qestimator = np.zeros(self._binfactors.size)
qestimator_stderr = np.zeros(self._binfactors.size)
for i, binf in enumerate(self._binfactors):
new_sgl = self._segmentlength // rbf // binf
n_segments = arr0.size // new_sgl
... | [
"def _sub_qestimators_withtshift(self, arr0, arr1, rbf, tshift):\n # time = np.zeros(self._binfactors.size)\n qestimator = np.zeros(self._binfactors.size)\n qestimator_stderr = np.zeros(self._binfactors.size)\n # index = 0\n for i, binf in enumerate(self._binfactors):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Calculate qestimators for tshift >= 1 where no shot noise term exists. | def _sub_qestimators_withtshift(self, arr0, arr1, rbf, tshift):
# time = np.zeros(self._binfactors.size)
qestimator = np.zeros(self._binfactors.size)
qestimator_stderr = np.zeros(self._binfactors.size)
# index = 0
for i, binf in enumerate(self._binfactors):
# time[i] ... | [
"def run_exp(self, steps, n_mc, q, seed_tot, t_saved=None):\n random_state = np.random.RandomState(seed_tot)\n seeds = random_state.randint(1, 312414, n_mc) # seed for all experiment on\n\n if t_saved is None:\n t_saved = [i for i in range(steps)] # if t_saved is not given the enti... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Make a putfiles request on each of these files using the python bindings. | def _put_files(cls, filename_key_list, staging_bucket, timeout_seconds=1200):
logger = infra.get_logger(Upload)
bundle_uuid = str(uuid.uuid4())
files = []
for filename, file_uuid, key in filename_key_list:
logger.info("%s", "File {}: registering...".format(filename))
... | [
"def list_files() -> dict:\n endpoint_url = '/real-time-response/entities/put-files/v1'\n response = http_request('GET', endpoint_url)\n return response",
"def _UploadFiles(upload_dir, files):\n if files:\n google_storage_upload_dir = os.path.join(_RENDER_TEST_BUCKET, upload_dir)\n cmd = [os.path.... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
A utility function to hide/delete a Story object. | def hide_story(story_id):
story = Stories.query.filter_by(id=story_id).one()
old_json_value = {"is_visible": story.is_visible}
story.is_visible = False
new_json_value = {"is_visible": False}
update_object(new_json_value, Stories, story.id)
delete_doc(story.id)
# We should keep the same c... | [
"def test_api_v3_stories_story_public_id_delete(self):\n pass",
"def delete_story(request, story_id):\n if not request.user.is_staff:\n messages.error(request, 'Sorry, only the author can do that!')\n return redirect(reverse('index'))\n\n story = get_object_or_404(Story, pk=story_id)\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
A utility function to edit a Story object and convert parameters to the correct data types. After the Story object is edited, it will be added and committed to the database. | def update_story(story_id,
activist_first,
activist_last,
activist_start,
activist_end,
tags,
content,
activist_url,
image_url,
video_url,
user_guid,
... | [
"def edit_exo(exoId, newName, chaps, duration, txts, qTF, qH, qFB, tags):\n\n #on récupère l'exercice correspondant\n exo = db.session.query(MetalExercise).filter(MetalExercise.id==exoId).first()\n if newName:\n exo.name = newName\n if tags:\n exo.tags = tags\n if duration:\n exo... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Gets the usi of this Spectrum. Universal Spectrum Identifier | def usi(self):
return self._usi | [
"def chip_sku(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"chip_sku\")",
"def waveunit(self):\n return u.Unit(self.meta.get('waveunit', \"Angstrom\"))",
"def getAnalogUnit(self,num):\n listidx = self.An.index(num) # Get the position of the channel number.\n return self.uu... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the usi of this Spectrum. Universal Spectrum Identifier | def usi(self, usi):
self._usi = usi | [
"def set_from_SI(self, val):\n if is_none(self.unit):\n self.value = val\n return\n self.value = self.unit.inverse(val)",
"def SetNumberOfSpatialSamples(self, num: 'unsigned int') -> \"void\":\n return _itkMutualInformationImageToImageMetricPython.itkMutualInformationIma... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Gets the accession of this Spectrum. Local identifier specific to the provider | def accession(self):
return self._accession | [
"def get(self):\r\n\r\n return self.LocalPatientID",
"def get_external_id(self):\n pass",
"def accession_id(self) -> int:\n return self.calcbench_id",
"def identity_provider(self):\n return self._identity_provider",
"def identity(self):\n return self.data['identity']",
"... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the accession of this Spectrum. Local identifier specific to the provider | def accession(self, accession):
self._accession = accession | [
"def setAccessMode(self, mode): \n self.__accessMode = mode",
"def set_alias(self, alias):\n self.send_command(api.set_alias, alias='IPCAM')",
"def set_access_code(self, *args, **kwargs):\n return _digital_swig.digital_correlate_access_code_tag_bb_sptr_set_access_code(self, *args, **kwargs)",
... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Gets the mzs of this Spectrum. Array of m/z values | def mzs(self):
return self._mzs | [
"def report_all_mzs(self):\n mzs = []\n for ser in self.ion_series_ary:\n for mz in ser.mz_ary:\n mzs.append(mz)\n return mzs",
"def get_spectra_mzs(imzml, pixel_numbers=[]):\n spectra = np.zeros(shape=(len(imzml.coordinates), len(imzml.getspectrum(0)[0])), dtype=... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the mzs of this Spectrum. Array of m/z values | def mzs(self, mzs):
self._mzs = mzs | [
"def mzs(self):\n return self._mzs",
"def report_all_mzs(self):\n mzs = []\n for ser in self.ion_series_ary:\n for mz in ser.mz_ary:\n mzs.append(mz)\n return mzs",
"def get_spectra_mzs(imzml, pixel_numbers=[]):\n spectra = np.zeros(shape=(len(imzml.coord... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the intensities of this Spectrum. Array of intensity values corresponding to mzs | def intensities(self, intensities):
self._intensities = intensities | [
"def mzs(self, mzs):\n\n self._mzs = mzs",
"def get_intensities(self):\n\n return [line.intensity for line in self.atlas_lines]",
"def test_normalize_intensities():\n mz = numpy.array([10, 20, 30, 40], dtype='float')\n intensities = numpy.array([0, 1, 10, 100], dtype='float')\n spectrum_i... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Gets the interpretations of this Spectrum. Array of coded interpretation strings of the peaks, corresponding to mzs | def interpretations(self):
return self._interpretations | [
"def report_all_mzs(self):\n mzs = []\n for ser in self.ion_series_ary:\n for mz in ser.mz_ary:\n mzs.append(mz)\n return mzs",
"def mzs(self):\n return self._mzs",
"def extractPeaks(sparky_file):\n\n f = open(sparky_file,'r')\n sparky = f.readlines()\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the interpretations of this Spectrum. Array of coded interpretation strings of the peaks, corresponding to mzs | def interpretations(self, interpretations):
self._interpretations = interpretations | [
"def _set_spectral_arrays(self):\n self.spectral_arrays = [ 'FLUX', 'IVAR', 'MASK' ]\n if self.mode == 'RSS' or (self.mode == 'CUBE' and 'LSFPOST' in self.ext):\n self.spectral_arrays += [ 'LSFPOST' ]\n if self.mode == 'RSS' or (self.mode == 'CUBE' and 'LSFPRE' in self.ext):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Solves for the Hamiltonian given certain parameters. N is the number of electrons, m is the mass, L is the length, and omega is the phonon frequency. | def Hamiltonian(N,m,L,omega):
N=N
omega1=omega
c = np.zeros((L,N))
b = np.zeros((L,))
for i in range(L):
for j in range(N):
c[i,j] = np.sqrt(2.0/(L+1)) * np.sin((j+1)*np.pi*(i+1)/(L+1))
b[i] = np.sqrt(1.0/(2.0*omega1)) * (np.sqrt(omega1) * i + 1j)
K = np.zeros((N*L, N... | [
"def compute_harmonics(self) :\n\n Ye = np.zeros((self.L_max+1,self.L_max+1,self.n_dir))\n Yo = np.zeros((self.L_max+1,self.L_max+1,self.n_dir))\n\n phi = np.zeros((self.n_dir,1))\n for i in xrange(0,self.n_dir) :\n phi[i] = np.arctan(self.omega[i,1]/self.omega[i,0])\n if self.omega[i,0] < 0. ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Begins indefinite crawling, sequentially calling each crawler, then sleeps the pause time before starting over. | def run(self):
while True:
for crawler in self.crawlers:
crawler.crawl()
print 'Sleeping for %s seconds' % self.crawl_wait
sleep(self.crawl_wait) | [
"def crawl(self):\r\n #beging analyzer and controller thread(actually called their run())\r\n self.__analyzer.start()\r\n self.__controller.start()\r\n #block until controller thread terminate\r\n self.__controller.join(3600)\r\n self.__analyzer.setStopCondition(True)\r\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Adds a crawler to the fleet. | def add_crawler(self, crawler):
self.crawlers.append(crawler) | [
"def RUN_CRAWLER(crawler_):\n crawler_.crawl()",
"def addCrawlers(self, crawlers, addTaskHolderVars=True):\n for crawler, filePath in self.query(crawlers).items():\n\n if addTaskHolderVars:\n # cloning crawler so we can modify it safely\n crawler = crawler.clone(... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
A dictionary to map required slots to an extracted entity | def slot_mappings(self) -> Dict[Text, Union[Dict, List[Dict]]]:
return {
"destination": self.from_entity(entity="destination", intent="inform"),
"origin": self.from_entity(entity="origin", intent="inform"),
"depart_date": self.from_entity(entity="depart_date", intent="inform... | [
"def slot_mappings(self) -> Dict[Text, Union[Dict, List[Dict]]]:\n\n return {\n \"dist\": [self.from_entity(entity = \"dist\", intent = \"dist_entry\")],\n }",
"def required_slots(tracker: Tracker) -> List[Text]:\n\n return [\"date_time\", \"phone_number\", \"person_number\",\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Puts products into the database | def products_to_database(self, products_lists):
cursor = DatabaseManager.connection_to_database(self)
for category_products_list in products_lists:
for products_dicts in category_products_list:
cursor.execute("INSERT INTO product (name, nom_category, ingredients, shops, "
... | [
"def insert_product(self, table):\n for i in self.products:\n # extract data\n name = i[\"name\"]\n quantity = i[\"quantity\"]\n brand = i[\"brand\"]\n description = i[\"description\"]\n url = i[\"url\"]\n rating = i[\"rating\"]\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Show categories from the DB in the console | def categories_show(self):
cursor = DatabaseManager.connection_to_database(self)
cursor.execute("SELECT * FROM category")
my_results = cursor.fetchall()
i = 1
cat_list = []
for cat_tuples in my_results:
for cat_str in cat_tuples:
cat_list2 ... | [
"def display_categories(self):\n self.get()",
"def _show_categories(self):\n for (key, val) in self.categories:\n separator = key % 5 == 0 and \"\\n\" or ' ' * (15 - len(val) * 2)\n print ('%02s: %s%s' % (key, val, separator)).encode('utf-8'),",
"def showCatalog():\n state... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The user selects the number of the category and this returns the name of the category | def category_name_chosen(self, category_number):
category_list = DatabaseManager.category_from_database(self)
category_position = category_number-1
category_name = category_list[category_position]
return category_name | [
"def category_choice(self):\n while True:\n print(\"============================================== \\n\"\n \"Voici la liste des catégories :\\n\"\n \"============================================== \\n\"\n \"Entrez -1 si vous désirez retourner au m... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the maximum number of products in the list | def get_number_products(self, products_whole_list):
for product in products_whole_list[-1]:
return product | [
"def number_of_products():\n return NUMBER_OF_PRODUCTS",
"def maxCount(lili, name):\n return max([li.count(name) for li in lili])",
"def maxNumberOfApples(self, arr):\r\n arr.sort()\r\n apples = units = 0\r\n for _, weight in enumerate(arr):\r\n units += weight\r\n if units > 5000:\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Filter, fetch and display alarms from AWS | def get_alarms(filters: dict) -> None:
try:
# filter out None values, because paginate can't handle them
filters = {k: v for k, v in filters.items() if v is not None}
        # instantiate client
client = boto3.client("cloudwatch")
# get paginator to iterate over
paginator = ... | [
"def alarms_cmd(args):\n\n alarms=AlarmManager(\n time_range=args.time_range,\n start_time=args.start_time,\n end_time=args.end_time,\n status_filter=args.status,\n filters=alarms_cmd_parse_filters(args.filters),\n event_filters=alarms_cmd_parse_filters(args.event_filter... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Ensure auth tokens are encoded correctly. | def test_encode_auth_token(self):
auth_token = encode_auth_token(1)
self.assertTrue(isinstance(auth_token, bytes)) | [
"def test_split_token():\n assert auth._split_token('badtokenvalue') == ''",
"def encode_token_auth(token, **kwargs):\n # NOTE: Only ASCII characters are allowed in HTTP headers.\n return {b\"Authorization\": b\"Bearer \" + token.encode(\"ascii\")}",
"def check_authorization(self):\n self.token"... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Ensure auth tokens are decoded correctly. | def test_decode_auth_token(self):
auth_token = encode_auth_token(1)
self.assertTrue(isinstance(auth_token, bytes))
self.assertTrue(decode_auth_token(auth_token.decode("utf-8")) == 1) | [
"def test_decode_token_invalid_input_2(_mocked_fetch_public_key, _mocked_get_audiences):\n assert decode_user_token(APP, \"Foobar\") is None",
"def test_decode():\n byujwt = byu_jwt.JWT_Handler()\n decoded_jwt = byujwt.decode(JWT, verify=False)\n assert decoded_jwt",
"def _consume_auth_token(self):\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test for starting ml with malformed bearer token. | def test_startml_malformed_bearer(self):
with self.client:
auth_token = encode_auth_token(1)
response = self.client.post(
'/ml/start',
headers=dict(
Authorization='Bearer' + auth_token.decode()
)
)
... | [
"def test_statusml_malformed_bearer(self):\n with self.client:\n auth_token = encode_auth_token(1)\n response = self.client.get(\n '/ml/status',\n headers=dict(\n Authorization='Bearer' + auth_token.decode()\n )\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test for starting ml with a blacklisted token. | def test_startml_blacklisted_token(self):
with self.client:
auth_token = encode_auth_token(1)
# Blacklist a valid token
blacklist_token = BlacklistToken(auth_token.decode())
db.session.add(blacklist_token)
db.session.commit()
# blacklisted ... | [
"def test_statusml_blacklisted_token(self):\n with self.client:\n auth_token = encode_auth_token(1)\n # Blacklist a valid token\n blacklist_token = BlacklistToken(auth_token.decode())\n db.session.add(blacklist_token)\n db.session.commit()\n #... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test for starting ml with an expired token. | def test_startml_expired_token(self):
with self.client:
auth_token = encode_auth_token(1)
# wait for token to be invalidated
time.sleep(6)
response = self.client.post(
'/ml/start',
headers=dict(
Authorization='Be... | [
"def test_statusml_expired_token(self):\n with self.client:\n auth_token = encode_auth_token(1)\n # wait for token to be invalidated\n time.sleep(6)\n response = self.client.get(\n '/ml/status',\n headers=dict(\n Aut... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test for starting ml with no provided files. | def test_startml_no_files(self):
with self.client:
auth_token = encode_auth_token(1)
response = self.client.post(
'/ml/start',
headers=dict(
Authorization='Bearer ' + auth_token.decode()
)
)
data ... | [
"def test_startml_empty_file_list(self):\n with self.client:\n auth_token = encode_auth_token(1)\n response = self.client.post(\n '/ml/start',\n headers=dict(\n Authorization='Bearer ' + auth_token.decode()\n ),\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test for starting ml with an empty file list. | def test_startml_empty_file_list(self):
with self.client:
auth_token = encode_auth_token(1)
response = self.client.post(
'/ml/start',
headers=dict(
Authorization='Bearer ' + auth_token.decode()
),
data=js... | [
"def empty(self):\r\n return _osgDB.stdFilePathList_empty(self)",
"def _empty(self) -> bool:\n return len(self.files) + len(self.directories) == 0",
"def test_include_filelist_with_blank_line(self):\n self.ParseTest([(\"--include-filelist\", \"file\")],\n [(), ('1',), ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test for starting ml with status that isn't 'Waiting for files.' | def test_startml_bad_status(self):
with self.client:
auth_token = encode_auth_token(1)
# set user status in db
status = MLStatus(1, "Processing.")
db.session.add(status)
db.session.commit()
# request
response = self.client.post(... | [
"def test_notify_run_status(self):\n pass",
"def test_statusml_no_status(self):\n with self.client:\n auth_token = encode_auth_token(1)\n response = self.client.get(\n '/ml/status',\n headers=dict(\n Authorization='Bearer ' + aut... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test for starting ml with correct status. | def test_startml(self):
with self.client:
auth_token = encode_auth_token(1)
# set user status in db
status = MLStatus(1, "Waiting for files.")
db.session.add(status)
db.session.commit()
# request
response = self.client.post(
... | [
"def test_startml_bad_status(self):\n with self.client:\n auth_token = encode_auth_token(1)\n # set user status in db\n status = MLStatus(1, \"Processing.\")\n db.session.add(status)\n db.session.commit()\n # request\n response = se... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test for ml status with no provided token. | def test_statusml_no_auth(self):
with self.client:
response = self.client.get(
'/ml/status'
)
data = json.loads(response.data.decode())
self.assertTrue(data['status'] == 'fail')
self.assertTrue(data['message'] == 'Provide a valid auth ... | [
"def test_statusml_no_status(self):\n with self.client:\n auth_token = encode_auth_token(1)\n response = self.client.get(\n '/ml/status',\n headers=dict(\n Authorization='Bearer ' + auth_token.decode()\n )\n )\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test for ml status with malformed bearer token. | def test_statusml_malformed_bearer(self):
with self.client:
auth_token = encode_auth_token(1)
response = self.client.get(
'/ml/status',
headers=dict(
Authorization='Bearer' + auth_token.decode()
)
)
... | [
"def test_startml_malformed_bearer(self):\n with self.client:\n auth_token = encode_auth_token(1)\n response = self.client.post(\n '/ml/start',\n headers=dict(\n Authorization='Bearer' + auth_token.decode()\n )\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test for ml status with a blacklisted token. | def test_statusml_blacklisted_token(self):
with self.client:
auth_token = encode_auth_token(1)
# Blacklist a valid token
blacklist_token = BlacklistToken(auth_token.decode())
db.session.add(blacklist_token)
db.session.commit()
# blacklisted... | [
"def test_startml_blacklisted_token(self):\n with self.client:\n auth_token = encode_auth_token(1)\n # Blacklist a valid token\n blacklist_token = BlacklistToken(auth_token.decode())\n db.session.add(blacklist_token)\n db.session.commit()\n # ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test for ml status with an expired token. | def test_statusml_expired_token(self):
with self.client:
auth_token = encode_auth_token(1)
# wait for token to be invalidated
time.sleep(6)
response = self.client.get(
'/ml/status',
headers=dict(
Authorization='B... | [
"def is_token_expired(self):\n now = datetime.now()\n dt = now - self.token_time\n return dt.total_seconds() > (60 * 30)",
"def test_startml_expired_token(self):\n with self.client:\n auth_token = encode_auth_token(1)\n # wait for token to be invalidated\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
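The expired-token tests above sleep for six seconds before retrying, which implies the test configuration issues very short-lived JWTs. A sketch of such an encoder, assuming PyJWT (which returned bytes prior to version 2.0, matching the .decode() calls in these tests); the secret and lifetime are illustrative:

    import datetime

    import jwt  # PyJWT

    def encode_auth_token(user_id, secret='test-secret', lifetime_seconds=5):
        # Short expiry on purpose: the tests above wait it out with time.sleep(6).
        payload = {
            'exp': datetime.datetime.utcnow() + datetime.timedelta(seconds=lifetime_seconds),
            'iat': datetime.datetime.utcnow(),
            'sub': user_id,
        }
        return jwt.encode(payload, secret, algorithm='HS256')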
Test for ml status with no previous status. | def test_statusml_no_status(self):
with self.client:
auth_token = encode_auth_token(1)
response = self.client.get(
'/ml/status',
headers=dict(
Authorization='Bearer ' + auth_token.decode()
)
)
dat... | [
"def test_skipped_status(self):\n job_set = self._jm.run([self._qc]*2, backend=self.fake_api_backend,\n max_experiments_per_job=1)\n jobs = job_set.jobs()\n jobs[1]._job_id = 'BAD_ID'\n statuses = job_set.statuses()\n self.assertIsNone(statuses[1])",
... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test for ml status. | def test_statusml(self):
with self.client:
auth_token = encode_auth_token(1)
# insert ml status
status = MLStatus(1, "Processing.")
db.session.add(status)
db.session.commit()
# request
response = self.client.get(
... | [
"def test_statusml_no_status(self):\n with self.client:\n auth_token = encode_auth_token(1)\n response = self.client.get(\n '/ml/status',\n headers=dict(\n Authorization='Bearer ' + auth_token.decode()\n )\n )\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test for getting classified json with malformed bearer token. | def test_getclassified_malformed_bearer(self):
with self.client:
auth_token = encode_auth_token(1)
response = self.client.get(
'/ml/classified',
headers=dict(
Authorization='Bearer' + auth_token.decode()
)
)
... | [
"def test_statusml_malformed_bearer(self):\n with self.client:\n auth_token = encode_auth_token(1)\n response = self.client.get(\n '/ml/status',\n headers=dict(\n Authorization='Bearer' + auth_token.decode()\n )\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test for getting classified json with blacklisted token. | def test_getclassified_blacklisted_token(self):
with self.client:
auth_token = encode_auth_token(1)
# Blacklist a valid token
blacklist_token = BlacklistToken(auth_token.decode())
db.session.add(blacklist_token)
db.session.commit()
# blackl... | [
"def test_statusml_blacklisted_token(self):\n with self.client:\n auth_token = encode_auth_token(1)\n # Blacklist a valid token\n blacklist_token = BlacklistToken(auth_token.decode())\n db.session.add(blacklist_token)\n db.session.commit()\n #... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
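Several of these records blacklist a token and then expect the endpoint to reject it. A minimal sketch of the lookup those endpoints presumably perform, assuming BlacklistToken is a Flask-SQLAlchemy model with a token column:

    from project.models import BlacklistToken  # assumed models module

    def token_is_blacklisted(token):
        # Illustrative only: consult the blacklist table before honouring
        # a bearer token; assumes a 'token' column on BlacklistToken.
        return BlacklistToken.query.filter_by(token=token).first() is not None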
Test for getting classified json with expired token. | def test_getclassified_expired_token(self):
with self.client:
auth_token = encode_auth_token(1)
# wait for token to be invalidated
time.sleep(6)
response = self.client.get(
                '/ml/classified',
headers=dict(
Authoriz... | [
"def jwt_expired(token: str) -> bool:\n payload = base64.b64decode(token.split('.')[1]).decode()\n if time.time() > json.loads(payload)['exp']:\n return True\n else:\n return False",
"def test_expired_token_failing_jwt_auth(self):\n payload = utils.jwt_payload_handler(self.user)\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Use a dynamic partition value based on the date parameter. | def partition_value(self):
return self.date.isoformat() # pylint: disable=no-member | [
"def add_partition_date(\n list_of_dicts: List[dict],\n partition_date: datetime,\n partition_type: bigquery.TimePartitioningType = bigquery.TimePartitioningType.DAY,\n partition_field: str = \"release_date\",\n):\n if partition_type == bigquery.TimePartitioningType.HOUR:\n partition_date = pa... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
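partition_value above is the usual Luigi idiom for Hive-partitioned tasks: the partition key is derived from the task's date parameter. A sketch of how such a property sits on a task, assuming luigi is available; apart from the class name, which is borrowed from the record below, the structure is hypothetical:

    import luigi

    class ModuleEngagementPartitionTask(luigi.Task):
        date = luigi.DateParameter()

        @property
        def partition_value(self):
            # ISO-8601 date, e.g. '2016-01-15', used as the Hive partition key.
            return self.date.isoformat()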
A generator that iterates through all tasks used to generate the data in each partition in the interval. This can be used by downstream map-reduce jobs to read all of the raw data. | def get_raw_data_tasks(self):
for task in self.requires():
if isinstance(task, ModuleEngagementPartitionTask):
yield task.data_task | [
"def generator_function(self, data):\n for i in range(self.batches_test):\n batch_data, batch_coords, batch_padding = [], [], []\n batch_indices = self.center_coords[i * self.batch_size: min((i + 1) * self.batch_size,\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
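Because get_raw_data_tasks filters requires() down to ModuleEngagementPartitionTask instances and yields their data_task, a downstream consumer can gather every raw input in one pass. A hypothetical fragment, meant to live inside another task's method:

    # Hypothetical fragment: collect the output target of every raw-data
    # task feeding the interval, e.g. to hand to a map-reduce job.
    raw_inputs = [task.output() for task in self.get_raw_data_tasks()]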
A generator that returns all fields that are metrics. | def get_metrics(self):
for field_name, field_obj in self.get_fields().items():
if getattr(field_obj, 'is_metric', False):
yield field_name, getattr(self, field_name) | [
"def iter_fields(self):\n\n yield \"date\", \"Date\", \"\", \"\"\n yield \"ts\", \"DateTime\", \"\", \"\"\n yield \"metric_type\", \"String\", \"\", \"\"\n for f in self.key_fields:\n yield f.field_name, f.field_type, \"\", \"\"\n yield \"labels\", \"Array(LowCardinalit... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
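Since get_metrics yields (field_name, value) pairs, callers can materialise the generator directly into a mapping; record below is a hypothetical instance of the owning class:

    # Hypothetical usage: collapse the generator into a name -> value mapping.
    metrics = dict(record.get_metrics())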
The ratio of attempts per correct problem submission is an indicator of how much a student is struggling. If a student has not completed any problems, a value of float('inf') is returned. | def compute_attempts_per_completion(num_problem_attempts, num_problems_completed):
if num_problems_completed > 0:
attempts_per_completion = float(num_problem_attempts) / num_problems_completed
else:
attempts_per_completion = float('inf')
return attempts_per_completion | [
"def mistake_scoop(self) -> float:\n if self.all_try_scoop == 0:\n return 0.0\n return 100*(self.all_try_scoop - self.try_scoop) / self.all_try_scoop",
"def calculate_results() -> int:\r\n all_answers: list = []\r\n for question in Question.objects.all():\r\n question_accurac... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
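Two worked cases make the contract concrete: a student with 12 attempts over 4 completions averages 3.0, and any attempts with zero completions map to infinity:

    assert compute_attempts_per_completion(12, 4) == 3.0
    assert compute_attempts_per_completion(5, 0) == float('inf')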
Identify delimiters in the data and strip them out to prevent parsing errors. Also, if self.max_field_length is set, then truncate the field to self.max_field_length. | def strip_and_truncate(field):
stripped = "regexp_replace(regexp_replace({}, '\\\\t|\\\\n|\\\\r', ' '), '\\\\\\\\', '')".format(field)
if self.max_field_length is not None:
stripped = "substring({}, 1, {})".format(stripped, self.max_field_length)
return stripped | [
"def _normalize_input_data(self, data, normalised_field_name='ADDRESS_norm'):\n # make a copy of the actual address field and run the parsing against it\n data[normalised_field_name] = data['ADDRESS'].copy()\n\n # remove white spaces from the end and beginning if present\n data[normalise... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
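Note that strip_and_truncate builds a Hive SQL expression rather than transforming the string in Python. Tracing the escapes, a hypothetical call with field='event' and self.max_field_length = 500 produces:

    # Inner strip only:
    #   regexp_replace(regexp_replace(event, '\\t|\\n|\\r', ' '), '\\\\', '')
    # With self.max_field_length = 500:
    #   substring(regexp_replace(regexp_replace(event, '\\t|\\n|\\r', ' '), '\\\\', ''), 1, 500)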
Determine whether any soldier in the army can move. | def canMove(mapObj, army, enemy):
for soldier in army:
if soldierCanMove(mapObj, soldier, army + enemy):
return True
return False | [
"def isSoldier(army, x, y):\n return getDirectionByPosition(x, y, army) is not None",
"def soldier(self, mf_board_row, mf_board_column, mt_board_row, mt_board_column):\n\n #ensures piece to be moved is a soldier & sets the moved to\n #piece owner info to a variable\n if self._XiangqiGame._... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
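canMove simply short-circuits over a soldierCanMove helper that is not shown in this record. An illustrative sketch of what such a helper might check, assuming soldiers carry x/y attributes and mapObj is a 2-D grid:

    def soldierCanMove(mapObj, soldier, blockers):
        # Illustrative only: movable if any 4-neighbour cell is on the map
        # and not occupied by another soldier.
        occupied = {(s.x, s.y) for s in blockers if s is not soldier}
        for dx, dy in ((1, 0), (-1, 0), (0, 1), (0, -1)):
            nx, ny = soldier.x + dx, soldier.y + dy
            if 0 <= nx < len(mapObj) and 0 <= ny < len(mapObj[0]) and (nx, ny) not in occupied:
                return True
        return False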
Convert the game state to an army number. | def getArmyFromState(state):
return 1 if '1' in state else 2 | [
"def state_transform(state):\n if isinstance(state, str):\n return np.array([int(s) for s in state])\n else:\n return str(state)[1:-1].replace(' ', '')",
"def state_to_int(p, statelist):\n # convert statelist to string\n state = ''.join([str(s) for s in statelist])\n # construct uniqu... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
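getArmyFromState keys off the digit embedded in the state string, so any state naming army 1 must contain the character '1'. Illustrative usage with hypothetical state labels:

    assert getArmyFromState('army1_turn') == 1   # contains '1' -> army 1
    assert getArmyFromState('army2_turn') == 2   # no '1' anywhere -> army 2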