| query (string, lengths 9-9.05k) | document (string, lengths 10-222k) | negatives (list, 19-20 items) | metadata (dict) |
|---|---|---|---|
Convert a dataframe of correlation data into a matrix | def make_correlation_matrices(dataframe):
# Group the data by residue then use a pivot table-like function
# to create correlation matrices
if u'residue' in dataframe.index.names:
correlation_group = dataframe.reset_index().groupby(u'residue')
else:
correlation_group = dataframe.groupby... | [
"def create_correlation_matrix(df, columns):\r\n correlations = df.corr()\r\n fig = plt.figure()\r\n ax = fig.add_subplot(111)\r\n cax = ax.matshow(correlations, vmin=-1, vmax=1)\r\n fig.colorbar(cax)\r\n ticks = np.arange(0,9,1)\r\n ax.set_xticks(ticks)\r\n ax.set_yticks(ticks)\r\n ax.se... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Calculate wave solution using 16th order finite difference | def solve_wave_FD16(U,Up,h,c,ncpml,b,psi,phi):
ny , nx = U.shape
for i in range(8,ny-8):
for j in range(8,nx-8):
Up[i,j] = 2.0*U[i,j] - Up[i,j] + c[i-8,j-8]* \
((-735*U[i-8,j]+15360*U[i-7,j]-156800*U[i-6,j]+1053696*U[i-5,j]-5350800*U[i-4,j]+22830080*U[i-3,j]-94174080*... | [
"def solve_wave_FD12(U,Up,h,c,ncpml,b,psi,phi):\n ny , nx = U.shape\n for i in range(6,ny-6):\n for j in range(6,nx-6):\n Up[i,j] = 2.0*U[i,j] - Up[i,j] + c[i-8,j-6]* \\\n ((-50*U[i-6,j]+864*U[i-5,j]-7425*U[i-4,j]+44000*U[i-3,j]-222750*U[i-2,j]+1425600*U[i-1,j]-2480478... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Calculate wave solution using 12th order finite difference | def solve_wave_FD12(U,Up,h,c,ncpml,b,psi,phi):
ny , nx = U.shape
for i in range(6,ny-6):
for j in range(6,nx-6):
Up[i,j] = 2.0*U[i,j] - Up[i,j] + c[i-8,j-6]* \
((-50*U[i-6,j]+864*U[i-5,j]-7425*U[i-4,j]+44000*U[i-3,j]-222750*U[i-2,j]+1425600*U[i-1,j]-2480478*U[i+0,j]+1... | [
"def solve_wave_FD16(U,Up,h,c,ncpml,b,psi,phi):\n ny , nx = U.shape\n for i in range(8,ny-8):\n for j in range(8,nx-8):\n Up[i,j] = 2.0*U[i,j] - Up[i,j] + c[i-8,j-8]* \\\n ((-735*U[i-8,j]+15360*U[i-7,j]-156800*U[i-6,j]+1053696*U[i-5,j]-5350800*U[i-4,j]+22830080*U[i-3,j... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Calculate wave solution using 8th order finite difference | def solve_wave_FD8(U,Up,h,c,ncpml,b,psi,phi):
ny , nx = U.shape
for i in range(4,ny-4):
for j in range(4,nx-4):
Up[i,j] = 2.0*U[i,j] - Up[i,j] + c[i-4,j-4]* \
((-9*U[i,j-4]+128*U[i,j-3]-1008*U[i,j-2]+8064*U[i,j-1]-14350*U[i,j+0]+8064*U[i,j+1]-1008*U[i,j+2]+128*U[i,j+3... | [
"def solve_wave_FD16(U,Up,h,c,ncpml,b,psi,phi):\n ny , nx = U.shape\n for i in range(8,ny-8):\n for j in range(8,nx-8):\n Up[i,j] = 2.0*U[i,j] - Up[i,j] + c[i-8,j-8]* \\\n ((-735*U[i-8,j]+15360*U[i-7,j]-156800*U[i-6,j]+1053696*U[i-5,j]-5350800*U[i-4,j]+22830080*U[i-3,j... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Calculate wave solution using 4th order finite difference | def solve_wave_FD4(U,Up,h,c,ncpml,b,psi,phi):
ny , nx = U.shape
for i in range(2,ny-2):
for j in range(2,nx-2):
Up[i,j] = 2.0*U[i,j] - Up[i,j] + c[i-2,j-2]* \
((-1*U[i-2,j]+16*U[i-1,j]-30*U[i,j]+16*U[i+1,j]-1*U[i+2,j]) + \
(-1*U[i,j-2]+16*U[i,... | [
"def solve_wave_FD12(U,Up,h,c,ncpml,b,psi,phi):\n ny , nx = U.shape\n for i in range(6,ny-6):\n for j in range(6,nx-6):\n Up[i,j] = 2.0*U[i,j] - Up[i,j] + c[i-8,j-6]* \\\n ((-50*U[i-6,j]+864*U[i-5,j]-7425*U[i-4,j]+44000*U[i-3,j]-222750*U[i-2,j]+1425600*U[i-1,j]-2480478... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Calculate wave solution using 2nd order finite difference | def solve_wave_FD2(U,Up,h,c,ncpml,b,psi,phi):
ny , nx = U.shape
for i in range(1,ny-1):
for j in range(1,nx-1):
Up[i,j] = 2.0*U[i,j] - Up[i,j] + c[i-1,j-1]* \
(U[i-1,j]-2*U[i,j]+U[i+1,j]+ \
(U[i,j-1]-2*U[i,j]+U[i,j+1]))/h/h
#CPML boundary ... | [
"def wfDerivative(signalRaw,sp=10.):\n signalDeriv = np.zeros(len(signalRaw))\n for i in range(len(signalRaw)-1):\n signalDeriv[i] = (signalRaw[i+1] - signalRaw[i])/sp\n return signalDeriv",
"def solve_wave_FD4(U,Up,h,c,ncpml,b,psi,phi):\n ny , nx = U.shape\n for i in range(2,ny-2):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Transforms a given value into a node | def to_node(value: str) -> Node:
if not value:
res = Empty()
elif value in CONCATENATION_SYMBOLS:
res = Concatenation()
elif value in UNION_SYMBOLS:
res = Union()
elif value in KLEENE_STAR_SYMBOLS:
res = KleeneStar()
elif value in EPSILON_SYMBOLS:
res = Epsilo... | [
"def convert(gr, raw_node):\r\n type, value, context, children = raw_node\r\n if children or type in gr.number2symbol:\r\n # If there's exactly one child, return that child instead of\r\n # creating a new node.\r\n if len(children) == 1:\r\n return children[0]\r\n return... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Find if x is a member of the tree in O(lg lg u) time | def vEBTreeSearch(self, x):
if self.min == x or self.max == x:
return True
elif self.u <= 2:
return False
else:
cluster_of_x = self.clusters[self.high(x)]
if cluster_of_x is not None:
return cluster_of_x.isMember(self.low(x))
... | [
"def _is_member(self, leaf):\n\n return leaf in self.leaves",
"def __contains__ (self, target):\n node = self.root\n while node:\n rc = node.compareTo(target)\n if rc > 0:\n node = node.left\n elif rc < 0:\n node = node.right\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Compute the mean and variance of the latent function at some new points Xnew. | def predict_f(self, Xnew: InputData, full_cov: bool = False, full_output_cov: bool = False) -> MeanAndVariance:
# metadata
X_data, Y_data = self.data
num_inducing = self.inducing_variable.num_inducing
# compute initial matrices
err = Y_data - self.mean_function(X_data)
... | [
"def predict_f(\n self,\n params: dict,\n Xnew: InputData,\n key: jnp.DeviceArray = None,\n num_inducing_samples: int = None,\n full_cov: bool = False,\n full_output_cov: bool = False,\n ) -> MeanAndCovariance:\n if num_inducing_samples is None:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Predict the mean and variance for unobserved values at some new points Xnew. | def predict_y(self, Xnew: InputData, full_cov: bool = False, full_output_cov: bool = False) -> MeanAndVariance:
mean, var = self.predict_f(Xnew)
return mean, var + self.likelihood.noise_variance(mean, var) | [
"def predict(self, Xnew):\n if self.compute_invcdf:\n for _, x in np.ndenumerate(self.X):\n x.compute_inv_cdf(self.spline_basis)\n\n scores = np.ones(Xnew.shape[0]) * self.intercept\n for i in range(Xnew.shape[0]):\n for j in range(self.n_predictors):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Inspects each line of the command output and checks to see if the command errored. This check is used for APIs that get data and do not simply report a success message. | def _no_error_in_output(self, stdout):
for line in stdout:
if line.startswith("ERROR:"):
return False
return True | [
"def command_error(self, response):\n # The command should be echoed back accurately (might be\n # preceeded by a '- ' if it is part of a program definition) and\n # no errors should be returned, if it has no errors.\n return (response[2] not in [response[0], '- ' + response[0]]\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Send request via RabbitMQ | def send_to_rabbit(dialogue: Dialogue):
global channel_send
_prepare()
channel_send.basic_publish(
exchange='',
routing_key=QUEUE_REQUESTS,
body=dumps({'id': dialogue.id, 'request': dialogue.request, 'meta': dialogue.meta})
) | [
"def setup_amqp_req(amqp):\n amqp.request_access(username=hookenv.config('rabbit-user'),\n vhost=hookenv.config('rabbit-vhost'))\n trove.assess_status()",
"def test_send_message(self):\n query_string = [('type', 'private'),\n ('to', [56]),\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Receive request from RabbitMQ. Dialogue and other dialogues may be updated in the database | def receive_from_rabbit(dialogue: Dialogue) -> Dialogue:
dialogue_from_db = Dialogue.objects.get(id=dialogue.id)
if dialogue_from_db.rabbit_updated:
dialogue_from_db.rabbit_updated = False
dialogue_from_db.save()
return dialogue_from_db
global channel_receive
_prepare()
whi... | [
"def handle_request(self, cleanup=True):\n try:\n self.amqp_channel.wait()\n finally:\n if cleanup:\n self._cleanup()",
"def handle(self):\n request_data = parse_request_json(self.request)\n response = None\n if request_data[SC.MSG_TITLE] == ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Shift f0 by a number of octaves. | def shift_f0(audio_features, f0_octave_shift=0.0):
audio_features['f0_hz'] *= 2.0 ** (f0_octave_shift)
audio_features['f0_hz'] = np.clip(
audio_features['f0_hz'],
0.0,
librosa.midi_to_hz(110.0)
)
return audio_features | [
"def setNumOfOctaves(self, octaves) -> None:\n ...",
"def zero_base_shift(*args):\n return [arg - 1 for arg in args]",
"def bandpass_octaves(x, fs, frequencies=NOMINAL_OCTAVE_CENTER_FREQUENCIES, order=8, purge=False, zero_phase=False):\n return bandpass_fractional_octaves(x, fs, frequencies, fr... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
get content from all url | def get_content(self, content_url):
response = requests.get(content_url)
page = response.json()
content = page['results']
while page['next'] is not None:
content_url = page['next']
response = requests.get(content_url)
page = response.json()
... | [
"def fetch_url_content(self, url):\n response = requests.get(url)\n response.raise_for_status()\n return response.content",
"def get_content(nothing):\n url = 'http://www.pythonchallenge.com/pc/def/linkedlist.php?nothing=%s' % nothing\n \n return fetch_url(url)",
"def _get_all_url(... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return whether exactly two of the arguments are equal and the third is not. >>> two_equal(1, 2, 3) False >>> two_equal(1, 2, 1) True >>> two_equal(1, 1, 1) False >>> result = two_equal(5, 1, 1) return, don't print >>> result True | def two_equal(a, b, c):
l = [a,b,c]
if len(set(l)) == 2:
return True
else:
return False | [
"def two_equal(a, b, c):\n if a == b and b != c:\n return True\n elif b == c and c != a:\n return True\n elif c == a and a != b:\n return True\n else:\n return False",
"def is_same_or_all_different(a: T, b: T, c: T) -> bool:\n is_same = a == b and b == c\n if is_same:... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return the integer height of a near-golden rectangle with PERIMETER. >>> near_golden(42) 8 x 13 rectangle has perimeter 42 8 >>> near_golden(68) 13 x 21 rectangle has perimeter 68 13 >>> result = near_golden(100) return, don't print >>> result 19 | def near_golden(perimeter):
smallest = perimeter
for w in range(1, perimeter//2):
h = perimeter//2 - w
dif = abs(h/w - (w/h-1) )
if dif < smallest:
smallest = dif
h_best = h
return h_best | [
"def near_golden(perimeter):\n height = 1\n while height < perimeter/2:\n width = perimeter/2 - height\n if abs(height/width - width/height + 1) < 0.02:\n return height\n height += 1cd",
"def height_tree(tree: Tree) -> int:\n if tree is None:\n return -1\n left_h... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the case insensitive word in text that has most vowels. Return a tuple of the matching word and the vowel count, e.g. ('objectoriented', 6) | def get_word_max_vowels(text):
count = 0
max_vowel_count = []
text = [word.lower() for word in text.split()]
for word in text:
num_vowels = Counter([letter for letter in word\
if letter in VOWELS])
vowel_sum = sum(num_vowels.values())
if vowel_sum >= count:
... | [
"def get_word_max_vowels(text):\r\n vowel_counts = [(word, sum(word.count(vowel) for vowel in VOWELS)) for word in text.split()]\r\n return max(vowel_counts, key=itemgetter(1))",
"def num_vowels(word):\n return sum(char in VOWELS for char in word.lower())",
"def search4vowels(word):\n vowels = set('... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get season-level Fielding Statistics for Specific Team (from Baseball-Reference) | def team_fielding_bref(team: str, start_season: int, end_season: Optional[int]=None) -> pd.DataFrame:
if start_season is None:
raise ValueError(
"You need to provide at least one season to collect data for. " +
"Try team_fielding_bref(season) or team_fielding_bref(start_season, end_... | [
"def test_fantasy_defense_season_stats_by_team(self):\n pass",
"def test_player_season_stats_by_team(self):\n pass",
"def test_team_season_stats(self):\n pass",
"def get_seasonal_statistics(self, season_id, wnba_season, team_id):\n path = \"wnba/trial/v4/en/seasons/{season_id}/{wnb... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Updates the character whether the user got the question right or wrong. | def update_character(both_correct, character, is_complete, is_correct, type, user, session_number):
now = datetime.datetime.now()
base_character = BaseCharacter.objects.get(character=character)
user_object = User.objects.get(username=user.username)
character_object = ProgressCharacter.objects.get(charac... | [
"def process_question(self):\n for rb in self.rbs:\n rb.configure(state = DISABLED)\n if self.var.get()==self.questions[self.index].answer: \n self.correct += 1\n self.feedback.config(text = \"Correct! \" + str(self.correct) + \"/\" + str(self.index + 1))\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Convert any valid color format to a black and white rgb tuple >>> bw((0, 4, 210)) (71.333333333333329, 71.333333333333329, 71.333333333333329) >>> bw([0, 4, 210]) (71.333333333333329, 71.333333333333329, 71.333333333333329) | def bw(value):
r, g, b = _int2rgbtuple(rgb(value))
g = (r + g + b) / 3.0
return g, g, g | [
"def white_rgb(w,h):\n m = []\n for i in range(h):\n fila = []\n for j in range(w):\n fila += [(255,255,255)]\n m += [fila]\n return (\"RGB\", m)",
"def toRGB(self, _rgbu):\n newRGB = 0\n for iVal, val in enumerate(_rgbu):\n if self.bitDepths[iVal] > 0:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Convert any valid color format to cmyk tuple >>> cmyk((0, 4, 210)) (0.82352941176470584, 0.80784313725490198, 0, 0.17647058823529416) >>> cmyk([0, 4, 210]) (0.82352941176470584, 0.80784313725490198, 0, 0.17647058823529416) >>> cmyk([0.1, 0.1, 0.2, 0.2]) [0.10000000000000001, 0.10000000000000001, 0.20000000000000001, 0.... | def cmyk(value):
cvalue, type = convertcolor(value)
if cvalue is None:
raise ValueError, ('Unknown color type: "%s"' % value)
if type == ISCMYKFLAG:
return cvalue
elif type == ISRGBFLAG:
return _rgb2cmyk(_int2rgbtuple(cvalue)) | [
"def rgb_to_cmyk(self,tup):\n if sum(tup) == 0: # black\n return 0, 0, 0, self.cmyk_scale\n \n # set values and normalize\n r,g,b = tup\n r /= self.rgb_scale\n g /= self.rgb_scale\n b /= self.rgb_scale\n \n # extract CMYK values\n k = ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Convert from a RGB color tuple to RGB integer >>> _rgbtuple2int((0, 4, 210)) 1234L >>> _rgbtuple2int((0, 48, 57)) 12345L >>> _rgbtuple2int((0, 214, 216)) 55000L >>> _rgbtuple2int((6, 241, 88)) 455000L | def _rgbtuple2int(t):
r = int(round(t[0])) # Not all r,g,b values are necessarily
g = int(round(t[1])) # integers, eg from _cmyk2rgb
b = int(round(t[2])) # so we round and truncate them
return long(r) * 65536 + ... | [
"def int_from_rgb(rgb):\n return (round(rgb[0] * 0xff) << 16) + \\\n (round(rgb[1] * 0xff) << 8) + \\\n round(rgb[2] * 0xff)",
"def rgb_from_int(val):\n return tuple([\n ((val >> 16) & 0xff) / 0xff,\n ((val >> 8) & 0xff) / 0xff,\n (val & 0xff) / 0xff])",
"def to_rgb(valu... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Convert from a RGB color integer to RGB tuple >>> _int2rgbtuple(1234) (0, 4, 210) >>> _int2rgbtuple(12345) (0, 48, 57) >>> _int2rgbtuple(55000) (0, 214, 216) >>> _int2rgbtuple(455000) (6, 241, 88) | def _int2rgbtuple(i):
if not isinstance(i, int): # We need to have an integer here
try: # so we try to make it
i = int(i) # otherwise return and error message
except:
raise ValueError, ('Value is not an inte... | [
"def rgb_from_int(val):\n return tuple([\n ((val >> 16) & 0xff) / 0xff,\n ((val >> 8) & 0xff) / 0xff,\n (val & 0xff) / 0xff])",
"def _rgbtuple2int(t):\n r = int(round(t[0])) # Not all r,g,b values are necessarily\n g = int(round(t[1])) ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Convert from a CMYK color tuple to an RGB color tuple From the Adobe Postscript Ref. Manual 2nd ed. Page 306 >>> _cmyk2rgb((1, 1, 1, 1)) (0.0, 0.0, 0.0) >>> _cmyk2rgb((0, 0, 0, 0)) (255.0, 255.0, 255.0) >>> _cmyk2rgb((0.2, 0.6, 0.8, 0.2)) (153.0, 50.999999999999986, 0.0) | def _cmyk2rgb((c, m, y, k), density=1):
r = 1.0 - min(1.0, c + k)
g = 1.0 - min(1.0, m + k)
b = 1.0 - min(1.0, y + k)
return (r * 255, g * 255, b * 255) | [
"def cmyk_to_rgb(self,tup):\n # set values and normalize\n c,m,y,k = tup\n c /= self.cmyk_scale\n m /= self.cmyk_scale\n y /= self.cmyk_scale\n k /= self.cmyk_scale\n \n # convert to RGB and scale\n r = int(round(self.rgb_scale*(1.0-c)*(1.0-k),0))\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
One way to get cmyk from rgb. >>> _rgb2cmyk((100, 100, 100)) (0, 0, 0, 0.60784313725490202) >>> _rgb2cmyk((0, 0, 0)) (0, 0, 0, 1) >>> _rgb2cmyk((1, 1, 1)) (0, 0, 0, 0.99607843137254903) | def _rgb2cmyk((r, g, b)):
c = 1 - (r / 255.0)
m = 1 - (g / 255.0)
y = 1 - (b / 255.0)
k = min(c, m, y)
c = min(1, max(0, c - k))
m = min(1, max(0, m - k))
y = min(1, max(0, y - k))
k = min(1, max(0, k))
return float(c), float(m), float(y), float(k) | [
"def rgb_to_cmyk(self,tup):\n if sum(tup) == 0: # black\n return 0, 0, 0, self.cmyk_scale\n \n # set values and normalize\n r,g,b = tup\n r /= self.rgb_scale\n g /= self.rgb_scale\n b /= self.rgb_scale\n \n # extract CMYK values\n k = ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Traverse instructions in array, incrementing accumulator, until an instruction repeats | def processor(instruction_array):
processed_array = [True] * len(instruction_array)
current_iter = 0
accumulation = 0
while processed_array[current_iter]:
processed_array[current_iter] = False
if instruction_array[current_iter][0] == "n":
current_iter += 1
elif instru... | [
"def find_acc(inputs):\n acc = i = 0\n length = len(inputs)\n visited = set()\n\n while True:\n visited.add(i)\n\n instr, num = inputs[i]\n delta = 1\n if instr == \"acc\":\n acc += num\n elif instr == \"jmp\":\n delta = num\n\n i += delta\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Redirect client to camera feed | async def index(request):
# Parse URL for camera ID
cam_id = request.args.get(CAM_ID_PARAM)
if cam_id:
# Get camera data from source JSON
cam = get_camera_by_id(CAM_ID_KEY, cam_id)
if cam:
ip = cam[CAM_IP_FIELD]
# Redirect to camera feed
retu... | [
"def capture():\r\n (x, y) = global_camera.get_coordinates()\r\n # Label snapshot image with the x- and y-coordinates:\r\n path = \"/capture/X{}Y{}.jpeg\".format(x,y)\r\n return redirect(path)",
"def get_camera_url(self):\n return self._url + '/camera'",
"def video_feed():\n return Respons... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
get the security token by connecting to TouchWorks API | def get_token(self, appname, username, password):
ext_exception = TouchWorksException(
TouchWorksErrorMessages.GET_TOKEN_FAILED_ERROR)
data = {'Username': username,
'Password': password}
resp = self._http_request(TouchWorksEndPoints.GET_TOKEN, data)
try:
... | [
"def _getToken(self):\n\n url = \"https://login.microsoftonline.com/{0!s}/oauth2/token\".format(self._tenant_id)\n \n payload = {\n \"grant_type\":\"client_credentials\",\n \"client_id\":self._client_id,\n \"client_secret\": self._client_secret,\n \"r... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
checks if the cached token is valid or has expired by comparing the time the token was created with the current time | def _token_valid(self):
if not self._cache_token:
return False
now = time.time()
if now - self._token.acquired_time > self._token_timeout:
logger.debug('token needs to be reset')
return False
return True | [
"def is_token_expired(self):\n now = datetime.now()\n dt = now - self.token_time\n return dt.total_seconds() > (60 * 30)",
"def is_token_valid(self):\r\n if not self.auth_token:\r\n return False\r\n\r\n if not self.auth_token_expires:\r\n return False\r\n\r... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
search document types by name and active(Y/N) status | def find_document_type_by_name(self, entity_name, active='Y',
match_case=True):
all_types = self.get_dictionary('Document_Type_DE')
if match_case:
filtered = filter(
lambda x: x['Active'] == active and x['EntryName'].find(entity_name) >= 0,
... | [
"def search_items(keywords, meta_types=None):",
"def find_by_type(cls, rec_type):\n return (cls.query.filter_by(rec_type_id = rec_type.id)\n .filter(cls.rec_type.has(RecommendationType.is_active == True))).all()",
"def searchObjTypeDerive(self,keys_list=None,query_objType=\".obj.pub\"):\n\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
utility method to create a magic json object needed to invoke TouchWorks APIs | def _magic_json(self, action='', user_id='', app_name='', patient_id='',
token='', parameter1='', parameter2='',
parameter3='', parameter4='', parameter5='',
parameter6='', data=''):
if not token:
token = self._token.token
if not ap... | [
"def makeJson(self, lstype='lsys', payload=''): \n return self.mapping.prepareJson(lstype, payload)",
"def _generate_json_response(self, context):\n raise NotImplementedError",
"def _reprJSON(self):\n return {'__Fgi__': self.__dict__}",
"def _generate_swagger_json(self, app):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This should return some meaningful result. You could do something like check to make sure both the input and output files are identical, or that some other aspect of your test passed. This is called automatically once the forwarder has finished executing the test. You can return whatever you like, or even just print a ... | def result(self, receiver_outfile):
if not os.path.exists(receiver_outfile):
raise ValueError("No such file %s" % str(receiver_outfile))
if self.files_are_the_same(self.input_file, receiver_outfile):
print "Test passes!"
return True
else:
print "Te... | [
"def _succeed(self):\n print(self.test_case + ': succeeded')\n exit(0)",
"def _process_output(port, options, test_input, test_types, test_args,\r\n test_output, worker_name):\r\n failures = []\r\n\r\n if test_output.crash:\r\n failures.append(test_failures.FailureCras... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Show all rides in the database (for testing). | def all_rides(request):
context_instance=RequestContext(request)
data = {
'subtitle': 'Current Rides',
'matching': False,
'rides': Ride.get_all_rides()}
return render_to_response('rides.html', data,
context_instance=context_instance) | [
"def get_all_rides():",
"def get_all_rides():\n return jsonify({'RideIds': listdir(RIDE_LOCATIONS)})",
"def get_rides(self):\n self.driver.get(self.STRAVA + \"/athlete/training\")\n # Switch form to only bike rides\n self.driver.find_element_by_xpath(\"//select[@id='activity_type']\"\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Show all rides that the user has ever taken. | def current_rides(request):
context_instance=RequestContext(request)
user = context_instance['user']
data = {
'subtitle': 'Current Rides',
'matching': False,
'rides': Ride.get_all_rides_for_user(user)}
return render_to_response('rides.html', data,
context_instance=context... | [
"def all_rides(request):\n context_instance=RequestContext(request)\n data = {\n 'subtitle': 'Current Rides',\n 'matching': False,\n 'rides': Ride.get_all_rides()}\n return render_to_response('rides.html', data,\n context_instance=context_instance)",
"def get_all_rides():",
... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Given a ride_id and a previous search request, allow the user to join the ride. | def join_ride(request, ride_id):
context_instance = RequestContext(request)
user = context_instance['user']
ride = Ride.objects.get(pk=ride_id)
if '_search_request' in request.session:
# user is attempting to add this ride
sr_post = request.session['_search_request']
rr_form = Ri... | [
"def request_ride(self):\n result = \"Invalid id\"\n for ride_info in rides.all_rides:\n if ride_info['ride_id'] == self.ride_id:\n result ={\"success\": (\"you have requested to join the ride from\",\n ride_info['from_where'], \"to\", ride_info[\"to\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This function drops a user from a ride if they are part of it. Otherwise, it does nothing and reports an error. | def drop_ride(request, ride_id):
context_instance = RequestContext(request)
user = context_instance['user']
query = Q(ride__id=ride_id) &\
Q(user__id=user.id)
sr = SearchRequest.objects.filter(query)
if sr:
sr = sr[0]
ride = sr.ride
sr.delete()
mess... | [
"def cancel_ride(self, rideID):\n ride=Ride.objects.get(id=rideID)\n if ride.ride_started:\n return 0\n\n ride.offer.status = 'C'\n ride.offer.save() \n\n pickup_point = self.global_address_cache.get_address((ride.offer.pickup_point.latitude,ride.offer.pickup_poi... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Page displaying ride information, along with a chat window. First, confirm that the user is or has been part of this ride. Then, fetch the ride information. | def ride_info(request, ride_id):
context_instance = RequestContext(request)
user = context_instance['user']
ride = Ride.objects.get(pk=ride_id).filled_out()
# If they have submitted a request and it is in bounds of the ride, let them
# see this ride.
# Next, check if they are part of this ride.... | [
"def join_ride(request, ride_id):\n context_instance = RequestContext(request)\n user = context_instance['user']\n ride = Ride.objects.get(pk=ride_id)\n if '_search_request' in request.session:\n # user is attempting to add this ride\n sr_post = request.session['_search_request']\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Form to request a new ride. | def request_ride(request):
data = {'subtitle': 'Request or Create New Ride'}
return render_to_response('new_ride.html', data,
RequestContext(request)) | [
"def new_ride(request):\n\n context_instance = RequestContext(request)\n\n # A POST request indicates that a DescriptionForm has been submitted.\n if request.method == 'POST':\n rr_form = RideRequestForm(request.session['_search_request'])\n if rr_form.is_valid():\n sr = rr_form.sa... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Show rides matching the search criteria of the request. | def search_rides(request):
context_instance=RequestContext(request)
# Store the valid form in the user's session so new_ride or ride_info can
# pick it up.
if request.method == 'POST':
rr_form = RideRequestForm(request.POST)
request.session['_search_request'] = request.POST
elif... | [
"def all_rides(request):\n context_instance=RequestContext(request)\n data = {\n 'subtitle': 'Current Rides',\n 'matching': False,\n 'rides': Ride.get_all_rides()}\n return render_to_response('rides.html', data,\n context_instance=context_instance)",
"def get_all_rides():",
... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Page to enter description of new ride, and submit it. | def new_ride(request):
context_instance = RequestContext(request)
# A POST request indicates that a DescriptionForm has been submitted.
if request.method == 'POST':
rr_form = RideRequestForm(request.session['_search_request'])
if rr_form.is_valid():
sr = rr_form.save(commit = F... | [
"def request_ride(request):\n data = {'subtitle': 'Request or Create New Ride'}\n return render_to_response('new_ride.html', data,\n RequestContext(request))",
"def new_ride(request):\n context = {'user': request.user}\n return render(request, 'cabrides/new_ride.html', context)",
"def create_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test the get_project by using KECHAIN_FORCE_ENV_USE=True | def test_get_project__force_env_use_no_vars(self):
self.env.set("KECHAIN_FORCE_ENV_USE", "True")
with self.env:
self.assertTrue(env.bool(KechainEnv.KECHAIN_FORCE_ENV_USE))
with self.assertRaisesRegex(ClientError, "should be provided as environment variable"):
# K... | [
"def test_runtime_envs_get(self):\n pass",
"def test_env_init(generic_task):\n assert generic_task.get_env() == 'KKK'",
"def test_env(self, env_flag, env, fake_project_cli, fake_metadata, mocker):\n mock_environ = mocker.patch(\"os.environ\", {})\n result = CliRunner().invoke(\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
compute the Laplacian of phi | def lap(gr, phi):
lapphi = gr.scratch_array()
ib = gr.ilo
ie = gr.ihi
lapphi[ib:ie+1] = (phi[ib-1:ie] - 2.0*phi[ib:ie+1] + phi[ib+1:ie+2])/gr.dx**2
return lapphi | [
"def laplacian(self,W):\n # Degree matrix.\n d = W.sum(axis=0)\n # Laplacian matrix.\n d = 1 / np.sqrt(d)\n D = sp.diags(d.A.squeeze(), 0)\n I = sp.identity(d.size, dtype=W.dtype)\n L = I - D * W * D\n\n assert type(L) is sp.csr.csr_matrix\n return L",
... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
the main evolution loop. Evolve phi_t = k phi_{xx} from t = 0 to tmax | def evolve(nx, k, t0, phi1, phi2, C, tmax):
# create the grid
gr = diffimplicit.Grid(nx, ng=1, xmax=1.0)
# time info
dt = C*0.5*gr.dx**2/k
t = 0.0
# initialize the data
gr.phi[:] = gr.phi_a(0.0, k, t0, phi1, phi2)
while t < tmax:
gr.fill_BCs()
# make sure we end rig... | [
"def setup_phi_eff(self):\n momentum = self.p0\n time = 0.\n phi_eff = 0.\n for turn in range(self.n_turns+5):\n # evolve through one full revolution\n time += self.tof(momentum)\n self.phase_list[0].append(momentum)\n # phi_eff is the phase th... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Compute and return a person's age in years. | def compute_age(birth):
birthday = datetime.strptime(birth, "%Y-%m-%d")
today = datetime.now()
# Compute the difference between today and the birthday in years.
years = today.year - birthday.year
# If necessary, subtract one from the difference.
if birthday.month > today.month or \
... | [
"def get_age(self):\n if self.basics['death']:\n return self.basics['death'] - self.basics['birth']\n else:\n return datetime.datetime.now().year - self.basics['birth']",
"def age_calc(self):\n if self.professor_dob is not False:\n self.age = (datetime.today()... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Tries to cast value to bool. Raises ValueError if value is ambiguous. Raises TypeError for unsupported types. | def cast_bool(value) -> bool:
if isinstance(value, bool):
return value
if isinstance(value, str):
if value.lower() == 'true':
return True
elif value.lower() == 'false':
return False
else:
raise ValueError("Ambiguous value of " + value)
else... | [
"def convert_to_bool(value: object) -> bool:\n # if number, then bool it\n # if string, try to convert to float\n # if float converts, then bool the result\n # if float does not convert then look for truthy string and bool True\n # else False\n truthy = ['y', 'yes', 'true', '*']\n\n if is... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Recursively iterates on dictionary and swaps boolean-like values with proper booleans. | def swap_booleans(dictionary: dict, inplace: bool=True) -> dict: # TODO: Extend functionality to lists too
if not inplace:
dictionary = copy.deepcopy(dictionary)
for key in dictionary.keys():
if isinstance(dictionary[key], dict):
dictionary[key] = swap_booleans(dictionary[key], inpl... | [
"def replace_string_bool_to_bool(dictionary: Dict[str, Any]) -> Dict[str, Any]:\n for key, item in dictionary.items():\n if isinstance(item, str):\n if item.lower() == \"true\":\n dictionary[key] = True\n elif item.lower() == \"false\":\n dictionary[key]... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
returns ix, bitsa, bitsb where ix is the first different index and bitsa has a 1 there. | def __find_one_zero(self, bits1, bits2):
for ix in range(len(bits1)):
if bits1[ix] != bits2[ix]:
return (ix, bits1, bits2) if bits1[ix] else (ix, bits2, bits1)
return (-1, bits1, bits2) | [
"def getBit( value , index ):\n return (value >> index) & 1",
"def get_bit(number, bit_index):\n return number >> bit_index & 1",
"def get_bit(k, i):\n return k[0][i]",
"def positions_mask_to_tuple(self,positions):\n return tuple(i for i in range(16) if (positions & (1 << i)) != 0)",
"def ge... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Fill a feed dictionary with the actual set of images and labels for this particular training step. | def feed_dict(is_train, data_set):
feed_images, feed_labels = data_set.next_batch(Utils.batch_size)
return {x_placeholder: feed_images, y_placeholder: feed_labels, is_training: is_train} | [
"def fill_feed_dict(self, batch):\n feed_dict = {\n self.inputs: batch.data,\n self.targets: batch.labels,\n }\n return feed_dict",
"def fill_feed_dict(self, data):\n\n feed_dict = {\n self.encode_input_placeholder : data[\"encoder_inputs\"],\n self.... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Runs one evaluation against the full epoch of data. | def eval_full_epoch(data_set):
true_count = 0 # Counts the number of correct predictions.
data_set.start_new_epoch() # Start new epoch for evaluation
steps_per_epoch = data_set.num_examples // Utils.batch_size
num_examples = steps_per_epoch * Utils.batch_size
for _ in xrange(steps_per_epoch):
... | [
"def train_one_epoch(self, *args, **kwargs):\r\n raise NotImplementedError",
"def eval_one_epoch(sess, ops, test_writer,tracks=False,lstm_params=None):\n global EPOCH_CNT\n is_training = False\n log_string(str(datetime.now()))\n log_string('---- EPOCH %03d EVALUATION ----' % (EPOCH_CNT))\n t... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Private method that changes the value into baseline type (chooses between int and float). | def __set_value_type(self, baseline, value):
if (
isinstance(baseline, int | numpy.int32 | numpy.int64)
):
return round(value)
if isinstance(baseline, float):
return float(value)
return None | [
"def record(self, value: typing.Union[float, int]) -> None:",
"def test__get_value_types_float(self):\n value, m_type = formatters._get_value_types(1.1)\n assert value == 1.1\n assert m_type == 'float'",
"def setGenericFloat(*args, **kwargs):\n \n pass",
"def _set_float(self, pa... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Function that linearly fills missing values, if the successive missing values are up to (max_seconds) apart. | def linear_fill_missing_values(self, activity, key, max_seconds=15):
index = 0
count = len(activity[key])
while index < count:
if activity[key][index] is None:
to = self.__missing_from_to(activity[key], index)
if to + 1 < len(activity[key]):
... | [
"def fill_nan(train_raw):\n return train_raw.fillna(method='ffill')",
"def gap_filling(data,NoDataValue):\n\t \n # fill the no data values\n if NoDataValue is np.nan:\n mask = ~(np.isnan(data))\n else:\n mask = ~(data==NoDataValue)\n xx, yy = np.meshgrid(np.arange(data.shape[1]... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Add group to pickUpQueue | def enqueueGroup(self, group):
# Tell the group that it's enqueued
group.enqueue(self.genTimeToPickUp())
# and store it on the vehicle
self.pickUpQueue.append(group)
self.occupancy += group.groupSize | [
"def pickUpGroup(self, group):\n\n self.pickUpQueue.remove(group)\n group.pickUp()\n self.groups.append(group)\n self.genHoldTime()",
"def newGroup(self):\n self.appendJobGroup()\n self.currentGroup = self.groupInstance(subscription=self.subscription)\n map(lambda ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Move group from pickUpQueue to car | def pickUpGroup(self, group):
self.pickUpQueue.remove(group)
group.pickUp()
self.groups.append(group)
self.genHoldTime() | [
"def enqueueGroup(self, group):\n\n # Tell the group that it's enqueued\n group.enqueue(self.genTimeToPickUp())\n\n # and store it on the vehicle\n self.pickUpQueue.append(group)\n\n self.occupancy += group.groupSize",
"def move_people(self):\n people = self.people.custom... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Use the credentials to authenticate the Azure CLI. | def login_cli(creds_data):
app_id = creds_data['application-id']
app_pass = creds_data['application-password']
sub_id = creds_data['subscription-id']
tenant_id = _get_tenant_id(sub_id)
try:
log('Forcing logout of Azure CLI')
_azure('logout')
except AzureError:
pass
tr... | [
"def azure_credentials(self) -> 'outputs.AzureCredentialsResponse':\n return pulumi.get(self, \"azure_credentials\")",
"def _get_resource_creds_from_cli(\n cloud: dict, args: Namespace\n ) -> Tuple[identity.AzureCliCredential, identity_aio.AzureCliCredential]:\n try:\n logger.in... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Tag the given instance with the given tags. | def tag_instance(request):
log('Tagging instance with: {}', request.instance_tags)
_azure('vm', 'update',
'--name', request.vm_name,
'--resource-group', request.resource_group,
'--set', *['tags.{}={}'.format(tag, value)
for tag, value in request.instance_ta... | [
"def set_tags(self, url_prefix, microver, instance, tags):\n try:\n for tag in tags:\n instance.add_tag(self.conn, tag)\n except AttributeError:\n # Try a low-level access if SDK version is old\n for tag in tags:\n response = self.conn.put... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Enable instance inspection access for the given application. | def enable_instance_inspection(request):
log('Enabling instance inspection')
_assign_role(request, _get_role('vm-reader')) | [
"def setAppInstance(self, instance):\n pass",
"def requested_instance_inspection(self):\n return bool(self._unit.received[\"enable-instance-inspection\"])",
"def application_enableapi(self, application_enableapi):\n\n self._application_enableapi = application_enableapi",
"def application_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Enable network management for the given application. | def enable_network_management(request):
log('Enabling network management')
_assign_role(request, StandardRole.NETWORK_MANAGER) | [
"def enable_network(self):\n if self._is_admin():\n completed = subprocess.run(args=['netsh', 'interface', 'set', 'interface', '\"Wi-Fi\"', 'enable'])\n print(\"Enable Wi-Fi \", completed.returncode)\n completed = subprocess.run(args=['netsh', 'interface', 'set', 'interface',... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Enable security management for the given application. | def enable_security_management(request):
log('Enabling security management')
_assign_role(request, StandardRole.SECURITY_MANAGER) | [
"def enable(app, user_class, role_class):\n app.logger.info('Enabling security')\n user_class.add_cross_reference(role_class)\n\n # configure security with role and user classes\n user_datastore = SQLAlchemyUserDatastore(app.db, user_class, role_class)\n app.security = Security(app, user_datastore)\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Enable block storage (disk) management for the given application. | def enable_block_storage_management(request):
log('Enabling block storage management')
_assign_role(request, _get_role('disk-manager')) | [
"def enable_storage(self):\n self.storage_enabled = True",
"def attach_and_activate_disks(request, storage):\n self = request.node.cls\n\n read_only = getattr(self, 'read_only', False)\n storage_helpers.prepare_disks_for_vm(\n self.vm_name, self.disk_names, read_only\n )",
"def request... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Enable DNS management for the given application. | def enable_dns_management(request):
log('Enabling DNS management')
_assign_role(request, StandardRole.DNS_MANAGER) | [
"def enable_app() -> None:\n \n copyfile(\"config/hostapd\", \"/etc/default/hostapd\")\n copyfile(\"config/dhcpcd.conf\", \"/etc/dhcpcd.conf\")\n copyfile(\"config/dnsmasq.conf\", \"/etc/dnsmasq.conf\")\n\n subprocess.run([\"systemctl\", \"daemon-reload\"])",
"def update_dns_config(self, context):\n p... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Enable object storage readonly access for the given application. | def enable_object_storage_access(request):
log('Enabling object storage read')
_assign_role(request, StandardRole.OBJECT_STORE_READER) | [
"def enable_object_storage_management(request):\n log('Enabling object store management')\n _assign_role(request, StandardRole.OBJECT_STORE_MANAGER)",
"def enable_storage(self):\n self.storage_enabled = True",
"def read_only(self):\n\n self._read_only = True",
"def make_readonly(path: str)... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Enable object storage management for the given application. | def enable_object_storage_management(request):
log('Enabling object store management')
_assign_role(request, StandardRole.OBJECT_STORE_MANAGER) | [
"def enable_storage(self):\n self.storage_enabled = True",
"def enable_object_storage_access(request):\n log('Enabling object storage read')\n _assign_role(request, StandardRole.OBJECT_STORE_READER)",
"def enable_block_storage_management(request):\n log('Enabling block storage management')\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Update all custom roles based on current definition file. | def update_roles():
sub_id = kv().get('charm.azure.sub-id')
known_roles = {}
for role_file in Path('files/roles/').glob('*.json'):
role_name = role_file.stem
role_data = json.loads(role_file.read_text())
role_fullname = role_data['Name'].format(sub_id)
scope = role_data['Assi... | [
"def apply_roles(self):\n minion_sets = []\n role_sets = []\n for instance in self.instances:\n minion = instance.get('minion')\n roles = set(minion.roles or [])\n for role in instance.get('roles', []):\n roles.add(role)\n roles = list(... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Elide s in the middle to ensure it is under max_len. That is, shorten the string, inserting an ellipsis where the removed characters were to show that they've been removed. | def _elide(s, max_len, ellipsis='...'):
if len(s) > max_len:
hl = (max_len - len(ellipsis)) / 2
headl, taill = floor(hl), ceil(hl)
s = s[:headl] + ellipsis + s[-taill:]
return s | [
"def shorten(s, maxlen=20):\n halflen = int(maxlen / 2) - 2\n return s if len(s) <= maxlen else (s[:halflen]+'...'+s[-halflen:])[:maxlen]",
"def _truncate(s, limit): \n\ts = force_unicode(s) \n\tif len(s) <= limit: \n\t\treturn s \n\treturn '%s...' % s[:max(1, limit - 3)] \n\ttruncate = allow_lazy(truncate,... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Translate the subscription ID into a tenant ID by making an unauthorized request to the API and extracting the tenant ID from the WWWAuthenticate header in the error response. | def _get_tenant_id(subscription_id):
url = ('https://management.azure.com/subscriptions/'
'{}?api-version=2018-03-01-01.6.1'.format(subscription_id))
try:
urlopen(url)
log_err('Error getting tenant ID: did not get "unauthorized" response')
return None
except HTTPError as e... | [
"def http_get_tenant_id(tenant_id):\n my = config_dic['http_threads'][threading.current_thread().name]\n\n try:\n tenant = my.ovim.show_tenant_id(tenant_id)\n delete_nulls(tenant)\n change_keys_http2db(tenant, http2db_tenant, reverse=True)\n data = {'tenant': tenant}\n retur... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Call the azurecli tool. | def _azure(cmd, *args, return_stderr=False):
cmd = ['az', cmd]
cmd.extend(args)
result = subprocess.run(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout = result.stdout.decode('utf8').strip()
stderr = result.stderr.decode('utf8').str... | [
"def cli():\n\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\n '-U', '--cot_url', help='URL to CoT Destination.',\n required=True\n )\n parser.add_argument(\n '-K', '--fts_token', help='FreeTAKServer REST API Token.'\n )\n parser.add_argument(\n '-S', '--... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Translate short role name into a full role name and ensure that the custom role is loaded. The custom roles have to be applied to a specific subscription ID, but the subscription ID applies to the entire credential, so will almost certainly be reused, so there's not much danger in hitting the 2k custom role limit. | def _get_role(role_name):
known_roles = kv().get('charm.azure.roles', {})
if role_name in known_roles:
return known_roles[role_name]
sub_id = kv().get('charm.azure.sub-id')
role_file = Path('files/roles/{}.json'.format(role_name))
role_data = json.loads(role_file.read_text())
role_fullna... | [
"async def role(self, context, *text):\n \n if text[0] in config[\"roles\"].keys():\n subrole = \" \".join(text[1:])\n if subrole in config[\"roles\"].keys():\n await self.bot.say(toggle_role_subrole(text[0], subrole))\n else:\n await self.bot.say... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Convert the key to PEM format. Expects bytes | def _convert_ec_pub_to_pem(raw_pub_key):
public_key_der = bytearray.fromhex(
'3059301306072A8648CE3D020106082A8648CE3D03010703420004') + raw_pub_key
public_key_b64 = base64.b64encode(public_key_der).decode('ascii')
public_key_pem = '-----BEGIN PUBLIC KEY-----\n%s\n-----END PUBLIC KEY-----' % '\n'.jo... | [
"def pem(self) -> str:\n return self.key.private_bytes(\n encoding=serialization.Encoding.PEM,\n format=serialization.PrivateFormat.TraditionalOpenSSL,\n encryption_algorithm=serialization.NoEncryption(),\n )",
"def toPEM(self):\n return self.x509.as_pem()",
... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the I2C address of the crypto chip or None if chip is not installed. | def ecc608_find_chip():
for addr in (0x30, 0x60, 0x62):
if _ecc608_check_address(addr):
logger.info('Found crypto chip at 0x%x', addr)
return addr
logger.warning('No crypto detected, using SW.')
return None | [
"def get_i2c_address(self):\n return self.i2c_address",
"def i2c_config(self) -> int:\n return self._read_reg(_REG_I2C_CONFIG, 1)[0]",
"def getPiI2CBusNumber() -> int:\n rev = _getPiRevision()\n if rev > 1:\n return 1\n elif rev == 1:\n return 0\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
get the current user's tracking_id, sets new one if blank; | def tracking_id(request):
if request.session.get(TRACKING_ID_SESSION_KEY, '') == '':
request.session[TRACKING_ID_SESSION_KEY] = _generate_tracking_id()
return request.session[TRACKING_ID_SESSION_KEY] | [
"def current_user_id():\n if not hasattr(g, 'current_user_id'):\n try:\n id = int(request.headers.get(HEADER_CURRENT_USER_ID_KEY))\n except:\n id = 1\n if not id:\n id = 1\n setattr(g, 'current_user_id', id)\n return g.current_user_id",
"def ga_tr... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
function for generating secure random tracking ID values | def _generate_tracking_id():
tracking_id_length = 48
characters = string.ascii_letters + string.digits + string.punctuation
tracking_id = ''.join((secrets.choice(characters)
for _ in range(tracking_id_length)))
return tracking_id | [
"def gen_id():\n return \"{:04x}\".format(random.randint(0, int(0xFFFF)))",
"def get_random_sensor_id():\n return \"\".join(random.choice(\"0123456789abcdef\") for i in range(12))",
"def new_id():\n bs = uuid4().bytes\n return urlsafe_b64encode(bs).strip().replace('=', '')",
"def generate_id():\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
strip out common words, limit to 5 words | def _prepare_words(search_text):
words = search_text.split()
for common in STRIP_WORDS:
if common in words:
words.remove(common)
return words[0:100] | [
"def delete_common_words(data):",
"def remove_common_words(words):\n common_words = [\"the\", \"for\", \"of\" ]\n return [w for w in words if w not in common_words ]",
"def prepare_words(search_text):\n for common in STRIP_SYMBOLS:\n if common in search_text:\n search_text = search_te... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Prepends a column of ones to the design matrix (bias term) | def prepend_bias_term(X):
return np.column_stack((np.ones(X.shape[0]), X)) | [
"def zero_bias(matrix):\n matrix[:,0] = 0\n return(matrix)",
"def add_bias_feature(data,bias):\n for n in range(len(data)):\n l = len(data[n])\n data[n] = np.c_[ data[n], bias*np.ones((l,1)) ]",
"def add_ones_column_to_matrix(mat):\n\tshape = list(mat.shape)\n\tshape[1] += 1\n\tres = np.o... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the hash of a given file, using either MD5 or just the size in bytes If the size is too big, MD5 will not be run and the byte size will be used instead This is to save some power and time, as large files are unlikely to have byte size conflict Also for Cloud files it avoids downloading the file again | def get_hash(self, file_path: pathlib.Path):
size = os.path.getsize(file_path)
if size > self.size_threshold:
return str(os.path.getsize(file_path))
return hashlib.md5(open(file_path, 'rb').read()).hexdigest() | [
"def filehash(file):\n hasher = hashlib.md5()\n f = open(file, 'rb')\n buf = f.read()\n hasher.update(buf)\n return hasher.hexdigest()",
"def calc_md5( path_filename ):\n hash_md5 = hashlib.md5()\n with open( path_filename , \"rb\") as f:\n for chunk in iter(lambda: f.read(4096), b\"\"... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Plot a 3D pose 4x4 homogeneous transform on given axis 'axes' with given 'axis_length'. | def plot_pose3_on_axes(axes, T, axis_length=0.1, center_plot=False, line_obj_list=None):
return plot_pose3RT_on_axes(axes, *decompose_T(T), axis_length, center_plot, line_obj_list) | [
"def plot_pose3RT_on_axes(axes, gRp, origin, axis_length=0.1, center_plot=False, line_obj_list=None):\n # draw the camera axes\n x_axis = origin + gRp[:, 0] * axis_length\n linex = np.append(origin, x_axis, axis=0)\n \n y_axis = origin + gRp[:, 1] * axis_length\n liney = np.append(origin, y_axis, ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Plot a 3D pose on given axis 'axes' with given 'axis_length'. | def plot_pose3RT_on_axes(axes, gRp, origin, axis_length=0.1, center_plot=False, line_obj_list=None):
# draw the camera axes
x_axis = origin + gRp[:, 0] * axis_length
linex = np.append(origin, x_axis, axis=0)
y_axis = origin + gRp[:, 1] * axis_length
liney = np.append(origin, y_axis, axis=0)
... | [
"def draw_axes(axes, origin=(-1, -1, -1), length=(2, 2, 2)):\n x, y, z = origin\n dx, dy, dz = length\n axes.plot([x, x+dx], [y, y], [z, z], color='black')\n axes.plot([x, x], [y, y+dy], [z, z], color='black')\n axes.plot([x, x], [y, y], [z, z+dz], color='black')",
"def plot_pose(pose):\n import... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Take a file in binary format and a dictionary of hashes and run a fixity check against the first one found that's supported by hashlib. | def download_fixity_checker(resource_dict):
fixity_obj = {
'hash_algorithm': None,
'source_hash': None,
'presqt_hash': None,
'fixity': None,
'fixity_details': None,
'title': resource_dict['title'],
'path': resource_dict['path']
}
fixity_match = True
... | [
"def verify(hash_file, path, exclude=None, relaxed=False, quiet=False):\n is_file = False\n if os.path.isdir(path):\n pass\n elif os.path.isfile(path):\n is_file = True\n else:\n return HashResult.BAD_PATH\n\n # load hashes\n try:\n hash_info = load(hash_file)\n exce... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Reads the most recent system activities files into a Pandas DataFrame. Combines data from the latest file in all existing directories and removes duplicates. | def get_all_system_activities(
base_directory: str, nrows: Union[int, None] = None
) -> pd.DataFrame:
files = fr.get_system_activities_files(base_directory)
if files is None or len(files) == 0:
return _default()
df_list = list()
for f in files:
df_list.append(_read_csv(f,... | [
"def df_from_files(self):\n print('Creating dataframe...')\n num = len([name for name in os.listdir(self.raw) if not name[0] == '.'])\n files = os.path.join(self.raw, '~.info.json') # This is a weird hack\n files = files.replace('~', '{:05d}') # It allows path joining to work on Windows\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Reads the most recent section associations file for the given section into a Pandas DataFrame. | def get_section_associations(
base_directory: str, section_id: int, nrows: Union[int, None] = None
) -> pd.DataFrame:
file = fr.get_section_associations_file(base_directory, section_id)
if file is not None:
return _read_csv(file, nrows)
return _default() | [
"def get_all_section_associations(\r\n base_directory: str, sections: pd.DataFrame, nrows: Union[int, None] = None\r\n) -> pd.DataFrame:\r\n return _get_data_for_section(\r\n base_directory, sections, get_section_associations, nrows\r\n )",
"def get_section_activities(\r\n base_directory: str, ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Reads the most recent section associations files for all given sections into a Pandas DataFrame. | def get_all_section_associations(
base_directory: str, sections: pd.DataFrame, nrows: Union[int, None] = None
) -> pd.DataFrame:
return _get_data_for_section(
base_directory, sections, get_section_associations, nrows
) | [
"def get_section_associations(\r\n base_directory: str, section_id: int, nrows: Union[int, None] = None\r\n) -> pd.DataFrame:\r\n file = fr.get_section_associations_file(base_directory, section_id)\r\n\r\n if file is not None:\r\n return _read_csv(file, nrows)\r\n\r\n return _default()",
"def g... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
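The `get_all_*` rows delegate to `_get_data_for_section`, which is also not shown here. A hedged sketch of such a dispatcher follows; it assumes the `sections` frame exposes an `id` column, which is an assumption rather than something stated in the rows.

```python
from typing import Callable, Union
import pandas as pd

def _get_data_for_section(
    base_directory: str,
    sections: pd.DataFrame,
    reader: Callable[[str, int, Union[int, None]], pd.DataFrame],
    nrows: Union[int, None] = None,
) -> pd.DataFrame:
    # Call the per-section reader once per section id and stack the results.
    frames = [reader(base_directory, int(section_id), nrows) for section_id in sections["id"]]
    if not frames:
        return pd.DataFrame()
    return pd.concat(frames, ignore_index=True)
```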
Reads the most recent section activities file for the given section into a Pandas DataFrame. | def get_section_activities(
base_directory: str, section_id: int, nrows: Union[int, None] = None
) -> pd.DataFrame:
file = fr.get_section_activities_file(base_directory, section_id)
if file is not None:
return _read_csv(file, nrows)
return _default() | [
"def get_all_section_activities(\r\n base_directory: str, sections: pd.DataFrame, nrows: Union[int, None] = None\r\n) -> pd.DataFrame:\r\n return _get_data_for_section(\r\n base_directory, sections, get_section_activities, nrows\r\n )",
"def get_attendance_events(\r\n base_directory: str, secti... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Reads the most recent section activities files for all given sections into a Pandas DataFrame. | def get_all_section_activities(
base_directory: str, sections: pd.DataFrame, nrows: Union[int, None] = None
) -> pd.DataFrame:
return _get_data_for_section(
base_directory, sections, get_section_activities, nrows
) | [
"def get_section_activities(\r\n base_directory: str, section_id: int, nrows: Union[int, None] = None\r\n) -> pd.DataFrame:\r\n file = fr.get_section_activities_file(base_directory, section_id)\r\n\r\n if file is not None:\r\n return _read_csv(file, nrows)\r\n\r\n return _default()",
"def get_a... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Reads the most recent assignments file for the given section into a Pandas DataFrame. | def get_assignments(
base_directory: str, section_id: int, nrows: Union[int, None] = None
) -> pd.DataFrame:
file = fr.get_assignments_file(base_directory, section_id)
if file is not None:
return _read_csv(file, nrows)
return _default() | [
"def get_all_assignments(\r\n base_directory: str, sections: pd.DataFrame, nrows: Union[int, None] = None\r\n) -> pd.DataFrame:\r\n return _get_data_for_section(base_directory, sections, get_assignments, nrows)",
"def get_submissions(\r\n base_directory: str,\r\n section_id: int,\r\n assignment_id:... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Reads the most recent assignments files for all given sections into a Pandas DataFrame. | def get_all_assignments(
base_directory: str, sections: pd.DataFrame, nrows: Union[int, None] = None
) -> pd.DataFrame:
return _get_data_for_section(base_directory, sections, get_assignments, nrows) | [
"def get_assignments(\r\n base_directory: str, section_id: int, nrows: Union[int, None] = None\r\n) -> pd.DataFrame:\r\n file = fr.get_assignments_file(base_directory, section_id)\r\n\r\n if file is not None:\r\n return _read_csv(file, nrows)\r\n\r\n return _default()",
"def get_submissions(\r\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Reads the most recent submissions file for the given section and assignment into a Pandas DataFrame. | def get_submissions(
base_directory: str,
section_id: int,
assignment_id: int,
nrows: Union[int, None] = None,
) -> pd.DataFrame:
file = fr.get_submissions_file(base_directory, section_id, assignment_id)
if file is not None:
return _read_csv(file, nrows)
return _default(... | [
"def load_submission_data(webcat_path, onlyfinal=True):\n\n cols_of_interest = [\n 'userName',\n 'assignment',\n 'submissionNo',\n 'score.correctness',\n 'max.score.correctness',\n 'elements',\n 'elementsCovered',\n 'submissionTimeRaw',\n 'dueDateRaw... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Reads the most recent grades file for the given section into a Pandas DataFrame. | def get_grades(
base_directory: str, section_id: int, nrows: Union[int, None] = None
) -> pd.DataFrame:
file = fr.get_grades_file(base_directory, section_id)
if file is not None:
return _read_csv(file, nrows)
return _default() | [
"def get_all_grades(\r\n base_directory: str, sections: pd.DataFrame, nrows: Union[int, None] = None\r\n) -> pd.DataFrame:\r\n return _get_data_for_section(base_directory, sections, get_grades, nrows)",
"def _read(self, profile_filename):\n # header=0 because docs say to if using skip rows and column... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Reads the most recent grades files for all given sections into a Pandas DataFrame. | def get_all_grades(
base_directory: str, sections: pd.DataFrame, nrows: Union[int, None] = None
) -> pd.DataFrame:
return _get_data_for_section(base_directory, sections, get_grades, nrows) | [
"def get_grades(\r\n base_directory: str, section_id: int, nrows: Union[int, None] = None\r\n) -> pd.DataFrame:\r\n file = fr.get_grades_file(base_directory, section_id)\r\n\r\n if file is not None:\r\n return _read_csv(file, nrows)\r\n\r\n return _default()",
"def get_all_assignments(\r\n b... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Reads the most recent attendance events file for the given section into a Pandas DataFrame. | def get_attendance_events(
base_directory: str, section_id: int, nrows: Union[int, None] = None
) -> pd.DataFrame:
file = fr.get_attendance_events_file(base_directory, section_id)
if file is not None:
return _read_csv(file, nrows)
return _default() | [
"def get_all_attendance_events(\r\n base_directory: str, sections: pd.DataFrame, nrows: Union[int, None] = None\r\n) -> pd.DataFrame:\r\n return _get_data_for_section(base_directory, sections, get_attendance_events, nrows)",
"def get_events_from_study_guide(filename):\n\n df = get_df_from_pdf(filename)\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Reads the most recent attendance events files for all given sections into a Pandas DataFrame. | def get_all_attendance_events(
base_directory: str, sections: pd.DataFrame, nrows: Union[int, None] = None
) -> pd.DataFrame:
return _get_data_for_section(base_directory, sections, get_attendance_events, nrows) | [
"def get_attendance_events(\r\n base_directory: str, section_id: int, nrows: Union[int, None] = None\r\n) -> pd.DataFrame:\r\n file = fr.get_attendance_events_file(base_directory, section_id)\r\n\r\n if file is not None:\r\n return _read_csv(file, nrows)\r\n\r\n return _default()",
"def get_eve... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return True if answer_id found in answer table. | def valid_answer_id(answer_id):
SQL = """SELECT id FROM answer WHERE id = %s;"""
data = (answer_id,)
fetch = "one"
try:
found_id = db.run_statements(((SQL, data, fetch),))[0][0]
except (DatabaseError, TypeError):
return False
return True | [
"def _is_answer_correct(self, answer_id=int, question_id=int):\n try:\n for question in self.questions_json_list:\n print question\n print \"Q_ID: \" + str(question['id'])\n print \"PASSED Q_ID: \" + str(question_id)\n if question['id'] =... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
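The answer-table rows from here on all funnel their SQL through `db.run_statements(((SQL, data, fetch),))`, a helper that is not part of this extract. A hedged sketch of one way such a helper could sit on top of psycopg2, just to illustrate the `(sql, data, fetch)` tuple convention; the connection string is a placeholder assumption.

```python
import psycopg2

def run_statements(statements, dsn="dbname=forum"):
    # Hypothetical helper: execute each (sql, data, fetch) tuple inside one transaction.
    results = []
    with psycopg2.connect(dsn) as connection:
        with connection.cursor() as cursor:
            for sql, data, fetch in statements:
                cursor.execute(sql, data)
                if fetch == "one":
                    results.append(cursor.fetchone())
                elif fetch == "all":
                    results.append(cursor.fetchall())
                else:
                    results.append(None)
    return results
```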
Answer message must be at least 10 characters long to return True, else False. | def valid_answer_message(a_form):
answer_message = len(a_form.get("message", ''))
if answer_message >= 10:
return True
return False | [
"def check_message(message):\n return False if len(message) > 20000 else True",
"def is_length(message):\n\n if len(message) <= 25:\n return True\n else:\n return False",
"def tweetswarm_string_validate(s):\n return s.__len__() < 140 and s.__len__() > 0",
"def check_size(msg):\n\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Add 1 to answer counter of question with question_id, or reduce counter by one, depending on operation. | def update_answer_counter(question_id, operation):
number = 1 if operation == "ADD" else 0
if number:
SQL = """UPDATE question SET answer_count = answer_count + 1 WHERE id = %s;"""
else:
SQL = """UPDATE question SET answer_count = answer_count - 1 WHERE id = %s;"""
data = (question_id,)... | [
"def change_vote_count(conn, direction, question_id=None, answer_id=None):\n if question_id:\n table = \"question\"\n the_id = question_id\n elif answer_id:\n table = \"answer\"\n the_id = answer_id\n SQL2 = \"\"\"SELECT question_id FROM answer WHERE id = %s;\"\"\"\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return new answer with message from form, initialized without image. | def create_new_answer_no_image(message, question_id, user_name):
init_time = helper.create_timestamp()
init_votes = 0
init_image = None
accepted = False
new_answer = [init_time, init_votes, question_id, message, init_image, accepted, user_name]
return new_answer | [
"def to_form(self, message):\n form = GameForm()\n form.urlsafe_key = self.key.urlsafe()\n form.user_one = self.user_one.get().name\n form.user_two = self.user_two.get().name\n form.turn = self.turn.get().name\n form.current_round = self.current_round\n form.game_ove... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
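A short usage sketch tying the answer helpers above together when posting a new answer. It assumes the functions from these rows plus a persistence step that is not shown, so it is a hypothetical call site rather than the dataset's own code.

```python
def post_answer(question_id, a_form, user_name):
    # Reject messages shorter than the 10-character minimum.
    if not valid_answer_message(a_form):
        return False
    new_answer = create_new_answer_no_image(a_form["message"], question_id, user_name)
    # insert_answer(new_answer)  # hypothetical insert helper, not shown in these rows
    update_answer_counter(question_id, "ADD")
    return True
```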
Rename answer image in answer table where answer_id found. | def rename_answer_image(filename, answer_id):
SQL = """UPDATE answer SET image = %s WHERE id = %s;"""
data = (filename, answer_id)
fetch = None
db.run_statements(((SQL, data, fetch),)) | [
"def get_answer_image_and_q_id(answer_id):\n SQL = \"\"\"SELECT image, question_id FROM answer WHERE id = %s;\"\"\"\n data = (answer_id,)\n fetch = \"one\"\n\n a_img_and_id = db.run_statements(((SQL, data, fetch),))[0]\n return a_img_and_id",
"def remove_answer_image(answer_id):\n SQL = \"\"\"UP... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return answer image and question_id based on answer_id. | def get_answer_image_and_q_id(answer_id):
SQL = """SELECT image, question_id FROM answer WHERE id = %s;"""
data = (answer_id,)
fetch = "one"
a_img_and_id = db.run_statements(((SQL, data, fetch),))[0]
return a_img_and_id | [
"def get_answer_details(answer_id):\n SQL = \"\"\"SELECT id, submission_time, vote_number, question_id, message, image\n FROM answer WHERE id = %s;\"\"\"\n data = (answer_id,)\n fetch = \"one\"\n\n answer = db.run_statements(((SQL, data, fetch),))[0]\n return answer",
"def get_data_imag... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return whole answer with all details where answer_id found. | def get_answer_details(answer_id):
SQL = """SELECT id, submission_time, vote_number, question_id, message, image
FROM answer WHERE id = %s;"""
data = (answer_id,)
fetch = "one"
answer = db.run_statements(((SQL, data, fetch),))[0]
return answer | [
"def view_answers(self, id):\n answers = self.db\n quiz_and_answers = answers[id-1]\n return quiz_and_answers",
"def get_answer_by_id(answer_id):\n\n return Answer.query.get(answer_id)",
"def get_answer_to_question(question_id):\n return Question.query.filter_by(id=question_id).first_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Update answer message in answer table where answer_id found. | def update_answer_message(answer_id, message):
SQL = """UPDATE answer SET message = %s WHERE id = %s;"""
data = (message, answer_id)
fetch = None
db.run_statements(((SQL, data, fetch),)) | [
"def reply_update(self, tweet_id):\n data = self.get_update()\n form = self.format_block_data(data)\n first = General().reply(form[0], tweet_id)\n General().reply(form[1], first)",
"def post(self, answerid):\n db = Database()\n votes = db.get_by_argument(\"answers\", \"an... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Remove answer record and its image, then return its question_id. | def remove_answer_and_get_q_id(answer_id):
image_to_delete, question_id = get_answer_image_and_q_id(answer_id)
if image_to_delete:
try:
os.remove("static/uploads/" + image_to_delete)
except (FileNotFoundError, TypeError):
pass
delete_answer_by_id(answer_id)
retur... | [
"def remove_answer_image(answer_id):\n SQL = \"\"\"UPDATE answer SET image = NULL WHERE id = %s;\"\"\"\n data = (answer_id,)\n fetch = None\n db.run_statements(((SQL, data, fetch),))",
"def delete_a_image(answer_id):\n current_image = get_answer_image(answer_id)\n if current_image:\n remo... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
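A hedged sketch of how the deletion flow in these rows could be wired end to end: validate the id, remove the answer and its image, then decrement the parent question's counter. The `delete_answer` wrapper and the "REMOVE" operation label are illustrative assumptions.

```python
def delete_answer(answer_id):
    # Only act on ids that actually exist in the answer table.
    if not valid_answer_id(answer_id):
        return None
    question_id = remove_answer_and_get_q_id(answer_id)
    # Any operation other than "ADD" decrements the counter in the row above.
    update_answer_counter(question_id, "REMOVE")
    return question_id
```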
Deletes answer by answer ID. | def delete_answer_by_id(answer_id):
SQL = """DELETE FROM answer WHERE id = %s;"""
data = (answer_id,)
fetch = None
db.run_statements(((SQL, data, fetch),)) | [
"def delete(self, answer_id):\n le_answer = get_an_answer(answer_id)\n if not le_answer:\n return {'success': False, 'msg': 'answer does not exist'}\n else:\n return delete_a_answer(answer_id)",
"def delete_answer_by_id(conn, answer_id):\n SQL = \"\"\"DELETE FROM answ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |