| query (string, length 9-9.05k) | document (string, length 10-222k) | negatives (list, length 19-20) | metadata (dict) |
|---|---|---|---|
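Each row pairs a natural-language query with one positive document and a list of hard negatives; the metadata column's `objective.triplet` entry (visible in the rows below) indicates that these three columns combine into (query, document, negative) training triplets. Below is a minimal sketch of expanding rows of this schema into triplets, assuming the rows are available as JSON Lines objects with exactly these keys; the file name and loading path are illustrative, not part of the dataset.

```python
import json
from typing import Iterator, Tuple

def iter_triplets(path: str) -> Iterator[Tuple[str, str, str]]:
    """Yield (query, positive, negative) triplets from a JSONL dump of rows.

    Assumes each line is a JSON object with 'query', 'document',
    'negatives' and 'metadata' keys, matching the columns above.
    """
    with open(path, encoding="utf-8") as f:
        for line in f:
            row = json.loads(line)
            # Only expand rows whose objective lists the triplet combination.
            triplet_spec = row.get("metadata", {}).get("objective", {}).get("triplet", [])
            if ["query", "document", "negatives"] not in triplet_spec:
                continue
            for negative in row["negatives"]:
                yield row["query"], row["document"], negative

# Hypothetical usage:
# for query, positive, negative in iter_triplets("train.jsonl"):
#     ...
```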
Generate a CSRF prevention token. We derive this token as the SHA256 hash of the auth token, which ensures the two are bound together, preventing cookie forcing attacks. Returns a valid CSRF prevention token. | def get_csrf_token(self):
h = hashlib.new('sha256')
h.update(self.__current_authentication_token())
return h.hexdigest() | [
"def generate_csrf_token():\n return binascii.b2a_hex(os.urandom(32))",
"def generate_csrf_token():\n if '_csrf_token' not in session:\n session['_csrf_token'] = binascii.hexlify(os.urandom(32)).decode()\n return session['_csrf_token']",
"def generate_csrf_token() -> int:\r\n ...",
"def csr... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return the HTTP headers required to log the user in. Specifically, set the auth cookie, the csrf token cookie, and an unsecured cookie logged_in=true, indicating the user is logged in even if the current request context doesn't have the auth cookies. The server should redirect users with the loggedin cookie to the HTTP... | def login_headers(self):
auth_token = self.regenerate_authentication_token()
csrf_token = self.get_csrf_token()
# Set the secure flag on the cookie if the login occurred over HTTPS.
secure = ''
if 'HTTPS' in os.environ:
secure = ' secure;'
return ('Set-Cookie:... | [
"def logout_headers(self):\n self.regenerate_authentication_token()\n return ('Set-Cookie: %s=; path=/; expires=Thu, 01 Jan 1970 00:00:00 GMT\\n'\n 'Set-Cookie: %s=; path=/; expires=Thu, 01 Jan 1970 00:00:00 GMT\\n'\n 'Set-Cookie: %s=; path=/; expires=Thu,... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return the HTTP headers required to log the user out. Specifically, delete and invalidate the auth token and CSRF token. | def logout_headers(self):
self.regenerate_authentication_token()
return ('Set-Cookie: %s=; path=/; expires=Thu, 01 Jan 1970 00:00:00 GMT\n'
'Set-Cookie: %s=; path=/; expires=Thu, 01 Jan 1970 00:00:00 GMT\n'
'Set-Cookie: %s=; path=/; expires=Thu, 01 Jan 197... | [
"def forget(self, environ, identity):\n challenge = \"MAC+BrowserID url=\\\"%s\\\"\" % (self.token_url,)\n return [(\"WWW-Authenticate\", challenge)]",
"def logout(request):\n token = request._auth\n token.delete()\n return Response({'detail': 'Logout successful, Token succesfully deleted'}... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return the current authentication token if it is still valid, else None. | def __current_authentication_token(self):
if os.path.isfile(self.token_filename):
with open(self.token_filename, 'r') as f:
(stored_token, expires) = f.read().split(' ')
t = time.time()
if int(expires) > t:
return stored_token
return No... | [
"def get_auth_token():\n auth_token_value = memcache.get('authtoken')\n if not auth_token_value:\n entity = Token.get_by_key_name(key_names = 'authtoken')\n if entity:\n auth_token_value= entity.value\n memcache.set('authtoken', auth_token_value)\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return true iff candidate authentication token matches stored one. | def is_authentication_token(self, candidate):
current_token = self.__current_authentication_token()
# TODO: Add expiry checking
if (current_token and
self.__valid_token_format(current_token) and
self.__valid_token_format(candidate) and
constant_tim... | [
"def is_token_auth(self):\n return self.is_valid() and bool(self.user_token)",
"def check_auth(self, token, allowed_roles, resource, method):\n return token and self.redis.get(token)",
"def check_token(self, user, token):\n try:\n data = signing.loads(token, max_age=properties.TO... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create and store a new random authentication token. Expires old sessions. | def regenerate_authentication_token(self):
new_token = os.urandom(self.TOKEN_LENGTH).encode('hex')
expires = int(time.time()) + Auth.SESSION_DURATION
self.write(self.token_filename, ('%s %d' % (new_token, expires)))
return new_token | [
"def generate_new_token(self):\n self.access_token = random_auth_key()",
"def generate_expired_auth_token(self):\n token = self.generate_auth_token()\n token.created = timezone.now()-timezone.timedelta(hours=25)\n token.save()\n return token",
"def generate_new_token(self, exp... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
In the context of a CGI request, check whether an authentication cookie is present and valid. If not, render an error. | def check_authentication(self):
try:
cookies = os.environ['HTTP_COOKIE'].split('; ')
except KeyError:
cookies = []
for c in cookies:
prefix = Auth.AUTH_COOKIE_NAME + '='
if (c.startswith(prefix) and
self.is_authentication_token(... | [
"def _auth_check():\n try:\n if not _is_authenticated():\n raise web.unauthorized()\n except:\n raise web.unauthorized()",
"def check_authentication(self, request):\n if not self.request.user.is_authenticated:\n raise NotAuthenticated()",
"def auth(request) -> bo... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that the request() wrapper passes along expected headers | def test_request_headers(mock_send, mock_format):
ClientSession().request('GET', 'https://url', access_token='token')
request_obj = mock_send.call_args[0][0]
assert request_obj.headers['Authorization'] == 'Bearer token' | [
"def test_specific_headers_sent_with_request(self):\n req = self.httpbin.get_my_headers(dry_run=True)\n self.assertIn('All-Request-Headers', req.prepared_request.headers)\n request_data_headers = self.httpbin.client['get_my_headers']['headers']['All-Request-Headers']\n self.assertEqual(r... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
wrapper to make async calls using gevent, concurrent not parallel | def asynchronous(urls, batch_size, delay=0, verbose=False):
try:
count=1
threads=[]
print(urls.strip(' ').split(","))
for url in urls.strip(" '").split(","):
print('On batch {}'.format(count))
threads.append(gevent.spawn(fetch, url, verbose))
responses... | [
"def async(func):\n func.async = True\n return func",
"def sync(async_func):\n\n def wrapFunc(self: User, *args, **kwargs):\n futures = []\n\n for sub_user in self.sub_users:\n futures.append(asyncio.run_coroutine_threadsafe(async_func(sub_user, *args, **kwargs), loop))\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Main function to call. This function should obtain results from generators and plot the image and image intensity. Create a for loop to iterate the generator functions. | def display_images():
vc = cv2.VideoCapture(0) # Open webcam
    figure, ax = plt.subplots(1, 2, figsize=(10, 5)) # Initialise plot
count = 0 # Counter for number of aquired frames
intensity = [] # Append intensity across time
# For loop over generator here
intensity.append(imageintensity)
... | [
"def main():\n run_generator()",
"def main():\r\n nColSize, nRowSize, lstData, lstGraphLabels = pipe.retrieveData()\r\n aaData = _processData(nColSize, nRowSize, lstData) \r\n _saveImage(aaData, lstGraphLabels)\r\n pipe.updateStatus()",
"def main():\n \n # for inserting other images, add tem t... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This function plots the image and the image intensity through time | def plot_image_and_brightness(axis, image, imageintensity, framecount):
# Plot RGB Image
axis[0].imshow(image)
axis[0].axis('off')
axis[0].set_title(f'Frame Number {framecount}')
# Plot intensity
axis[1].plot(imageintensity, '.-')
axis[1].set_ylabel('Average Intensity')
# Stuff to sho... | [
"def plot_it(image_array: np.array) -> None:\n\tplt.rcParams[\"figure.figsize\"] = (20, 15)\n\n\tfig, ax = plt.subplots(1)\n\tax.imshow(image_array)\n\t# plt.savefig(\"./image_\"+str(image_counter)+\".png\")\n\tplt.show()\n\treturn",
"def plotLightCurves(self, im_array, results_arr, image_times):\n\n\n t0_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Adds new reactions and metabolites to iEK1008.json while performing continuous testing | def main():
run_test_suite('../models/iEK1008.json') # runs test suite with iEK1008.json
# rewrites iEK1008.json to iMtb_H37Rv.json so original model is not overwritten
model_iek = cobra.io.load_json_model('../models/iEK1008.json')
cobra.io.save_json_model(model_iek, '../models/iMtb_H37Rv.json')
m... | [
"def test_make_new_reaction(self):\n\n procnum = 2\n spcA = Species().from_smiles('[OH]')\n spcs = [Species().from_smiles('CC'), Species().from_smiles('[CH3]')]\n spc_tuples = [((spcA, spc), ['H_Abstraction']) for spc in spcs]\n\n rxns = list(itertools.chain.from_iterable(react(sp... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Converts a cause of death index to a human-readable string. | def cause_of_death_index_to_string(index: int) -> str:
if index == CauseOfDeath.STARVATION.value:
return "Starvation"
elif index == CauseOfDeath.DEHYDRATION.value:
return "Dehydration"
elif index == CauseOfDeath.EATEN.value:
return "Eaten"
else:
raise ValueError("Did not recognize CauseOfDea... | [
"def _index_to_unicode(cls, index: int) -> str:\n return \"\".join(cls._unicode_subscripts[int(_)] for _ in str(index))",
"def _make_not_found_message(index: Union[int, slice, str]) -> str:\n msg = [f\"Analysis result {index} not found.\"]\n errors = self.errors()\n if erro... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Creates and returns a grouped bar chart with the death causes. The lists are expected to feature an entry for each cause of death, where the value corresponds to how many specimens died of that cause. | def create_grouped_bar_chart(stats: dict[str, list[int]]):
figure, axes = plot.subplots()
labels = [str(e) for e in CauseOfDeath]
x = numpy.arange(len(labels))
bar_width = 0.15
max_value = 0
rects = []
i = 0
for label, values in stats.items():
max_value = max(max_value, max(values))
rects.ap... | [
"def visualise_cause_of_death(data: LogData, directory: Path):\n\n rabbit_stats: list[int] = [0 for _ in CauseOfDeath]\n deer_stats: list[int] = [0 for _ in CauseOfDeath]\n wolf_stats: list[int] = [0 for _ in CauseOfDeath]\n bear_stats: list[int] = [0 for _ in CauseOfDeath]\n\n for event in data.events():\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Produces a grouped bar chart of the different causes of death, arranged by the animal types. | def visualise_cause_of_death(data: LogData, directory: Path):
rabbit_stats: list[int] = [0 for _ in CauseOfDeath]
deer_stats: list[int] = [0 for _ in CauseOfDeath]
wolf_stats: list[int] = [0 for _ in CauseOfDeath]
bear_stats: list[int] = [0 for _ in CauseOfDeath]
for event in data.events():
event_type: ... | [
"def create_grouped_bar_chart(stats: dict[str, list[int]]):\n\n figure, axes = plot.subplots()\n\n labels = [str(e) for e in CauseOfDeath]\n x = numpy.arange(len(labels))\n\n bar_width = 0.15\n max_value = 0\n\n rects = []\n i = 0\n for label, values in stats.items():\n max_value = max(max_value, max(val... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Generates a new OperatorBuilder object. atomicTermExpr is a TorqExpression object. A sequence which matches it will be recognized as an atomic item and not be parsed further. composedTermNodeLabels is a list of labels. Nodes that have one of them will be recognized as an expression, which may include another expression inside... | def __init__(self,
atomicTermExpr=None,
composedTermNodeLabels=None,
generatedTermLabel=None):
self.__ate = atomicTermExpr if atomicTermExpr is not None else Never()
self.__ctnls = composedTermNodeLabels
self.__gtl = generatedTermLabel | [
"def __compile_term(self, xml_tree):\n tk = self.__tokenizer\n\n # unaryOp term\n if tk.get_next_token() in UNARY_OP_LIST:\n # unaryOp\n SubElement(xml_tree, tk.get_token_type()).text = tk.get_next_token()\n tk.advance()\n # term\n self.__c... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Recursively iterate over issue dictionary and print errors. | def _print_issue(issue, ntabs):
for key, value in issue.items():
if isinstance(value, dict):
tabs = TAB*ntabs
print('%s%s (section):' % (tabs, key))
ntabs += 1
print_issue(value, ntabs=ntabs)
elif isinstance(value, bool):
if value == False:... | [
"def printErrors(self):\n #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n\n print self.ex\n print \"input dict:\", self.ddct\n for ln, v in enumerate(self.template.split('\\n')):\n print \"%2d:\" % (ln+2), v",
"def printSemanticErrors(self):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
POST /validate Validate GeoJSON data in POST body | def validate(request):
testing = request.GET.get('testing')
if request.method == 'POST':
stringy_json = request.raw_post_data
else: # GET
try:
remote_url = request.GET['url']
stringy_json = get_remote_json(remote_url)
except KeyError: # The "url" URL param... | [
"def verify_geojson(data):\n \"\"\"Enforces camelcasing of properties\"\"\"\n if 'id' in data:\n del data['id']\n try:\n data['type'] = data['type'] if 'type' in data else \"Feature\"\n data['geometry'] = data['geometry'] if 'geometry' in data else None\n if 'properties' not in ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get a random software license. | def software_license(self) -> str:
return self.random.choice(LICENSES) | [
"async def get_license(self) -> APIReturn:\n return await self._request(\"GET\", \"/getLicense\")",
"def get_license():\n repo_fs()\n return LICENSE",
"def license(self): # noqa: A003\n logger.debug(\"Get license\")\n return self._raw_api.license.get()",
"def license_plate(self) ->... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get a random programming language from the list. | def programming_language(self) -> str:
return self.random.choice(PROGRAMMING_LANGS) | [
"def language():\n return random.choice(get_dictionary('languages')).strip()",
"def language():\r\n\r\n cursor.execute('SELECT name from languages order by RANDOM() limit 1;')\r\n return cursor.fetchone()[0]",
"def choose_language(self):\n\n current_dir = os.curdir\n path = os.path.join(c... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get a random operating system or distributive name. | def os(self) -> str:
return self.random.choice(OS) | [
"def get_user_friendly_os_name() -> str:\n systems = {\n \"Windows\": \"Windows\",\n \"Linux\" : \"Linux\",\n \"Linux2\" : \"Linux\",\n \"Darwin\" : \"MacOS\",\n \"FreeBSD\": \"FreeBSD\",\n \"OpenBSD\": \"OpenBSD\",\n \"NetBSD\" : ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get a random system quality attribute. Within systems engineering, quality attributes are realized nonfunctional requirements used to evaluate the performance of a system. These are sometimes named "ilities" after the suffix many of the words share. | def system_quality_attribute(self) -> str:
return self.random.choice(SYSTEM_QUALITY_ATTRIBUTES) | [
"def ility(self) -> str:\n return self.system_quality_attribute()",
"def getRandomRarity():\n r = random.randint(1,100)\n if r <= Rarities.IMPOSIBIL:\n return \"IMPOSIBIL\"\n elif r <= Rarities.LEGENDAR:\n return \"LEGENDAR\"\n elif r <= Rarities.EPIC:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get a random system quality attribute. An alias for system_quality_attribute(). | def ility(self) -> str:
return self.system_quality_attribute() | [
"def system_quality_attribute(self) -> str:\n return self.random.choice(SYSTEM_QUALITY_ATTRIBUTES)",
"def genQuality(self):\n return np.clip(np.random.normal(self.qavgs, self.qstdevs), 0, 40)",
"def get_random(self):\n base_genom = \"1\" * sum(self._size_var)\n return utils.randomise... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Fit scaler and transform input data. Winsorise `X` at `quantile` and `1 - quantile`. Scale each variable (as long as they aren't binary, in which case they are already rules). | def fit_transform(self, X, y=None):
self.scale = np.ones(X.shape[1])
self.lower = np.percentile(X, self.quantile*100, axis=0)
self.upper = np.percentile(X, (1-self.quantile)*100, axis=0)
# Winsorize at `self.quantile`
winX = X.copy()
is_lower = (winX < self.lower... | [
"def quantile_transform(X, *, axis=..., n_quantiles=..., output_distribution=..., ignore_implicit_zeros=..., subsample=..., random_state=..., copy=...):\n ...",
"def fit(self, x, y=None):\n x = pd.DataFrame(x)\n q1 = x.quantile(0.25)\n q3 = x.quantile(0.75)\n iqr = q3 - q1\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Transform data into modified features (before being passed to the penalised regression step). If `linear_features=True` then this will be the scaled linear features followed by the one-hot encoding signifying which rules are "on". Otherwise this is just the one-hot encoding signifying which rules are "on". | def transform(self, X, y=None):
if isinstance(X, DataFrame):
is_df = True # Serves no purpose
X = check_array(X) # Validate input data
X = self.ext_scaler.transform(X) # Scale and centre features
if self.linear_features:
X_scale = sel... | [
"def rescale_trainx(dftrainx):\n indices = dftrainx.index\n cols = dftrainx.columns\n trainx = dftrainx.values\n\n # trainx[:,:10] # <---- Non-binary columns\n # trainx[:,10:] # <---- Binary columns\n trainfloats = trainx[:,:10].astype(float)\n trainbinaries = trainx[:,10:]\n\n # Rescale all... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Extract rule set from single decision tree according to `XGBClassifier` format | def __extract_xgb_dt_rules__(self, dt):
md = self.max_depth + 1 # upper limit of max_depth?
rules = []
levels = np.zeros((md, 3)) # Stores: (feature name, threshold, next node id)
path = []
# Extract feature numbers and thresholds for all nodes
feat_thresh_l = re.find... | [
"def decision_tree_xgb():\n params = {\"max_depth\":6, \"eta\":0.05, \"nthread\":8, \"learning_rate\":0.05, \"objective\":\"binary:logistic\", \"eval_metric\":\"auc\"}\n cv_fold = None\n # 设置early_stopping_rounds=10,当logloss在10轮迭代之内,都没有提升的话,就stop。如果说eval_metric有很多个指标,那就以最后一个指标为准。经验上,选择early_stopping_rounds... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Extract rule set from single decision tree according to sklearn binarytree format | def __extract_dt_rules__(self, dt):
t = dt.tree_ # Get tree object
rules = []
stack = [(0, -1, -1)] # (node id, parent depth, true[<=thresh]/false[>thresh] arm)
path = [(0, -1, -1)] # Begin path at root
while len(stack) > 0: # While nodes to visit is not empty
n... | [
"def __extract_xgb_dt_rules__(self, dt): \n md = self.max_depth + 1 # upper limit of max_depth?\n rules = []\n levels = np.zeros((md, 3)) # Stores: (feature name, threshold, next node id)\n path = []\n\n # Extract feature numbers and thresholds for all nodes\n feat_thresh... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Extract rules from `base_estimator` | def extract_rules(self, labels=None):
# Extract flat list of rules in array form
if isinstance(self.base_estimator, RandomForestClassifier):
rules = list(it.chain(*[self.__extract_dt_rules__(dt) for dt in self.base_estimator.estimators_]))
elif isinstance(self.base_estimator, Gradien... | [
"def extract_estimators(wml_pipeline_config: Dict, training_details: Dict) -> Dict:\n estimators = {}\n parameters = wml_pipeline_config['entity']['document']['pipelines'][0]['nodes'][0]['parameters']\n scorer_for_ranking = parameters['optimization']['scorer_for_ranking']\n stages = training_details['en... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the index of the section, or pseudo-section, for the symbol. | def getSectionIndex(self) -> int:
... | [
"def symbol_to_index(self, symbol):\n for i, s in enumerate(self.symbols):\n if s == symbol:\n return i\n return -1",
"def index(self, sym):\r\n assert isinstance(sym, str)\r\n if sym in self.indices:\r\n return self.indices[sym]\r\n return s... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Read logfile with the profiles written | def read_log(prefix):
l = []
with open('%s.log' % prefix) as F:
for line in F:
if 'profile written' not in line:
continue
else:
l.append(line.split()[0])
return l | [
"def read_logs(self):\n\n self.history = MesaData(self.history_path)\n self.history_data = self.history\n self.profiles = MesaProfileIndex(self.index_path)\n self.profile_numbers = self.profiles.profile_numbers\n self.model_numbers = self.profiles.model_numbers\n self.profi... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Execute line with subprocess | def executeLine(line):
pl = Popen(line, shell=True, stderr=PIPE, stdout=PIPE)
o, e = pl.communicate()
return o, e | [
"def do(self, line): \n self.interface.onecmd(line)",
"def Run(command_line):\n print >> sys.stderr, command_line\n return subprocess.check_output(command_line, shell=True)",
"def call_command_line(string, **kwargs):\n return subprocess.run(string.split(\" \"), **kwargs)",
"def execute(cmd)... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Read the bim/fam files from the plink fileset | def read_BimFam(prefix):
Bnames = ['CHR', 'SNP', 'cM', 'BP', 'A1', 'A2']
bim = pd.read_table('%s.bim' % (prefix), delim_whitespace=True, header=None,
names=Bnames)
Fnames = ['FID', 'IID', 'father', 'mother', 'Sex', 'Phenotype']
fam = pd.read_table('%s.fam' % (prefix), delim_white... | [
"def test_plink_bed_file_reading():\n rel_path = '/'.join(('test_resources', 'subset_of_exposure_cohort'))\n\n if len(__file__.split(\"/\")) > 1:\n plink_loc = \"{}/{}\".format(\"/\".join(__file__.split(\"/\")[:-1]), rel_path)\n else:\n plink_loc = rel_path\n\n plinkio = simulate_mr.read_g... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Generate and read frequency files and filter based on threshold | def read_freq(bfile, plinkexe, freq_threshold=0.1, maxmem=1700, threads=1):
high = 1 - freq_threshold
low = freq_threshold
if not os.path.isfile('%s.frq.gz' % bfile):
nname = os.path.split(bfile)[-1]
frq = ('%s --bfile %s --freq gz --keep-allele-order --out %s --memory '
'%d -... | [
"def filter_by_freq(tsv_name):\r\n #open the tsv to be filtered\r\n tsv = open(tsv_name, 'r')\r\n #create names for the new tsvs by adding _u1 and _u5 to the end of the original tsv name\r\n filtered_5_tsv_name = 'u5.tsv'\r\n #tsv_name.split('.')[0] + '_u5.tsv'\r\n filtered_1_tsv_name = 'u1.tsv'\r... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Parse and sort clumped file | def parse_sort_clump(fn, allsnps):
# make sure allsnps is a series
allsnps = pd.Series(allsnps)
try:
df = pd.read_table(fn, delim_whitespace=True)
except FileNotFoundError:
spl = fn.split('.')
if spl[0] == '':
idx = 1
else:
idx = 0
fn = '.'... | [
"def process_file(f):\n with open(f, \"r\") as fp:\n lines = (l.split(\"|\") for l in fp)\n return sorted(dedup(lines), key=lambda x: -float(x[1]))",
"def sort_file(filename):\n\n data = []\n with open(filename, 'r') as file_object:\n line = file_object.readline()\n while line... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Generate qrange file to be used with plink qrange | def gen_qrange(prefix, nsnps, prunestep, every=False, qrangefn=None):
order = ['label', 'Min', 'Max']
# dtype = {'label': object, 'Min': float, 'Max': float}
if qrangefn is None:
# Define the number of snps per percentage point and generate the range
percentages = set_first_step(nsnps, prune... | [
"def _generate_qubits(self):\n return cq.LineQubit.range(4)",
"def process_qrange_file(filename):\n\n f = open(filename, 'r')\n q_ranges = yaml.load(f)\n\n return q_ranges",
"def write_q_table_file(q_table, q_file=\"Q_Table.txt\"):\n file = open(q_file, \"w+\")\n rows = len(q_table)\n c... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Helper function to parallelize score_qfiles | def single_score_plink(prefix, qr, tup, plinkexe, gwasfn, qrange, frac_snps,
maxmem, threads):
qfile, phenofile, bfile = tup
suf = qfile[qfile.find('_') + 1: qfile.rfind('.')]
ou = '%s_%s' % (prefix, suf)
# score = ('%s --bfile %s --score %s 2 4 7 header --q-score-range %s %s '
# ... | [
"def multi_threaded(dataset, numElems, numQs, qType, filename): \n start = time.time()\n \n # 1. \n q = Queue()\n processes = [optimization_task(i, dataset, numElems, numQs, qType, q) for i in range(4)]\n [p.set_split(split) for p, split in zip(processes, ['lin', 'qdrt', 'rstar', 'bulk'])]\n\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return a list of strings of METAR meteorological data for the specified station on the specified date. | def get_met_data(self, stn, ignore_errors, retries, **kwargs):
# Validate the common station name and convert it to the
# corresponding official station ID
try:
stn = self.stns[stn]
except:
raise UnknownStationError, stn
# Process the date components in th... | [
"def get_meteorological_month_data(\n year_str: int, month_str: int, station_id: Optional[str] = \"07149\", freq_min: Optional[int] = 30\n) -> pd.DataFrame:\n month_str_reformat = \"{:02d}\".format(month_str)\n df = pd.read_csv(METEO_FRANCE_URL.format(year_str, month_str_reformat), sep=\";\", dtype=str)\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return the METAR data for the specified station and date range. | def metar_data(station, begin, end, ignore_errors, retries):
def _parse_date(date_str):
"""Minimal date parser."""
yr, mo, day = [int(x) for x in date_str.split('-')]
try:
return date(yr, mo, day)
except ValueError:
raise InvalidDateError, begin
... | [
"def get_met_data(self, stn, ignore_errors, retries, **kwargs):\n # Validate the common station name and convert it to the\n # corresponding official station ID\n try:\n stn = self.stns[stn]\n except:\n raise UnknownStationError, stn\n # Process the date comp... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Add trick to the dog. This function illustrates mistaken use of the mutable class variable tricks (see below). | def add_trick(self, trick):
self.tricks.append(trick) | [
"def test_varAugmentedAssignment(self):\r\n self.flakes('''\r\n foo = 0\r\n foo += 1\r\n ''')",
"def test_augmented_assignment():\n c = Circle(2)\n c += 4\n assert c.radius == 6\n\n c *= 2\n assert c.radius == 12\n\n c **= 2\n assert c.radius == 144\n\n c -= 6\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This method registers the book in the books table, but first checks if the book is already registered. I decided to use the barcode as a data string, because I can use both bar code parameters. And the stock defaults to 0, because if the user doesn't pass the stock, the quantity is already set to 0 | def register_book(self, title: str, author: str, price: float, barcode: str, stock=0):
try:
if not self.verify_register(barcode):
self.db.cursor.execute('INSERT INTO books (title, author, price, bar_code, stock) VALUES (%s, %s, %s, '
'%s, %s)', ... | [
"def add_to_database(self):\r\n url = root_url + \"/book/b/\"\r\n barcode = self.add_barcode()\r\n book_title = input(\"Please enter book title: \")\r\n book_author = input(\"Please enter book author: \")\r\n book_published = input(\"Please enter book publish date: \")\r\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This method updates the price of the books by the barcode. | def update_price_books(self, barcode, new_price):
try:
self.db.cursor.execute('UPDATE books SET price = %s where id_books = %s', (round(new_price, 2), barcode))
except Exception as error:
print(error)
else:
self.db.con.commit()
self.db.con.close()
... | [
"def update(self, current_price):\n for s in self.book:\n s.update(current_price)",
"def update_price(appointment):\n total = ItemInBill.objects.filter(appointment=appointment).aggregate(sum=Sum('price'))['sum']\n appointment.price = total\n appointment.save()",
"def __upd... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This method deletes books already registered in the database, by the barcode. | def delete_book(self, barcode):
try:
self.db.cursor.execute('DELETE FROM books where id_books = %s', (barcode,))
except Exception as error:
print(error)
else:
self.db.con.commit()
self.db.con.close()
print('Deleted Successfully!') | [
"def delete_book(self, book):\n try:\n with self._db as db:\n cur = db.cursor()\n cur.execute('DELETE FROM books WHERE rowid = ?', (book.id, ))\n if not cur.rowcount:\n raise BookError('Tried to delete book tha... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This method returns the specifications of the books, consulting the database by barcode | def consult_books(self, bar_code: str):
try:
book_data = []
self.db.cursor.execute('SELECT * from books WHERE bar_code = %s', (bar_code,))
for i in self.db.cursor.fetchall():
book_data.append(i)
except Exception as error:
print(error)
... | [
"def find_book(code: str) -> Dict:\n pass",
"def get_all_books() -> List[Dict]:\n pass",
"def getBooks(self):\n srcIds = set([srcId for srcId,altId in self.libMap.values()])\n altIds = set([altId for srcId,altId in self.libMap.values()])\n factory = {'BOOK':Book}\n for modName ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This method checks if the book is already registered in the database, by barcode. | def verify_register(self, barcode: str):
try:
test = []
self.db.cursor.execute(f'SELECT * FROM books where bar_code = {barcode}')
for i in self.db.cursor.fetchall():
test.append(i)
except Exception as error:
print(error)
else:
... | [
"def check_if_isbn_present(isbn):\r\n for obj in book.book_list:\r\n if obj.isbn == isbn:\r\n print('Book with same ISBN exists, cannot add this book')\r\n return False\r\n return True",
"def register_book(self, title: str, author: str, price: float, barcode:... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
A method to generate a nonce to send to the validation server. As specified by the protocol, the nonce must be between 16 and 40 alphanumeric characters long with random unique data. | def generate_nonce():
return uuid4().hex | [
"def generate_nonce():\n return int(time.time() + 100)",
"def nonce(self) -> int:\n return int(codecs.encode(self._nonce[::-1], \"hex\"), 16)",
"def _nonce(self):\n return str(int(round(time.time() * 10000)))",
"def nonce_length(self):\n return 5, 50",
"def _get_nonce(self):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check that the response is a valid response to our request that is, the otp that was returned is the otp we sent originally, that the nonce that was sent was the nonce we had originally, and that the signature (if C{self.api_key} is not C{None}) is valid | def _verify_response(self, text_response, orig_otp, orig_nonce):
response_dict = dict([line.strip(' ').split('=', 1) for line in
re.split(r'\r\n', text_response)
if line.strip()])
if 'otp' in response_dict and response_dict['otp'] != orig_otp:... | [
"def validate_response(self, response):\n pass",
"def is_valid_response(self, response):\r\n if response.status_code in VALID_CODES:\r\n return True\r\n return False",
"def verify_ocsp_response(self) -> \"XmlSignature\":\n try:\n self.get_ocsp_response().verify(... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Verifies an OTP against the validation servers provided to the verifier. It queries all servers in parallel and waits for answers. Servers will not respond positively until it has synchronized the new OTP counter with the other servers, and this will wait until it has received one valid (200, otp and nonce match, and s... | def verify(self, otp, timestamp=None, sl=None, timeout=None):
query_dict = {
'id': self.verifier_id,
'otp': otp,
'nonce': self.generate_nonce()
}
if timestamp is not None:
query_dict['timestamp'] = int(bool(timestamp))
if sl is not None:
... | [
"def verify_otp(request: Request, body: VerifyOTPIn, db: Session = Depends(get_db)):\n mgr = LoginManager()\n mgr.verify_otp(db, body.identifier, body.code)\n request.session[\"access_token\"] = secrets.token_hex(16)\n return {\"status\": \"OK\"}",
"def validate_yubikey_otp(self, params):\n from_ke... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
r"""Makes a gif using a list of images. | def make_gif(image_list, gif_name):
if not gif_name.endswith(".gif"):
gif_name += ".gif"
imageio.mimsave(gif_name, [imageio.imread(x) for x in image_list]) | [
"def create_gif(image_list, gif_name, duration=1):\n\n frames = []\n # 把图片 append 进列表\n for image_name in image_list:\n frames.append(imageio.imread(image_name))\n # 保存为 gif 图\n imageio.mimsave(gif_name, frames, 'GIF', duration=duration)\n\n return",
"def create_gif(images, path):\n wi... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
wrapper function for starting a net.Server connected to `pipe` | async def net_server(pipe):
server = await net.Server(pipe, host="0.0.0.0", port=8080)
return await server.wait_closed() | [
"def net_proc(pipe):\n asyncio.run(net_server(pipe))",
"def new_server(self, name, pipeline, port=None):\n if port is None:\n port = self.next_port\n self.next_port += 1\n\n self.servers[name] = port\n\n args = [\"owl-server\",\"--port\", str(port)] + pipeline.split()... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
wrapper for running net_server on its own thread/process | def net_proc(pipe):
asyncio.run(net_server(pipe)) | [
"async def net_server(pipe):\n server = await net.Server(pipe, host=\"0.0.0.0\", port=8080)\n return await server.wait_closed()",
"def run_server(server, thread=False, port=8080):\n if server is None:\n server = HTTPServer(('localhost', port), SimpleHandler)\n if thread:\n th = ThreadSer... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Dynamic import of CVXOPT dense interface. | def get_cvxopt_dense_intf():
    import cvxpy.interface.cvxopt_interface.dense_matrix_interface as dmi
return dmi.DenseMatrixInterface() | [
"def get_cvxopt_sparse_intf():\n import cvxpy.interface.cvxopt_interface.sparse_matrix_interface as smi\n return smi.SparseMatrixInterface()",
"def dense2cvxopt(value):\n import cvxopt\n return cvxopt.matrix(value, tc='d')",
"def from_dense(cls, dense: Float[Array, \"N N\"]) -> \"LinearOperator\":\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Dynamic import of CVXOPT sparse interface. | def get_cvxopt_sparse_intf():
import cvxpy.interface.cvxopt_interface.sparse_matrix_interface as smi
return smi.SparseMatrixInterface() | [
"def test_import_values_sparse():\n x = sps.csr_matrix(np.random.rand(7, 11))\n export_data('/tmp/test.sparse', x)\n assert np.array_equal(x.toarray(), import_data('/tmp/test.sparse').toarray())",
"def ipu_sparse_ops(scope=\"session\"):\n build_path = Path(\n public_examples_dir,\n \"app... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Converts a SciPy sparse matrix to a CVXOPT sparse matrix. | def sparse2cvxopt(value):
import cvxopt
if isinstance(value, (np.ndarray, np.matrix)):
return cvxopt.sparse(cvxopt.matrix(value.astype('float64')), tc='d')
# Convert scipy sparse matrices to coo form first.
elif sp.issparse(value):
value = value.tocoo()
return cvxopt.spmatrix(val... | [
"def convert_sparse_matrix(x):\n if scipy.sparse.issparse(x):\n return x.toarray()\n else:\n return x",
"def to_sparse(self):\n from divisi2.sparse import SparseMatrix\n return SparseMatrix(self, self.row_labels, self.col_labels)",
"def as_sparse_matrix(matrix):\n irows, ico... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Converts a NumPy matrix to a CVXOPT matrix. | def dense2cvxopt(value):
import cvxopt
return cvxopt.matrix(value, tc='d') | [
"def sparse2cvxopt(value):\n import cvxopt\n if isinstance(value, (np.ndarray, np.matrix)):\n return cvxopt.sparse(cvxopt.matrix(value.astype('float64')), tc='d')\n # Convert scipy sparse matrices to coo form first.\n elif sp.issparse(value):\n value = value.tocoo()\n return cvxopt.... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Is the constant a sparse matrix? | def is_sparse(constant) -> bool:
return sp.issparse(constant) | [
"def is_sparse(A):\n if isinstance(A, torch.Tensor):\n return A.layout == torch.sparse_coo\n raise TypeError(\"expected Tensor but got %s\" % (type(A).__name__))",
"def is_sparse(tensor):\n return isinstance(tensor, sparse_tensor.SparseTensor)",
"def isSparse(self):\n return self.grid_att... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check if a matrix is Hermitian and/or symmetric. | def is_hermitian(constant) -> bool:
complex_type = np.iscomplexobj(constant)
if complex_type:
# TODO catch complex symmetric but not Hermitian?
is_symm = False
if sp.issparse(constant):
is_herm = is_sparse_symmetric(constant, complex=True)
else:
is_herm = ... | [
"def is_symmetric(self):\n if self.is_square():\n return self == self.transpose()\n else:\n return False",
"def is_symmetric(mat):\n return np.allclose(mat.T, mat)",
"def isSymmetrical(self):\r\n if self.lam == 'SYM':\r\n return True\r\n return Fal... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Walks through the full state trie, yielding one missing node hash/prefix at a time. The yielded node info is wrapped in a TrackedRequest. The hash is marked as active until it is explicitly marked for review again. The hash/prefix will be marked for review asking a peer for the data. Will exit when all known node hashe... | async def _missing_trie_hashes(self) -> AsyncIterator[TrackedRequest]:
# For each account, when we have asked for all known storage and bytecode
# hashes, but some are still not present, we "pause" the account so we can look
# for neighboring nodes.
# This is a list of paused account... | [
"async def _request_tracking_trie_items(\n self,\n request_tracker: TrieNodeRequestTracker,\n root_hash: Hash32) -> AsyncIterator[Tuple[Nibbles, Nibbles, bytes]]:\n if self._next_trie_root_hash is None:\n # We haven't started beam syncing, so don't know which root ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Walk through the supplied trie, yielding the request tracker and node request for any missing trie nodes. | async def _request_tracking_trie_items(
self,
request_tracker: TrieNodeRequestTracker,
root_hash: Hash32) -> AsyncIterator[Tuple[Nibbles, Nibbles, bytes]]:
if self._next_trie_root_hash is None:
# We haven't started beam syncing, so don't know which root to start a... | [
"async def _missing_trie_hashes(self) -> AsyncIterator[TrackedRequest]:\n # For each account, when we have asked for all known storage and bytecode\n # hashes, but some are still not present, we \"pause\" the account so we can look\n # for neighboring nodes.\n # This is a list of pau... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Walks through the storage trie at the given root, yielding one missing storage node hash/prefix at a time. The yielded node info is wrapped in a ``TrackedRequest``. The hash is marked as active until it is explicitly marked for review again. The hash/prefix will be marked for review asking a peer for the data. Will exi... | async def _missing_storage_hashes(
self,
address_hash_nibbles: Nibbles,
storage_root: Hash32,
starting_main_root: Hash32) -> AsyncIterator[TrackedRequest]:
if storage_root == BLANK_NODE_HASH:
# Nothing to do if the storage has an empty root
... | [
"async def _request_tracking_trie_items(\n self,\n request_tracker: TrieNodeRequestTracker,\n root_hash: Hash32) -> AsyncIterator[Tuple[Nibbles, Nibbles, bytes]]:\n if self._next_trie_root_hash is None:\n # We haven't started beam syncing, so don't know which root ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Checks if this bytecode is missing. If so, yield it and then exit. If not, then exit immediately. This may seem like overkill, and it is right now. But... Code merkelization is coming (theoretically), and the other account and storage trie iterators work similarly to this, so in some ways it's easier to do this "overge... | async def _missing_bytecode_hashes(
self,
address_hash_nibbles: Nibbles,
code_hash: Hash32,
starting_main_root: Hash32) -> AsyncIterator[TrackedRequest]:
if code_hash == EMPTY_SHA3:
# Nothing to do if the bytecode is for the empty hash
ret... | [
"def disable_bytecode_generation():\n sentinel, sys.dont_write_bytecode = sys.dont_write_bytecode, True\n\n try:\n yield\n finally:\n sys.dont_write_bytecode = sentinel",
"async def _missing_trie_hashes(self) -> AsyncIterator[TrackedRequest]:\n # For each account, when we have asked ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Estimate the completed fraction of the trie that is contiguous with the current index (which rotates every 32 blocks) It will be probably be quite noticeable that it will get "stuck" when downloading a lot of storage, because we'll have to blow it up to more than a percentage to see any significant change within 32 blo... | def _contiguous_accounts_complete_fraction(self) -> float:
starting_index = bytes_to_nibbles(self._next_trie_root_hash)
unknown_prefixes = self._account_tracker._trie_fog._unexplored_prefixes
if len(unknown_prefixes) == 0:
return 1
# find the nearest unknown prefix (typicall... | [
"def fraction_completed(self):\n return sum(self._chunk_done.values()) / len(self.chunks)",
"def get_utilization(self):\n child_prefixes = Prefix.objects.filter(prefix__net_contained_or_equal=str(self.prefix))\n # Remove overlapping prefixes from list of children\n networks = cidr_merg... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return the Trie Fog that can be searched, ignoring any nodes that are currently being requested. | def _get_eligible_fog(self) -> fog.HexaryTrieFog:
return self._trie_fog.mark_all_complete(self._active_prefixes) | [
"def _build_feature_trie(self):\n feature_trie = Trie(\n default=lambda: (False, set(), set()),\n path_constructor=RelationshipPath,\n )\n\n for f in self.target_features:\n self._add_feature_to_trie(feature_trie, f, self.approximate_feature_trie)\n\n ret... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return title + episode (if series) | def inclusive_title(self):
return self.title + (" %s" % (self.episode_to_string(self.latest_season, self.latest_episode),) if self.is_series() else "") | [
"def __get_episode_name(self):\n episode_url = '%s%s' % (url_ep, self.show_id)\n log.debug('Episode URL: %s' % episode_url)\n try: data = urllib2.urlopen(episode_url).read()\n except urllib2.URLError: raise EpisodeNotFoundException(log.name, self.show)\n dom = ET.fromstring(data)\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns dataframe with mean profit per cluster based on a df given as an argument | def get_profit_per_cluster(df: pd.core.frame.DataFrame) -> pd.core.frame.DataFrame:
return pd.DataFrame(df.groupby(by='cluster')['profit'].mean(), columns=['profit']).reset_index() | [
"def get_mean_profit_per_class_from_train_df(df_profit_per_cluster_train: pd.core.frame.DataFrame) -> tuple:\n # if condition returns False, AssertionError is raised:\n assert len(df_profit_per_cluster_train) >= 3, \"Algorithm, returned less than 3 clusters.\"\n\n df_profit_per_cluster = df_profit_per_clus... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Based on a dataframe given as an argument, returns mean profit per class (buy, sell) in the training dataset: sorts the dataframe descending by profit, marks 1/3 of clusters with the highest profit as buy, marks 1/3 of clusters with the lowest profit as sell; if the data contains fewer than 3 different clusters, raises an AssertionError | def get_mean_profit_per_class_from_train_df(df_profit_per_cluster_train: pd.core.frame.DataFrame) -> tuple:
# if condition returns False, AssertionError is raised:
assert len(df_profit_per_cluster_train) >= 3, "Algorithm, returned less than 3 clusters."
df_profit_per_cluster = df_profit_per_cluster_train.s... | [
"def get_mean_profit_per_class_from_test_df(df_profit_per_cluster_test: pd.core.frame.DataFrame,\n buy_clusters_list: List[int], sell_clusters_list: List[int]) -> tuple:\n # if condition returns False, AssertionError is raised:\n assert len(buy_clusters_list) != 0 and... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Based on a dataframe given as an argument and lists of buy and sell clusters, returns mean profit per class (buy, sell) in the testing dataset. | def get_mean_profit_per_class_from_test_df(df_profit_per_cluster_test: pd.core.frame.DataFrame,
buy_clusters_list: List[int], sell_clusters_list: List[int]) -> tuple:
# if condition returns False, AssertionError is raised:
assert len(buy_clusters_list) != 0 and len(sel... | [
"def get_mean_profit_per_class_from_train_df(df_profit_per_cluster_train: pd.core.frame.DataFrame) -> tuple:\n # if condition returns False, AssertionError is raised:\n assert len(df_profit_per_cluster_train) >= 3, \"Algorithm, returned less than 3 clusters.\"\n\n df_profit_per_cluster = df_profit_per_clus... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Creates a strategy which can be used in the testing part of the script: reads preprocessed data split into training and testing sets, trains a SOM model, calculates mean profit per cluster in the training and testing datasets, and gets mean profits | def create_strategy(filename: str, columns_list: List[str], som_width: int, som_height: int, n_iter: int, sigma=0.3,
learning_rate=0.01) -> tuple:
# get prepared data
df, df_prepared, df_train, df_test, df_train_columns = get_data(filename, columns_list)
# train som
final_df_train, ... | [
"def create_train_test(option, transform, params, split=0.2):\r\n clip_im_dir = option.clip_im_dir\r\n matting_dir = option.matting_dir\r\n csv_path = option.csv_path\r\n \r\n print(\"create datasets\")\r\n \r\n \r\n data_df = pd.read_csv(csv_path)\r\n # data_df = MergeDataframe(clip_i... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Initialize the bzip2 package. | def __init__(self, system):
super(Bzip2106, self).__init__("bzip2-1.0.6", system, "bzip2-1.0.6.tar.gz") | [
"def __init__(__self__, *,\n type: str):\n pulumi.set(__self__, \"type\", 'BZip2')",
"def list_bzip2 (archive, compression, cmd, verbosity, interactive):\n return stripext(cmd, archive, verbosity)",
"def __init__(self, zipfile, entry=...) -> None:\n ...",
"def __init__(self):\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Helper function to check for blacklisted tokens | def check_blacklisted_token(token):
token = models.TokenBlackList.query.filter_by(token=token).first()
if token:
return True
return False | [
"def token_check_blacklist(decrypted_token):\n jti = decrypted_token[\"jti\"]\n return RevokedTokenModel.is_jti_blacklisted(jti)",
"def is_blacklisted(token):\n try:\n cur = conn.cursor()\n cur.execute(\n \"SELECT * FROM blacklist_tokens WHERE token = %s;\",\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Determine the anticipated host switch name for the logical switch represented by and store it in caller's . If an existing name is present, use it. | def _preprocess_resolve_switch_name(obj, kwargs):
# Determine the expected host_switch_name from the associated
# TransportZone. This must be done via API regardless of requested
# execution_type.
if kwargs.get('host_switch_name') is None:
# XXX(jschmidt): read() should be able to default to pro... | [
"def name(self):\n return self._switch[\"name\"]",
"def get_switch(self,host):\n switch_list = self.__graph_dict[host]\n switch_num = switch_list[0]\n return switch_num",
"def test_retrieve_context_connection_switch_control_switch_name_name(self):\n response = self.client.open... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Fit LDA from a scipy CSR matrix (X). | def fit_lda(X, vocab):
print('fitting lda...')
return LdaModel(matutils.Sparse2Corpus(X, documents_columns=False), num_topics=100, passes=1, iterations=500,
chunksize=1000, update_every=1, id2word=dict([(i, s) for i, s in enumerate(vocab)])) | [
"def fit(self, X):\n\n X_sparse = X.copy().astype(np.float64)\n self.X_sparse = X_sparse\n self._fit()\n return self",
"def fit(self, dtm, seed_topics=None, seed_confidence=None):\n # check if guided\n if (bool(seed_topics) is False) and (bool(seed_confidence) is False):\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Used at initialization to update all scan groups with their database values | def load_all_groups(self):
for _, group in self.scopes.items():
group.update() | [
"def updateGroups(self):\n\t\tself.groups = self.extractGroups()",
"def __loadGroups(self):\n\t\tcursor = self.__getCursor()\n\t\tcursor.execute('SELECT `id`,`name` FROM `groups`')\n\t\tresult = cursor.fetchall()\n\t\tfor i in result:\n\t\t\tself.groups[str(i['name'])] = int(i['id'])\n\t\tcursor.close()",
"def ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Collects a single snapshot of GPU resource utilization via the nvidia-smi command; idempotent, supports multiple GPUs and repeated calls. Mainly reports device_id, uuid, gpu_util, mem_total, mem_used, mem_free, driver, gpu_name, serial, display_mode, display_active, temp_gpu | def get_gpus():
if platform.system() == "Windows":
# If the platform is Windows and nvidia-smi
# could not be found from the environment path,
# try to find it from system drive with default installation path
nvidia_smi = spawn.find_executable("nvidia-smi")
if nvidia_smi is N... | [
"def _nvidia_smi():\n\n status = check_output(['nvidia-smi', \n '--query-gpu=utilization.gpu,utilization.memory', \n '--format=csv'])\n status = pd.read_csv(StringIO(status.decode('utf-8')))\n \n # Reformat column names.\n # (Need the col.strip() be... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Generate header for oauth2 | def oauth_headers(oauth):
import base64
encoded_credentials = base64.b64encode(('{0}:{1}'.format(oauth.client_id, oauth.client_secret)).encode('utf-8'))
headers = {
'Authorization': 'Basic {0}'.format(encoded_credentials.decode('utf-8')),
'Content-Type': 'application/x-www-form-urlen... | [
"def generate_oauth_headers(access_token: str) -> dict:\n return {'Authorization': 'Bearer ' + access_token}",
"def __build_auth_header(self, oauth):\n result = \"OAuth \"\n values = []\n for key, value in oauth.items():\n values.append(key + \"=\\\"\" + urllib.parse.quote_plus(... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Creates an access token from the supplied oauth2.0 object | def create_access_token(oauth):
#create parameters for API authorization
redirect_uri = 'oob'
params = {'client_secret': oauth.client_secret,
'redirect_uri': redirect_uri,
'response_type': 'code'}
#store the access code
url = oauth.get_authorize_url(**params)
#open a web browser to get access token ... | [
"def create_oauth2_access_token(self):\n if not isinstance(self.session, DropboxSession):\n raise ValueError(\"This call requires a DropboxClient that is configured with an \"\n \"OAuth 1 access token.\")\n url, params, headers = self.request(\"/oauth2/token_from... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
API query to return all available players, sorted by number of fantasy points\n | def available_players_query():
#start the calculation timer
calc_start = time.time()
#initialize everything
last_first_names = []
full_names = []
player_key = []
player_pos = []
start = 1
done = False
#this is where the data is actually created
#loop thru to get all of the players available
while(not done... | [
"def query_players():\n return api.ChainballCentralAPI.central_api_get(\n sub_api=\"api\", path=\"players\"\n )",
"def player_stats_query(week, player_list, session=s): \n #initialize lists\n pos_list = []\n team_list = []\n \n #cycle thru each player that is currently available\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the player stats for the given week\n Takes the player list as an argument so the function can be used for available players and rostered players\n Only works for offensive players (QB, WR, RB, TE) right now | def player_stats_query(week, player_list, session=s):
#initialize lists
pos_list = []
team_list = []
#cycle thru each player that is currently available
for player in avail_player_key:
#build the API url for the unique player key
url_player = base_query_url+'league/'+leagueI... | [
"def add_player_week(pp, season, week):\n players[pp.player_id]['seasons'][str(season)]['weeks'][str(week)] = {'team': '',\n 'opponent': '',\n 'at_home': '', # boolean\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Build and display svg view for current tab. | def refresh_svg_canvas(self):
if self.ui.tabWidget.currentIndex() == 0:
self.ui.svg_canvas.build_schematic()
self.ui.svg_canvas.viewport().update()
elif self.ui.tabWidget.currentIndex() in (1,2):
self.ui.svg_canvas.build_pcb()
self.ui.svg_canvas.viewport().update()
else:
raise ... | [
"def show(self):\n # Create a flamegraph svg based on the data\n self._make()\n\n # Open firefox\n username = os.environ['SUDO_USER']\n subprocess.call(\n [\"su\", \"-\", \"-c\", \"firefox \" + self.svg_temp_file, username])",
"def _repr_svg_(self):\n if not I... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Adds extra height to schematic body | def on_body_height_add(self, val):
val = max(0, int(val))
self.mdl.cmp.s_add_height = val
self.refresh_svg_canvas() | [
"def adjust_body_height(self):\n self.set_body_height(self.full_body_height - self.head_height)\n self.head_transformation.translation = (0, self.body_height/2 + self.head_height/2, 0)\n if self.has_tail:\n self.tail_transformation.translation = (0, -self.body_height/2, 0)",
"def b... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Adds extra width to schematic body | def on_body_width_add(self, val):
val = max(0, int(val))
self.mdl.cmp.s_add_width = val
self.refresh_svg_canvas() | [
"def _extra_width(self) -> int:\n width = 0\n if self.box and self.show_edge:\n width += 2\n if self.box:\n width += len(self.columns) - 1\n return width",
"def body_resize(self):",
"def width(width):\n #assert 0 < width\n self.turtle.width... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Run figure's event loop while listening to interactive events. The events listed in event_names are passed to handler. This function is used to implement `.Figure.waitforbuttonpress`, `.Figure.ginput`, and `.Axes.clabel`. | def blocking_input_loop(figure, event_names, timeout, handler):
if figure.canvas.manager:
figure.show() # Ensure that the figure is shown if we are managing it.
# Connect the events to the on_event function call.
cids = [figure.canvas.mpl_connect(name, handler) for name in event_names]
try:
... | [
"def run(self):\n cid_up = self.fig.canvas.mpl_connect('button_press_event', self.in_box)\n self.fig.canvas.mpl_connect('button_press_event', self.on_click)\n\n plt.show()",
"def startEventHandling():\n if not _nativeThreading:\n if _graphicsManager._handlingEvents == 'No':\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Calculates the perimeter given the bottom length, top length, 1st side length, and 2nd side length. | def perimeter(self):
return self.sidelength1 + self.sidelength2 + self.baselength1 + self.baselength2 | [
"def perimeter(self):\n return sum(self.side_length(k) for k in range(self.number_sides))",
"def perimeter(self):\r\n option = input('Lengths or coordinate points? \\'l\\' for lengths, \\'c\\' for coordinates')\r\n if option == 'l':\r\n x = float(input('Input length1: '))\r\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
labels with round numbers | def init_round_numbers(self):
for round_num in range(1, 13):
lbl_round_num = tk.Label(self.master, text=str(round_num), font='courier 10 bold',
fg='green', pady=2)
lbl_round_num.grid(row=round_num+1, column=0)
row = 14
for trump ... | [
"def test_label(self, op, decimals, expected):\n assert op.label(decimals=decimals) == expected",
"def label(self, margin):\n if self.alphaL == None or self.alphaR == None:\n self.label = \"N\"\n elif abs(self.alphaL - self.alphaR) <= margin:\n self.label = \"S\"\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
command button that calculates scores | def init_button_calc(self):
btn_calc = tk.Button(self.master, text='calculate', font='courier 10 bold',
fg='purple', command=self.update_scores)
btn_calc.grid(row=20, column=1, columnspan=3, sticky=tk.W+tk.E, pady=5) | [
"def updateScore(self):\n self.guiCells[0][-1].configure(text=\"SCORE\\n{}\".format(self.score))\n self.update_idletasks()",
"def update_score():\n pass",
"def enter_game_scores():\n pass",
"def btnClicked(self):\n\n sender = self.sender()\n\n # Dice method\n if se... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
calculate and display scores for each valid bid x trick pair | def update_scores(self):
totals = [0, 0, 0, 0]
for player in range(0, 4):
for round_num in range(0, 17):
try:
bid = int(self.spin_bids[player][round_num].get())
tricks = int(self.spin_tricks[player][round_num].get())
... | [
"def update_scores(self):\r\n print (self.rubber.vulnerable[sbridge.WEST_EAST],\r\n self.rubber.vulnerable[sbridge.NORTH_SOUTH],\r\n self.rubber.above[sbridge.WEST_EAST],\r\n self.rubber.above[sbridge.NORTH_SOUTH],\r\n self.rubber.below[sbridge.WEST_EAST],\r\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Connect to address and return the socket object. Convenience function. Connect to address (a 2-tuple ``(host, port)``) and return the socket object. Passing the optional timeout parameter will set the timeout on the socket instance before attempting to connect. If no timeout is supplied, the global default timeout setting is used. | def create_connection(address, timeout=_GLOBAL_DEFAULT_TIMEOUT):
msg = "getaddrinfo returns an empty list"
host, port = address
for res in getaddrinfo(host, port, 0, SOCK_STREAM):
af, socktype, proto, canonname, sa = res
sock = None
try:
sock = socket(af, socktype, proto... | [
"def create_connection(\n address, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, socket_options=None\n):\n\n host, port = address\n\n if host.startswith(\"[\"): # pragma: nocover\n host = host.strip(\"[]\")\n err = None\n\n for res in socket.getaddrinfo(\n host,\n port,\n 0,\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
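
The create_connection row above mirrors Python's standard-library connection helper. A minimal usage sketch under that assumption — the host, port, and timeout values are illustrative, not taken from the row itself:

    import socket

    # Open a TCP connection with a 5-second timeout; the address is a 2-tuple (host, port).
    sock = socket.create_connection(("example.com", 80), timeout=5)
    try:
        sock.sendall(b"HEAD / HTTP/1.0\r\nHost: example.com\r\n\r\n")
        print(sock.recv(1024))
    finally:
        sock.close()
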
Writes refl and exper files for each experiment modeled during the ensemble refiner | def write_output_files(Xopt, LMP, Modelers, SIM, params):
opt_det = geometry_refiner.get_optimized_detector(Xopt, LMP, SIM)
# Store the hessian of negative log likelihood for error estimation
# must determine total number of refined Fhkls and then create a vector of 0's of that length
num_fhkl_param = ... | [
"def save_reconstructions(self):\n data = self.gather_samples(self.test_loader, self.sample_count).to(self.device)\n with torch.no_grad():\n recons = self.model.reconstruct(data)\n texts = [self.tensor_to_string(data[i]) + '\\n' + self.tensor_to_string(recons[i])\n fo... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Given a URL, try to return its associated region, bucket, and key names based on this object's endpoint info as well as all S3 endpoints given in the configuration. | def resolve_url_to_location(self, url):
parsed_url = six.moves.urllib.parse.urlparse(url)
if not parsed_url.scheme:
parsed_url = six.moves.urllib.parse.urlparse('http://' + url)
parsed_own_url = six.moves.urllib.parse.urlparse(self.endpoint)
bucket, key = self.__match_path(pa... | [
"def urlparse(self, url):\n _url = deepcopy(url)\n if url[0:5] == 'https':\n _url = self.https_to_s3(url)\n if _url[0:5] != 's3://':\n raise Exception('Invalid S3 url %s' % _url)\n\n url_obj = _url.replace('s3://', '').split('/')\n\n # remove empty items\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
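
The resolve_url_to_location row above reduces to splitting an S3-style URL into bucket and key parts. A minimal sketch of that split using only the standard library; the URL, bucket, and key below are hypothetical:

    from urllib.parse import urlparse

    url = "http://my-bucket.s3.amazonaws.com/path/to/key"
    parsed = urlparse(url)
    # For virtual-hosted-style URLs the netloc carries the bucket and the path carries the key.
    bucket = parsed.netloc.split(".")[0]
    key = parsed.path.lstrip("/")
    print(bucket, key)  # my-bucket path/to/key
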
construct the P_kd (kappaDelta) matrix such that kappa = P_kd Delta, equivalent to equations 31 & 32 in Simon 2009, using Delta = delta/a as in Hu and Keeton 2003 | def construct_P_kd(N1,N2,z_kappa,z_Delta,
cosmo=None,**kwargs):
if cosmo==None:
cosmo = Cosmology(**kwargs)
Nj = len(z_kappa)
Nk = len(z_Delta)
if max(z_Delta) > max(z_kappa):
print "-------"
print "WARNING: construct_P_kd: singular matrix [ min(z_kappa) < min... | [
"def em_epsilon_cdp(epsilon, delta, k):\n if delta <= 0:\n return epsilon / k\n else:\n log_delta = np.log(1 / delta)\n return max(\n epsilon / k,\n np.sqrt((8 * log_delta + 8 * epsilon) / k) -\n np.sqrt(8 * log_delta / k))",
"def calc_kappa1(T_K):\n kappa1 = 10.**(0.198 - 444./... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Show that basic numpy operations with Column behave sensibly | def test_numpy_ops(self):
arr = np.array([1, 2, 3])
c = Column('a', arr)
eq = c == arr
assert np.all(eq)
assert len(eq) == 3
assert type(eq) == Column
assert eq.dtype.str == '|b1'
eq = arr == c
assert np.all(eq)
lt = c - 1 < arr
a... | [
"def _modify_columns(self, cols, X, y=None):",
"def view_as_column(x):\n if x.ndim == 1:\n x = x[:, None]\n elif x.ndim == 2 and x.shape[0] == 1:\n x = x.T\n return x",
"def _create_metric_column(\n data: pd.DataFrame,\n column_a: str,\n column_b: str,\n numpy_method: str,\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Write a recording to file in MDA format. | def write_recording(
recording,
save_path,
params=dict(),
raw_fname="raw.mda",
params_fname="params.json",
geom_fname="geom.csv",
dtype=None,
**job_kwargs,
):
job_kwargs = fix_job_kwargs(job_kwargs)
assert recording.get_num_segments() =... | [
"def save_audio_file(self):\n\n # has not recorded audio\n if not self.is_audio_record:\n print(\"***you did not set the record flag!\")\n return\n\n import soundfile\n\n # save audio\n soundfile.write('{}out_audio.wav'.format(self.mic_params['plot_path']), self.collector.x_all, self.featur... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Method to create embeddings for documents by encoding their image. | def encode(
self,
docs: Optional[DocumentArray] = None,
parameters: dict = {},
*args,
**kwargs,
) -> None:
if not docs:
return
batch_generator = docs.batch(
traversal_paths=parameters.get('traversal_paths', self.traversal_paths),
... | [
"def create_oembed(self):\n logger.debug('document {pk:%s, mimetype:%s} init oembed' % (self.pk, self.mimetype))\n if self.mimetype == 'application/pdf' and self.attachment and hasattr(self.attachment, 'path'):\n url = '%s%s' %(settings.MILLER_SETTINGS['host'], self.attachment.url)\n self.data['html... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that the digits are classified correctly by a classifier. | def __test_digits(self, X, y, clf):
self.assertEqual(len(X), len(y))
correct = 0
for i in xrange(len(y)):
expected = y[i]
prediction = clf.classify([X[i]])[0]
if expected == prediction:
correct += 1
self.assertGreaterEqual(correct, sel... | [
"def test_classify(self):\n classifiers, estimates =\\\n ada_boost.train_dataset(self.larger_matrix,\n self.larger_class_labels,\n 9)\n data_to_classify = [1, 0.5]\n classifications = ada_boost.classify(data_to_cla... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Load training data from digits.png | def load_digits(cls):
gray = cls.imgfile_to_grayscale(cls.DIGITS_FILE)
# Now we split the image to 5000 cells, each 20x20 size
cells = [np.hsplit(row, 100) for row in np.vsplit(gray, 50)]
# Make it into a Numpy array. It size will be (50,100,20,20)
x = np.array(cells)
... | [
"def load_digits():\n \n images, target = [], []\n for image_file in digit_image_filenames:\n image = cv2.imread(image_file)\n if image is None:\n raise RuntimeError(\"Failed to read the image file '{}'\".format(\n image_file))\n image = cv2.cvtColor(image, cv... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Using the public method mount to test _get_drive_mount_point_name | def test_get_drive_mount_point_name_unique_id_None(self):
try:
tmpdir = mkdtemp()
root = os.path.join(tmpdir, 'mnt/gluster-object')
drive = 'test'
_init_mock_variables(tmpdir)
gfs._allow_mount_per_server = True
self.assertTrue(gfs.mount... | [
"def test_get_drive_mount_point_name_unique_id_exists(self):\n try:\n tmpdir = mkdtemp()\n root = os.path.join(tmpdir, 'mnt/gluster-object')\n drive = 'test'\n\n _init_mock_variables(tmpdir)\n gfs._allow_mount_per_server = True\n gfs._uniqu... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Using the public method mount to test _get_drive_mount_point_name when _unique_id is already defined | def test_get_drive_mount_point_name_unique_id_exists(self):
try:
tmpdir = mkdtemp()
root = os.path.join(tmpdir, 'mnt/gluster-object')
drive = 'test'
_init_mock_variables(tmpdir)
gfs._allow_mount_per_server = True
gfs._unique_id = 0
... | [
"def test_get_drive_mount_point_name_unique_id_None(self):\n try:\n tmpdir = mkdtemp()\n root = os.path.join(tmpdir, 'mnt/gluster-object')\n drive = 'test'\n\n _init_mock_variables(tmpdir)\n gfs._allow_mount_per_server = True\n self.assertT... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
read the keyboard definitions from the tool's own settings file(s) and return them for writing to the csv file | def buildcsv(settnames, page, showinfo=True):
shortcuts = collections.OrderedDict()
fdesc = ("File containing keymappings", "File containing command descriptions")
## pdb.set_trace()
for ix, name in enumerate(settnames):
try:
initial = page.settings[name]
except KeyError:
... | [
"def writeSettings(winnowargs): # pragma: no cover\n a = winnowargs['analysis']\n if winnowargs['beta'] is not None:\n a += 'WithBeta'\n else:\n a += 'WithoutBeta'\n with open(winnowargs['filename'] + \"_parameters.txt\", 'wb') as openFile:\n openFileWriter = csv.writer(openFile, d... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the graph complement of G. | def complement(G):
R = G.__class__()
R.add_nodes_from(G)
R.add_edges_from(((n, n2)
for n, nbrs in G.adjacency()
for n2 in G if n2 not in nbrs
if n != n2))
return R | [
"def complement(G):\n\n nset = set(G.nodes())\n n_nodes = G.order()\n n_edges = n_nodes * (n_nodes - 1) - G.size() + 1\n \n cmp_edges = ((u, v) for u in G.nodes()\n\t\t for v in nset - set(G.successors(u)))\n deg = make_deg(n_nodes, cmp_edges)\n cmp_edges = ((u, v) for u in G.nodes()\n\t\t ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
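
The complement row above matches the behaviour of networkx's own complement helper. A short usage sketch, assuming networkx is installed; the example graph is arbitrary:

    import networkx as nx

    G = nx.Graph([(1, 2), (2, 3)])
    # The complement contains exactly the node pairs that are NOT edges in G.
    H = nx.complement(G)
    print(sorted(H.edges()))  # [(1, 3)]
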
Returns the reverse directed graph of G. | def reverse(G, copy=True):
if not G.is_directed():
raise nx.NetworkXError("Cannot reverse an undirected graph.")
else:
return G.reverse(copy=copy) | [
"def reverse(self):\n H = DiGraph(multiedges=self.allows_multiple_edges(), loops=self.allows_loops())\n H.add_vertices(self)\n H.add_edges( [ (v,u,d) for (u,v,d) in self.edge_iterator() ] )\n name = self.name()\n if name is None:\n name = ''\n H.name(\"Reverse of... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Reads an INI file containing domain type definitions and fills them into a TypeDefs object. | def readDomainTypes(self, domainTypeFilePath):
result = TypeDefs()
inifile = IniFile(domainTypeFilePath)
for section in inifile.getSections():
if section.endswith("(n)"):
td = TypeDef(section[:-3], withLength = True)
else:
td = TypeDef(sect... | [
"def parse_domain(self, domainfile):\n\n with open(domainfile) as dfile:\n dfile_array = self._get_file_as_array(dfile)\n #Deal with front/end define, problem, :domain\n if dfile_array[0:4] != ['(', 'define', '(', 'domain']:\n print('PARSING ERROR: Expected (define (domain ... at start of domain ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |