| query (string, 9 to 9.05k chars) | document (string, 10 to 222k chars) | negatives (list, 19 to 20 items) | metadata (dict) |
|---|---|---|---|
Single click in file listbox. Move file to "filename" combobox | def singleselectfile(self):
cs=self.component('filenamebox').curselection()
if cs!=():
value=self.component('filenamebox').get(cs)
self.setfilename(value) | [
"def double_click(self, event):\n filepath = self.listbox.get(self.listbox.curselection())\n loc = Path(filepath)\n folder = os.path.dirname(loc)\n subprocess.Popen(f'explorer /select, \"{loc}\"')",
"def insert_files():\n filenames = filedialog.askopenfilenames(multiple = True)\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Take the selected directory from the dirnamebox into the dirname | def selectdir(self):
cs=self.component('dirnamebox').curselection()
if cs!=():
value=self.component('dirnamebox').get(cs)
dir=self['directory']
if not dir:
dir=os.getcwd()
if value:
if value=='..':
dir=os.path.split(dir)[0]
else:
dir=os.path.join(dir,value)
self.configure(di... | [
"def select_dir(self, entry: Entry):\n entry.delete(0, END)\n filename = filedialog.askdirectory()\n entry.insert(0, filename)",
"def choose_dir(self):\n self.output_dir.set(filedialog.askdirectory())",
"def ask_directory():\n path = askdirectory()\n folder_selected = os.path.a... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Validation function. Should return 1 if the filename is valid, 0 if invalid. May pop up dialogs to tell the user why. Especially | def validate(self,filename):
return 1 | [
"def validate(cls, filename):\n \n filename = (\n re.sub(cls._INVALID_CHARS_PATTERN, \"\", filename)\n .strip(\" \")\n .rstrip(\".\")\n )\n \n root, ext = os.path.splitext(filename)\n # For reserved names, the comparison must be case-insensitive\n # (because Windows has case-... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the caller's (`__file__`) directory name | def get_file_dirname() -> Path:
frame = inspect.stack()[1]
module = inspect.getmodule(frame[0])
assert module
assert module.__file__
return Path(module.__file__).parent.absolute() | [
"def calledModuleName() -> str:\n return Path(pywikibot.argvu[0]).stem",
"def get_current_dir():\n return os.path.dirname(os.path.abspath(getsourcefile(lambda: 0)))",
"def _get_calling_script():\n stack = traceback.extract_stack()\n\n script_path = None\n for trace in stack:\n if trace[2] ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Enable termination and specify the termination character. Termination is currently only implemented for receive. When the terminator is received, | def enableTermination(self, terminator: bytes = ...) -> None:
... | [
"def found_terminator(self):\n line = ''.join(self.data)\n self.data = [] # clears the buffer\n try:\n self.room.handle(self, line)\n except EndSession:\n self.handle_close()",
"def handle_ec(self, byte):\n self.log.debug('IAC EC: Erase Character')",
"de... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
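The `enableTermination` stub above carries no body. As a rough, hypothetical illustration of terminator-based receive (assumed semantics only, not the library's actual implementation; `read_byte` is an invented callable):

```python
def read_until(read_byte, terminator: bytes = b'\n') -> bytes:
    """Accumulate bytes from read_byte() until the terminator is seen."""
    buf = bytearray()
    while True:
        chunk = read_byte()   # hypothetical: returns one byte, b'' on EOF
        if not chunk:
            break             # stream closed before terminator arrived
        buf += chunk
        if buf.endswith(terminator):
            break
    return bytes(buf)

# Usage with a fake byte source:
data = iter([b'h', b'i', b'\n'])
print(read_until(lambda: next(data, b'')))  # b'hi\n'
```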
Specify the size of the input buffer. Specify the amount of data that can be stored before data from the device is returned to Read. If you want data that is received to be returned immediately, set this to 1. If the buffer is not filled before the read timeout expires, all data that has been received so far will be re... | def setReadBufferSize(self, size: int) -> None:
... | [
"def receive_bytes(self, size):\n time_start = datetime.now()\n total_data = \"\"\n last_read = \"\"\n while True:\n last_read = self.request.recv(size)\n total_data += last_read\n size -= len(last_read)\n if size <= 0:\n break\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Specify the size of the output buffer. Specify the amount of data that can be stored before being transmitted to the device. | def setWriteBufferSize(self, size: int) -> None:
... | [
"def set_buffer_size(self, buffer_size):\n self.buffer_size = buffer_size",
"def getBufferSize(self) -> \"size_t\":\n return _coin.SoOutput_getBufferSize(self)",
"def setReadBufferSize(self, size: int) -> None:\n ...",
"def set_max_output_buffer(self, *args):\n return _wmbus_swig.w... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Adds the entities of pair_list and their classes to current_result. The format of the pair_list is the same as the result of the script download_class_ids | def add_entities_from_list(pair_list, class_name, current_result):
for entity, _ in pair_list[1:]: # Ignore the first entry
entity = entity.replace(URI_YAGO, '')
current_result[entity].add(class_name)
if class_name in PERSON_MAPPINGS:
current_result[entity].add(PERSON_MAPPINGS... | [
"def pair_list(self):\n global RUNNING\n project_dir = self.prj_dir\n foto_log = project_dir + '/foto_log'\n shp_dir = self.dir_generate(project_dir + '/shp')\n RUNNING = True\n with open(shp_dir + '/matcher.txt', 'w') as txt:\n txt.write('weg_num,foto_id,Color,D... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Add entities from all the files in the directory to current_result. | def add_entities_from_directory(input_dirname, current_result):
# Read files
filenames = [f for f in listdir(input_dirname)
if isfile(join(input_dirname, f))]
for filename in filenames:
if not ('.' in filename and filename.split('.')[1] == 'pickle'):
# Not a pickle file
... | [
"def __mergeResultFiles():\n\t# Get path of txt resutls\n\tresults_path = NEST.GetKernelStatus()['data_path']\n\t# Create structure - the dict of a lists. Main file (string) : child files (list)\n\tfiles_map = defaultdict(list)\n\t# Build tree of rough (threaded) files\n\tfiles_list = [file for file in os.listdir(r... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Helper function to initialize/load the pretrained CIFAR ResNet | def load_pretrained_cifar_resnet(flavor=32,
return_normalizer=False,
manual_gpu=None):
# Resolve load path
valid_flavor_numbers = [110, 1202, 20, 32, 44, 56]
assert flavor in valid_flavor_numbers
weight_path = os.path.join(RESNET_WEIGHT_... | [
"def resnet20(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [3,3,3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet20']))\n return model",
"def post_init(self):\n import onnxruntime\n self.model_name = self.raw_model_path.split('/... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Parse plate file into dictionary | def readPlate(mypath):
plate = {}
with open(mypath, "r") as fh:
for l in fh:
l = l.rstrip("\n")
# Order of elements are:
# 1) well
# 2) mainsource
# 3) compound
# 4) concentration
well, ms, cpd, conc = l.split("\... | [
"def _parsefile(self, rngpath: str) -> dict:\n\n # TODO check it's a rng file (avoid utf-8 encoding errors)\n try:\n with open(rngpath, 'r') as file:\n r = [v.split() for v in file]\n except (IOError, FileNotFoundError):\n raise ReadError('Error opening rng ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
test reading the crossref config file | def test_elifecrossref_config(self):
crossref_config = crossref.elifecrossref_config(settings_mock)
self.assertIsNotNone(crossref_config) | [
"def test_config_get(self):\n pass",
"def test_read_config(self):\n config = _read_config({'store_config': True,\n 'fp': os.getcwd()})\n self.assertEqual(len(config), 5)",
"def test_config_advanced_file(self):\n webhook_url = \"http://discord.webhook.url... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
test finding a pub date in the article dates | def test_article_first_pub_date(self):
crossref_config = crossref.elifecrossref_config(settings_mock)
# build an article
articles = crossref.parse_article_xml([self.good_xml_file], self.directory.path)
article = articles[0]
# get the pub date
pub_date_object = crossref.ar... | [
"def test_parse_future_dated(self):\n for article in self.site.articles:\n self.assert_(article.headers['date'] <= datetime.today())",
"def test_2014_primary_date(self):\n self.assertEqual(\n date(2014, 6, 3),\n calaccess_processed.get_expected_election_date(2014, 'P... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
test approving when there is no pub date | def test_approve_to_generate_no_date(self):
crossref_config = crossref.elifecrossref_config(settings_mock)
# build an article
articles = crossref.parse_article_xml([self.good_xml_file], self.directory.path)
article = articles[0]
# reset the dates
article.dates = {}
... | [
"def test_edit_missingyear(self):\n a = baker.make_recipe(\"makeReports.announcement\")\n r = self.client.post(reverse('makeReports:edit-announ',kwargs={'pk':a.pk}),{\n 'text':'There are some technical difficulties. Please be paitent.',\n 'expiration_month':3,\n 'expir... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
test approving a list of files based on the pub date | def test_approve_to_generate_list(self):
crossref_config = crossref.elifecrossref_config(settings_mock)
# build an article
article = crossref.parse_article_xml([self.good_xml_file], self.directory.path)[0]
# make a fake article with a future pub date
future_article = crossref.par... | [
"def prev_pubs_file_check(prev_pubs):\n \n tracker_validate(instance=prev_pubs, schema=tracker_schema.publications_schema, format_checker=jsonschema.FormatChecker())",
"def is_published(file_path):\n\n return False",
"def revisionfiles(unrestricted=False):",
"def test_monthly_archive(build, output_di... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Function to make a new, random ball. | def make_ball():
ball = Ball()
# Starting position of the ball.
# Take into account the ball size so we don't spawn on the edge.
ball.x = random.randrange(BALL_SIZE, SCREEN_WIDTH - BALL_SIZE)
ball.y = random.randrange(BALL_SIZE, SCREEN_HEIGHT - BALL_SIZE)
# Speed and direction of rectangle
... | [
"def make_ball():\n ball = Ball()\n # Starting position of the ball.\n # Take into account the ball size so we don't spawn on the edge.\n ball.x = random.randrange(BALL_SIZE, SCREEN_WIDTH - BALL_SIZE)\n ball.y = random.randrange(BALL_SIZE, SCREEN_HEIGHT - BALL_SIZE)\n \n # Speed and direction of r... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Find movies in Cloudant database. | def find_movies(search_string: str) -> Dict[int, str]:
movie_db = cloudant_client[CL_MOVIEDB]
index_name = 'movie-search-index'
end_point = '{0}/{1}/_design/{2}/_search/{2}'.format ( CL_URL, CL_MOVIEDB, index_name )
data = {
"q": "name:" + search_string,
"limit"... | [
"def movie():\n return app.session.query(Movie)",
"def list_movies(self):\n return self.__repo.get_all()",
"def search():\n app.logger.info('Searching for %s' % request.args.get('q'))\n movie = request.args.get('q')\n m = i.search_movie(movie)\n resp = make_response(json.dumps(\n [{\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Save user's rated movie | def save_rating(movie_id: int, user_id: str, rating: Optional[float]):
db = cloudant_client[CL_RATINGDB]
current_milli_time = lambda: int(round(time.time() * 1000))
id = 'user_{0}/movie_{1}'.format(user_id, movie_id)
with Document(db, id) as document:
if rating:
... | [
"def movie_rated(movie_id):\n\n\n added_rating = request.form.get(\"rate_score\")\n user_id = User.query.filter_by(email=session[\"login\"]).first().user_id\n\n\n all_movies_rated_by_user = db.session.query(Rating.movie_id, Rating.score).filter_by(user_id=user_id).all()\n \n for movie_tuple in all_movie... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the timestamp at which the latest recommendations were generated | def get_latest_recommendation_timestamp() -> datetime:
db = cloudant_client[CL_RECOMMENDDB]
# get recommendation_metadata document with last run details
try:
doc = db['recommendation_metadata']
doc.fetch()
except KeyError:
print('recommend... | [
"def _get_timestamp(self):\n return datetime.datetime.now()",
"def get_last_timestamp(self):\n if self.halo_module == \"scans\":\n url = \"/v1/scans?sort_by=created_at.desc&per_page=1\"\n elif self.halo_module == \"events\":\n url = \"/v1/events?sort_by=created_at.desc&p... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Extract slot value from a tracker. | def _get_slot(tracker: Tracker, slot: str) -> str:
slot = tracker.get_slot(slot)
if slot is None:
raise SlotNotFound(slot)
return slot | [
"def value_at(self, frame):\n return self.value[frame // self.resolution]",
"def _getSlotValue(name, slotData):\n for slotFrame in slotData[::-1]:\n if slotFrame is not None and name in slotFrame:\n return slotFrame[name]\n else:\n raise UnfilledSlot(name)",
"def decode_slo... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Visit a function call. We expect every logging statement and string format to be a function call. | def visit_Call(self, node):
# CASE 1: We're in a logging statement
if self.within_logging_statement():
if self.within_logging_argument() and self.is_format_call(node):
self.violations.append((node, STRING_FORMAT_VIOLATION))
super(LoggingVisitor, self).generic_... | [
"def log_call(func):\n @wraps(func)\n def logged(*args, **kawrgs):\n header = \"-\" * len(func.__name__)\n print(green(\"\\n\".join([header, func.__name__, header]), bold=True))\n return func(*args, **kawrgs)\n return logged",
"def log_function_call_and_only_first_argument(func):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Process binary operations while processing the first logging argument. | def visit_BinOp(self, node):
if self.within_logging_statement() and self.within_logging_argument():
# handle percent format
if isinstance(node.op, Mod):
self.violations.append((node, PERCENT_FORMAT_VIOLATION))
# handle string concat
if isinstance(n... | [
"def process_log(self):\n self.logfile.seek(self.pos)\n line = self.logfile.readline()\n while line:\n self.parse_next_line(line)\n line = self.logfile.readline()\n self.pos = self.logfile.tell()",
"def log(self,\n byteName: str,\n arguments,... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Does a function call use format? | def is_format_call(self, node):
try:
return node.func.attr == "format"
except AttributeError:
return False | [
"def format(stringArg=\"string\"):\n pass",
"def fn(fnid, fformat, *args):",
"def format_call(__fn, *args, **kw_args):\n try:\n name = __fn.__name__\n except AttributeError:\n name = str(__fn)\n args = [ repr(a) for a in args ]\n args.extend( n + \"=\" + repr(v) for n, v in kw_args.... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
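For illustration, the `node.func.attr == "format"` test from `is_format_call` can be exercised on a freshly parsed AST; the logging snippet below is arbitrary:

```python
import ast

tree = ast.parse('log.info("{}".format(x))')
outer = tree.body[0].value   # the log.info(...) Call node
inner = outer.args[0]        # the "{}".format(x) Call node
print(isinstance(inner, ast.Call) and inner.func.attr == "format")  # True
```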
set mass of LJ particle | def setmass(self,mass):
if isinstance(mass, float):
self.mass = mass
else:
print "1st arg should be float"
raise TypeError | [
"def set_mass():\n function = LegacyFunctionSpecification()\n function.must_handle_array = True\n function.addParameter('index_of_the_particle', dtype='int32', direction=function.IN,\n description = \"Index of the particle for which the state is to be updated. This index must have be... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return ptype1 of LJ particle | def get_ptype1(self):
return self.ptype1 | [
"def Ty(self):\n return self.P[1, 3]",
"def particle_type(self):\n return copy.deepcopy(self._particle_type)",
"def identify_particle(self,particle):\n\n #print particle.line[:-1],\n for name in ['begin','jet','miss','electron','photon']: #to fastenize the test in most of the... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
set Harmonic parameters E = kb( r - r0 )^2 | def setharmonic(self, r0, kb):
if isinstance(r0, float):
self.r0 = r0
else:
print "1st arg should be float"
raise TypeError
if isinstance(kb, float):
self.kb = kb
else:
print "2nd arg should be float"
rais... | [
"def __dowson_hamrock_parameters(r_eff, param_g, param_u, param_w):\n param_ehd = r_eff * param_g ** 0.53 * param_u ** 0.67 * param_w ** -0.067\n return param_ehd",
"def E_K(E_inv_cm):\n E_hz = E_inv_cm*c # (1/cm)*(cm/s)\n E_ergs = h*E_hz # ergs\n return E_ergs/k # K",
"def setharmonic(self, theta0... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
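For context, the harmonic bond energy named in the query above, E = kb(r - r0)^2, is a one-liner to evaluate; this sketch reuses the row's parameter names `r0` and `kb`, and the sample numbers are made up:

```python
def harmonic_energy(r, r0, kb):
    """Harmonic bond energy E = kb * (r - r0)**2."""
    return kb * (r - r0) ** 2

# Illustrative values only: a bond stretched 0.1 units past equilibrium.
print(harmonic_energy(r=1.6, r0=1.5, kb=350.0))  # ~3.5
```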
set Harmonic angle parameters E = kb( theta - theta_0 )^2 | def setharmonic(self, theta0, kb):
if isinstance(theta0, float):
self.theta0 = theta0
else:
print "1st arg should be float"
raise TypeError
if isinstance(kb, float):
self.kb = kb
else:
print "2nd arg should be float"
... | [
"def alpha(phi,mu,ct,k,chi,dx,dt):\n return ((158*phi*mu*ct)/k)*np.exp(2*chi)*(dx**2)/dt",
"def oh2004(mv, ks, theta):\n p = 1 - (2.*theta/np.pi)**(0.35*mv**(-0.65)) * np.exp(-0.4 * ks**1.4)\n q = 0.095 * (0.13 + np.sin(1.5*theta))**1.4 * (1-np.exp(-1.3 * ks**0.9))\n a = 0.11 * mv**0.7 * np.cos(theta)... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Append ljtype object to this container. | def put(self, ljtyp ):
if isinstance(ljtyp, ljtype):
self.maxgid += 1
self.ljtypes[self.maxgid] = copy.deepcopy(ljtyp)
else:
print "Attempting to add non-ljtype type to container"
raise TypeError | [
"def addType(self, type):\n\t\tself.types.append(type)",
"def append(self, type: 'SoType') -> \"void\":\n return _coin.SoTypeList_append(self, type)",
"def addChildType(self, typeToAdd: 'SoType') -> \"void\":\n return _coin.SoNodeKitListPart_addChildType(self, typeToAdd)",
"def pushType(type):\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Append bondtype object to this container. | def put(self, btyp ):
if isinstance(btyp, bondtype):
self.maxgid += 1
self.bondtypes[self.maxgid] = copy.deepcopy(btyp)
else:
print "Attempting to add non-bondtypes type to container"
raise TypeError | [
"def addType(self, type):\n\t\tself.types.append(type)",
"def append(self, type: 'SoType') -> \"void\":\n return _coin.SoTypeList_append(self, type)",
"def append(self, bs: BitsType) -> None:\n self._append(bs)\n self._pos = len(self)",
"def put(self, ljtyp ):\n if isinstance(ljtyp... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Append angletype object to this container. | def put(self, atyp ):
if isinstance(atyp, angletype):
self.maxgid += 1
self.angletypes[self.maxgid] = copy.deepcopy(atyp)
else:
print "Attempting to add non-angletypes type to container"
raise TypeError | [
"def addType(self, type):\n\t\tself.types.append(type)",
"def __add__(self, angle):\n return self.rotate(angle)",
"def add_angle(self, atom_1: int, atom_2: int, atom_3: int) -> None:\n angle = Angle(atom_1, atom_2, atom_3)\n if angle not in self.internal_coordinates:\n self.inter... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Append dihtype object to this container. | def put(self, dtyp ):
if isinstance(dtyp, dihtype):
self.maxgid += 1
self.dihtypes[self.maxgid] = copy.deepcopy(dtyp)
else:
print "Attempting to add non-dihtype type to container"
raise TypeError | [
"def append(self, hdu):\n if isinstance(hdu, _AllHDU):\n super(HDUList, self).append(hdu)\n hdu._new = 1\n self._resize = 1\n else:\n raise \"HDUList can only append an HDU\"\n\n # make sure the EXTEND keyword is in primary HDU if there is extension\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Append Imptyp object to this container. | def put(self, Imptyp ):
if isinstance(Imptyp, imptype):
self.maxgid += 1
self.imptypes[self.maxgid] = copy.deepcopy(Imptyp)
else:
print "Attempting to add non-dihtype type to container"
raise TypeError | [
"def add_empty_value(self):\n if not self.field.waardeObject:\n raise MethodNotApplicableError(\n \"In order to use this method this object must be one of these types: UnionType, ComplexType, KwantWrd, Dte\")",
"def empty_p(self):\n return _raw_util.raw_msg_queue_sptr_empty... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the counts of different meta data based on the text | def count_meta_data(self, text):
counter = defaultdict(int)
# tokenize by sentences
sentence_list = sent_tokenize(text)
for sentence in sentence_list:
# tokenize each sentence into words and tag part of speech
pos_tags = nltk.pos_tag(word_tokenize(sentence))
... | [
"def count_meta_data(self, text):\n\n counter = defaultdict(int)\n\n # tokenize by sentences\n sentence_list = sent_tokenize(text)\n\n for sentence in sentence_list:\n # tokenize each sentence into words and tag part of speech\n pos_tags = nltk.pos_tag(word_tokenize... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
create_output_data_file() creates an output data file using a timestamp and the name of the data source file | def create_output_data_file():
logging.info(cs_ref, 'create Output Data File')
current_date = '%Y%m%d-%H%M%S'
head, tail = osp.split(src_file)
first_data = "\nNX-COMPUTATIONS : OUTPUT DATA FILE for " + src_file
df = 'data/%s_%s' % (datetime.now().strftime(current_date), tail)
open(df, 'w').write... | [
"def create_datafile(datasource, ticlist, dest_basename):\n def get_gvkeys_from_ticlist(ticlist): #TODO: use actual gvkeys\n \"\"\"\n Returns 'gvkeys' from ticlist.dat as a sorted list.\n\n NOTE: Right now, 'gvkeys' are not the actual gvkeys that you'd see in\n Compustat. Instead, th... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
calc_degree_sequence(g) Calculates & plots the degree sequence of the graph g and writes data to the created data output file | def calc_degree_sequence(g, dest_file):
func_intro = "\n\nDegree Sequence ... "
logging.info(cs_ref, func_intro)
print func_intro
with open(dest_file, "a") as dat_file:
dat_file.write(func_intro)
degree_sequence = sorted(nx.degree(g).values(), reverse=True)
with open(dest_file, "a") as ... | [
"def havel_hakimi_custom_graph(deg_sequence):\n\n if not (nx.is_valid_degree_sequence(deg_sequence) or nx.is_graphical(deg_sequence) or nx.is_valid_degree_sequence_erdos_gallai(deg_sequence)):\n raise nx.NetworkXError('Invalid degree sequence')\n\n p = len(deg_sequence)\n G=nx.empty_graph(p)\n nu... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
calc_assortativity_coefficient(g) Calculates & plots the assortativity coefficient of the graph g using built-in NetworkX functions, and writes data to the created data output file | def calc_assortativity_coefficient(g, dest_file):
func_intro = "\n\nAssortativity Co-Efficient ..."
logging.info(cs_ref, func_intro)
print func_intro
with open(dest_file, "a") as dat_file:
dat_file.write(func_intro)
dac = nx.degree_assortativity_coefficient(g) # calculate assortativity coe... | [
"def degree_assortativity_coefficient(graph, x=\"out\", y=\"in\", weight=None):\n weighted = False if weight is None else True\n ctx = AppAssets(algo=\"degree_assortativity_coefficient\", context=\"tensor\")(\n graph,\n source_degree_type=x,\n target_degree_type=y,\n weighted=weigh... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
calc_clustering_coefficient(g) Calculates & plots the clustering coefficient of the graph g and writes data to the created data output file | def calc_clustering_coefficient(g, dest_file):
func_intro = "\n\nClustering Co-Efficient ..."
logging.info(cs_ref, func_intro)
print func_intro
with open(dest_file, "a") as dat_file:
dat_file.write(func_intro)
cce = nx.clustering(g) # calculate clustering co-efficient
with open(dest_fi... | [
"def analyze_clustering(network):\r\n \r\n print(\"Clustering coeeficient: {}\".format(get_cluster_coefficient(network))) \r\n \r\n # took too long\r\n #laplacian = nx.laplacian_matrix(network)\r\n #plt.figure(8)\r\n #plt.spy(laplacian)\r\n \r\n #laplacian = sparse.csr_matrix(laplacian)... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Tub loads from existing tub path. | def test_tub_load(tub, tub_path):
t = Tub(tub_path)
assert t is not None | [
"def _load_turicreate_model(self, path):\n return tc.load_model(path)",
"def load_study_from_run(run: neptune.Run):\n if run['study/storage_type'].fetch() == 'InMemoryStorage':\n return _get_pickle(path='study/study', run=run)\n else:\n return optuna.load_study(study_name=run['study/stu... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Tub updates its dataframe | def test_tub_update_df(tub):
tub.update_df()
assert len(tub.df) == 128 | [
"def updateTable(self):\r\n self.dataTable = Table(self.frame, dataframe = self.data)\r\n self.dataTable.show()",
"def update(self, df: pd.DataFrame):\n for stat in self._statistics.values():\n stat.update(df)",
"def run(self, df):\n raise NotImplementedError",
"def upda... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Make sure the Tub will exclude records in the exclude set | def test_tub_exclude(tub):
ri = lambda fnm: int(os.path.basename(fnm).split('_')[1].split('.')[0])
before = tub.gather_records()
# Make sure we gathered records correctly
assert len(before) == tub.get_num_records()
tub.exclude.add(1)
after = tub.gather_records()
# Make sure we excluded the ... | [
"def test_tub_exclude(tub):\n ri = lambda fnm : int( os.path.basename(fnm).split('_')[1].split('.')[0] )\n\n before = tub.gather_records()\n assert len(before) == tub.get_num_records() # Make sure we gathered records correctly\n tub.exclude.add(1)\n after = tub.gather_records()\n assert len(after)... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Tub with augmented images which only differ slightly. | def test_tub_augment(tub):
import numpy as np
index = tub.get_index(shuffled=False)
img_arr_before = [tub.get_record(ix)['cam/image_array'] for ix in index]
tub.augment_images()
img_arr_after = [tub.get_record(ix)['cam/image_array'] for ix in index]
total_change = 0
for img_arr_b, img_arr_a ... | [
"def augmentator(images, masks):\n spatial_aug = iaa.Sequential([\n iaa.Fliplr(0.5), # horizontal flips\n iaa.Flipud(0.5), # vertical flips\n iaa.Crop(percent=(0, 0.1)), # random crops\n # Apply affine transformations to each image.\n # Scale/zoom them, translate/move them, ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The manage.py/donkey2.py drive command creates a tub using TubHandler, so test that way. | def test_tub_like_driver(self):
os.makedirs(self.tempfolder)
meta = ["location:Here2", "task:sometask2"]
th = TubHandler(self.tempfolder)
tub = th.new_tub_writer(inputs=self.inputs, types=self.types, user_meta=meta)
t2 = Tub(tub.path)
assert tub.meta == t2.meta
as... | [
"def test_post_folders(self):\n pass",
"def test_torsiondrive_tools():\n import torsiondrive.tools",
"def test_command_add(self):\n pass",
"def test_command_delete(self):\n pass",
"def test_post_folders_id_copy(self):\n pass",
"def setup(command):\n return NotImplemen... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return gold labels as a list. | def gold(self):
return self.labels | [
"def get_labels(self):\n labels = []\n for g, graph in enumerate(self.graphs):\n for s in xrange(len(graph.sets)):\n label = \"G%dS%d\" % (g, s)\n labels.append(label)\n return labels",
"def get_labels(self):\n if not self.labels:\n r... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Normalize and set colormap | def norm_cmap(values, cmap, normalize, cm, mn, mx):
if (mn is None) and (mx is None):
mn, mx = min(values), max(values)
norm = normalize(vmin=mn, vmax=mx)
n_cmap = cm.ScalarMappable(norm=norm, cmap=cmap)
return n_cmap, norm | [
"def _norm_cmap(values, cmap, normalize, cm, vmin=None, vmax=None):\n\n mn = min(values) if vmin is None else vmin\n mx = max(values) if vmax is None else vmax\n norm = normalize(vmin=mn, vmax=mx)\n n_cmap = cm.ScalarMappable(norm=norm, cmap=cmap)\n return n_cmap",
"def _continuous_colormap(hue, cm... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
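A short runnable example of the same Normalize + ScalarMappable pattern used by `norm_cmap`; the 'viridis' colormap and sample values are arbitrary choices:

```python
import matplotlib.cm as cm
import matplotlib.colors as mcolors

values = [1.0, 2.5, 4.0]  # arbitrary sample data
norm = mcolors.Normalize(vmin=min(values), vmax=max(values))
n_cmap = cm.ScalarMappable(norm=norm, cmap='viridis')
print(n_cmap.to_rgba(2.5))  # RGBA tuple for the midpoint value
```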
Plot a single LineString geometry | def plot_linestring(ax, geom, color='black', linewidth=1, **kwargs):
a = np.array(geom)
ax.plot(a[:, 0], a[:, 1], color=color, linewidth=linewidth, **kwargs) | [
"def plot_linestrings(linestrings):\n for linestring in linestrings:\n for line in zip(linestring.coords, linestring.coords[1:]):\n plt.plot(*zip(*line)) # I know...",
"def line(coords: CoordList, crs: MaybeCRS) -> Geometry:\n return Geometry({'type': 'LineString', 'coordinate... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Compute a class saliency map using the model for images X and labels y. | def classifier_saliency_maps(X, y, model):
# Make sure the model is in "test" mode
model.eval()
# Make input tensor require gradient
X.requires_grad_()
scores = model(X)
correct_class_scores = scores.gather(1, y.view(-1,1)).squeeze()
dummy_loss = torch.sum(correct_class_scores)
dummy_l... | [
"def compute_saliency_maps(X, y, model):\n # Make input tensor require gradient\n \n ##############################################################################\n # TODO: Implement this function. Perform a forward and backward pass through #\n # the model to compute the gradient of the correct class score wi... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Transforms a direction angle to the correct bounds [0, 2pi) | def transform_direction_in_bounds(self, direction):
if direction < 0.:
direction = (direction + (1+int(-direction))*2*np.pi) % (2*np.pi)
elif direction >= 2*np.pi:
direction = direction % (2*np.pi)
return direction | [
"def wrap_angle2(angle):\n return np.mod(angle, 2*np.pi)",
"def convert_direction_to_angle(direction):\n if direction == 'up':\n return 3.1\n elif direction == 'down':\n return 0\n elif direction == 'left':\n return -1.57\n elif direction == 'right':\n return 1.57\n e... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
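Worth noting: Python's float modulo already maps any angle, negative or large, into [0, 2*pi), so the two branches above can collapse to one expression. A minimal sketch assuming NumPy:

```python
import numpy as np

def wrap_angle(direction):
    """Map any angle (including negative ones) into [0, 2*pi)."""
    return direction % (2 * np.pi)

print(wrap_angle(-np.pi / 2))  # 4.712..., i.e. 3*pi/2
print(wrap_angle(7 * np.pi))   # 3.141..., i.e. pi
```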
Check for equality of two particles. Ignores a particle's name and identification number, i.e. a particle is equal to another particle if its physical parameters are the same. | def __eq__(self, other):
for key in self.__dict__.keys():
# ignore particle_id and name as these are not physical quantities
if key not in ['particle_id', 'name']:
if self.__dict__[key] != other.__dict__[key]:
return False
return True | [
"def test_particle_antiparticle_pairs(particle, antiparticle):\n\n assert not _Particles[particle]['antimatter'], \\\n f\"{particle} is incorrectly marked as antimatter.\"\n\n assert _Particles[antiparticle]['antimatter'], \\\n f\"{antiparticle} is incorrectly marked as matter.\"\n\n identica... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Send the menu via Slack and email when the Boolean send is True | def post_menu(instance, **kwargs):
if instance.send == True:
option = Option.objects.filter(menu__id=instance.id)
send_employee_mail(option)
send_slack(option) | [
"def sendMailToProprio():",
"def handle(self, **options):\n slack_app = SlackApp()\n\n slack_app.send_message(message='sente: Test message')",
"def send_feedback_email_task():\n print(\"Sent feedback email\")\n return",
"def post_hi():\n\tslack_utility.send_message(channel='#general', msg=... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Find VRF name containing a prefix | def search_for_prefix(self, prefix):
query = self.build_xml('show ip route vrf all')
ncdata = str(self.manager.get(('subtree', query)))
root = ET.fromstring(ncdata)
neighbors = {}
mod = {'mod': 'http://www.cisco.com/nxos:1.0:urib'}
# it is entirely possible that the pref... | [
"def verify_volume_name_prefix(\n hostname, prefix, namespace, pvc_name, heketi_server_url, **kwargs):\n heketi_server_url, json_arg, admin_key, user = _set_heketi_global_flags(\n heketi_server_url, **kwargs)\n\n heketi_vol_name_prefix = \"%s_%s_%s_\" % (prefix, namespace, pvc_name)\n cmd = \... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets client options, suppresses Asana deprecation warnings | def _set_client_options(self):
self._client.LOG_ASANA_CHANGE_WARNINGS = False
self._client.options['pretty'] = True
self._client.options['item_limit'] = 50
self._client.options['fields'] = ["this.name", "this.assignee.name",
"this.notes", "this.d... | [
"def setConfiguration(options):",
"def set_opts(self, **kwargs):\n raise NotImplementedError('Function not implemented in base class.')",
"def experimental_options(self):\n ...",
"def EgtpClientOptions(self):\n\t\tfrom ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocolstack.egtpclie... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
For testing purposes only. Prints all tasks marked 'incomplete' in Asana | def _find_all_incomplete(self):
for task in self._client.tasks.find_all(workspace=self._workspace_gid,
assignee=self._assignee['gid']):
if not task['completed']:
print(task) | [
"def show_tasks(self):\n print('\\nCompleted to following tasks:')\n for step in self.tasks:\n print('\\t{0}'.format(step))",
"def tasks(**_):\n for task in filter(bool, get_all_tasks()):\n print(task)",
"def _print_tasks(env, tasks, mark_active=False):\n\n if env.t... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Add a new node after an existing node based on the given key. | def add_after_node(self, key, data):
cur = self.head
while cur:
if cur.data == key:
if cur.next is None:
self.append(data)
return
new_node = Node(data)
new_node.next = cur.next
cur.next.pr... | [
"def insert_after_node(self, key, data):\n node = ListNode(data)\n p = self.head\n while p is not None:\n if p.data == key:\n node.next = p.next\n p.next = node\n p = p.next",
"def add_node(self, node, key):\n self.get_state()\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
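Since the cell above is cut off mid-statement, here is a hypothetical, self-contained doubly linked list showing the insert-after-key operation the query describes; class and method names are illustrative, not taken from the dataset:

```python
class Node:
    def __init__(self, data):
        self.data = data
        self.prev = None
        self.next = None

class DoublyLinkedList:
    def __init__(self):
        self.head = None

    def append(self, data):
        node = Node(data)
        if self.head is None:
            self.head = node
            return
        cur = self.head
        while cur.next:
            cur = cur.next
        cur.next = node
        node.prev = cur

    def add_after_node(self, key, data):
        cur = self.head
        while cur:
            if cur.data == key:
                if cur.next is None:   # key is the tail: plain append
                    self.append(data)
                    return
                new_node = Node(data)  # splice between cur and cur.next
                new_node.next = cur.next
                new_node.prev = cur
                cur.next.prev = new_node
                cur.next = new_node
                return
            cur = cur.next

# Usage: build 1 <-> 2 <-> 4, then insert 3 after 2.
dll = DoublyLinkedList()
for v in (1, 2, 4):
    dll.append(v)
dll.add_after_node(2, 3)
```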
Add a new node before an existing node based on the given key. | def add_before_node(self, key, data):
cur = self.head
while cur:
if cur.data == key:
if cur.next is None:
self.append(data)
return
new_node = Node(data)
cur.prev.next = new_node
new_node.p... | [
"def insert_before(self, key, data):\n node = ListNode(data)\n p = self.head\n while p.next is not None:\n if p.next.data == key:\n node.next = p.next\n p.next = node\n p = p.next",
"def findAndInsert(self,node, key):\n leafNode = sel... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Applies an image mask. Only keeps the region of the image defined by the polygon formed from `vertices`. The rest of the image is set to black. | def region_of_interest(self, img, vertices):
#defining a blank mask to start with
mask = np.zeros_like(img)
#defining a 3 channel or 1 channel color to fill the mask with depending on the input image
if len(img.shape) > 2:
channel_count = img.shape[2] # i.e. 3 or... | [
"def crop_image(image, vertices):\n # Create a mask with the same dimensions as the input image.\n mask = np.zeros(image.shape, dtype=np.uint8)\n #get image shape\n h, w, c = image.shape\n # Fill the mask with white polygon.\n vertices = (vertices * np.array([[h, w]])).astype(np.int32)\n cv2.fi... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Simplified player gets its id from the Player object. It also stores a copy of this Player's Statistics and an array of its cards' IDs. | def __init__(self, player: Player):
self.player_id = player.id
self.stats = copy(player.statistics)
# Create an array of ID's from player's deck
self.card_ids = list(x.card_model.id for x in player.deck.cards_queue) | [
"def get_id(self):\n return self.__player_id",
"def player():\n\n name_id = 1\n return card_game.Player(name_id)",
"def get_player_stats(self):\n\n\n return pd.concat([player.get_stats() for player in self.players],axis=0,sort=True).reset_index(drop=True)",
"def players(self):\r\n p... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns an ordered list of card IDs. "In order" means that the array represents the actual order of the player's cards within its deck. | def get_card_ids_in_order(self) -> list[int]:
return self.card_ids | [
"def get_card_id(self):\n card_id = self._ll.exchange_bytes(RFIDReader.COMMAND_GET_CARD_ID)[::-1]\n return [x for x in card_id]",
"def known_starting_deck_list(self) -> List[str]:\n\t\tret = list(self._known_starting_card_ids)\n\n\t\toriginal_card_ids = [\n\t\t\tget_original_card_id(entity.initial_c... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns a Statistics object tied to the Player | def get_stats(self) -> Statistics:
return self.stats | [
"def get_player_stats(self):\n\n\n return pd.concat([player.get_stats() for player in self.players],axis=0,sort=True).reset_index(drop=True)",
"def get_stats_profile(self):\n # type: () -> StatsProfile\n return self._stats_profile",
"def _player_stats(self):\n print \"Player stats:\"... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Calculates and returns the Shannon entropy of the col argument. There's probably a way to do a block of columns, but AlignIO seems to insist on including the record id in the multicolumn slice | def shannon_entropy(col, verbose=False):
alpha=list(set(col))
shannon=list()
for a in alpha:
freq = col.count(a)/len(col)
# math.log() is base e by default
logf = math.log(freq)
shannon.append(freq*logf)
if (verbose):
print("\tnumber of {0} is {1}\n\t\tfr... | [
"def _entropy(self, col):\n vals, counts = np.unique(col, return_counts = True)\n\n entropy = np.sum([(-counts[i]/np.sum(counts)) * np.log2(counts[i]/np.sum(counts))\n for i in range(len(vals))])\n return entropy",
"def calc_entropy(data):\r\n\r\n col = data[:,-1]\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
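A compact, self-contained version of the per-column Shannon entropy computed above (natural log, matching the row's use of `math.log`); the example column is made up:

```python
import math
from collections import Counter

def shannon_entropy(col):
    """Shannon entropy (in nats) of a single alignment column."""
    total = len(col)
    return -sum((n / total) * math.log(n / total)
                for n in Counter(col).values())

print(shannon_entropy("AAACGT"))  # ~1.2425
```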
rewrite of sliding window that reads the file of precalculated | def sliding_window(window, file_path, gene, in_folder, verbose=False):
runtime=datetime.now().strftime('%m_%d_%H')
#path_to_file="../processed_data/weighted_ent_{0}/{1}_weight_ent.csv".format(date, gene)
if verbose==True:
print("working on window size {0} for {1} ...".format(window, gene))
... | [
"def test_sliding_time_window(self):\n dst = \"ngc5921.split.sliding_time_window.ms\"\n ref = 'ngc5921_statwt_ref_test_sliding_time_window.ms'\n timebin = \"300s\"\n \"\"\"\n row_to_rows = []\n row_to_rows.append([0, 6])\n row_to_rows.append([0, 7])\n row_to_r... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
requires an AlignIO object read from a clustal file to be passed at the command line. Calculates and returns the mean Shannon entropy from the full length of a multisequence alignment. Also writes to a file "<filename>__full_entropy_<date>.csv", where <filename> is the alignment file name and <date> is the date and time | def mean_entropy(msa, filename="testing", verbose=False):
runtime=datetime.now().strftime('%m_%d_%H_%M')
aln_length=msa.get_alignment_length()
holder=[shannon_entropy(msa[:,x+1]) for x in range(1, aln_length-1)]
if (verbose):
print("working on mean\n")
print("using MSA \n{}".format(... | [
"def runCalculation(self): \n \n # Calculate the sequence entropy of each column in a fasta file\n f = open(self.fasta_file,'r')\n self.data = wl.LogoData.from_seqs(wl.read_seq_data(f)) \n f.close()",
"def read_alignment(fname):\n\n alignment = AlignIO.read(open(fname), \"clustal\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
inputs: 1) a multisequence alignment 2) the dictionary output by create_weight_dict() 3) the gene name. Output: writes the weighted entropy of one or more genes as separate files into the indicated directory | def write_weighted_ent(msa, w_dict, gene, conserve=False, verbose=False):
runtime=datetime.now().strftime('%m_%d_%H%M')
new_dir="../processed_data/weighted_ent_{0}".format(runtime)
if not os.path.exists(new_dir):
os.makedirs(new_dir)
# the ln is supposed to be the smaller of either
... | [
"def fengDoolittle(sequences, weightFunction, similarityScore, outputFile):\n fd = FengDoolittle(sequences, weightFunction, similarityScore)\n alignmentDict = fd.computeMultipleAlignment()\n alignment = [[]]\n for i in alignmentDict:\n alignment[0].append(alignmentDict[i])\n io().writeFastaFil... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
dataset = [(ids, tag), ...] src = [id1, id2, ...] | def _preprocess_dataset(src_data, tokenizer):
dataset = []
max_length = 0
for (tag, sent) in src_data:
token_ids = tokenizer.encode(sent)
dataset.append({'src': token_ids, 'tgt': tag})
if max_length < len(token_ids):
max_length = len(token_ids)
return dataset, max_le... | [
"def build_dataset(args, input_data, target_data):\n dataset = SupervisedDataSet(len(input_data[0]), len(target_data[0]))\n for in_data, tg_data in izip(input_data, target_data):\n dataset.addSample(in_data, tg_data)\n\n if args['verbose']:\n print('Dataset built.')\n\n return dataset",
... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Load a configuration from a file or stdin. If `filename` is `None` or "", then configuration gets read from stdin. | def load_config(filename=None):
try:
with _config_stream(filename) as handle:
filename = handle.name
return deserialize_config(handle.read())
except (OSError, toml.TomlDecodeError, UnicodeDecodeError) as exc:
raise ConfigError("Error loading configuration from {}".format(... | [
"def _config_stream(filename):\n if filename is None or filename == \"-\":\n log.info(\"Reading config from stdin\")\n yield sys.stdin\n else:\n with open(filename, mode=\"rt\") as handle:\n log.info(\"Reading config from %r\", filename)\n yield handle",
"def read_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The command to run to execute tests. | def test_command(self):
return self["test-command"] | [
"def command_test(self):\n if len(self.stdin()) > 0:\n cmd = \"./{} {} < {} > {} 2>&1\".format(self.programm_path(),\n self.command_args(),\n self.stdin_path(),\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The configuration for specified operators. This is a dict mapping operator names to dicts which represent keyword arguments for parameterizing an operator. Each keyword arg dict is a single parameterization of the operator, and each parameterized operator will be executed once for each parameterization. | def operators_config(self):
return self.get("operators", {}) | [
"def _supported_ops_dict(cls) -> Dict[str, Type[Operator]]:\n return {op.__name__: op for op in cls.consumes_types()}",
"def GET_all_operators(self):\n return list(map(lambda _: _.as_dict(), get_operators()))",
"def global_operator_extra_link_dict(self) -> Dict[str, Any]:\n from airflow imp... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Given a configuration's filename, this returns a stream from which a configuration can be read. If `filename` is `None` or '' then stream will be `sys.stdin`. Otherwise, it's the open file handle for the filename. | def _config_stream(filename):
if filename is None or filename == "-":
log.info("Reading config from stdin")
yield sys.stdin
else:
with open(filename, mode="rt") as handle:
log.info("Reading config from %r", filename)
yield handle | [
"def open_file_stream(filename):\n if filename.endswith('.gz'):\n return gzip.open(filename, 'rt')\n else:\n return open(filename, 'rt')",
"def read_input(filepath, stdin):\n if filepath is not None:\n f = open(os.path.abspath(filepath))\n yield f\n f.close()\n elif ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
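`_config_stream` is used as a context manager in `load_config` above, which only works if the generator is wrapped with `contextlib.contextmanager`; the decorator simply isn't visible in the cell. A minimal standalone analogue:

```python
import sys
import contextlib

@contextlib.contextmanager
def config_stream(filename=None):
    """Yield stdin for None/'-', otherwise an opened file handle."""
    if filename is None or filename == "-":
        yield sys.stdin
    else:
        with open(filename, mode="rt") as handle:
            yield handle

with config_stream("-") as fh:
    print(fh is sys.stdin)  # True
```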
Read file and search for word | def file_contains(self, file, word):
if os.path.isfile(file):
with open(file) as ifd:
content = ifd.read()
if word in content:
return True
return False | [
"def find_word(f, w):\n while (True):\n line = f.readline()\n if line == \"\":\n print(\"Error: end of file reached in find_word\")\n sys.exit()\n fields = line.split()\n if (len(fields) > 0 and fields[1] == w):\n break\n return line",
"def grep... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check if a Conan recipe is header-only | def _is_header_only(self, conanfile):
if conanfile and os.path.isfile(conanfile) and \
self.file_contains(conanfile, "self.info.header_only()"):
return True
return False | [
"def secnd_hdr_flag(self) -> bool | None:\n if self.__hdr is None:\n return None\n\n return (self.__hdr['type'] >> 11) & 0x1",
"def is_section_header(self, text):\n return (self.section_regex1.search(text) or\n self.section_regex2.search(text))",
"def test_no_heade... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Clone GitHub project to a temporary directory | def _clone_project(self, github_url):
temp_dir = tempfile.mkdtemp(prefix='github')
project = github_url[(github_url.rfind('/') + 1):]
project_path = os.path.join(temp_dir, project)
repo = git.Repo.clone_from(github_url, project_path)
self.output_remote_update("Clone project {} to... | [
"def test_clone_repo(tmpdir, settings):\n settings.REPO_ROOT = str(tmpdir)\n tasks.clone_repo('bower-cache', 'git://github.com/Tinche/bower-cache')\n assert len(tmpdir.listdir()) == 1\n assert tmpdir.listdir()[0].basename == 'bower-cache'",
"def git_clone(self, url, target):\n pass",
"def git... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Filter list by user pattern | def _filter_list(self, names, pattern):
regex = re.compile(pattern)
filtered_list = [name for name in names if regex.match(name)]
self._logger.debug("Filtered list: {}".format(filtered_list))
return filtered_list | [
"def _FilterTestUsers(self, query, user_list):\n filter_key, filter_value = query.split(':')\n key_translations = {'email': 'primaryEmail'}\n if filter_key in key_translations:\n filter_key = key_translations[filter_key]\n filter_value = filter_value.rstrip('*')\n return [u for u in user_list if... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This test checks that the export function (to graphdotviz) works with a model distributed across different files. It is checked that all filenames are included in the output and that some elements from every model file are included in the output. | def test_model_export():
#################################
# META MODEL DEF
#################################
this_folder = dirname(abspath(__file__))
def get_meta_model(provider, grammar_file_name):
mm = metamodel_from_file(join(this_folder, grammar_file_name),
... | [
"def test_biolink_graphviz(self):\n # We don't do the comparison step because different graphviz libraries generate slightly different binary output\n # We also don't commit the results -- the graphviz output is in .gitignore\n self.directory_generator(\"graphviz\", DotGenerator)",
"def test_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Paginate by specified value in querystring, or use default class property value. | def get_paginate_by(self, queryset):
if 'page' in self.request.GET:
return self.paginate_by
else:
return self.first_paginate | [
"def pagination_hook(self, query, req, **kwargs):\n size = req.params.get('pageSize')\n\n if not size:\n size = self.default_page_size\n else:\n size = int(size)\n\n # -1 here is so that the page numbers start at 1\n page = int(req.params.get('page', 1)) - 1\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Represent the level of update automation in the user interface | def show_auto_update_level(self):
# Security Updates
self.set_security_update_level()
# Other Updates
if self.settings:
level_other = self.settings.get_int("regular-auto-launch-interval")
model = self.combobox_other_updates.get_model()
for (i, row) i... | [
"def test_update_level(self):\n pass",
"def update(self):\n\n self.update_level()\n self.update_complete()\n self.update_value()",
"def set_security_update_level(self):\n \n # Security Updates\n level_sec = self.get_update_automation_level()\n if level_sec... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Fetch the security level, enable/disable the combobox, and set the value appropriately | def set_security_update_level(self):
# Security Updates
level_sec = self.get_update_automation_level()
if level_sec == None:
self.combobox_security_updates.set_sensitive(False)
else:
self.combobox_security_updates.set_sensitive(True)
if (level_se... | [
"def set_sec_update_automation_level(self, widget):\n index = widget.get_active()\n state = -1\n if index == 0: # Display immediately\n state = softwareproperties.UPDATE_NOTIFY\n elif index == 1: # Download automatically\n state = softwareproperties.UPDATE_DOWNLOA... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Call the backend to set the security update automation level to the given value | def set_sec_update_automation_level(self, widget):
index = widget.get_active()
state = -1
if index == 0: # Display immediately
state = softwareproperties.UPDATE_NOTIFY
elif index == 1: # Download automatically
state = softwareproperties.UPDATE_DOWNLOAD
e... | [
"def set_security_update_level(self):\n \n # Security Updates\n level_sec = self.get_update_automation_level()\n if level_sec == None:\n self.combobox_security_updates.set_sensitive(False)\n else:\n self.combobox_security_updates.set_sensitive(True)\n\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Set the other update automation level to the given value via gconf | def set_other_update_automation_level(self, widget):
index = widget.get_active()
model = self.combobox_other_updates.get_model()
# the second column is the update interval days
days = model[index][1]
self.settings.set_int("regular-auto-launch-interval", days) | [
"def set_sec_update_automation_level(self, widget):\n index = widget.get_active()\n state = -1\n if index == 0: # Display immediately\n state = softwareproperties.UPDATE_NOTIFY\n elif index == 1: # Download automatically\n state = softwareproperties.UPDATE_DOWNLOA... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Disable or enable the source code for all sources | def on_checkbutton_source_code_toggled(self, checkbutton):
try:
if checkbutton.get_active() == True:
self.backend.EnableSourceCodeSources()
else:
self.backend.DisableSourceCodeSources()
except dbus.DBusException as e:
if e._dbus_error_n... | [
"def setAlwaysUseSourceFiles(v: bool) -> None:\n ...",
"def disable_all(self) -> None:\n raise NotImplementedError()",
"def alwaysUseSourceFiles() -> bool:\n ...",
"def disablemoddepends(self):\n pass",
"def enable_all(self) -> None:\n raise NotImplementedError()",
"def enable()... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The apt keys have changed and need to be redisplayed | def on_keys_modified(self):
self.show_keys() | [
"def aptUpdate():\n logging.debugv(\"functions/linux.py->aptUpdate()\", [])\n cmd = \"apt-get -qqy update 2>/dev/null\"\n try:\n apt = os.popen(cmd)\n except excepts.RunException, msg:\n logging.error(\"APT update error: %s\" % str(msg))",
"def update_package_manager_package_lists():\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The sources list was changed and now needs to be saved and reloaded | def on_sources_list_modified(self):
self.reload_sourceslist()
self.show_distro()
self.show_isv_sources()
self.show_cdrom_sources()
self.button_revert.set_sensitive(True)
self.modified_sourceslist = True | [
"def save_sources(self, sources: list):\n self.sources = json.dumps(sources)\n self.save()",
"def update(source):",
"def test_01_refresh_all_sources(self):\n completed_proc = self.client.run(\n 'pulp-admin content sources refresh'.split()\n )\n self.check_error_exis... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Show CDROM/DVD based repositories of the currently used distro in the CDROM based sources list | def show_cdrom_sources(self):
self.cdrom_store.clear()
for source in self.get_cdrom_sources():
contents = self.render_source(source)
self.cdrom_store.append([not source.disabled, contents,
source, False, True]) | [
"def list_() -> None:\n available_sources = [\n \"Wikipedia (wiki) [with different locales]\",\n \"Accadde Oggi (accadde)\",\n ]\n print(\"\\nAvailable sources:\\n\")\n\n for source in available_sources:\n print(f\" • {source}\")",
"def on_sources_list_modified(self):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return true if the selected row is a separator | def is_separator(self, model, iter, column):
try:
return model.get_value(iter, column)
except Exception as e:
print("is_seperator returned '%s' " % e)
return False | [
"def _get_isSeparator(self) -> \"bool\" :\n return _core.ListItem__get_isSeparator(self)",
"def _isEndOfRow(self):\r\n\t\tinfo=self.copy()\r\n\t\tinfo.expand(textInfos.UNIT_CHARACTER)\r\n\t\treturn info._rangeObj.getText(-1)==u'\\u0007'",
"def detecteSeparateur(liste, j, i):\r\n return liste[j][i] in ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Show a dialog to edit an ISV source | def on_edit_clicked(self, widget):
sel = self.treeview_sources.get_selection()
(model, iter) = sel.get_selected()
if not iter:
return
old_source_entry = model.get_value(iter, LIST_ENTRY_OBJ)
dialog = DialogEdit(self.window_main, self.sourceslist,
... | [
"def __Action_editSong__(self):\r\n dialog = dialogSongEdit.SongEditWindow(MpGlobal.Window)\r\n\r\n dialog.initData(self.getSelection())\r\n \r\n dialog.exec_()\r\n \r\n del dialog",
"def view_source(self):\n if self.source_object_id is None:\n from suga... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Remove the selected source | def on_remove_clicked(self, widget):
model = self.treeview_sources.get_model()
(path, column) = self.treeview_sources.get_cursor()
iter = model.get_iter(path)
if iter:
source_entry = model.get_value(iter, LIST_ENTRY_OBJ)
try:
self.backend.RemoveSou... | [
"def remove_source(src):\n src.stop()\n try:\n src.data.release_data_flag = 1\n src.cell_scalars_name = ''\n src.cell_tensors_name = ''\n src.cell_vectors_name = ''\n src.point_scalars_name = ''\n src.point_tensors_name = ''\n src.point_vectors_name = ''\n e... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Called when a CD-ROM is requested to be added | def on_button_add_cdrom_clicked(self, widget):
try:
self.backend.AddCdromSource()
except dbus.DBusException as e:
if e._dbus_error_name == 'com.ubuntu.SoftwareProperties.PermissionDeniedByPolicy':
logging.error("Authentication canceled, changes have not been saved... | [
"def attach_cdrom_drive(self, name):\n vm = self.get_vm_obj(name, fail_missing=True)\n controller = None\n cdrom_device_key = 3000 # 300x reserved for cd/dvd drives in vmware\n # Find last IDE controller and free device key\n for device in vm.config.hardware.device:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create GTI mask. Assumes that no overlaps are present between GTIs | def create_gti_mask(time, gtis, safe_interval=0, min_length=0,
return_new_gtis=False, dt=None):
import collections
check_gtis(gtis)
dt = _assign_value_if_none(dt,
np.zeros_like(time) + (time[1] - time[0]) / 2)
mask = np.zeros(len(time), dtype=bool)
... | [
"def create_mask(src, trg):\n src_mask = src.eq(PAD)\n trg_mask = trg.eq(PAD)\n length = trg.size(1)\n trg_mask = trg_mask.unsqueeze(1)\n nopeak_mask = np.triu(np.ones((1, length, length)), k=1).astype(np.uint8)\n nopeak_mask = torch.from_numpy(nopeak_mask)\n trg_mask = trg_mask | nopeak_mask\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
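The create_gti_mask body above is truncated before the masking loop. As a hedged illustration of what a GTI mask computes, here is a minimal self-contained sketch that ignores the dt and safe_interval refinements (simple_gti_mask is our own name, not the library's):

```python
import numpy as np

def simple_gti_mask(time, gtis):
    # Mark every time stamp that falls inside any (start, stop) GTI pair.
    # This ignores the dt and safe_interval refinements of the full version.
    mask = np.zeros(len(time), dtype=bool)
    for start, stop in gtis:
        mask |= (time >= start) & (time <= stop)
    return mask

time = np.arange(0.0, 10.0, 0.5)
gtis = np.array([[1.0, 3.0], [6.0, 8.0]])
print(simple_gti_mask(time, gtis).sum())  # 10 samples fall inside the two intervals
```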
Check if GTIs are well-behaved: no start > end, no overlaps. Raises AssertionError if GTIs are not well-behaved. | def check_gtis(gti):
gti_start = gti[:, 0]
gti_end = gti[:, 1]
logging.debug('-- GTI: ' + repr(gti))
# Check that GTIs are well-behaved
assert np.all(gti_end >= gti_start), 'This GTI is incorrect'
# Check that there are no overlaps in GTIs
assert np.all(gti_start[1:] >= gti_end[:-1]), 'This... | [
"def is_physically_valid(st: SpaceTime):\n\n #these can be uncommneted once this has been merged with the feature/faces_containing_fix branch\n test_faces_containing_size(st)\n test_accuracy_of_faces_containing(st)\n test_total_neighbors(st)\n test_future(st)\n test_past(st)\n test_right(st)\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
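check_gtis is shown in full, so a short usage sketch (assuming the function is importable in scope) covers both the passing and the failing case:

```python
import numpy as np

good = np.array([[0.0, 10.0], [20.0, 30.0]])
bad = np.array([[0.0, 25.0], [20.0, 30.0]])  # second interval starts before the first ends

check_gtis(good)  # passes silently
try:
    check_gtis(bad)
except AssertionError as exc:
    print("rejected:", exc)  # the overlap assertion fires
```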
Create a GTI list from a time array and a boolean mask ("condition"). | def create_gti_from_condition(time, condition,
safe_interval=0, dt=None):
import collections
assert len(time) == len(condition), \
'The length of the condition and time arrays must be the same.'
idxs = contiguous_regions(condition)
if not isinstance(safe_interval,... | [
"def __call__(self, data: np.ndarray, threshold: float):\n t_list = []\n time = 0\n # Find all threshold crossings\n data_thresh = data[data[:, 2] >= threshold, :]\n while time < self.max_time:\n # Find threshold crossings less than \"time\" before the time of event\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
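The body above is cut off right after contiguous_regions is called; a hedged usage sketch, assuming the function returns one [start, stop] row per contiguous run of True values (the exact half-bin padding depends on the elided dt handling):

```python
import numpy as np

time = np.arange(10, dtype=float)
condition = np.array([0, 1, 1, 1, 0, 0, 1, 1, 0, 0], dtype=bool)

gtis = create_gti_from_condition(time, condition)
print(gtis)  # two intervals, roughly covering t = 1..3 and t = 6..7
```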
Find contiguous True regions of the boolean array "condition". Return a 2D array where the first column is the start index of the region and the second column is the end index. | def contiguous_regions(condition): # NOQA
    # Find the indices of changes in "condition"
diff = np.diff(condition)
idx, = diff.nonzero()
# We need to start things after the change in "condition". Therefore,
# we'll shift the index by 1 to the right.
idx += 1
if condition[0]:
# If the... | [
"def contiguous_true_regions(condition: np.ndarray) -> np.ndarray:\n if len(condition) == 0:\n return np.empty((0, 2), dtype=np.intc)\n\n # convert condition array to integer\n condition = np.asarray(condition, np.intc)\n\n # Find the indices of changes in \"condition\"\n d = np.diff(condition... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
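contiguous_regions is truncated after the index-0 edge case. A complete, self-contained variant of the same diff-based idiom for reference (not the author's exact code):

```python
import numpy as np

def contiguous_regions_sketch(condition):
    # Pad with False on both ends so every run has a rising and a falling edge,
    # then read run boundaries off the sign changes of the diff.
    padded = np.concatenate(([False], condition, [False])).astype(int)
    d = np.diff(padded)
    starts = np.where(d == 1)[0]
    ends = np.where(d == -1)[0]  # end index is exclusive
    return np.column_stack((starts, ends))

cond = np.array([False, True, True, False, True, False])
print(contiguous_regions_sketch(cond))  # [[1 3] [4 5]]
```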
Tests whether `validate_speaker_ids` works as intended. | def test__validate_speaker_ids__0():
speaker_id_1 = 202303120075
speaker_id_2 = 202303120076
for input_value, expected_output in (
(None, None),
([], None),
([speaker_id_2, speaker_id_1], (speaker_id_1, speaker_id_2)),
([User.precreate(speaker_id_1)], (speaker_id_1, )),
... | [
"def test__validate_speaker_ids__1():\n for input_value in (\n 12.6,\n [12.6],\n ):\n with vampytest.assert_raises(TypeError):\n validate_speaker_ids(input_value)",
"def test__put_speaker_ids_into():\n speaker_id = 202303120074\n \n for input_value, defaults, expecte... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Tests whether `validate_speaker_ids` works as intended. | def test__validate_speaker_ids__1():
for input_value in (
12.6,
[12.6],
):
with vampytest.assert_raises(TypeError):
validate_speaker_ids(input_value) | [
"def test__validate_speaker_ids__0():\n speaker_id_1 = 202303120075\n speaker_id_2 = 202303120076\n \n for input_value, expected_output in (\n (None, None),\n ([], None),\n ([speaker_id_2, speaker_id_1], (speaker_id_1, speaker_id_2)),\n ([User.precreate(speaker_id_1)], (speak... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Search using Spearman correlation. Assumes that the input data has already been aligned by gene name. | def spearman_search(input_data, db_data):
corr = scipy.stats.spearmanr(input_data, db_data)[0]
return corr | [
"def spearman_pval_search(input_data, input_gene_names, db_data, db_gene_names=None, db_gene_data=None):\n if db_gene_names is not None:\n data_gene_ids, db_gene_ids = gene_overlap_indices(input_gene_names, db_gene_names)\n data_subset = input_data[data_gene_ids]\n results = []\n for cell_typ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
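spearman_search is a thin wrapper over scipy.stats.spearmanr; a quick check of what it returns, assuming the function is in scope:

```python
import numpy as np

a = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
b = np.array([2.0, 1.0, 4.0, 3.0, 5.0])
print(spearman_search(a, b))  # 0.8: adjacent swaps only mildly perturb the ranking
```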
Search using Poisson distance | def poisson_search(input_data, db_data):
from uncurl_analysis import bulk_data
data = db_data/db_data.sum()
dist = bulk_data.log_prob_poisson(data, input_data)
return dist | [
"def poissonDist(avg, k):\n PMF = ((avg**k)*(np.exp(-1*avg))/(factorial(k)))\n return(PMF)",
"def scan_neighbors(self, positions):\n # Calculate potential generated by topology while removing the current drone\n v = pygame.math.Vector2(0,0) \n for position in positions:\n dis... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
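poisson_search delegates to uncurl_analysis.bulk_data.log_prob_poisson, which is not shown here. A plausible stand-in under the standard Poisson log-likelihood (the library's actual implementation may differ):

```python
import numpy as np
from scipy.special import gammaln

def log_prob_poisson_sketch(rate, counts):
    # Sum of Poisson log-pmfs: k*log(lam) - lam - log(k!), with a small
    # epsilon so zero rates do not produce -inf via log(0).
    rate = np.asarray(rate, dtype=float)
    counts = np.asarray(counts, dtype=float)
    return float(np.sum(counts * np.log(rate + 1e-12) - rate - gammaln(counts + 1)))

profile = np.array([0.5, 0.3, 0.2])  # normalized reference expression
counts = np.array([5.0, 3.0, 2.0])   # observed counts in the query cell
print(log_prob_poisson_sketch(profile * counts.sum(), counts))
```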
Search using cosine similarity | def cosine_search(input_data, db_data):
from uncurl_analysis import bulk_data
dist = bulk_data.cosine(db_data, input_data)[0][0]
return dist | [
"def cosine_similarity(self, query, indices=None):\n\n pass",
"def calculate_cosine_similarity(self):\n\n data = []\n #prepare input for the sklearn cosine similarity function\n for k in sorted(self.node_dict.keys()):\n data.append(\" \".join(self.cleaned_data[self.node_dict... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
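The quantity behind bulk_data.cosine is ordinary cosine similarity; sketched generically here since the library function itself is not shown:

```python
import numpy as np

def cosine_sketch(u, v):
    # Cosine similarity of two dense vectors: <u, v> / (|u| * |v|).
    u = np.asarray(u, dtype=float).ravel()
    v = np.asarray(v, dtype=float).ravel()
    return float(np.dot(u, v) / (np.linalg.norm(u) * np.linalg.norm(v)))

print(cosine_sketch([1, 0, 1], [1, 1, 0]))  # 0.5
```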
Search using Hamming distance on binarized data | def hamming_search(input_data, db_data):
import scipy
input_data = (input_data != 0)
db_data = (db_data != 0)
dist = scipy.spatial.distance.hamming(input_data, db_data)
return dist | [
"def hamming_distance(i, j):\n # TODO: Find something better than this.\n return bin(i ^ j).count('1')",
"def hamming_distance(bits1: str, bits2: str) -> int:\n bits1 = [int(b) for b in bits1]\n bits2 = [int(b) for b in bits2]\n return hamming(bits1, bits2) * len(bits1)",
"def calcHammDist(a, b):... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
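hamming_search binarizes both vectors first; note that scipy's hamming returns the fraction, not the count, of mismatching positions:

```python
import numpy as np
import scipy.spatial.distance

a = np.array([0.0, 2.5, 0.0, 1.1])
b = np.array([3.0, 0.0, 0.0, 0.7])
# Binarize to "expressed / not expressed" before comparing.
print(scipy.spatial.distance.hamming(a != 0, b != 0))  # 0.5: 2 of 4 positions differ
```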
Runs Spearman search and also returns p-values. | def spearman_pval_search(input_data, input_gene_names, db_data, db_gene_names=None, db_gene_data=None):
if db_gene_names is not None:
data_gene_ids, db_gene_ids = gene_overlap_indices(input_gene_names, db_gene_names)
data_subset = input_data[data_gene_ids]
results = []
for cell_type_name, da... | [
"def spearman_search(input_data, db_data):\n corr = scipy.stats.spearmanr(input_data, db_data)[0]\n return corr",
"def spearman_permutation_pval_search(input_data, input_gene_names, db_data, db_gene_names=None, db_gene_data=None, n_perms=100):\n if db_gene_names is not None:\n data_gene_ids, db_ge... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
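The truncated body builds on scipy.stats.spearmanr returning a (correlation, p-value) pair in a single call:

```python
import numpy as np
import scipy.stats

rng = np.random.default_rng(1)
x = np.arange(30.0)
y = x + rng.normal(0.0, 3.0, size=30)

corr, pval = scipy.stats.spearmanr(x, y)
print(corr, pval)  # strong positive correlation, correspondingly small p-value
```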
Runs Spearman search; calculates P(score >= score1) for each score1 using a permutation test. | def spearman_permutation_pval_search(input_data, input_gene_names, db_data, db_gene_names=None, db_gene_data=None, n_perms=100):
if db_gene_names is not None:
data_gene_ids, db_gene_ids = gene_overlap_indices(input_gene_names, db_gene_names)
data_subset = input_data[data_gene_ids]
results = []
... | [
"def spearman_pval_search(input_data, input_gene_names, db_data, db_gene_names=None, db_gene_data=None):\n if db_gene_names is not None:\n data_gene_ids, db_gene_ids = gene_overlap_indices(input_gene_names, db_gene_names)\n data_subset = input_data[data_gene_ids]\n results = []\n for cell_typ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
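The permutation scheme is generic; a hedged sketch of the empirical p-value P(score >= observed), leaving out the gene-alignment plumbing elided above:

```python
import numpy as np
import scipy.stats

rng = np.random.default_rng(0)

def permutation_pval_sketch(x, y, n_perms=100):
    observed = scipy.stats.spearmanr(x, y)[0]
    perm_scores = np.array([
        scipy.stats.spearmanr(rng.permutation(x), y)[0]
        for _ in range(n_perms)
    ])
    # +1 smoothing keeps the estimate away from an impossible p = 0.
    return (np.sum(perm_scores >= observed) + 1) / (n_perms + 1)

x = np.arange(20.0)
y = x + rng.normal(0.0, 2.0, size=20)
print(permutation_pval_sketch(x, y))  # near 1/(n_perms + 1) for a real correlation
```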
Creates the map folder in the build path if it does not yet exist | def create_cache(name: str, folder: str) -> str:
if (folder is not None):
create_folder(os.path.join(folder, "map"))
return folder
else:
build_folder = Config.get('default_build_folder')
cache_folder = os.path.join(build_folder, name)
create_folder(os.path.join(cache_fold... | [
"def maps_dir():\n return _mkifnotexists(\"web/maps\")",
"def create_build_folder(path=settings.hatchery_build_folder):\n path = Path(path)\n if not path.exists():\n makedirs(path, exist_ok=True)",
"def create_project_dirs() -> None:\n create_folder('cache/data')\n create_folder('cache/mod... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
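create_cache relies on a create_folder helper and a Config singleton that are not shown. A plausible implementation of the helper, offered as an assumption rather than the project's actual code:

```python
import os

def create_folder(path):
    # Create the directory and any missing parents; no-op if it already exists.
    os.makedirs(path, exist_ok=True)

create_folder(os.path.join("/tmp/demo_build", "map"))  # demo path, not the real build folder
```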
Sends a JSON object to the port given by the env variable DACE_port. If the port isn't set, nothing is sent. | def send(data: json):
if "DACE_port" not in os.environ:
return
HOST = socket.gethostname()
PORT = os.environ["DACE_port"]
data_bytes = bytes(json.dumps(data), "utf-8")
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.connect((HOST, int(PORT)))
s.sendall(data_... | [
"def PortWrite( data ):\r\n global gTelnetConn\r\n if gTelnetConn == None:\r\n OpenTelnet()\r\n \r\n gTelnetConn.write( data )\r\n \r\n return;",
"def send( self, data: JSONData ) -> None:\n\n self.sock.sendall( self.encode( data ) )\n self.sock.shutdown( socket.SHUT_WR ) # Signal end of ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
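send expects a listener on the other side of DACE_port; a hypothetical counterpart that accepts one connection and decodes the payload (port 9595 and the blocking accept are demo choices, not part of the original code):

```python
import json
import os
import socket

os.environ.setdefault("DACE_port", "9595")
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
    srv.bind((socket.gethostname(), int(os.environ["DACE_port"])))
    srv.listen(1)
    conn, _ = srv.accept()
    with conn:
        chunks = []
        while True:  # the sender closing its socket signals end-of-message
            chunk = conn.recv(4096)
            if not chunk:
                break
            chunks.append(chunk)
print(json.loads(b"".join(chunks)))
```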
Search all nodes for debuginfo to find the source filenames | def get_src_files(sdfg):
sourcefiles = []
for node, _ in sdfg.all_nodes_recursive():
if (isinstance(node, (nodes.AccessNode, nodes.Tasklet, nodes.LibraryNode, nodes.Map, nodes.NestedSDFG))
and node.debuginfo is not None):
filename = node.debuginfo.filename
if not... | [
"def map_debuginfos(self):\n for rpm in self.rpms:\n if rpm.is_debuginfo:\n # print('calling map_debuginfo(%s)' % rpm.nevra)\n self.map_debuginfo(rpm)",
"def source_listing(self):\n result = []\n for fname in self.source_files:\n if fname no... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
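The loop in get_src_files is cut off at its de-duplication check; the pattern it appears to implement is an order-preserving unique filter, sketched generically:

```python
def unique_in_order(items):
    # Keep the first occurrence of each item, preserving encounter order.
    seen = set()
    out = []
    for item in items:
        if item not in seen:
            seen.add(item)
            out.append(item)
    return out

print(unique_in_order(["a.py", "b.py", "a.py", "c.py"]))  # ['a.py', 'b.py', 'c.py']
```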