Columns: query (string, lengths 9–9.05k), document (string, lengths 10–222k), negatives (list, lengths 19–20), metadata (dict)
Tries to find the best tag for the current cursor position.
def findTag(bufferNumber, changedTick): # DOC {{{ # }}} # CODE {{{ # try to find the best tag {{{ try: # get the tags data for the current buffer tagLineNumbers, tags = getTags(bufferNumber, changedTick) # link to vim's internal data {{{ currentBuffer = vim.current....
[ "def _get_spot(precursor):\n return 0", "def find_max_tag(self, word):\n count = []\n for tag in self.pos_tags:\n count.append(self.tag_word_data.count((tag, word)))\n max_index = np.argmax(np.asarray(count))\n return self.pos_tags[max_index]", "def tagName(self, pos_ta...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Removes tags data for the specified buffer number.
def deleteTags(bufferNumber): # DOC {{{ # }}} # CODE {{{ # define global variables global TAGS, TAGLINENUMBERS, BUFFERTICKS # try to delete the tags for the buffer {{{ try: del TAGS[bufferNumber] del TAGLINENUMBERS[bufferNumber] del BUFFERTICKS[bufferNumber] exc...
[ "def removeTag(self, tag_num):\n\n # Check if we know of this tag, and if this is the case, set it to an empty\n # list\n if self.tags.query(\"num\", tag_num):\n self.fields[tag_num] = []", "def commit_buffer_discard(self, tag=None):\n\n tag = self._auto_set_tag_context(tag)\n\n if tag...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
When a resource record is deleted, delete all related attachments. When a bucket or collection is deleted, remove the attachments of every underlying record.
def on_delete_record(event): keep_old_files = asbool(utils.setting_value(event.request, 'keep_old_files', default=False)) # Retrieve attachments for these records using links. resource_name = event.payload['resource_name'] filter_field = '%s_uri' % resource_name uri = event.payload['uri'] utils...
[ "def post_provider_attachment_delete(self, resource_id, resource_dict):\n pass", "def pre_customer_attachment_delete(self, resource_id):\n pass", "def post_customer_attachment_delete(self, resource_id, resource_dict):\n pass", "def pre_provider_attachment_delete(self, resource_id):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Refresh the index by recomputing the embeddings for all points.
def refresh_index(self): synchronize() # TODO: add logger call here self._compute_embeddings()
[ "def _warm_cache(self):\n for word, index in self.word_to_index.items():\n self.embedding_layer.weight.data[index].copy_(torch.from_numpy(self.embedder.get_word_vector(word)))", "def flush(self):\n slots = torch.nonzero(self.cached_idx_map > -1).squeeze(1)\n row_ids = self.cached_i...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Draw a plot of the MSE against lambda. Draw a plot of the MSE of the learning curve for lambda = 0 and lambda = 1.
def plot_mse(mse, lambda0, lambda1, scale, loc='lower right'): import matplotlib.pyplot as plt fig = plt.figure() ax = fig.add_subplot(211) ax.plot(*zip(*mse)) plt.xlabel('$\lambda$') plt.ylabel('MSE') plt.yticks(scale) ax = fig.add_subplot(212) ax.plot(*zip(*lambda0), label='$\lam...
[ "def lambda_Param_Scan_visualization(lambds,method, mse_tr0):\n plt.semilogx(lambds, mse_tr0, marker=\".\", color='b', label='train error')\n plt.xlabel(\"lambda\")\n plt.ylabel(\"mse\")\n plt.title(\"Penalty term parameter_scan for \"+method)\n plt.legend(loc=2)\n plt.grid(True)\n plt.savefig(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
! @brief Gets the binary and source files from the GitHub Release server. [in] `tag_name` Git tag of the current release [in] `config` config metadata set in main.py `List[ReleaseFile]` List of release files `Dict[str, SourceFile]` Dictionary of source files Sends an `HTTP GET` request to GitHub using their REST API to re...
def get_release_files(tag_name, config) -> Tuple[List[ReleaseFile], Dict[str, SourceFile]]: @retry_multi(5) # retry at most 5 times def execute_request(path): """! @brief Performs a GET request with the given path. To be used with Github's REST API. @returns If successful, returns a .JS...
[ "def main(config):\n for key, value in config.items():\n for version in value['versions']:\n pull_docker_image(version)", "def run(docker_hub_client, args):\n resp = docker_hub_client.get_tags(args.orgname, args.reponame, args.page)\n if resp['code'] == 200:\n if resp['content'][...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
! Performs a GET request with the given path. To be used with GitHub's REST API. If successful, returns a JSON object
def execute_request(path): headers = { "Accept": "application/vnd.github.v3+json" } url = "https://api.github.com" + path # GET https://api.github.com/<path> Accept: "application/vnd.github.v3+json" response = requests.get(url, headers=headers, timeout=GLOBAL_TIMEOU...
[ "def get(self, path):\n return self.request(requests.get, url=self.join_path(path))", "def get(self, *path, **data):\n\t\treturn self.request('GET', *path, **data)", "def get(self, endpoint, params=None):\n res = requests.get(\"https://api.github.com/\" + endpoint,\n auth...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
! Gets file metadata for nightlies hosted on FTP, as determined by config["ftp"] attributes [in] `build_type` Unknown str [in] `tag_name` Github tag name of the release [in] `config` config metadata set in main.py
def get_ftp_files(build_type, tag_name, config) -> List[ReleaseFile] : tag_regex = re.compile("nightly_(.*)") build_group_regex = re.compile("nightly_.*-builds-([^.]+).*") files = [] try: with FTP(config["ftp"]["host"], config["ftp"]["user"], config["ftp"]["pass"]) as ftp: # extrac...
[ "def local_meta_info(basepath):\n config = ConfigParser.ConfigParser()\n meta_path = os.path.join(os.path.dirname(basepath), 'config.yt-bulk')\n config.read(meta_path)\n\n try:\n desc = config.get('video-info', 'description')\n tags = config.get('video-info', 'tags')\n category = co...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
serialize triples to chosen format supported by rdflib, e.g. xml, turtle, n3, etc
def serialize(triples, format='xml'): g = Graph() for k, v in NAMESPACES.items(): g.bind(k, v) for triple in triples: g.add(triple) return g.serialize(format=format)
[ "def model_to_triple_string(model, request=None):\n rdf_n3 = \"\"\n rdf_array = model_to_triples(model)\n\n for row in rdf_array: # ast.literal_eval(request.session['rdf_array']):#['rdf_array']:\n for elem in row:\n elem = elem.replace(\",\", \"\\\\,\") # escape commas\n if e...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns an unbound port number on 127.0.0.1.
def find_unbound_port(): while True: port = random.randint(*PORT_RANGE) sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) try: sock.bind(("127.0.0.1", port)) return port except socket.error: print("randomly generated port %d is bound. Trying...
[ "def unused_port() -> int:\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n s.bind((\"127.0.0.1\", 0))\n return cast(int, s.getsockname()[1])", "def unused_port():\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n s.bind(('127.0.0.1', 0))\n return s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sends an email to a single recipient straight to his MTA. Looks up the MX DNS records of the recipient's SMTP server and attempts delivery through them.
def send(self): answers = dns.resolver.query(self.domain, 'MX') try: for answer in answers: ex = answer.exchange.to_text() server = smtplib.SMTP(ex) server.set_debuglevel(self.verbose) server.sendmail(self.sender, [self.recipien...
[ "def send_mail(self):\n if self.recipients or self.bcc_recipients:\n return tasks.send_mail.delay(self.subject, self.body, self.recipients, self.bcc_recipients)\n else:\n logger.info(\"[Message.send_mail] Can't send a message without recipients, did you call 'set_recipients(...)'...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Starts an iterative task which updates org admins.
def updateOrgAdmins(request): return updateRole('gsoc_org_admin')
[ "def module_update_non_admin_all_tenant(self):\n self.test_runner.run_module_update_non_admin_all_tenant()", "def as_orgadmin():\n config = current.test_config\n browser = config.browser\n driver = browser\n\n login()\n open_organisation_roles()\n select_user()\n\n # Reset those access...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns GSoCProfile or GCIProfile which corresponds to the specified entity.
def _getProfileForRole(entity, profile_model): if isinstance(entity, profile_model): return entity if isinstance(entity, OrgAdmin) or isinstance(entity, Mentor): key_name = entity.program.key().name() + '/' + entity.user.key().name() else: key_name = entity.key().name() parent = entity.user ret...
[ "async def csgo_profile(self) -> ProfileInfo[Self]:\n msg = await self._state.fetch_user_csgo_profile(self.id)\n if not msg.account_profiles:\n raise ValueError\n return ProfileInfo(self, msg.account_profiles[0])", "def _getProfileFromUser(self):\n # make sure user is authed...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns Key instance of the Profile which corresponds to the Role which is represented by the specified Key.
def _getProfileKeyForRoleKey(key, profile_model): entity = db.get(key) profile = _getProfileForRole(entity, profile_model) return profile.key()
[ "def get_key(self, role):\n\n for key, role_name in self.assignable_roles[0].items():\n if role_name == role.name:\n return key", "def _get_key(self, key_id):\n return self._key_object.get_key(key_id)", "def _getProfileForRole(entity, profile_model):\n\n if isinstance(en...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
1. Convert ifg phase data into numpy binary files. 2. Save the preread_ifgs dict with information about the ifgs that are later used for fast loading of Ifg files in IfgPart class
def _create_ifg_dict(dest_tifs, params): ifgs_dict = {} nifgs = len(dest_tifs) process_tifs = mpiops.array_split(dest_tifs) for d in process_tifs: ifg = shared._prep_ifg(d, params) ifgs_dict[d] = PrereadIfg(path=d, nan_fraction=ifg.nan_fraction, ...
[ "def _create_ifg_dict(params):\n dest_tifs = [ifg_path for ifg_path in params[C.INTERFEROGRAM_FILES]]\n ifgs_dict = {}\n process_tifs = mpiops.array_split(dest_tifs)\n for d in process_tifs:\n ifg = Ifg(d.tmp_sampled_path) # get the writable copy\n ifg.open()\n nan_and_mm_convert(if...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
MPI wrapper function for MST calculation
def _mst_calc(dest_tifs, params, tiles, preread_ifgs): process_tiles = mpiops.array_split(tiles) log.info('Calculating minimum spanning tree matrix') def _save_mst_tile(tile, i, preread_ifgs): """ Convenient inner loop for mst tile saving """ mst_tile = mst.mst_multiprocessi...
[ "def distribute_matrix_data():\n def split_matrix(seq, p):\n \"\"\"\n Split matrix into small parts according to the no of workers. These\n parts will be send to slaves by master node\n \"\"\"\n rows = []\n n = int(len(seq) / p)\n r = len(seq) % p\n b, e = ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convenient inner loop for mst tile saving
def _save_mst_tile(tile, i, preread_ifgs): mst_tile = mst.mst_multiprocessing(tile, dest_tifs, preread_ifgs, params) # locally save the mst_mat mst_file_process_n = join(params[cf.TMPDIR], 'mst_mat_{}.npy'.format(i)) np.save(file=mst_file_process_n, arr=mst_tile)
[ "def saveTiles(z, x, y, ntiles, mapname, image, suffix = 'png', imgtype = None):\n for dx in range(0, ntiles):\n tilex = x*ntiles + dx\n ensureDirExists(getTileDir(mapname, z, tilex))\n for dy in range(0, ntiles): \n tiley = y*ntiles + dy\n offsetx = BORDER_WIDTH + d...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Wrapper for reference pixel calculation
def _ref_pixel_calc(ifg_paths: List[str], params: dict) -> Tuple[int, int]: lon = params[cf.REFX] lat = params[cf.REFY] ifg = Ifg(ifg_paths[0]) ifg.open(readonly=True) # assume all interferograms have same projection and will share the same transform transform = ifg.dataset.GetGeoTransform() ...
[ "def referencepixel(self, *args, **kwargs):\n return _coordsys.coordsys_referencepixel(self, *args, **kwargs)", "def reference_pixel(self):\n return Pair(self.meta.get('crpix1',\n (self.meta.get('naxis1') + 1) / 2.) * u.pixel,\n self.meta.get('crpi...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
MPI wrapper for maxvar and vcmt computation
def _maxvar_vcm_calc(ifg_paths, params, preread_ifgs): log.info('Calculating the temporal variance-covariance matrix') process_indices = mpiops.array_split(range(len(ifg_paths))) def _get_r_dist(ifg_path): """ Get RDIst class object """ ifg = Ifg(ifg_path) ifg.open()...
[ "def maxout_var(self, rv):\r\n #self.cpt += 0.00002\r\n exp_len = int(len(self.cpt)/self.card[rv])\r\n new_cpt = np.zeros(exp_len)\r\n\r\n rv_card = self.card[rv]\r\n rv_stride = self.stride[rv]\r\n\r\n k=0\r\n p = np.prod([self.card[r] for r in self.scope if self.st...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
MPI wrapper for time series calculation.
def _timeseries_calc(ifg_paths, params, vcmt, tiles, preread_ifgs): if params[cf.TIME_SERIES_CAL] == 0: log.info('Time Series Calculation not required') return if params[cf.TIME_SERIES_METHOD] == 1: log.info('Calculating time series using Laplacian Smoothing method') elif params[cf....
[ "def mpi_schedule_job_array(csvstore, job_array, mpi_service=MPIService()):\n param_array = job_array.param_array\n job = job_array.job\n try:\n if mpi_service.rank == 0:\n # master\n results = []\n nb_completed_tasks = 0\n nb_tasks = len(param_array)\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the number of simulation threads to use in Calculix
def setNumThreads(cls, numThreads: int): cls.NUMTHREADS = numThreads
[ "def setNumThreads(self, num):\r\n self.threads = num", "def set_num_threads(self, numt=1): \n self.NUMT = numt", "def setNumThreads(self, num):\n self.threads = num", "def setNthreads(self, nthreads=None):\n if nthreads is None:\n nthreads = 4\n lib._omp_set_num_thre...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the number of threads used
def getNumThreads(cls) -> int: return cls.NUMTHREADS
[ "def get_threads():\n return K.nthreads", "def get_nb_threads():\n if sys.platform == 'win32':\n return int(os.environ['NUMBER_OF_PROCESSORS'])\n else:\n return int(os.popen('grep -c cores /proc/cpuinfo').read())", "def get_threads_number():\n try:\n cores_number = multiprocessi...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the path for the Calculix executable. Necessary when using Windows, where there is no default installation procedure for Calculix
def setCalculixPath(cls, calculixPath: str) -> None: if os.path.isdir(calculixPath): cls.CALCULIX_PATH = calculixPath
[ "def set_path(self, path):\n self._exe = path", "def set_install_path(path):\n global install_path\n install_path = os.path.expanduser(path)", "def os_set(self):\n if self.mod:\n path_startup = fr\"C:\\Users\\{environ['USER']}\\AppData\\Roaming\\Microsoft\\Windows\\Start Menu\\Program...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates node sets for any RBE connectors used in the simulation
def prepareConnectors(self): # Kinematic Connectors require creating node sets # These are created and added to the node set collection prior to writing numConnectors = 1 for connector in self.connectors: # Node are created and are an attribute of a Connector se...
[ "def _build_nodes(self):\n # print 'build_nodes'\n self._clear()\n self._initialize()\n\n for ns in self._node_sets:\n nodes = ns.build(nid_generator=self._node_id)\n self._add_nodes(nodes)\n self._nodes_built = True", "def set_nodes(self):\n self.in...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns whether the analysis completed successfully.
def isAnalysisCompleted(self) -> bool: return self._analysisCompleted
[ "def has_result(self):\n return len(self.__analysis_items) > 0", "def is_success(self):\n return self.current_state == self.States.SUCCEEDED", "def success(self):\n return self.retcode == 0", "def successful(self):\n return self._successful", "def did_solve(self) -> bool:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Clears any files generated from the analysis
def clearAnalysis(self, includeResults:bool = False) -> None: filename = 'input' # Base filename for the analysis files = [filename + '.inp', filename + '.cvg', filename + '.sta'] if includeResults: files.append(filename + '.frd') file...
[ "def clean_files(self):\n self.filenames.clear()", "def clear_data_files():\n demo_folder = osp.join(osp.dirname(osp.dirname(__file__)), 'demo_files')\n if osp.isdir(demo_folder):\n for file in os.listdir(demo_folder):\n full_file = osp.join(demo_folder, file)\n if osp.is...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
crawl the targeted Twitter account and save its tweets to CSV
def crawlAccount(target): # connect Twitter api twitter = connectTwitter() try: user_timeline = twitter.get_user_timeline(screen_name=target, count=200, include_rts=False, exclude_replies=False) except TwythonError: sys.exit('Received 404 for %s. Account does not exist or is banned.' % target) user_timelin...
[ "def collect_twitter_sentiment():\r\n # Open/create a file to append data to\r\n csvFile = open(NAME+'_posts.csv', 'a')\r\n # Use csv writer\r\n csvWriter = csv.writer(csvFile)\r\n # Calling the user function with current parameters\r\n results = twitter.user_timeline(id=NAME, count=TWEET_COUNT)\r...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
adding an assertion for testing dataframe equality; setting up a database_handler object with the dummy database path and connecting it
def setUp(self): self.addTypeEqualityFunc(pandas.DataFrame, self.assertDataframeEqual) self.database_connection.connect()
[ "def test_build_dataframe(self):\n insert_good_data()\n dataframe = get_dataframe()\n # 1 2 3\n self.assertIs(type(dataframe['Total'][0]), numpy.float64)\n self.assertIs(type(dataframe['InvoiceDate'][0]), str)\n self.assertIs(type(dataframe['Count'][0]), numpy.int64)\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
1.-3. checking data types are correct 4. testing sum of columns is correct 5. (added assertion) testing dataframes are the same, one manufactured 6. clearing the database and inserting altered data with a duplicated CustomerId, checking difference in Count (34) 7. testing raising an exception of AssertionError with dataframe and...
def test_build_dataframe(self): insert_good_data() dataframe = get_dataframe() # 1 2 3 self.assertIs(type(dataframe['Total'][0]), numpy.float64) self.assertIs(type(dataframe['InvoiceDate'][0]), str) self.assertIs(type(dataframe['Count'][0]), numpy.int64) # 4 ...
[ "def test_create_dataframe(dataframe):\n results = True\n rows = dataframe.shape[0]\n column_names = sorted(dataframe.columns)\n column_datatypes = list(dataframe[column_names].dtypes)\n\n # Checks columns match those specified in #1\n if column_names != DATA_COLUMNS:\n raise ValueError(\"D...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
1. testing the build_graph method returns the correct string, and waiting for file to open (less than 1 sec)
def test_build_graph(self): insert_good_data() dataframe = get_dataframe() results = processing.build_graph(dataframe, figure_path, False) # 1 self.assertEqual(results, "Updated html File and Opened it")
[ "def make_graph(file_name: str, file_prefix: str, limit: int) -> None:\n print(\"Reading lines\")\n with open(file_name) as f:\n lines = f.readlines()\n\n print(\"Read lines\")\n\n # Figure out the room version, assume the first line is the create event.\n room_version = KNOWN_ROOM_VERSIONS[\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a (potentially existing) directory without errors. Raise OSError if directory can't be created. If clobber is True, remove dirpath if it exists.
def mkdir(dirpath, clobber=False): if clobber: shutil.rmtree(dirpath, ignore_errors=True) try: os.mkdir(dirpath) except OSError: pass if not path.exists(dirpath): raise OSError('Failed to create %s' % dirpath) return dirpath
[ "def makedirs_ifneeded(dirpath):\n if sys.version[0] == '3':\n # python 3.X\n os.makedirs(dirpath, exist_ok=True) # 'recursive' mkdir, as needed\n else:\n # python 2.X\n try:\n os.makedirs(dirpath) # 2.X has no exists_ok\n ex...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Copy fname from package data to outdir/subdir (creating dir if necessary), and return the path to the copy of fname relative to outdir.
def make_local_copy(outdir, subdir, fname): destdir = path.join(outdir, subdir) mkdir(destdir) shutil.copyfile(package_data(fname), path.join(destdir, fname)) return path.join(subdir, fname)
[ "def package_dest_path(self, package):\n\n if self.destdir is None:\n return self.package_final_path(package)\n else:\n return os.path.join(\n self.destdir,\n self.package_install_space(package).lstrip(os.sep))", "def GetOutputFilename(fname):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given a course code, requests the corresponding course page
def get_coursepage(code): url = 'http://gla.ac.uk/coursecatalogue/course/?code=' + code print(url) coursepage = requests.get(url) return coursepage
[ "def get_course_page(self):\n\n print(\"Course URL: {}\".format(self.course_url))\n try:\n self.course_page = BeautifulSoup(requests.get(self.course_url).text, \"lxml\")\n except requests.ConnectionError as e:\n print(\"Error Connecting!\\n\", e)\n sys.exit(1)\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates a dictionary with a heading-value pair, which is the structure of all the sections in the courses dictionary
def new_dict(heading, value): value = value.replace('%', '\%').replace('&', '\&').replace(u'\xa0', ' ') # Currently encoding is causing me problems - the quick fix below removes # all the characters that have broken the code so far. This solution is not # likely to work if more courses were added v...
[ "def create_sections_dict(self):\n # test if self.sections defined, if not -> create\n try:\n self.sections\n except AttributeError:\n # print \"Create sections dictionary\"\n self.get_sections()\n self.section_dict = {}\n for section in self.secti...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Each course page has a small info section at the beginning, which I had to extract and formulate in a different way to the main sections. This function constructs the dictionary entries for the course when given a string with all the details required for the info section
def get_info_list(info_string, course): info_list = [] split_on_newline = info_string.split("\n") for elem in split_on_newline: split = elem.split(": ") for s in split: info_list.append(s) info_list = info_list[1:-1] info_tags = [ 'session', 'school', 'credits', '...
[ "def get_course_lab_info(self, courses):\n # Build dict to hold structured course info\n all_courses = {}\n for c in courses:\n subject, number = c.upper().split(\" \")\n if subject not in all_courses:\n all_courses[subject] = {number: {}}\n else:...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given a list of course codes, get their corresponding titles and format them in a bulleted TeX list. This is used to indicate in the abstract which courses have been deliberately excluded from the document
def create_not_included_list(codes): string = '\\begin{itemize}\n' for code in codes: title = get_course_title_only(code) string += '\\item{' + title + '}\n' string += '\\end{itemize}\n' return string
[ "def create_tex(unwanted_courses, wanted_courses=None):\n page = requests.get(\n 'http://gla.ac.uk/coursecatalogue/courselist/' +\n '?code=REG30200000&name=School+of+Computing+Science')\n tree = html.fromstring(page.content)\n spans = tree.xpath('//span/text()')\n codes = []\n if wanted...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates a TeX formatted string for a given subsubsection
def latex_subsection(section): string = '\\subsubsection*{' + section['heading'] + '}\n' string += section['value'] + '\n' return string
[ "def sub(string, subscript):\n return string + \"<sub>\" + subscript + \"</sub>\"", "def print_sub_section(self, s, level=0):\n section = s.capitalize()\n\n self.print_newline()\n self._write('%s+ %s\\n' % ('-' * level, section))\n self.print_newline()", "def subtitle(self, txt):\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates a TeX formatted string for a course
def latex_course(course): basic_info_list = [ 'session', 'school', 'credits', 'level', 'offered', 'visiting_students', 'erasmus_students' ] generic_subsection_list = [ 'description', 'timetable', 'requirements_of_entry', 'excluded_courses', 'co_requisites', 'assessment_weight...
[ "def create_tex(unwanted_courses, wanted_courses=None):\n page = requests.get(\n 'http://gla.ac.uk/coursecatalogue/courselist/' +\n '?code=REG30200000&name=School+of+Computing+Science')\n tree = html.fromstring(page.content)\n spans = tree.xpath('//span/text()')\n codes = []\n if wanted...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates the TeX document from the Computer Science Course Catalog
def create_tex(unwanted_courses, wanted_courses=None): page = requests.get( 'http://gla.ac.uk/coursecatalogue/courselist/' + '?code=REG30200000&name=School+of+Computing+Science') tree = html.fromstring(page.content) spans = tree.xpath('//span/text()') codes = [] if wanted_courses is ...
[ "def parse_create_course(xml_course):\n attrs = [\n \"term-code\",\n \"term-description\",\n 'subject',\n \"course-number\",\n \"school\",\n \"department\",\n \"title\",\n \"description\",\n \"credit-hours\",\n \"distribution-group\"\n ]\n course = pull_attributes_from_xml(xml_cours...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generates a TeX document and then runs the pdflatex command to create a PDF from the TeX
def pdflatex(unwanted_courses): create_tex(unwanted_courses) cmd = ['pdflatex', '-interaction', 'nonstopmode', 'courses.tex'] proc = subprocess.Popen(cmd) proc.communicate() return None
[ "def generate_pdf(tex_file):\n\n os.system(\"pdflatex {}\".format(tex_file))", "def genPDF(filename):\n newlines = 'echo -e \"' + '\\\\n' * 100 + '\"' # Hack to continue when pdflatex halts.\n cmd = newlines + \" | pdflatex \" + filename + \" --shell-escape 2>/dev/null >/dev/null\"\n os.system(cmd)", ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Checks whether the command contains a word from the ignore list. command: string, the command to check. ignore: list, the list of words. Returns True if the command contains a word from the ignore list, False otherwise.
def ignore_command(command, ignore): # variant 1 return any(word in command for word in ignore)
[ "def ignore_command(command, ignore):\n ignore_status = False\n for word in ignore:\n if word in command:\n ignore_status = True\n return ignore_status", "def is_command_ancillary(args):\n # pylint: disable=bad-continuation\n if (\n # skip the parent check and only\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Trim an upper triangle sparse matrix so that only the first n diagonals are kept.
def diag_trim(mat, n): if sp.issparse(mat): if mat.format != "csr": raise ValueError("input type must be scipy.sparse.csr_matrix") # Trim diagonals by removing all elements further than n in the # upper triangle trimmed = sp.tril(mat, n, format="csr") trimmed = sp...
[ "def clean_mat(square_array):\n np.fill_diagonal(square_array, 0)\n return ( np.tril(square_array, k=0) )", "def upper_triangle(M, k=1):\n keep = np.triu(np.ones(M.shape), k=k).astype('bool').reshape(M.size)\n # have to use dropna=FALSE on stack, otherwise will secretly drop nans and upper triangle\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Computes genomic distance law by averaging over each diagonal in the upper triangle matrix. If a list of detectable bins is provided, pixels in missing bins will be excluded from the averages. A maximum distance can be specified to define how many diagonals should be computed.
def distance_law( matrix, detectable_bins=None, max_dist=None, smooth=True, fun=np.nanmean ): mat_n = matrix.shape[0] if max_dist is None: max_dist = mat_n n_diags = min(mat_n, max_dist + 1) dist = np.zeros(mat_n) if detectable_bins is None: detectable_bins = np.array(range(mat_n...
[ "def feat_numc_diag(cmap, diaglen=21, score_threshold=0.2):\n \n nd = np.zeros(diaglen/2)\n for dist in range(1, diaglen, 2):\n\n # function to apply to each window\n # counts contacts in corners of window if there is a contact\n # in the center\n # IDEA: apply this function to ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute the sum of matrix bins (i.e. rows or columns) using only the upper triangle, assuming symmetrical matrices.
def sum_mat_bins(mat): # Equivalent to row or col sum on a full matrix # Note: mat.sum returns a 'matrix' object. A1 extracts the 1D flat array # from the matrix return mat.sum(axis=0).A1 + mat.sum(axis=1).A1 - mat.diagonal(0)
[ "def row_col_sums(i, b_j, bins, C, n_u):\n s= C[i][i]*n_u[i]*n_u[i]\n for j in range(bins[b_j], bins[b_j+1]):\n if i != j:\n s+= (C[i][j] + C[j][i])*n_u[i]*n_u[j]\n return s", "def corner_sum_matrix(self):\n asm = self.to_matrix()\n n = asm.nrows() + 1\n return matr...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Bootstrap sampling of contacts in a sparse Hi-C map.
def subsample_contacts(M, n_contacts): S = M.data.copy() # Match cell idx to cumulative number of contacts cum_counts = np.cumsum(S) # Total number of contacts to sample tot_contacts = int(cum_counts[-1]) # Sample desired number of contacts from the range(0, n_contacts) array sampled_conta...
[ "def standard_bootstrap(dataset):\n randseed=np.random.randint(0,10000)\n np.random.seed(randseed)\n \n n = dataset.shape[0]\n b = np.random.randint(0, high=n-1, size=n)\n return dataset[b]", "def bootstrap_sample(data):\n return [random.choice(data) for _ in data]", "def bootstrap(dataset,...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Adds a frame around input mask, given a kernel. The goal of this frame is to define margins around the matrix where the kernel will not perform convolution (denoted by 1). If the matrix is upper symmetric, a margin of half the kernel's width is added below the diagonal and a maximum distance from the diagonal above which ...
def frame_missing_mask(mask, kernel_shape, sym_upper=False, max_dist=None): if mask.dtype != bool: raise ValueError("Mask must contain boolean values") if not sp.issparse(mask): raise ValueError("Mask must be a sparse matrix") framed_mask = mask.copy() ms, ns = mask.shape mk, nk = k...
[ "def slidekernelthroughdiagonal(kernel, matrix):\n size_kernel = kernel.shape[0]\n size_matrix = matrix.shape[0]\n result = np.zeros([size_matrix])\n for i in range(size_matrix):\n # Calculate zero padding needed\n padding_b = -min(i - int(size_kernel/2), 0)\n padding_a = -min(size_matrix - int(i + siz...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Ensure all elements defined as missing by the mask are set to zero in the signal. If this is not the case, raises an error.
def check_missing_mask(signal, mask): if sp.issparse(mask): # Check if there are nonzero values in the signal reported as missing # by the mask missing_with_signal = np.nonzero( abs(signal[mask.nonzero()[0], mask.nonzero()[1]]) > 0 )[0] if len(missing_with_signal...
[ "def applymask(self,mask):\n self.spec[mask==0]=np.nan", "def example4(a):\n nan_mask = np.isnan(a)\n a[nan_mask] = 0\n return nan_mask", "def propagate_missing_data(im1, im2, im3, mask):\n nrows,ncols = im1.shape\n\n for col in range(ncols):\n for row in range(nrows):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given lists of valid rows and columns, generate a sparse matrix mask with missing pixels denoted as 1 and valid pixels as 0. If a max_dist is provided, upper symmetric matrices will only be flagged up to max_dist pixels from the diagonal.
def make_missing_mask( shape, valid_rows, valid_cols, max_dist=None, sym_upper=False ): # Error if the matrix upper symmetric but shape is rectangle or missing # rows and cols are different sm, sn = shape if sym_upper and (sm != sn or len(valid_rows) != len(valid_cols)): raise ValueError("Re...
[ "def frame_missing_mask(mask, kernel_shape, sym_upper=False, max_dist=None):\n if mask.dtype != bool:\n raise ValueError(\"Mask must contain boolean values\")\n if not sp.issparse(mask):\n raise ValueError(\"Mask must be a sparse matrix\")\n\n framed_mask = mask.copy()\n ms, ns = mask.shap...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Adds a margin of zeros around an input sparse matrix.
def zero_pad_sparse(mat, margin_h, margin_v, fmt="coo"): sm, sn = mat.shape padded_mat = mat.copy() # Up and down margins initialized with zeros and filled as needed margin_h_0 = sp.csr_matrix((sm, margin_h), dtype=mat.dtype) margin_v_0 = sp.csr_matrix((margin_v, sn + 2 * margin_h), dtype=mat.dtype...
[ "def add_padding(sparse_matrix: np.ndarray, dtype: np.dtype) -> np.ndarray:\n\n max_length_row = max([ len(row) for row in sparse_matrix ])\n \n padded_matrix = np.zeros((len(sparse_matrix), max_length_row), dtype=dtype)\n for i, row in enumerate(sparse_matrix):\n padded_matrix[i, :len(row)] = ro...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Crop a kernel matrix to the target size horizontally and vertically. If a target size is even, it is adjusted to the next integer up.
def crop_kernel(kernel, target_size): # Use list for mutability target = [d for d in target_size] adjusted = False for dim in range(len(target)): if not target[dim] % 2: target[dim] += 1 adjusted = True if adjusted: sys.stderr.write( "WARNING: Crop...
[ "def crop_to_target(x, target):\n\n if target.ndim==3:\n t_h, t_w = target.shape[1], target.shape[2]\n elif target.ndim==4:\n t_h, t_w = target.shape[2], target.shape[3]\n cr = int((x.shape[2] - t_h) / 2)\n cc = int((x.shape[3] - t_w) / 2)\n x_cropped = x[:, :, cr:cr + t_h, cc:cc + t_w]...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Resize a kernel matrix based on the resolution at which it was defined and the signal resolution. E.g. if a kernel matrix was generated for 10kb and the input signal is 20kb, kernel size will be divided by two. If the kernel is enlarged, pixels are interpolated with a spline of degree 1. Alternatively, a resize factor ...
def resize_kernel( kernel, kernel_res=None, signal_res=None, factor=None, min_size=7, quiet=False, ): km, kn = kernel.shape if km != kn: raise ValueError("kernel must be square.") if not (km % 2) or not (kn % 2): raise ValueError("kernel size must be odd.") if fa...
[ "def shrink_kernel(self, kernel, up_scale):\n up_scale = torch.tensor(up_scale).float()\n # boundary padding based on the scaling law\n pad_in = (torch.ceil(up_scale**2).int())*((kernel.shape[2]-1)//2)\n pad_h = (torch.ceil(up_scale).int())*((kernel.shape[3]-1)//2)\n pad_w = (torc...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Performs truncated SVD on an input kernel, returning the singular vectors necessary to retain a given proportion of information contained in the kernel.
def factorise_kernel(kernel, prop_info=0.999): u, sigma, v = la.svd(kernel) total_info = np.sum(sigma ** 2) # Compute min. number of singular vectors to retain enough info keep_k = np.flatnonzero(np.cumsum(sigma ** 2) > prop_info * total_info)[0] + 1 if keep_k > np.floor(min(kernel.shape) / 2): ...
[ "def truncated_svd(A,k=None):", "def svd_shrink(self, X, sv, mu):\n svd = irlb.irlb(X, sv, maxit=self.max_iter)\n U = svd[0]\n S = svd[1]\n V = svd[2]\n svp = S[S>(mu**-1)].shape[0]\n n = X.shape[1]\n if svp < sv:\n sv = min(svp + 1, n)\n else:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given an array of valid indices, return the corresponding array of missing indices.
def valid_to_missing(valid, size): missing = np.ones(size, dtype=bool) try: missing[valid] = False # In case there is no valid index except IndexError: pass missing = np.flatnonzero(missing) return missing
[ "def _iloc_indices_with_missing(self, indices):\n raise NotImplementedError", "def get_removed_idxs(nnindexer):\n invalid_idxs = np.nonzero(nnindexer.ax2_aid[nnindexer.idx2_ax] == -1)[0]\n return invalid_idxs", "def non_masked_indices(mask):\n\treturn np.nonzero(np.ravel(mask-1,order='C'))[...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
r"""Close a registry entry.
def close_registry_entry(cls, value): out = False if not value.closed: # pragma: debug value.close() out = True return out
[ "def crb_exit(self):\n self.close()", "def __del__(self):\n for key_path_prefix, registry_file in iter(self._registry_files.items()):\n self._registry_files[key_path_prefix] = None\n if registry_file:\n registry_file.Close()", "def CloseKey(*args, **kwargs): # real signature unknown\r...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
r"""Record the current position in the file/series.
def record_position(self): _rec_pos = self.fd.tell() _rec_ind = self._series_index return _rec_pos, _rec_ind
[ "def change_position(self, file_pos, series_index=None):\n if series_index is None:\n series_index = self._series_index\n self.advance_in_series(series_index)\n self.advance_in_file(file_pos)", "def record(self, pos):\n self.lasts += (datetime.now(), pos),\n if len(se...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
r"""Change the position in the file/series.
def change_position(self, file_pos, series_index=None): if series_index is None: series_index = self._series_index self.advance_in_series(series_index) self.advance_in_file(file_pos)
[ "def _do_seek(self,offset):\n assert (self.realpos + offset) >= 0\n self.fo.seek(self.realpos + offset)\n self.realpos+= offset", "def update_position(position):\r\n pass", "def update_position(position):\n pass", "def _update_offset_file(self):\n offset = self._fileh...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
r"""Advance to a certain position in the current file.
def advance_in_file(self, file_pos): if self.is_open: try: self.fd.seek(file_pos) except (AttributeError, ValueError): # pragma: debug if self.is_open: raise
[ "def advance(self):\n self.current_idx += 1\n self.current_line = self.raw_file[self.current_idx]", "def advance(self):\n self.current_character = self.file.read(1)", "def _do_seek(self,offset):\n assert (self.realpos + offset) >= 0\n self.fo.seek(self.realpos + offset)\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return True if there are pending symbolic updates for any one of the variables in `args`. If called with no arguments, return True if the update dictionary is nonempty.
def pending_update(*args): if len(args) == 0: return len(cf.symbolic_updates) > 0 else: for x in _expand_args(args): if is_graph_object(x) and x in cf.symbolic_updates: return True return False
[ "def update(args):\n\n home = args.assert_home()\n\n if args.all:\n env_repos = list(home.iter_env_repos())\n else:\n env_repos = [home.get_env_repo(x) for x in args.repos] if args.repos else [home.get_env_repo()]\n\n success = True\n\n for env_repo in env_repos:\n did_update = e...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the same function as theano.printing._print_fn, with the difference that 'file' is passed as a keyword argument to print().
def _get_print_fn(file=sys.stdout): def _print_fn(op, xin,): for attr in op.attrs: temp = getattr(xin, attr) if callable(temp): pmsg = temp() else: pmsg = temp print(op.message, attr, '=', pmsg, file=file) return _print_fn
[ "def makeprint(file_desc):\n return lambda *param, **arg: print(*param, **arg, file=file_desc)", "def printTo(file = None, *args, **kwargs):\n \n print(*args, **kwargs)\n if file is not None:\n sys.stdout = file\n print(*args, **kwargs)\n sys.stdout = sys.__stdout__", "def print...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Helper function for printing just one element in an array. All parameters except `idx` are the same as for `print`. Returns an identity operation on `x`, so that it can be used as follows >>> x = shim.tensor(np.arange(0, 100, 0.1)) >>> x = shim.print_array(x, idx=3)
def print_array(x, idx=slice(None), message=None, message_prefix="SHIM - ", file=sys.stdout): return set_subtensor(x[idx], print(x[idx], message=message, message_prefix=message_prefix, ...
[ "def ndarray_print(nd):\n try:\n x = nd.tolist()\n except (TypeError, NotImplementedError):\n x = nd.tobytes()\n if isinstance(nd, ndarray):\n offset = nd.offset\n flags = nd.flags\n else:\n offset = 'unknown'\n flags = 'unknown'\n print(\n \"ndarray(%...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Call pretty printer (`pprint`) on Theano objects, otherwise standard `print`
def pprint(x): if is_theano_object(x): return _gettheano().printing.pprint(x) else: return str(x)
[ "def pretty_print (object):\n if object is None:\n return\n import __builtin__\n __builtin__._=object\n\n from sage.plot.plot import Graphics\n from sage.plot.plot3d.base import Graphics3d\n if isinstance(object, (Graphics, Graphics3d)):\n print repr(object)\n return\n else...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
If `value` is a Theano variable, return its test value if it is defined. Otherwise just return `value` unchanged. If `nofail` is False (default), will raise an error if no test value is found. Otherwise returns None
def get_test_value(var, nofail=False): if 'theano' in sys.modules and isinstance(var, _getT().sharedvar.SharedVariable): retval = var.get_value() elif 'theano' in sys.modules and isinstance(var, _gettheano().graph.basic.Variable): try: retval = var.tag.test_value except Attri...
[ "def ensure_not_none(value: Optional[TTT]) -> TTT:\n assert value is not None\n return value", "def if_none_then(value: Optional[T], default: T) -> T:\n if value is not None:\n return value\n return default", "def iif(test,tval,fval=''):\n if test : return tval\n else : return fval", ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Todo: There seems to be some redundancy between ``is_pure_symbolic(x)`` and ``not graph.is_computable(x)``.
def is_pure_symbolic(*var): # return 'theano' in sys.modules and builtins.any(isinstance(v, _gettheano().tensor.TensorVariable) return 'theano' in sys.modules and builtins.any(isinstance(v, cf.PureSymbolicTypes) for v in _expand_args(var))
[ "def is_pure(self) -> bool:\r\n return self.is_valid and np.all([x[\"operation\"].is_pure for x in self.operations_by_name.values()])", "def is_pure(self):\r\n return isinstance(self, PureOperation)", "def is_symbolic(self: Q) -> bool:\n\n symbolic = False\n\n if (\n h...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Make x array-like. Note that if broadcastable is not None, and Theano is loaded, the return value will always be a Theano variable, even if x is pure Python or Numpy. This is because `broadcastable` is a Theano-only property.
def asarray(x, dtype=None, broadcastable=None, symbolic=None): _symbolic = 'theano' in sys.modules and isinstance(x, _gettheano().graph.basic.Variable) if symbolic is None: symbolic = _symbolic elif symbolic is False and _symbolic is True: raise ValueError("Cannot force a symbolic variable ...
[ "def addbroadcast(x, *axes):\n if is_theano_object(x):\n # T.addbroadcast only works with positive axes\n axes = [ ax if ax >= 0 else x.ndim + ax for ax in axes ]\n return T.addbroadcast(x, *axes)\n else:\n for ax in axes:\n if x.shape[ax] != 1:\n raise Va...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return True if `var` is any recognized sparse format.
def issparse(var): if 'theano.sparse' in sys.modules: return (sp.sparse.issparse(var) or isinstance(var, sys.modules['theano.sparse'].basic.SparseVariable)) else: return sp.sparse.issparse(var)
[ "def isspsparse(var):\n if 'theano.sparse' in sys.modules:\n return (sp.sparse.issparse(var)\n or isinstance(var, sys.modules['theano.sparse'].basic.SparseVariable))\n else:\n return sp.sparse.issparse(var)", "def _is_sparse_variable(x):\n if not isinstance(x, Variable):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return True if `var` is sparse with `scipy.sparse` interface. True for scipy.sparse, theano.sparse.
def isspsparse(var): if 'theano.sparse' in sys.modules: return (sp.sparse.issparse(var) or isinstance(var, sys.modules['theano.sparse'].basic.SparseVariable)) else: return sp.sparse.issparse(var)
[ "def _is_sparse_variable(x):\n if not isinstance(x, Variable):\n raise NotImplementedError(\n \"this function should only be called on \"\n \"*variables* (of type sparse.SparseTensorType \"\n \"or TensorType, for instance), not \",\n x,\n )\n return is...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Equivalent to theano.tensor.addbroadcast. For NumPy objects, checks that broadcasted dimensions have length 1, but otherwise does nothing.
def addbroadcast(x, *axes): if is_theano_object(x): # T.addbroadcast only works with positive axes axes = [ ax if ax >= 0 else x.ndim + ax for ax in axes ] return T.addbroadcast(x, *axes) else: for ax in axes: if x.shape[ax] != 1: raise ValueError("Tri...
[ "def simple_broadcast(self, *args):\n\n def generic_len(a):\n try:\n return len(a)\n except TypeError:\n if len(a.shape) < 1:\n return 0\n else:\n return a.shape[0]\n\n args = [self.astensor(arg) f...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Call this function on any expression that might appear in a Theano graph as a boolean (Theano expects integers rather than booleans.)
def bool(a): # Booleans need to be converted to integers for Theano if cf.use_theano and isinstance(a, (builtins.bool, np.bool_)): return np.int8(a) elif cf.use_theano or is_theano_object(a): return a else: return builtins.bool(a)
[ "def evaluateBoolean(compiled_expression):", "def boolean_func(experiment):", "def on_true(self) -> global___Expression:", "def test_graph_bool():\n adjacency = test_graph()\n adjacency.data = adjacency.data.astype(bool)\n return adjacency", "def _op(\n x: Union[bool, dts.Boolean, tps.Boolea...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
All parameters except `outshape` are the same as for theano.ifelse.ifelse `outshape` is an extra parameter to allow the then_branch and else_branch
def ifelse(condition, then_branch, else_branch, name=None, outshape=None): # First check if we can replace an Theano conditional by a Python one if is_theano_object(condition) and is_constant(condition): condition = bool(condition.data) # Now the actual function if (cf.use_theano and no...
[ "def inferoutshape(self, inpshape=None, checkinput=False):\n if inpshape is None:\n inpshape = self.inpshape\n return inpshape", "def symbolic_ifelse(cond, then, else_):\n def outfunc(df): \n cond_out = maybe_eval(df, cond)\n n = len(cond_out)\n if cond_out.all():\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the default broadcastable pattern for a shape, replacing 1s with `True`.
def shape_to_broadcast(shape): return tuple(n==1 for n in shape)
[ "def create_mask(shape):\n return np.zeros(shape).astype(bool)", "def idx_to_mask(idx, shape):\n output = np.zeros(shape)\n output[idx] = 1\n return output.astype(np.bool)", "def ones(shape, dtype=float, **kwargs):\n dtype = np.dtype(dtype)\n return fill(shape=shape, dflt=np.ones((), dtype.bas...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Make an object into a tensor. If `object` is a numpy array, a new tensor matching its shape and dtype is returned. The array values are used to set the test value.
def tensor(object, name=None, dtype=None): # Try to infer the tensor shape, test_value, dtype and broadcast pattern broadcastable = None shape = None if isinstance(object, np.ndarray): # Numpy arrays become the symbolic's test value shape = object.shape test_value = object ...
[ "def to_scalar(obj):\n if isinstance(obj, np.generic):\n return obj.item()\n else:\n return obj", "def tf_tensor_2_serializable(obj):\n import tensorflow as tf\n import numpy as np\n\n # Tensor -> ndarray or object\n if isinstance(obj, tf.Tensor):\n if tf.__version__.startsw...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
If `allow_resize` is false (default), will raise an error if new_value has a different shape than the stored variable.
def set_value(self, new_value, borrow=False): new_value = np.array(new_value, copy = not borrow) try: if self.shape != new_value.shape: self.resize(new_value.shape, refcheck=False) # refcheck is necessary to get this to work, but bypasses # the...
[ "def resize(a, new_shape):\n return _npi.resize_fallback(a, new_shape=new_shape)", "def _new_shape_action(self, value):\n if self.value.shape == value.shape:\n pass\n elif self.on_shape_change == 'raise':\n raise ShapeChangeError(\"cannot change shape of {}\"\\\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
In contrast to Theano's `shared()`, the broadcast pattern is set to be compatible with NumPy's behaviour; i.e., any axis in `value` with dimension 1 is considered broadcastable by default. As with Theano's `shared()`, the broadcast pattern can be changed by passing
def shared(value, name=None, strict=False, allow_downcast=None, symbolic=True, **kwargs): if not isinstance(value, np.ndarray): value = np.asarray(value) if 'dtype' in kwargs: logger.warning("You passed the keyword 'dtype' to the shared constructor. " "Theano do...
[ "def test_broadcastable_flag_assignment_mixed_thisaxes(self):\r\n rng = numpy.random.RandomState(seed=utt.fetch_seed())\r\n a_val = rng.rand(2, 4, 1).astype(self.floatX)\r\n b_val = rng.rand(1, 4, 1).astype(self.floatX)\r\n\r\n a = self.shared(a_val, broadcastable=(False, False, True))\r...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
In contrast to `numpy.atleast_1d`, will not cast lists or tuples to arrays. This is to allow lists of symbolic variables.
def atleast_1d(*arrays): if len(arrays) == 1: a = arrays[0] if isscalar(a): a = add_axes(a, 1) return a else: assert len(arrays) > 1 return [atleast_1d(a) for a in arrays]
[ "def atleast_1d(*arys):\n res = []\n for ary in arys:\n ary = asarray(ary)\n if len(ary.shape) == 0: \n result = numpy.array([ary[0]])\n else:\n result = ary\n res.append(result)\n if len(res) == 1:\n return res[0]\n else:\n return res", ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add an axis to `x`, e.g. to treat a scalar as a 1x1 matrix. String arguments for `pos` should cover most typical use cases; for more complex operations, like adding axes to the middle, specify the insertion position for the axes directly.
def add_axes(x, num=1, pos='left'): if is_theano_object(x): if pos in ['left', 'before', 'begin', 'first']: shuffle_pattern = ['x']*num shuffle_pattern.extend(range(x.ndim)) elif pos in ['right', 'after', 'end', 'last']: shuffle_pattern = list(range(x.ndim)) ...
[ "def add_pos_enc(x: Tensor) -> Tensor:\n d_model = x.shape[-1]\n pe = get_positional_encoding(x.shape[1], d_model)\n pe = pe.to(x.device)\n return pe + x", "def set_new_pos_in_x(self, new_pos):\n self.__pos_x = new_pos", "def set_axis_x(self, new_axis_point):\r\n self.__x_axis = new_ax...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
All parameters except `array_shape` are the same as for np.pad. `array_shape` is necessary because while we can deal with a Theano array, we need to know its shape.
def pad(array, array_shape, pad_width, mode='constant', **kwargs): if mode not in ['constant']: raise ValueError("theano_shim does not support mode '{}'".format(mode)) if not is_theano_object(array): assert(array.shape == array_shape) # If this fails, than the Theano code will also f...
[ "def _pad_array(self, array: np.ndarray, shape: Any) -> np.ndarray:\n result = np.zeros(shape=shape)\n slices = tuple(slice(s) for s in array.shape)\n result[slices] = array\n return result", "def pad(array, size = 1):\n\n return np.pad(array, size, 'edge')", "def _padding(self, x...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convolve each component of data_arr with kernel_arr and stack the result into an array. data_arr is an NxM array, where N is the number of time bins and M the number of components kernel_arr is an MxM array, for which the element with index (i,j) represents the contribution of component j to component i. (Consistent wi...
def conv1d(data_arr, kernel_arr, tarr_len, discrete_kernel_shape, mode='valid'): assert(data_arr.ndim == 2) output_shape = discrete_kernel_shape[1:] if (kernel_arr.ndim == 2): # Algorithm assumes a "to" axis on the kernel. Add it. kernel_arr = add_axes(kernel_arr, 1, 'before last') ...
[ "def convolve(data, kernel):\n return _convolve(data, kernel)", "def convolve(self, kernel):\n kernel_rows, kernel_cols = kernel.shape\n img_rows, img_cols = self.img_array.shape\n\n print(\"imgae shape: \", self.img_array.shape)\n print(self.img_array[:10,:10])\n\n # flip th...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Wrapper for the linear filter operator implemented by scipy.signal.lfilter At the moment, the implementation is restricted to the case a = 1.
def lfilter(size, b, a, x, *args, **kwargs): sym_a = is_theano_object(a) sym_b = is_theano_object(b) sym_x = is_theano_object(x) M, N = size if sym_b or sym_x: s = x * b[0] for tau in range(1, M): u = x[:-tau] * b[tau] s = T.inc_subtensor(s[tau:], u) els...
[ "def LP_filt(filterLength, x):\n b=np.ones(filterLength,)/(filterLength) #Finite Impulse Response (FIR) Moving Average (MA) filter with one second filter length\n a=1\n y = signal.filtfilt(b, a, x)\n return y", "def _filtfilt_gust(b, a, x, axis=-1, irlen=None):\n # In the comments, \"Gustafsson's p...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Method used to search for a specific blob, commit or tree. If a tree is searched for, the result is split into its components (blobs and directories), which are again split into their mode, hash and name. In the case of a commit, we split the information string and the tree hash and parent's commit hash are retur...
def search(hash, type): out = bash('echo ' + hash + ' | ~/lookup/showCnt ' + type) if type == 'tree': return [blob.split(';') for blob in out.strip().split('\n')] if type == 'commit': splitted = out.split(';') # the tree and parent commit hashes are the second and third word, respectively # the commit time...
[ "def tree_lookup(self, target_path, commit):\n segments = target_path.split(\"/\")\n tree_or_blob = commit.tree\n path = ''\n while segments:\n dirent = segments.pop(0)\n if isinstance(tree_or_blob, pygit2.Tree):\n if dirent in tree_or_blob:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Method used to check the usage of Continuous Integration in a tree, given its hash.
def ci_lookup(tree_hash): query = 'echo ' + tree_hash + ' | ~/lookup/showCnt tree | egrep "' + '|'.join(ci_files) +'"' out = bash(query) """ # alternate method blobs = search(tree_hash, 'tree') index = {'mode':1, 'hash':1, 'name':2} ci = False for blob in blobs: name = blob[index['name']] hash = blob[ind...
[ "def check(self, tree):\n pass", "def check_build_status(owner, repository, ref):\n return get_hvcs().check_build_status(owner, repository, ref)", "def test_hash_for_branch_with_cwd(self):\n\n current_dir = os.getcwd()\n\n os.chdir(Path(__file__).parent / \"resolver\" / \"ostree_repo\")\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
We check the parent commit to see if its child commit introduced or modified a CI config file.
def check_if_introduction(commit, result): tree_hash, parent_commit_hash, time = search(commit, 'commit') # controlling for the case of no parent commits if parent_commit_hash == '': return True # controlling for the case of multiple parent commits all_parent_CI = False for parent in parent_commit_hash.split...
[ "def bi_is_current(build_instr_file):\n\n meta = read_meta_data(build_instr_file)\n baseline = meta['Parent Models']['Baseline']\n alternatives = meta['Parent Models']['Alternatives']\n # parents = baseline.update(alternatives)\n # print meta['Parent Models']['Baseline']\n # print alternatives\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Used to investigate how many commits from a user modified a unit-testing file. Unix commands are used to achieve better performance. The blobs are parsed, looking for unit-testing library imports. An alternative would be to use the thruMaps directories or the ClickHouse API, but those options seem slower.
def calc_test(commits, author): open('modifications.csv', 'w').close() for count, commit in enumerate(commits): # status update if (count + 1) % 5 == 0: print commit, '.. ..', count + 1, ' / ', len(commits) # getting every blob from a given commit query = ('for x in $(echo ' + commit + ' | ~/lookup/g...
[ "def test_repo_commit_count():\n\n commit_count = BehavioralUtils.count_commits('drupal', 'builds')\n assert commit_count == 4", "def test_commit_counter(self):\n\n pass", "def get_git_commiter_count(path):\n process = subprocess.Popen(['git', 'shortlog', '-sn'], cwd=path, stdout=subproc...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Method used to count the usage of certain languages' good practices and modern approaches. We parse the diff of a modified file and the content of an introduced file to find those practices, and we count the extent of their usage. Then, for each commit that included these features, we write a record to a file.
def calc_lang_features(commits, author): lang_features = ['/\*\*', '\\"\\"\\"', '///', # documentation '^\s*@', 'def.+:.+->', 'using\s+System\.ComponentModel\.DataAnnotations', # assertion 'assert', 'TODO', 'lambda'] # delete contents open('lang_features.csv', 'w').close() for count, commit in enumerate(co...
[ "def test_line_counts(self):\n diff = (\n b'+ This is some line before the change\\n'\n b'- And another line\\n'\n b'Index: foo\\n'\n b'- One last.\\n'\n b'--- README 123\\n'\n b'+++ README (new)\\n'\n b'@@ -1,1 +1,1 @@\\n'\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Find the name of the program for Popen. Windows is finicky about having the complete file name. Popen won't search the %PATH% for you automatically. (Adapted from ctypes.find_library)
def find_program(name): # See MSDN for the REAL search order. base, ext = os.path.splitext(name) if ext: exts = [ext] else: exts = ['.bat', '.exe'] for directory in os.environ['PATH'].split(os.pathsep): for e in exts: f...
[ "def find_program(basename):\n names = [basename]\n if os.name == \"nt\":\n # Windows platforms\n extensions = (\".exe\", \".bat\", \".cmd\", \".dll\")\n if not basename.endswith(extensions):\n names = [basename + ext for ext in extensions] + [basename]\n for name in names:\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Run an svn command in a PIPE; exit if the svn command failed.
def run_svn(args, fail_if_stderr=False, encoding="utf-8"): def _transform_arg(a): if isinstance(a, unicode): a = a.encode(encoding or locale_encoding) elif not isinstance(a, str): a = str(a) return a t_args = map(_transform_arg, args) cmd = find_prog...
[ "def run_svn(*cmd, **kwargs):\n kwargs.setdefault('stdin', subprocess2.PIPE)\n kwargs.setdefault('stdout', subprocess2.PIPE)\n kwargs.setdefault('stderr', subprocess2.PIPE)\n\n cmd = (SVN_EXE,) + cmd\n proc = subprocess2.Popen(cmd, **kwargs)\n ret, err = proc.communicate()\n retcode = proc.wait()\n if retco...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parse an SVN date as read from the XML output and return the corresponding timestamp.
def svn_date_to_timestamp(svn_date): # Strip microseconds and timezone (always UTC, hopefully) # XXX there are various ISO datetime parsing routines out there, # cf. http://seehuhn.de/comp/pdate date = svn_date.split('.', 2)[0] time_tuple = time.strptime(date, "%Y-%m-%dT%H:%M:%S") return c...
[ "def get_file_date(tree):\n \n date = tree.find(\"{http://www.dspin.de/data/metadata}MetaData/{http://www.dspin.de/data/metadata}source/{http://www.clarin.eu/cmd/}CMD/{http://www.clarin.eu/cmd/}Components/{http://www.clarin.eu/cmd/}teiHeader/{http://www.clarin.eu/cmd/}fileDesc/{http://www.clarin.eu/cmd/}sourc...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parse the XML output from an "svn info" command and extract useful information as a dict.
def parse_svn_info_xml(xml_string): d = {} tree = ET.fromstring(xml_string) entry = tree.find('.//entry') if entry: d['url'] = entry.find('url').text d['revision'] = int(entry.get('revision')) d['repos_url'] = tree.find('.//repository/root').text d['last_changed_r...
[ "def svn_info():\n code, result = popen('svn info --xml .', False, False)\n parser = ElementTree.XMLTreeBuilder()\n parser.feed(''.join(result))\n return parser.close()", "def get_svn_info(svn_url_or_wc, rev_number=None):\r\n if rev_number is not None:\r\n args = [svn_url_or_wc + \"@\" + str...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parse the XML output from an "svn log" command and extract useful information as a list of dicts (one per log changeset).
def parse_svn_log_xml(xml_string): l = [] tree = ET.fromstring(xml_string) for entry in tree.findall('logentry'): d = {} d['revision'] = int(entry.get('revision')) # Some revisions don't have authors, most notably # the first revision in a repository. author =...
[ "def parse_svn_log(xml):\n logs = []\n tree = Xml_parser.fromstring(xml)\n for log in tree:\n logs.append({\n 'revision': log.attrib['revision'],\n 'author': log[0].text,\n 'date': log[1].text,\n 'comment': log[2].text\n })\n return logs", "def...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parse the XML output from an "svn status" command and extract useful info as a list of dicts (one per status entry).
def parse_svn_status_xml(xml_string, base_dir=None): l = [] tree = ET.fromstring(xml_string) for entry in tree.findall('.//entry'): d = {} path = entry.get('path') if base_dir is not None: assert path.startswith(base_dir) path = path[len(base_dir):].ls...
[ "def svn_info():\n code, result = popen('svn info --xml .', False, False)\n parser = ElementTree.XMLTreeBuilder()\n parser.feed(''.join(result))\n return parser.close()", "def get_svn_status(svn_wc):\r\n # Ensure proper stripping by canonicalizing the path\r\n svn_wc = os.path.abspath(svn_wc)\r\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get SVN information for the given URL or working copy, with an optionally specified revision number. Returns a dict as created by parse_svn_info_xml().
def get_svn_info(svn_url_or_wc, rev_number=None): if rev_number is not None: args = [svn_url_or_wc + "@" + str(rev_number)] else: args = [svn_url_or_wc] xml_string = run_svn(svn_info_args + args, fail_if_stderr=True) return parse_svn_info_xml(xml_string)
[ "def parse_svn_info_xml(xml_string):\r\n d = {}\r\n tree = ET.fromstring(xml_string)\r\n entry = tree.find('.//entry')\r\n if entry:\r\n d['url'] = entry.find('url').text\r\n d['revision'] = int(entry.get('revision'))\r\n d['repos_url'] = tree.find('.//repository/root').text\r\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check out the given URL at an optional revision number.
def svn_checkout(svn_url, checkout_dir, rev_number=None): args = [] if rev_number is not None: args += ['-r', rev_number] args += [svn_url, checkout_dir] return run_svn(svn_checkout_args + args)
[ "def checkout(self, url=None, rev=None):\r\n args = []\r\n if url is None:\r\n url = self.url\r\n if rev is None or rev == -1:\r\n if (py.std.sys.platform != 'win32' and\r\n svncommon._getsvnversion() == '1.3'):\r\n url += \"@HEAD\" \r\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Fetch up to 'limit' SVN log entries between the given revisions.
def run_svn_log(svn_url_or_wc, rev_start, rev_end, limit, stop_on_copy=False): if stop_on_copy: args = ['--stop-on-copy'] else: args = [] args += ['-r', '%s:%s' % (rev_start, rev_end), '--limit', str(limit), svn_url_or_wc] xml_string = run_svn(svn_log_args + args) ...
[ "def __getLogEntries(self, startRev=None):\n fetchLimit = 10\n self._reset()\n \n QApplication.setOverrideCursor(QCursor(Qt.WaitCursor))\n QApplication.processEvents()\n \n limit = self.limitSpinBox.value()\n if startRev is None:\n start = pysvn.Rev...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get SVN status information about the given working copy.
def get_svn_status(svn_wc): # Ensure proper stripping by canonicalizing the path svn_wc = os.path.abspath(svn_wc) args = [svn_wc] xml_string = run_svn(svn_status_args + args) return parse_svn_status_xml(xml_string, svn_wc)
[ "def CaptureSVNStatus(options, path):\n info = CaptureSVN(options, [\"status\"], path)\n result = []\n if not info:\n return result\n for line in info.splitlines():\n if line:\n new_item = FileStatus(line[7:], line[0:1], line[1:2], line[2:3],\n line[3:4], line[4:5], line[5:...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the first SVN log entry in the requested revision range.
def get_one_svn_log_entry(svn_url, rev_start, rev_end, stop_on_copy=False): entries = run_svn_log(svn_url, rev_start, rev_end, 1, stop_on_copy) if not entries: display_error("No SVN log for %s between revisions %s and %s" % (svn_url, rev_start, rev_end)) return entries[0...
[ "def get_first_svn_log_entry(svn_url, rev_start, rev_end):\r\n return get_one_svn_log_entry(svn_url, rev_start, rev_end, stop_on_copy=True)", "def get_last_svn_log_entry(svn_url, rev_start, rev_end):\r\n return get_one_svn_log_entry(svn_url, rev_end, rev_start, stop_on_copy=True)", "def svn_ra_get_log(*ar...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the first log entry after/at the given revision number in an SVN branch. By default the revision number is set to 0, which will give you the log entry corresponding to the branch creation.
def get_first_svn_log_entry(svn_url, rev_start, rev_end): return get_one_svn_log_entry(svn_url, rev_start, rev_end, stop_on_copy=True)
[ "def get_one_svn_log_entry(svn_url, rev_start, rev_end, stop_on_copy=False):\r\n entries = run_svn_log(svn_url, rev_start, rev_end, 1, stop_on_copy)\r\n if not entries:\r\n display_error(\"No SVN log for %s between revisions %s and %s\" %\r\n (svn_url, rev_start, rev_end))\r\n\r\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the last log entry before/at the given revision number in an SVN branch. By default the revision number is set to HEAD, which will give you the log entry corresponding to the latest commit in branch.
def get_last_svn_log_entry(svn_url, rev_start, rev_end): return get_one_svn_log_entry(svn_url, rev_end, rev_start, stop_on_copy=True)
[ "def get_one_svn_log_entry(svn_url, rev_start, rev_end, stop_on_copy=False):\r\n entries = run_svn_log(svn_url, rev_start, rev_end, 1, stop_on_copy)\r\n if not entries:\r\n display_error(\"No SVN log for %s between revisions %s and %s\" %\r\n (svn_url, rev_start, rev_end))\r\n\r\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Iterate over SVN log entries between first_rev and last_rev. This function features chunked log fetching so that it isn't too nasty to the SVN server if many entries are requested.
def iter_svn_log_entries(svn_url, first_rev, last_rev): cur_rev = first_rev chunk_length = log_min_chunk_length chunk_interval_factor = 1.0 while last_rev == "HEAD" or cur_rev <= last_rev: start_t = time.time() stop_rev = min(last_rev, cur_rev + int(chunk_length * chunk_interval_fa...
[ "def __getLogEntries(self, startRev=None):\n fetchLimit = 10\n self._reset()\n \n QApplication.setOverrideCursor(QCursor(Qt.WaitCursor))\n QApplication.processEvents()\n \n limit = self.limitSpinBox.value()\n if startRev is None:\n start = pysvn.Rev...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given an SVN log entry and an optional sequence of files, do an svn commit.
def commit_from_svn_log_entry(entry, files=None, keep_author=False): # This will use the local timezone for displaying commit times timestamp = int(entry['date']) svn_date = str(datetime.fromtimestamp(timestamp)) # Uncomment this one one if you prefer UTC commit times #svn_date = "%d 0" % times...
[ "def svn_client_commit(svn_client_commit_info_t_commit_info_p, apr_array_header_t_targets, svn_boolean_t_nonrecursive, svn_client_ctx_t_ctx, apr_pool_t_pool): # real signature unknown; restored from __doc__\n pass", "def svn_fs_commit_txn(*args):\r\n return _fs.svn_fs_commit_txn(*args)", "def commit (files)...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Pull SVN changes from the given log entry. Returns the new SVN revision. If an exception occurs, it will roll back to revision 'svn_rev - 1'.
def pull_svn_rev(log_entry, svn_url, target_url, svn_path, original_wc, keep_author=False): svn_rev = log_entry['revision'] run_svn(["up", "--ignore-externals", "-r", svn_rev, original_wc]) removed_paths = [] merged_paths = [] unrelated_paths = [] commit_paths = [] for d in log_entr...
[ "def get_last_svn_log_entry(svn_url, rev_start, rev_end):\r\n return get_one_svn_log_entry(svn_url, rev_end, rev_start, stop_on_copy=True)", "def get_one_svn_log_entry(svn_url, rev_start, rev_end, stop_on_copy=False):\r\n entries = run_svn_log(svn_url, rev_start, rev_end, 1, stop_on_copy)\r\n if not entr...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Decrements the field at position x, y (zero-based). x: int, y: int
def decrement(self, x, y): self.field.add(x, y, -1) self.depth += 1
[ "def posXDecr(self):\n self.posMoveDict['x'].setValue(self.posMoveDict['x'].value()-1)", "def droite(self):\n self.__y += 1\n if self.__y > 10:\n self.__y = 10", "def posYDecr(self):\n self.posMoveDict['y'].setValue(self.posMoveDict['y'].value()-1)", "def decrement(self)...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }