query
stringlengths
9
9.05k
document
stringlengths
10
222k
negatives
listlengths
19
20
metadata
dict
Uses the lar_gen object to create a TS row, dictionary object.
def make_ts_row(self): self.ts_row = self.lar_gen.make_ts_row(self.clean_config) return self.ts_row
[ "def create_custom_row(self, dictionary, clean_filepath, clean_filename):\n\t\t#Creates a TS and LAR dataframe from the clean filepath and name\n\t\t#specified.\n\t\tts_df, lar_df = utils.read_data_file(\n\t\t\t\t\tpath=clean_filepath,\n\t\t\t\t\tdata_file=clean_filename)\n\n\t\t#Changes each column (key in the dic...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Uses the lar_gen object and a TS row to create a LAR row that passes syntax and validity edits according to the FIG.
def make_clean_lar_row(self, ts_row): #Stores the stop condition and the initial number of iterations. stop = False iters = 1 #Makes a new row using the lar generator. row = self.lar_gen.make_row(lei=self.lei) #Begins a loop that creates the LAR row. The loop generates the row #with the lar_generator a...
[ "def create_custom_row(self, dictionary, clean_filepath, clean_filename):\n\t\t#Creates a TS and LAR dataframe from the clean filepath and name\n\t\t#specified.\n\t\tts_df, lar_df = utils.read_data_file(\n\t\t\t\t\tpath=clean_filepath,\n\t\t\t\t\tdata_file=clean_filename)\n\n\t\t#Changes each column (key in the dic...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This function takes in a filepath and name, producing a report on whether any rows of the data failed syntax, validity or quality edits. The report contains among its fields the edit name, the status of the edit, the number of rows failed by the edit, if any, and the ULI's or NULIs (loan ID) of the rows that fail the e...
def edit_report(self): #Instantiates the rules engine class as a checker object with a #LAR schema, a TS schema, and geographic geographic data. checker = rules_engine(lar_schema=self.lar_schema_df, ts_schema=self.ts_schema_df, geographic_data=self.geographic_data) #Seperates data from the filepath and fil...
[ "def config_report_check(config_json):\n ## Make sure sort and column_order only have values in columns for any report.\n attributes_to_check = [\"sort\", \"column_order\"]\n for attribute in attributes_to_check:\n if \"summary_report\" in config_json and \"columns\" in config_json[\"summary_report\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates a custom clean LAR row by passing in a dictionary of columns and new values to modify all the rows of an existing clean file, filters the modified file for clean rows, and then pulls the first row from the file. Pulls rows from the clean file last generated. Suggestion that the file pulled should be 1000 origin...
def create_custom_row(self, dictionary, clean_filepath, clean_filename): #Creates a TS and LAR dataframe from the clean filepath and name #specified. ts_df, lar_df = utils.read_data_file( path=clean_filepath, data_file=clean_filename) #Changes each column (key in the dictionary) to the new value in ...
[ "def open_and_clean_csv():\n\n with open(SOURCE, newline='') as csvfile:\n inv_reader = csv.DictReader(csvfile, delimiter=',')\n rows = list(inv_reader)\n for row in rows[:]:\n\n row['product_quantity'] = int(row['product_quantity'])\n row['product_price'] = (row['produ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a node by moving the peg at position pos on the board defined by node in the direction specified by dir.
def get_next_valid_node(node, pos, dir): #assert node[pos[0]][pos[1]] == 1 nrows = len(node) ncols = len(node[0]) #assert pos[0] >= 0 and pos[0] < nrows #assert pos[1] >= 0 and pos[1] < ncols newpos = (pos[0] + 2 * config.DIRECTION[dir][0], pos[1] + 2 * config.DIRECTION[dir][1])...
[ "def get_node_after_move(node, move):\n size = config.TAQUIN_SIZE\n if (move == config.MOVE_LEFT and node.empty % size == 0) or \\\n (move == config.MOVE_RIGHT and node.empty % size == size - 1):\n return None\n if (move == config.MOVE_TOP and node.empty // size == 0) ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Runs the trace on the board configuration defined by start_state and reports if it is valid
def is_valid_trace(start_state, trace): board = [list(l) for l in start_state] nrows = len(board) ncols = len(board[0]) for i in range(0, len(trace), 2): # peg is moved from r1, c1 to r2, c2 r1, c1 = trace[i] r2, c2 = trace[i+1] assert r1 >= 0 and r2 >= 0 and \ ...
[ "def check_status(self, start):\n check = False\n mate = False\n for move in self.get_moves(start):\n attack = self.board.get(move[1], False)\n if attack:\n if attack.get(\"piece\", False) == \"k\":\n check = True\n if n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Renders a Google Adsense block in the same sizes as the BannerAd model.
def render_adsense(type): if type == 'banner': code = """ <script type="text/javascript"><!-- google_ad_client = "pub-5361914556213729"; google_ad_slot = "1625200313"; google_ad_width = 468; google_ad_height = 60; //--> </sc...
[ "def render_banner_ad(type, fallback='True'):\n site = Site.objects.get_current()\n \n # If we ask for a vertical ad, this means we'll have room for either a\n # vertical ad or a shortvert. Let's mix things up a bit.\n if type == 'vertical':\n type = random.choice(['vertical', 'shortvert'])\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Renders a BannerAd instance of the desired size, 'cube'. Defaults to None if no such ad exists for a given site.
def render_cube_ad(): site = Site.objects.get_current() try: ads = BannerAd.current.filter(site=site).filter(ad_type='cube') # .filter(special_section__isnull=True) see above if not ads: ad = None else: ad = random.choice(ads) except Banne...
[ "def render_banner_ad(type, fallback='True'):\n site = Site.objects.get_current()\n \n # If we ask for a vertical ad, this means we'll have room for either a\n # vertical ad or a shortvert. Let's mix things up a bit.\n if type == 'vertical':\n type = random.choice(['vertical', 'shortvert'])\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Renders a BannerAd instance of the desired size, 'eyebrow'. Defaults to None if no such ad exists for a given site.
def render_eyebrow_ad(): site = Site.objects.get_current() try: ads = BannerAd.current.filter(site=site).filter(ad_type='eyebrow') # .filter(special_section__isnull=True) Not sure if we need this anymore. Comment back in otherwise. if not ads: ad = None else: ...
[ "def render_banner_ad(type, fallback='True'):\n site = Site.objects.get_current()\n \n # If we ask for a vertical ad, this means we'll have room for either a\n # vertical ad or a shortvert. Let's mix things up a bit.\n if type == 'vertical':\n type = random.choice(['vertical', 'shortvert'])\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Renders a BannerAd instance of the desired size. If fallback is 'True', the site will display an AdSense ad if there is no current BannerAd of the specified type.
def render_banner_ad(type, fallback='True'): site = Site.objects.get_current() # If we ask for a vertical ad, this means we'll have room for either a # vertical ad or a shortvert. Let's mix things up a bit. if type == 'vertical': type = random.choice(['vertical', 'shortvert']) if t...
[ "def render_special_banner_ad(type, section_id, fallback='True'):\n site = Site.objects.get_current()\n\n try:\n section_id = int(section_id)\n except:\n section_id = 0\n\n # If we ask for a vertical ad, this means we'll have room for either a\n # vertical ad or a shortvert. Let's mix t...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Renders a BannerAd instance of the desired size. If fallback is 'True', the site will display an AdSense ad if there is no current BannerAd of the specified type.
def render_special_banner_ad(type, section_id, fallback='True'): site = Site.objects.get_current() try: section_id = int(section_id) except: section_id = 0 # If we ask for a vertical ad, this means we'll have room for either a # vertical ad or a shortvert. Let's mix things up a bit...
[ "def render_banner_ad(type, fallback='True'):\n site = Site.objects.get_current()\n \n # If we ask for a vertical ad, this means we'll have room for either a\n # vertical ad or a shortvert. Let's mix things up a bit.\n if type == 'vertical':\n type = random.choice(['vertical', 'shortvert'])\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Renders the requested BannerAd instance. If fallback is 'True', the site will display an AdSense ad if there is no current BannerAd with the specified ID and of the specified type.
def render_ad_by_id(ad_id, fallback='True'): try: ad_id = int(ad_id) except: ad_id = 0 try: ad = BannerAd.current.get(id=ad_id) except BannerAd.DoesNotExist: ad = None if not ad: ad = None if fallback == 'True': return render_adsense(type) ...
[ "def render_banner_ad(type, fallback='True'):\n site = Site.objects.get_current()\n \n # If we ask for a vertical ad, this means we'll have room for either a\n # vertical ad or a shortvert. Let's mix things up a bit.\n if type == 'vertical':\n type = random.choice(['vertical', 'shortvert'])\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a list of files which a is missing from b
def get_missing_files(a, b): missing_files = [] for f in b: if not is_file_in_list(f['filename'], a): missing_files.append(f) return missing_files
[ "def difference(a, b):\n return [x for x in a if x in a and not x in b]", "def get_missing(list1, list2):\n return list(set(list1) - set(list2))", "def array_diff(a, b):\n\n\n return list(filter(lambda x: x not in b, a))", "def systemFileCompare(ip1, ip2):\r\n files=getFolder(ip1, path)\r\n ano...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initialize with any normal Mappings the Mapping of all consonant characters that can be doubled, the characters that end in i sounds, the y Character Mappings and the double COnsonant Character
def __init__(self, normalMapping, consonants, *, yIChars, otherIChars, yMappings, doubleChar, extras=[]): self.normalMapping = normalMapping self.doubleChar = doubleChar self.compoundMapping = {key+yKana:consonants[key][:-1]+yKanaValue for key in yIChars for yKana, yKanaValue in yMa...
[ "def getCharacterMapping(self):", "def getMappings(self, symbols):\r\n maps = [self.doubleConsonantMapping] if self.doubleChar not in symbols else []\r\n maps.extend(self._process_mapping(extra, symbols) for extra in self.extras)\r\n maps.extend([self._process_mapping(self.compoundMapping, sy...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the Mappings to use given the symbols that should not be converted
def getMappings(self, symbols): maps = [self.doubleConsonantMapping] if self.doubleChar not in symbols else [] maps.extend(self._process_mapping(extra, symbols) for extra in self.extras) maps.extend([self._process_mapping(self.compoundMapping, symbols), self._process_map...
[ "def _process_mapping(self, mapping, symbols):\r\n return {key:value for key, value in mapping.items() if not set(key).issubset(symbols)}", "def scrub(mappings):\n return mappings", "def _convert_to_cmap_props(glyphs):\n return {\n ord(_g.char): _name\n for _name, _g in glyphs.items()...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the proper mapping dictionary that will not convert the learned symbols
def _process_mapping(self, mapping, symbols): return {key:value for key, value in mapping.items() if not set(key).issubset(symbols)}
[ "def getMappings(self, symbols):\r\n maps = [self.doubleConsonantMapping] if self.doubleChar not in symbols else []\r\n maps.extend(self._process_mapping(extra, symbols) for extra in self.extras)\r\n maps.extend([self._process_mapping(self.compoundMapping, symbols),\r\n self...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Download youtube video, convert it to mp3 format and then upload to gg drive.
def download_vid_n_upload_to_ggdrive(yt_url, destination_folder_id): import subprocess try: result = subprocess.run( ["youtube-dl", "-x", "--audio-format", "mp3", yt_url], stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=True ) except subp...
[ "def download_mp3(video):\n try:\n youtube_dl = os.path.join(settings.BASE, \"youtube-dl\")\n if len(settings.FFMPEG) > 0:\n args = [youtube_dl,\n \"--no-post-overwrites\",\n \"-x\",\n \"--ffmpeg-location\",\n se...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Retrieve opponent players token for a given players token.
def opponent(self, token): for t in self.players: if t != token: return t
[ "def get_token():\n token_json = requests.get(token_issuer)\n return token_json.json()['token']", "def get_player_move(board, player_token):\n\n # Make use of the raw_input to ask the user a question. Make sure only\n # valid inputs work (use is_space_free function). The question should be\n # aske...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Handle Deduplication of People
def person_duplicates(): # Shortcut persons = db.pr_person table_header = THEAD(TR(TH(T("Person 1")), TH(T("Person 2")), TH(T("Match Percentage")), TH(T("Resolve")))) # Calculate max possible combinations of records ...
[ "def union(self, domain):", "def mergeOrgDataFam(recordid, families, originalData):\n# print 'mergeOrgDataFam', recordid\n famDict = families.find_one({'_id': recordid})\n #keep _id, refId\n orgDataRec = originalData.find_one({'recordId': recordid, 'type': 'family'})\n #children\n famDict['child...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add given user to the participants list
def add_participant(self, user: UserModel) -> None: if user in self.guests: raise exc.IntegrityError("User can not be guest and participant", params=None, orig=None) if user in self.participants: raise exc.IntegrityError("User already in guest...
[ "def _add_user_to_list(self, user):\n self._user_list.append(user)", "def add_guest(self, user: UserModel) -> None:\n if user in self.guests:\n raise exc.IntegrityError(\"User can not be guest and participant\",\n params=None, orig=None)\n if use...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add given user to the guests list
def add_guest(self, user: UserModel) -> None: if user in self.guests: raise exc.IntegrityError("User can not be guest and participant", params=None, orig=None) if user in self.participants: raise exc.IntegrityError("User already in participant...
[ "def _add_user_to_list(self, user):\n self._user_list.append(user)", "def adduser(self, nick):\n # add user\n if not self.users.has_key(nick):\n i = GtkListItem(nick)\n i.show()\n self.list.append_items([i])\n self.users[nick] = i\n if le...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Method for searching by event status
def find_by_status(cls, status: str, queryset: Optional[BaseQuery] = None) \ -> Optional['EventModel']: queryset = queryset or cls.query return queryset.filter_by(status=status)
[ "def events(request):\n try:\n if request.method == 'GET':\n events_list = Events.retrieve_all()\n if events_list is not []: # not empty list\n node_id = request.GET.get('node_id', '')\n user_id = request.GET.get('user_id', '')\n status =...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Method for searching by event owner
def filter_by_owner(cls, user_id: int, queryset: Optional[BaseQuery] = None) \ -> Optional['EventModel']: queryset = queryset or cls.query return queryset.filter_by(owner_id=user_id)
[ "def getEventListByOwner(ownerUserID):\n\tquery = Event.query(Event.ownerid==ownerUserID)\n\treturn _fetchEventList(query)", "def __search (self, event):\n wx.PostEvent (self.GetEventHandler ( ), custom_events.SearchEvent (self.GetId ( )))", "def __search(self, event):\n wx.PostEvent (self.GetEventHandler...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generate a sourcesystem_code that identifies the test case and make sure it doesn't pollute the database. _sourcesystem_cd and _upload_id are added to the specific object
def sourcesystem_cd(self) -> str: save_ss_cd = getattr(self, "_sourcesystem_cd", None) save_up_id = getattr(self, "_upload_id", None) self._sourcesystem_cd = self.test_prefix + type(self).__name__ self._upload_id = self.text_to_number(self._sourcesystem_cd) try: yield...
[ "def test_create_source_entity(self):\r\n s = SourceEntity.objects.create(string='Source Identifier 1',\r\n context='title',\r\n position=8,\r\n occurrences='/home/user1/test.py:18',\r\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
crawl data in github.com/trending
def Trending_github(url=None, number_of_trend=None): "" " url = https://github.com/trending/python?since=monthly """ s = "" try: r = requests.get(url) except OSError: s = " something wrong, pls check url again ! " return s if number_of_trend is not None: number_of_tr...
[ "def collect_github_data(config):\n\n crawler = Crawler(config['organization'].lower())\n\n crawler.add_members()\n crawler.add_info()", "def crawl():\n # blog crawler\n runner = CrawlerRunner(\n {\n 'FEED_FORMAT': 'json',\n 'FEED_URI': DATA_FILE,\n }\n )\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
we using Kaggle API for crawl competitions
def crawl_competitions(): READ_SHELL_COMMAND = shell('kaggle competitions list') information = [] for file in READ_SHELL_COMMAND.output(): information.append(file) result = "" link_perfix = 'https://www.kaggle.com/c/' for index, value in enumerate(information): if index == 1 : ...
[ "def kagglesubmit(self):\n kagglescore = np.NaN\n\n print(\"Submit to kaggle? : Y/N\")\n\n if input().lower() == 'y':\n self._finalize()\n\n df_test = self.rawfeatures[self.rawfeatures.period=='201511']\n\n df_test['item_cnt_month'] = self._predict(df_test)\n\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
we crawl articles in medium.com
def crawl_medium(): # url = "https://medium.com/topic/artificial-intelligence" url = 'https://medium.com/topic/machine-learning' r = requests.get(url) soup = BeautifulSoup(r.text, 'lxml') root = soup.find('div', {'class': 'a b c'}).find('div', {'class': 'n p'}).find('div', ...
[ "def get_articles(rep):\n\n article_url = 'http://webhose.io/filterWebContent?token=' + \\\n WEBHOSE_API_KEY + \\\n '&format=json&sort=crawled&q=%22' + rep.firstname + \\\n '%20' + rep.lastname + \\\n '%22%20language%3Aenglish%20site_type' + \\\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Determines if our version of urllib.request.urlopen has a timeout argument.
def _urllib_has_timeout(): # NOTE: This is a terrible hack, but there's no other indication that this # argument was added to the function. version = sys.version_info return version[0] >= 2 and version[1] >= 6
[ "def sethttpstimeout(timeout):\n if _under_26():\n opener = urllib2.build_opener(TimeoutHTTPSHandler(timeout))\n urllib2.install_opener(opener)\n else:\n raise Error(\"This python version has timeout builtin\")", "def is_timeout(exc):\n\n if isinstance(exc, Timeout):\n return True\n if not...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initialize a new Response object. code, body, headers, msg are retrievable as instance attributes. Individual headers can be retrieved using dictionary syntax (i.e. response['header'] => value.
def __init__(self, code, body, headers=None, msg='', response=None): self.code = code self.msg = msg self.body = body if headers is None: headers = {} self.headers = headers self.response = response
[ "def make_response_object(body, headers=copy.copy({'Content-Type': CONTENT_TYPE_JSON}), response_code=200):\n response = flask.Response(body)\n response.status_code = response_code\n for header, value in headers.items():\n response.headers.add(header, value)\n return response", "def from_respon...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a Response object based on an httplib.HTTPResponse object.
def from_httpresponse(cls, response): return cls(response.code, response.read(), dict(response.headers), response.msg, response)
[ "def from_httplib(r):\n\t\tlogger.debug2(\"Creating HTTPResponse from httplib...\")\n\t\treturn HTTPResponse(\n\t\t\tdata=r.read(),\n\t\t\theaders=HTTPHeaders(r.getheaders()),\n\t\t\tstatus=r.status,\n\t\t\tversion=r.version,\n\t\t\treason=r.reason,\n\t\t\tstrict=r.strict\n\t\t)", "def from_response(response, bod...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Retrieve the auth information and base url for a site.
def _parse_site(self, site): parts = urllib.parse.urlparse(site) host = parts.hostname if parts.port: host += ":" + str(parts.port) new_site = urllib.parse.urlunparse((parts.scheme, host, '', '', '', '')) return (new_site, parts.username, parts.password)
[ "def _get_auth_url(self):\r\n auth_url = self.auth_url\r\n\r\n if self._ex_force_auth_url is not None:\r\n auth_url = self._ex_force_auth_url\r\n\r\n return auth_url", "def site_info(request):\n\n return {\n 'domain': Site.objects.get_current().domain\n }",...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a new request object.
def _request(self, url): return Request(url)
[ "def create_http_request(self):\n http_request = HttpRequest()\n if \"REQUEST_METHOD\" in os.environ:\n http_request.method = os.environ[\"REQUEST_METHOD\"].strip().lower()\n if \"HTTP_COOKIE\" in os.environ:\n http_request.cookie = os.environ[\"HTTP_COOKIE\"].strip()\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Wrap calls to urllib so they can be overriden.
def _urlopen(self, request): if _urllib_has_timeout(): return urllib.request.urlopen(request, timeout=self.timeout) else: return urllib.request.urlopen(request)
[ "def _wrap_urlopen(url: str, timeout: Optional[int] = None) -> requests.Response:\n try:\n raw = urlopen(url, timeout=timeout)\n except OSError as e:\n msg = f'Error getting {url}: {e}'\n logger.error(msg)\n raise RequestException(msg)\n resp = requests.Response()\n resp.raw ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Append given text as a new line at the end of file
def append_new_line(file_name, text_to_append): # Open the file in append & read mode ('a+') with open(file_name, "a+") as file_object: # Move read cursor to the start of file. file_object.seek(0) # If file is not empty then append '\n' data = file_object.read(100) if len...
[ "def append(self, text):\n\n self.string += text", "def add_text(self, message, newLine=True):\n if newLine and len(self.txt) > 0:\n self.txt += \"\\r\\n\"\n self.txt += message\n return self", "def add_line(self, txt, indent=0):\n self.add(txt+'\\n', indent)", "d...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Runs the given CV method on the given estimator, with the given parameters and their values
def tune_params(estimator, cv_method: str, param_grid: Dict, x: pd.DataFrame, y: np.array, **kwargs): # Get extra arguments, or set them to default values cv = kwargs.get('cv', 3) scoring = kwargs.get('scoring', 'roc_auc') n_iter = kwargs.get('n_iter', 25) return_train_score = kwargs.get('return_tr...
[ "def internal_cross_validation(cls, kwargs, paramname, paramrange, statistic,\n X, y):\n\n # Delay these imports so that we don't have circular imports!\n from main import get_folds\n from stats import StatisticsManager\n\n # Much of this code is sourced from main.py's templ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create an idiom that enables a sequence of tasks to pick up where it left off. Rudely interrupted while enjoying a sandwich, a caveman (just because they wore loincloths does not mean they were not civilised), picks up his club and fends off the sabretooth tiger invading his sanctum as if he were swatting away a gnat. ...
def pick_up_where_you_left_off( name: str, tasks: typing.List[behaviour.BehaviourSubClass] ) -> behaviour.Behaviour: root = composites.Sequence(name=name, memory=True) for task in tasks: task_selector = composites.Selector(name="Do or Don't", memory=False) task_guard = behaviours.CheckBlackb...
[ "def tappable(ch: chain, clause: Signature, callback: Signature, nth: Optional[int] = 1):\n newch = []\n for n, sig in enumerate(ch.tasks):\n if n != 0 and n % nth == nth - 1:\n newch.append(pause_or_continue.s(clause=clause, callback=callback))\n newch.append(sig)\n ch.tasks = tup...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
test sea surface height timeseries
def test_get_sea_surface_height_time_series(self): request = { "region": {"type": "Point", "coordinates": [54.0, 0.0]} } r = self.client.post( '/get_sea_surface_height_time_series', data=json.dumps(request), content_type='application/json' ...
[ "def grubbs_test(timeseries):\n series = scipy.array([x for x in timeseries])\n stdDev = np.std(series) \n mean = np.mean(series)\n tail_average = tail_avg(timeseries)\n z_score = (tail_average - mean) / stdDev\n return z_score", "def test_inst_hr():\n from inst_hr import inst_hr\n peaks ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
test get glossis waterlevel data, latest with no time stamp
def test_get_glossis_data_with_waterlevel(self): request = { "dataset": "waterlevel", "band": "water_level" } resp = self.client.post( '/get_glossis_data', data=json.dumps(request), content_type='application/json' ) ass...
[ "def test_get_gloffis_data_log(self):\n\n request = {\n \"dataset\": \"hydro\",\n \"band\": \"discharge_routed_simulated\"\n }\n resp = self.client.post(\n '/get_gloffis_data',\n data=json.dumps(request),\n content_type='application/json'\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
test get glossis current data
def test_get_glossis_data_with_current(self): request = { "dataset": "currents" } resp = self.client.post( '/get_glossis_data', data=json.dumps(request), content_type='application/json' ) assert resp.status_code == 200 res...
[ "def test_greenalgas_get(self):\n pass", "def test_get_gloffis_data(self):\n\n request = {\n \"dataset\": \"weather\",\n \"band\": \"mean_temperature\"\n }\n resp = self.client.post(\n '/get_gloffis_data',\n data=json.dumps(request),\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
test get gloffis weather data
def test_get_gloffis_data(self): request = { "dataset": "weather", "band": "mean_temperature" } resp = self.client.post( '/get_gloffis_data', data=json.dumps(request), content_type='application/json' ) assert resp.statu...
[ "def sample_weather_scenario():\n times = pd.date_range('1/1/2000', periods=72, freq='6H')\n latitude = np.linspace(0, 10, 11)\n longitude = np.linspace(0, 10, 11)\n wsp_vals = np.full((72, 11, 11), 10.0)\n wdi_vals = np.full((72, 11, 11), 0.0)\n cusp_vals = np.full((72, 11, 11), 0.0)\n cudi_va...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
test get gloffis hydro data
def test_get_gloffis_data_log(self): request = { "dataset": "hydro", "band": "discharge_routed_simulated" } resp = self.client.post( '/get_gloffis_data', data=json.dumps(request), content_type='application/json' ) asser...
[ "def test_get_gloffis_data(self):\n\n request = {\n \"dataset\": \"weather\",\n \"band\": \"mean_temperature\"\n }\n resp = self.client.post(\n '/get_gloffis_data',\n data=json.dumps(request),\n content_type='application/json'\n )\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
test get gebco data
def test_get_gebco_data(self): request = { "dataset": "gebco" } resp = self.client.post( '/get_gebco_data', data=json.dumps(request), content_type='application/json' ) assert resp.status_code == 200 result = json.loads(res...
[ "def test_greenalgas_get(self):\n pass", "def test_get_data(self):\n\n\t\t# Test to go here when best approach is decided for making requests.", "def test_get_gloffis_data(self):\n\n request = {\n \"dataset\": \"weather\",\n \"band\": \"mean_temperature\"\n }\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
test get gll_dtm data
def test_get_gll_dtm_data(self): request = {"dataset": "gll_dtm"} resp = self.client.post( "/get_gll_dtm_data", data=json.dumps(request), content_type="application/json", ) assert resp.status_code == 200 result = json.loads(resp.data) ...
[ "def test_get_words_to_learn(self):\n pass", "def test():\n vocabulary = [\n \"bass\", \"pike\", \"deep\", \"tuba\", \"horn\", \"catapult\",\n ]\n beta = np.array([\n [0.4, 0.4, 0.2, 0.0, 0.0, 0.0],\n [0.0, 0.3, 0.1, 0.0, 0.3, 0.3],\n [0.3, 0.0, 0.2, 0.3, 0.2, 0.0]\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given a rectangle, return a PIL Image of that part of the screen. Handles a Linux installation with and older Pillow by fallingback to using XLib
def screenGrab( rect ): global use_grab x, y, width, height = rect if ( use_grab ): image = PIL.ImageGrab.grab( bbox=[ x, y, x+width, y+height ] ) else: # ImageGrab can be missing under Linux dsp = display.Display() root = dsp.screen().root raw_image = root.get_...
[ "def makeRectangle(width, height, desiredPixel):\n resultImage = cImage.EmptyImage(width + 100, height + 100)\n\n for col in range(height):\n for row in range(width):\n resultImage.setPixel(row,col,desiredPixel)\n\n return resultImage", "def screenshot (b):\n # Read pixels.\n pixels ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the ngrams from a given sentence for a given n.
def ngrams(sentence, n): assert isinstance(sentence, list), "Sentences are lists, got %s: %s" \ % (str(type(sentence)), str(sentence)) ngrams = [] for start in range(0, len(sentence) - n + 1): ngrams.append(sentence[start:start + n]) return ngrams
[ "def get_n_grams(s, n):\n n_grams = []\n for i in range(0, len(s) - n + 1):\n n_grams.append(s[i:i+n])\n return n_grams", "def get_ngram(sentences: list, n_gram: int=1):\n ngram_list = []\n for sentence in sentences:\n for word in sentence:\n if not (word.is_stop or word.is...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
returns the name of the sniffer
def get_sniffer_name(self): return self.sniffer_name
[ "def detector_name(det):\n return gu.string_from_source(det.pyda.source).replace(':','-').replace('.','-')", "def get_sniffer_path(self):\n if self.config:\n try:\n return self.config.get(\"Sniffer\", \"path\")\n except Exception, why:\n self._error_pa...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
requests Response's text property automatically uses the default encoding to convert it to unicode However, sometimes it falls back to ISO88591, which is not appropriate. This method checks whether it could be interpreted as UTF8. If it is, it uses it. Otherwise, it uses whatever was defined.
def get_text_from_response(response): if response.encoding is None: response.encoding = 'utf8' elif response.encoding == 'ISO-8859-1': try: response.content.decode('utf8') except UnicodeDecodeError: pass else: response.encoding = 'utf8' ret...
[ "def test_encoding_detection():\n \n url = 'http://lavr.github.io/python-emails/tests/requests/some-utf8-text.html'\n expected_content = u'我需要单间。' # Chinese is for example only. Any other non-european encodings broken too.\n\n r =\trequests.get(url)\n\n # Response.apparent_encoding is good\n assert r...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
After restoring contributor permissions, this runs to finalize removing rows to tables that were added for guardian. Creates new auth_group table that only contains groups not added with node guardian work
def finalize_reverse_node_guardian_migration(): with connection.cursor() as cursor: cursor.execute(drop_node_group_object_permission_table) logger.info('Finished deleting records from NodeGroupObjectPermission table.') cursor.execute(remove_users_from_node_django_groups) logger.info...
[ "def drop_group_table(self):\n query = \"\"\"DROP TABLE IF EXISTS group_members;\"\"\"\n self.cur.execute(query)\n self.conn.commit", "def truncate_group_tables():\n res = db.query('SELECT count(*) FROM articles2groups')\n if res.ntuples() and int(res.getresult()[0][0]) > 0:\n pr...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given a collection of vectors and a u,v pair compute the average of the uvdistorted distribution in each weighted Voronoi cell
def compute_gradient_from_distorted_averages_in_cells(self, vectors_array, UVvector): N_samples=len(vectors_array) distortions_vector = self.compute_array_uv_distorted_weights(vectors_array, UVvector)#computes the value of exp(-1-v\phi(z)-u) for z in sample indices_vector = self.compute_...
[ "def update_euclidean(self, U, V):\n X = self.X\n W = self.W\n lmbda = self.lmbda\n L = self.L\n D = self.D\n # update V\n V = V * np.divide(U.T @ X + lmbda * (V @ W), U.T @ U @ V + lmbda * (V @ D))\n # update U\n U = U * np.divide(X @ V.T, U @ V @ V.T)...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
returns the density of an independent 2d vector unif distributed in [1,1] x [1,1]
def two_d_uniform_density(vector): #prototype of a density function. This is how measures are specified. x = vector[0] y = vector[1] if (0<=x) and (x<=1) and (0<=y) and (y<=1): return 1.0 else: return 0.0
[ "def density_x1(x1: float):\n # Bind global variables\n global mu1, sigma1\n # Marginal density of x1 is normal with mean mu1 and standard deviation sigma1\n return norm.pdf(x1, loc=mu1, scale=sigma1)", "def density(self, xs, x):\n xs = np.asarray(xs)\n n = len(xs) # before in_domain?\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a OpenTrackingSubstitutionTag object
def __init__(self, open_tracking_substitution_tag=None): self._open_tracking_substitution_tag = None if open_tracking_substitution_tag is not None: self.open_tracking_substitution_tag = \ open_tracking_substitution_tag
[ "def open_tracking_substitution_tag(self, value):\n self._open_tracking_substitution_tag = value", "def get(self):\n return self.open_tracking_substitution_tag", "def make_tag(\n class_name: str, subs: Optional[List[SubAnnotation]] = None, slot_names: Optional[List[str]] = None\n) -> Annotation...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Allows you to specify a substitution tag that you can insert in the body of your email at a location that you desire. This tag will be replaced by the open tracking pixel.
def open_tracking_substitution_tag(self, value): self._open_tracking_substitution_tag = value
[ "def PlaceAtTag(self, tag, newText):\n \n index = self.text.find(\"<!--tag:{}-->\".format(tag))\n if index > -1:\n newStr = self.text[:index]\n newStr += newText\n newStr += self.text[index:]\n self.text = newStr\n logging.debug(\"Succesful...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get a JSONready representation of this OpenTrackingSubstitutionTag.
def get(self): return self.open_tracking_substitution_tag
[ "def json(self):\n return json.dumps(self._spec)", "def get_json(self) -> str:\n return json.dumps(self._raw_meta)", "def json(self):\n return {\n \"id\": self.id,\n \"string_id\": self.string_id,\n \"upvotes\": self.upvotes,\n \"downvotes\": self...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
creates a new user then a new channel and returns a merged dictionary
def new_channel_and_user(new_user): new_channel = channels_create(new_user['token'], "channel_name", False) return {**new_channel, **new_user}
[ "def new_channel_and_user_2(new_user_2):\n new_channel_2 = channels_create(new_user_2['token'], \"channel_name\", False)\n return {**new_channel_2, **new_user_2}", "def _channel_create(param):\n return bot.ts3conn._parse_resp_to_dict(\n bot.ts3conn._send(\"channelcreate\", param))", "def create_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
creates a new user then a new channel and returns a merged dictionary
def new_channel_and_user_2(new_user_2): new_channel_2 = channels_create(new_user_2['token'], "channel_name", False) return {**new_channel_2, **new_user_2}
[ "def new_channel_and_user(new_user):\n new_channel = channels_create(new_user['token'], \"channel_name\", False)\n return {**new_channel, **new_user}", "def _channel_create(param):\n return bot.ts3conn._parse_resp_to_dict(\n bot.ts3conn._send(\"channelcreate\", param))", "def create_user(self):\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
rule_s is like ../. => ./../... The components of rule_s are => Both pattern and replacement are converted to numpy arrays.
def __init__(self, rule_s): pattern, replacement = [str_to_mat(x) for x in rule_s.split(" => ")] self.pattern = pattern self.replacement = replacement self.size = self.pattern.shape[0]
[ "def __init__(self, rules_s):\n\t\tself.rules = [Rule(line) for line in rules_s.split(\"\\n\")]", "def compile_rules(s : str):\n ss = s.split('\\n')\n rules = {}\n for srules in ss:\n arrow_index = srules.find('->')\n left_nonterm = srules[:arrow_index].strip()\n right_derivs = list(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
rules_s is a string with rule strings on separate lines.
def __init__(self, rules_s): self.rules = [Rule(line) for line in rules_s.split("\n")]
[ "def compile_rules(s : str):\n ss = s.split('\\n')\n rules = {}\n for srules in ss:\n arrow_index = srules.find('->')\n left_nonterm = srules[:arrow_index].strip()\n right_derivs = list(map(lambda x: x.strip(), srules[arrow_index+2:].strip().split('|')))\n if left_nonterm != '' ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given an array of items, build a merkle tree
def merkle_tree(items): tree = [[H(x) for x in items]] while len(tree[-1]) != 1: it = iter(tree[-1]) tree.append([H(item, next(it, item)) for item in it]) return tree
[ "def merkle_hash(input_items: Sequence[Any]) -> Hash32:\n\n # Store length of list (to compensate for non-bijectiveness of padding)\n data_length = len(input_items).to_bytes(32, \"little\")\n if len(input_items) == 0:\n # Handle empty list case\n chunks = (b'\\x00' * SSZ_CHUNK_SIZE,)\n eli...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given an item and a tree, return its path
def merkle_path(item, tree): lvl = 0 itemidx = tree[lvl].index(H(item)) even = itemidx % 2 baseidx = itemidx - even otheridx = itemidx - 1 if even else idx + 1 path = [tree[lvl][otheridx]] lvl += 1 while len(tree[lvl]) != 1: baseidx = baseidx / 2 path += tree[lvl][baseidx...
[ "def get_tree_path(h_tree, item_id):\n path_names = get_tree_path_names(h_tree, item_id)\n path_names.reverse()\n return '/'.join(path_names)", "def traverse_tree(path, word_list):\r\n for item in path_iterator(path):\r\n # For each entity / item in the directory path.\r\n if os.path.isd...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Callback when the device receives a PUBACK from the MQTT bridge.
def on_publish(self, unused_client, unused_userdata, unused_mid): print('Published message - ACK received')
[ "def pub_callback(topic, payload, qos, retain):\n mqtt.async_publish(topic, payload, qos, retain)", "def on_publish(self, unused_client, unused_userdata, unused_mid):\n self.print_debug('Published message acked.')", "def _callback(msg):\n print('subscription message data: ', msg.data.decode...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Pick random sample from sample space a clause i and satisfying assigment sigma... Decide if clause i is the first clause to be satisfied by sigma
def sample(dnf, space_size): # generate a sample from the space with uniform probability clause_ind, free_choice = sample_choice_helper(dnf, space_size) clause = np.copy(dnf[clause_ind]) sat_choice = clause sat_choice[clause == 0] = free_choice # filling the free variables with the rando...
[ "def sample_uniform(self, num_choices: int) -> int:\n return self.sample_distribution(np.ones(num_choices) / num_choices)", "def sample_from(space):\n if type(space)!= list:\n return space\n else:\n distrs = {\n 'choice': choice,\n 'randint': randint,\n 'uni...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Pass commandline arguments to ppn.run() method.
def main(): # Read command line arguments args = get_input_arguments() # Unpack dictionary into keyword arguments # Unused arguments should be ignored silently. ppn.run(**args)
[ "def main():\n # set up the program to take in arguments from the command line", "def main(self): # just put into if __name__ ...\n parser = self.get_parser()\n args = parser.parse_args()\n self.run(args)", "def main():\n\n parser = argparse.ArgumentParser(prog=\"oct-run\")\n parse...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Validate inputs that depend might depend on each other and cannot be validated by the spec. Also define dictionary `inputs` in the context, that will contain the inputs for the calculation that will be launched in the `run_calculation` step.
def validate_inputs(self): self.ctx.inputs = AttributeDict({ 'code': self.inputs.code, 'structure': self.inputs.structure, 'parameters': self.inputs.parameters.get_dict() }) if 'CONTROL'not in self.ctx.inputs.parameters: self.ctx.inputs.parameters...
[ "def validate_inputs(self):\n self.ctx.inputs = AttributeDict({\n 'code': self.inputs.code,\n 'qpoints': self.inputs.qpoints,\n 'parent_folder': self.inputs.parent_folder,\n })\n\n if 'parameters' in self.inputs:\n self.ctx.inputs.parameters = self.in...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return whether an initialization calculation should be run, which is the case if the user wants to use automatic parallelization and has specified the ParameterData node in the inputs
def should_run_init(self): return 'automatic_parallelization' in self.inputs
[ "def inspect_init(self):\n calculation = self.ctx.calculation_init\n\n if not calculation.is_finished_ok:\n return self.exit_codes.ERROR_INITIALIZATION_CALCULATION_FAILED\n\n # Get automated parallelization settings\n parallelization = get_pw_parallelization_parameters(calcula...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Validate the inputs that are required for the initialization calculation. The automatic_parallelization
def validate_init_inputs(self): parallelization = self.inputs.automatic_parallelization.get_dict() expected_keys = ['max_wallclock_seconds', 'target_time_seconds', 'max_num_machines'] received_keys = [(key, parallelization.get(key, None)) for key in expected_keys] remaining_keys = [key ...
[ "def validate_inputs(self):\n self.ctx.inputs = AttributeDict({\n 'code': self.inputs.code,\n 'structure': self.inputs.structure,\n 'parameters': self.inputs.parameters.get_dict()\n })\n\n if 'CONTROL'not in self.ctx.inputs.parameters:\n self.ctx.inpu...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Use the initialization PwCalculation to determine the required resource settings for the requested calculation based on the settings in the automatic_parallelization input
def inspect_init(self): calculation = self.ctx.calculation_init if not calculation.is_finished_ok: return self.exit_codes.ERROR_INITIALIZATION_CALCULATION_FAILED # Get automated parallelization settings parallelization = get_pw_parallelization_parameters(calculation, **self...
[ "def validate_init_inputs(self):\n parallelization = self.inputs.automatic_parallelization.get_dict()\n\n expected_keys = ['max_wallclock_seconds', 'target_time_seconds', 'max_num_machines']\n received_keys = [(key, parallelization.get(key, None)) for key in expected_keys]\n remaining_ke...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Diagonalization failed with current scheme. Try to restart from previous clean calculation with different scheme
def _handle_error_diagonalization(self, calculation): input_parameters = calculation.inp.parameters.get_dict() input_electrons = input_parameters.get('ELECTRONS', {}) diagonalization = input_electrons.get('diagonalization', self.defaults['qe']['diagonalization']) if (( any(['too many bands are ...
[ "def update_diag(self, examples):\n self.data = (self.data[0], self.generator.get_kfe_diag(self.data[0], examples))", "def del_vrpotential_diag13():\n global vpotr, oldcu\n if (in1.narrec > 0):\n in1.closeff(iuar)\n in1.narrec -= 1\n del vpotr, oldcu\n# spectral analysis\n if ((in1.ndar==...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculation failed with an error that was not recognized by the parser and was attached wholesale to the warnings. We treat it as an unexpected failure and raise the exception
def _handle_error_unrecognized_by_parser(self, calculation): warnings = calculation.res.warnings if (any(['%%%' in w for w in warnings]) or any(['Error' in w for w in warnings])): raise UnexpectedCalculationFailure('PwCalculation<{}> failed due to an unknown reason'.format(calculation.pk))
[ "def test_bad_numeric_raises_exception(self):\n with self.assertRaises(KeyError):\n math_diff(\n self.thresh_dict,\n os.path.join(self.diff_files_dir, 'eplusout.csv'),\n os.path.join(self.diff_files_dir, 'eplusout_bad_numeric.csv'),\n os....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the test parameters for coordinate tests
def get_coordinate_test_parameters(self): return [ ( # Standard coordinate match 'CP_coord_regex', # Name of the registry key holding regex self.get_standard_coord_data, # formatting function None, # Specific input format for the for...
[ "def test_get_ps_kwargs(self):\n ps_kwargs = mcmc_utils.get_ps_kwargs(measured_img_ra=np.random.randn(4), measured_img_dec=np.random.randn(4), astrometry_sigma=0.005)\n ps_param_names = ps_kwargs[-1][0].keys()\n # Check that lower is less than upper\n for p in ps_param_names:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Optimize tsettle of PIcontroller PLL damping for fixed phase margin and damping
def opt_pll_tf_pi_ph_margin(damping, ph_margin, tsettle_tol, fclk): k = np.log(tsettle_tol)**2/(damping**2) fz = np.sqrt(k)/(2*damping*2*np.pi) tf_params = dict(_type=2, k=k, fz=fz, fp=np.inf, delay=0) return phase_margin(tf_params, fclk)
[ "def pll_tf_pi_controller(tsettle, tol, damping, delay=0.0):\n print(\"\\n********************************************************************************\")\n print(\"* Computing PI-controller PLL\")\n print(\"\\ttsettle\\t-> %E +/- %e\"%(tsettle, tsettle*tol))\n print(\"\\tdamping\\t-> %f\"%damping)\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Computes response of PLL with PIcontroller.
def pll_tf_pi_controller(tsettle, tol, damping, delay=0.0): print("\n********************************************************************************") print("* Computing PI-controller PLL") print("\ttsettle\t-> %E +/- %e"%(tsettle, tsettle*tol)) print("\tdamping\t-> %f"%damping) k = np.log(tol)**2/...
[ "def tdp_voltage_response(ns,s,p,E,lm):\n name = s.name\n infile_name_re_xx = '/usr/lib/meqtrees/Cattery/Siamese/fits_test/beam_real_co_1.fits'\n infile_name_im_xx = '/usr/lib/meqtrees/Cattery/Siamese/fits_test/beam_imag_co_1.fits'\n ns.image_re_xx ** Meq.FITSImage(filename=infile_name_re_xx,cutoff=1.0,mode=2)\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Optimized PIcontroller PLL for phase noise and settling time. Subject to maximum settling time constrained by tsettle, tol. points=1025 for Romberg integration (2k+1)
def opt_pll_tf_pi_controller(tsettle, tol, pn_dco, pn_dco_df, m, n, kdco, fclk, fmax, delay=0.0, points=1025, mode="tdc", sigma_ph=0.1): tsettle_min = 0.01*tsettle # have to constrain, 0 will cause div-by-0 def cost(tsettle): opt = opt_pll_tf_pi_controller_damping(tsettle,...
[ "def pll_tf_pi_controller(tsettle, tol, damping, delay=0.0):\n print(\"\\n********************************************************************************\")\n print(\"* Computing PI-controller PLL\")\n print(\"\\ttsettle\\t-> %E +/- %e\"%(tsettle, tsettle*tol))\n print(\"\\tdamping\\t-> %f\"%damping)\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Determine number of max bits need to represent integer parts of filter coefficients
def n_int_bits(lf_params): pos_coefs = [] neg_coefs = [] for key in ["a0", "a1", "b0", "b1", "b2"]: if lf_params[key] is not np.inf: if lf_params[key] >= 0.0: pos_coefs.append(abs(lf_params[key])) else: neg_coefs.append(abs(lf_params[key])) ...
[ "def num_bits(n):\n return math.floor(math.log2(abs(N))) + 1", "def get_bitwidth(tensor):\r\n assert tensor.dtype in int_type\r\n max_bitwidth = 8\r\n max_value = torch.max(tensor)\r\n\r\n for k in range(1, max_bitwidth + 1):\r\n if max_value < 2 ** k - 1:\r\n return k\r\n\r\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
optimize number of bits for a digital direct formI implementation using two's complement representation fixed point words with all parts of the data path with same data representation
def opt_lf_num_bits(lf_params, min_bits, max_bits, rms_filt_error=0.1, noise_figure=1, sim_steps=1000, fpoints=512, mode="tdc", sigma_ph=0.1): print("\n********************************************************") print("Optimizing loop filter digital direct form-I implementation for") prin...
[ "def solve_wave_FD16(U,Up,h,c,ncpml,b,psi,phi):\n ny , nx = U.shape\n for i in range(8,ny-8):\n for j in range(8,nx-8):\n Up[i,j] = 2.0*U[i,j] - Up[i,j] + c[i-8,j-8]* \\\n ((-735*U[i-8,j]+15360*U[i-7,j]-156800*U[i-6,j]+1053696*U[i-5,j]-5350800*U[i-4,j]+22830080*U[i-3,j...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
localize or attach the UTC timezone to the datetime object provided
def __make_utc(dt): return pytz.UTC.localize(dt)
[ "def set_timezone(unlocalized_datetime: datetime, target_timezone: Union[BaseTzInfo, str]) -> datetime:\n if unlocalized_datetime.tzinfo is not None:\n # remove current tz info and call this function again\n return set_timezone(unlocalized_datetime.replace(tzinfo=None), target_timezone)\n\n if i...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
returns a timedelta that represents forever
def forever(): return timedelta.max
[ "def delta(until=None, *, since=None):\n if not since:\n since = datetime.now()\n\n if since > until:\n return -1\n\n secs = (until - since).seconds\n mins = max(0, secs / 60)\n return int(mins)", "def remaining_time(self):\n return max(self.expiration_deadline - timezone.now()...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Converts a string representing a duration into a timedelta
def duration(string, context=None): if isinstance(string, timedelta): return string elif compat.is_string(string): delta = parse(string, context=context) if delta is None: raise FlumeException('unable to parse the duration "%s"' % string) return delta raise Fl...
[ "def convert_to_timedelta(duration_str):\n hours_str, minutes_str, seconds_str, milliseconds_str = re.split(\n r\"[:.]\", duration_str\n )\n return timedelta(\n hours=int(hours_str),\n minutes=int(minutes_str),\n seconds=int(seconds_str),\n milliseconds=int(milliseconds_s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert a datetime into the ISO8601 string that it represents
def datetime_to_iso8601(dt): return '%s.%03dZ' % (dt.strftime('%Y-%m-%dT%H:%M:%S'), int(dt.microsecond / 1000))
[ "def ISO8601():", "def _datetime_as_isostr(dt: typing.Union[datetime, date, time, timedelta]) -> str:\n # First try datetime.datetime\n if hasattr(dt, \"year\") and hasattr(dt, \"hour\"):\n dt = typing.cast(datetime, dt)\n # astimezone() fails for naive times in Python 2.7, so make make sure d...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Indexes a movie into ES
def index_movie(self, movie): self.es.index(INDEX_MOVIES, 'movie', movie.to_dict(include_subs=False), id=movie.id) elasticsearch.helpers.bulk(self.es, [{ "_id": "%d-%d" % (movie.id, sub.sub_id), "_index": INDEX_SUBTITLES, "_type": "subtitle", "_source": su...
[ "def buildIndex():\n\n covid_index = Index('covid_index')\n if covid_index.exists():\n covid_index.delete() # Overwrite any previous version\n covid_index.document(Article) # register the document mapping\n covid_index.create() # create index with specified mapping and document\n\n \n art...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function that allows you to delete a book from the library by id.
def delete_book(book_id): if len(MyLibrary.books) <= book_id or book_id < 0: abort(404) book = [MyLibrary.books[book_id]] MyLibrary.DeleteBook(book) return jsonify({'result': True})
[ "def delete_book(id):\n book = Book.query.get(id)\n if book is None:\n return jsonify({\"message\": \"ID does not exist!\"}), 400\n else:\n db.session.delete(book)\n db.session.commit()\n return book_Schema.jsonify(book)", "def del_Book(self, Book_id):\n command = u\"\"...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function that allows you to update a book from the library by id.
def update_book(book_id): if len(MyLibrary.books) <= book_id or book_id < 0: abort(404) if not request.json: abort(400) for key in MyLibrary.book_attributes: if key not in request.json or request.json[key] == '': abort(400) try: if int(request.json['Pages']...
[ "def put(self, id: int):\n book = Book.query.get_or_404(id, description='Book not found')\n\n try:\n book_data_updates = book_schema.load(request.get_json())\n except ValidationError as e:\n return {\n 'message': e.messages\n }\n\n if 'titl...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parse data from the datastore into datatable.
def _build_datatable(self): def _get_value_metadata(cfg, data=None): """Get value from metadata.""" if cfg.get('key'): return self.metadata.get(cfg.get('key')) def _get_value_datastore(cfg, data=None): """Get value(s) from datastore.""" # ...
[ "def parse_data():\n for table_name in TABLE_NAMES_LIST:\n file_name = table_name + \".csv\"\n records = []\n\n with open(file_name, \"r\") as f:\n records_data = f.readlines()\n for record_data in records_data:\n record = record_data.split(\",\")\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get value from metadata.
def _get_value_metadata(cfg, data=None): if cfg.get('key'): return self.metadata.get(cfg.get('key'))
[ "def get_info_value(self, key):\n info = self.parse_info(self.get_info())\n if key in info:\n return info[key]\n else:\n return None", "def isolate_value_from_metadata(self, metadata_string):\n metadata_split = metadata_string.split(\":\")\n return float(metadata_split[1])", "de...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Dump the report dataframe to a CSV file.
def dump_to_csv(self): with open(self.output, 'w') as f: f.write(self.dataframe.to_csv())
[ "def dump_csv(df, filename):\n with open(filename, 'w') as f:\n df.to_csv(f)", "def export_df(self, path):\n self.df.to_csv(path)", "def df_to_csv(date,df,org_name):\n fileName= date+'_Transaction_Report_'+org_name+'.csv'\n df.to_csv(fileName,index= False)", "def save_csv(self, ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Dump the report dataframe to a HTML file.
def dump_to_html(self): with open(self.output, 'w') as f: f.write(self.dataframe.to_html())
[ "def export_html(self):\n self._html_exporter.export()", "def data_frame_to_html(data_frame: DataFrame) -> str:\n return data_frame.to_html(float_format=\"%.2f\", index=False,\n classes=[\"table table-striped table-sm\"])", "def write_html(self, fp, html=None):\n he...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Print the value of varibles to the stdout.
def show_vars(self): def _show(name, value): print('\n> _show(%s):\n' % name) print(value) _show('self.config', self.config) _show('self.datastore', self.datastore) _show('self.metadata', self.metadata) _show('self.output', self.output) _show('sel...
[ "def var_dump( varname, varval ):\n print( f'{varname}: {varval}' )", "def print_values(self):\n print(\"**\"*50)\n print(\"default values:\\nsenna path:\\n\", self.senna_path, \\\n \"\\nDependencie parser:\\n\", self.dep_par_path)\n print(\"Stanford parser clr\", \" \".join(se...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Writes out list elements to the screen, highlights the ones marked with '1' in the highlight list refresh = makes menu appear instantly on the screen spacing = the number of empty lines between elements
def draw_list(screen, x0, y0, elements, highlight, spacing=1, refresh=True): x = 0 # the list elements are in different lines, their x coordinate changes for el in elements: if x != highlight: screen.addstr(x0 + x*spacing, y0, " " + str(el) + " ") else: screen.addstr(x0 +...
[ "def display(self):\n self.window.erase()\n for idx, item in enumerate(self.items[self.top:self.top + self.max_lines]):\n # Highlight the current cursor line\n if idx == self.current:\n self.window.addstr(idx, 0, item, curses.color_pair(2))\n else:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Some initial JSON. This is intended to be used as reference to a correctly input JSON.
def initial_json(cls): return json.dumps(cls.INITIAL)
[ "def from_json(self, json_str):", "def _post_process_json(self):\n pass", "def json_input():\n\n data = json.load(sys.stdin)\n global ID # pylint: disable=global-statement\n ID = data.get('files')[0][0] # (id, file)\n # json.dump({'Reason': 'OK', 'Code': 0, 'id': ID,\n # 'Listi...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Remove field from message.
def delete_field(self, name): if 'idb_fields' in self.data: self.data['idb_fields'].remove(name) if name in self.data: del self.data[name]
[ "def removeField(field):", "def remove_field(self, field_key: str) -> None:\n self.add_operation({\n 'op': 'removeField',\n 'fieldKey': field_key,\n })", "def delete_field(self):\n self.exec_command(b'DeleteField')", "def remove_field(self, index: int) -> None:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Publish KPIs to the grafana instance.
def publish(self, metrics): super().publish(metrics) url = config['cern_grafana_url'] resp = self.send(url, [self.data]) logger.debug('Response: %s' % resp)
[ "def publish(self, request, kf_id=None):\n release = Release.objects.get(kf_id=kf_id)\n django_rq.enqueue(publish_release, release.kf_id)\n return Response({'message': 'publishing'})", "def publish_values(self, labeled_values):\n metric_dicts = [\n Metric(time.time(), uuid.uuid4().h...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Connect to postgres and return both data tables.
def get_all_tables(config): pgconnect = connect_postgres(config) pgcursor = pgconnect.cursor() city_df = get_pg_data(pgcursor, 'city_forecast') area_df = get_pg_data(pgcursor, 'area_forecast') pgcursor.close() pgconnect.close() return city_df, area_df
[ "def init_postgresql_connection():\n connection = connect(user='test',\n password='test',\n host='localhost',\n port='5432',\n database='infrastructure')\n cursor = connection.cursor()\n return connection, curso...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
4 row multi plot of overall city data.
def make_city_chart(city_df): fig = make_subplots(rows=4, cols=1, shared_xaxes=True, vertical_spacing=0.05) # Top row: temperature and precipitation fig.add_trace(go.Scatter(x=city_df['time'], y=city_df['tdry'], name='Temperature, F', marker=dict(color...
[ "def plot_map(directory, cities, neurons, iteration):\n plt.scatter(*zip(*cities), color='red', s=3)\n plt.scatter(*zip(*neurons), color='green', s=2)\n\n plt.plot(*zip(*(neurons+[neurons[0]])), color='darkgreen')\n\n # Invert x axis to match representation at\n # http://www.math.uwaterloo.ca/tsp/wor...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Plot area specific data in choropleth.
def make_area_map(area_df, time_slider, map_metric): # Create metric string for hovertext area_df['Metrics'] = '<br>' \ + area_df['d_mile'].map('${:,.2f}/mile<br>'.format) \ + area_df['d_min'].map('${:,.2f}/minute<br>'.format) \ + area_df['d_ride'].ma...
[ "def yaleclimatesurvey_map():\n\t\t\n\t### Set plot parameters and style\n\tsb.set(style='ticks')\n\tfig, axes = plt.subplots(figsize=(10, 8))\n\tfig.subplots_adjust(hspace=0, wspace=0.1)\n\n\t### Read Yale Climate Survey CSV file to Pandas Dataframe\n\tdf = pd.read_csv(paths.climatesurvey_csv_uri)\n\t\n\t### Read ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test serialization of an account
def test_serialize_an_account(self): account = account( owner = "John Doe", account_id = 1, account_type = "credit card", institution_id = 4, balance = 500) data = account.serialize() self.assertNotEqual(data, None) self.assertIn('id', data) self.assertEqual(data['id'], None) ...
[ "def test_deserialize_an_account(self): #also had status\n data = {\"owner\" : \"John Doe\",\"account_id\" : 1,\"account_type\" : \"credit card\",\"institution_id\" : 4,\"balance\" : 500}\n account = account()\n account.deserialize(data)\n self.assertNotEqual(account, None)\n self...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test deserialization of an account
def test_deserialize_an_account(self): #also had status data = {"owner" : "John Doe","account_id" : 1,"account_type" : "credit card","institution_id" : 4,"balance" : 500} account = account() account.deserialize(data) self.assertNotEqual(account, None) self.assertEqual(account.id,...
[ "def test_serialize_an_account(self):\n account = account( owner = \"John Doe\", account_id = 1, account_type = \"credit card\", institution_id = 4, balance = 500)\n data = account.serialize()\n self.assertNotEqual(data, None)\n self.assertIn('id', data)\n self.assertEqual(data['i...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Find an account by owner
def test_find_account_by_owner(self): account( owner = "John Doe", account_id = 1, account_type = "credit card", institution_id = 4, balance = 500).save() next_account = account( owner = "Jane Doe", account_id = 1, account_type = "credit card", institution_id = 4, balance = 500) next_account.sav...
[ "def get_owner(conn, owner_id):\n c = conn.cursor()\n sql = \"\"\"SELECT * FROM owners\n WHERE owner_id=?;\"\"\"\n c.execute(sql, (owner_id,))\n return c.fetchall()", "def get_owner_by_id(self, owner_id, **options):\n owners = self.get_owners(**options)\n for owner in owner...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Beta values for the Sedov solution (coefficients of the polynomials of the similarity variables). Parameters: v — the similarity variable; g — the polytropic gamma; nu — the dimension.
def calc_beta(v, g, nu=3): beta = (nu+2) * (g+1) * array((0.25, (g/(g-1))*0.5, -(2 + nu*(g-1))/2.0 / ((nu+2)*(g+1) -2*(2 + nu*(g-1))), -0.5/(g-1)), dtype=float64) beta = outer(beta, v) beta += (g+1) * array((0.0, -1.0/(g-1), (nu+2) / ((nu+2)*(g+1) -2.0*(2 + nu...
[ "def sedov(t, E0, rho0, g, n=1000, nu=3):\n\n # the similarity variable\n v_min = 2.0 / ((nu + 2) * g)\n v_max = 4.0 / ((nu + 2) * (g + 1))\n\n v = v_min + arange(n) * (v_max - v_min) / (n - 1)\n\n a = calc_a(g, nu)\n beta = calc_beta(v, g=g, nu=nu)\n lbeta = log(beta)\n \n r = exp(-a[0] ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Solve the Sedov problem. Parameters: t — the time; E0 — the initial energy; rho0 — the initial density; n — number of points (default 1000); nu — the dimension; g — the polytropic gas gamma.
def sedov(t, E0, rho0, g, n=1000, nu=3): # the similarity variable v_min = 2.0 / ((nu + 2) * g) v_max = 4.0 / ((nu + 2) * (g + 1)) v = v_min + arange(n) * (v_max - v_min) / (n - 1.0) a = calc_a(g, nu) beta = calc_beta(v, g=g, nu=nu) lbeta = log(beta) r = exp(-a[0] * lbeta[0] - a[2...
[ "def sedov(t, E0, rho0, g, n=1000, nu=3):\n\n # the similarity variable\n v_min = 2.0 / ((nu + 2) * g)\n v_max = 4.0 / ((nu + 2) * (g + 1))\n\n v = v_min + arange(n) * (v_max - v_min) / (n - 1)\n\n a = calc_a(g, nu)\n beta = calc_beta(v, g=g, nu=nu)\n lbeta = log(beta)\n \n r = exp(-a[0] ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Draw a 2D Sedov solution.
def test3(): import pylab as pl r,p,rho,u,r_s,p_s,rho_s,u_s,shock_speed = \ sedov(t=0.05, E0=5.0, rho0=5.0, g=5.0/3.0, nu=2) print 'rho shock', rho_s print 'p shock', p_s print 'u shock', u_s print 'r shock', r_s area = pi*r*r dv = area.copy() dv[1:] = diff(dv) # the...
[ "def diagram():\n g= GWGraphics()\n g.SCREEN( 8 )\n g.PRINT( \"INVOLUTE of a CIRCLE\" )\n g.LOCATE( 3,12 ); g.PRINT( \"Y\" )\n g.LOCATE( 12,50 ); g.PRINT( \"X\" )\n g.LOCATE( 12,11 ); g.PRINT( \"O\" )\n g.LOCATE( 7,11 ); g.PRINT( \"A\" )\n g.LOCATE( 17,11 ); g.PRINT( \"B\" )\n S=0.42 #! ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Old alias name for fraction_display.
def display(self) -> str:
    """Backward-compatible alias: return ``self.fraction_display``."""
    alias_value = self.fraction_display
    return alias_value
[ "def __repr__(self):\n return \"Fractional ideal %s\"%self._repr_short()", "def fractionalize(value):\n f = Fraction(value)\n if f.denominator == f.numerator or f.denominator == 1:\n return value\n else:\n fraction = '<div class=\"fraction\"><span class=\"numerator\">' \\\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }