query
stringlengths
9
9.05k
document
stringlengths
10
222k
negatives
listlengths
19
20
metadata
dict
Get's the campaigns that belong to this ad account Returns campaigns that are greater than 0 spend only
def campaigns(self): return BingCampaign.objects.filter(account=self, campaign_cost__gt=0).order_by('-campaign_cost')
[ "def _get_campaigns(self, params):\n return self._api.account.get_campaigns(params=params)", "def for_campaigns(self):\n return self.active().exclude(campaign__isnull=True).distinct()", "def get_campaigns(self, ad_account_id, query_parameters=None):\n return self.get_iterator(\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Mask defects using mask plane "BAD" [in,out] ccdExposure exposure to process
def maskDefect(self, ccdExposure): maskedImage = ccdExposure.getMaskedImage() ccd = cameraGeom.cast_Ccd(ccdExposure.getDetector()) defectBaseList = ccd.getDefects() defectList = measAlg.DefectListT() # mask bad pixels in the camera class # create master list of defects an...
[ "def guider(self, exposure):\n assert exposure, \"No exposure provided\"\n \n ccd = afwCG.cast_Ccd(exposure.getDetector()) # This is Suprime-Cam so we know the Detector is a Ccd\n ccdNum = ccd.getId().getSerial()\n if ccdNum not in [0, 1, 2, 6, 7]:\n # No need to mask\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Mask defects and trim guider shadow exposure Exposure to process
def guider(self, exposure): assert exposure, "No exposure provided" ccd = afwCG.cast_Ccd(exposure.getDetector()) # This is Suprime-Cam so we know the Detector is a Ccd ccdNum = ccd.getId().getSerial() if ccdNum not in [0, 1, 2, 6, 7]: # No need to mask re...
[ "def guider(self, exposure):\n assert exposure, \"No exposure provided\"\n \n ccd = afwCG.cast_Ccd(exposure.getDetector()) # This is Suprime-Cam so we know the Detector is a Ccd\n ccdNum = ccd.getId().getSerial()\n if ccdNum not in [0, 1, 4, 5, 9]:\n # No need to mask\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Mask defects and trim guider shadow exposure Exposure to process
def guider(self, exposure): assert exposure, "No exposure provided" ccd = afwCG.cast_Ccd(exposure.getDetector()) # This is Suprime-Cam so we know the Detector is a Ccd ccdNum = ccd.getId().getSerial() if ccdNum not in [0, 1, 4, 5, 9]: # No need to mask re...
[ "def guider(self, exposure):\n assert exposure, \"No exposure provided\"\n \n ccd = afwCG.cast_Ccd(exposure.getDetector()) # This is Suprime-Cam so we know the Detector is a Ccd\n ccdNum = ccd.getId().getSerial()\n if ccdNum not in [0, 1, 2, 6, 7]:\n # No need to mask\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check that any code blocks in the rst file is present in at least one of the source files.
def verify_blocks(rst_file, source_files, source_dict): for block_type, source_type in source_dict.items(): # Extract code blocks from rst file. blocks = get_blocks(rst_file, block_type) for line, block in blocks: # Check if block is in the list of files of correct type. ...
[ "def block_in_source(line, block, source_files):\n\n present = False\n code = \"\"\n for sf in source_files:\n f = open(sf, \"r\")\n # Read code and remove whitespace before comparing block and code.\n code = remove_whitespace(f.read())\n\n if block in code:\n present...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check that the code block is present in at least one of the source files.
def block_in_source(line, block, source_files): present = False code = "" for sf in source_files: f = open(sf, "r") # Read code and remove whitespace before comparing block and code. code = remove_whitespace(f.read()) if block in code: present = True f.c...
[ "def verify_blocks(rst_file, source_files, source_dict):\n\n for block_type, source_type in source_dict.items():\n # Extract code blocks from rst file.\n blocks = get_blocks(rst_file, block_type)\n for line, block in blocks:\n # Check if block is in the list of files of correct ty...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Evaluate layer for input x.
def evalulate(self, x): if self.bias_bool: return self.activation(self._layer_func(x) + self.bias) else: return self.activation(self._layer_func(x))
[ "def eval(self, input):\n\n ## Add bias to input\n input = np.array(input) if type(input) != np.array else input\n input = np.concatenate((input, [-1]))\n input = input.reshape((1, input.size))\\\n\n ## Regression Activations\n if self.activation_type == \"linear\":\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Deletes all mappings for the specified bug.
def DeleteAllMappingsForBug(key_name): total_deleted = 0 bug = bugs.GetBugByKey(key_name) query = UrlBugMap.all(keys_only=True).filter('bug = ', bug) mappings = query.fetch(_MAX_RESULTS_CAP) while mappings: total_deleted += len(mappings) db.delete(mappings) mappings = query.fetch(_MAX_RESULTS_CAP)...
[ "def delete_all_maps(self):\n self._delete_all_maps()", "def Delete(id):\n mapping = UrlBugMap.get_by_id(id)\n if mapping:\n mapping.delete()", "def DeleteAllBreakpointMarkers(self):\n self._bpmarkers = {}\n self.MarkerDeleteAll(0)", "def deleteMapping(self, integration_site):\n f...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given a section and an action prefix, return the list of valid actions
def get_valid_actions(self, section, action): valid_actions = [] for candidate_action in sorted(self.actions[section]): if is_string(action) and \ not candidate_action.startswith(action): continue if isinstance(action, list) and candidate_action not...
[ "def get_default_actions(section):\n try:\n default_actions = dict(section['default_actions'])\n except IndexError:\n return {}, {}\n\n action_dict = {action.get_metadata().name: action for action in ACTIONS}\n invalid_action_set = default_actions.values() - action_dict.keys()\n invalid...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Format the possible options for a spectific action.
def format_options(self, section, action): desc = "" parser = OptionParserNoHelpOptions(formatter=self.formatter, add_help_option=False) for option in self.actions[section][action].get("options", []): parser.add_option(option) for option in self.global_options: pa...
[ "def format_options(self, ctx, formatter):\n ...", "def format_options(self, ctx, formatter):\n options = [('--' + param.name.replace('_', '-'), param.help) for param in self.params if type(param) == click.core.Option]\n help_option = self.get_help_option(ctx)\n options.append(('--' + ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Format an candidate action for the help message. The action message may or may include the possible options, dependendin on the value of the options parameter.
def format_action(self, section, action, options=True): fancya = self.prog + " " + action.replace('_', ' ') if self.colorize: desc = " " + rcColor.colorize(fancya, rcColor.color.BOLD) else: desc = " " + fancya desc += '\n\n' if self.async_actions.get(act...
[ "def format_digest(self, action=\"\"):\n action = action.rstrip(\"?\")\n desc = self.usage\n desc += \"Set --help with an action to display its description and supported options.\\n\\n\"\n\n for section in sorted(self.actions):\n valid_actions = self.get_valid_actions(section,...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Format and return a digest of supported actions matching
def format_digest(self, action=""): action = action.rstrip("?") desc = self.usage desc += "Set --help with an action to display its description and supported options.\n\n" for section in sorted(self.actions): valid_actions = self.get_valid_actions(section, action) ...
[ "def get_action_meanings():\n action_meanings = data_preprocessor.get_action_meanings()\n\n return action_meanings, 200, JSON_TYPE", "def request_digest(request):\n request = get_ordered_request(request)\n digest = r''\n for value in request:\n \n digest += unicode(len(value)).rjust(3...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the list of actions supported by svcmgr.
def supported_actions(self): actions = [] for section in self.actions: actions += self.actions[section].keys() actions += self.deprecated_actions return actions
[ "def _available_actions(self, pkgs):\n actions = []\n\n for pkg in pkgs:\n if isinstance(pkg, str):\n if pkg not in sys.modules:\n try:\n __import__(pkg)\n except ImportError:\n raise RuntimeE...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check if the parsed command args list has at least one element to be interpreted as an action. Raise if not, else return the action name formatted as a '_' joined string.
def get_action_from_args(self, args, options): if len(args) is 0: if options.parm_help: self.print_full_help() else: self.print_short_help() action = self.develop_action(args) if action in self.actions_translations: data = sel...
[ "def check_action_name():", "def name(self):\n name = getattr(self.action, \"__name__\", None)\n # ascii(action) not defined for all actions, so must only be evaluated if getattr fails\n return name if name is not None else ascii(self.action)", "def action(self) -> Union[str, Type[argparse....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parse system's argv, validate options compatibility with the action and return options and action
def parse_args(self, argv=None): if argv is not None: self.args = argv else: self.args = sys.argv[1:] # parse a first time with all possible options to never fail on # undefined option. options, args = self.parser.parse_args(self.args) action, opt...
[ "def parse_cli():\n\n argv = sys.argv[:]\n argv.pop(0)\n options = {\"cmd\": argv.pop(0)}\n\n index = 0\n while index < len(argv):\n arg = argv[index]\n\n # check out options\n if \"-\" == arg[0]:\n k = re.sub(\"^-+\", \"\", argv.pop...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Reset the parser usage to the full actions list and their options. Then trigger a parser error, which displays the help message.
def print_full_help(self): if self.args is not None: return usage = self.format_desc() self.parser.error("no action specified\n"+usage)
[ "def make_parser_print_help_on_error(parser):\n def error(self, msg):\n print('error: %s\\n' % msg, file=stderr)\n self.print_help()\n exit(2)\n parser.error = MethodType(error, parser)", "def show_help_message(parser: Genbu) -> None:\n print(usage(parser))\n sys.exit()", "def usage(self):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Removes padding from an input string based on a given block size.
def old_unpad(s): if not s: return s try: return Padding.removePadding(s, blocksize=OLD_BLOCK_SIZE) except AssertionError: # if there's an error while removing padding, just return s. return s
[ "def _unpad_message(text):\n if not text:\n return \"\"\n\n padding_size = ord(text[-1])\n if padding_size > AES.block_size:\n return \"\"\n\n unpadded, padding = text[:-padding_size], text[-padding_size:]\n if any(ord(x) != padding_size for x in padding):\n return \"\"\n\n re...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Utility method to decode a payload consisting of the hexed IV + the hexed ciphertext using the given key. See above for more details.
def decode_aes256(key, iv_plus_encrypted): # grab first AES.block_size bytes (aka 2 * AES.block_size characters of hex) - that's the IV iv_size = 2 * AES.block_size hexed_iv = iv_plus_encrypted[:iv_size] # grab everything else - that's the ciphertext (aka encrypted message) hexed_ciphertext = iv_plu...
[ "def aes_decrypt(msg, key):\n aes_object = AES.new(sha(key))\n return unpad(aes_object.decrypt(msg))", "def decrypt(ct:bytes, key:bytes)->bytes:\n iv = ct[:IV_SIZE]\n ct = ct[IV_SIZE:]\n # 2 - MODE_CBC\n crypto = aes(key, AES_CBC, iv)\n plain = crypto.decrypt(ct)\n # remove padding: \n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
build location service object to inquire about locations
def location_service(self) -> LocationService: assert_value(self.token) return LocationService(self.token, prod=self.prod)
[ "def create_location(self):\n return self.client().post('/api/organizations/1/locations/',\n data=self.location_data)", "def _make_locations(self, response):\n start_time = time.time()\n new_locations = {}\n\n for locationinfo in (response[\"Locations\"...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
build catalog item service to inquire about catalog item
def catalog_item_service(self) -> CatalogItemService: assert_value(self.token) return CatalogItemService(self.token, prod=self.prod)
[ "def get_catalog(self):\n\n catalog = {}\n try:\n req = self.config.session.get(\n f\"https://{self.config.vcac_server}/catalog-service/api/consumer/entitledCatalogItems?limit=9999\",\n verify=self.config.verify,\n timeout=self.config.timeout)\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
build business service object to inquire about current business
def business_service(self) -> BusinessService: assert_value(self.token) return BusinessService(self.token, prod=self.prod)
[ "def get_business_service(cls) -> BusinessService:\n import services\n return services.business_service()", "def create(self, validated_data):\n\n business = models.Business(name=validated_data['name'],linked_to=validated_data['linked_to'])\n business.save()\n\n retu...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
build directory customers service object to inquire about customers
def directory_customers_service(self) -> DirectoryCustomerService: assert_value(self.token) return DirectoryCustomerService(self.token, prod=self.prod)
[ "def _createCustomerObject(self, id, name):\n customer = Customer(name)\n customer.customerId = id\n return customer", "def extractCustomerData(parsedData):\n customersList = list()\n for data in parsedData:\n # sine we are reading a file where branch data is also present,this ch...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
build country service object to inquire about countries
def country_service(self) -> CountryService: assert_value(self.token) return CountryService(self.token, prod=self.prod)
[ "def allCountries():", "def get_countries(self):\n return self._make_transferto_request(action=\"pricelist\", info_type=\"countries\")", "def setUp(self):\n if not self.all_countries:\n print(\"Loading all countries...\")\n country_names = CountryInfo().all()\n for...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Camel case the given string
def camel(s): return s[0].upper() + s[1:]
[ "def _snake_to_camel(snake_case_string):\n words = snake_case_string.split(\"_\")\n return \"\".join((word.capitalize() for word in words))", "def to_camel_case(snake_str, is_lower=True, to_uppers=['url']):\n ret = ''\n components = snake_str.split('_')\n for component in components:\n if co...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Remove the article in the beginning of the given phrase
def removeArticle(s): if s.startswith("a "): return s[2:] elif s.startswith("an "): return s[3:] elif s.startswith("the "): return s[4:] return s
[ "def ignore_articles(string):\n\n str_low = string.lower()\n if str_low.startswith(\"the \"):\n return string[4:]\n elif str_low.startswith(\"an \"):\n return string[3:]\n elif str_low.startswith(\"a \"):\n return string[2:]\n else:\n return string", "def remove_articles...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get a random joke of a random type of either N4, N2V2, NA, or N2
def randomJoke(): joke_type = random.randint(0,3) if joke_type == 0: jokes = generateN4(1) if len(jokes) > 0: return jokes[0] if joke_type == 1: jokes = generateN2V2(1) if len(jokes) > 0: return jokes[0] if joke_type == 2: jokes = genera...
[ "def trick_or_treat():\n return 'trick' if random.random() < .5 else 'treat'", "def get_random_game_type(self):\n numGameTypes = len(self.gameTypes)\n gameTypeIndex = self.np_random.randint(0, numGameTypes) \n gameType = self.gameTypes[gameTypeIndex]\n print('GAME TYPE: ', gameType)\n return ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Export jokeCount number of jokes for each type to a file of the given name
def generateJokeFile(filename="puns.txt", jokeCount=10): wf = open(filename, 'w') # get all N2A2 jokes wf.writelines("N2A2 Jokes:\n") for joke in generateN2A2(jokeCount): wf.writelines(joke + "\n") # get all N4 jokes wf.writelines("N4 Jokes:\n") for joke in generateN4(jokeCount): ...
[ "def write_results(filename):", "def write_tweets(self, path):\n with open(path, 'w') as f:\n for t in self.good:\n f.write(t + \"0\\n\")\n\n for t in self.bad:\n f.write(t + \"1\\n\")", "def save_flash_number_count(self, file_name):\n data_frame...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Download all the packages from the specified list from the apt branch, including verifying them.
def verified_download_full(package_list: tuple) -> dict: apt_url = metadata.get_apt_url() try: verified_info = download_and_verify_package_list(apt_url) return {package_name: download_package(package_name, verified_info) for package_name in package_list} except urllib.error.HTTPError: ...
[ "def download_package_list(mirror_url, distro, arch, snapshot, sha256, packages_url, package_prefix):\n\n if bool(packages_url) != bool(package_prefix):\n raise Exception(\"packages_url and package_prefix must be specified or skipped at the same time.\")\n\n if (not packages_url) and (not mirror_url or...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check the compatibility between `data_list` and the list of OMEGA/MEx observations. Raise `ValueError` if uncompatibility.
def check_list_data_omega(omega_list, data_list, disp=True): if len(omega_list) != len(data_list): raise ValueError("omega_list and data_list must have the same size") else: for i in range(len(omega_list)): if omega_list[i].lat.shape != data_list[i].shape: raise Value...
[ "def check_data_file_list(self):\n print 'check_data_file_list'\n if type(self.data_files) is not ListType:\n raise DistutilsSetupError, \\\n \"'data_files' option must be a list of tuples\"\n\n for lib in self.data_files:\n if type(lib) is not TupleType a...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check the compatibility between `mask_list` and the list of OMEGA/MEx observations. Raise `ValueError` if uncompatibility.
def check_list_mask_omega(omega_list, mask_list, disp=True): if len(omega_list) != len(mask_list): raise ValueError("omega_list and mask_list must have the same size") else: for i in range(len(omega_list)): if omega_list[i].lat.shape != mask_list[i].shape: raise Value...
[ "def check_list_data_omega(omega_list, data_list, disp=True):\n if len(omega_list) != len(data_list):\n raise ValueError(\"omega_list and data_list must have the same size\")\n else:\n for i in range(len(omega_list)):\n if omega_list[i].lat.shape != data_list[i].shape:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Plot previously picked spectra from interactive plots. If two spectra id are given, the ration sp1/sp2 is showed.
def plot_psp(sp1_id, *args, sp2_id=(None, None), Nfig=None, sp_dict=picked_spectra, **kwargs): nfig1, n_sp1 = sp1_id nfig2, n_sp2 = sp2_id if (n_sp2 is None) or (nfig2 is None): lam = sp_dict[nfig1][0] sp = sp_dict[nfig1][n_sp1] ylabel = 'Reflectance' else: lam1, lam2 = s...
[ "def show_plot(self, e):\n plots = plotting.Input(self.name)\n #if user pick in Plot Units section 'Rate' and 'Spectrum', plot:\n if self.var.get() == 'Rate':\n if e == 'spec':\n plots.plot_spectrum_rate()\n #Rate and Time Profile:\n elif e == 'ti...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Find equal opportunity thresholds acc for each estimator.
def get_eqopp_threshold_cache(estimator_index, estimator_name, clf_i, valid_data, results, inds): print('caching results for {} estimator'.format(estimator_name)) thr_cache = {} inds_0 = valid_data[:, 0] < 0.5 inds_1 = valid_data[:, 0] > 0.5 for ...
[ "def compute_accuracy(estimator, optimal_actions):\n predicted_actions = []\n actions_list = []\n for state, actions in optimal_actions:\n probs, _ = estimator(state)\n\n # Get the estimator's predicted action in the range 1 up to 7.\n predicted_action = max(probs, key=probs.get) + 1\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Populate users, channels and group cache from info which is returned on rtm.start
def _populate_cache(self, user_data, channel_data, group_data): for user in user_data.get('members', []): self._user_info_cache[user['id']] = user for channel in channel_data.get('channels', []): self._channel_info_cache[channel['id']] = channel for group in group_data...
[ "def setupCache(self):\n # We get these user data from userState(), or API calls\n self.userNametoID = {}\n self.userNametoDisplayName = {}\n self.IDtoDisplayName = {}\n self.displayNameToUserName = {}", "async def load_chats(self) -> None:\n self.users = await self._get_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Handle / process the result and return timestamp of the last message.
def _handle_result(self, result): existing_last_message_timestamp = self._get_last_message_timestamp() new_last_message_timestamp = existing_last_message_timestamp for item in result: item_type = item['type'] item_timestamp = int(float(item.get('ts', 0))) if...
[ "def result_timestamp(self, result) -> datetime:\n return datetime.utcnow()", "def extract_event_timestamp(self, query_result):\n return None", "def last_test_result():\n \n return Script.last_result", "def get_result(self):\n return self.result.get_last_result()", "def ge...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Retrieve a list of entity definitions.
def get_entities(request): logging.info('views.get_entities') return get_data_json('demo__definition', 'label,numeric', 'category=\'EntityType\'', None)
[ "def get_entity_definitions(self):\n session = self.get_session()\n query = session.query(distinct(DbEvent.objdef)).all()\n res = [i[0].lower() for i in query]\n \n if len(res) == 0:\n self.logger.error(u'No entity definitions found')\n raise SQLAlchemyError(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Retrieve a list of valid options for a specific category and item.
def get_options(request, category, item): logging.info('views.get_options') return get_options_json(category, item)
[ "def test_catalog_category_attribute_option_management_v1_get_items_get(self):\n pass", "def get_item_options(self, item):\n\n if hasattr(item, 'height'):\n opts = self.options('given', item.height)\n else:\n opts = self.options()\n\n return opts", "def test_get...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function for printing the subtitles.
def subtitle(text): # The width of the subtitle string. width = len(text) + 2 # Text. sys.stdout.write("# %s\n" % text) # Bottom bar. sys.stdout.write("#" * width) # Spacing. sys.stdout.write("\n\n")
[ "def print_templates():\n\n print(\"\\n--> No subtitle:\\n\")\n print(DEFAULT_TEMPLATE)\n print(\"\\n--> With subtitle:\\n\")\n print(DEFAULT_TEMPLATE_SUBTITLE)\n print(\"\\n--> Supported placeholders:\")\n print(\" - banner: \" + PH_BANNER)\n print(\" - subtitle: \" + PH_SUBTITLE)\n print(\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Print a summary line.
def summary_line(name, passed, width=100): # Passed. if passed == True: state = "OK" # Skipped. elif passed == 'skip': state = "Skipped" # Failed. else: state = "Failed" # Dots. dots = '' for j in range(width - len(name) - len(state) - 6): dots += ...
[ "def summary(self, line=''):\n\n if self.flag is None:\n self.calc_lifetimes()\n fmt = '%i %.3f %5.3f %.2f %.3f %.4g %.4g\\n'\n efmt = '# %.3f %s: %s \\n'\n if self.flag is not None:\n line += efmt % (self.Z, self.name, self.flag)\n elif self.hb:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Checks if the time of a segment overlaps with the times of existing segments.
def is_overlapping(segment_time,previous_segments): segment_start,segment_end = segment_time overlap = False for previous_start,previous_end in previous_segments: if segment_start <= previous_end and segment_end >= previous_start: overlap = True break r...
[ "def is_overlapping(segment_time,previous_segments):\n segment_start,segment_end=segment_time\n overlap=False\n for previous_start,previous_end in previous_segments:\n if segment_start<=previous_end and segment_end>=previous_start:\n overlap=True\n return overlap", "def is_overlappin...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Insert a new audio segment over the background noise at a random time step, ensuring that the audio segment does not overlap with existing segments.
def insert_audio_clip(background, audio_clip, previous_segments): segment_ms = len(audio_clip) segment_time = get_random_time_segment(segment_ms) while is_overlapping(segment_time, previous_segments): segment_time = get_random_time_segment(segment_ms) previous_segments.append(...
[ "def insert_audio_clip(background,audio_clip,previous_segments):\n segment_ms=len(audio_clip)\n segment_time=get_random_time_segment(segment_ms)\n while is_overlapping(segment_time,previous_segments):\n segment_time=get_random_time_segment(segment_ms)\n previous_segments.append(segment_time)\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates a training example with a given background, activates, and negatives.
def create_training_example(background,activates,negatives): np.random.seed(18) # Make background quieter background = background - 20 y = np.zeros((1,Ty)) previous_segments = [] # Select 0-4 random "activate" audio clips from the entire list of "activates" recordings number_o...
[ "def create_training_example(background, actrivates, negatives, Ty):\n\n background = background - 20 ## make background quieter\n y = np.zeros((1, Ty))\n previous_segments = []\n number_of_activates = np.random.randint(0, 5)\n random_indices = np.random.randint(len(activates), size=number_of_activa...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Allow user to set the time function used for time measurements. By default the time function is Python's standard library time.time(), but the user can choose a different one, for example by using infi.monotonic_time.
def set_time_func(f): global _time _time = f
[ "def time(self, value):", "def _time(function):\n start_time = time.monotonic()\n function()\n return round(time.monotonic() - start_time, 2)", "def setTime():\n global local_time\n local_time = time.time()", "def _time_function(self, func, *args, **kwargs):\n start = time.time()\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Allow user to set the logger used to log the time measurements. By default the logger is a local logger created.
def set_logger(l): global _logger _logger = l
[ "def set_logger(self, logger): \n self.logger = logger\n self.agent.set_logger(logger) #share logger with agent", "def set_logger(self, logger):\n self.LOGGER = logger", "def __init__(self, logger=None):\n if logger is None:\n self.logger = logging.root\n else:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Retrieves a champion by its id.
def champion(champ_id, api_key=None, region=None, **kwargs): region = get_region(region) url = '{}{}/{}/champion/{}'.format(api_url, region, version, champ_id) return get_data_from_api(api_key, url, **kwargs)
[ "def get_champion_by_id(id):\n raw = get_static_data(\"champion.json\")\n champion_raw = next((x for x in raw['data'].values() if x['key'] == str(id)), None)\n if champion_raw is None:\n raise ValueError(\"No champion found with ID: {}\".format(id))\n\n return Champion(champion_raw)", "def getC...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Retrieves rune by its unique id.
def rune(rune_id, api_key=None, region=None, **kwargs): region = get_region(region) url = '{}{}/{}/rune/{}'.format(api_url, region, version, rune_id) return get_data_from_api(api_key, url, **kwargs)
[ "def id_to_char(c_id):\n if c_id < n_letters:\n return all_letters[c_id]\n return None", "def get(self, character_id):\n character = CharacterModel.find_by_id(character_id)\n if character is None:\n ns.abort(404, message=\"Character not found.\")\n else:\n r...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Retrieves summoner spell list.
def summoner_spell_all(api_key=None, region=None, **kwargs): region = get_region(region) url = '{}{}/{}/summoner-spell'.format(api_url, region, version) return get_data_from_api(api_key, url, **kwargs)
[ "def summoner_spell(summoner_spell_id, api_key=None, region=None, **kwargs):\n region = get_region(region)\n url = '{}{}/{}/summoner-spell/{}'.format(api_url, region, version, summoner_spell_id)\n return get_data_from_api(api_key, url, **kwargs)", "def get_summoners(self, role: Role) -> Tuple[int, int]:\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Retrieves summoner spell by its unique id.
def summoner_spell(summoner_spell_id, api_key=None, region=None, **kwargs): region = get_region(region) url = '{}{}/{}/summoner-spell/{}'.format(api_url, region, version, summoner_spell_id) return get_data_from_api(api_key, url, **kwargs)
[ "def summoner_spell_all(api_key=None, region=None, **kwargs):\n region = get_region(region)\n url = '{}{}/{}/summoner-spell'.format(api_url, region, version)\n return get_data_from_api(api_key, url, **kwargs)", "def getSpell(self, spellName):\n trueName = spellName.lower()\n return self.db[...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Retourne l'URL du serveur de provenance
def getServeurURL(self): return RESOURCES[self.server]['url'] \ if self.server in RESOURCES else ''
[ "def get_external_url():", "def _getURL(self):\n return \"http://%s.%s\" % (self.key, self.baseurl)", "def NsUrl(self) -> str:", "def request_url(self):\n return self._url", "def server_url(self):\n pass", "def url(self) -> str:\n return self._flask_request.url", "def get_url...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets up the parameters for time integration
def timeIntegration(params): dt = params["dt"] # Time step for the Euler intergration (ms) sqrt_dt = np.sqrt(dt) duration = params["duration"] # Simulation duration (ms) RNGseed = params["seed"] # seed for RNG startind = 1 # int(max_global_delay + 1) t = np.arange(1, round(duration, 6) / d...
[ "def initialize_time(self):\n self._cur_time = 0\n self._model_timestep = self.sim.model.opt.timestep\n self._control_timestep = 1.0 / self._control_freq", "def __init__(self):\n self.min_time = 6.0*60.0*60.0\n self.min_temp = -10.0\n self.max_temp = 10.0\n self.pe...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a new LiturgicalDate with day, month, and stuff (i.e., the dominicalLetter, goldenNumber, and romanDay). stuff should
def __init__(self, day, month, stuff={}): super(LiturgicalDate,self).__init__(day, month) self.stuff = stuff
[ "def new_date(d):\r\n return date(d.year, d.month, d.day)", "def __init__(self, date='now', days=0):\n self.valid = 1\n if isinstance(date, datetime): #trap a datetime\n self.datetime = date\n elif isinstance(date, DATE): #trap a DATE\n self.datetime = date.datetime...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the LiturgicalDate for day and month plus the offset value; an offset of 0 (default) returns the LiturgicalDate for day/month.
def get(self,day,month,offset=0): ldate = LiturgicalDate(day,month) try: index = self.indices.index(ldate.to_string()) if index >= 0: return self.db[index + offset] except ValueError: print "BAD date: %s" % ldate.to_string()
[ "def calculateDateByOffset(offset, referenceDate=None):\n\n if not referenceDate:\n referenceDate = datetime.date.today()\n elif isinstance(referenceDate, (str, bytes)):\n separator = \"-\" if isinstance(referenceDate, str) else b\"-\"\n y, m, d = referenceDate.split(separator)\n r...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
For the given month, day, and offset, return a dict with the litrugical date and associated saints.
def date_with_saints(month,day,offset=0): date_obj = date_lookup.get(day,month,offset=offset) date = date_obj.to_string() saint_dict = dict() saint_dict["@id"] = "%s/%s/%s/%s" % (BASE_URL, "api/date", month, day) saint_dict.update(date_obj.to_dict()) saint_dict["primary_saints"] = [] saint_dict["secondary_saint...
[ "def comp_days_centered(ndays, offset=0):\n\n ndays = int(ndays)\n n1 = int(ndays // 2)\n n2 = ndays - n1\n\n reldays = collections.OrderedDict()\n reldays['pre'] = np.arange(-offset - n1 - ndays, -offset - n1)\n reldays['onset'] = np.arange(-n1, n2)\n reldays['post'] = np.arange(offset + n2, o...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the list of SSL compression methods supported by SslClient.
def get_available_compression_methods(): # type: () -> List[Text] return _nassl_legacy.SSL.get_available_compression_methods()
[ "def getAvailableCompressionMethods(num: 'unsigned int &') -> \"SbName const *\":\n return _coin.SoOutput_getAvailableCompressionMethods(num)", "def GetSslCiphers(self): # pylint: disable=R0201\n return constants.OPENSSL_CIPHERS", "def SoOutput_getAvailableCompressionMethods(num: 'unsigned int &') -> ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initiate an SSL renegotiation.
def do_renegotiate(self): # type: () -> None if not self._is_handshake_completed: raise IOError('SSL Handshake was not completed; cannot renegotiate.') self._ssl.renegotiate() self.do_handshake()
[ "def _dataReceivedSSL(self, data):\n\n log.msg(\"SSL Response:\", data)\n \n if data == \"S\":\n # check the support for SSL\n tls = interfaces.ITLSTransport(self.transport, None)\n if tls is None:\n err = RuntimeError(\n \"PgPr...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Retrieve the negotiated Ephemeral EC Diffie Helmann parameters.
def get_ecdh_param(self): # type: () -> Dict[str, str] d = self._openssl_str_to_dic(self._ssl.get_ecdh_param(), ' ') d['GroupSize'] = d.pop('ECDSA_Parameters').strip('( bit)') d['Type'] = "ECDH" if 'Cofactor' in d : d['Cofactor'] = d['Cofactor'].split(' '...
[ "def get_hyper_parameters():\n hparams = HParams()\n if FLAGS.train_epochs:\n hparams.train_epochs = FLAGS.train_epochs\n if FLAGS.eval_steps:\n hparams.eval_steps = FLAGS.eval_steps\n return hparams", "def get_homing_params_block(self):\n params = MOT_HomingParameters()\n self.sdk.SCC_Get...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Load the raw dataset from remote URL or local file
def raw(self, use_local=True): if use_local and self.is_local: return pkgutil.get_data('vega_datasets', self.pkg_filename) else: return urlopen(self.url).read()
[ "def load_dataset(self):", "def load_dataset(self) -> None:\n raise NotImplementedError", "def load_dataset():\n data_path = request.args.get('data', type=str)\n path = os.path.join(\"data\", data_path+\".pickle\")\n\n with open(path, \"rb\") as fin:\n global data\n data = pickle.l...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Internal helper for creating a requests `session` with the correct authentication handling.
def _build_session(self, auth_class, *args, **kwargs): session = requests.session() if auth_class: session.auth = auth_class(*args, **kwargs) session.headers.update({requests.utils.to_native_string('CB-VERSION'): self.API_VERSION}) session.headers.update({'Accept': 'applicati...
[ "def create_session():\n session = requests.Session()\n headers = {\n 'Accept': '*/*',\n 'Accept-Encoding': 'gzip, deflate, br',\n 'Accept-Language': 'en-US,en;q=0.5',\n 'Authorization': 'Basic NzJkNTBhZDctNjk4MC00OTQxLWFiNGQtNThkYzM0NjVmMDY5OjczMGUyNzgwMDMxNTkwNWMwYThiYzE0ODRmYTUz...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Load pickled representation of votes.
def load(self): self.votes = {} if os.path.isfile(self.store): poll = pickle.load(open(self.store, 'r')) assert self.title == poll['title'], \ 'Stored poll is not the same as this one.' self.votes = dict([(k, v) for k, v in poll['votes'].iteritems()...
[ "def load(self, votes):\n\t\t# Case of JSON string, otherwise assume list\n\t\tif type(votes) == str:\n\t\t\ttry:\n\t\t\t\tvotes = _json.loads(votes)\n\t\t\texcept ValueError as e:\n\t\t\t\traise e\n\t\t\n\t\ttry:\n\t\t\tassert(type(votes) == list and type(votes[0]) == dict)\n\t\texcept AssertionError:\n\t\t\traise...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Yield the name of the macro based on the class name.
def get_macros(self): name = self.__class__.__name__ if name.endswith('Macro'): name = name[:-5] yield name
[ "def class_abbrev(type):\n ...", "def get_class_name(self):\n return self.name[:-6]", "def class_name(string):\n name = camel(string, capitalize=True)\n if iskeyword(name) or (name in BUILTINS_DICT):\n name += '_'\n return name", "def _class_name_to_command(self):\r\n\r\n comm...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Ingests news for states in the specified range. States are in alphabetical order.
def ingest_news(start, end): states = list(read_config("states").keys()) states = [state.lower() for state in states] load_news(states)
[ "def list_news_range(query_params):\n date_start = query_params.get(\"start_date\").split(\"-\")\n date_end = query_params.get(\"end_date\").split(\"-\")\n try:\n s_year = int(date_start[0])\n s_month = int(date_start[1])\n s_day = int(date_start[2])\n e_year = int(date_end[0])\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Finds a list of legislators for a state and saves the results as a CSV file.
def people_to_csv(state, outfile): people = get_all_people(state, per_page=25, links=True) data = {"name": [], "party": [], "role": [], "district": [], "link": []} for person in people: data["name"].append(person["name"]) data["party"].append(person["party"]) data["role"].append(per...
[ "def write_train_csv(self):\n smiles_only = pd.DataFrame({\"SMILES\": list(self.assays[self.smiles_type])})\n smiles_only.to_csv(self.ligands_csv)", "def group_by_legislators(file_name):\n\n # Load the data\n my_XLSIO = XLSIO(file_name)\n worksheet = my_XLSIO.load()\n\n # Sort the parsed...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Does User.signup() fail correctly with bad data?
def test_user_signup_bad_input(self): good_test_signup = User.signup(username='duplicate_test_username', email='test_signup@gmail.com', password='hahaplaintextpassword', image_url='www.google.com') with self.assertRaises(IntegrityError): bad_test_signup = User.signup(username='duplicate_tes...
[ "def test_user_signup_fail_same_email(self):\n\n signupUser = User.signup(username=\"same_email_user\",\n email=\"test@test.com\",\n password=\"HASHED_PASSWORD\",\n image_url=\"\")\n\n with self.assertRaise...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
use production environment on remote host
def production(): env.root = root = '/opt/www.commcarehq.org_project' env.virtualenv_root = _join(root, 'env/cchq_www') env.code_root = _join(root, 'src/commcare-hq') env.pre_code_root = _join(root, 'src/_commcare-hq') env.code_branch = 'master' env.sudo_user = 'cchqwww' env.hosts = ...
[ "def staging():\n env.root = root = '/home/dimagivm/'\n env.virtualenv_root = _join(root, 'cchq')\n env.code_root = _join(root, 'commcare-hq')\n env.code_branch = 'staging'\n env.sudo_user = 'root'\n env.hosts = ['192.168.7.223']\n env.environment = 'staging'\n env.user = prompt(\"User...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
pull from staging branch into production to do a data migration
def migration(): production() env.code_branch = 'staging' env.restart_server = False
[ "def copy_production_to_environment():\n require('environment', provided_by=[staging, production])\n\n if env.environment == \"production\":\n print(red(\"You cannot run this command on the production environment\"))\n return\n\n if not exists('~/.pgpass'):\n print(\n \"In order...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
use staging environment on remote host
def staging(): env.root = root = '/home/dimagivm/' env.virtualenv_root = _join(root, 'cchq') env.code_root = _join(root, 'commcare-hq') env.code_branch = 'staging' env.sudo_user = 'root' env.hosts = ['192.168.7.223'] env.environment = 'staging' env.user = prompt("Username: ", defau...
[ "def production():\n env.root = root = '/opt/www.commcarehq.org_project'\n env.virtualenv_root = _join(root, 'env/cchq_www')\n env.code_root = _join(root, 'src/commcare-hq')\n env.pre_code_root = _join(root, 'src/_commcare-hq')\n env.code_branch = 'master'\n env.sudo_user = 'cchqwww'\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Our production server in India.
def india(): env.root = root = '/home/commcarehq' env.virtualenv_root = _join(root, '.virtualenvs/commcarehq') env.code_root = _join(root, 'src/commcare-hq') env.pre_code_root = _join(root, 'src/_commcare-hq') env.code_branch = 'master' env.sudo_user = 'commcarehq' env.hosts = ['220....
[ "def isProdHost():\n\n return _Control.TIER.name == \"PROD\"", "def server_url(self):\n pass", "def mondrian_server_internal_url():\n return 'http://127.0.0.1:8080'", "def server_information(self):", "def is_local():\n return 'sinwoo' in socket.gethostname()", "def production():\n env.r...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
modify path to use virtualenv's python
def enter_virtualenv(): return prefix('PATH=%(virtualenv_root)s/bin/:$PATH' % env)
[ "def setup_virtualenv(path='~/.env'):\n if not exists(path):\n v = '1.11.4'\n tarball = 'virtualenv-' + v + '.tar.gz'\n run('curl --insecure '\n '-O https://pypi.python.org/packages/source/v/virtualenv/' +\n tarball)\n run('tar xvfz ' + tarball)\n with cd(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
restart cchq_www service on remote host. This will call a stop, reload the initctl to have any config file updates be reloaded into intictl, then start cchqwww again.
def service_restart(): require('root', provided_by=('staging', 'production')) with settings(sudo_user="root"): sudo('stop cchq_www', user=env.sudo_user) sudo('initctl reload-configuration', user=env.sudo_user) sudo('start cchq_www', user=env.sudo_user)
[ "def restart_webserver():\n require('service_name')\n sudo('service nginx reload')\n try:\n sudo('stop %(service_name)s' % env)\n except: # Might be already stopped\n pass\n try:\n sudo('start %(service_name)s' % env)\n except: # Might be already started\n pass", "d...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
stop cchq_www service on remote host.
def service_stop(): require('root', provided_by=('staging', 'production')) with settings(sudo_user="root"): sudo('stop cchq_www', user=env.sudo_user)
[ "def stop():\n site = _env.hostout.options.get('hostname')\n if _env.hostout.options.get('remote-sudo') == 'true':\n _sudo('supervisorctl stop {0:s}:*'.format(site))\n else:\n _run('supervisorctl stop {0:s}:*'.format(site))", "def stop_webserver():\r\n _webserver_do('stop')", "def stop...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a list of time features that will be appropriate for the given frequency string.
def time_features_from_frequency_str(freq_str: str) -> List[TimeFeature]: features_by_offsets = { offsets.YearEnd: [], offsets.QuarterEnd: [MonthOfYear], offsets.MonthEnd: [MonthOfYear], offsets.Week: [DayOfMonth, WeekOfYear], offsets.Day: [DayOfWeek, DayOfMonth, DayOfYear],...
[ "def time_features_from_frequency_str(offset) -> List[TimeFeature]:\n\n features_by_offsets = (\n (Timedelta(seconds=60), [\n SecondOfMinute,\n MinuteOfHour,\n HourOfDay,\n DayOfWeek,\n DayOfMonth,\n DayOfYear]), # 6 for second - minutes\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Changes the value of the use_deep attribute
def set_deep(self, deep): self.use_deep = deep
[ "def disable_deep_image(self) -> bool:\n return self._disable_deep_image", "def setdeepattr(d, keys, value):\n if not isinstance(keys, (list, tuple)):\n keys = (keys,)\n\n top, *rest = keys\n\n if rest:\n if top not in d:\n d[top] = {}\n\n setdeepattr(d[top], rest, ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Changes the value of the mix attribute
def set_mix(self, mix): self.mix = mix
[ "def mix(self, mixin, volume = -6):\n mp = lowlevel.SM_PATH_MIX_PARMS()\n mp.path = self.path\n mp.enable = True\n mp.mixin = mixin.get_datafeed()\n mp.volume = volume\n\n log.debug('%s mix(%s, volume: %d)', self.name, mixin.name, volume)\n\n rc = lowlevel.sm_path_mi...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Changes the value of the display_only attribute
def set_display_only(self, display_only): self.display_only = display_only
[ "def visible(self, val):\n self.set_visible(val)", "def add_to_readonly_fields():\n return ['show_publish_status']", "def verifiedHide(self):\n return ''", "def noEdit(self):\r\n self.first_name.configure(state='readonly')\r\n self.last_name.configure(state='readonly')\r\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Performs a shallow copy of this singly linked list. The copy is made by copying the individual fields of this singly linked list into a new singly linked list object.
def __copy__(self): return self.do_copy(SinglyLinkedList())
[ "def __copy__(self):\n result = SinglyLinkedListElement(None, None, None)\n result.list = self.list\n result.data = self.data\n result.next = self.next\n return result", "def copy(self, l: 'SoBaseList') -> \"void\":\n return _coin.SoBaseList_copy(self, l)", "def copy(se...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Inserts the specified item at the specified index in this singly linked list. If an element is already present at the specified index, this element is replaced by the new element. If the singly linked list is empty prior to inserting the new element, the new element is inserted in front of the singly linked list.
def insert_at(self, index, item): ptr = self.head if ptr is None: self.head = SinglyLinkedListElement(self, item, None) self.tail = self.head self.size += 1 return i = 0 while ptr is not None and ptr.data is not None: if i == in...
[ "def insert_at_index(self, index, item):\n if not (0 <= index <= self.size):\n raise ValueError('List index out of range: {}'.format(index))\n if index == self.size:\n self.append(item)\n return\n if index == 0:\n self.prepend(item)\n retur...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Inserts the specified item before the specified element in this singly linked list.
def insert_before_element(self, item, element): if item is not None and element is not None: element.insert_before(item) else: raise IndexError
[ "def insert_before(self, item):\n new_element = SinglyLinkedListElement(self.list, item, None)\n current_element = self.list.head\n was_inserted = False\n # Insertion happens before the first element\n if self == current_element:\n # This is the case where there is only...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Inserts the specified item aftere the specified element in this singly linked list.
def insert_after_element(self, item, element): if item is not None and element is not None: element.insert_after(item) else: raise IndexError
[ "def insert_after(self, item):\n new_element = SinglyLinkedListElement(self.list, item, None)\n\n if self == self.list.tail:\n self.list.tail = new_element\n\n current_element = self.list.head\n was_inserted = False\n # Insertion happens after the first element\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Remove the specified item from this singly linked list.
def remove(self, item): tmp = SinglyLinkedListElement(self, item, None) tmp.remove()
[ "def remove(self, item):\n if self.is_empty():\n return False\n return self.recursive_remove(self.head, item)", "def delete(self, item):\n curr = self.head\n found = False\n while not found and curr is not None:\n if curr.data == item:\n foun...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates a shallow copy of the singly linked list element. The copy is made by copying all fields of this singly linked list element into a new object holding the new singly linked list element.
def __copy__(self): result = SinglyLinkedListElement(None, None, None) result.list = self.list result.data = self.data result.next = self.next return result
[ "def __copy__(self):\n return self.do_copy(SinglyLinkedList())", "def insert(self, item):\n new_element = SinglyLinkedListElement(self.list, item, self.next)\n # The singly linked list is empty\n if self.list.head is None:\n self.list.head = new_element\n self.lis...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Inserts the specified item into this singly linked list element. The element already present is overwritten by the new element. If the list is empty before insertion the new element is inserted at the front of the linked list.
def insert(self, item): new_element = SinglyLinkedListElement(self.list, item, self.next) # The singly linked list is empty if self.list.head is None: self.list.head = new_element self.list.tail = new_element self.list.size += 1 return # T...
[ "def insert_before(self, item):\n new_element = SinglyLinkedListElement(self.list, item, None)\n current_element = self.list.head\n was_inserted = False\n # Insertion happens before the first element\n if self == current_element:\n # This is the case where there is only...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Inserts a singly linked list element containing the specified item before this singly linked list element.
def insert_before(self, item): new_element = SinglyLinkedListElement(self.list, item, None) current_element = self.list.head was_inserted = False # Insertion happens before the first element if self == current_element: # This is the case where there is only one elemen...
[ "def insert_before_element(self, item, element):\n if item is not None and element is not None:\n element.insert_before(item)\n else:\n raise IndexError", "def insert_after(self, item):\n new_element = SinglyLinkedListElement(self.list, item, None)\n\n if self == ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Inserts a singly linked list element containing the specified item after this singly linked list element.
def insert_after(self, item): new_element = SinglyLinkedListElement(self.list, item, None) if self == self.list.tail: self.list.tail = new_element current_element = self.list.head was_inserted = False # Insertion happens after the first element if self == cu...
[ "def insert_after_element(self, item, element):\n if item is not None and element is not None:\n element.insert_after(item)\n else:\n raise IndexError", "def insert(self, item):\n new_element = SinglyLinkedListElement(self.list, item, self.next)\n # The singly lin...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Removes this singly linked list element from the singly linked list.
def remove(self): current_element = self.list.head was_removed = False # The first element is being removed if self == current_element: # This is the case where there is only one element in the list if current_element.next is None: self.list.head =...
[ "def remove(self, item):\n tmp = SinglyLinkedListElement(self, item, None)\n tmp.remove()", "def deleteFirst(self):\n if self.is_empty():\n raise Empty('LinkedList is empty')\n # to do\n else:\n answer = self._head._element\n self._head = self._head._next\n self._s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Overwrites the nil_slot (first row) of the input Tensor with zeros. The nil_slot is a dummy slot and should not be trained and influence the training algorithm.
def zero_nil_slot(t, name=None): with tf.name_scope(name, "zero_nil_slot", [t]) as name: # pdb.set_trace() t = tf.convert_to_tensor(t, name="t") s = tf.shape(t)[1] z = tf.zeros(tf.stack([1, s])) return tf.concat(axis=0, values=[z, tf.slice(t, [1, 0], [-1, -1])], name=name)
[ "def zero_nil_slot(t, name=None):\n with name_scope(values=[t], name=name, default_name=\"zero_nil_slot\") as name:\n t = tf.convert_to_tensor(t, name=\"t\")\n s = tf.shape(t)[1]\n z = tf.zeros(tf.stack([1, s]))\n return tf.concat(\n axis=0, values=[z, tf.slice(t, [1, 0], [...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Load morphometrics measured from field data for Bogue Banks for the given year
def load_field_data(year): # Set a path to the data fname = os.path.join(DATA_DIR, f'Morphometrics for Bogue Banks {year}.csv') # Load the data into a dataframe df = pd.read_csv(fname, delimiter=',', header=0) # Add a column for the dune shape df['Ratio'] = (df['yCrest'] - df['yToe']...
[ "def load_year(observatory=None, year=None, path=None):\n dates_in_year = pd.date_range(\n start=f'{year}-01-01', end=f'{year}-12-31', freq='D'\n )\n df = pd.DataFrame()\n for date in dates_in_year:\n ymd = date.strftime('%Y%m%d')\n file_name = f'{observatory}{ymd}dmin.min'\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Load the initial height, dune ratio, and stretches for the current experiment
def load_plot_data(experiment, backup=False): # Set the stretches dune_stretches = [-60, -40, -20, 1, 20, 40, 60] storm_stretches = [1, 12, 18, 24, 36, 48] # Set empty lists dune_ratio, init_height, use_stretches, final_dune_ratio = [], [], [], [] # Set an empty dict volume_time...
[ "def initSizes(self):\r\n self.width = 180\r\n self.height = 240\r\n self.edge_roundness = 10.0\r\n self.edge_padding = 10.0\r\n self.title_height = 24.0\r\n self.title_horizontal_padding = 4.0\r\n self.title_vertical_padding = 4.0", "def set_height(self, height: f...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Plot the phase diagrams for the data based on differences in volume loss. Add contour lines with labels Set overwash to True to modify the color range for overwash measurements
def delta_volume_loss_phases(ax, x, y, z, overwash=False): # Set the levels if overwash: clevels = np.linspace(0, 50, 100) contours = [0, 10, 20, 30, 40, 50] vlimit = max(contours) else: clevels = np.linspace(-50, 50, 100) contours = [-50, -40, -30, -20, -10...
[ "def volume_loss_phases(ax, x, y, z):\r\n\r\n # Use the \"afmhot\" colormap for the plots. Darker colors will\r\n # signify more volume loss\r\n cmap = plt.cm.get_cmap('afmhot')\r\n\r\n # Plot a filled contour of the data\r\n plot = ax.tricontourf(x, y, z,\r\n levels=np.linsp...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set a tight and transparent background for the figure, then save and close it
def save_and_close(fig, title, tight=True): # Set a tight layout and a transparent background if tight: plt.tight_layout() fig.patch.set_color('w') fig.patch.set_alpha(0.0) # Save and close the figure title_w_extension = os.path.join(FIGURE_DIR, f'{title}.png') pl...
[ "def save(f,path_or_page,dpi=None):\n f.patch.set_alpha(0)\n if type(path_or_page) is str:\n f.savefig(path_or_page,dpi=dpi,bbox_inches=\"tight\",transparent=True,interpolation='none',pad_inches=0,tight=True)\n else:\n path_or_page.savefig(f,dpi=dpi,bbox_inches=\"tight\",transparent=True,interpolation='non...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Make ticks with timestamps from a DataFrame columns of times
def time_ticks(df, nticks): time = df['Time'] start = pd.Timestamp(time.iloc[0]) + dt.timedelta(hours=12) end = pd.Timestamp(time.iloc[-1]) + dt.timedelta(hours=12) t = np.linspace(start.value, end.value, 7) t = pd.to_datetime(t) return [tval.strftime('%Y-%m-%d') for tval in t]
[ "def from_data_frame_time_intervals(data_frame):\n ans = ''\n for column in data_frame:\n ans += from_values_to_time_intervals(data_frame[column].values.tolist())\n return ans", "def timeBin(df, width, col = 'te'):\n # col = 'te'\n df['timebin'] = df.apply (lambda row: createTimebin(row, wid...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Plot the phase diagrams for the data based on volume loss. Add contour lines with labels
def volume_loss_phases(ax, x, y, z): # Use the "afmhot" colormap for the plots. Darker colors will # signify more volume loss cmap = plt.cm.get_cmap('afmhot') # Plot a filled contour of the data plot = ax.tricontourf(x, y, z, levels=np.linspace(-70, 10, 40), ...
[ "def phase_plane(self):\n plt.figure(figsize=(8, 5))\n plt.plot(self.V, self.W, color='cornflowerblue')\n plt.plot(self.V, self.V - (self.V**3)/3 + self.I, color=\"slateblue\")\n plt.plot(self.V, (self.V + self.a)/(self.b), color=\"red\")\n plt.xlabel('Voltage [V]', fontsize=12)\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Plot volume loss versus dune shape for Bogue Banks field profiles measured in October 2017 and October 2018
def figure_10(): # Load field data from 2017 and 2018 data_2017, data_2018 = load_field_data(2017), load_field_data(2018) # Setup the figure fig, ax = plt.subplots(figsize=(figure_inches * 1.5, figure_inches), dpi=figure_dpi) # Add a grid add_grids(axs=ax) # Plot the data ...
[ "def visualizeBuildingVolume(GDF):\n f, ax = plt.subplots(1, figsize=(10, 10))\n ax.set_title('Buildings on the WUR campus and their volume')\n GDF.plot(ax=ax,column = 'volume', scheme='fisher_jenks', k=6, \n cmap=plt.cm.viridis, linewidth=1, edgecolor='black', legend=True)\n ax.set_fac...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Plot the CDF of dune volumes on Bogue Banks measured from LiDAR and mark off where 52m3/m falls
def supp_figure_1(): # Load the data df = pd.read_csv(os.path.join(DATA_DIR, 'Bogue Banks Volumes and Aspect Ratios.csv')) # Set bins edges data_set = sorted(set(df['Volume'].dropna())) bins = np.append(data_set, data_set[-1] + 1) # Use the histogram function to bin the data and find...
[ "def visualizeBuildingVolume(GDF):\n f, ax = plt.subplots(1, figsize=(10, 10))\n ax.set_title('Buildings on the WUR campus and their volume')\n GDF.plot(ax=ax,column = 'volume', scheme='fisher_jenks', k=6, \n cmap=plt.cm.viridis, linewidth=1, edgecolor='black', legend=True)\n ax.set_fac...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
get_coords_from_nodes takes a list of disparately found nodes and returns their coordinate pairs(x, y)
def get_coords_from_nodes(penetration_nodes): dim_coords = [] try: dim_coords = [dimi[0].children[dimi[1]].unpack_coords for dimi in penetration_nodes] except: for coord_pair in penetration_nodes: try: temp_coords = coord_pair[0].children[coord_pair[1]].unpack_c...
[ "def getNodeXY(id):\n for n in nodes:\n if n[0] == id:\n return (n[2], n[3])", "def _get_coordinates(self, node):\n return [\n round(float(node.attrib['xmin']), 3),\n round(float(node.attrib['ymax']), 3),\n round(float(node.attrib['xmax']), 3),\n round(float(node.attrib['ym...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
If you only have a branch id send that into get_cid_from_bid and search the cluster_dict and return the cluster id that contains that branch id
def get_cid_from_bid(graph, bid): check = [] kokey = graph.cluster_dict.keys() for item in kokey: if bid in graph.cluster_dict[item].keys(): return item return check
[ "def cluster_identifier(self) -> str:\n ...", "def get_cluster_id():\n resolver = dns.resolver.Resolver()\n cluster_id = resolver.query('dataproc.rasterfoundry.com', 'TXT')[0]\n return cluster_id.to_text().strip('\"')", "def get_city_cluster_id(df, ix=5, city_num=30, json_path=None):\n df = d...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert the matrix integer or flat_value into its coordinate pair. Should only be used with nonnode values. Nodes have their one fuction unpackCoords
def unpack_coords(flat_value, width): # this rounds the y value which makes the x value a whole number as well y = flat_value/width x = abs((y*width)-flat_value) return x, y
[ "def node_to_tuple(self,node_num):\n row = (node_num-1) / self.cols\n col = (node_num-1) % self.cols\n return (row,col)", "def unpack_coords(self):\n y = self.flat_value/Point.width\n x = abs((y * self.width) - self.flat_value)\n return x, y", "def int_pair(self):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
unpackCoords converts the flat values found in the cluster dict to thier appropriate coordinate pair.
def unpack_coords(self):
    """Convert this point's flat_value into its (x, y) coordinate pair.

    Uses floor division so the quotient is an integer row index; under
    Python 3's true division the original made x collapse to 0 because
    y * width reproduced flat_value exactly.
    NOTE(review): the original mixes Point.width (for the division) and
    self.width (for the remainder); both references are preserved here,
    but confirm they are always the same value.
    """
    y = self.flat_value // Point.width
    x = abs((y * self.width) - self.flat_value)
    return x, y
[ "def unpack_coords(flat_value, width):\n\n # this rounds the y value which makes the x value a whole number as well\n y = flat_value/width\n x = abs((y*width)-flat_value)\n return x, y", "def _parse_coordinates(self, vars: Dict[str, VariableDefinition]) -> Tuple[Dict[str, VariableDefinition], Dict[str...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
angle_span is used only with two penetration lists
def angle_span(self):
    """Return the difference between the two angles in self.angle_list,
    wrapped into (-180, 180]: spans above 180 degrees come back negative.

    Returns -1 when angle_list is missing, is not unpackable into
    exactly two values, or holds non-numeric entries.  The original used
    a bare `except:`, which silently swallowed every exception; this is
    narrowed to the exceptions the body can actually raise.
    """
    try:
        angle_one, angle_two = self.angle_list
        dif = abs(angle_one - angle_two)
        if dif > 180:
            dif -= 360
        return dif
    except (AttributeError, TypeError, ValueError):
        return -1
[ "def ang_sep(l1,b1,l2,b2):\n sin_theta = np.sqrt((np.cos(b2 * _d2r) * np.sin((l1 - l2) * _d2r)) ** 2 +\n (np.cos(b1 * _d2r) * np.sin(b2 * _d2r) - \n np.sin(b1 * _d2r) * np.cos(b2 * _d2r) * np.cos((l1 - l2) * _d2r)) ** 2)\n cos_theta = (np.cos(b1 * _d2r) * np.cos(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This adds a node to a branch. It either appends the new node to the existing list or simply adds it if there is no list
def add_node(cls, cluster_id, branch_id, node): try: # try to pull the node list out node_list = cls.cluster_dict[cluster_id][branch_id] if node not in node_list: node_list.append(node) cls.cluster_dict[cluster_id][branch_id]= node_list exce...
[ "def add_branch(self, tree):\n# print 'ADDING BRANCH', tree\n self._insert_branch(None, tree)\n self._cleanup_branches()", "def addBranch(self, branch):\n if self._isLeaf:\n return false\n else:\n self._branches.append(branch)\n self._branches[-1...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
getLastGen retrieves a list of the last line of possible fathers. These fathers will then be tested against the new children for paternity
def get_last_gen(Graph, current_line): last_generation = [] # saved_roots = [] cluster_keys = Graph.cluster_dict.keys() for ckey in cluster_keys: branch_keys = Graph.cluster_dict[ckey].keys() #keys for branch if len(branch_keys) == 1: if ckey ...
[ "def realGenMothers(gp):\n ret = []\n for i in range(gp.numberOfMothers()):\n mom = gp.mother(i)\n if mom.pdgId() == gp.pdgId():\n ret += realGenMothers(mom)\n else:\n ret.append(mom)\n return ret", "def make_gf_graph(self):\n stack = [([(1, tuple(self.sa...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
If we find orphans, we create a new node out of the Point class objects and insert it into the cluster dictionary at the top level; we send in a node already. The node's parent points to itself as its cluster_id — that's how we know it's a cluster later when assembling the drawing
def create_cluster_branch(self, orphan, line_num): # create cluster try: cluster_id = orphan.flat_value except: cluster_id = tuple([x.flat_value for x in orphan]) # create a node instance and add it to the graph orphan_node = Node(cluster_id, cluster_id, cluste...
[ "def __expand_cluster(self, point):\r\n \r\n cluster = None;\r\n self.__visited[point] = True;\r\n neighbors = self.__neighbor_indexes(point);\r\n \r\n if (len(neighbors) >=self.__neighbors):\r\n \r\n cluster = [];\r\n cluster.append(point...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }