query (string, lengths 9-9.05k) | document (string, lengths 10-222k) | negatives (list, lengths 19-20) | metadata (dict)
|---|---|---|---|
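Each sample row below pairs a natural-language query (typically a docstring) with the code that implements it (the document) and a list of hard-negative code snippets; the metadata cell of every row marks the sample for triplet-style training via `"triplet": [["query", "document", "negatives"]]`. As a minimal sketch of how rows with this schema could be consumed, assuming they were exported to a JSON-lines file (the filename `triplets.jsonl` and the helper name are hypothetical, not part of this dataset):

```python
# Illustrative sketch only: assumes each line of "triplets.jsonl" (hypothetical
# filename) is one record with the four fields shown in the header above:
# "query", "document", "negatives", and "metadata".
import json
from typing import Iterator, Tuple


def iter_triplets(path: str) -> Iterator[Tuple[str, str, str]]:
    """Yield (query, positive document, negative document) training triplets.

    The metadata's objective field indicates the row is meant to be read as
    (query, document, negatives), so every entry in the negatives list is
    paired with the same query and positive document.
    """
    with open(path, encoding="utf-8") as f:
        for line in f:
            row = json.loads(line)
            query = row["query"]
            positive = row["document"]
            for negative in row["negatives"]:
                yield query, positive, negative


if __name__ == "__main__":
    # Print the first few triplets as a quick sanity check.
    for i, (q, pos, neg) in enumerate(iter_triplets("triplets.jsonl")):
        print(q[:60], "|", pos[:60], "|", neg[:60])
        if i >= 2:
            break
```

The sample rows themselves follow, reproduced verbatim (long cells are truncated with "..." in the preview).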
plot_clex_hull_data_1_x(fit_dir, hall_of_fame_index, full_formation_energy_file='full_formation_energies.txt') Function to plot DFT energies, cluster expansion energies, DFT convex hull and cluster expansion convex hull. | def plot_clex_hull_data_1_x(
fit_dir,
hall_of_fame_index,
full_formation_energy_file="full_formation_energies.txt",
):
# TODO: Definitely want to re-implement this with json input
# Pre-define values to pull from data files
# title is intended to be in the form of "casm_root_name_name_of_specifi... | [
"def calculate_hull(self):\n\n coords = np.array([atom[1:] for atom in self.molecule.coords['input']])\n\n hull = ConvexHull(coords)\n\n fig = plt.figure()\n ax = fig.add_subplot(111, projection=Axes3D.name)\n\n ax.plot(coords.T[0], coords.T[1], coords.T[2], 'ko')\n\n for s... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
read_corr_and_formation_energy(datafile) Reads and returns data from json containing correlation functions and formation energies. | def read_corr_and_formation_energy(datafile):
with open(datafile) as f:
data = json.load(f)
corr = []
formation_energy = []
scel_names = []
for entry in data:
corr.append(np.array(entry["corr"]).flatten())
formation_energy.append(entry["formation_energy"])
scel_names... | [
"def read_comp_and_energy_points(datafile):\n with open(datafile) as f:\n data = json.load(f)\n points = [\n [x[0] for x in entry[\"comp\"]] + [entry[\"formation_energy\"]] for entry in data\n ]\n points = np.array(points)\n return points",
"def read_data_feather(path):\n jsonf = o... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Receive a list of interviews for company. | def get_interviews(self, obj):
interviews = Interview.for_company(obj.id)
return BaseInterviewSerializer(
interviews, many=True, read_only=True
).data | [
"def scrape_interview_data(self, num_result):\n videos_id = self._get_video_list(self.investor, num_result)\n basis = math.ceil(num_result / 100)\n \n # Get the caption of each vedio iteratively using \"get api\"\n for i, video_id in enumerate(videos_id):\n fetched_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Select random user agent from self._userAgents | def getRandomUserAgent(self):
if not self._hasUserAgents:
self._createUserAgents()
return random.choice(self._userAgents) | [
"def useragent():\n agents = db.get_from_data_json([\"useragents\"])\n return random.choice(agents)",
"def random_user_agent():\n software_names = [SoftwareName.CHROME.value]\n operating_systems = [OperatingSystem.WINDOWS.value, OperatingSystem.LINUX.value]\n user_agent_rotator = UserAgent(software... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Creates self._userAgents and changes _hasUserAgents flag | def _createUserAgents(self):
pool = Pool(5)
results = pool.map(self._parseUserAgents, self.SOFTWARE.values())
pool.close()
pool.join()
for element in results:
self._appendToUserAgents(element)
if not self._userAgents:
self._userAgents = DEFAULT_U... | [
"def _appendToUserAgents(self, userAgents):\n for userAgent in userAgents:\n self._userAgents.append(userAgent)",
"def _clearUserAgents(self):\n self._userAgents = []\n self._hasUserAgents = False",
"def syncUserAgents(self):\n raise NotImplementedError",
"def _set_usera... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Clears self._userAgents and changes _hasUserAgents flag | def _clearUserAgents(self):
self._userAgents = []
self._hasUserAgents = False | [
"def _appendToUserAgents(self, userAgents):\n for userAgent in userAgents:\n self._userAgents.append(userAgent)",
"def cleanup_agents(self):\n self._agents = [a for a in self._agents if not a.finished]",
"def syncUserAgents(self):\n raise NotImplementedError",
"def clear(self):... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Parses the website and finds associated user agents with the software | def _parseUserAgents(self, software):
if software not in self.SOFTWARE.values():
raise ValueError('Invalid function parameter')
try:
response = requests.get(software)
soup = BeautifulSoup(response.content, "html.parser")
userAgents = soup.find_all('td', c... | [
"def parse_tools(url):\n\tdoc = urllib2.urlopen(url)\n\tlines = doc.readlines()\n\n\t# Find the indicies in the list of HTML lines that separate hardware / software sections\n\tsection_regex = re.compile(\"What hardware do you use?|And what software?|What would be your dream setup?\")\n\theader_inds = [i for i, l i... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Append elements of user agents list to self._userAgents | def _appendToUserAgents(self, userAgents):
for userAgent in userAgents:
self._userAgents.append(userAgent) | [
"def syncUserAgents(self):\n raise NotImplementedError",
"def _createUserAgents(self):\n pool = Pool(5)\n results = pool.map(self._parseUserAgents, self.SOFTWARE.values())\n pool.close()\n pool.join()\n\n for element in results:\n self._appendToUserAgents(eleme... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Adds a candidate format field to the dictionary | def add_format(self, key, value):
# type: (str, str) -> None
self.format_fields[key] = value | [
"def add_format_field(self, chrom, coord, ref, alt, fname, fvalue):\n self.vcf[(chrom, coord, ref, alt)][8] = ':'.join([self.vcf[(chrom, coord, ref, alt)][8],\n fname])\n self.vcf[(chrom, coord, ref, alt)][9] = ':'.join([self.vcf[(chrom, coord, ref, alt)]... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Method collects roster data from 2006 onward | def collect_new_and_prior_roster_data(self):
with open('roster_data.csv', 'w') as f: #writing header
fieldNames = ['Roster_Year', 'Player_Number', 'First_Name', 'Last_Name', 'Year',
'Position', 'Height', 'Weight', 'Home_Town', 'State_or_Country', 'High_School',
'Previou... | [
"def __collect_duke_starter_data(self):\r\n roster_year = CurrentRosterYear.get_current_roster_year().split('-')[0]\r\n fileName = '../starter_data/Duke' + roster_year + '.txt'\r\n with open(fileName, 'r') as f:\r\n stat_year = fileName[re.search('\\d', fileName).start():fileName.ind... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
removes duplicate starters in stater data set who have lower amount games started than their duplicate | def eliminate_duplicates_starter_data_set(self, fileName):
players = []
with open(fileName, 'r') as f:
for line in f:
data = line.strip().split(',')
try:
starter = Starter(data[0], data[1], data[2],
... | [
"def remove_stagnating_species(self):\n for s in self.species:\n imp = False \n\n for o in s.organisms:\n if o.fitness > s.max_fitness:\n imp = True\n\n s.max_fitness = o.fitness\n\n s.age_since_imp = 0\n\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Appends duke's current roster year data to starter_data.txt. Make sure duke's file is named 'Duke'.txt where is the current roster year | def __collect_duke_starter_data(self):
roster_year = CurrentRosterYear.get_current_roster_year().split('-')[0]
fileName = '../starter_data/Duke' + roster_year + '.txt'
with open(fileName, 'r') as f:
stat_year = fileName[re.search('\d', fileName).start():fileName.index('.txt')] #e... | [
"def addYear2Yeartxt(self):\n # r+: read and write\n File = open(os.path.join(self.PATH[\"ABM_Path\"], \"year.txt\"), \"r+\") \n # Read\n y = int(File.readline()) # current year\n y_start = File.readline() # already include \"\\n\"\n y_end = File.readline()\n # Writ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
appends the incoming starter data for this roster year to starter_data.csv | def append_new_starter_data(self):
self.collect_new_starter_data()
new_data = []
with open('incoming_starter_data.csv', 'r') as f:
for line in f:
data = line.split(',')
data[-1] = data[-1].strip() #stripping newline char
new_data.ap... | [
"def collect_new_and_prior_roster_data(self):\r\n\r\n with open('roster_data.csv', 'w') as f: #writing header\r\n fieldNames = ['Roster_Year', 'Player_Number', 'First_Name', 'Last_Name', 'Year',\r\n 'Position', 'Height', 'Weight', 'Home_Town', 'State_or_Country', 'High_School',\r\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Calls all the extract conference methods to form one data set in accolades.csv | def extract_prior_conference_data(self, debug):
if debug: print('-'* 20, 'LOADING PATRIOT LEAGUE CONFERENCE DATA', '-'*20, '\n')
self.extract_prior_patriot_league_conference_data(debug)
if debug: print('-'* 20, 'LOADING IVY LEAGUE CONFERENCE DATA', '-'*20, '\n')
self.extract_prior_iv... | [
"def extract_prior_caa_conference_data(self, debug):\r\n with open('../conference_data/CAA.txt') as f:\r\n year = accolade = first_name = last_name = college = None\r\n for line in f:\r\n if self.has_date(line):\r\n year = line.strip()\r\n els... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
extracts data out of the patriot league conference txt file in conference_data folder | def extract_prior_patriot_league_conference_data(self, debug):
with open('../conference_data/Patriot.txt') as f:
year = accolade = first_name = last_name = college = None
for line in f:
if self.has_date(line):
year = line.strip()
else:... | [
"def extract_prior_ivy_league_conference_data(self, debug):\r\n with open('../conference_data/Ivy.txt') as f:\r\n year = accolade = first_name = last_name = college = None\r\n for line in f:\r\n if self.has_date(line):\r\n year = line.strip()\r\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
extracts data out of the ivy league conference txt file in conference_data folder | def extract_prior_ivy_league_conference_data(self, debug):
with open('../conference_data/Ivy.txt') as f:
year = accolade = first_name = last_name = college = None
for line in f:
if self.has_date(line):
year = line.strip()
else:
... | [
"def extract_prior_patriot_league_conference_data(self, debug):\r\n with open('../conference_data/Patriot.txt') as f:\r\n year = accolade = first_name = last_name = college = None\r\n for line in f:\r\n if self.has_date(line):\r\n year = line.strip()\r\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
extracts prior CAA conference data and sends to accolades.csv debug whether to print debug messages (bool) | def extract_prior_caa_conference_data(self, debug):
with open('../conference_data/CAA.txt') as f:
year = accolade = first_name = last_name = college = None
for line in f:
if self.has_date(line):
year = line.strip()
else:
... | [
"def extract_prior_acc_conference_data(self, debug):\r\n year = accolade = first_name = last_name = college = None\r\n with open('../conference_data/ACC.txt', 'r') as f:\r\n for line in f:\r\n if self.has_date(line):\r\n year = line.strip()\r\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Method processes acc conference data. | def extract_prior_acc_conference_data(self, debug):
year = accolade = first_name = last_name = college = None
with open('../conference_data/ACC.txt', 'r') as f:
for line in f:
if self.has_date(line):
year = line.strip()
elif 'Team' in... | [
"def extract_prior_caa_conference_data(self, debug):\r\n with open('../conference_data/CAA.txt') as f:\r\n year = accolade = first_name = last_name = college = None\r\n for line in f:\r\n if self.has_date(line):\r\n year = line.strip()\r\n els... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Method processes A10 conference data. | def extract_prior_a10_conference_data(self, debug):
year = accolade = first_name = last_name = college = None
with open('../conference_data/A10.txt', 'r') as f:
for line in f:
if self.has_date(line):
year = line.strip()
elif 'Team' in... | [
"async def conference(\n self, event_id: str, day_id: str,\n conference_id: str, speaker_id: str) -> None:\n query = {\n \"eventId\": event_id,\n \"agenda.dayId\": day_id}\n array = \"agenda.$.conferences\"\n condition = {\"conferenceId\": conference_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Extracts conference data for Stanford University from Stanford.txt | def extract_prior_stanford_conference_data(self, debug):
with open('../conference_data/Stanford.txt', 'r') as f:
for line in f:
college = 'Stanford University'
year = line.split()[0].strip()
first_name = line.split()[1].strip()
... | [
"def extract_prior_caa_conference_data(self, debug):\r\n with open('../conference_data/CAA.txt') as f:\r\n year = accolade = first_name = last_name = college = None\r\n for line in f:\r\n if self.has_date(line):\r\n year = line.strip()\r\n els... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Extracts conference data for WAC conference from WAC.txt | def extract_prior_wac_conference_data(self, debug):
with open ('../conference_data/WAC.txt', 'r') as f:
year = college = first_name = last_name = accolade = None
for line in f:
if self.has_date(line):
year = line.split()[0].strip()
... | [
"def extract_prior_caa_conference_data(self, debug):\r\n with open('../conference_data/CAA.txt') as f:\r\n year = accolade = first_name = last_name = college = None\r\n for line in f:\r\n if self.has_date(line):\r\n year = line.strip()\r\n els... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Gets the index where the college name occurs in a chunk of patriot league conference data player a string representing a player | def __get_patriot_or_ivy_college_index(self, player):
index = 0
for chunk in player.split():
if '(' in chunk:
break
index += 1
return index | [
"def get_player_index(data, name, hide_stats=None):\n hide_stats = hide_stats or []\n criterion = (\n data.index.get_level_values(\"player\")\n .map(simplify_name)\n .str.contains(simplify_name(name))\n )\n filtered = data[criterion]\n if filtered.empty:\n logging.error(\"... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
converting a college's shortened name to full | def __convert_to_full_college_name(self, college):
if '.' in college:
college = college[0:college.index('.')]
index = {
'Lehigh' : 'Lehigh University',
'Holy Cross' : 'College of the Holy Cross',
'Bucknell' : 'Bucknell... | [
"def full_name(first_name, last_name):\n return (first_name + ' ' + last_name).title()",
"def get_short_organism_name(self, full_name):\n return self.full_name_to_short_name_map[full_name]",
"def standardize(self):\n import string\n self.full_name = string.capwords(self.full_name)",
"d... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
prints a debug message for the allconference data collection given a list of attributes | def __print_conference_data_debug_message(self, data, attributes):
print('-'*20 + 'Extracted Player Data Based On Split' + '-'*20)
print(data)
print('-'*20 + 'What Will Go to CSV File (Decoded)' + '-'*20)
print(attributes) | [
"def show_conferences(self):\n print(\"Bonjour voici la liste des conferences\")\n conferences = self.model.display_conferences() # the variable conference recover data from method display_conference\n if conferences: # if conferences exists\n for conference in conferences:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
appends parsed attributes to allconference data to 'accolades.csv' input to function are the parsed parameters | def __append_conference_datum(self, year, first_name, last_name,
accolade, college):
with open('accolades.csv', 'a', newline = '\n') as f:
writer = csv.writer(f, lineterminator = '\n')
row = [year, unidecode.unidecode(first_name), unidecode.unideco... | [
"def extract_prior_caa_conference_data(self, debug):\r\n with open('../conference_data/CAA.txt') as f:\r\n year = accolade = first_name = last_name = college = None\r\n for line in f:\r\n if self.has_date(line):\r\n year = line.strip()\r\n els... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
mycampaignstat view var data = [ | def mycampaignstat(request):
res = []
mycampaigns__ids = request.user.profile.zones.values_list(
'campaign__id', flat=True).distinct().all()
for cam in Campaign.objects.filter(
id__in=mycampaigns__ids).order_by('-start_at'):
con = Plantation.objects.filter(
user=reque... | [
"def get_campaigns_over_time(self, project, start, end):\n mc_man = MailchimpManager(project.mailchimp_api_token)\n json = mc_man.get_campaigns(start, end, project.mailchimp_list_id)\n result = []\n for item in json.get('data'):\n if(item.get('status') == 'sent'):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Fetch a token by name from the given user. | def get_by_name(self, user_id: Union[UUID, str], name: str) -> AccessToken:
raise NotImplementedError() | [
"def fetch_username_from_token() -> Any:\n import json\n user = User.query.filter(User.id == g.user.id).first()\n if user.github_token is None:\n return None\n url = 'https://api.github.com/user'\n session = requests.Session()\n session.auth = (user.email, user.github_token)\n try:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Fetch a token by its jti from the given user. | def get_by_jti(self, user_id: Union[UUID, str], jti: str) -> AccessToken:
raise NotImplementedError() | [
"def fetch_username_from_token() -> Any:\n import json\n user = User.query.filter(User.id == g.user.id).first()\n if user.github_token is None:\n return None\n url = 'https://api.github.com/user'\n session = requests.Session()\n session.auth = (user.email, user.github_token)\n try:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Revoke a token from the given user | def revoke(self, user_id: Union[UUID, str],
token_id: Union[UUID, str]) -> NoReturn:
raise NotImplementedError() | [
"def revoke_auth_token(token):\n url = settings.BASE_URL + const.OAUTH_REVOKE_TOKEN_URL\n auth = (settings.CLIENT_ID, settings.CLIENT_SECRET)\n payload = {\n 'token': token\n }\n if requests.post(url, data=payload, auth=auth).status_code == status.HTTP_200_OK:\n return Response({'result... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Delete a token from the given user | def delete(self, user_id: Union[UUID, str],
token_id: Union[UUID, str]) -> NoReturn:
raise NotImplementedError() | [
"def delete_access_token(user_key: str):\n logger.info(\"Deleting an access token from DynamoDB\")\n table.delete_item(Key={'user_key': user_key})",
"def delete_reset_token(userid: int):\n reset_token = models.ResetTokens.query.get(userid)\n\n if reset_token is not None:\n db.session.delete(res... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Set the OAuth token of the given user. | def set_for_user(self, user_id: Union[UUID, str], token: str) -> NoReturn:
raise NotImplementedError() | [
"def set_user(self, user):\n self.user = user",
"def set_auth_token(token):\n global _auth_token\n _auth_token = token",
"def set_se_token(self, user, code):\n token = self.se_client.get_token(code)\n se_profile = StackExchangeProfile(**token)\n se_profile.save()\n user.... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the OAuth token of the given user. | def get_by_user(self, user_id: Union[UUID, str]) -> OAuthToken:
raise NotImplementedError() | [
"def get_user_token(user):\n\n token, created = Token.objects.get_or_create(user=user)\n\n return token",
"def get_oauth_token(self):\n return oauth.OAuthToken.from_string(self.token)",
"def url_token(user_id):\n return user_manager.token_manager.generate_token(user_id)",
"def get_api_token(us... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the OAuth token for the given provider/providerid pair which should be uniq. | def get_by_provider(self, provider: str, provider_id: str) -> OAuthToken:
raise NotImplementedError() | [
"def get_oauth_token_key_name(self, provider):\n for _provider in self.oauth_providers:\n if _provider[\"name\"] == provider:\n return _provider.get(\"token_key\", \"oauth_token\")",
"def get_oauth_token(self):\n return oauth.OAuthToken.from_string(self.token)",
"def get_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Try to assign the moves to an existing picking that has not been reserved yet and has the same procurement group, locations and picking type (moves should already have them identical). Otherwise, create a new picking to assign them to. | def _assign_picking(self):
Picking = self.env['stock.picking']
for move in self:
move.branch_id = self.group_id.sale_id.branch_id.id
recompute = False
picking = move._search_picking_for_assignation()
if picking:
if picking.partner_id.id != ... | [
"def _picking_assign(self, cr, uid, move_ids, context=None):\n context = context or {}\n context = dict(context)\n move = self.browse(cr, uid, move_ids, context=context)[0]\n \n pick_obj = self.pool.get(\"stock.picking\")\n picks = pick_obj.search(cr, uid, [\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the endpoint value for an action. | def get_endpoint(self, action: Union[UspsEndpoint, str]) -> str:
if isinstance(action, UspsEndpoint):
action = action.value
else:
try:
action = UspsEndpoint[action.upper()].value
except KeyError:
raise ValueError(f'Invalid action: {acti... | [
"def _get_action(self, action, conn):\n if not hasattr(conn, action):\n raise AttributeError\n return getattr(conn, action)",
"def get_action(self, action):\n if self.parsed_workflow['action'].get(action, None):\n return self.parsed_workflow['action'][action]\n el... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Make a request to the USPS API. | async def send_request(self, action: Union[UspsEndpoint, str], xml: etree.Element):
api = self.get_endpoint(action)
xml = etree.tostring(xml, encoding=USPS_ENCODING, pretty_print=self.test).decode(USPS_ENCODING)
response = await self.session.post(
url=USPS_BASE_URL,
data=... | [
"def _haloapi_request(self, url, params=None):\n headers = {'Ocp-Apim-Subscription-Key': self.api_key}\n response = requests.get(url, params=params, headers=headers)\n response.raise_for_status()\n return response",
"def _do_api_call(self, session, url):\n headers = {\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Validate an iterable of addresses. | async def validate_addresses(self, addresses: Iterable[Address]):
# It's undocumented, but the USPS API will only validate 5 addresses
# at a time. As such, we need to chunk the addresses into groups of
# 5 and do calls for 5 addresses at a time.
for xml in self._build_address_xml(addres... | [
"def validate_ip_addresses(cls, v: str) -> list[str]:\n ip_addresses = v.split(',')\n for ip_address in ip_addresses:\n if not IP_ADDRESS_REGEX.match(ip_address.strip()):\n raise ValueError(f'{ip_address} is not a valid ip address')\n return ip_addresses",
"def test_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create a dataframe of emissions from power generation by fuel type in each region. kwargs would include the upstream emissions dataframe (upstream_df) if upstream emissions are being included. | def get_generation_process_df(use_alt_gen_process=None, regions=None, **kwargs):
if use_alt_gen_process is None:
use_alt_gen_process = model_specs['use_alt_gen_process']
if regions is None:
regions = model_specs['regional_aggregation']
if use_alt_gen_process is True:
try:
... | [
"def get_generation_mix_process_df(regions=None):\n from electricitylci.egrid_filter import (\n electricity_for_selected_egrid_facilities,\n )\n from electricitylci.generation_mix import (\n create_generation_mix_process_df_from_model_generation_data,\n create_generation_mix_process_df... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create a dataframe of generation mixes by fuel type in each subregion. This function imports and uses the parameter 'gen_mix_from_model_generation_data' from globals.py. If the value is False it cannot currently handle regions other than 'all', 'NERC', 'US', or a single eGRID subregion. | def get_generation_mix_process_df(regions=None):
from electricitylci.egrid_filter import (
electricity_for_selected_egrid_facilities,
)
from electricitylci.generation_mix import (
create_generation_mix_process_df_from_model_generation_data,
create_generation_mix_process_df_from_egrid... | [
"def get_generation_process_df(use_alt_gen_process=None, regions=None, **kwargs):\n if use_alt_gen_process is None:\n use_alt_gen_process = model_specs['use_alt_gen_process']\n if regions is None:\n regions = model_specs['regional_aggregation']\n\n if use_alt_gen_process is True:\n try... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Send one or more process dictionaries to be written to jsonld | def write_process_dicts_to_jsonld(*process_dicts):
from electricitylci.olca_jsonld_writer import write
all_process_dicts = dict()
for d in process_dicts:
all_process_dicts = {**all_process_dicts, **d}
olca_dicts = write(all_process_dicts, namestr)
return olca_dicts | [
"def send_data_dict(self, d):\n\t\tself.send_data(**d)",
"def write_upstream_dicts_to_jsonld(upstream_dicts):\n upstream_dicts = write_process_dicts_to_jsonld(upstream_dicts)\n return upstream_dicts",
"def send_updates(self):\n\n for client in self.client:\n for item in self.items:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Automatically load all of the upstream emissions data from the various modules. Will return a dataframe with upstream emissions from coal, natural gas, petroleum, and nuclear. | def get_upstream_process_df():
import electricitylci.coal_upstream as coal
import electricitylci.natural_gas_upstream as ng
import electricitylci.petroleum_upstream as petro
import electricitylci.nuclear_upstream as nuke
import electricitylci.power_plant_construction as const
from electricitylci... | [
"def combine_upstream_and_gen_df(gen_df, upstream_df):\n\n import electricitylci.combinator as combine\n import electricitylci.import_impacts as import_impacts\n\n print(\"Combining upstream and generation inventories\")\n combined_df = combine.concat_clean_upstream_and_plant(gen_df, upstream_df)\n c... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Conver the upstream dataframe generated by get_upstream_process_df to dictionaries to be written to jsonld. | def write_upstream_process_database_to_dict(upstream_df):
import electricitylci.upstream_dict as upd
print("Writing upstream processes to dictionaries")
upstream_dicts = upd.olcaschema_genupstream_processes(upstream_df)
return upstream_dicts | [
"def write_upstream_dicts_to_jsonld(upstream_dicts):\n upstream_dicts = write_process_dicts_to_jsonld(upstream_dicts)\n return upstream_dicts",
"def write_gen_fuel_database_to_dict(\n gen_plus_fuel_df, upstream_dict, subregion=None\n):\n from electricitylci.alt_generation import olcaschema_genprocess\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Write the upstream dictionary to jsonld. | def write_upstream_dicts_to_jsonld(upstream_dicts):
upstream_dicts = write_process_dicts_to_jsonld(upstream_dicts)
return upstream_dicts | [
"def _dump(self):\n if self._json is None:\n self._json = json.dumps(self._data, separators=(',', ':'))",
"def _write(self, content):\n val = json.dumps(content)\n P4Key.set(self.p4, self.owners_key, val)",
"def save_JSON(self):\n\n try:\n with open(self.destina... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Combine the generation and upstream dataframes into a single dataframe. The emissions represented here are the annutal emissions for all power plants. This dataframe would be suitable for further analysis. | def combine_upstream_and_gen_df(gen_df, upstream_df):
import electricitylci.combinator as combine
import electricitylci.import_impacts as import_impacts
print("Combining upstream and generation inventories")
combined_df = combine.concat_clean_upstream_and_plant(gen_df, upstream_df)
canadian_gen = ... | [
"def fusion_df(self):\n a = self.scrap_foxton()\n b = self.scrap_dexters()\n c = self.scrap_hamptons()\n \n return pd.concat([a,b,c], ignore_index=True)",
"def combine_df(self):\n \n observations_tables, header_tables, era5fb_tables = [], [], [] \n \n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This will combine the netl life cycle data for solar, geothermal, and wind, which will include impacts from construction, etc. that would be omitted from the regular sources of emissions. It also uses the alternate generation module to get power plant emissions. The two different dataframes are combined to provide a si... | def get_alternate_gen_plus_netl():
from electricitylci.model_config import eia_gen_year
import electricitylci.alt_generation as alt_gen
from electricitylci.combinator import (
concat_map_upstream_databases,
concat_clean_upstream_and_plant,
)
import electricitylci.geothermal as geo
... | [
"def combine_upstream_and_gen_df(gen_df, upstream_df):\n\n import electricitylci.combinator as combine\n import electricitylci.import_impacts as import_impacts\n\n print(\"Combining upstream and generation inventories\")\n combined_df = combine.concat_clean_upstream_and_plant(gen_df, upstream_df)\n c... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Add the upstream fuels to the generation dataframe as fuel inputs. | def add_fuels_to_gen(gen_df, fuel_df, canadian_gen, upstream_dict):
from electricitylci.combinator import add_fuel_inputs
print("Adding fuel inputs to generator emissions...")
gen_plus_fuel = add_fuel_inputs(gen_df, fuel_df, upstream_dict)
gen_plus_fuel = pd.concat([gen_plus_fuel, canadian_gen], ignore... | [
"def get_upstream_process_df():\n import electricitylci.coal_upstream as coal\n import electricitylci.natural_gas_upstream as ng\n import electricitylci.petroleum_upstream as petro\n import electricitylci.nuclear_upstream as nuke\n import electricitylci.power_plant_construction as const\n from ele... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Write the generation dataframe that has been augmented with fuel inputs to a dictionary for conversion to openlca. | def write_gen_fuel_database_to_dict(
gen_plus_fuel_df, upstream_dict, subregion=None
):
from electricitylci.alt_generation import olcaschema_genprocess
if subregion is None:
# subregion = model_specs['regional_aggregation']
#Another change to accomodate FERC consumption pulling BAs.
s... | [
"def gar_dict(df_gar, df_vf):\n \n df_gar['FileName'] = df_gar['FileName'].astype(str)\n df_vf['FileName'] = df_vf['FileName'].astype(str)\n \n df_merge = df_vf.merge(df_gar, on = 'FileName')\n \n select_columns = ['GAR_vf', \"StructuralPeriod\", 'FileName', 'OED Code', 'ID_set']\n \n df ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Reverse vowels in a string. Characters which are not vowels do not change position in string, but all vowels (y is not a vowel), should reverse their order. >>> reverse_vowels("Hello!") 'Holle!' >>> reverse_vowels("Tomatoes") 'Temotaos' >>> reverse_vowels("Reverse Vowels In A String") 'RivArsI Vewols en e Streng' rever... | def reverse_vowels(s):
# make a list of vowels then start popping them off the list
# as a vowel is encountered in the string.
VOWELS = "aAeEiIoOuU"
s_vowels = [letter for letter in s if (letter in VOWELS)]
s_out = []
for letter in s:
if (letter in VOWELS):
s_out.append(s... | [
"def reverse_vowels(s):\n vowels = 'aeiou'\n vowelslist = [char for char in s if char.lower() in vowels]\n reverse = vowelslist[::-1]\n stringlist = list(s)\n for i in range(len(stringlist)):\n if stringlist[i].lower() in vowels:\n stringlist[i] = reverse[0]\n reverse.pop... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Function to get a list containing the distinct letters from the data | def get_distinct_letters(data):
dist_letters = []
for word in data.word_lst:
for letter in word:
if letter not in dist_letters:
dist_letters.append(letter)
for letter in data.result:
if letter not in dist_letters:
dist_letters.append(letter)
return... | [
"def unique_characters(data):\n chars = []\n\n for text in data:\n chars_current = list(dict.fromkeys(text))\n chars = list(dict.fromkeys(chars + chars_current))\n\n return np.array(chars)",
"def get_alphabet() -> List:\n return list(string.ascii_lowercase)",
"def get_unique_chars(text... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return backend name used for this job | def backend_name(self):
return self._backend_name | [
"def backend_name():\n return backend()['name']",
"def backend_name(self):",
"def backend_name(self) -> str:\n c_name = self.properties.get(\"backend_name\")\n if c_name is None:\n raise ParameterError(\n f\"Adsorbate '{self.name}' does not have a property named \"\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the view of the board from the perspective of a particular player. | def player_view(self, player: Player) -> torch.Tensor:
return self.board if player == self.active_player else self.board[(1, 0), :, :] | [
"def project(self, win_width, win_height, vision_field=512, viewer_dist=5):\n factor = vision_field / (viewer_dist + self.z)\n x = self.x * factor + win_width / 2\n y = -self.y * factor + win_height / 2\n return Point3D(x, y, 1)",
"def getView(self):\n camera = self.ren.GetActiv... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Write log file content to cloud logging | def _write_to_cloud_logging(log_id, log_file_path):
# TODO(ethanbao): Turn conductor into a python object so that the logging
# client can be instance variable not global variable.
global CLOUD_LOGGING_CLIENT
if not CLOUD_LOGGING_CLIENT:
CLOUD_LOGGING_CLIENT = cloud_logging.Client()
cloud_lo... | [
"def _write_log(self,level,msg):\n with open(self.file_name,'a') as log_file:\n log_file.write(\"[{0}] {1}\\n\".format(level,msg))",
"def write_to_logs(self, data):\n time_now = str(datetime.now())\n time_now = time_now[:time_now.index(\".\")]\n try:\n with open(f... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Execute the remote artman tasks. It execute the artman command with a customized artman user config and additional pipeline arguments. | def _execute_task(artman_user_config, task):
task_payload = base64.b64decode(task['pullTaskTarget']['payload'])
artman_args = task_payload.decode("utf-8").split(' ')
artman_args.append('--user-config')
artman_args.append(artman_user_config)
main.main(*artman_args) | [
"def cli_cmd(host, module, args):\n if len(args) > 1:\n args = [' '.join(args)]\n\n command = ['ansible', host, '-m', module, '-a'] + args\n process = LocalExec(command)\n process.run_and_wait()\n return process",
"def execute(self, env, args):\n\n tasks = env.... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Prepare the temporary folder to task execution. It downloads the googleapis repo and adds a onetime artman config yaml. | def _prepare_dir(source_repo="https://github.com/googleapis/googleapis.git"):
task_id = str(uuid.uuid4())[0:8]
repo_root = '/tmp/artman/%s' % task_id
logger.info('Prepare a temporary root repo: %s' % repo_root)
try:
os.makedirs(repo_root)
except OSError as e:
raise e
logger.info... | [
"def setup_environment():\n\trootFolder = get_root_folder()\n\tfolderRawJSON = rootFolder + '/data/raw/json/'\n\tfolderRawXML = rootFolder + '/data/raw/xml/'\n\tfolderRawCSV = rootFolder + '/data/raw/csv/'\n\tfolderSQLite3 = rootFolder + '/data/sqlite3/'\n\tif not os.path.exists(folderRawCSV):\n\t\tos.makedirs(fold... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Shows if robot is still dying, or already dead. | def dying(self):
return len(self.death_sprites) > int(self.death_sprite_current) >= 0 | [
"def checkDead(self):\n if self.char.y > self.platforms[self.lowestPlatIndex].y + 1000 or \\\n self.char.health <= 0:\n self.char.lives -= 1\n self.char.hasDied = True\n self.gameWindow.fill(BLACK)\n lifeWord = \"\"\n # Grammar for the win\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Checks the field's data array is broadcastable to a shape. | def _is_broadcastable(self, shape):
shape0 = getattr(self, "shape", None)
if shape is None:
return False
shape1 = shape
if tuple(shape1) == tuple(shape0):
# Same shape
return True
ndim0 = len(shape0)
ndim1 = len(shape1)
if no... | [
"def _check_can_broadcast_to(shape, target_shape):\n ndim = len(shape)\n ndim_target = len(target_shape)\n if ndim > ndim_target:\n return False\n for i, j in zip(reversed(shape), reversed(target_shape)):\n if i not in (1, j):\n return False\n return True",
"def _is_broadca... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Convert the given axes to their positions in the data. Any domain axes that are not spanned by the data are ignored. If there is no data then an empty list is returned. | def _axis_positions(self, axes, parse=True, return_axes=False):
data_axes = self.get_data_axes(default=None)
if data_axes is None:
return []
if parse:
axes = self._parse_axes(axes)
axes = [a for a in axes if a in data_axes]
positions = [data_axes.index(a... | [
"def data_to_axes(self, x, y):\n # pylint: disable=E0633 # unpacking-non-sequence\n x_geo, y_geo = self.data_to_geo(x, y)\n x, y = self.geo_to_axes(x_geo, y_geo)\n return (x, y)",
"def axes_to_data(self, x, y):\n # pylint: disable=E0633 # unpacking-non-sequence\n x_geo,... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Implement binary arithmetic and comparison operations on the master data array with metadataaware broadcasting. It is intended to be called by the binary arithmetic and comparison methods, such as `__sub__`, `__imul__`, `__rdiv__`, `__lt__`, etc. | def _binary_operation(self, other, method):
debug = is_log_level_debug(logger)
if isinstance(other, Query):
# --------------------------------------------------------
# Combine the field with a Query object
# --------------------------------------------------------
... | [
"def _binary_op(self, operator, other):\n if isinstance(other, list):\n other = np.asarray(other)\n if isinstance(other, self.__class__):\n if self.check_dimensions(other):\n inner_res = operator(self._raw_ws, other._raw_ws)\n else:\n rais... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Changes the axes of the field's cell methods so they conform. | def _conform_cell_methods(self):
axis_map = {}
for cm in self.cell_methods(todict=True).values():
for axis in cm.get_axes(()):
if axis in axis_map:
continue
if axis == "area":
axis_map[axis] = axis
... | [
"def _update_cell_methods(\n self,\n method=None,\n domain_axes=None,\n input_axes=None,\n within=None,\n over=None,\n verbose=None,\n ):\n debug = is_log_level_debug(logger)\n\n original_cell_methods = self.cell_methods(todict=True)\n if debu... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Conforms the field with another, ready for data broadcasting. Note that the other field, other, is not changed inplace. | def _conform_for_data_broadcasting(self, other):
other = self._conform_for_assignment(other, check_coordinates=True)
# Remove leading size one dimensions
ndiff = other.ndim - self.ndim
if ndiff > 0 and set(other.shape[:ndiff]) == set((1,)):
for i in range(ndiff):
... | [
"def __ior__(self, other):\r\n self.update(other)\r\n return self",
"def _binary_operation(self, other, method):\n debug = is_log_level_debug(logger)\n\n if isinstance(other, Query):\n # --------------------------------------------------------\n # Combine the fiel... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Calculate area weights from X and Y dimension coordinate constructs. | def _weights_area_XY(
self,
comp,
weights_axes,
auto=False,
measure=False,
radius=None,
methods=False,
):
xkey, xcoord = self.dimension_coordinate(
"X", item=True, default=(None, None)
)
ykey, ycoord = self.dimension_coordin... | [
"def polygon_area(x, y):\r\n return 0.5 * np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)))",
"def get_area(x_array, y_array):\n dist_x = max(x_array) - min(x_array)\n dist_y = max(y_array) - min(y_array)\n return dist_x * dist_y",
"def AreaForShape(shape):\n total_A = 0\n\n for polygon ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return a scalar field of weights with long_name ``'weight'``. | def _weights_field_scalar(self, methods=False):
data = Data(1.0, "1")
f = type(self)()
f.set_data(data, copy=False)
f.long_name = "weight"
f.comment = f"Weights for {self!r}"
return f | [
"def _weight_readout(weight: _sb.ArrayLike) -> la.lnarray:\n if weight is None:\n return None\n return np.unique(weight, return_inverse=True)[1].astype(int)",
"def weight_field(self, f):\n if f is None:\n raise ValueError(\"Field name cannot be None.\")\n self._assert_ml_fiel... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Creates volume weights for polygon geometry cells. | def _weights_geometry_volume(
self,
comp,
weights_axes,
auto=False,
measure=False,
radius=None,
great_circle=False,
methods=False,
):
axis, aux_X, aux_Y, aux_Z = self._weights_yyy(
"polygon", methods=methods, auto=auto
)
... | [
"def weighted_bin_volumes(self, attach_units=True):\n meshgrid = self.meshgrid(entity='weighted_bin_widths', attach_units=False)\n volumes = reduce(mul, meshgrid)\n if attach_units:\n # NOTE we use the units from `weighted_bin_widths` because these\n # can be different fro... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Scale the weights so that they are <= scale. | def _weights_scale(self, w, scale):
if scale is None:
return w
if scale <= 0:
raise ValueError(
"Can't set 'scale' parameter to a negative number. "
f"Got {scale!r}"
)
w = w / w.max()
if scale != 1:
w = w *... | [
"def scale_weight(w, scale=2.):\n\treturn scale * np.abs(w)",
"def applyScale(self, scale):\n pass",
"def adjust_weight(self, new_weight):\n self.weight = new_weight",
"def normalize_weight(w):\n return w.numpy() / np.linalg.norm(w.numpy())",
"def mutate_weight(self):\n self.weig... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
A `Flags` object containing selfdescribing CF flag values. Stores the `flag_values`, `flag_meanings` and `flag_masks` CF properties in an internally consistent manner. Examples >>> f.Flags | def Flags(self):
try:
return self._custom["Flags"]
except KeyError:
raise AttributeError(
f"{self.__class__.__name__!r} object has no attribute 'Flags'"
) | [
"def flag_values(self):\n try:\n return self.Flags.flag_values\n except AttributeError:\n raise AttributeError(\n f\"{self.__class__.__name__!r} doesn't have CF property \"\n \"'flag_values'\"\n )",
"def getFlags(self):\n\n # Gene... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
A numpy array view of the data array. Deprecated at version 3.14.0. Data are now stored as `dask` arrays for which, in general, a numpy array view is not robust. Changing the elements of the returned view changes the data array. | def varray(self):
_DEPRECATION_ERROR_ATTRIBUTE(
self,
"varray",
message="Data are now stored as `dask` arrays for which, "
"in general, a numpy array view is not robust.",
version="3.14.0",
removed_at="5.0.0",
) # pragma: no cover | [
"def numpy_array(self):\n result = self._data.view()\n result.setflags(write=False)\n return result",
"def numpy_array(self, data):\n self._data = data.view()\n self._data_needs_writeback = True\n data.setflags(write=False)",
"def data(self):\n if self._data is N... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The flag_values CF property. Provides a list of the flag values. Use in conjunction with | def flag_values(self):
try:
return self.Flags.flag_values
except AttributeError:
raise AttributeError(
f"{self.__class__.__name__!r} doesn't have CF property "
"'flag_values'"
) | [
"def GetAllFlags(self):\n return self.flags.values()",
"def get_util_flag_choices(self):\n return list(self.handler_flags.keys())",
"def values(self) -> List[CatalogDataProductCustomOptionValuesInterface]:\n return self._values",
"def allowed_values(cls: Type[_ActBlockStyle]) -> List[str]:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The flag_masks CF property. Provides a list of bit fields expressing Boolean or enumerated | def flag_masks(self):
try:
return self.Flags.flag_masks
except AttributeError:
raise AttributeError(
f"{self.__class__.__name__!r} doesn't have CF property "
"'flag_masks'"
) | [
"def GetAllFlags(self):\n return self.flags.values()",
"def parse(cls, flags):\n masks = []\n remain_flags = flags\n # pylint - Non-iterable value cls is used in an iterating context\n for flag in cls: # pylint: disable=E1133\n if flags & flag.value:\n rem... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The flag_meanings CF property. Use in conjunction with `flag_values` to provide descriptive words or phrases for each flag value. If multiword phrases are used to describe the flag values, then the words within a phrase should be connected with underscores. See | def flag_meanings(self):
try:
return " ".join(self.Flags.flag_meanings)
except AttributeError:
raise AttributeError(
f"{self.__class__.__name__!r} doesn't have CF property "
"'flag_meanings'"
) | [
"def check_flags(self, ds):\n ret_val = []\n\n for k, v in ds.dataset.variables.iteritems():\n\n flag_values = getattr(v, \"flag_values\", None)\n flag_masks = getattr(v, \"flag_masks\", None)\n flag_meanings = getattr(v, \"flag_meanings\", None)\n\n if... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The Conventions CF property. The name of the conventions followed by the field. See | def Conventions(self):
return self.get_property("Conventions") | [
"def cindex(self):\n return self.short_spec_name.capitalize()",
"def standardize(self):\n import string\n self.full_name = string.capwords(self.full_name)",
"def get_constraint_name(self, cons):\n if cons.name is not None:\n ret = cons.name\n else:\n ret ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The featureType CF property. The type of discrete sampling geometry, such as ``point`` or | def featureType(self):
return self.get_property("featureType", default=AttributeError()) | [
"def get_type(self):\n return FeatureType.VALUE",
"def get_feature_type(\n dtype):\n return _NP_DTYPE_KIND_TO_FEATURE_TYPE.get(dtype.kind)",
"def get_type(self):\n return FeatureType.VALUE\n # pretty sure its never interpreter type\n # TODO: think about that",
"def feature_type... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The institution CF property. Specifies where the original data was produced. See | def institution(self):
return self.get_property("institution") | [
"def _xml_institution(self):\n institution = E.institution(E.institution_name(self.institution_name))\n\n if self.institution_acronym:\n institution.append(E.institution_acronym(self.institution_acronym))\n\n if self.institution_place:\n institution.append(E.institution_pl... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The references CF property. Published or webbased references that describe the data or methods used to produce it. See | def references(self):
return self.get_property("references") | [
"def dam_cfm_reference_properties(self) -> ConfigNodePropertyArray:\n return self._dam_cfm_reference_properties",
"def references(self):\n return ( rd for rd in ReferenceDatum.all() if self == rd.property )",
"def getRefs(self, **kwargs):\n return []",
"def references(self):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The standard_error_multiplier CF property. If a data variable with a `standard_name` modifier of ``'standard_error'`` has this attribute, it indicates that the values are the stated multiple of one standard error. See | def standard_error_multiplier(self):
return self.get_property("standard_error_multiplier") | [
"def error_rate(self):\n return None",
"def get_combined_error(self):\n return np.sqrt(self.gauss_error ** 2 + self.max_error ** 2)",
"def std_penalty(x, multiplier=1.0):\n x = x.reshape(-1, x.shape[-1])\n mean = jnp.mean(x, axis=0)\n var = ((x - mean)**2).mean(0)\n # L2 loss on std < 1.0\n p... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Remove all of the CFAnetCDF file name substitutions. | def cfa_clear_file_substitutions(
self,
):
out = super().cfa_clear_file_substitution()
for c in self.constructs.filter_by_data(todict=True).values():
out.update(c.cfa_clear_file_substitutions())
return out | [
"def clean_filename(filename, replace='', acceptable_chars=('.', '_')):\n return \"\".join([c if c.isalpha() or c.isdigit() or c in acceptable_chars else replace for c in filename])",
"def clean_sontek_files(self):\r\n\r\n file_list = copy.deepcopy(self.files)\r\n for filename in file_list:\r\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Remove a file location inplace. All data definitions that reference files will have references to files in the given location removed from them. | def del_file_location(
self,
location,
constructs=True,
):
location = abspath(location).rstrip(sep)
super().del_file_location(location)
if constructs:
for c in self.constructs.filter_by_data(todict=True).values():
c.del_file_location(locat... | [
"def remove_location(self, location=None):\n raise NotImplementedError",
"def remove_file(fname):\r\n os.remove(fname)\r\n __remove_pyc_pyo(fname)",
"def remove(self, file):\n pass",
"def delete_data_file(fname):\n targ = os.path.join(os.path.dirname(__file__), 'data', fname)\n if os... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return the radius of a latitudelongitude plane defined in spherical polar coordinates. The radius is taken from the datums of any coordinate reference constructs, but if and only if this is not possible then a default value may be used instead. | def radius(self, default=None):
radii = []
for cr in self.coordinate_references(todict=True).values():
r = cr.datum.get_parameter("earth_radius", None)
if r is not None:
r = Data.asdata(r)
if not r.Units:
r.override_units("m", i... | [
"def radius(self):\n return self.get_planet_radius(unit='Rjup')",
"def get_radius(self):\n return np.degrees(self.bounding_circle[2])",
"def sphere_radius(self) -> float:\n return self.GetSphereRadius()",
"def getRadius(self) -> \"float\":\n return _coin.SbSphere_getRadius(self)",
... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Map the axis identifiers of the field to their equivalent axis identifiers of another. | def map_axes(self, other):
s = self.analyse_items()
t = other.analyse_items()
id_to_axis1 = t["id_to_axis"]
out = {}
for axis, identity in s["axis_to_id"].items():
if identity in id_to_axis1:
out[axis] = id_to_axis1[identity]
return out | [
"def map_to_axes(self, x, y):\n # pylint: disable=E0633 # unpacking-non-sequence\n x_geo, y_geo = self.map_to_geo(x, y)\n x, y = self.geo_to_axes(x_geo, y_geo)\n return (x, y)",
"def axis(self, key):\n raise NotImplementedError(\"Axis depends on indexing convention of \"\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Close all files referenced by the field construct. Deprecated at version 3.14.0. All files are now automatically closed when not being accessed. Note that a closed file will be automatically reopened if its contents are subsequently required. | def close(self):
_DEPRECATION_ERROR_METHOD(
self,
"close",
"All files are now automatically closed when not being accessed.",
version="3.14.0",
removed_at="5.0.0",
) # pragma: no cover | [
"def close_files(self):\n\t\tpass",
"def close(self):\n for f in self.fs: f.close()",
"def close(self):\n # Free memory as best we can\n del self._file\n self._file = None",
"def _close_file(self):\n self._iostream.close()\n self._fileobj.close()",
"def close(self):... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns True if the specified axis is cyclic. | def iscyclic(self, *identity, **filter_kwargs):
axis = self.domain_axis(
*identity, key=True, default=None, **filter_kwargs
)
if axis is None:
raise ValueError("Can't identify unique domain axis")
return axis in self.cyclic() | [
"def iscyclic(self, axis):\n axis = self._parse_axes(axis)\n if len(axis) != 1:\n raise ValueError(\"TODO\")\n\n return axis[0] in self.cyclic()",
"def _get_isCircular(self) -> \"bool\" :\n return _core.EllipticalArc2D__get_isCircular(self)",
"def is_dependency_acyclic(sel... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Join a sequence of fields together. This is different to `cf.aggregate` because it does not account for all metadata. For example, it assumes that the axis order is the same in each field. | def concatenate(
cls, fields, axis=0, cull_graph=False, relaxed_units=False, copy=True
):
if isinstance(fields, cls):
return fields.copy()
field0 = fields[0]
if copy:
out = field0.copy()
if len(fields) == 1:
return out
new_data =... | [
"def papa_join(*fields):\n strings = []\n for field in fields:\n if field:\n if isinstance(field, list):\n strings += field\n else:\n strings.append(field)\n else:\n strings.append('')\n ret... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Collapse the data values that lie in Ndimensional bins. The data values of the field construct are binned according to how they correspond to the Ndimensional histogram bins of another set of variables (see `cf.histogram` for details), and each bin of values is collapsed with one of the collapse methods allowed by the ... | def bin(
self,
method,
digitized,
weights=None,
measure=False,
scale=None,
mtol=1,
ddof=1,
radius="earth",
great_circle=False,
return_indices=False,
verbose=None,
):
debug = is_log_level_debug(logger)
if... | [
"def _compute_binned_metric(out, y, bins, num_bins, metric_f):\n metric_vals = []\n for bin_idx in range(num_bins):\n mask = bins == bin_idx\n if np.sum(mask) > 0:\n vals = metric_f(out[mask], y[mask])\n metric_vals.append(vals)\n else:\n metric_vals.appen... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
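An illustrative sketch of a one-dimensional binned collapse, assuming cf-python is installed and that `digitize` is used to build the bin-index field passed as `digitized` (as in the library's documentation):

```python
import cf  # assumed available

f = cf.example_field(0)
# digitize the data values into 3 bins, then collapse the values in each bin
indices, bins = f.digitize(3, return_bins=True)
b = f.bin("mean", digitized=indices)   # mean of the field's values per bin
print(b.array)
```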
Collapse axes of the field. Collapsing one or more dimensions reduces their size and replaces the data along those axes with representative statistical values. The result is a new field construct with consistent metadata for the collapsed values. By default all axes with size greater than 1 are collapsed completely (i.... | def collapse(
self,
method,
axes=None,
squeeze=False,
mtol=1,
weights=None,
ddof=1,
a=None,
inplace=False,
group=None,
regroup=False,
within_days=None,
within_years=None,
over_days=None,
over_years=No... | [
"def _collapse_grouped(\n self,\n method,\n axis,\n within=None,\n over=None,\n within_days=None,\n within_years=None,\n over_days=None,\n over_years=None,\n group=None,\n group_span=None,\n group_contiguous=False,\n mtol=Non... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
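A minimal sketch of the most common call pattern for `collapse`, assuming cf-python and `cf.example_field`:

```python
import cf  # assumed available

f = cf.example_field(0)
# area-weighted global mean: both horizontal axes collapse to size 1
g = f.collapse("area: mean", weights=True)
print(g.data)
```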
Implements a grouped collapse on a field. A grouped collapse is one for which an axis is not collapsed completely to size 1. | def _collapse_grouped(
self,
method,
axis,
within=None,
over=None,
within_days=None,
within_years=None,
over_days=None,
over_years=None,
group=None,
group_span=None,
group_contiguous=False,
mtol=None,
ddof=No... | [
"def collapse(\n self,\n method,\n axes=None,\n squeeze=False,\n mtol=1,\n weights=None,\n ddof=1,\n a=None,\n inplace=False,\n group=None,\n regroup=False,\n within_days=None,\n within_years=None,\n over_days=None,\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
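`_collapse_grouped` is internal; in practice the grouped path is reached through `collapse` with a `group` keyword. A hedged sketch, assuming `cf.example_field(2)` supplies a field with a monthly time axis as in the library's documentation:

```python
import cf  # assumed available

f = cf.example_field(2)                    # assumed: monthly time axis
g = f.collapse("T: mean", group=cf.M(3))   # non-overlapping 3-month group means
print(g.shape)
```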
Prepares for a collapse where the group is a data interval. | def _data_interval(
classification,
n,
coord,
interval,
lower,
upper,
lower_limit,
upper_limit,
group_by,
extra_condition=None,
):
group_by_coords = group_by == "coords"
... | [
"def _collapse_grouped(\n self,\n method,\n axis,\n within=None,\n over=None,\n within_days=None,\n within_years=None,\n over_days=None,\n over_years=None,\n group=None,\n group_span=None,\n group_contiguous=False,\n mtol=Non... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
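`_data_interval` prepares the case where `group` is given as a data interval rather than a number of groups. A hedged public-API sketch of that case, assuming cf-python and `cf.example_field`:

```python
import cf  # assumed available

f = cf.example_field(0)
# group the longitude axis into contiguous 90-degree intervals, then average
g = f.collapse("X: mean", group=cf.Data(90, "degrees"))
print(g.shape)
```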
Update the cell methods. | def _update_cell_methods(
self,
method=None,
domain_axes=None,
input_axes=None,
within=None,
over=None,
verbose=None,
):
debug = is_log_level_debug(logger)
original_cell_methods = self.cell_methods(todict=True)
if debug:
lo... | [
"def CellMethods(self):\n raise DeprecationError(\n f\"{self.__class__.__name__} attribute 'CellMethods' has been \"\n \"deprecated at version 3.0.0 and is no longer available \"\n \"and will be removed at v4.0.0. \"\n \"Use 'cell_methods' instead.\"\n )",
... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
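`_update_cell_methods` is the bookkeeping step behind a collapse; its effect is visible on the output field. A small sketch, assuming cf-python and `cf.example_field`:

```python
import cf  # assumed available

f = cf.example_field(0)
g = f.collapse("area: mean", weights=True)
# the collapse appended a matching cell method construct to the result
print(g.cell_methods())
```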
Create indices that define a subspace of the field construct. The subspace is defined by identifying indices based on the metadata constructs. Metadata constructs are selected by conditions specified on their data. Indices for subspacing are then automatically inferred from where the conditions are met. The returned tu... | def indices(self, *mode, **kwargs):
if "exact" in mode:
_DEPRECATION_ERROR_ARG(
self,
"indices",
"exact",
"Keywords are now never interpreted as regular expressions.",
version="3.0.0",
removed_at="4.0.0",... | [
"def isel(self, idx=None, axis=0, **kwargs) -> \"DataArray\":\n if isinstance(self.geometry, Grid2D) and (\"x\" in kwargs and \"y\" in kwargs):\n idx_x = kwargs[\"x\"]\n idx_y = kwargs[\"y\"]\n return self.isel(x=idx_x).isel(y=idx_y)\n for dim in kwargs:\n i... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
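A minimal sketch of metadata-driven subspacing with `indices`, assuming cf-python, `cf.example_field` and the `cf.wi` (within) query constructor:

```python
import cf  # assumed available

f = cf.example_field(0)
# indices of the cells whose longitude lies between 30 and 90 degrees east
ind = f.indices(longitude=cf.wi(30, 90))
g = f[ind]   # apply the inferred indices to take the subspace
print(g.shape)
```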
Compute non-parametric vertical coordinates. When vertical coordinates are a function of horizontal location as well as parameters which depend on vertical location, they cannot be stored in a vertical dimension coordinate construct. In such cases a parametric vertical dimension coordinate construct is stored and a coor... | def compute_vertical_coordinates(
self, default_to_zero=True, strict=True, inplace=False, verbose=None
):
f = _inplace_enabled_define_and_cleanup(self)
detail = is_log_level_detail(logger)
debug = is_log_level_debug(logger)
for cr in f.coordinate_references(todict=True).val... | [
"def calculate_vertical_components(self, points, prev_horizontal, prev_vertical, px_size):\n\n if prev_horizontal is None:\n # It's a new species, so it starts at the bottom\n points[1] = 0\n points[3] = 0\n else:\n points[1] = prev_horizontal['quad'].points... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
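A hedged sketch, assuming `cf.example_field(1)` provides a field with a parametric (atmosphere hybrid height) vertical coordinate, as in the library's documentation:

```python
import cf  # assumed available

f = cf.example_field(1)               # assumed: hybrid height vertical axis
g = f.compute_vertical_coordinates()
# the computed values are stored as a new auxiliary coordinate ("altitude"
# for hybrid height), when the computation is possible
print(g.auxiliary_coordinate("altitude", default=None))
```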
Perform moving window calculations along an axis. Moving mean, sum, and integral calculations are possible. By default moving means are unweighted, but weights based on the axis cell sizes, or custom weights, may be applied to the calculation via the weights parameter. By default moving integrals must be weighted. When ap... | def moving_window(
self,
method,
window_size=None,
axis=None,
weights=None,
mode=None,
cval=None,
origin=0,
scale=None,
radius="earth",
great_circle=False,
inplace=False,
):
method_values = ("mean", "sum", "integ... | [
"def movingsum(x, window, skip=0, axis=-1, norm=False):\n return mov_sum(x, window, skip=skip, axis=axis, norm=norm)",
"def window(w, func=hanning, axis=0): ###\n if isnumpyarray(w):\n y = w\n elif hasattr(w, \"y\"):\n w = w.copy()\n y = w.y\n else:\n raise TypeError(\"don... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
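A minimal sketch of a weighted moving mean, assuming cf-python and `cf.example_field`:

```python
import cf  # assumed available

f = cf.example_field(0)
# 3-point moving mean along the X (longitude) axis, weighted by cell sizes
g = f.moving_window("mean", window_size=3, axis="X", weights=True)
print(g.shape)   # same shape as f; edge cells may be masked
```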
Convolve the field construct along the given axis with the specified filter. The magnitude of the integral of the filter (i.e. the sum of the window weights defined by the window parameter) affects the convolved values. For example, window weights of ``[0.2, 0.2, 0.2, 0.2, 0.2]`` will produce a non-weighted 5-point runnin... | def convolution_filter(
self,
window=None,
axis=None,
mode=None,
cval=None,
origin=0,
update_bounds=True,
inplace=False,
weights=None,
i=False,
):
if weights is not None:
_DEPRECATION_ERROR_KWARGS(
se... | [
"def convolve_complex_1d(\n tensor: tf.Tensor,\n filter: tf.Tensor,\n stride: int = 1,\n padding: str = \"VALID\",\n):\n if tensor.dtype != filter.dtype:\n raise ValueError(\"`tensor` and `filter` must have same dtype got `{}`\"\n \"\".format([tensor.dtype, filter.dtype]))\n fil... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
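A short sketch matching the 5-point running mean mentioned in the docstring above, assuming cf-python and `cf.example_field`:

```python
import cf  # assumed available

f = cf.example_field(0)
# non-weighted 5-point running mean along the longitude axis
g = f.convolution_filter([0.2, 0.2, 0.2, 0.2, 0.2], axis="X")
print(g.shape)
```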
Return the field cumulatively summed along the given axis. The cell bounds of the axis are updated to describe the range over which the sums apply, and a new "sum" cell method construct is added to the resulting field construct. | def cumsum(
self, axis, masked_as_zero=False, coordinate=None, inplace=False
):
# TODODASKAPI
if masked_as_zero:
_DEPRECATION_ERROR_KWARGS(
self,
"cumsum",
{"masked_as_zero": None},
message="",
versio... | [
"def cumsum(self, axis=0):\n return self.apply(lambda x: x.cumsum(), axis=axis)",
"def cumsum(x, axis=0):\n\treturn tf.cumsum(x, axis=axis)",
"def cumsum(a, axis=None, dtype=None, out=None):\n return _math.scan_core(a, axis, _math.scan_op.SCAN_SUM, dtype, out)",
"def cumsum(self, dim, dtype=None): #... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
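A minimal sketch of `cumsum`, assuming cf-python and `cf.example_field`:

```python
import cf  # assumed available

f = cf.example_field(0)    # (latitude, longitude) data
g = f.cumsum("longitude")  # running sum along the longitude axis
print(g.array[:, -1])      # last column holds each row's total
```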
Return the indices of the maximum values along an axis. If no axis is specified then the returned index locates the maximum of the whole data. In case of multiple occurrences of the maximum values, the indices corresponding to the first occurrence are returned. Performance: If the data index is returned as a `tuple` (se... | def argmax(self, axis=None, unravel=False):
if axis is not None:
axis = self.domain_axis(axis, key=True)
axis = self.get_data_axes().index(axis)
return self.data.argmax(axis=axis, unravel=unravel) | [
"def extract_max_inds(grid, axis=-1):\n\n # get shape of `grid` without `axis`\n shape = np.delete(np.asarray(grid.shape), axis)\n\n # get indices to index maximum values along `axis`\n iind = np.meshgrid(*(range(f) for f in shape[::-1]))\n if len(iind) > 1:\n iind = [iind[1], iind[0], *iind[2... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
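A small sketch of both call forms, assuming cf-python and `cf.example_field`:

```python
import cf  # assumed available

f = cf.example_field(0)
print(f.argmax(unravel=True))   # (i, j) position of the overall maximum
print(f.argmax(axis="Y"))       # per-column index of the maximum along the Y axis
```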
Return the indices of the minimum values along an axis. If no axis is specified then the returned index locates the minimum of the whole data. In case of multiple occurrences of the minimum values, the indices corresponding to the first occurrence are returned. Performance: If the data index is returned as a `tuple` (se... | def argmin(self, axis=None, unravel=False):
if axis is not None:
axis = self.domain_axis(axis, key=True)
axis = self.get_data_axes().index(axis)
return self.data.argmin(axis=axis, unravel=unravel) | [
"def min(self,*,axis=1):\n try:\n mins = np.amin(self.data,axis=axis).squeeze()\n if mins.size == 1:\n return np.asscalar(mins)\n return mins\n except ValueError:\n raise ValueError(\"Empty RegularlySampledAnalogSignalArray cannot calculate mi... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
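The companion call to `argmax` above; the same assumptions apply:

```python
import cf  # assumed available

f = cf.example_field(0)
print(f.argmin(unravel=True))   # (i, j) position of the overall minimum
```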
Permute the axes of the data array. By default the order of the axes is reversed, but any ordering may be specified by selecting the axes of the output in the required order. By default metadata constructs are not transposed, but they may be if the constructs parameter is set. | def transpose(
self,
axes=None,
constructs=False,
inplace=False,
items=True,
i=False,
**kwargs,
):
if not items:
_DEPRECATION_ERROR_KWARGS(
self,
"transpose",
{"items": items},
... | [
"def flip_axes(data, perms, flips):\n data = np.transpose(data, perms)\n for axis in np.nonzero(flips)[0]:\n data = nib.orientations.flip_axis(data, axis)\n return data",
"def __rearrange_axes(self, new_dataset, new_order = None):\n new_dataset._axes = {}\n\n\n for i,dim in enumerate... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
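A minimal `transpose` sketch, assuming cf-python and `cf.example_field`:

```python
import cf  # assumed available

f = cf.example_field(0)       # data axes: latitude, longitude
g = f.transpose()             # default: reverse the axis order
h = f.transpose(["X", "Y"])   # or name the new order explicitly
print(f.shape, g.shape, h.shape)
```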
Return the position in the data of a domain axis construct. | def domain_axis_position(self, *identity, **filter_kwargs):
key = self.domain_axis(*identity, key=True)
return self.get_data_axes().index(key) | [
"def _get_origin(self, axis):\n return axis.values[0]",
"def index_of(self, axis):\n return _ffi_api.LayoutIndexOf(self, axis) # type: ignore",
"def getPosOnAxis(axSlope,axX,axY,dataX,dataY):\n newY = axY + (axSlope**2 * dataY) - axSlope*(axX - dataX)\n newY /= axSlope**2 + 1\n newX = (axS... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
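A one-line sketch, assuming cf-python and `cf.example_field`:

```python
import cf  # assumed available

f = cf.example_field(0)
print(f.domain_axis_position("longitude"))   # position of that axis in f.shape
```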
Return the size of a domain axis construct. | def axis_size(
self, *identity, default=ValueError(), axes=None, **filter_kwargs
):
if axes:
_DEPRECATION_ERROR_KWARGS(
self,
"axis_size",
"Use keyword 'identity' instead.",
version="3.0.0",
removed_at="4.0.0... | [
"def domain_size(domain):\n fixed_domain_sizes = {\n \"current collector\": 3,\n \"negative particle\": 5,\n \"positive particle\": 7,\n \"negative electrode\": 11,\n \"separator\": 13,\n \"positive electrode\": 17,\n \"negative particle size\": 19,\n \"pos... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
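The size counterpart to the previous entry; the same assumptions apply:

```python
import cf  # assumed available

f = cf.example_field(0)
print(f.axis_size("X"))   # number of cells spanned by the X (longitude) axis
```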
Expand the field construct by adding a halo to its data. The halo may be applied over a subset of the data dimensions and each dimension may have a different halo size (including zero). The halo region is populated with a copy of the proximate values from the original data. The metadata constructs are similarly extende... | def halo(
self,
depth,
axes=None,
tripolar=None,
fold_index=-1,
inplace=False,
verbose=None,
size=None,
):
f = _inplace_enabled_define_and_cleanup(self)
# TODODASKAPI
if size is not None:
_DEPRECATION_ERROR_KWARGS(
... | [
"def halo(\n self,\n size,\n axes=None,\n tripolar=None,\n fold_index=-1,\n inplace=False,\n verbose=None,\n ):\n _kwargs = [\"{}={!r}\".format(k, v) for k, v in locals().items()]\n _ = \"{}.halo(\".format(self.__class__.__name__)\n logger.inf... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
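A minimal `halo` sketch, assuming cf-python and `cf.example_field`:

```python
import cf  # assumed available

f = cf.example_field(0)   # shape (5, 8)
g = f.halo(1)             # one-cell halo on every data axis
print(g.shape)            # (7, 10): edge values copied into the halo cells
```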
Compute percentiles of the data along the specified axes. The default is to compute the percentiles along a flattened version of the data. If the input data are integers, or floats smaller than float64, or the input data contains missing values, then the output data type is float64. Otherwise, the output data type is the s... | def percentile(
self,
ranks,
axes=None,
method="linear",
squeeze=False,
mtol=1,
interpolation=None,
):
# TODODASKAPI: interpolation -> method
if interpolation is not None:
_DEPRECATION_ERROR_KWARGS(
self,
... | [
"def percentile_per_dim(x, q):\n out = np.zeros(x.shape[1])\n for i in range(len(out)):\n out[i] = np.percentile(x[:,i], q)\n return out",
"def calculate_percentile(data_list, percentile):\n return numpy.percentile(data_list, percentile)",
"def calculate_percentiles(self):\n self.perce... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
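A short sketch of both the flattened and per-axis forms, assuming cf-python and `cf.example_field`:

```python
import cf  # assumed available

f = cf.example_field(0)
p = f.percentile([25, 50, 75])           # quartiles over the flattened data
q = f.percentile(90, axes="longitude")   # 90th percentile along longitude only
print(p.shape, q.shape)
```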
Flatten axes of the field. Any subset of the domain axes may be flattened. The shape of the data may change, but the size will not. Metadata constructs whose data spans the flattened axes will either themselves be flattened, or else removed. Cell method constructs that apply to the flattened axes will be removed or, if... | def flatten(self, axes=None, return_axis=False, inplace=False):
f = _inplace_enabled_define_and_cleanup(self)
data_axes = self.get_data_axes()
if axes is None:
axes = data_axes
else:
if isinstance(axes, (str, int)):
axes = (axes,)
ax... | [
"def reshape_flat_field(self, f = None):\n\n if f is None:\n if self.data.ndim == 2:\n new_shape = [self.data.shape[0]] + list((self.lats.shape[0], self.lons.shape[0]))\n self.data = np.reshape(self.data, new_shape)\n else:\n raise Exception(... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
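A minimal `flatten` sketch, assuming cf-python and `cf.example_field`:

```python
import cf  # assumed available

f = cf.example_field(0)   # shape (5, 8)
g = f.flatten()           # every data axis flattened into one axis of size 40
print(g.shape)
```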
Roll the field along a cyclic axis. A unique axis is selected with the axes and kwargs parameters. | def roll(self, axis, shift, inplace=False, i=False, **kwargs):
# TODODASK: Consider allowing multiple roll axes, since Data
# now supports them.
axis = self.domain_axis(
axis,
key=True,
default=ValueError(
f"Can't roll: Bad axis spec... | [
"def roll(self, iaxis, shift, inplace=False, i=False):\n return self._apply_data_oper(\n _inplace_enabled_define_and_cleanup(self),\n \"roll\",\n (iaxis, shift),\n inplace=inplace,\n i=i,\n )",
"def incrementalRotateBy(axis, angle):\n sel... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
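A minimal sketch of rolling a cyclic axis, assuming cf-python and `cf.example_field`:

```python
import cf  # assumed available

f = cf.example_field(0)
f.cyclic("longitude", period=360)   # rolling is most useful on a cyclic axis
g = f.roll("longitude", shift=3)    # rotate data and coordinates by 3 cells
print(g.coordinate("longitude").array[:4])
```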