Columns:
query: string (lengths 9 to 9.05k)
document: string (lengths 10 to 222k)
negatives: list (lengths 19 to 20)
metadata: dict
Prepares the files and organizes the directories. Returns a dictionary of directory keys and their respective locations.
def prepare_directories() -> dict: original_dataset_dir = os.path.join(data_dir, 'original') original_train_dir = os.path.join(original_dataset_dir, 'train') original_test_dir = os.path.join(original_dataset_dir, 'test1') base_dir = os.path.join(data_dir, 'cats_and_dogs_small') train_dir = os.path....
[ "def file_folder_specs():\n\n root = 'D:\\KST\\proj\\template\\template'\n files_folders = {\n 'root' : root,\n #'data' : os.path.abspath(\n #os.path.join(root, 'data')\n #)\n }\n\n # we can also check for existence here to put everything in place\n #if...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Copies batches of files from one directory to another. The file names to be copied must fit a mask with numbers (start and end).
def copy_files_mask(mask: str, start: int, end: int, from_dir: str, to_dir: str): fnames = [mask.format(i) for i in range(start, end)] for fname in fnames: src = os.path.join(from_dir, fname) dst = os.path.join(to_di...
[ "def extend(source_filename, target_filename, batch_size=1000):\n\n with MBtiles(target_filename, \"r+\") as target, MBtiles(source_filename) as source:\n for batch in source.list_tiles_batched(batch_size):\n tiles_to_copy = [tile for tile in batch if not target.has_tile(*tile)]\n\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Transforms an ECEF position into an ECI position.
def ecef2eci(r_ecef, GMST):
    DCM = ROT3(-GMST)  # Rotation matrix
    r_eci = DCM.dot(r_ecef)
    return r_eci
[ "def ecef2eci(R_ECEF,time): \n #\n # T is the Julian Date in julian centuries\n #\n d = time - 2451545.0;\n T = d/ 36525;\n #\n # Compute Greenwich Mean sidereal Time (in hours)\n #\n GMST = 2*np.pi*(0.7790572732640 + 1.00273781191125448*d)\n # \n # Compute Rotation Matr...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Computes Azimuth (angle from north measured to east), Elevation, and range of the position r_ecef with respect to a reference r_local.
def ecef2AzElRange(r_ecef, r_local, R_eq, e_planet):
    tol = 0.001 * np.pi/180.0  # Tolerance (0.001 deg)
    lla = ecef2lla(r_local, R_eq, e_planet, tol)  # Compute Latitude, Longitude, Altitude
    r_sez = ecef2sez(r_ecef, lla[0], lla[1], lla[2], R_eq, e_planet)
    azElRange = sez2AzElRange(r_sez)
    return azElRange
[ "def _az_alt( self, lat, lon, utc_offset, sun ):\n # Sun's Mean Longitude, L, gives us GMT at midnight.\n # GMST0 = (L + 180)/15\n GMST0 = ((sun.L+180)/15) % 24\n\n # Local Sidereal Time = GMST0 + UT + LON/15\n self.LST = GMST0 + utc_offset + lon/15\n\n # Hour Angle (in deg...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Transforms ECEF position into SEZ (South, East, Zenith) using LLA of a reference position and an ellipsoid model for the planet.
def ecef2sez(r_ecef, latitude, longitude, altitude, R_eq, e_planet):
    r_site = lla2ecef(latitude, longitude, altitude, R_eq, e_planet)
    r_sez = ROT2(np.pi/2 - latitude).dot(ROT3(longitude)).dot(r_ecef - r_site)
    return r_sez
[ "def geodetic2ecef(lon, lat, alt=0):\n lat = np.radians(lat)\n lon = np.radians(lon)\n xi = np.sqrt(1 - ESQ * np.sin(lat))\n x = (A / xi + alt) * np.cos(lat) * np.cos(lon)\n y = (A / xi + alt) * np.cos(lat) * np.sin(lon)\n z = (A / xi * (1 - ESQ) + alt) * np.sin(lat)\n return x, y, z", "def g...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Transforms the SEZ position (South, East, Zenith) into Azimuth, Elevation, Range.
def sez2AzElRange(r_sez):
    range = np.linalg.norm(r_sez)
    rx = r_sez[0]
    ry = r_sez[1]
    rz = r_sez[2]
    elevation = np.arcsin(rz/range)
    azimuth = np.arctan2(ry, -rx)
    if azimuth < 0:
        azimuth = azimuth + 2*np.pi
    return np.array([azimuth, elevation, range])
[ "def convert_coords(self, stz):\n return np.array(\n [stz[0], np.mod(stz[1], 2.0 * np.pi), np.mod(stz[2], 2.0 * np.pi)],\n dtype=np.float64,\n )", "def seg_z_range(self,t_secs):\n vols=self.volumes(t_secs)\n areas=self.planform_areas().data # a top-down area for e...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Transforms from ECI to right ascension, declination, and range.
def eci2RightAscensionDeclinationRange(r_eci):
    x = r_eci[0]
    y = r_eci[1]
    z = r_eci[2]
    r_xy = np.sqrt(x**2 + y**2)
    r = np.sqrt(x**2 + y**2 + z**2)
    rightAs = np.arctan2(y, x)
    dec = np.arctan2(z, r_xy)  # declination is between -90 and 90
    return np.array([rightAs, dec, r])
[ "def read_euler(self):\n data = self.bus.read_i2c_block_data(self.address, 0x1A, 6)\n return self.parse_axis(data, 16)", "def prove_range(amount, last_mask=None):\n C, a, R = tcry.gen_range_proof(amount, last_mask)\n\n # Trezor micropython extmod returns byte-serialized/flattened rsig\n nrs...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Computes the time derivatives of a unit vector given by a/a_norm.
def computeUnitVectorDerivatives(a, a_dot, a_ddot = None): a_inner = np.inner(a,a) a_norm = np.sqrt(a_inner) a_outer = np.outer(a,a) a_dot_inner = np.inner(a_dot, a_dot) a_dot_outer = np.outer(a_dot, a_dot) r = a/a_norm r_dot = a_dot/a_norm - a_outer.dot(a_dot)/...
[ "def _derivadot(self, a):\n #verified correct by putting 5 different a's into mathematica and comparing.\n numerator = - (self._Om) + 2 * (1 - self._Om) * (a ** 3)\n denominator = 2 * np.sqrt((a ** 3) * (self._Om) + (1 - self._Om) * (a ** 6))\n return numerator/denominator", "def DtDt(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Basic Rotation through 1st axis by an Euler Angle alpha
def ROT1(alpha):
    cos_al = np.cos(alpha)
    sin_al = np.sin(alpha)
    DCM = np.array([[1, 0, 0],
                    [0, cos_al, sin_al],
                    [0, -sin_al, cos_al]])
    return DCM
[ "def rotate(self,alpha):\n\n alpha=alpha*(np.pi/180.0)\n return Point(self.x*np.cos(alpha)-self.y*np.sin(alpha),self.y*np.cos(alpha)+self.x*np.sin(alpha))", "def rotate(self, alpha):\r\n\r\n if self.z is None:\r\n self._logger.warn('Z array is \"None\" - I cannot rotate that')\r\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Basic Rotation through 2nd axis by an Euler Angle alpha
def ROT2(alpha):
    cos_al = np.cos(alpha)
    sin_al = np.sin(alpha)
    DCM = np.array([[cos_al, 0, -sin_al],
                    [0, 1, 0],
                    [sin_al, 0, cos_al]])
    return DCM
[ "def rotate(self,alpha):\n\n alpha=alpha*(np.pi/180.0)\n return Point(self.x*np.cos(alpha)-self.y*np.sin(alpha),self.y*np.cos(alpha)+self.x*np.sin(alpha))", "def euler(ex, ey, ez, angl):\n\n s = math.sqrt(ex**2 + ey**2 + ez**2)\n ex = ex/s\n ey = ey/s\n ez = ez/s\n beta = math.acos(ez...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Basic Rotation through 3rd axis by an Euler Angle alpha
def ROT3(alpha):
    cos_al = np.cos(alpha)
    sin_al = np.sin(alpha)
    DCM = np.array([[cos_al, sin_al, 0],
                    [-sin_al, cos_al, 0],
                    [0, 0, 1]])
    return DCM
[ "def rotate(self, alpha):\r\n\r\n if self.z is None:\r\n self._logger.warn('Z array is \"None\" - I cannot rotate that')\r\n return\r\n\r\n # check for iterable list/set of angles - if so, it must have length\r\n # 1 or same as len(tipper):\r\n if np.iterable(alpha)...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
As the name says, counts the number of wall collisions of n balls in t seconds in a box of dimensions 2s x 2s. To avoid division by zero, vx and vy must be checked for zero. The function counts the collisions for vx and vy separately, as all collisions are elastic.
def count_collisions(point_vector, n, k, t, s): num_of_collisions = 0 point_col = [] for i in range(n): curr_col = 0 time_x, time_y = t, t x, y, vx, vy = point_vector[i] if vx != 0: if vx > 0: line_len = s - x time_x -= line_len / a...
[ "def count_obstacles_in_my_elf_way_to_castle(game, elf):\n count = 0\n for portal in game.get_enemy_portals():\n if portal.distance(elf) + portal.distance(game.get_enemy_castle()) < elf.distance(game.get_enemy_castle()) + game.portal_size or \\\n portal.distance(elf) + portal.distance(game....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculates "the beginning of time" by getting the average of time it takes for all points to travel back to their origin and rounding it up
def calculate_the_beginning(point_vector, n):
    avg_time = 0
    for i in range(n):
        s = np.sqrt(point_vector[i, 0] ** 2 + point_vector[i, 1] ** 2)
        v = np.sqrt(point_vector[i, 2] ** 2 + point_vector[i, 3] ** 2)
        avg_time += s / v
    avg_time /= n
    return round(avg_time)
[ "def average_time(self):\n return int((sum(self.times) / max(1, len(self.times))) * 1000)", "def total_time(self):\n t = timedelta()\n for step in self.steps:\n if ('time' in step):\n t += self.parsetime(step['time'])\n return(t)", "def get_travel_time_in_mi...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check CloudFormation Ref/GetAtt for Conditions
def match(self, cfn): matches = [] # Start with Ref checks ref_objs = cfn.search_deep_keys('Ref') for ref_obj in ref_objs: value = ref_obj[-1] if value not in PSEUDOPARAMS: scenarios = cfn.is_resource_available(ref_obj, value) for...
[ "def _check_rule_has_attribute(self, data_sources, conditions):\n return hasattr(data_sources['asset'], conditions['attribute']) and \\\n getattr(data_sources['asset'], conditions['attribute']) is not None", "def is_ca_external(self, obj_dict):\n return (self.endpoint == objects.get_singular(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Objects should not be automatically associated with a particular site when ``PHOTOLOGUE_MULTISITE`` is ``True``.
def test_auto_add_sites(self): with self.settings(PHOTOLOGUE_MULTISITE=False): gallery = GalleryFactory() photo = PhotoFactory() self.assertEqual(list(gallery.sites.all()), [self.site1]) self.assertEqual(list(photo.sites.all()), [self.site1]) photo.delete() ...
[ "def is_singleton(item):\n return isinstance(item, Item) and not item.album_id", "def check_magento_structure(self):\n for backend in self:\n websites = backend.website_ids\n if not websites:\n backend.synchronize_metadata()\n return True", "def test_exclude...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns calibrated values; used once the calibration is done.
def Calibrated(self):
    peaklist = self.PeakValues.copy()
    try:
        peaklist = np.array(peaklist) * self.k
        return peaklist
    except Exception as E:
        raise E
[ "def perform_ground_calibration(self): \n zero = 0\n noise = 0\n #TODO implement\n return zero, noise", "def get_camera_calibration_values():\n\tcalibration_images = glob.glob('./camera_cal/calibration*.jpg')\n\treturn __calibrate_camera(calibration_images)", "def getCalibrate...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Delete the vrrp_port (instance port) in case nova didn't. This can happen if a failover has occurred.
def deallocate_vip(self, vip): for amphora in six.moves.filter(self._filter_amphora, vip.load_balancer.amphorae): try: self.neutron_client.delete_port(amphora.vrrp_port_id) except (neutron_client_exceptions.NotFound, ...
[ "def deallocate_vip(self, vip):\n try:\n for amphora in vip.load_balancer.amphorae:\n try:\n self.network_proxy.delete_port(amphora.vrrp_port_id)\n except os_exceptions.ResourceNotFound:\n LOG.debug(\n 'VIP ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Waits for the amphora ports device_id to be unset. This method waits for the ports on an amphora device_id parameter to be '' or None which signifies that nova has finished detaching the port from the instance.
def wait_for_port_detach(self, amphora): interfaces = self.get_plugged_networks(compute_id=amphora.compute_id) ports = [] port_detach_timeout = CONF.networking.port_detach_timeout for interface_ in interfaces: port = self.get_port(port_id=interface_.port_id) ips ...
[ "def detach_and_delete_ports(connection, node, created_ports, attached_ports):\n for port_id in set(attached_ports + created_ports):\n LOG.debug('Detaching port %(port)s from node %(node)s',\n {'port': port_id, 'node': _utils.log_res(node)})\n try:\n connection.baremetal...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
>>> stair(0)
0
>>> stair(1)
1
>>> stair(2)
2
>>> stair(3)
3
>>> stair(4)
5
def stair(stair_n):
    def stair_s(stair_num):
        if stair_num in range(0, 2):
            return stair_num
        else:
            return stair_s(stair_num-1) + stair_s(stair_num-2)
    if stair_n == 0:
        return 0
    else:
        return stair_s(stair_n+1)
[ "def stairs(n):\n ### Your code here ###\n if n <= 0:\n return 0\n elif n == 1:\n return 1\n elif n == 2:\n return 2\n else:\n return stairs(n-1) + stairs(n-2)", "def stairs(n):\n if n <= 2:\n return n\n if n == 3:\n return 4\n return stairs(n-1) +...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert object columns to categorical integers.
def obj_as_cat_int(df, ignore=[]):
    obj_cols = df.select_dtypes(include='object').columns
    for col in obj_cols:
        if col not in ignore:
            df[col] = df[col].astype('category')
            df[col] = df[col].cat.codes.astype("int16")
            df[col] -= df[col].min()
    return df
[ "def int_categorize(df):\n if \"Dx?\" in df.columns:\n df[\"Dx?\"] = df[\"Dx?\"].fillna(False).astype(bool)\n up = []\n for c in list(df.columns):\n if(str(df[c].dtype) == \"object\"):\n up.append(c)\n dicts = [dict() for u in up]\n df = update_encoding(df, dicts, up, 'catego...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert sales from wide to long format, and merge sales with calendar and prices to create one dataframe.
def melt_and_merge(calendar, prices, sales, submission=False): id_cols = ['id', 'item_id', 'dept_id','store_id', 'cat_id', 'state_id'] if submission: last_day = int(sales.columns[-1].replace('d_', '')) sales.drop(sales.columns[6:-MAX_LAG], axis=1, inplace=True) for day in range(last_day...
[ "def index_sales(sale_count):\r\n data['index'] = list(range(sale_count))\r\n \r\n date = 0 \r\n price = 1\r\n \r\n for i in data['index']:\r\n sales['sale_' + str(i)] = [data['sales'][date], data['sales'][price]]\r\n date += 2\r\n price += 2", "def transform(self, X,...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initialize by setting the emotion and cause values
def __init__(self, emotion, cause, tweet, glove_size):
    self.emotion = emotion
    self.cause = cause
    self.tweet = tweet
    self.glove_size = glove_size
[ "def __init__(self, eman, game, entity_path=None):\n self.eman = eman\n self.game = game\n if entity_path:\n self.update_presets(entity_path)", "def initialize(self):\n if self.particle.params.T < self.particle.decoupling_temperature and not self.particle.in_equilibrium:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculate context score with GloVe embeddings
def calc_glove_score(self, context): context_embedding = np.full(self.glove_size, 1.e-28) for word in context: if word in Seed.glove_embeddings.keys(): word_vec = np.array(Seed.glove_embeddings[word]) context_embedding += word_vec return context_embed...
[ "def encode_glove_average(X_train , X_test , embedding_dim , word_index):\r\n import os\r\n import numpy as np\r\n #Embedding the vector in this step\r\n EMBEDDING_DIM = embedding_dim\r\n FILE_NAME = \"glove.6B.\" + str(embedding_dim) + \"d.txt\"\r\n FILE_NAME = \"glove.6B.\" + str(embedding_dim)...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the context before the given relation (emotion or cause)
def get_context_before(self, reln1): before = self.tweet.tokens[0:reln1[0].idx-1] if before: self.bef = self.calc_glove_score(before) else: self.bef = np.full(self.glove_size, 1.e-28)
[ "def get_prev_sentence(self, sentence):\n for prev_sentence, sentence_node, relation_type in self.graph.in_edges(sentence[\"id\"], keys=True):\n if relation_type == self.sentence_order_edge_type:\n return self.graph.node[prev_sentence]\n return None", "def context(self):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Traverses a path to find files matching the specified glob patterns.
def GRRFind(path, patterns):
    for directory, sub_directories, files in os.walk(path):
        for pattern in patterns:
            directory_pattern = os.path.join(directory, pattern)
            for pattern_match in glob.iglob(directory_pattern):
                if os.path.isfile(pattern_match):
                    yield pattern_match
[ "def _find_paths(dir_path, file_pattern):\n pattern = os.path.join(dir_path, \"**\", file_pattern)\n return glob.glob(pattern, recursive=True)", "def all_files(pattern, search_path, pathsep=os.pathsep):\r\n for path in search_path.split(pathsep):\r\n for match in glob.glob(os.path.join(path, patte...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Determine the package path prefix from the package name.
def GRRGetPackagePrefix(package_name):
    package_components = package_name.split(".")
    if len(package_components) > 2:
        # The components must be unpacked; os.path.join expects separate
        # string arguments, not a list.
        package_path_prefix = os.path.join(*package_components[1:])
    elif len(package_components) == 2:
        package_path_prefix = package_components[1]
    else:
        package_path_prefix = ""
    return package_path_prefix
[ "def GRRGetRelativeFilename(package_path_prefix, filename):\n if package_path_prefix:\n filename = os.path.relpath(filename, package_path_prefix)\n\n return filename", "def _get_package_name(self, path, root_path):\n\n return path_utils.get_package_name(path, root_path)", "def package_name(self, nam...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Determine the package path from the package path prefix and sub path.
def GRRGetPackagePath(package_path_prefix, sub_path):
    if package_path_prefix and sub_path:
        package_path = os.path.join(package_path_prefix, sub_path)
    elif sub_path:
        package_path = sub_path
    else:
        package_path = package_path_prefix
    return package_path
[ "def GRRGetPackagePrefix(package_name):\n package_components = package_name.split(\".\")\n\n if len(package_components) > 2:\n package_path_prefix = os.path.join(package_components[1:])\n elif len(package_components) == 2:\n package_path_prefix = package_components[1]\n else:\n package_path_prefix = \"...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Determine the filename relative to the package path prefix.
def GRRGetRelativeFilename(package_path_prefix, filename):
    if package_path_prefix:
        filename = os.path.relpath(filename, package_path_prefix)
    return filename
[ "def GRRGetPackagePrefix(package_name):\n package_components = package_name.split(\".\")\n\n if len(package_components) > 2:\n package_path_prefix = os.path.join(package_components[1:])\n elif len(package_components) == 2:\n package_path_prefix = package_components[1]\n else:\n package_path_prefix = \"...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Find data files as defined by the specifications.
def GRRFindDataFiles(data_files_specs): data_files = {} for package_name, sub_paths, patterns in data_files_specs: package_path_prefix = GRRGetPackagePrefix(package_name) package_data_files = [] for sub_path in sub_paths: package_path = GRRGetPackagePath(package_path_prefix, sub_path) fo...
[ "def find_data_files():\n\n if \"freebsd\" in sys.platform:\n manpagebase = pjoin('man', 'man1')\n else:\n manpagebase = pjoin('share', 'man', 'man1')\n\n # Simple file lists can be made by hand\n manpages = [f for f in glob(pjoin('docs','man','*.1.gz')) if isfile(f)]\n if not manpages:...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Truncates/pads a float f to n decimal places without rounding
def truncate(f, n):
    s = '{}'.format(f)
    if 'e' in s or 'E' in s:
        return '{0:.{1}f}'.format(f, n)
    i, p, d = s.partition('.')
    return '.'.join([i, (d + '0'*n)[:n]])
[ "def truncate_values(f, n=3):\n if not np.isnan(f):\n if type(f) is not np.ndarray:\n s = '{}'.format(f) # convert float to string\n if 'e' in s or 'E' in s:\n return float('{0:.{1}f}'.format(f, n))\n else:\n s = '{}'.format(f[0]) # convert np.ndarr...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Bans the user, deleting x days' worth of messages, then reinvites them.
async def bansoft(self, ctx, user: discord.Member, *, reason: str, days: int): server = ctx.message.server channel = ctx.message.channel can_ban = channel.permissions_for(server.me).kick_members author = ctx.message.author if author == user: await self.bot.say...
[ "async def ban(ctx, members : commands.Greedy[discord.Member],\n delete_days : typing.Optional[int] = 0, *,\n reason : str):\n for member in members:\n await member.ban(delete_message_days=delete_days, reason=reason)\n await ctx.send(f'Banned {member.mention}')",...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Enables auto deletion of repeated messages
async def delrepeats(self, ctx): server = ctx.message.server if not self.settings[server.id]["delete_repeats"]: self.settings[server.id]["delete_repeats"] = True await self.bot.say("Messages repeated up to 3 times will " "be deleted.") ...
[ "def mail_clear_deleted(self):\n self._dels.clear()", "def delete(self):\n if self.is_deleted:\n return\n if self.is_question:\n self.topic.is_deleted = True\n for tag in self.topic.tags:\n atomic_add(tag, 'tagged', -1)\n else:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Enables auto ban for messages mentioning X different people
async def banmentionspam(self, ctx, max_mentions: int=False): server = ctx.message.server if max_mentions: if max_mentions < 5: max_mentions = 5 self.settings[server.id]["ban_mention_spam"] = max_mentions await self.bot.say("Autoban for mention s...
[ "async def auto_bans(self, ctx):\n config = hf.database_toggle(ctx, self.bot.db['auto_bans'])\n if config['enable']:\n await ctx.send('Enabled the auto bans module. I will now automatically ban all users who join with '\n 'a discord invite link username or who joi...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Removes a channel from the ignore list. Defaults to the current one.
async def unignore_channel(self, ctx, channel: discord.Channel=None): current_ch = ctx.message.channel server = ctx.message.server if not channel: if current_ch.id in self.ignore_list["CHANNELS"]: self.ignore_list["CHANNELS"].remove(current_ch.id) ...
[ "def removes_channel(channel):", "async def _watignore_channel(self, ctx):\n\n channel = ctx.message.channel\n if channel.id in self.settings['ignore_channels']:\n self.settings['ignore_channels'].remove(channel.id)\n await self.bot.say(\"wut? Ok, I will no longer \"\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Removes current server from ignore list
async def unignore_server(self, ctx): server = ctx.message.server if server.id in self.ignore_list["SERVERS"]: self.ignore_list["SERVERS"].remove(server.id) dataIO.save_json(self._ignore_list_path, self.ignore_list) await self.bot.say("This server has been remove...
[ "async def _watignore_server(self, ctx):\n\n server = ctx.message.server\n if server.id in self.settings['ignore_servers']:\n self.settings['ignore_servers'].remove(server.id)\n await self.bot.say(\"wot? Ok boss, I will no longer \"\n \"ignore this s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Count the number of words between the start and stop word
def count_words_between(start, stop, words):
    word_list = words.lower().split(' ')
    count = 0
    counting = False
    for word in word_list:
        if word == stop.lower():
            return count
        if counting:
            count += 1
        if word == start.lower():
            counting = True
    return count
[ "def word_count(self):\n\n # Split by non-alphanumerical boundaires\n split_text = re.split('\\W',self.text.lower())\n\n # Count occurences\n counts = {}\n for word in split_text:\n if word:\n if word in counts:\n counts[word] += 1\n else:\n counts[word] = 1\n\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Draws the state of the game to the drawing surface
def draw_game(self):
    self.surface.fill((0, 0, 0))
    self.ship.draw(self.surface)
    self.aliens.draw(self.surface)
    pygame.display.flip()  # update the surface
[ "def draw(self):\n\n if self.finish:\n self.draw_end_screen()\n else:\n pyxel.cls(COL_BACKGROUND)\n self.sparkler.display()\n self.l_paddle.display()\n self.r_paddle.display()\n self.pickups.display()\n self.ball.display()\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Moves the ship by 'step' (Vector2)
def move(self, step):
    self.position += step * self.speed
[ "def move(self):\n self.steps += 1\n direction = uniform(0, 1)\n if direction < 0.5:\n self.position -= 1\n else:\n self.position += 1", "def step(self):\n tmp = self.path[-1].copy()\n tmp += self.direction\n self.path.append(tmp)\n sel...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
With some probability, generate an alien at a random position at the top of 'surface'
def generate_alien(self, surface):
    if random.random() < self.generation_chance:
        size = surface.get_size()
        position = pygame.Vector2(random.randint(0, size[0]), 0)
        self.aliens.append(Alien(position))
[ "def random(self):\r\n if self.ate_apple:\r\n self.x = 20 * random.randint(0, 23)\r\n self.y = 20 * random.randint(3, 23)", "def _randomize_asteroid(self):\n def randomize(vel):\n return vel * rand.choice([1, -1]) * rand.uniform(.5, 2)\n # randomly choose an i...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Moves all Aliens in this container
def move(self):
    for alien in self.aliens:
        alien.move()
[ "def aliMove(self):\n listr = []\n listl = []\n for row in self._aliens:\n for alien in row:\n if alien != None:\n listr = listr + [alien.right]\n listl = listl + [alien.left]\n self.moveAlien(listr, listl)", "def _move_as...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Draw all Aliens in this container
def draw(self, surface):
    for alien in self.aliens:
        alien.draw(surface)
[ "def draw(self):\n for z in Zulu:\n z.shape.draw(z.position,z.color)", "def _draw_asteroids(self):\n for asteroid in self.__asteroids:\n x, y = asteroid.get_coordinates()\n self.__screen.draw_asteroid(asteroid, x, y)", "def draw_all(self):\n pass", "def al...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check if 'ship' is in collision with any of the Aliens in this container
def has_collision(self, ship):
    for alien in self.aliens:
        if alien.has_collision(ship):
            return True
    return False
[ "def check_aliens_ship_collisions(ai_settings, screen, stats, sb, ship, aliens, bullets):\n ship_alien_collision = pygame.sprite.spritecollideany(ship, aliens)\n alien_on_the_bottom = alien_on_bottom(ai_settings, screen, stats, sb, ship, aliens, bullets)\n if ship_alien_collision or alien_on_the_bottom:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns True if 'ship' is in collision with this alien, False otherwise
def has_collision(self, ship):
    distance = (self.position - ship.position).length()
    return distance < self.radius + ship.radius
[ "def has_collision(self, ship):\n for alien in self.aliens:\n if alien.has_collision(ship):\n return True\n return False", "def InShip(ships, x, y):\n coord = (x, y)\n for ship in ships:\n if coord in ship: \n return True\n return False", "def c...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set the time frequency and prediction length parameters. This method must be called before being able to use `predict`.
def set_prediction_parameters(self, freq, prediction_length):
    self.freq = freq
    self.prediction_length = prediction_length
[ "def classifierSetVariables(self, setSize, time):\r\n\r\n \tself.prediction = cons.predictionIni\r\n \tself.predictionError = cons.predictionErrorIni\r\n \tself.fitness = cons.fitnessIni\r\n\r\n \tself.numerosity = 1\r\n \tself.experience = 0\r\n \tself.actionSetSize = set...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Requests predictions for the time series listed in `ts`, each with the (optional) corresponding category listed in `cat`.
def predict(self, ts, cat=None, encoding="utf-8", num_samples=100, quantiles=["0.1", "0.75", "0.9"]): prediction_times = [x.index[-1]+1 for x in ts] req = self.__encode_request(ts, cat, encoding, num_samples, quantiles) res = super(DeepARPredictor, self).predict(req) return self.__decode...
[ "def predict(self, ts, cat=None, encoding=\"utf-8\", num_samples=100, quantiles=[\"0.1\", \"0.5\", \"0.9\"], content_type=\"application/json\"):\n \n prediction_times=[]\n req=[]\n if type(ts)==list:\n prediction_times = [x.index[-1]+pd.Timedelta(1, unit=self.freq) for x in ts...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parse an individual show page.
def parse_show_page(response): # Parse the html soup = BeautifulSoup(response.text) # Find the data on the page venue_el = soup.find('h3').a venue = venue_el.string h4_els = soup.findAll('h4') date_el = h4_els[0] date = date_el.string location_el = h4_els[1] location = locat...
[ "def parse_detail(self, response):\n text = \"\".join(response.css(\".article-body p\")[0].css(\"p *::text\").getall())\n yield {\n \"url\": response.url,\n \"title\": get_clean_investopedia_title(\n response.css(\"h1.article-heading::text\").get().strip()\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Crawl the show listings pages. Return structured show info.
def crawl_show_listings(): # Crawl concerts in order, starting with the first show base_url = "http://www.dead.net" next_url = "http://www.dead.net/show/may-05-1965" results = [] while next_url: response, cached = cache_request(next_url) status = response.status_code logging....
[ "def parse_show_page(response):\n # Parse the html\n soup = BeautifulSoup(response.text)\n\n # Find the data on the page\n\n venue_el = soup.find('h3').a\n venue = venue_el.string\n\n h4_els = soup.findAll('h4')\n\n date_el = h4_els[0]\n date = date_el.string\n\n location_el = h4_els[1]\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Takes a list of data dicts scraped from dead.net. Returns a list of unique geocodable locations.
def unique_show_locations(listings):
    listing_geocodable = ['%s, %s' % (listing['venue'], listing['location'])
                          for listing in listings]
    unique_geocodable = sorted(set(listing_geocodable))
    return unique_geocodable
[ "def go_get_data(postcodes,dataset,pathToData=''):\n results = []\n geoAreas = []\n for postcode in postcodes:\n pc = adjustpostcode(postcode)\n pathToData = ''\n conn = lite.connect(pathToData+'geo.db')\n geodb = conn.cursor() \n c_oa = geodb.execute(\"SELECT oa11...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the max item and removes it from the heap, then moves the root node down so the heap properties are not violated.
def removeMax(self): max = self.get_max() #swap last element with root node self.swap(0,self.heap_size-1) #update the size self.heap_size = self.heap_size - 1 #move the root node down the heap to not violate heap properties. ...
[ "def pop(self):\n if len(self._items) == 0:\n raise LookupError('pop from empty heap')\n # else:\n # swap top item with the last item of self._items, and remove it\n _swap(self._items, 0, -1)\n min_item = self._items.pop()\n # now repair the heap property\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sorts the N nodes in the heap. Each removeMax call takes O(log N) because of downHeap().
def heap_sort(self): tempList = [] #store size of heap size = self.heap_size for i in range(0,size): #call removeMax N times to return max element and remove max every iteration max = self.removeMax() ...
[ "def max_heap_sort(heap):\n build_max_heap(heap)\n result=[]\n\n for index in range(heap_size(heap)-1, -1, -1):\n heap[0], heap[-1] = heap[-1], heap[0]\n result += [heap.pop()]\n max_heapify(heap, 0)\n\n return result", "def heap_sort(arr):\n max_heapify(arr)\n for i in rang...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add the a,b,c,d variables to the equation at the right spots.
def add_variables(equation, variables):
    for i, j in enumerate(range(2, 17, 4)):
        equation[j] = variables[i]
[ "def equation_p(self):\n\t\treturn f\"{self.a}x + {self.b}y + {self.c}z − {self.d} = 0\"", "def add_operations(equation, operations):\n for i, j in enumerate(range(3, 17, 5)):\n equation[j] = operations[i]", "def addEquations(self, node, makeEquations):\n nodeName = node.output[0]\n \n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add the operations to the equation at the right spots.
def add_operations(equation, operations):
    for i, j in enumerate(range(3, 17, 5)):
        equation[j] = operations[i]
[ "def quad_add_oper(self, oper):\n\n if IdleCompiler.__should_gen_quads:\n IdleCompiler.__interp.add_operator(oper)", "def apply_operators(operators, expression):\n\n i = 1\n while i < len(expression) - 1:\n\n if expression[i] in operators:\n operator = expression[i]\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add the brackets to the equation at the right spots.
def add_brackets(equation, brackets):
    for pos, brace in brackets.items():
        equation[pos] = brace
[ "def _solve_brackets(self, terms):\n while self._check_brackets(terms): # solve all terms inbetween brackets\n start = terms.index('(') # opening bracket\n end = self._find_closing_bracket(terms, start) # closing bracket related to start\n val = self.calc_term(terms[start+1:e...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return chain length of sequence starting on one.
def chain_length(seq):
    compare = 1
    for i in range(len(seq)):
        if not compare == seq[i]:
            return compare
        else:
            compare += 1
    return compare
[ "def calc_chain_len(num):\n\n # Check if the length has been calculated before\n length = chain_len.get(num)\n if length:\n return length\n\n # If not, calculate it recursively\n length = calc_chain_len(next_number(num)) + 1\n\n # Keep track of the chain length\n chain_len[num] = length\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Fit the best fitting plane p u + q v = w
def fit_uvwplane_only(vis: Visibility) -> (float, float): su2 = numpy.sum(vis.u * vis.u) sv2 = numpy.sum(vis.v * vis.v) suv = numpy.sum(vis.u * vis.v) suw = numpy.sum(vis.u * vis.w) svw = numpy.sum(vis.v * vis.w) det = su2 * sv2 - suv ** 2 p = (sv2 * suw - suv * svw) / det q = (...
[ "def fit_plane(x, y, z):\n pts = np.isfinite(z)\n if len(z.shape) > 1:\n x, y = np.meshgrid(x, y)\n xx, yy = x[pts].flatten(), y[pts].flatten()\n else:\n xx, yy = x, y\n\n flat = np.ones(xx.shape)\n\n coefs = np.linalg.lstsq(np.stack([xx, yy, flat]).T, z[pts].flatten(), rcond=Non...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Fit and optionally remove the best fitting plane p u + q v = w
def fit_uvwplane(vis: Visibility, remove=True) -> (Image, float, float): nvis = len(vis.data) before = numpy.max(numpy.std(vis.w)) p, q = fit_uvwplane_only(vis) residual = vis.data['uvw'][:, 2] - (p * vis.u + q * vis.v) after = numpy.max(numpy.std(residual)) log.debug('fit_uvwplane: Fit to %d ro...
[ "def fit_uvwplane_only(vis: Visibility) -> (float, float):\n \n su2 = numpy.sum(vis.u * vis.u)\n sv2 = numpy.sum(vis.v * vis.v)\n suv = numpy.sum(vis.u * vis.v)\n suw = numpy.sum(vis.u * vis.w)\n svw = numpy.sum(vis.v * vis.w)\n det = su2 * sv2 - suv ** 2\n p = (sv2 * suw - suv * svw) / ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Invert using time slices (top-level function). Use the image im as a template. Do the PSF in a separate call.
def invert_timeslice(vis: Visibility, im: Image, dopsf=False, normalize=True, **kwargs) -> (Image, numpy.ndarray): log.info("invert_timeslice: inverting using time slices") return invert_with_vis_iterator(vis, im, dopsf, vis_iter=vis_timeslice_iter, normalize=normalize, inver...
[ "def scale_invert(raw_path, proc_path,height,width):\n \n im = Image.open(raw_path)\n \n # rescale\n raw_width, raw_height = im.size\n new_width = int(round(raw_width * (height / raw_height)))\n im = im.resize((new_width, height), Image.NEAREST)\n im_map = list(im.getdata())\n im_map = np...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Predict using time slices.
def predict_timeslice(vis: Visibility, model: Image, **kwargs) -> Visibility:
    log.info("predict_timeslice: predicting using time slices")
    return predict_with_vis_iterator(vis, model, vis_iter=vis_timeslice_iter,
                                     predict=predict_timeslice_single, **kwargs)
[ "def predict(model, ts_test):\r\n n_periods = ts_test.shape[0]\r\n df_dates = model.make_future_dataframe(periods=n_periods, include_history=False)\r\n model_prediction = model.predict(df_dates)\r\n y_pred = model_prediction[['ds', 'yhat']]\r\n y_pred = y_pred.set_index('ds')\r\n y_pred['yhat'] = ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Predict using a single time slice. This fits a single plane and corrects the image geometry.
def predict_timeslice_single(vis: Visibility, model: Image, predict=predict_2d_base, **kwargs) -> Visibility: log.debug("predict_timeslice_single: predicting using single time slice") inchan, inpol, ny, nx = model.shape vis.data['vis'] *= 0.0 if not isinstance(vis, Visibility): avis =...
[ "def predict_timeslice(vis: Visibility, model: Image, **kwargs) -> Visibility:\n log.info(\"predict_timeslice: predicting using time slices\")\n\n return predict_with_vis_iterator(vis, model, vis_iter=vis_timeslice_iter,\n predict=predict_timeslice_single, **kwargs)", "de...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Does a t-test between the scores calculated for each survey rating. Also plots a histogram of each rating to check the distribution of the scores.
def scores_vs_rating(): rating_comparison = { 1: [], 2: [], 3: [], 4: [], 5: [] } rating_key = "like_rating_specific" for user, session in Session.get_users_with_surveys(): boundary = HistogramBoundary(user) survey = user.get_survey() for playlist_index, playlist in...
[ "def overall_score_eda(df, figure_path):\n # values counts for each review score\n print('\\nValue counts for each review score (1-5)')\n print(df['score'].value_counts())\n # visualize review scores distribution\n fig, ax = plt.subplots()\n df['score'].value_counts().plot(ax=ax, kind='bar')\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This function calculates the power coefficient for a given tip speed ratio
def calculate_C_p(tip_speed_ratio): a_min = get_induction_factor(0.0) a_max = get_induction_factor(tip_speed_ratio) # Calculate integral integral = lambda a: ((1 - a) * (1 - 2 * a) * (1 - 4 * a) / (1 - 3 * a)) ** 2 a = np.linspace(a_min, a_max, 100000) da = a[1] - a[0] dCp = integral(a) * d...
[ "def calcPower(speed, resistance_level):\r\n satoridata = [\r\n {\r\n 'level': 1,\r\n 'slope': 3.73,\r\n 'intercept': -28.67\r\n },\r\n {\r\n 'level': 2,\r\n 'slope': 5.33,\r\n 'intercept': -36.67\r\n },\r\n {\r\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check that the input vector is uniform and collapse it to a single value; otherwise raise a warning.
def collapse(vec, name=None, exclude=[]): if name is None: name = '**unkown**' if isuniform(vec): return vec[0] elif isuniform(vec, exclude=exclude): return list(set(np.unique(vec)) - set(exclude))[0] else: warnings.warn("The variable {} is expected to be uniform," ...
[ "def uniform(self):\n a = np.random.uniform()\n while a == 1.0: a = np.random.uniform()\n return a", "def uniform_solution():\n return BridgeFactory.solution_from_indices(lambda _1, _2: uniform(0, 1))", "def lecun_uniform(seed=None):\n return VarianceScaling(\n scale=1., mode='fan_in...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculate the configuration information (e.g., number of pings, number of beams, struct types, etc.) from the index data, and return it as a dict.
def calc_config(index): ids = np.unique(index['ID']) config = {} for id in ids: if id not in [21, 24, 26]: continue inds = index['ID'] == id _config = index['config'][inds] _beams_cy = index['beams_cy'][inds] # Check that these variables are consistent ...
[ "def get_simple_info_for_index(self, index=None, params={}, **kwargs):\n raw = self.client.cat.indices(index, params=params, **kwargs).split('\\n')\n list = []\n for r in raw:\n alter = r.split(' ')\n if len(alter) < 10: continue\n dict = {\n 'hea...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get a random proxy from the proxy pool
def get_random_proxy():
    return requests.get(proxypool_url).text.strip()
[ "def get_proxy():\n conn = get_conn()\n return conn.random()", "def get_random_proxie(proxies=None):\n i_proxies = proxies or []\n if i_proxies:\n return random.choice(proxies or [])\n return None", "def choose_proxy():\n global RECENT_PROXIES\n\n proxies = get_proxies()\n\n chose...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
KOSPI stock buy order completed method
def post_koa_normal_buy_kp_ord(self, trcode, rqname, next):
    self.logger.info("kospi stock buy order is completed. (rqname: {})".format(rqname))
    self.tr_ret_data = []
[ "def post_koa_normal_buy_kq_ord(self, trcode, rqname, next):\n self.logger.info(\"kosdaq stock buy order is completed. (rqname: {})\".format(rqname))\n self.tr_ret_data = []", "def post_koa_normal_sell_kp_ord(self, trcode, rqname, next):\n self.logger.info(\"kospi stock sell order is complete...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
KOSDAQ stock buy order completed method
def post_koa_normal_buy_kq_ord(self, trcode, rqname, next):
    self.logger.info("kosdaq stock buy order is completed. (rqname: {})".format(rqname))
    self.tr_ret_data = []
[ "def post_koa_normal_buy_kp_ord(self, trcode, rqname, next):\n self.logger.info(\"kospi stock buy order is completed. (rqname: {})\".format(rqname))\n self.tr_ret_data = []", "def did_complete_buy_order(self, order_completed_event):\n self.log_complete_order(order_completed_event)", "def po...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
KOSPI stock sell order completed method
def post_koa_normal_sell_kp_ord(self, trcode, rqname, next):
    self.logger.info("kospi stock sell order is completed. (rqname: {})".format(rqname))
    self.tr_ret_data = []
[ "def post_koa_normal_sell_kq_ord(self, trcode, rqname, next):\n self.logger.info(\"kosdaq stock sell order is completed. (rqname: {})\".format(rqname))\n self.tr_ret_data = []", "def post_koa_normal_buy_kp_ord(self, trcode, rqname, next):\n self.logger.info(\"kospi stock buy order is complete...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
KOSDAQ stock sell order completed method
def post_koa_normal_sell_kq_ord(self, trcode, rqname, next):
    self.logger.info("kosdaq stock sell order is completed. (rqname: {})".format(rqname))
    self.tr_ret_data = []
[ "def post_koa_normal_sell_kp_ord(self, trcode, rqname, next):\n self.logger.info(\"kospi stock sell order is completed. (rqname: {})\".format(rqname))\n self.tr_ret_data = []", "def post_koa_normal_buy_kq_ord(self, trcode, rqname, next):\n self.logger.info(\"kosdaq stock buy order is complete...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add a view and its associated object lookup and validator to the server's registry.
def add_view_to_registry(self, view, target_lookup, target_validator):
    self._view_registry[view] = (target_lookup, target_validator)
[ "def register_view(self, view):\n if isinstance(view, type):\n # Instantiate the view, if needed\n view = view()\n\n class_name = view.__class__.__name__\n if not hasattr(view, \"url\"):\n raise AttributeError(f'{class_name} missing required attribute \"url\"')\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Register a view and its object lookup and validator, wrapping the view to provide autodiscovery headers when appropriate.
def register_view(self, view, target_lookup, target_validator): def wrapper(request, *args, **kwargs): response = view(request, *args, **kwargs) absolute_uri = self.get_absolute_uri(request) if absolute_uri: try: target_uri = request....
[ "def register_view(self, view):\n if isinstance(view, type):\n # Instantiate the view, if needed\n view = view()\n\n class_name = view.__class__.__name__\n if not hasattr(view, \"url\"):\n raise AttributeError(f'{class_name} missing required attribute \"url\"')\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Look up a target object from an absolute URI.
def get_target_object(self, target_uri, *args, **kwargs): view, args, kwargs = self.lookup_view(target_uri) try: target_lookup, target_validator = self._view_registry[view] except KeyError: raise BacklinkTargetNotPingable try: return target_look...
[ "def find_target(request, pk):\n target = Target.objects.get(pk=pk)\n\n # We only let users get their own targets, unless a superuser.\n if target.user == request.user or request.user.is_superuser:\n return target\n else:\n raise ValueError(\"Accessing target %d not allowed\" % pk)", "de...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Validate a target object.
def validate_target(self, target_uri, target_object): view, args, kwargs = self.lookup_view(target_uri) try: target_lookup, target_validator = self._view_registry[view] if not target_validator(target_uri, target_object): raise BacklinkTargetNotPingable ...
[ "def validate_object(self, objectString, target, isStatus = False):\n # example 5 > 2\n targetObject = target \n\n if objectString.isdigit():\n return True\n \n #list of valid terms other than digit i.e d20\n if re.search(self._regexForDice, objectString) is not ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Perform XMLRPC (de)serialization of the request and called ping method.
def xmlrpc_dispatch(self, request): try: params, method = xmlrpclib.loads(request.raw_post_data) if method != 'pingback.ping': raise Exception('Method "%s" not supported' % method) source_uri, target_uri = params response = self.register_ping...
[ "def do_POST(self):\n\t\ttry:\n\t\t\t# get arguments\n\t\t\tdata = self.rfile.read(int(self.headers[\"content-length\"]))\n\t\t\t# In previous versions of SimpleXMLRPCServer, _dispatch\n\t\t\t# could be overridden in this class, instead of in\n\t\t\t# SimpleXMLRPCDispatcher. To maintain backwards compatibility,\n\t...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Clones a proposal. Only the fields that are supposed to be input by the employee are copied. The name of the clone will indicate that it is indeed a clone.
def __copy__(self): copy = super().__copy__() # Zeroes the fields that are invisible for employee. copy.owner = None copy.short_name = None copy.teaching_unit = None copy.major = None copy.level = None copy.year = None # Resets the status back to...
[ "def clone(self, name='', datastore_id=-1):\n self.client.call(self.METHODS['clone'], self.id, name, datastore_id)", "def clone(self):\n clone_patch = Patch(self.program)\n clone_patch.edit_list = deepcopy(self.edit_list)\n clone_patch.test_result = None\n return clone_patch", ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
We need the factory to dynamically load the daemon config, to support HA failovers
def _get_daemon_factory():
    # Dealing with circular dependency
    try:
        from cloudify_agent.api.factory import DaemonFactory
    except ImportError:
        # Might not exist in e.g. the REST service
        DaemonFactory = None
    return DaemonFactory
[ "def load_cfg(cfg_path=CONFIG_FILEPATH):\n try:\n with open(cfg_path, 'r') as fo:\n loaded_config = json.load(fo)\n except (ValueError, IOError, OSError):\n pass\n else:\n try:\n global daemon_host\n daemon_host = loaded_config['cmd_address']\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Schedule a channel method to be called from the connection thread. Use this to schedule a channel method such as .publish or .basic_ack to be called from the connection thread.
def channel_method(self, method, channel=None, wait=True, timeout=None, **kwargs): if wait and self._consumer_thread \ and self._consumer_thread is threading.current_thread(): # when sending from the connection thread, we can't wait because # then w...
[ "def on_channel_open(self, channel):\n logger.info('Channel opened..')\n\n self._channel = channel\n self._channel.basic_qos(prefetch_count=10)\n self.add_on_channel_close_callback()\n if self._exchange:\n self.setup_exchange(self._exchange, True)", "def publish(self,...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Make the queue name for this handler based on the correlation id
def _queue_name(self, correlation_id):
    return '{0}_response_{1}'.format(self.exchange, correlation_id)
[ "def name(self):\n return self._queue.name", "def get_queue_name():\r\n return getattr(settings, 'SEARCH_QUEUE_NAME', 'haystack_search_queue')", "def get_panda_queue_name(self, panda_resource):\n try:\n panda_queue = self.get(panda_resource).get('nickname')\n return panda...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This test checks the situation where we're currently present in a child directory of the project root. The paths in the default map should be configured to be relative to the project root, and NOT from the current working directory.
def _(project_root: Path = fake_project_pyproject): fake_context = types.SimpleNamespace( params={"path": (str(project_root),)}, default_map={}, ) with mock.patch.object(Path, "cwd", return_value=project_root / "a" / "d"): assert set_defaults_from_config(fake_context, None, None) == ...
[ "def testPaths():\n for path in config.main.paths:\n assert(os.path.exists(config.main.paths[path]))", "def check_path():\n root = os.path.abspath(os.path.curdir)\n assert os.path.basename(root) == \"treelite\", \"Must be run on project root.\"", "def test_template_lookup_path(self):\n lo...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The getSshTriggerCounters task connects to the Node parameter and executes all associated SshCounter counters from the Trigger parameter
def getSshTriggerCounters(node,trigger): logger.debug('SSH Getting ' + trigger.name + ' SshCounter counters from ' + node.name) output=[] #Checking if the trigger has got SshCounter counters = trigger.counters.all().select_subclasses() hascounters=False for counter in counters: if i...
[ "def _cx_counters_psutil(self):\n for iface, counters in psutil.net_io_counters(pernic=True).iteritems():\n metrics = {\n 'bytes_rcvd': counters.bytes_recv,\n 'bytes_sent': counters.bytes_sent,\n 'packets_in.count': counters.packets_recv,\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Carries out the event of the current next_active_node, and returns the next next_active_node
def event_and_return_nextnode(simself, next_active_node): next_active_node.have_event() for node in simself.transitive_nodes: node.update_next_event_date() self.assertEqual( node.number_of_individuals, len(node.all_individua...
[ "def next_node(self):\n self.current_idx += 1\n return self.suggested_node()", "def get_next(self):\r\n return self.next_node", "def next_node(self):\n return self.suggested_node()", "def next(self):\n if self.is_complete():\n return None\n return self.tree...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test the server priority function when we prioritise the server that was less busy throughout the simulation.
def test_server_priority_function_allocate_to_less_busy(self): def get_server_busy_time(server, ind): return server.busy_time ciw.seed(0) Q = ciw.Simulation(ciw.create_network( arrival_distributions=[ciw.dists.Exponential(1)], service_distributions=[c...
[ "def test_server_priority_function_allocate_to_last_server_first(self):\n def get_server_busy_time(server, ind):\n return -server.id_number\n\n ciw.seed(0)\n Q = ciw.Simulation(ciw.create_network(\n arrival_distributions=[ciw.dists.Exponential(1)],\n ser...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test the server priority function when we prioritise the server with the highest id number.
def test_server_priority_function_allocate_to_last_server_first(self): def get_server_busy_time(server, ind): return -server.id_number ciw.seed(0) Q = ciw.Simulation(ciw.create_network( arrival_distributions=[ciw.dists.Exponential(1)], service_distrib...
[ "def custom_server_priority(srv, ind):\n if ind.customer_class == 0:\n priorities = {1: 0, 2: 1}\n return priorities[srv.id_number]\n if ind.customer_class == 1:\n priorities = {1: 1, 2: 0}\n return priorities[srv.id_number]", "def ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test the server priority function with two nodes that each has a different priority rule.
def test_server_priority_function_two_nodes(self): def prioritise_less_busy(srv, ind): return srv.busy_time def prioritise_highest_id(srv, ind): return -srv.id_number ciw.seed(0) Q = ciw.Simulation(ciw.create_network( arrival_distributions=[ciw.d...
[ "def test_server_priority_function_allocate_to_less_busy(self):\n def get_server_busy_time(server, ind):\n return server.busy_time\n\n ciw.seed(0)\n Q = ciw.Simulation(ciw.create_network(\n arrival_distributions=[ciw.dists.Exponential(1)],\n service_dist...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that the server id is recorded correctly.
def test_records_correct_server_id(self):
    def custom_server_priority(srv, ind):
        """
        A custom server priority function that prioritises server 1
        for customer class 0 and server 2 for customer class 1.
        """
        if ind.customer_class == 0:
            prio...
[ "def get_server_id(self):", "def test_server_region_and_id(appliance_ip):\n region = store.current_appliance.server_region()\n if region == 0:\n pytest.skip(\"Can't check this if the region is 0\")\n assert str(store.current_appliance.server_id()).startswith(str(region))", "def testID(self):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
A custom server priority function that prioritises server 1 for customer class 0 and server 2 for customer class 1.
def custom_server_priority(srv, ind):
    if ind.customer_class == 0:
        priorities = {1: 0, 2: 1}
        return priorities[srv.id_number]
    if ind.customer_class == 1:
        priorities = {1: 1, 2: 0}
        return priorities[srv.id_number]
[ "def test_server_priority_function_two_nodes(self):\n def prioritise_less_busy(srv, ind):\n return srv.busy_time\n\n def prioritise_highest_id(srv, ind):\n return -srv.id_number\n\n ciw.seed(0)\n Q = ciw.Simulation(ciw.create_network(\n arrival_distri...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests that, when reneging, the correct next event time is detected.
def test_reneging_next_event(self): N = ciw.create_network( arrival_distributions=[ciw.dists.Deterministic(7)], service_distributions=[ciw.dists.Deterministic(11)], number_of_servers=[1], reneging_time_distributions=[ciw.dists.Deterministic(3)] ) Q...
[ "def test_update_time_tracking_entry(self):\n pass", "def test_next_occurrence():\n schedule_start = timezone.now()\n schedule_every = timedelta(hours=1)\n schedule = Schedule(start=schedule_start, every=schedule_every)\n expected = schedule_start + schedule_every\n assert schedule.next_occu...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Only one type of customer arrives (Class 0), but if they wait more than 4 time units they change to Class 1. Services last exactly 4.5 time units. Simulate until 26 time units.
def test_class_change_while_waiting(self): N = ciw.create_network( arrival_distributions={'Class 0': [ciw.dists.Deterministic(3)], 'Class 1': [ciw.dists.NoArrivals()]}, service_distributions={'Class 0': [ciw.dists.Deterministic(4.5)], ...
[ "def test_preemptive_priorities_at_class_change(self):\n # First without preemption:\n N = ciw.create_network(\n arrival_distributions={\n 'Class 0': [ciw.dists.NoArrivals()],\n 'Class 1': [ciw.dists.Sequential([2, 2, 2, 2, 2, float('inf')])]},\n ser...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
One server. Two classes of customer, 0 and 1, with 0 a higher priority than 1. Only Class 1 customers arrive, every 2 time units. All classes have service distribution Deterministic 2.5. Class 1 customers turn into Class 0 after waiting 1.2 time units.
def test_preemptive_priorities_at_class_change(self): # First without preemption: N = ciw.create_network( arrival_distributions={ 'Class 0': [ciw.dists.NoArrivals()], 'Class 1': [ciw.dists.Sequential([2, 2, 2, 2, 2, float('inf')])]}, service_distri...
[ "def test_class_change_while_waiting(self):\n N = ciw.create_network(\n arrival_distributions={'Class 0': [ciw.dists.Deterministic(3)],\n 'Class 1': [ciw.dists.NoArrivals()]},\n service_distributions={'Class 0': [ciw.dists.Deterministic(4.5)],\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Connects to an AKS cluster. No authentication as of now.
def aks_connect(rg, cluster): az("aks get-credentials --resource-group {} --name {}".format(rg, cluster))
[ "def cluster_auth_aws(deployment, project, cluster, zone, service_key):\n\n subprocess.check_call(['aws', 'eks', 'update-kubeconfig',\n '--name', cluster, '--region', zone])", "def login():\n\n # Configure the default client credentials for all possible environments.\n try:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add a Helm repo to the attached k8s cluster.
def helm_repo_add(name): subprocess.call(["helm", "repo", "add", name+"-stable","https://syashfr.github.io/"+name])
[ "def setup_helm():\n subprocess.check_output([\n 'helm', 'init', '--upgrade',\n ])\n # wait for tiller to come up\n subprocess.check_call([\n 'kubectl', 'rollout', 'status',\n '--namespace', 'kube-system',\n '--watch', 'deployment', 'tiller-deploy',\n ])", "def helm_add_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Computes the half-width of the 95% statistical confidence interval of the results from the standard deviation and the number of iterations.
def _confidence_interval(self, std): return 1.96 * std / np.sqrt(self.n_iter)
[ "def byConfidenceInterval(self) -> ConfidenceIntervalResults:\n global_mean: Rational = Moment.mean(self.data)\n\n upper, lower = ops.splitList(self.data.data, lambda obs: obs <= global_mean)\n upper_std_dev: Rational = Moment.std_dev(Vector(upper))\n lower_std_dev: Rational = Moment.std...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Print the authorship from this reader with a writer, given by name (e.g., text, biorxiv).
def print( # noqa:T202 self, writer: HintOrType["Writer"] = "text", writer_kwargs: OptionalKwargs = None, file=None, **kwargs, ) -> None: from ..writers import writer_resolver _writer = writer_resolver.make(writer, writer_kwargs) if file is None: ...
[ "def author_name(self) -> str:", "def display(self):\r\n\r\n bookinfo = '\"{}, written by {}\"'.format(self.title, self.author)\r\n print bookinfo", "def display_author(self):\n self.screen.blit(self.author, (0, 620))", "def author(cls, author_name: str) -> \"meta\":\n return cls(n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the prepared authorship.
def get_authorship(self) -> "Authorship": return self.authorship
[ "def get_author(self):\n\t\treturn self._author", "def author_info(self):\n return User.objects.get(pk=self.author)", "def author_info(self):\n return User.objects.get(pk=self.user_id)", "def author(self):\n return self._commit.author", "def author(self) -> SAuthor:\n return self...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test the circular convolution helper functions in NumPy.
def test_helpers(self): rng = np.random.RandomState(43232) dims = 1000 invert_a = True invert_b = False x = rng.randn(dims) y = rng.randn(dims) z0 = circconv(x, y, invert_a=invert_a, invert_b=invert_b) dims2 = 2*dims - (2 if dims % 2 == 0 else 1) ...
[ "def test_regressiontest_issue9168():\n\n x = np.array([[1., 2., 3.],\n [4., 5., 6.],\n [7., 8., 9.]],)\n\n kernel_fwhm = 1*u.arcsec\n pixel_size = 1*u.arcsec\n\n kernel = Gaussian2DKernel(x_stddev=kernel_fwhm/pixel_size)\n\n result = convolve_fft(x, kernel, boundary...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Asserts the same organization can be linked to several identity providers
def test_one_organization_many_identity_providers(self): IdpOrganizationAssociation.objects.create( organization=self.organization, idp_identifier="https://some-other-idp.com/entity/id/", ) IdpOrganizationAssociation.objects.create( organization=self.organiza...
[ "def test_one_identity_provider_many_organizations(self):\n with self.assertRaises(ValidationError) as exception_context_manager:\n IdpOrganizationAssociation.objects.create(\n organization=OrganizationFactory(),\n idp_identifier=self.idp_entity_id,\n )\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Asserts the same identity provider cannot be linked to several organizations
def test_one_identity_provider_many_organizations(self): with self.assertRaises(ValidationError) as exception_context_manager: IdpOrganizationAssociation.objects.create( organization=OrganizationFactory(), idp_identifier=self.idp_entity_id, ) raise...
[ "def test_one_organization_many_identity_providers(self):\n IdpOrganizationAssociation.objects.create(\n organization=self.organization,\n idp_identifier=\"https://some-other-idp.com/entity/id/\",\n )\n\n IdpOrganizationAssociation.objects.create(\n organization...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
For each of the test cases initialize a new KeywordProcessor. Add the keywords from the test case to the KeywordProcessor. Extract keywords and check if they match the expected result for the test case.
def test_extract_keywords(self): for test_id, test_case in enumerate(self.test_cases): keyword_processor = KeywordProcessor() keyword_processor.add_keywords_from_dict(test_case['keyword_dict']) keywords_extracted = keyword_processor.extract_keywords(test_case['sentence']) ...
[ "def test_extract_keywords_case_sensitive(self):\n for test_id, test_case in enumerate(self.test_cases):\n keyword_processor = KeywordProcessor(case_sensitive=True)\n keyword_processor.add_keywords_from_dict(test_case['keyword_dict'])\n keywords_extracted = keyword_processor....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
For each of the test cases initialize a new KeywordProcessor. Add the keywords from the test case to the KeywordProcessor. Extract keywords and check if they match the expected result for the test case.
def test_extract_keywords_case_sensitive(self): for test_id, test_case in enumerate(self.test_cases): keyword_processor = KeywordProcessor(case_sensitive=True) keyword_processor.add_keywords_from_dict(test_case['keyword_dict']) keywords_extracted = keyword_processor.extract_k...
[ "def test_extract_keywords(self):\n for test_id, test_case in enumerate(self.test_cases):\n keyword_processor = KeywordProcessor()\n keyword_processor.add_keywords_from_dict(test_case['keyword_dict'])\n keywords_extracted = keyword_processor.extract_keywords(test_case['senten...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return list of active constraints at point x.
def active_set_at(self, x: np.ndarray, as_equalities: bool) -> List[Constraint]: return [c.as_equality() if as_equalities else c for c in self.constraints if c.is_active(x)]
[ "def calc_constraints_at(self, x: np.ndarray) -> np.ndarray:\n return np.array([c(x) for c in self.constraints])", "def get_active_constraints(self):\n if self.active_constraints_set:\n return self.active_constraints_index\n else:\n raise Exception('Active constraints no...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the approximated gradient of the function at the point x.
def calc_gradient_at(self, x: np.ndarray) -> np.ndarray: return gradient_approximation(self.f, x)
[ "def gradient(function, x):\n x = np.asarray(x)\n assert x.ndim == 1, \"x must be a vector\"\n x_ad = np.empty(x.shape, dtype=AutoDiffXd)\n for i in range(x.size):\n der = np.zeros(x.size)\n der[i] = 1\n x_ad.flat[i] = AutoDiffXd(x.flat[i], der)\n y_ad = np.asarray(function(x_ad)...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }