| query (string, 9–9.05k chars) | document (string, 10–222k chars) | negatives (list, 19–20 items) | metadata (dict) |
|---|---|---|---|
Load image from file and perform preprocessing. Args | def _load_preprocess_image(self, image_file):
image_raw = tf.io.read_file(image_file)
image = self._preprocess_image(image_raw)
return image | [
"def _load_preprocess_image(self, image_file):\n image_raw = tf.io.read_file(image_file)\n\n image = self._preprocess_image(image_raw)\n\n return image",
"def load_and_preprocess_image(path):\n image = tf.io.read_file(path)\n return preprocess_image(image)",
"def load_and_preprocess(u... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Convert raw binary to float64 and scale the pixel values. Args | def _preprocess_image(self, image_raw):
image = tf.io.decode_raw(image_raw, tf.float64)
return image * self.rescale | [
"def normalize_image(img_arr_uint):\n return img_arr_uint.astype(np.float64) * ONE_BYTE_SCALE",
"def Bin_to_float(self):\r\n float_x = int(self.binary_x, 2) / 1000\r\n float_y = int(self.binary_y, 2) / 1000\r\n return float_x, float_y",
"def int2float(img_int):\n img = img_int.astype(... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
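The two rows above pair naturally in a `tf.data` input pipeline. A minimal sketch, assuming TensorFlow is available and the image files store raw float64 pixels scaled by a `rescale` factor; the `ImageLoader` class and the file names are hypothetical glue, not part of the dataset:

```python
import tensorflow as tf

class ImageLoader:
    """Hypothetical wrapper combining the two documents above."""
    def __init__(self, rescale=1.0 / 255.0):
        self.rescale = rescale

    def _preprocess_image(self, image_raw):
        # Decode raw bytes to float64 and scale the pixel values.
        image = tf.io.decode_raw(image_raw, tf.float64)
        return image * self.rescale

    def _load_preprocess_image(self, image_file):
        # Read the file contents, then decode and rescale.
        image_raw = tf.io.read_file(image_file)
        return self._preprocess_image(image_raw)

loader = ImageLoader()
# Map over a dataset of file paths (placeholder paths).
dataset = tf.data.Dataset.from_tensor_slices(["img_0.bin", "img_1.bin"])
dataset = dataset.map(loader._load_preprocess_image)
```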
Get a Path object for an image file. Args | def get_path_image(path_data, label, filename):
return path_data.joinpath(f'label_{label}', filename) | [
"def image_path(fname):\n root = Path(__file__).parent.parent\n return str((root / \"images\" / Path(fname)).resolve())",
"def get_image_from_file(path):\n return _ir.get_image_from_file(path)",
"def path(self):\n\t\treturn self.image.path",
"def open(path):\n return Picture(path=os.path.abspath(p... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sort metric performance results by the mean. Args | def sort_results(metric_results):
means, stds, params_list = metric_results
dtype = [('index', int), ('params_list', object), ('std', float), ('mean', float)]
#Sort will fail when attempting to rank based on the
#dictionary 'params_list' when encountering identical mean and
#standard deviations. T... | [
"def sort_gscordata_mean(data: list) -> list:\n return sorted(data, key=lambda x: x.mean, reverse=True)",
"def sort_by_metric(self, value): \n self._sort_by_metric = value",
"def fetch_sorted_metric(self, *args, **kwargs):\n return sorted(self.fetch_metric(*args, **kwargs).items(),\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Task state handler that timestamps new states and logs the duration between state changes using the task's logger. | def timestamper(task, old_state, new_state):
new_state.timestamp = pendulum.now("utc")
if hasattr(old_state, "timestamp"):
duration = (new_state.timestamp - old_state.timestamp).in_seconds()
task.logger.info(
"{} seconds passed in between state transitions".format(duration)
)... | [
"def log_state(self):\n rospy.loginfo(\"STATE: %s [%s]\" %(self.__class__.__name__, 15 - self.ros_node.get_time()))",
"def task_state(args) -> None:\n dag = get_dag(args.subdir, args.dag_id)\n task = dag.get_task(task_id=args.task_id)\n ti, _ = _get_ti(task, args.map_index, exec_date_or_run_id=a... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Gathers paths and converts to data frames per the arguments provided. Multiple checks are in place to ensure data exists prior to processing. | def gather_data(instance):
paths = [instance.get('file1'), instance.get('file2')]
if Path(instance.get('file1')).parents[0].is_dir() is True and Path(
instance.get('file2')).parents[0].is_dir() is True:
files = [f for f in paths if os.path.isfile(f)]
if len(files) == 0:
r... | [
"def Collect1DResults(Path, FolderNames, Left, Right, SavePath, OneD,\n fromf='', tof='', FilterbyName = False):\n\n second = \"=pd.DataFrame()\"\n if fromf == '':\n fromf = 0\n\n for i in range(len(FolderNames)):\n print(str(i) + \"-\" + FolderName... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Plot the Fourier power spectrum as a function of Fourier period (1/frequency) | def FourierPlot(tas):
detrend = signal.detrend(tas)
L = len(tas)
freqs = np.fft.fftfreq(L)
tas_fft = np.fft.fft(detrend)
R = tas_fft.real
Im = tas_fft.imag
mag = np.sqrt(R**2+Im**2)
plt.plot(1/freqs,mag) | [
"def plotSpectrum(y,Fs):\n n = len(y) # length of the signal\n k = arange(n)\n T = n/Fs\n frq = k/T # two sides frequency range\n frq = frq[range(n/2)] # one side frequency range\n\n Y = fft(y)/n # fft computing and normalization\n Y = Y[range(n/2)]\n \n plt.plot(frq,abs(Y),'r') ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
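Note that `FourierPlot` above plots `1/freqs`, which is infinite at the zero-frequency (DC) bin. A hedged sketch of the same plot that masks the DC bin and keeps only positive frequencies, assuming numpy, scipy, and matplotlib as in the row above:

```python
import numpy as np
from scipy import signal
import matplotlib.pyplot as plt

def fourier_power_vs_period(tas):
    """Plot Fourier magnitude against period, skipping the DC bin."""
    detrended = signal.detrend(tas)
    freqs = np.fft.fftfreq(len(tas))
    mag = np.abs(np.fft.fft(detrended))
    pos = freqs > 0          # positive frequencies only; avoids 1/0 at DC
    plt.plot(1.0 / freqs[pos], mag[pos])
    plt.xlabel("period (samples)")
    plt.ylabel("|FFT|")
```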
Check to see whether the annual cycle is dominant | def annual_cycle_dominant(tas):
detrend = signal.detrend(tas)
L = len(tas)
freqs = np.fft.fftfreq(L)
tas_fft = np.fft.fft(detrend)
R = tas_fft.real
Im = tas_fft.imag
mag = np.sqrt(R**2+Im**2)
the_period = 1./np.abs(freqs[np.argmax(mag)])
return the_period | [
"def duty_cycle(self):\n diff = np.diff(self.lc.time)\n t = np.median(diff)\n std = np.std(diff)\n mask = diff > (t + 3 * std)\n return (1 - np.sum(diff[mask]) / np.sum(diff))",
"def is_dominant(self, index_set = None, positive = True):\n return self.first_descent(ind... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return the tangent of the phase associated with Fourier mode | def get_tan_phase(tas,period=12):
L = len(tas)
freqs = np.fft.fftfreq(L)
closest = np.abs(freqs-1./period)
# i = np.where(freqs == 1./period)[0]
i = np.argmin(closest)
#print 1/freqs[i]
tas_fft = np.fft.fft(tas)/L
R = tas_fft.real
Im = tas_fft.imag
mag = np.sqrt(R**2+Im**2)
ph... | [
"def tangent(angle):\r\n\r\n return math.tan(angle)",
"def _phase(self):\n re = self.real\n im = self.imag\n \n return im._atan2(re)",
"def tand(angle):\n return np.tan(np.radians(angle))",
"def mag_phase(F):\n\n return (np.absolute(F), np.angle(F))",
"def phase(self):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Convert phase to day of the year | def phase_to_day(phase):
if phase < 0:
phase += 2*np.pi
return phase*(365./(2*np.pi)) | [
"def calendar_year(cls, tee):\n return iround(((tee - OldHindu.EPOCH) / cls.SIDEREAL_YEAR) - (cls.solar_longitude(tee) / 360))",
"def time_to_year(time):\n\treturn str(time)[0:4]",
"def calendar_year(cls, tee):\n return iround(((tee - OldHindu.EPOCH) / cls.MEAN_SIDEREAL_YEAR) - (sidereal_solar_lon... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
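A quick worked example of the conversion above: a negative phase is first wrapped into [0, 2π), then scaled by 365/(2π):

```python
import numpy as np

def phase_to_day(phase):
    # Wrap negative phases into [0, 2*pi) before scaling to days.
    if phase < 0:
        phase += 2 * np.pi
    return phase * (365. / (2 * np.pi))

print(phase_to_day(np.pi / 2))   # 91.25  (one quarter of the year)
print(phase_to_day(-np.pi / 2))  # 273.75 (wraps to 3*pi/2 first)
```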
Given a list of observers of the same type, aggregates and returns their results. | def aggregate_results(observers):
return None | [
"def _process_all_outputs(observers_list_list, results_dict = {}):\n\n if observers_list_list == []:\n return results_dict\n\n # A list of list of observers aggregated by type.\n aggregated_observers_list = []\n\n # This will be the list of observers list that will not be processed in\n # this... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Initialization of graph class with specification of the style used, default is 'ggplot' | def __init__(self, Style='ggplot'):
fig_var = ['style', 'Plots', 'title', 'xlabel', 'ylabel',
'Xmin',
'Xmax',
'Ymin', 'Ymax']
self.data = dict.fromkeys(fig_var)
self.data['style'] = Style
self.data['Plots'] = {} | [
"def new_graph(self, data, name=None, style='bar', color=colors.lightgreen,\n altcolor=colors.darkseagreen, linewidth=1, center=None,\n colour=None, altcolour=None, centre=None):\n #Let the UK spelling (colour) override the USA spelling (color)\n if colour is not None... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
define the X and Y axis limits | def setlimits(self, Xlim=[], Ylim=[]):
self.data['Xmin'] = Xlim[0]
self.data['Xmax'] = Xlim[1]
self.data['Ymin'] = Ylim[0]
self.data['Ymax'] = Ylim[1] | [
"def set_x_limits(self, min_=None, max_=None):\n self._x_min, self._x_max = self._set_limits(self._x_min, self._x_max, min_, max_)",
"def setGraphXLimits(self, xmin, xmax):\n self.__xLimits = xmin, xmax",
"def axes_limits_set(self, data):\n xmax = self.calcs.iterations - 1 if self.calcs.ite... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Converts stoplist text file into list of tokens | def load_stop_list():
stop_list = []
with open(STOP_LIST, "r") as f:
lines = f.readlines()
stop_list = [word.strip() for word in lines]
return stop_list | [
"def read_tokens_from_file() -> List[str]:\n with open(\"tokens.txt\") as file:\n return file.readlines()",
"def readStopList():\n f = None\n try:\n f = open('documents/stoplist.txt', 'r')\n except FileNotFoundError:\n print(\"ERROR: File not found.\")\n exit(-1)\n if f ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sort in a specified order any dictionary nested in a complex structure. Especially useful for sorting a JSON file in a meaningful order. | def make_custom_sort(orders):
orders = [{k: -i for (i, k) in enumerate(reversed(order), 1)} for order in orders]
def process(stuff):
if isinstance(stuff, dict):
l = [(k, process(v)) for (k, v) in stuff.items()]
keys = set(stuff)
order = max(orders, key=lambda order: l... | [
"def sort_structure(self, structure):\n\n def _cmp(arg1, arg2):\n arg1 = arg1[1][\"title\"] if \"title\" in arg1[1] else arg1[0]\n arg2 = arg2[1][\"title\"] if \"title\" in arg2[1] else arg2[0]\n # cmp not exists in py3\n # via <https://docs.python.org/3.0/whatsnew... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
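The `make_custom_sort` document above is truncated. A simplified sketch of the same idea — recursively reordering a nested dict's keys by a preferred order — under the assumption that a single flat order list suffices (the multi-order matching logic of the original is omitted):

```python
def make_simple_sort(order):
    """Return a function that recursively reorders dict keys by `order`."""
    rank = {key: i for i, key in enumerate(order)}

    def process(stuff):
        if isinstance(stuff, dict):
            # Unknown keys sort after all known ones.
            items = sorted(stuff.items(), key=lambda kv: rank.get(kv[0], len(rank)))
            return {k: process(v) for k, v in items}
        if isinstance(stuff, list):
            return [process(v) for v in stuff]
        return stuff

    return process

sort_json = make_simple_sort(["id", "name", "tags"])
print(sort_json({"tags": [], "name": "x", "id": 1}))
# {'id': 1, 'name': 'x', 'tags': []}
```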
Return statistics about the current state of this lock. | def statistics(self) -> LockStatistics:
return LockStatistics(self.locked(), self._owner_task, len(self._waiters)) | [
"def status(self):\n if self.useHWlock:\n self.HWlock.acquire()\n ctrlStat = {}\n for key, c in self.items():\n curr = {}\n curr['active'] = c.isActive()\n curr['actual'] = c.get()\n curr['target'] = c.getTarget()\n curr['unit'] ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Notify exactly n listeners. | def notify(self, n: int = 1) -> None:
self._check_acquired()
for _ in range(n):
try:
event = self._waiters.popleft()
except IndexError:
break
event.set() | [
"def notify(self, n=1):\n if not self._is_owned():\n raise RuntimeError(\"cannot notify on un-acquired lock\")\n all_waiters = self._waiters\n waiters_to_notify = _deque(_islice(all_waiters, n))\n if not waiters_to_notify:\n return\n for waiter in waiters_to_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return statistics about the current state of this condition. | def statistics(self) -> ConditionStatistics:
return ConditionStatistics(len(self._waiters), self._lock.statistics()) | [
"def status(self):\n if self.useHWlock:\n self.HWlock.acquire()\n ctrlStat = {}\n for key, c in self.items():\n curr = {}\n curr['active'] = c.isActive()\n curr['actual'] = c.get()\n curr['target'] = c.getTarget()\n curr['unit'] ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return statistics about the current state of this semaphore. | def statistics(self) -> SemaphoreStatistics:
return SemaphoreStatistics(len(self._waiters)) | [
"def statistics(self):\n return self._queue.statistics(self._name)",
"def status(self):\n if self.useHWlock:\n self.HWlock.acquire()\n ctrlStat = {}\n for key, c in self.items():\n curr = {}\n curr['active'] = c.isActive()\n curr['actual'] = ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create an asynchronous lock. | def create_lock() -> Lock:
return Lock() | [
"def create_lock() -> Lock:\n return _get_asynclib().Lock()",
"def create_lock(self):\n return DefaultLock()",
"def create_lock(self):\n return Lock()",
"def allocate_lock():\n return LockType()",
"def acquire_lock(self, object_id):",
"def acquire_lock(cls, obj):\n try:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create an asynchronous condition. | def create_condition(lock: Optional[Lock] = None) -> Condition:
return Condition(lock=lock) | [
"def create_condition(lock: Lock = None) -> Condition:\n return _get_asynclib().Condition(lock=lock)",
"def make_condition(self, name):\n pass",
"def create_wait_condition(self, lock=None):\n if lock is None:\n lock = self.create_lock()\n return DefaultWaitCondition(lock)",
"def await_con... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create an asynchronous event object. | def create_event() -> abc.Event:
return get_asynclib().Event() | [
"def create_event() -> Event:\n return _get_asynclib().Event()",
"async def createEvent(self, event: Event) -> None:",
"def for_asyncio(cls) -> Event:\n return _AsyncioEvent()",
"def createEvent(self, data) -> None:\n pass",
"def createEvent(self):\n return self._java.make_proxy(EventPro... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create an asynchronous semaphore. | def create_semaphore(value: int, *, max_value: Optional[int] = None) -> Semaphore:
return Semaphore(value, max_value=max_value) | [
"def create_semaphore(value: int) -> Semaphore:\n return _get_asynclib().Semaphore(value)",
"def BoundedSemaphore(self, value):\n\t\tpass",
"def create_lock() -> Lock:\n return _get_asynclib().Lock()",
"def get_semaphore(name, pid, time_out=None, pause=0.1):\n\n semaphore_key = get_semaphore_key(name... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create a capacity limiter. | def create_capacity_limiter(total_tokens: float) -> abc.CapacityLimiter:
return get_asynclib().CapacityLimiter(total_tokens) | [
"def create_capacity_limiter(total_tokens: float) -> CapacityLimiter:\n return _get_asynclib().CapacityLimiter(total_tokens)",
"def _create_capacity(self, m, comp, prod_name):\n name = comp.name\n cap_res = comp.get_capacity_var() # name of resource that defines capacity\n r = m.resource_index_m... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Calculate the wall panel area from CC | def walls_cc(lenght, width, wall_height=3, roof_height=4):
a = min(0.1*lenght, 0.1*width, 0.4*(wall_height+0.5*roof_height))
trian = 0.5*width*roof_height
trian_5 = 0.5*a*a*(wall_height/roof_height)
# trian_4 = trian - 2*trian_5
area = (lenght + width)*2*wall_height + 2*trian
area_5 = 8*a*wall_... | [
"def _calc_plasma_area(self):\n self.area = 0\n for mat in self.mat:\n if not mat:\n self.area += self.dx",
"def calc_inner_cross_sectional_area(self):\n return (pi / 4.0) * self.inner_dia ** 2",
"def calc_outer_cross_sectional_area(self):\n return (pi / 4.0... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Calculate the roof area from CC sections, roof components and cladding | def roof_cc(lenght, width, overhang=1, wall_height=3, roof_height=4):
a = min(0.1 * lenght, 0.1 * width, 0.4 * (wall_height + 0.5 * roof_height))
area = (lenght + overhang)*(width + overhang)
area_3 = 8*a**2
area_1 = (lenght - 2)*(width - 4*a)
area_2 = area - area_3 - area_1
return area, area_1... | [
"def Intarea( xc, yc, r, x0, x1, y0, y1):\n\n#\n# Shift the objects so that the circle is at the origin.\n#\n x0 = x0 - xc\n y0 = y0 - yc\n x1 = x1 - xc\n y1 = y1 - yc\n\n return Oneside( x1, y0, y1, r ) + Oneside( y1, -x1, -x0, r ) +\\\n Oneside( -x0, -y1, -y0, r ) + Oneside( -y0, x0, x1, r )... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Calculate the area of the roof sections from MWFRS | def roof_mwfrs(lenght, width, overhang=1, wall_height=3, roof_height=4):
h = wall_height + 0.5*roof_height
area = (lenght + overhang) * (width + overhang)
area_1 = 0.5*h*width
area_2 = 0.5*h*width
area_3 = h*width
area_4 = area - area_1 -area_2 - area_3
return area, area_1, area_2, area_3, a... | [
"def _area(self):\n self.area = 0.0\n for sail in self.sails:\n self.area += sail.area",
"def _get_sheet_area(self):\n b1, b2, b3 = self.poscar.structure.lattice\n return np.linalg.norm(np.cross(b1, b2))*1e-16",
"def get_binary_rf_area(self):\n\n if self.thr is None... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns True if the path references a storage managed by this client. | def is_managed_path(self, path):
if self._config is None:
return False
fields = path.split(':', 1)
return len(fields) == 2 and fields[0] in self._config | [
"def is_storage_local(self):\n\n\t\treturn self.get_volume_guid_raw() == self.get_storage_guid_raw()",
"def supported_storage(cls,path):\r\n if path.startswith('http://'):\r\n return False\r\n path = os.path.abspath(path)\r\n (name,ext) = os.path.splitext(path)\r\n if path !... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the storage ID and the full path from a managed path. | def parse_managed_path(path):
fields = path.split(':', 1)
return fields[0], fields[1] | [
"def split(self, path):\n if not self.is_managed_path(path):\n return os.path.split(path)\n client, _ = self._get_storage(path)\n prefix, rel_path = self.parse_managed_path(path)\n return (\"%s:\" % prefix,) + client.split(rel_path)",
"def _get_storage(self, path, storage_id... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the storage implementation based on storage_id or infer it from the path. Defaults to the local filesystem. | def _get_storage(self, path, storage_id=None):
if storage_id is None:
fields = path.split(':', 1)
if len(fields) == 2 and len(fields[0]) > 1:
storage_id = fields[0]
path = fields[1]
if storage_id is not None:
if storage_id not in self.... | [
"def get_storage_backend(self):\n return self.client.info()['Driver']",
"def get_storage(path=None, options=None):\n path = path or settings.STORAGE\n option = options or {}\n options = options or settings.STORAGE_OPTIONS\n if not path:\n raise ImproperlyConfigured('You must specify a st... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Joins the paths according to the storage implementation. | def join(self, path, *paths):
if not self.is_managed_path(path):
return os.path.join(path, *paths)
client, _ = self._get_storage(path)
prefix, rel_path = self.parse_managed_path(path)
return '%s:%s' % (prefix, client.join(rel_path, *paths)) # Only join the actual path. | [
"def join(self, path, *paths):",
"def _join_path(self, path1, path2):\n raise NotImplementedError",
"def __combine_path(self, other):\n self.path = other.path + self.path",
"def join(\n self, store: \"FlattenedStorage\", lsuffix: str = \"\", rsuffix: str = \"\"\n ) -> \"FlattenedStorag... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Splits the path according to the storage implementation. | def split(self, path):
if not self.is_managed_path(path):
return os.path.split(path)
client, _ = self._get_storage(path)
prefix, rel_path = self.parse_managed_path(path)
return ("%s:" % prefix,) + client.split(rel_path) | [
"def splitpath(self):\n \n pass",
"def splitPath(self, path):\n return os.path.split(path)",
"def splitpath(self):\n parent, child = os.path.split(self)\n return self.__class__(parent), child",
"def _split_path(self, path):\n if path.strip() in (None, \"\", \"/\"):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Retrieves a file from remote_path to local_path. | def get_file(self, remote_path, local_path, storage_id=None):
return self.get(remote_path, local_path, directory=False, storage_id=storage_id) | [
"def retrieve_file(self, remote_full_path, local_full_path):\n conn = self.get_conn()\n logging.info('Retrieving file from FTP: {}'.format(remote_full_path))\n conn.get(remote_full_path, local_full_path)\n logging.info('Finished retrieving file from FTP: {}'.format(\n remote_f... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Retrieves a full directory from remote_path to local_path. | def get_directory(self, remote_path, local_path, storage_id=None):
return self.get(remote_path, local_path, directory=True, storage_id=storage_id) | [
"def _remote_path(self):\n return self._remote_dir",
"def download(remote, local):\n remote = nodes.get_node_by_id(remote or session.cwd)\n local = local or os.getcwd()\n blobs.download_folder(remote, local)",
"def remote_path(self, volume):\n nfs_share = volume['provider_location']\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns stat on remote_path file | def stat(self, remote_path, storage_id=None):
client, remote_path = self._get_storage(remote_path, storage_id=storage_id)
return client.stat(remote_path) | [
"def file_stat(self, file_path):",
"def lstat(path):\n pass",
"def get_stat(spath, fn):\n cl_path = os.path.join(spath, fn)\n if os.path.exists(cl_path):\n return os.stat(cl_path)\n else:\n return os.stat(spath)",
"def stat(self, path: bytes) -> Any:\n return os.stat(self.stor... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Pushes a local_path file or directory to storage. | def push(self, local_path, remote_path, storage_id=None, lp=None):
if not os.path.exists(local_path):
raise RuntimeError('%s not found' % local_path)
if local_path == remote_path:
return None
LOGGER.info('Uploading %s to %s', local_path, remote_path)
client, remot... | [
"def push_local_file(self, local, remote):\n self.run_cmd('push %s %s' % (local, remote))\n return self.__output",
"def upload_to_gcs(local_path, gcs_path):\n gfile.Copy(local_path, gcs_path)",
"def push(self, localfiles: list, remotepath: str, sync: bool = False):\n coro = self._aadb.push... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Delete segments from a corpus in a storage. | def seg_delete(self, remote_path, corpus_id, seg_ids, storage_id=None):
client, remote_path = self._get_storage(remote_path, storage_id=storage_id)
return client.seg_delete(corpus_id, seg_ids) | [
"def delete_network_segments(self, tenant_id, network_segments):",
"def del_segment_translations(*args):\n return _ida_segment.del_segment_translations(*args)",
"def test_deleting_a_segment(self):\n pass",
"def delete(self, bbox_or_slices):\n if type(bbox_or_slices) is Bbox:\n requested_bbox =... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Modify segment from a corpus in a storage. | def seg_modify(self, remote_path, corpus_id, seg_id, tgt_id, tgt_seg, src_seg, storage_id=None):
client, remote_path = self._get_storage(remote_path, storage_id=storage_id)
return client.seg_modify(corpus_id, seg_id, tgt_id, tgt_seg, src_seg) | [
"def test_updating_a_segment(self):\n pass",
"def change_segment(self):\n logging.debug(\"change_segment\")\n word_string_list = list(self.word_string) # Making a mutable list from immutable string\n index_of_change = randint(0, len(self.word_string)-1)\n old_segment = word_str... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Add segments from a corpus in a storage. | def seg_add(self, remote_path, corpus_id, segments, storage_id=None):
client, remote_path = self._get_storage(remote_path, storage_id=storage_id)
return client.seg_add(corpus_id, segments) | [
"def add_segments(self, *segments):\n for s in segments:\n self._add_one(s)",
"def combine(self, corpus: Corpus):\n for sentence in corpus.sentences:\n self.addSentence(sentence)",
"def _segment(self, documents):\n unigram_docs = self._load_json('unigram_docs')\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Renames a file or directory on storage from old_remote_path to new_remote_path. | def rename(self, old_remote_path, new_remote_path, storage_id=None):
client_old, old_remote_path = self._get_storage(old_remote_path, storage_id=storage_id)
client_new, new_remote_path = self._get_storage(new_remote_path, storage_id=storage_id)
if client_old._storage_id != client_new._storage_i... | [
"def rename(self, old, new):\n\n if old == new:\n return\n\n node = self.cache.resolve(old, False)\n if not node:\n raise FuseOSError(errno.ENOENT)\n\n new_bn, new_dn = os.path.basename(new), os.path.dirname(new)\n old_bn, old_dn = os.path.basename(old), os.p... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Add a tag associated with a corpus. | def tag_add(self, remote_path, corpus_id, tag, storage_id=None):
client, remote_path = self._get_storage(remote_path, storage_id=storage_id)
return client.tag_add(corpus_id, tag) | [
"def addTag(nodeID, tag):",
"def add_tag(self, tag):\n self.tags.append(tag)",
"def add(self, tag):\n self.tags[tag.name] = tag",
"def addTag(self, tag):\n\t \tself.tags.append(tag)",
"def add_tag(self, training_dataset, name, value):\n self._tags_api.add(training_dataset, name, value)... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Remove a tag associated with a corpus. | def tag_remove(self, remote_path, corpus_id, tag, storage_id=None):
client, remote_path = self._get_storage(remote_path, storage_id=storage_id)
return client.tag_remove(corpus_id, tag) | [
"def remove_tag(self, tag: str) -> None:\n tags = self.get_tag_index()\n tags.remove(tag)\n self.write_tag_index(list(set(tags)))",
"def remove_tag(self, dataset: \"Dataset\", tag: \"DatasetTag\"):\n raise NotImplementedError",
"def delete_tag(tag):\n tag.destroy()",
"def remove... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Construct half of a queued batch, all of the same label | def _generate_half_batch(record_data, min_queue_examples, batch_size, num_steps, test_mode):
# From TF documentation: "The batching will be nondeterministic if num_threads > 1"
# Ok to have many threads for training, but in testing want a deterministic result.
if test_mode:
num_preprocess_threads = 1
else:
num_... | [
"def MakeBatch(self, buffer):\r\n raise NotImplementedError('Must implement MakeBatch in a subclass.')",
"def _generate_video_and_label_batch(image, label, min_queue_examples,\n batch_size, shuffle):\n pass",
"def _init_queued(volume):\n queued = np.zeros(volume.shape)\n queued[0, :, :] = 1\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Reads data from a binary file of cell image data. Creates an object with information about the sequence and batch that will be filled with data obtained from the queue by the FixedLengthRecordReader | def _read_from_file(queue, config, class_label):
class SequenceRecord(object):
pass
result = SequenceRecord()
# Dimensions of the images and the bytes they each take
# up in the binary file
result.height = config.image_size
result.width = config.image_size
result.depth = config.image_depth
result.sequence... | [
"def readBinary (self, filename=None):\n if filename is None:\n filename = self.pathname\n\n stream = open(filename, 'rb')\n magic = struct.unpack('>H', stream.read(2))[0]\n self.crc32 = struct.unpack('>I', stream.read(4))[0]\n self.seqid = struct.unpack('>H', stream.read(2))[0]... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the product 'abc' for a Pythagorean triple (a^2 + b^2 = c^2) whose sum a+b+c is equal to the given number. Utilizes Euclid's formula. | def find_pythagorean_triple_product(num):
m = 2
n = 1
while True:
a = m ** 2 - n ** 2
b = 2 * m * n
c = m ** 2 + n ** 2
if a ** 2 + b ** 2 == c ** 2 and a + b + c == num:
return a * b * c
else:
m += 1
if m >= num:
... | [
"def product_pythagorean_triplet(N):\n\tfor a in range(0,N):\n\t\tfor b in range(a+1,N):\n\t\t\tfor c in range(b+1,N):\n\t\t\t\tif a+b+c == 1000 and is_pythagorean_triplet(a,b,c):\n\t\t\t\t\treturn a*b*c\n\treturn \"N too small\"",
"def main():\n # Since a < b < c, we have a + b + c = 1000 > a + a + a, making ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
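For reference, Euclid's formula used above generates a triple (a, b, c) from integers m > n > 0 via a = m² − n², b = 2mn, c = m² + n². A minimal sketch:

```python
def euclid_triple(m, n):
    """Pythagorean triple from Euclid's formula (requires m > n > 0)."""
    a = m ** 2 - n ** 2
    b = 2 * m * n
    c = m ** 2 + n ** 2
    return a, b, c

a, b, c = euclid_triple(2, 1)   # (3, 4, 5)
assert a ** 2 + b ** 2 == c ** 2
```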
Creates a string list with the devices type to test the source code. CUDA devices will be test only in case the current hardware supports it. | def get_test_devices():
devices = ["cpu"]
if torch.cuda.is_available():
devices.append("cuda")
return devices | [
"def get_test_devices():\n\n # Assumption: CPU is always available\n devices = ['cpu']\n\n if torch.cuda.is_available():\n devices.append('cuda')\n\n return devices",
"def testing_platform(self):\n if self.device in ['CPU', 'XLA_CPU']:\n return 'CPU'\n elif self.device in ['GPU', 'XL... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Search for existing Service Catalog Provisioned Products. If none is found, it will then search for any in-progress deployments, since Control Tower deploys accounts serially. | def search_provisioned_products(search_pp_name, client: boto3.client) -> dict:
logger.info(f"Searching for {search_pp_name}")
response = client.search_provisioned_products(
AccessLevelFilter={
'Key': 'Account',
'Value': 'self'
},
Filters={
'SearchQuery... | [
"def scan_provisioned_products_single_page(self, **kwargs):\n return slurp(\n 'scan_provisioned_products',\n self.scan_provisioned_products,\n 'ProvisionedProducts',\n **kwargs\n )",
"def search_provisioned_products_single_page(self, **kwargs):\n return slurp(\n 'search... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Retrieve the Default Service Catalog Provisioning Artifact Id from the Service Catalog Product specified in the definition call. | def get_provisioning_artifact_id(product_name: str, client: boto3.client) -> str:
product_info = client.describe_product(
Name=product_name
)
logger.info(product_info)
for _product_info in product_info['ProvisioningArtifacts']:
if _product_info['Guidance'] == 'DEFAULT':
logg... | [
"def default_resource_discovery_association_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"default_resource_discovery_association_id\")",
"def default_resource_discovery_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"default_resource_discovery_id\")",
"def product_id(self... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Build an op used as a target for return values at given quantiles. | def _build_target_quantile_values_op(self):
batch_size = tf.shape(self._replay.rewards)[0]
###### Munchausen-specific
replay_action_one_hot = tf.one_hot(
self._replay.actions, self.num_actions, 1., 0., name='action_one_hot')
# tau * ln pi_k+1 (s')
replay_next_log_policy = utils.stable_scaled... | [
"def as_quantiles(*args, **kwargs):\n return _as_classification(Quantiles, *args, **kwargs)",
"def quantile_return(self, target_factor=None, quantile=10, by_group=None):\n if target_factor is None:\n target_factor = self.factor_code\n \n def _calc_ret(df, factor):\n q... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns a new fox instance. Expects a gender (true=male) and a location | def create_fox(a_male,a_location):
fox = None
if a_male:
fox = Fox()
else:
fox = Vixen()
fox.location = a_location
return fox | [
"def make_fake_person() -> models.Person:\n person = models.Person()\n fake = Faker(choice(config['person']['locals']))\n fake_person = fake.profile(sex=choice(config['person']['gender']))\n person.name = str(fake_person['name'])\n person.login = (\n fake_person['username'] + str(fake_person['... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Splits a date range into even buckets | def _split_date_range(start, end, intv):
previous = start
diff = (end - start) / intv
for i in range(1, intv):
current = start + diff * i
yield (previous, current)
previous = current
yield (previous, end) | [
"def test_split_ranges(self):\n start = datetime.utcnow() - pd.Timedelta(\"5H\")\n end = datetime.utcnow() + pd.Timedelta(\"5min\")\n delta = pd.Timedelta(\"1H\")\n\n ranges = QueryProvider._calc_split_ranges(start, end, delta)\n self.assertEqual(len(ranges), 5)\n self.asse... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
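A short usage example of the generator above, assuming `datetime` inputs (timedelta division handles the even split):

```python
from datetime import datetime

def _split_date_range(start, end, intv):
    # Yield `intv` evenly sized (previous, current) buckets.
    previous = start
    diff = (end - start) / intv
    for i in range(1, intv):
        current = start + diff * i
        yield (previous, current)
        previous = current
    yield (previous, end)

buckets = list(_split_date_range(datetime(2020, 1, 1), datetime(2020, 1, 5), 4))
# Four one-day buckets: (Jan 1, Jan 2), (Jan 2, Jan 3), (Jan 3, Jan 4), (Jan 4, Jan 5)
```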
RPC request to subscribe to specific type of transactions | def subscribe(self, topic_type, tx_filter=None):
request = protos.RequestSubscribe(type=topic_type, filter=tx_filter)
res = self.stub.subscribe(request)
for r in res:
yield r | [
"def subscribe(self, namespace, sub_strings=None):\n req = JSONRPCRequest('subscribe', [namespace, sub_strings])\n result = yield self._send(req)\n self._cache_jsonrpc_request(req)\n raise tornado.gen.Return(result)",
"def handle_xmlrpc_subscribe( s, subscription_dict, connection_dict ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
close the file handler | def close(self) -> None:
if self.file_handler:
self.file_handler.close() | [
"def close_file(self):\r\n self.file.close()",
"def close_file(file_handle):\n file_handle.close()",
"def close(self):\n self.f.close()",
"def close_file(self):\n self.file.close()",
"def close(self):\n self.fp.close()",
"def close(self, file_handle):\n if file_handle is ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
return the client name given a subversion string | def subverParseClient(s):
return s[1:].split(":")[0] | [
"def svn_client_ctx_t_client_name_get(svn_client_ctx_t_self): # real signature unknown; restored from __doc__\n return \"\"",
"def svn_ra_invoke_get_client_string_func(*args):\r\n return _ra.svn_ra_invoke_get_client_string_func(*args)",
"def get_client_name(self, obj):\n\t\treturn obj.client.name",
"def c... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Verify that every passed node is interconnected with all the other clients | def verifyInterconnect(nodes, clientTypes=clientSubvers):
for n in nodes:
connectedTo = set()
myclient = subverParseClient(n.getnetworkinfo()["subversion"])
pi = n.getpeerinfo()
for p in pi:
connectedTo.add(subverParseClient(p["subver"]))
notConnectedTo = clientT... | [
"def test_connections(self) -> None:\n for server in self.servers:\n server.verify_connected()",
"def test_nodes_connected(graph_multi_node):\n assert 'gn1' in graph_multi_node.gnodes",
"def test_connection_is_established(self):\n assert self.connection_node_1.is_connected is True\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check if a cookie is expired. | def is_cookie_expired(cookie_name):
if cookie_name:
expires = int
timestamp = int(time.time())
for cookie in __request_session.cookies:
if cookie.name == cookie_name:
expires = cookie.expires
else:
return None
if timestamp > exp... | [
"def is_expired(self):\n return int(time.time()) - self.time > self.interval",
"def token_is_expired(self):\n # type: () -> bool\n token = self.token\n if not token:\n return False\n\n return token[\"expires_at\"] < time()",
"def has_expired(self):\n return d... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check a cookie by name to see if it exists. | def has_cookie(cookie_name):
if cookie_name in __request_session.cookies:
log.debug('cookie found: %s' % __request_session.cookies[cookie_name])
return __request_session.cookies[cookie_name]
log.debug('no cookie named: %s found.' % cookie_name)
return False | [
"def load_cookie(cookie_name: str) -> bool:\n if (os.path.exists(cookie_file)):\n func_name = sys._getframe(1).f_code.co_name\n try:\n with open(cookie_file, 'rb') as fr:\n cookies = pickle.load(fr)\n \n cookie = next(x for x in cookies if x.n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Load a genbank file as a Biopython object. | def load_genbank(path):
with open(path, 'r') as fd:
try:
genbank = SeqIO.read(fd, 'genbank')
except Exception as err:
raise Exception(path + '\t' + str(err))
return genbank | [
"def load_genbank(seqfile):\n parser = GenBank.FeatureParser()\n input_handle = open(seqfile, 'rU')\n gb_record = parser.parse(input_handle)\n input_handle.close()\n return gb_record",
"def from_genbank(cls, filename):\n\t\tseq_record = SeqIO.read(filename, 'genbank')\n\t\trec = cls(seq_record=seq_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Generate all import files for a given genbank file path to an output_dir. Will produce CSV files for each collection (filename = collection name) | def generate_genome_import_files(genbank_path, output_dir):
genbank = load_genbank(genbank_path)
genome_path = os.path.join(output_dir, _genome_vert_name + '.json')
write_import_file(generate_genome(genbank), genome_path)
gene_path = os.path.join(output_dir, genbank.id, _gene_vert_name + '.json')
wr... | [
"def exported_files(self):\n\n sep = os.path.sep\n for root, dirs, files in os.walk(self.output):\n if files:\n base = os.path.relpath(root, self.output)\n if base == '.':\n base = ''\n else:\n if sep != '/':... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Generate gene rows for every feature in a genbank object. | def generate_genes(genbank):
for (idx, feature) in enumerate(genbank.features):
if feature.type == 'source' or feature.type == 'gene':
continue
row = {
'location_start': feature.location.start,
'location_end': feature.location.end,
'strand': feature.st... | [
"def generate_gene_edges(genbank):\n genome_key = genbank.id\n genome_id = _genome_vert_name + '/' + genome_key\n for (idx, feature) in enumerate(genbank.features):\n # Skip the 'source' feature, which describes the entire genome\n if feature.type == 'source' or 'locus_tag' not in feature.qua... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Generate gene-to-genome edges for every feature in a genbank object. | def generate_gene_edges(genbank):
genome_key = genbank.id
genome_id = _genome_vert_name + '/' + genome_key
for (idx, feature) in enumerate(genbank.features):
# Skip the 'source' feature, which describes the entire genome
if feature.type == 'source' or 'locus_tag' not in feature.qualifiers:
... | [
"def generate_genes(genbank):\n for (idx, feature) in enumerate(genbank.features):\n if feature.type == 'source' or feature.type == 'gene':\n continue\n row = {\n 'location_start': feature.location.start,\n 'location_end': feature.location.end,\n 'strand'... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Given a permutation of {0,1,...,n-1}, return the 2^n by 2^n permutation matrix representing the permutation of qubits (big-endian convention). | def lift_perm(p: Dict[int, int]) -> np.ndarray:
n = len(p)
pm = np.zeros((1 << n, 1 << n), dtype=complex)
for i in range(1 << n):
j = 0
mask = 1 << n
for q in range(n):
mask >>= 1
if (i & mask) != 0:
j |= 1 << (n - 1 - p[q])
pm[j][i] = ... | [
"def matrixperms(N=2):\n P = np.zeros((N, N, np.math.factorial(N)))\n for k, p in enumerate(permutations(range(N))):\n i, j = np.arange(N), np.array(p)\n P[i, j, k] = 1\n return P",
"def makePermutationMatrix(permList):\n permList = scipy.array(permList)\n n = len(permList)\n if 0 ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
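As a sanity check on `lift_perm` above: swapping the two qubits of a 2-qubit register should yield the 4×4 SWAP matrix. A sketch reusing the document's construction, with numpy assumed:

```python
import numpy as np
from typing import Dict

def lift_perm(p: Dict[int, int]) -> np.ndarray:
    # Same construction as the document above.
    n = len(p)
    pm = np.zeros((1 << n, 1 << n), dtype=complex)
    for i in range(1 << n):
        j = 0
        mask = 1 << n
        for q in range(n):
            mask >>= 1
            if (i & mask) != 0:
                j |= 1 << (n - 1 - p[q])
        pm[j][i] = 1
    return pm

swap = lift_perm({0: 1, 1: 0})
assert np.allclose(swap, [[1, 0, 0, 0],
                          [0, 0, 1, 0],
                          [0, 1, 0, 0],
                          [0, 0, 0, 1]])
```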
Translate tk1 to a RzRxRz so AerUnitaryBackend can simulate | def _tk1_to_rotations(a: float, b: float, c: float) -> Circuit:
circ = Circuit(1)
circ.Rz(c, 0).Rx(b, 0).Rz(a, 0)
return circ | [
"def setT1Button(self):\n self.T1Button = qt.QPushButton(\"Create T1 Mapping\")\n self.T1Button.toolTip = \"Create the T1 Mapping of the Scalar Volumes selected\"\n self.T1Button.enabled = False\n self.InputOutput_Layout.addRow(self.T1Button)",
"def getSpinControl(*args):",
"def _tf1_ ( self , *args... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Report the result of an authentication request. This endpoint returns the result of the request made in the '/auth/zenkeyasyncsignin' endpoint, identified by auth_req_id. If the request was successful, then the server creates and returns a token which can be used to authorize API calls. It must be included in an Author... | def async_token_result(auth_req_id):
# create a new user based on auth request so that each auth request returns a different token
new_user_params = {
'zenkey_sub': auth_req_id,
'name': 'Mock User',
'phone_number': '+15555555555',
'postal_code': '55555',
'email': 'mockus... | [
"def authenticated_request(**kwargs):\n return authenticated_request_async(**kwargs).get_result()",
"def continueWithAuth(\n self, requestId: str, authChallengeResponse: Dict[str, Any]\n ) -> Awaitable[Dict]:\n return self.client.send(\n \"Fetch.continueWithAuth\",\n {\"req... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Retry an authentication request. This endpoint retries the request made in the '/auth/zenkeyasyncsignin' endpoint, identified by auth_req_id, and has the same return value. | def async_token_retry(auth_req_id):
return jsonify({'auth_req_id': auth_req_id}) | [
"def _request_retry(self, req, url, **kwargs):\n logger.debug('Entered QiitaClient._request_retry()')\n url = self._server_url + url\n retries = MAX_RETRIES\n while retries > 0:\n retries -= 1\n r = self._request_oauth2(req, url, verify=self._verify, **kwargs)\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Cancel an authentication request. This endpoint cancels the request made in the '/auth/zenkeyasyncsignin' endpoint, identified by auth_req_id, and just returns a status, 200 if successful. | def async_token_cancel(auth_req_id): #pylint: disable=unused-argument
return "" | [
"def nexmo_cancel(request):\n state = request.validated[\"querystring\"][\"state\"]\n\n # Require on-going session\n state_info = request.registry.cache.get(state)\n\n if not state_info:\n error_msg = \"The Nexmo session was not found, please re-authenticate.\"\n return http_error(\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Grant token request. This endpoint grants the token request made in the '/auth/zenkeyasyncsignin' endpoint, identified by auth_req_id, and has the same return value. The ZenKey carrier hits this endpoint. | def async_token_grant():
required_params = ['auth_req_id',
'state',
'scope']
optional_params = ['access_token',
'expires_in',
'refresh_token',
'id_token',
'error',
... | [
"def auth_access_token_request(self, auth_access_token_request):\n\n self._auth_access_token_request = auth_access_token_request",
"def authorize_request_token(self, request, oauth_request, request_token):\n raise NotImplementedError",
"def authorize_request_token(self, context, request_token_id, ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The page's constructor passes the page's implemented entityBlocks attribute, or the optional entityBlocks parameter, to the Template base class constructor. | def __init__(self, entityBlocks):
Template.__init__(self, entityBlocks) | [
"def __init__(self, entityBlocks=None):\n\t\tif entityBlocks:\n\t\t\tself.entityBlocks = entityBlocks\n\t\tTemplate.__init__(self, self.entityBlocks)",
"def __init__(self, **kwargs):\n child_block = CharBlock(required=True)\n super(ULBlock, self).__init__(child_block, **kwargs)",
"def create_page(... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Terminate the server process | def terminate(self):
if self.proc:
logging.info("Terminating Proxy Server...")
self.proc.terminate()
self.proc = None | [
"def close_server(self):\n os.popen(closeServer)",
"def terminate(self):\n self._stop_proc(signal.SIGTERM)",
"def terminate(self):\n self._assert_bound()\n self._context.terminate(self.pid)",
"def stop():\n server = current_server()\n server.stop()",
"def terminate_server(self, por... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create an RPC server that uses a websocket to connect to a proxy. | def websocket_proxy_server(url, key=""):
def create_on_message(conn):
def _fsend(data):
data = bytes(data)
conn.write_message(data, binary=True)
return len(data)
on_message = rpc._CreateEventDrivenServer(_fsend, "WebSocketProxyServer")
return on_message
... | [
"def create(addr='127.0.0.1', port=0, options=None):\n if options is None:\n options = {}\n\n backend = MitmProxy(addr, port, options)\n\n t = threading.Thread(name='Selenium Wire Proxy Server', target=backend.serve_forever)\n t.daemon = not options.get('standalone')\n t.start()\n\n addr, p... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Initializes the embeddings, depending on the embedding type. | def _initialize_embeddings(self):
with tf.variable_scope(self.scope):
init_temporal_s = np.sqrt(
6. / (self._config.nact_dict["num_s"] + self._config.ndim_emb + 1))
self.w_dt = tf.get_variable(
name="w_dt",
shape=[1, self._config.ndim_emb],
initializer=tf.initial... | [
"def init_emb(self):\n # Initialize users and items' embeddings\n nn.init.xavier_uniform_(self.user_embedding.weight)\n nn.init.xavier_uniform_(self.item_embedding.weight)",
"def set_embeddings(self):",
"def _init_embeddings(self):\n super()._init_embeddings()\n self.relation_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Gets the total expected size of the embedding. | def get_total_embedding_size(self) -> Union[int, List[int]]:
features = self._config.context_features + self._config.sequential_features
feature_dims = [self._embed_dim_dict[feat] for feat in features]
if self._config.embedding_combination_method == (
types.EmbeddingCombinationMethod.SUM_ALL):
... | [
"def embeddings_size(self):\n return self.hidden_size",
"def get_expected_size(self):\n if not self._fw_info_leaf_bytes:\n return 0\n return len(self._fw_info_leaf_bytes)",
"def get_vocab_size(self):\n return self.vocab.get_size()",
"def get_final_emb_size(self):\n size = self.... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Embeds data dictionary and creates inputs to temporal model. | def embed_data(
self,
data: Dict[str, tf.SparseTensor]
) -> Tuple[tf.Tensor, tf.Tensor]:
batch_shape = tf.shape(data["t"])[:-1]
flat_data = nest.map_structure(batches.flatten_batch, data)
flat_data = nest.map_structure(batches.sparse_fill_empty_rows, flat_data)
context_embeddings = (... | [
"def build(data_in: str, data_out: str):\n pretty_print('Downloading model...')\n model = download_model()\n df = pd.read_csv(data_in)\n generate_embeddings(data_out, df, model)\n pretty_print('Done. Embeddings successfully created... 🤙')",
"def embed_episodes(data, embedder):\n all_segment_emb... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Gets the regularization loss for embedding weights. | def get_embedding_regularization_loss(self) -> tf.Tensor:
sparse_lookup_regularization = self._config.sparse_lookup_regularization
sparse_lookup_regularization_weight = (
self._config.sparse_lookup_regularization_weight)
encoder_regularization = self._config.encoder_regularization
encoder_regula... | [
"def get_regularization_loss(self):\n # iterate through all layers\n layer_norms = []\n for regularization, parameter_group in self.regularization_groups.items(\n ):\n if regularization > 0.0:\n # L2 regularization\n for parameters in parameter_gr... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Combines embeddings into one input for the model. The embeddings can be combined in different ways and this function encapsulates that logic and returns an input vector based on the combination method that is specified. | def _combine_embeddings_for_input(
self, embedding_dict: Dict[str, int]) -> tf.Tensor:
if self._config.embedding_combination_method == (
types.EmbeddingCombinationMethod.SUM_ALL):
return sum(embedding_dict.values())
elif self._config.embedding_combination_method == (
types.EmbeddingC... | [
"def combine_word_vectors(self, embeddings):\n # the embeddings are combined according to the operation given in\n # the configuration\n combined = self.embedding_combiner(embeddings, axis=0)\n # make sure the resulting vector has the dimensions 1 x embedding_dim\n assert combined... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Gets the suffix of a feature from its name. | def get_feature_suffix(feature_name: str) -> str:
if "_" not in feature_name:
return ""
return feature_name.split("_")[-1] | [
"def suffix(self):\n name = self.name\n i = name.rfind('.')\n if 0 < i < len(name) - 1:\n return name[i:]\n else:\n return ''",
"def get_suffix(self, word):\r\n if word in self.w2i:\r\n return word[-ut.UNIT_SUB_WORD:]\r\n return ut.UNK[-ut... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
In the CoAP client read method, different exceptions can arise from the DTLS stack. Depending on the type of exception, a continuation might not be possible, or a logging might be desirable. With this callback both needs can be satisfied. | def _cb_ignore_read_exception(self, exception, client):
return False | [
"def _cb_ignore_read_exception(self, exception, client):\n return False",
"def _cb_ignore_read_exception(self, exception, client):\r\n return False",
"def readException(self):\n pass",
"def test_wantReadError(self):\n ctx = Context(SSLv23_METHOD)\n conn = Connection(ctx, None)\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
In the CoAP client write method, different exceptions can arise from the DTLS stack. Depending on the type of exception, a continuation might not be possible, or a logging might be desirable. With this callback both needs can be satisfied. | def _cb_ignore_write_exception(self, exception, client):
return False | [
"def _cb_ignore_write_exception(self, exception, client):\n return False",
"def _cb_ignore_write_exception(self, exception, client):\r\n return False",
"def failure(self, cb: CircuitBreaker, exc: BaseException) -> None:",
"def callback_error(self, delivery_tag, exception):\n _LOG.warning(\"Call... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Set current sensor values +10% as the comparison baseline for the trigger | def set_sensor_baseline(self, sensor_data=None):
cont = self.continuous(sensor_data)
for idx,val in enumerate(cont):
self.thresholds[idx][0] = val*1.1 | [
"def update_sensors(self):\n\n # Empty sensor\n if (self.t_l_l < 10):\n self.ls_1= 0\n else:\n self.ls_1= 1\n\n # Full sensor\n if (self.t_l_l > 90):\n self.ls_2= 1\n else:\n self.ls_2= 0\n\n # If tank is empty, reset mix p... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that you are able to retrieve a list of all users ranked by win percentage | def test_get_user_rankings(self):
user = User(name=u'no win', email=u'generic@thingy.com')
user.put()
userone = User(name=u'one win', email=u'generic@thingy.com', total_played=1, wins=1)
userone.put()
usertwo = User(name=u'two wins', email=u'generic@thingy.com',... | [
"def test_players_account_id_rankings_get(self):\n pass",
"def test_ranking_results():\n ranking = calculate_ranking(EXPERIMENT.results_)\n assert set(ranking.Dataset.unique()) == set(EXPERIMENT.datasets_names_)\n assert set(ranking.Classifier.unique()) == set(EXPERIMENT.classifiers_names_)\n a... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
General Gaussian elimination. Solve Av = b, for `v`. `A` is a square matrix with dimensions (n,n) and `b` has dim (n,) | def gaussian_elimination(A, b):
n = len(b)
# Join A and b
ab = np.c_[A,b]
# Gaussian Elimination
for i in range(n-1):
if ab[i,i] == 0:
raise ZeroDivisionError('Zero value in matrix..')
for j in range(i+1, n):
ratio = ab[j,i] / ab[i,i]
for k in ra... | [
"def gaussElimin(a,b):\n a=float64(a)\n b=float64(b)\n n=len(b)\n x=zeros((n,1),dtype=float)\n for k in range(n-1):\n for i in range(k+1,n):\n l=float(a[i][k])/a[k][k]\n\t a[i][k]=0\n\t for j in range(k+1,n):\n\t a[i][j]=a[i][j]-l*a[k][j]\n\t b[i]=b[i]-l*b[k]\n x[n-1]=float(b... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
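The elimination loop in the document above is truncated; after forward elimination, back substitution recovers `v`. A hedged end-to-end sketch under the same problem setup, with NumPy's solver used only to verify the result:

```python
import numpy as np

def gaussian_elimination(A, b):
    """Solve Av = b by forward elimination then back substitution."""
    n = len(b)
    ab = np.c_[np.asarray(A, dtype=float), np.asarray(b, dtype=float)]
    # Forward elimination: zero out entries below each pivot.
    for i in range(n - 1):
        if ab[i, i] == 0:
            raise ZeroDivisionError('Zero pivot in matrix.')
        for j in range(i + 1, n):
            ab[j, :] -= (ab[j, i] / ab[i, i]) * ab[i, :]
    # Back substitution from the last row upward.
    v = np.zeros(n)
    for i in range(n - 1, -1, -1):
        v[i] = (ab[i, n] - ab[i, i + 1:n] @ v[i + 1:]) / ab[i, i]
    return v

A = [[2.0, 1.0], [1.0, 3.0]]
b = [3.0, 5.0]
assert np.allclose(gaussian_elimination(A, b), np.linalg.solve(A, b))
```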
Parses YAML document using filepath, returns dict. | def load(filePath):
stream = open(filePath, 'r')
yamlDict = yaml.safe_load(stream)
return yamlDict | [
"def parse_from_yaml(self, yaml_file_path: Text) -> Dict[Any, Any]:\r\n with tf.io.gfile.GFile(yaml_file_path, 'r') as f:\r\n config_dict = yaml.load(f, Loader=yaml.FullLoader)\r\n return config_dict",
"def read_yaml(filepath):\n with open(filepath) as stream:\n data = yaml.safe_load(stream... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Processes pyYAML output; resolves references and evaluates arithmetic expressions. | def process(yamlDict, subDict=None, path=[], first=True):
if subDict is None:
subDict = yamlDict.copy()
for key, value in subDict.items():
if first:
first = False
path = path + [key]
else:
path[-1] = key
if isinstance(value, dict):
... | [
"def float_validation_expanded():\n yield _get_parsed_yaml(\"float-validation.yaml\")",
"def test_parse_yaml(self) -> None:\n pass",
"def main():\n res = []\n\n all_lines = [l.strip() for l in sys.stdin]\n\n # Possibly the stats are empty? This can happen if nothing passes.\n if all_lines:... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The getTestObjects method returns a temporary TestObject for the passed GMLFile. | def requestTestObjects(self, gmlFile: str) -> dict:
jsonData = {}
url = '%s%s' % (self.__urlWebApp, '/v2/TestObjects?action=upload')
headers = {'Accept': 'application/json'}
with open(gmlFile, 'rb') as f:
files = {'file': f}
r = requests.post(url, files=files... | [
"def _get_example_objects(self, annon_filepath):\n with tf.io.gfile.GFile(annon_filepath, \"r\") as f:\n root = xml.etree.ElementTree.parse(f).getroot()\n\n for obj in root.findall(\"object\"):\n # Get object's label name.\n label = obj.find(\"name\").text.lower()\n # Get objects' ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The requestTestRunsProgress method returns true once the TestRun has completed. | def requestTestRunsProgress(self, testRunRef: str) -> bool:
url = testRunRef.replace('.json', '/progress?pos=0')
ist = 0; soll = 100
while ist < soll:
time.sleep(8)
r = requests.get(url, proxies=self.__proxies)
if r.status_code == 200:
... | [
"def test__progress_callback(self):\n backend = self.test_init_valid()\n\n fake_total_size = 500\n num_tests = 10\n progress = 0\n\n for i in range(num_tests):\n result = backend._progress_callback(i * fake_total_size/num_tests, fake_total_size)\n self.assert... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
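
The requestTestRunsProgress document polls in a fixed sleep loop with no upper bound, so an unresponsive service would hang it forever. A hedged sketch of the same pattern with an explicit deadline; the URL handling and the {"val": ..., "max": ...} JSON shape are assumptions based on the visible fragment, not a confirmed etf-validator API:

import time
import requests

def poll_until_done(url, interval=8, timeout=600):
    """Poll `url` until progress reaches its maximum or `timeout` elapses."""
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        r = requests.get(url)
        if r.status_code == 200:
            progress = r.json()   # assumed shape: {"val": <done>, "max": <total>}
            if int(progress['val']) >= int(progress['max']):
                return True
        time.sleep(interval)
    return False
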
Method requestTestRunResults returns the result of the TestRun. | def requestTestRunResults(self, testRunRef: str) -> dict:
jsonData = {}
url = testRunRef
r = requests.get(url, proxies=self.__proxies)
if r.status_code == 200:
jsonData = json.loads(r.text)
else:
message = '%s: %s' % (r.status_code, 'Test Run Resu... | [
"def get(self):\n parser = reqparse.RequestParser()\n\n add_filter_query_parameters(parser, 'tests_runs')\n args = parser.parse_args()\n filter = parse_filter_query_parameters(args, 'tests_runs')\n\n running_tests = get_running_tests(constraints=filter)\n\n return {'tests_runs' : running_tests}, 2... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
evaluates other metrics for the dataset | def __evaluate_other_metrics(dataset, m, y_act, y_pred):
return evaluate_metric(y_act, y_pred, m, dataset.y_n_classes) | [
"def evaluate(self, dataset):\n\t\tpass",
"def _evaluate(dataset: dict, name: str, metrics=None):\n if metrics is None:\n metrics = ['Accuracy', 'AUROC', 'AUPRC', 'Precision', 'Recall', 'F1', 'F2']\n measures = [dataset[metric] for metric in metrics]\n measures.insert(0, name)\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Strips the string value if and only if at least one of its characters is not " ". | def strip_if_not_blank(value):
if any([i != " " for i in value]):
return value.strip()
return value | [
"def _strip(string):\n string = re.sub(r'^\\W*', '', string)\n string = re.sub(r'\\W*$', '', string)\n return string",
"def _clean(self, string):\n return re.sub('\\s+', ' ', string).strip()",
"def filter_empty(string):\n\n content = string.split()\n content = [filter(lambda x : x != \"\"... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Safely joins a sequence of items into a single string, skipping empty/null values so they cannot corrupt the final spacing, and allowing prefix/suffix values to be attached to each element when the element is not null. | def space_join(*items):
valid_items = []
for item in items:
if item is None:
continue
if isinstance(item, tuple):
if item[0] is None:
continue
stripped = strip_if_not_blank(item[0])
if not is_null(stripped):
if len(i... | [
"def join(self, iterable): # real signature unknown; restored from __doc__\n return \"\"",
"def join(self, iterable) -> String:\n pass",
"def join(self, iterable):\n result = ANSIString(\"\")\n last_item = None\n for item in iterable:\n if last_item is not None:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
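
The space_join document above breaks off before the join itself. A self-contained sketch consistent with the visible logic: None items and blank strings are skipped, and tuple items carry per-element decoration. The (value, prefix, suffix) tuple layout is an assumption, since the record only ever inspects item[0]:

def space_join(*items):
    """Join items with single spaces, skipping None and blank values."""
    parts = []
    for item in items:
        prefix, suffix = '', ''
        if isinstance(item, tuple):
            # Assumed layout: (value, prefix, suffix); only item[0]
            # is visible in the original record.
            item, prefix, suffix = (list(item) + ['', ''])[:3]
        if item is None:
            continue
        text = str(item).strip()
        if text:
            parts.append(prefix + text + suffix)
    return ' '.join(parts)

# e.g. space_join('a', None, ('b', '<', '>'), '   ') -> 'a <b>'
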
Given a child and a parent, tries to infer whether or not the child is an extension of the parent. | def extends_or_instance_of(child, parent):
if isinstance(child, six.string_types):
raise ValueError("The child cannot be of string type.")
if isinstance(parent, six.string_types):
if isinstance(child, type):
bases = classlookup(child)
return (
parent in [... | [
"def isAncestorOf(ancestor, child):\n\twhile child is not None:\n\t\tif child is ancestor:\n\t\t\treturn True\n\t\tchild = child.parent()\n\treturn False",
"def inherits_from(child, parent_name):\n if inspect.isclass(child):\n if parent_name in [c.__name__ for c in inspect.getmro(child)[1:]]:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
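
The extends_or_instance_of document is truncated inside its string-parent branch. For reference, a minimal sketch of the full check: when the parent is an actual class the whole thing collapses to the issubclass/isinstance builtins, and the string-parent case compares names along the MRO, as the record starts to do:

import inspect

def extends_or_instance_of(child, parent):
    """True if `child` is (an instance of) a subclass of `parent`."""
    if isinstance(child, str):
        raise ValueError("The child cannot be of string type.")
    if isinstance(parent, str):
        cls = child if inspect.isclass(child) else type(child)
        return parent in [c.__name__ for c in inspect.getmro(cls)]
    if inspect.isclass(child):
        return issubclass(child, parent)
    return isinstance(child, parent)

# e.g. extends_or_instance_of(bool, int) is True, and so is
# extends_or_instance_of(True, int)
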
Does a POST request to /accounts/viewaccount.{ResponseType} and displays the account description. | def create_view_account(self,
options=dict()):
# Validate required parameters
self.validate_parameters(date = options.get("date"))
# Prepare query URL
_query_builder = Configuration.get_base_uri()
_query_builder += '/accounts/viewaccount.{Res... | [
"def test_account_view_good(flask_server, create_account):\n import json\n import requests\n\n data = create_account\n\n req = requests.post('{}/account/view'.format(API_URL), data=data)\n assert req.status_code == 200\n assert json.loads(req.content.decode('utf-8')) == [data['name'], data['code']... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Build omega and weight for one quadrant. | def Build_quadrant(self) :
self.omega = np.zeros((self.n_dir,3))
self.weight = np.zeros((self.n_dir))
if self.sn==2 :
direction = 0.577350269189625764509149
weight = 1.
self.omega[0,0] = direction
self.omega[0,1] = direction
self.omega[0,2] = direction
self.weight... | [
"def _quad_points(self):\n\n def H():\n p = Jacobi(self.n, self.alpha, self.beta)\n\n ans = numpy.power(2, self.alpha+self.beta+1)\n ans *= factorial_division(self.n, self.n+self.alpha)\n ans /= factorial_division(\n self.n+self.beta, self.n+self.alp... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
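
In the Build_quadrant record, the magic constant 0.577350269189625764509149 is 1/sqrt(3): with all three direction cosines equal, it is the unique positive value that puts the single S2 direction per quadrant on the unit sphere. A two-line check:

import numpy as np

mu = 1.0 / np.sqrt(3.0)                # = 0.5773502691896258
assert np.isclose(3.0 * mu ** 2, 1.0)  # mu^2 + eta^2 + xi^2 = 1 on the unit sphere
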
Initializes a list of Log Levels from the given level names. | def init(cls, levels: List[str]) -> List[Level]:
return [cls(lvl, val) for val, lvl in enumerate(levels)] | [
"def _initialize(self: _LoggerProtocol):\n self.level: _IntOrStr = 1\n self.log_records: List[LogRecord] = []",
"def get_log_levels(cls):\n return [\n cls.LOG_LEVEL_CRITICAL,\n cls.LOG_LEVEL_ERROR,\n cls.LOG_LEVEL_WARNING,\n cls.LOG_LEVEL_INFO,\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
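
The init classmethod in the Level record enumerates the supplied names, so each name's position becomes its numeric value. Assuming a Level(name, value) constructor (only implied by cls(lvl, val)), usage would look like:

levels = Level.init(["DEBUG", "INFO", "WARNING", "ERROR"])
# -> [Level("DEBUG", 0), Level("INFO", 1), Level("WARNING", 2), Level("ERROR", 3)]
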
Load annotation from the text file (read the coordinates from the annotation file). | def _load_annotation(self, txt_file):
text_polys = []
text_tags = []
if not os.path.exists(txt_file):
return np.array(text_polys, dtype=np.float32),None
with open(txt_file, 'r') as f:
reader = csv.reader(f)
for line in reader:
# strip ... | [
"def load_annotation_txt_data(txt_file_path):\n if not os.path.exists(txt_file_path):\n raise FileNotFoundError\n\n annotation = dict()\n\n # Grab the object instance ids\n annotation['instances'] = dict()\n\n with open(txt_file_path, 'r') as f:\n lines = f.readlines()\n lines = [lin... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
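
The _load_annotation document stops mid-parse. ICDAR-style annotation files carry eight comma-separated coordinates (four x,y corners) plus a transcription per line, with '###' marking regions to ignore; a sketch under that assumption (the exact column layout of this record's data is not shown):

import csv
import os
import numpy as np

def load_annotation(txt_file):
    """Read text polygons and ignore-tags from an ICDAR-style annotation file."""
    text_polys, text_tags = [], []
    if not os.path.exists(txt_file):
        return np.array(text_polys, dtype=np.float32), None
    with open(txt_file, 'r') as f:
        for line in csv.reader(f):
            line = [cell.strip('\ufeff').strip() for cell in line]  # strip BOM/space
            x1, y1, x2, y2, x3, y3, x4, y4 = map(float, line[:8])
            text_polys.append([[x1, y1], [x2, y2], [x3, y3], [x4, y4]])
            text_tags.append(line[8] == '###')   # True means "ignore this region"
    return np.array(text_polys, dtype=np.float32), np.array(text_tags, dtype=bool)
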
Given a nonempty array of integers arr and an integer k, return the sum of the elements with at most two digits from the first k elements of arr. | def add_elements(arr, k):
    return sum(elem for elem in arr[:k] if len(str(elem)) <= 2)
"def twoSumLessThanK(nums, k):\n nums.sort()\n start = 0\n end = len(nums) - 1\n max_sum = -1\n while start < end:\n curr_sum = nums[start] + nums[end]\n if curr_sum < k:\n max_sum = max(curr_sum, max_sum)\n if curr_sum >= k:\n end -= 1\n else:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
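
A quick illustration of add_elements: of the first k = 4 elements below, only 21 and 3 have at most two digits, so the result is 24.

assert add_elements([111, 21, 3, 4000, 5, 50, 8], 4) == 24   # 21 + 3
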
_dqmHarvesting_ DQM Harvesting for RelVal MC production | def dqmHarvesting(self, datasetName, runNumber, globalTag, **args):
options = defaultOptions
options.scenario = "pp"
options.step = "HARVESTING:validationHarvesting+dqmHarvesting"
options.isMC = True
options.isData = False
options.beamspot = None
options.eventcont... | [
"def testDQMHarvest(self):\n testArguments = DQMHarvestWorkloadFactory.getTestArguments()\n testArguments.update(REQUEST)\n testArguments.update({\n \"DQMConfigCacheID\": self.injectDQMHarvestConfig(),\n \"LumiList\": {\"251643\": [[1, 15], [50, 70]], \"251721\": [[50, 100... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create a WKT polygon for the corresponding mesh code. | def create_polygon(meshcode):
lat1,lon1 = ju.to_meshpoint(meshcode,0,0)
lat2,lon2 = ju.to_meshpoint(meshcode,1,1)
poly_text = 'POLYGON (('+str(lon1)+' '+str(lat1)+','+str(lon1)+' '+str(lat2)+','+str(lon2)+' '+str(lat2)+','+str(lon2)+' '+str(lat1)+','+str(lon1)+' '+str(lat1)+'))'
return poly_text | [
"def generatePolygons():",
"def create_polygon(self, *args, **kw):\n return self._create('polygon', args, kw)",
"def _createpoly(self):\n return self.cv.create_polygon((0, 0, 0, 0, 0, 0), fill=\"\", outline=\"\")",
"def make_polygon(*coords):\n global GEOMETRY_SURF, POLYGONS,col\n if len(c... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
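
create_polygon builds the WKT ring by string concatenation, which is easy to get wrong (corner order, missing ring closure). An equivalent sketch using shapely's box helper, assuming as the record does that jismesh's to_meshpoint yields the two opposite corners of the mesh cell:

import jismesh.utils as ju
from shapely.geometry import box

def create_polygon(meshcode):
    """Return the WKT polygon covering the given JIS mesh code."""
    lat1, lon1 = ju.to_meshpoint(meshcode, 0, 0)  # south-west corner
    lat2, lon2 = ju.to_meshpoint(meshcode, 1, 1)  # north-east corner
    return box(lon1, lat1, lon2, lat2).wkt        # (minx, miny, maxx, maxy)
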
Downloads a new file and replaces the current one. | def downloadAndReplaceFile(file_path, download_url):
file = urllib.request.urlopen(download_url)
with open(file_path, 'wb') as output:
output.write(file.read()) | [
"def downloadfile(self):\n req = requests.get(self.url, stream=True)\n mdsha256 = hashlib.sha256()\n with gzip.open(self.file_path, \"wb\") as gfile:\n for line in req.iter_lines():\n if line:\n gfile.write(line + b\"\\n\")\n mdsha... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
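
downloadAndReplaceFile buffers the whole response in memory before writing, which is fine for small files but not large ones. A streamed variant is the usual pattern; this sketch swaps urllib for requests:

import requests

def download_and_replace_file(file_path, download_url):
    """Stream the download to disk in chunks instead of buffering it all."""
    with requests.get(download_url, stream=True, timeout=30) as r:
        r.raise_for_status()
        with open(file_path, 'wb') as output:
            for chunk in r.iter_content(chunk_size=8192):
                output.write(chunk)
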
Check for updates by connecting to an endpoint that returns the latest versions | def checkForUpdates(self):
url = self.config.get_conf("Client", "versions-url")
try:
self._logger.info("Checking for updates...")
response = requests.get(url)
if 200 <= response.status_code <= 300:
data = response.json()[self.mission_name]
... | [
"def check_updates(self):\n try:\n if not common.latest_version(version):\n self.update_notify()\n except:\n self.neterror()",
"def download_updates_if_available(self):\n current_version = self.get_version(self.modulepath(self.main_dir))\n latest_ve... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
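
The checkForUpdates document is truncated before the actual comparison. Plain string comparison mis-orders dotted versions ('1.10.0' sorts below '1.9.3'), so a numeric tuple compare is the safe sketch; the dotted-integer version format is an assumption, not taken from the record:

def is_newer(latest, current):
    """True if `latest` is a strictly newer dotted version than `current`."""
    def parse(v):
        return tuple(int(part) for part in v.split('.'))
    return parse(latest) > parse(current)

# e.g. is_newer('1.10.0', '1.9.3') -> True, while the plain string
# comparison '1.10.0' > '1.9.3' evaluates to False
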