Dataset columns: query (string, lengths 9 to 9.05k), document (string, lengths 10 to 222k), negatives (list, lengths 19 to 20), metadata (dict).
Create a dictionary from an output
def _get_property_from_output(self, output): obj = {} items = self.parser.listing(output) for item in items: obj[item['Property']] = str(item['Value']) return obj
[ "def _command_output_to_dict(self, output):\n models_dict = {}\n for i in (output.strip()).split(\"\\n\"):\n item = i.split(\"\\t\")\n models_dict[item[0]] = int(item[1])\n return models_dict", "def _textfsm_to_dict(\n structured_output: Union[List[Any], Dict[str, Any...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Wait until object reaches given status.
def wait_for_object_status(self, object_name, object_id, status, timeout=120, interval=3): cmd = self.object_cmd(object_name, 'show') start_time = time.time() while time.time() - start_time < timeout: if status in self.cinder(cmd, params=object_id): ...
[ "def _wait_for_status(status_type, object_id, status=None, timeout=500, quiet=True):\n if status is None:\n status = \"ok\"\n\n interval = 5\n iterations = int(timeout / interval)\n\n vm_ = get_configured_provider()\n manager = packet.Manager(auth_token=vm_[\"token\"])\n\n for i in range(0,...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check that object deleted successfully.
def check_object_deleted(self, object_name, object_id, timeout=60): cmd = self.object_cmd(object_name, 'show') try: start_time = time.time() while time.time() - start_time < timeout: if object_id not in self.cinder(cmd, params=object_id): break...
[ "def checkDeleted(self) -> None:\n ...", "def _objectDeleted(self, obj):\n pass", "def delete_object(self,model,obj_id):\n obj = self.get_object(model,obj_id)\n if not obj:\n return (False,'Object Doest Not Exist')\n # Check for reserved ids before deleting\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
check sparsemax-loss kernel against numpy
def _test_sparsemax_loss_against_numpy(self, dtype, random, use_gpu): z = random.uniform(low=-3, high=3, size=(test_obs, 10)) q = np.zeros((test_obs, 10)) q[np.arange(0, test_obs), random.randint(0, 10, size=test_obs)] = 1 tf_loss_op, tf_loss_out = self._tf_sparsemax_loss(z, q, dtype, use_gpu) np_l...
[ "def _test_sparsemax_loss_positive(self, dtype, random, use_gpu):\n z = random.uniform(low=-3, high=3, size=(test_obs, 10))\n q = np.zeros((test_obs, 10))\n q[np.arange(0, test_obs), random.randint(0, 10, size=test_obs)] = 1\n\n tf_loss_op, tf_loss_out = self._tf_sparsemax_loss(z, q, dtype, use_gpu)\n\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
check sparsemax-loss transfers nan
def _test_sparsemax_loss_of_nan(self, dtype, random, use_gpu): q = np.asarray([[0, 0, 1], [0, 0, 1], [0, 0, 1]]) z_nan = np.asarray([[0, np.nan, 0], [0, np.nan, np.nan], [np.nan, np.nan, np.nan]]).astype(dtype) _, tf_loss_nan = self._tf_sparsemax_loss(z_nan, q, dtype, use_gpu) s...
[ "def _test_sparsemax_loss_of_inf(self, dtype, random, use_gpu):\n q = np.asarray([[0, 0, 1], [0, 0, 1], [0, 0, 1], [0, 0, 1]])\n z_neg = np.asarray([\n [0, -np.inf, 0],\n [0, -np.inf, -np.inf],\n [-np.inf, -np.inf, 0],\n [-np.inf, -np.inf, -np.inf],\n ]).astype(dtype)\n z_pos...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
check sparsemax-loss is infinity safe
def _test_sparsemax_loss_of_inf(self, dtype, random, use_gpu): q = np.asarray([[0, 0, 1], [0, 0, 1], [0, 0, 1], [0, 0, 1]]) z_neg = np.asarray([ [0, -np.inf, 0], [0, -np.inf, -np.inf], [-np.inf, -np.inf, 0], [-np.inf, -np.inf, -np.inf], ]).astype(dtype) z_pos = np.asarray...
[ "def _test_sparsemax_loss_of_nan(self, dtype, random, use_gpu):\n q = np.asarray([[0, 0, 1], [0, 0, 1], [0, 0, 1]])\n z_nan = np.asarray([[0, np.nan, 0], [0, np.nan, np.nan],\n [np.nan, np.nan, np.nan]]).astype(dtype)\n\n _, tf_loss_nan = self._tf_sparsemax_loss(z_nan, q, dtype, use_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
check sparsemax-loss proposition 4
def _test_sparsemax_loss_positive(self, dtype, random, use_gpu): z = random.uniform(low=-3, high=3, size=(test_obs, 10)) q = np.zeros((test_obs, 10)) q[np.arange(0, test_obs), random.randint(0, 10, size=test_obs)] = 1 tf_loss_op, tf_loss_out = self._tf_sparsemax_loss(z, q, dtype, use_gpu) self.ass...
[ "def _test_sparsemax_loss_zero(self, dtype, random, use_gpu):\n # construct z and q, such that z_k >= 1 + max_{j!=k} z_k holds for\n # delta_0 = 1.\n z = random.uniform(low=-3, high=3, size=(test_obs, 10))\n z[:, 0] = np.max(z, axis=1) + 1.05\n\n q = np.zeros((test_obs, 10))\n q[:, 0] = 1\n\n t...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
check sparsemax-loss proposition 5
def _test_sparsemax_loss_zero(self, dtype, random, use_gpu): # construct z and q, such that z_k >= 1 + max_{j!=k} z_k holds for # delta_0 = 1. z = random.uniform(low=-3, high=3, size=(test_obs, 10)) z[:, 0] = np.max(z, axis=1) + 1.05 q = np.zeros((test_obs, 10)) q[:, 0] = 1 tf_loss_op, tf_...
[ "def _test_sparsemax_loss_positive(self, dtype, random, use_gpu):\n z = random.uniform(low=-3, high=3, size=(test_obs, 10))\n q = np.zeros((test_obs, 10))\n q[np.arange(0, test_obs), random.randint(0, 10, size=test_obs)] = 1\n\n tf_loss_op, tf_loss_out = self._tf_sparsemax_loss(z, q, dtype, use_gpu)\n\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the export_host of this ExportResponseMetadata.
def export_host(self, export_host): self._export_host = export_host
[ "def response_host(self, response_host):\n\n self._response_host = response_host", "def set_host(self, host):\n\n self.host = host", "def set_host(self, host):\n self.host = host", "def host(self, host):\n self._host = host", "def email_host(self, email_host):\n\n self._em...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the export_date of this ExportResponseMetadata.
def export_date(self, export_date): self._export_date = export_date
[ "def set_date(self, date):\n self.date = date\n return", "def date(self, date):\n \n self._date = date", "def date(self, date):\n\n self._date = date", "def set_extracte_date(self, extracte_date):\n if extracte_date is not None:\n self.extracte_date = extra...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the requested_object_list of this ExportResponseMetadata.
def requested_object_list(self, requested_object_list): self._requested_object_list = requested_object_list
[ "def exported_object_list(self, exported_object_list):\n\n self._exported_object_list = exported_object_list", "def _set_object_list(self, name, value):\n if isinstance(value, list):\n object_list = getattr(self, name)\n object_list.clear()\n object_list.extend(value...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the exported_object_list of this ExportResponseMetadata.
def exported_object_list(self, exported_object_list): self._exported_object_list = exported_object_list
[ "def requested_object_list(self, requested_object_list):\n\n self._requested_object_list = requested_object_list", "def device_reset_list(self, device_reset_list):\n\n self._device_reset_list = device_reset_list", "def detail_list(self, request, export_locations):\n return self._list_export...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Clean credentials and the batch environment. It cleans the user's token credential and the batch environment, and additionally deletes all Docker containers. Also, this command is executed by root in the prolog.
def clean_environment(ctx, token): try: out = ctx.obj.clean_environment(token) print_message(out) except BaseException as e: print_error(e.message)
[ "def clean(self, data):\n required = {'admin_token', 'token'}\n api.validate(data, required)\n admin_token = data['admin_token']\n force = True\n self.credentials_module.authorize_admin(admin_token)\n token = data['token']\n containers = self.credentials_module.list_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Delete a container or list of them.
def container_delete(ctx, token, container_ids, force): try: out = ctx.obj.container_delete(token, container_ids, force) print_message(out) except exceptions.DockerException as e: m = e.message print_error(m)
[ "def delete_container(self, container: Container):", "def delete_container(ContainerName=None):\n pass", "def delete_container(request, container, force=True):\n\n auth_token = get_token_id(request)\n storage_url, http_conn = connection(request)\n\n try:\n if force:\n head = client...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
floor the point to the next lower multiple of bucket_size
def bucketize(point, bucket_size): return bucket_size * math.floor(point / bucket_size)
[ "def bucketize(point, bucket_size):\n return bucket_size * math.floor(point / bucket_size)", "def bucketize(point: float, bucket_size: float) -> float:\n return bucket_size * math.floor(point / bucket_size)", "def get_bucket_boundaries(bucket_size: int, max_size: int) -> np.ndarray:\n return np.arange(bu...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
buckets the points and counts how many in each bucket
def make_histogram(points, bucket_size): return Counter(bucketize(point, bucket_size) for point in points)
[ "def make_histogram(points: List[float], bucket_size: float) -> Dict[float, int]:\n return Counter([bucketize(point, bucket_size) for point in points])", "def make_histogram(points: List[float], bucket_size: float) -> Dict[float, int]:\n\n return Counter(bucketize(point, bucket_size) for point in points)", ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
returns a random draw from a standard normal distribution
def random_normal(): return inverse_normal_cdf(random.random())
[ "def get_standard_normal_distribution():\n return np.random.normal(0, 1)", "def draw_normal(self):\n means, scale = self.get_means_and_scales()\n return np.random.normal(means,scale,size=[self.sims,means.shape[0]]).T", "def randn(mean: float = 0.0, sd: float = 1.0):\n return Float(None, None...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add item(s) to the cart. For Ajax methods.
def basket_add(request): # a single item, or the item has options to choose from obj = ProductItem.objects.get(id=int(request.POST['product_id'])) # Build the data for the response to the client response_data = { 'product_id': obj.id, 'name': obj.name, 'articul': obj.articul, 'q...
[ "def add_data():\n return jsonify({\"api\": \"add data\"})", "def add(request):\n # 得到浏览器发送的 json 格式数据\n # 浏览器用 ajax 发送 json 格式的数据过来\n # 所以这里我们用新增加的 json 函数来获取格式化后的 json 数据\n form = request.json()\n # 创建一个 todo\n t = Todo.new(form)\n # 把创建好的 todo 返回给浏览器\n return json_response(t.json())"...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Transform request data to a dict with 2 levels of depth
def request_data_to_dict(data): if not isinstance(data, ImmutableMultiDict): raise ValueError('Input must be ImmutableMultiDict type.') res = {} for (key, value) in data.to_dict().items(): matches = re.match('(.*)\[(.*)\]', key) if matches: (key_lv_1, key_lv_2) =...
[ "def get_data_dict(self):\n return self.build_data_dict(self.get_flatten_values())", "def get_data_from_request():\n return {\n 'request': {\n 'url': '%s://%s%s' % (web.ctx['protocol'], web.ctx['host'], web.ctx['path']),\n 'query_string': web.ctx.query,\n 'method': we...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Fades all outputs to the given color and waits for it to complete.
def FadeOutputs(box, color, steps=50): for output in box: output.Fade(color=color, steps=steps) time.sleep(steps / (float(box.frequency) / len(box)))
[ "def do_fade_colour(l, leds, r, g, b, duration):\n l._do_multi_led_command(\n create_fade_colour_command, leds, r, g, b, duration\n )", "def fadeToColor(self, seconds: float, red: float, green: float, blue: float, alpha: float, background=False) -> None:\r\n fn = self.function_tabl...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Constructor (initialize method): creates a template and passes the attributes in; assigning the values is left to the template.
def __init__(self, pa_ten ,pa_tuoi, pa_vu_khi): self.ten= "Sieunhan" + pa_ten # the self keyword receives the object the method is called on self.tuoi= pa_tuoi self.vukhi= pa_vu_khi
[ "def __init__(self, nome, dataNascimento, nomeDeMae, nomeDePai):\n self.nome = nome \n self.dataNascimento = dataNascimento\n self.nomeDeMae = nomeDeMae\n self.nomeDePai = nomeDePai", "def __init__(self):\n super(SanMenWithZhonggDealer, self).__init__()\n # 本玩法包含的花色\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the name the function should have in the Python api, based on the C++ function name. For entry_type 'function', the cpp_name is used unmodified; otherwise strip everything before the first underscore, so that
def to_py_name(cpp_name, entry_type): if entry_type == 'function': return cpp_name first_underscore = cpp_name.find('_') assert(first_underscore != -1) return cpp_name[first_underscore + 1:]
[ "def _clean_function_name(name):\n # Note: each time a function is wrapped into `function_lib.ConcreteFunction`\n # its name becomes \"__inference_<orig>_xyz\".\n match = re.search(_FUNCTION_WRAPPER_NAME_REGEX, name)\n if match:\n return match.group(1)\n else:\n return name", "def _plugin_funcname(func...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the name the property should have in the Python api, based on the C++ struct name.
def property_to_py_name(cpp_struct_name): first_underscore = cpp_struct_name.find('_') assert first_underscore != -1 return cpp_struct_name[first_underscore + 1:]
[ "def PropertyName(self) -> str:", "def _GetJSObjectFieldName(field):\n if _IsMapEntry(field):\n return field.json_name + 'Map'\n elif field.label == descriptor.FieldDescriptor.LABEL_REPEATED:\n return field.json_name + 'List'\n else:\n return field.json_name", "def property_name(self) -> str:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Determines the Python method type (METH_NOARGS or METH_VARARGS) from the C++ argument list and type of function.
def get_type(args_str, entry_type): # The C-method-implementations accept self as the first argument, # so a one-argument method will be invoked with zero arguments in Python. no_args = 1 if entry_type == "method" else 0 return ("METH_NOARGS" if len(args_str.split(",")) == no_args else ...
[ "def get_args_type(java_args):\n if len(java_args) == 0:\n return 'JNIEnv* env, jobject thiz'\n jargs = java_args.lower()\n args = jargs.split(', ')\n # print 'arg count:', len(args)\n full_arg = 'JNIEnv* env, jobject thiz, '\n i = 1\n for java_arg in args:\n java_type = java_arg....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates one entry for a PyMethodDef array from the entries for one function (as returned by parse_file).
def to_PyMethodDef_entry(items): entry_type = items[0] items = items[1:] if entry_type == 'method': return 'FORWARDER(%s, %s, "%s", %s)' % items elif entry_type == 'function': return 'FREE_FORWARDER(%s, %s, "%s", %s)' % items elif entry_type == 'method_template': re...
[ "def to_PyMethodDef(name, entries, extra_includes):\r\n\r\n methodEntries = [to_PyMethodDef_entry(items) for items in entries]\r\n if name is not None:\r\n methodDef = ('static PyMethodDef %s_methods[] = {\\n ' % name +\r\n ',\\n '.join(methodEntries) + ',\\n ')\r\n else:\r\n me...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates one entry for a PyGetSetDef array from the entries for one property struct (as returned by parse_file).
def to_PyGetSetDef_entry(cpp_struct_name, py_name, doc): return 'PROPERTY_FORWARDER(%s, "%s", %s)' % ( cpp_struct_name, py_name, doc)
[ "def to_PyGetSetDef(name, entries):\r\n getSetDefEntries = [to_PyGetSetDef_entry(*items) for items in entries]\r\n getSetDef = ('static PyGetSetDef %s_getseters[] = {\\n ' % name +\r\n ',\\n '.join(getSetDefEntries) + ',\\n ')\r\n getSetDef += '{nullptr,nullptr,nullptr,nullptr,nullptr} /...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates a string of a C PyGetSetDef array named <name>_getseters, containing all entries in the list (as created by to_PyGetSetDef_entry).
def to_PyGetSetDef(name, entries): getSetDefEntries = [to_PyGetSetDef_entry(*items) for items in entries] getSetDef = ('static PyGetSetDef %s_getseters[] = {\n ' % name + ',\n '.join(getSetDefEntries) + ',\n ') getSetDef += '{nullptr,nullptr,nullptr,nullptr,nullptr} // Sentinel\n};' ...
[ "def getSets():", "def get_setspecs(self):\n\n response = requests.get(\"http://export.arxiv.org/oai2?verb=ListSets\")\n soup = BeautifulSoup(response.text)\n\n setspecs = soup.find_all(\"setspec\")\n setspecs = [item.text for item in setspecs]\n\n return setspecs", "def get_s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates a string of a C PyMethodDef array named <name>_methods, containing all the entries in the list (as created by to_PyMethodDef_entry). Includes any include in the extra_includes list after the regular entries (before the sentinel).
def to_PyMethodDef(name, entries, extra_includes): methodEntries = [to_PyMethodDef_entry(items) for items in entries] if name is not None: methodDef = ('static PyMethodDef %s_methods[] = {\n ' % name + ',\n '.join(methodEntries) + ',\n ') else: methodDef = ',\n'.join(m...
[ "def to_PyMethodDef_entry(items):\r\n\r\n entry_type = items[0]\r\n items = items[1:]\r\n if entry_type == 'method':\r\n return 'FORWARDER(%s, %s, \"%s\", %s)' % items\r\n elif entry_type == 'function':\r\n return 'FREE_FORWARDER(%s, %s, \"%s\", %s)' % items\r\n elif entry_type == 'meth...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Writes an HTML file documenting the passed-in methods, using the docstrings (as returned by parse_file).
def write_method_doc(file_name, entries): with open(file_name, 'w', newline='\n') as f: f.write('<table border="0">') f.write('<tr><td><b>Method</b></td><td><b>Description</b></td></tr>') for items in sorted(entries, key=itemgetter(3)): f.write('<tr><td valign="top">%s</td...
[ "def md_methods_doc(self, method_doc):\n mark_doc, mark_head = '', '\\n--------- \\n\\n## Methods \\n\\n {0} \\n {1}'\n method_table = MD_TABLE_ALT.format('method')\n\n for method in method_doc:\n # isolate only the name of the function (without def and params)\n name = me...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Writes an HTML file documenting the passed-in properties, using the docstrings (as returned by parse_file). Expects a list of (property name, docstring) tuples.
def write_property_doc(file_name, entries): if len(entries) == 0: return with open(file_name, 'w', newline='\n') as f: f.write('<!-- Generated by %s -->' % os.path.basename(__file__)) f.write('<table border="0">') f.write('<tr><td><b>Property</b></td><td><b>Description</b...
[ "def generate_html(self, doc, filepath):\n\n if self.to_evaluate:\n ents = ['TP', 'FN', 'FP']\n elif self.domain == 'diseases':\n ents = ['DIS']\n elif self.domain == 'food':\n ents = ['FOOD']\n elif self.domain == 'both':\n ents = ['DIS', 'FOO...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generate the Python method-def header and HTML documentation for the C++ file indicated by src_file_name, by locating "special" C comments. The header is saved to dst_file_name and the HTML documentation to dst_doc_file_name. The name is used for the PyMethodDef and PyGetSetDef.
def generate(src_file_names, dst_file_name, dst_doc_file_name, dst_property_doc_file_name, name): methods = [] properties = [] extra_includes = [] entries = (methods, properties) for src_file_name in src_file_names: check_file(src...
[ "def generate_headers(src_files, out_root, doc_root):\r\n\r\n if not os.path.exists(out_root):\r\n os.makedirs(out_root)\r\n did_print_heading = False\r\n changed = False\r\n for (name, files) in src_files:\r\n if files.__class__ == str:\r\n src = files\r\n files = (...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generate headers with a Python method-def array and HTML documentation tables for the listed source files.
def generate_headers(src_files, out_root, doc_root): if not os.path.exists(out_root): os.makedirs(out_root) did_print_heading = False changed = False for (name, files) in src_files: if files.__class__ == str: src = files files = (src,) else: ...
[ "def _generateModuleDocs( self ):\n html = []\n \n # generate the module environ\n environ = commands.ENVIRON.copy()\n environ['title'] = self.title()\n environ['base_url'] = self.baseurl()\n environ['static_url'] = environ['base_url'] + '/_static'\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Construct an instance of ``client_class`` and register it under given alias.
def create_connection(self, alias='async', client_class=AsyncElasticsearch, **kwargs): kwargs.setdefault('serializer', serializer) conn = self._conns[alias] = client_class(**kwargs) return conn
[ "def register_client(client):\n global _client\n _client = client", "def alias(self, cls: Type, alias: str) -> 'ClassRegistry':\n # Make sure the alias isn't the empty string\n if alias == \"\":\n raise NameError(\"Aliases can't be empty\")\n\n # Make sure the alias isn't alr...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Perform outer indexing on dask array `x`, one dimension at a time. It is assumed that `indices` is suitably normalised (no ellipsis, etc.)
def _dask_oindex(x, indices): axis = 0 for index in indices: x = da.take(x, index, axis=axis) # If axis wasn't dropped by a scalar index: if not isinstance(index, Integral): axis += 1 return x
[ "def broadcast_index(values, indices):\r\n assert_array(indices, shape=(...,) + values.shape[:-1])\r\n indexed_values = jp.take_along_axis(\r\n values.reshape((1,) + values.shape),\r\n indices.reshape((-1,) + values.shape[:-1] + (1,)),\r\n axis=-1,\r\n )\r\n flat_result = jp.squeeze...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Determine appropriate name for callable `f` (akin to function name).
def _callable_name(f): try: return f.__name__ except AttributeError: if isinstance(f, partial): return f.func.__name__ return f.__class__.__name__
[ "def function_name(f):\n if isinstance(f, functools.partial):\n f = f.func\n\n try:\n return f.__name__\n except Exception:\n return \"<unknown function>\"", "def get_callable_name(func):\n if isinstance(func, functools.partial):\n return get_callable_name(func.func)\n else:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Transform data (`keep` is a user-specified second-stage index).
def __call__(self, data, keep): return self.transform(data, keep)
[ "def convert_index_select(g, op, block):\n\n x = g.get_node(op.input(\"X\")[0])\n index = g.get_node(op.input(\"Index\")[0])\n axis = op.attr(\"dim\")\n out = _op.transform.take(x, index, axis, mode=\"wrap\")\n g.add_node(op.output(\"Out\")[0], out)", "def preprocessData(df, removeCols):\n\tdf1=df....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initialises the ``InputDevice`` object and starts ``pifacecad.SwitchEventListener``. Also, registers callbacks to ``press_key`` method.
def __init__(self): self.cad = pifacecad.PiFaceCAD() self.listener = pifacecad.SwitchEventListener(chip=self.cad) for i in range(8): self.listener.register(i, pifacecad.IODIR_FALLING_EDGE, self.press_key) self.listener.activate() atexit.register(self.atexit)
[ "def start_listener():\n listener = keyboard.Listener(\n on_press=on_press\n )\n listener.start()", "def start(self):\n keyboard.on_release(self.record_press)\n # starts record_press() when true\n keyboard.wait(hotkey=\"esc\")\n # The wait() keeps the listener active", ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Wrapper over _get_variable_wrapper() to get weights, with weights decay factor in loss.
def _get_weights_wrapper( name, shape, dtype=tf.float32, initializer=initializers.xavier_initializer(), weights_decay_factor=None ): weights = _get_variable_wrapper( name=name, shape=shape, dtype=dtype, initializer=initializer ) if weights_decay_factor is not None and weights_decay_factor > 0.0: we...
[ "def _get_weights_wrapper(\n name,\n shape,\n dtype=tf.float32,\n initializer=initializers.xavier_initializer(),\n weights_decay_factor=None,\n):\n\n weights = _get_variable_wrapper(\n name=name, shape=shape, dtype=dtype, initializer=initializer\n )\n\n if weights_decay_factor is not ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get variables in a triple pattern
def get_vars(triple): return set([v for k, v in triple.items() if v.startswith('?')])
[ "def variables(s):\n return tuple(Variable(c) for c in s)", "def variables(s):\n result = set([])\n def walk(s):\n if is_variable(s):\n result.add(s)\n else:\n for arg in s.args:\n walk(arg)\n walk(s)\n return result", "def identify_variables(lin...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Find the first pattern, in a set of triple patterns, connected to a set of variables
def find_connected_pattern(variables, triples): pos = 0 for triple in triples: tripleVars = get_vars(triple['triple']) if len(variables & tripleVars) > 0: return triple, pos, variables | tripleVars pos += 1 return None, None, variables
[ "def _triples(self, pattern):", "def calculate_first(terminals, nonterminals, grammar, nullable):\n first = dict()\n for t in terminals:\n first[t] = {t}\n for a in nonterminals:\n first[a] = set()\n changing = True\n while changing:\n changing = False\n \n\t#for each ru...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
returns the frequency of a tone. formulas from
def tone_to_freq(tone): return math.pow(2, (tone - 69.0) / 12.0) * 440.0
[ "def get_tone_frequency(self):\n return self.tone_frequency", "def getFrequency(self):\n return _yarp.Sound_getFrequency(self)", "def main_frequency(self):\n fft_length, _ = self.check_params()\n n_freq = fft_length // 2 + 1\n freq = np.linspace(0, self.fs / 2, n_freq)\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
this function adds 5 cards from the deck to the hand
def deal_poker_hand(self, deck): for i in range(5): self.hand.append(deck.drawCard())
[ "def deal_poker_hand(self, deck):\n\n for i in range( 5 ):\n self.__hand.append( deck.deal() )", "def add_cards(self, cards):\r\n self.cards = cards + self.cards", "def add_card(self, added_cards):\n\n self.hand[:0] = added_cards", "def deal_cards(my_deck):\n user_cards = []...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
prints all cards in hand
def print_hand(self): for card in self.hand: card.printCard()
[ "def show_hand(self):\n for card in self.hand:\n print(card)", "def print_hand(hand):\n for i, card in enumerate(hand):\n print(Fore.WHITE + \"[\" + str(i) + \"] - \", end=\"\")\n print_card_in_color(card)\n print()", "def print_hand(hand):\n for card in hand:\n p...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
There are values in the XLS that have a description in one cell and the value to the left; this function is a helper for those cases.
def get_horizontal_field_value(xls, row_index, description_index, fields_count=1, description=None, partial_match=False): if description: actual_description = get_cell_value(xls, row_index, description_index) if not actual_description: raise ValueError("empty cell at coordinate: {}:{}".f...
[ "def __view(self, title):\n sheet = self.workbook[title]\n if sheet.max_row == 1:\n print('\\n*****No '+title+' registered yet!*****\\n')\n return None\n items = []\n maxLens = []\n print('\\n'+title+'\\n')\n for i in range(1, sheet.max_column+1):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The user clicked to update their favorites. This checks whether or not to remove the athlete in the session as a favorite
def update_favorites(): check_favorite = Favorite.query.filter(Favorite.favorited_item==session["athlete_id"]).first() route = f'/athletes/{session["athlete_id"]}' if check_favorite is None: new_update = Favorite(id=current_user.id, favorited_item=session["athlete_id"]) db.session.add(new...
[ "def update_favourites(self, item_info, status):\r\n if status == \"Add\":\r\n return self.model.add_to_favourites(item_info)\r\n elif status == \"Remove\":\r\n return self.model.delete_from_favourites(item_info)", "def favourite(self, favourite):\n\n self._favourite = f...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Adds hopping conjugates to self.dict.
def add_conjugates(self): # declare new dict self.new_dict = copy.deepcopy(self.dict) # iterate over items for i in range(len(self.dict)): for rel_tag, hopping in self.dict[i].items(): x, y, z, j = rel_tag reverse_tag = (-x, -...
[ "def makepep_dict(self):\r\n for an in self.prot_col_dict: #key=an value=protein object\r\n for pep in self.prot_col_dict[an].pep_list: # for every pep_seq in protein\r\n if not self.pep_dict.has_key(pep): # if dict doesn't have pepseq as key\r\n self.pep_dict[pep...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Shift input ids one token to the right, and wrap the last non-pad token (usually the EOS token).
def shift_tokens_right(self, input_ids, pad_token_id): prev_output_tokens = input_ids.clone() index_of_eos = (input_ids.ne(pad_token_id).sum(dim=1) - 1).unsqueeze(-1) prev_output_tokens[:, 0] = input_ids.gather(1, index_of_eos).squeeze() prev_output_tokens[:, 1:] = input_ids[:, :-1] return prev_outp...
[ "def shift_tokens_right(input_ids, pad_token_id):\r\n prev_output_tokens = input_ids.clone()\r\n index_of_eos = (input_ids.ne(pad_token_id).sum(dim=1) - 1).unsqueeze(-1)\r\n prev_output_tokens[:, 0] = input_ids.gather(1, index_of_eos).squeeze()\r\n prev_output_tokens[:, 1:] = input_ids[:, :-1]\r\n re...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Visualizes in a pyplot window an image and a label pair from the provided paths. For reading files, Pillow is used, so all paths and formats must be Pillow-compatible. The task definition is used to define colors for label ids (see panoptic_parts/utils/defs/template_v1.0.yaml).
def visualize_from_paths(image_path, label_path, task_def_path): # sid2color is a mapping from all possible sids to colors with open(task_def_path) as fp: task_def = yaml.load(fp, Loader=yaml.Loader) sid2color = task_def['sid2color'] # add colors for all sids that may exist in labels, but don't have a color...
[ "def plot_image_and_label(imgs, lbls): \n for i, (img, lbl) in enumerate(zip(imgs, lbls)):\n fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(9, 4))\n image = axes[0].imshow(img)\n mask = axes[1].imshow(lbl)\n fname = 'temp/' + str(i) + '_.png'\n plt.savefig(fname)\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Flying formation box calculation
def calculateFFBox(qOfFlights): # if qOfFlights == 2: rows=2; columns=1 # else: pass
[ "def box_function(width_left, width_right, shift, sigma, x):\n\n prefactor = 2.0 * 0.25\n left = erf( (1.0/width_left * x + 1.0/width_left * shift + 1.0) / ( sigma * math.sqrt(2.0)) )\n right = erf( (1.0/width_right * x + 1.0/width_right * shift - 1.0) / ( sigma * math.sqrt(2.0)) )\n\n return prefacto...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculate track [degrees] between flights [degrees]
def calculateTrackBetweenFlights(lat1,lon1,lat2,lon2): return Geodesic.WGS84.Inverse(lat1,lon1, lat2, lon2)['azi1']
[ "def match_to_degrees(match: Match) -> float:\n raw_degree, minute, second, sign = match.groups()\n degree = int(raw_degree) + 30 * ZODIAC_ZET9.index(sign)\n return dms_to_deg(degree, int(minute), float(second))", "def steps_to_angle():\n pass", "def get_difference(wind_direction, runway_heading):\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Checking if tracks match
def checkTracks(track1,track2): matched=True if abs(track1-track2) <= TRACKS_DIFFERENCE else False return matched
[ "def test_matching_tracks(self):\n\n # 5037: Pop 101 (feat. Anami Vice) by Marianas Trench\n # 8755 : Satisfied (feat. Miguel & Queen Latifah) by Sia\n # 6699 : Un Besito Mas (feat. Juan Luis Guerra) by Jesse & Joy\n targets = {5037: '2fGFaTDbE8aS4f31fM0XE4',\n 8755: '1...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the latitude and longitude of a point at a distance dist [m] with a degree deg from lat,lon
def getPoint(lat,lon,deg,dist): point={} point['LAT'] = Geodesic.WGS84.Direct(lat,lon,deg,dist)['lat2'] point['LON'] = Geodesic.WGS84.Direct(lat,lon,deg,dist)['lon2'] return point
[ "def get_point(lat,lon,alt,deg,dist):\n\treturn [Geodesic.WGS84.Direct(lat,lon,deg,dist)['lat2'],Geodesic.WGS84.Direct(lat,lon,deg,dist)['lon2'],alt]", "def lat_deg(mm: int) -> float:\n lat = round(\n math.degrees(2 * math.atan(math.exp(mm / SEMIMINOR_B)) - math.pi / 2), 8\n )\n return lat", "de...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Use the current date, add ".0", to build a suffix for the Docker tag.
def _build_tag_suffix() -> str: now = datetime.datetime.now(tz=datetime.timezone.utc).astimezone() return now.strftime(".%Y%m%d.0")
[ "def construct_tagged_docker_image_name(dockerImageName, dockerImageTag=None):\n if dockerImageTag is None:\n return dockerImageName\n else:\n return \"{}:{}\".format(dockerImageName, dockerImageTag)", "def get_tag():\n # All tags begging with architecture type (based on the Python version) and\n # ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Determine the sp-osi version to use; parse "wip" in a special way.
def osi_version() -> str: if sp_osi is None: return find.find_sp_osi_version() if sp_osi == "wip": return find.find_sp_osi_version() + defs.VERSION_WIP_SUFFIX return sp_osi
[ "def sisyphus_version(self):\n conf = self.conf_svc.get_app_config()\n cmd = [conf[\"perl\"], conf[\"version_bin\"]]\n sisphus_version = check_output(cmd)\n return sisphus_version", "def get_friendly_of_version(self, ofproto):\n if ofproto.OFP_VERSION == 1:\n _of_vers...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Rebuild the container for a single component.
def build_component(component: str) -> None: parts: Final = component.split("-", maxsplit=1) if len(parts) != 2: # noqa: PLR2004 # this will go away with match/case sys.exit(f"Internal error: build_component() invoked with {component=!r}") kolla_component, kolla_service = parts ...
[ "def rebuild(self): # remake_all_components\n for _, obj in self._components.items(): # pylint: disable=unused-variable\n obj.rebuild()", "def smart_containers_is_asked_to_build_it():", "def refresh_components(self):\n self._cmp = {}\n self._messenger.add_info_message(\"Refresh...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Group some characteristics by postal code area (first 3 letters)
def postalcode_area_studies(): dfpawnshop = pd.read_csv(pawnmtl.csv) cpdic = getPostalCodeDic() for ik in cpdic.keys(): print(ik, cpdic[ik])
[ "def group_by_street_name(self, the_entire_range, list_address):\n group_street_name = []\n temporary_list = []\n # print(colored(the_entire_range, 'yellow'))\n for elem in the_entire_range:\n temporary_list.append(elem)\n if self.capital_letter_word.findall(elem):\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Fills in placeholders with previous entries (if available). Should be called via Ajax (similar to evaluate).
def placeholders_fill_in_last_response(): task_key = request.vars.task_key if auth.is_logged_in(): rows = db(task_query(task_key)).select() if len(rows) > 1: raise RuntimeError("DB error: learn table has too many (%s) entries with task_key=%s, user_id=%s " % (len(rows), task_key, a...
[ "def _build_placeholders(self):\n\n\t\t# Create a placeholder for input data", "def set_placeholder_values(message, data):\n \n return message.render(data)", "def FillForm(string_for_substitution, dictionary_of_vars):\n return_string = string_for_substitution\n for i in re.findall(\"//%%(.*)%%//\", stri...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns True only if the leave can be granted.
def isLeaveLeft(self,leave_type,days): if leave_type == 1 : return days<=self.earned_balance elif leave_type == 2 : return days<=self.hp_balance elif leave_type == 3 : return days*2<=self.hp_balance else : return False
[ "def canUnlockAll(boxes):", "def can_leave_team(uid):\n current_user = get_user(uid=uid)\n current_team = api.team.get_team(current_user[\"tid\"])\n if current_team[\"team_name\"] == current_user[\"username\"]:\n return False\n if current_team[\"creator\"] == uid and current_team[\"size\"] != 1...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
For CV: extract val_perc% of the training set as the validation set.
def get_train_val(train: datasets, test_transform: transforms, dataset: str, val_perc: float = 0.1): dataset_length = train.data.shape[0] directory = 'datasets/val_permutations/' create_if_not_exists(directory) file_name = dataset + '.pt' if os.path.exists(directory + file_name): ...
[ "def k_fold_validator(X, y, classifier, cv=5):\n \n scaler = MinMaxScaler()\n\n X_scaled = scaler.fit_transform(X)\n X_scaled = pd.DataFrame(X_scaled, index=X.index, columns=X.columns)\n \n kf = KFold(n_splits=cv, random_state=807, shuffle=True)\n clf = classifier\n\n train_recall_scores = [...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
DNS query to get TXT record list of google networks
def google_rr_dns_query(record: str) -> Optional[str]: try: res = resolver.resolve(record, 'TXT') return str(res.rrset[0].strings[0], 'utf-8') except (resolver.NoAnswer, resolver.NXDOMAIN) as error: raise NetworkError(f'Error querying TXT record for {record}: {error}') from error
[ "def getdns(self):\r\n filename = r\"dns_profiles.txt\"\r\n fp = open(filename)\r\n data = []\r\n for lines in fp.readlines():\r\n data.append(list(map(float, lines.split())))\r\n #use the fundamental string function 'append','split' to extract floating point number...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Fill the missing values (NaN) in a column with the mean value of the group the row belongs to. The rows are grouped based on the values of another column.
def fill_with_group_average(df, group, column): #df=None df[column].fillna(df.groupby(group)[column].transform('mean'), inplace=True) return df
[ "def fill_mean(df):\n df = df.fillna(df.mean().fillna(0).to_dict())\n return df", "def mean_impute(self, column_val):\n mean = np.mean(column_val)\n column_val = column_val.fillna(mean)\n return column_val", "def filling_nan_values(df: pd.DataFrame) -> pd.DataFrame: \n ratio = df.c...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return all the rows (with all columns) where the value in a certain 'column' is greater than the average value of that column, i.e. rows where row.column > mean(data.column).
def get_rows_greater_than_avg(df, column): df= df[df[column] > df[column].mean()] return df
[ "def get_data(df, score, column):\n new_df = df.loc[df[column] > score]\n return new_df", "def demo_one_filter():\n data = [1.3, 2.7, 0.8, 4.1, 4.3, -0.1]\n avg = np.mean(data)\n print \"average value is:\", avg\n\n # create iterator that filters to keep only above average data\n above_avg_it...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Takes a junit-xml filename or path to said file. From this file it extracts the testsuite node and adds it to the junit_docker.xml file; in the process it adds a name to the testsuite (the suite param) and changes the classname from tests. to {suite}. Finally, it removes the original file. This is because jenkins was not...
def merge_to_junit_xml(filename: str, suite: str) -> None: junit_docker = Path("junit_docker.xml") if junit_docker.exists(): tree = ElementTree.parse(junit_docker) root = tree.getroot() for testsuite in root: if testsuite.get("name", None) == suite: root.remov...
[ "def _write_test_file(self):\n with open('nlt-junit.xml', 'w') as file:\n junit_xml.TestSuite.to_file(file, [self.test_suite], prettyprint=True)", "def resmoke2junit(skip_long_lines=1):\n\n cwd = os.getcwd()\n error_log = deque(\"\",200)\n\n with open('junit.xml', 'w') as junitfile:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
get all versions of inmanta packages into a freeze file, to make the environment inside docker like the one outside
def pip_lock_file() -> None: with open("requirements.freeze.all", "w") as ff: subprocess.check_call([sys.executable, "-m", "pip", "freeze"], stdout=ff) with open("requirements.freeze.tmp", "w") as ff: subprocess.check_call(["grep", "inmanta", "requirements.freeze.all"], stdout=ff) # pip free...
[ "def freeze():\n dependencies = sh('pip freeze', capture=True).split(os.linesep)\n\n with open('requirements.txt', 'w') as file:\n for dep in dependencies:\n if not dep.startswith('bones-testing'):\n file.write(dep+'\\n')", "def freeze():\n with fabtools.python.virtualenv...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the list of docker files that should be used to run the tests against.
def _get_dockerfiles_for_test() -> str: project_root_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) dockerfiles_dir = os.path.join(project_root_dir, "dockerfiles") if sys.version_info[0:2] == (3, 6): return os.path.join(dockerfiles_dir, "centos7.Dockerfile") elif sys.version_...
[ "def find_dockerfiles(self):\n self.docker_build_paths = list()\n path = self.working_dir\n filename = 'Dockerfile.j2'\n\n for root, dirs, names in os.walk(path):\n if filename in names:\n self.docker_build_paths.append(root)\n LOG.debug('Found %s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
log_loss / cross_entropy / categorical_crossentropy. X is the logits; y is the labels (num_examples, 1). Note that y is not a one-hot encoded vector. It can be computed as y.argmax(axis=1) from one-hot encoded label vectors if required.
def cross_entropy(X, y, using_onehot=True): M = y.shape[0] if using_onehot : log_likelihood = -np.log(np.max(X * y, -1)) else: log_likelihood = -np.log(X[range(M), y]) # 找到y对应的那个类别所对应的logit loss = np.sum(log_likelihood) / M return loss
[ "def softmax_cross_entropy(y, label):\r\n losses = np.sum((- np.log(y + g_epsilon) * label), axis=1)\r\n return losses\r\n pass", "def cross_entropy_loss(logits, labels, label_smoothing=0., dtype=jnp.float32):\n num_classes = logits.shape[-1]\n labels = jax.nn.one_hot(labels, num_classes, dtype=dty...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Goes through the first column of input table and returns the first sequence of dates it finds.
def get_dates(raw_table) -> "list of dates": dates = [] found_first = False for i, dstr in enumerate([raw_table[i][0] for i in range(0, len(raw_table))]): if dstr: if len(dstr.split("/")) == 3: d = datetime.datetime.strptime(dstr, '%m/%d/%Y') ...
[ "def first_event(df, codes, cols=None, pid='pid', date='in_date', sep=None, codebook=None):\n codes = _listify(codes)\n cols = _listify(cols)\n sep = _sniff_sep(df=df, cols=cols)\n cols = _expand_cols(df=df, cols=cols)\n codes = expand_codes(df=df, codes=codes, cols=cols, sep=sep, codebook=codebook)\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the list of tweets with a given hashtag in JSON format
def getByHashtags(hashtag): # set page_limits. The default is 1 pages_limit = request.args.get('pages_limit') or 1 pages_limit = int(pages_limit) raw_response = get_response(tw_api, 'search/tweets', { 'q': '#' + hashtag, 'count': 100 }, pages_limit) list_response = convert_resp2list(raw_response)...
[ "def filter_by_hashtag(tweets: list, hashtag: str) -> list:\n tweets_with_hashtag = {} # findall(): Kui tekstis on rohkem kui üks regulaaravaldisele vastav alamsõne saab kõikide vastete järjendi moodustada funktsiooniga findall()\n pattern = r\"#\\w+\" # \\w : tähed, numbrid, alakriips, + : 1 või rohkem\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test density function for multiple values at once
def test_density_multiple(self): earth = PREM() radii = np.linspace(0, 6500e3, 6501) expected = [earth.density(r) for r in radii] assert np.array_equal(earth.density(radii), expected)
[ "def test_density_multiple(self):\n earth = CoreMantleCrustModel()\n radii = np.linspace(0, 6500e3, 6501)\n expected = [earth.density(r) for r in radii]\n assert np.array_equal(earth.density(radii), expected)", "def density(self, x):", "def f_density(values):\n return map(lambda v...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test density function for multiple values at once
def test_density_multiple(self): earth = CoreMantleCrustModel() radii = np.linspace(0, 6500e3, 6501) expected = [earth.density(r) for r in radii] assert np.array_equal(earth.density(radii), expected)
[ "def test_density_multiple(self):\n earth = PREM()\n radii = np.linspace(0, 6500e3, 6501)\n expected = [earth.density(r) for r in radii]\n assert np.array_equal(earth.density(radii), expected)", "def density(self, x):", "def f_density(values):\n return map(lambda value: value/valu...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Ensure the git-fusion user has permission to write to the depot.
def check_p4gf_user_write_permission(self): gf_client_map = P4.Map() gf_client_map.insert("//...", "//client/...") utp = p4gf_protect.UserToProtect(self.ctx.p4) prot = utp.user_to_protect(p4gf_const.P4GF_USER) gf_write_filter = prot.map_for_perm(p4gf_protect.WRITE) gf_wri...
[ "def ensure_write_access(self):\n if not os.access(self.root_dir, os.W_OK):\n raise NoWriteAccess, 'no write access to \"%s\"' % self.root_dir", "def permissions(ctx):\n ctx.obj = Git()", "def test_write_thank_you_files1():\n\n # if root / sudo this will make a mess, so we double check\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a dict of depot_path => user of any locked files.
def _find_locked_by(self): fstat_flags = NTR('otherLock | otherOpen0 & headType=*+l') any_locked_files = {} # depot_path : user for branch_chunk in self.ctx.iter_writable_branch_chunks(): # Skip any newly defined branches: they're new, won't contain any # files yet, and ...
[ "def GetUserLockName():\r\n \r\n repositoryName = getRemotePath() #It must returns path to the remote repository\r\n return os.path.join(repositoryName, 'user_lock.pickle')", "def pipfile_lock_names(self):\n return ext_split(self.pipfile_locks, \"Pipfile.lock\")", "def list_locks(root=None):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Ensure the entire sequence of commits will (likely) go through without any errors related to permissions or locks. Raises an exception if anything goes wrong.
def check_commits(self, commits): LOG.info('Checking Perforce permissions and locks') self.ctx.checkpoint("copy_to_p4._preflight_check") # Stop if files are opened in our repo client # We expect this to be none, since we have the view lock opened = self.ctx.p4.run(['opened', '-m...
[ "def commit_unless_managed(self):\n self.validate_thread_sharing()\n if not self.is_managed():\n self._commit()\n self.clean_savepoints()\n else:\n self.set_dirty()", "def commit(self):\n if FORCE_COMMIT_FAILURE:\n raise psycopg2.DatabaseErro...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Prior to copying a commit, perform a set of checks for a specific branch to ensure the commit will (likely) go through successfully.
def check_commit_for_branch( self , commit , branch_id , any_locked_files , case_conflict_checker ): rev = commit['sha1'] if LOG.isEnabledFor(logging.DEBUG): LOG.debug(...
[ "def ensure_branch_preflight(self, commit, branch_id):\n log = LOG.getChild('ensure_branch_preflight')\n branch = self.ctx.branch_dict().get(branch_id)\n # branch should never be None here. p4gf_branch_id.Assigner() must\n # create Branch objects for each assignment.\n\n if s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
If not already switched to and synced to the correct branch for the given commit, do so. If this is a new lightweight branch, perform whatever creation we can do at preflight time. We don't have commits/marks for any not-yet-submitted parent commits, so the depot_branch_info will often lack a correct parent or fully popu...
def ensure_branch_preflight(self, commit, branch_id): log = LOG.getChild('ensure_branch_preflight') branch = self.ctx.branch_dict().get(branch_id) # branch should never be None here. p4gf_branch_id.Assigner() must # create Branch objects for each assignment. if self._curren...
[ "def check_commit_for_branch( self\n , commit\n , branch_id\n , any_locked_files\n , case_conflict_checker ):\n rev = commit['sha1']\n if LOG.isEnabledFor(logging.DEBUG):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Does this branch map our placeholder file? Returns non-False if mapped, None or empty string if not.
def _is_placeholder_mapped(self): return self.ctx.gwt_path( p4gf_const.P4GF_EMPTY_CHANGELIST_PLACEHOLDER).to_depot()
[ "def is_map(filename):\n\n return is_regex(filename, MAP_REGEX)", "def map_image_exists(map_code):\n file_path = get_file_path(map_code)\n return os.path.isfile(file_path)", "def fileProcessed(self,fileInstance):\n if hasattr(fileInstance,\"name\"): name=fileInstance.name\n elif hasattr(f...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
If any of the files in this commit intersect any fully populated branch (other than the current branch), then reject this commit. Shared/common/overlapping paths in branch views must be read-only from Git. Otherwise you end up with a Git push of a commit on one Git branch inserting changes into other Git branches behind G...
def _check_overlap(self, fe_commit): # +++ Avoid O(b branches * r rev) checks when # overlap is impossible because current branch # overlaps no other branch. if self._current_branch not in self._overlapping_branch_list(): ...
[ "def check_commit_for_branch( self\n , commit\n , branch_id\n , any_locked_files\n , case_conflict_checker ):\n rev = commit['sha1']\n if LOG.isEnabledFor(logging.DEBUG):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
If this is a stream branch, check that all files in the commit are writable. If any of the files is not writable then reject this commit.
def _check_stream_writable(self, fe_commit): if not self._current_branch.stream_name: return prefix = self._current_branch.writable_stream_name + '/' for fe_file in fe_commit['files']: gwt_path = fe_file['path'] depot_path = self.ctx.gwt_path(gwt_path).to_de...
[ "def _check_stream_in_classic(self, fe_commit):\n if self._current_branch.stream_name:\n return\n\n depot_re = re.compile(r'^//([^/]+)/([^/]+)/.*$')\n for fe_file in fe_commit['files']:\n gwt_path = fe_file['path']\n depot_path = self.ctx.gwt_path(gwt_path).to...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
If this is a classic branch, check that none of the files in the commit are in stream depots and thus not writable. If any of the files is not writable then reject this commit.
def _check_stream_in_classic(self, fe_commit): if self._current_branch.stream_name: return depot_re = re.compile(r'^//([^/]+)/([^/]+)/.*$') for fe_file in fe_commit['files']: gwt_path = fe_file['path'] depot_path = self.ctx.gwt_path(gwt_path).to_depot() ...
[ "def _check_stream_writable(self, fe_commit):\n if not self._current_branch.stream_name:\n return\n prefix = self._current_branch.writable_stream_name + '/'\n for fe_file in fe_commit['files']:\n gwt_path = fe_file['path']\n depot_path = self.ctx.gwt_path(gwt_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return True if the named path was introduced in the HEAD commit.
def _path_added(self, path, fecommit): # Because git-fast-export includes the entire tree in its output, # regardless of whether the requested commit is the first in the # branch or not, we need to check the repo itself to be certain if # this path was truly introduced in this commit, or...
[ "def exists(self, path):\n\n if path == self.repo_tree.path:\n return True\n\n for e in self.repo_tree.traverse():\n if e.path == path:\n return True\n\n return False", "def _is_branch(self, reference_name):\n return reference_name.startswith(\"refs...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
We have changed our branch_dict (or more likely finish_branch_definition()ed a branch within that dict) in a way that invalidates any cached calculations that consumed the branch dict.
def _invalidate_branch_cache(self): self._cached_overlapping_branch_list = None
[ "def __neg__(self):\n for branch in self.branches:\n key = branch.fullname\n del self._metastore._store[key]", "def tree_removeDeadBranches():\n nonlocal d_tree\n d_tree = { k : v for k, v in d_tree.items() if v}\n # By creating a new binding for 'd_tr...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a list of fully populated branches that overlap other fully populated branches. Caches the result because we check every file revision path for overlap, and for huge repos with thousands of nonoverlapping LW branches, just iterating through the branch list starts to waste measurable CPU time.
def _overlapping_branch_list(self): if self._cached_overlapping_branch_list is not None: return self._cached_overlapping_branch_list have_overlap = set() for outer in p4gf_branch.iter_fp_non_deleted(self.ctx.branch_dict()): outer_lhs = P4.Map() outer_lhs.inse...
[ "def branches(self):\n return self.get_branches(\n include_except_branches=False,\n include_reraise_branches=False)", "def _get_branches_to_merge(branch):\n branches = [(branch, branch.subfolder or '')]\n for dependency in branch.branch_dependency_ids:\n branches.append((...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
If gfe_file is under Git LFS control, require that its large file content exist somewhere, either in our upload cache (it's new!) or in depot dedupe storage (already got it).
def _check_lfs(self, fe_commit, fe_file): # Deleted files carry no LFS pointer. if "sha1" not in fe_file: return # Symlinks and non-files carry no LFS pointer. if fe_file.get("mode") not in [ FileModeStr.PLAIN ...
[ "def test_file_too_large(db, tmpdir, settings, permission_client):\n user, client = permission_client([\"change_datatemplate\"])\n settings.FILE_MAX_SIZE = 1\n\n # Make temp file and upload it\n fields_file = os.path.join(tmpdir.mkdir(\"test\"), \"fields.json\")\n with open(fields_file, \"w+\") as js...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Init view map for client.
def init_view(self): self.view_map = self.ctx.clientmap
[ "def map_viewing_client():\n\n # Read configuration settings\n config = gis.get_config()\n if config.opt_gis_layout == 1:\n window = True\n else:\n window = False\n\n # @ToDo Make Configurable\n toolbar = True\n\n map = define_map(window=window, toolbar=toolbar, config=config)\n\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Run list of paths through filter and set list of paths that don't pass.
def filter_paths(self, blobs): # check against one map for read, one for write # if check fails, figure out if it was the view map or the protects # that caused the problem and report accordingly self.author_denied = [] self.pusher_denied = [] self.foruser_denied = [] ...
[ "def filter_paths(unique_paths):\n contains = re.compile(r\"work_version/xbox/dlc/.+\")\n restrict = re.compile(r\"work_version/xbox/dlc/.+/out$\")\n filtered_paths = unique_paths.copy()\n\n for path in unique_paths:\n if not contains.search(path) or restrict.search(path):\n print(f'[i...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Print the given message to the error stream, as well as to the log.
def _print_error(msg):
    sys.stderr.write(msg + '\n')
    LOG.error(msg)
[ "def error(message):\n print(message, file=sys.stderr)", "def log_error(self, message):\n u = six.text_type\n log_line = (\n u('{0:%Y-%m-%d %H:%M:%S} [FALCON] [ERROR] {1} {2}?{3} => {4}\\n').\n format(datetime.now(), self.method, self.path, self.query_string,\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check if c will be rejected by P4D as nonprintable. P4D rejects "nonprintable" characters with
def is_p4d_printable(c):
    if ord(c) < 0x20:
        return False
    if ord(c) == 0x7F:
        return False
    return True
[ "def is_printable(c):\n return ord(c)>=32 or c in ['\\r','\\n', '\\t']", "def is_printable(s):\n for c in s:\n if c not in PRINTABLE_CHARACTERS:\n return False\n return True", "def _is_printable(char):\n category = unicodedata.category(char)\n return (not category.startswith(\"C...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Use the Sqoop export functionality to export data from Hive to a MySQL database.
def hive_to_mysql(hive_conn, username, password, host, port, export_dir, table_name): # the input fields terminated by parameter is to specify os.system("sqoop export --connect jdbc:mysql://{0}:{1}/hive --username " \ "{2} --password {3} --table {4} --export-dir {5} --input-fields-terminated-...
[ "def export_to_hive(self, hive_table_name):\n self._scala.exportToHive(hive_table_name)", "def export():\n tables = [\"montreal_slots\", \"quebec_slots\", \"newyork_slots\", \"seattle_slots\", \"boston_slots\",\n \"cities\", \"city_assets\", \"parking_lots\", \"rules\", \"permits\"]\n\n Logger.inf...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create new map item
def create(self, mapItem: MapItem) -> int: pass
[ "def new_map(self):\n self.wizard = NewMap(self)", "def newMap( capacity=17, prime=109345121, maptype='CHAINING') :\n return ht.newMap (capacity, prime, maptype)", "def new_item(source_name, item):\n\t# id is required\n\tif 'id' not in item:\n\t\traise Exception(f'Cannot create item with no id. Value ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Update map item in database
def update(self, mapItem: MapItem): pass
[ "def update_item(self, key, item):\n self.dict[key]= item", "def update(self, mapper_info: dict):\n self.update_from_dict(\n [\n \"form_id\",\n \"form_name\",\n \"form_revision_number\",\n \"process_key\",\n \"proc...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Delete map item from the database along with all of its translations
def delete(self, mapitem_id: int): pass
[ "async def delete_map(map_id: str):\n query = (mindmap\n .delete()\n .where(mindmap.c.id == map_id)\n )\n await database.execute(query=query)", "async def delete_map(self, ctx, name):\n await ctx.send(get_game(ctx).delete_map(name))", "def deleteMapTable(self, na...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get map item from database
def get(self, mapitem_id: int) -> MapItem: pass
[ "def __getitem__(self, key):\n for db in self.db:\n if db.name == key:\n return db\n raise IndexError", "def _get_item(self, question_id):\n question_map = self._get_question_map(question_id) # Throws NotFound()\n real_question_id = Id(question_map['questionI...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get list of map items for selected lang
def get_all(self, lang: str = None): pass
[ "def get_item_concept_mapping(self, lang):\n concepts = self.filter(active=True, lang=lang)\n return group_keys_by_value_lists(Concept.objects.get_concept_item_mapping(concepts, lang))", "def get_langs(id):", "def getLanguage():\n con = db.getDBConnection()\n sql = 'select distinct language ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test an execution plan with downloadable git files
def test_execution_plan_type_downloable_git(self, mock_makedir, mock_path, mock_git): mock_makedir.return_value = None mock_path.return_value = True mock_git.clone.return_value = None template = self.get_template_downloable_git() fi...
[ "def test_download_deployment_run_test_report(self):\n pass", "def test_provider_project_development_snapshot_download(self):\n pass", "def calc_test(commits, author):\n\topen('modifications.csv', 'w').close()\n\t\n\tfor count, commit in enumerate(commits):\n\t\t# status update\n\t\tif (count + 1)...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test an execution plan with svn files.
def test_execution_plan_type_svn(self, mock_makedir, mock_subproc_popen): process_mock = mock.Mock() attrs = {'communicate.return_value': ('ouput', 'ok'), 'poll.return_value': 0} process_mock.configure_mock(**attrs) mock_subproc_popen.return_value = process_mock ...
[ "def test_get_file_with_svn_and_revision(self):\n self._test_get_file(\n tool_name='Subversion',\n revision='123',\n base_commit_id=None,\n expected_revision='123')", "def test_get_file_with_svn_and_base_commit_id(self):\n self._test_get_file(\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Helper function to convert padding format for pad operator.
def transform_padding(pad_width): num_pad_values = len(pad_width) onnx_pad_width = [0]*num_pad_values start_index = 0 # num_pad_values will always be multiple of 2 end_index = int(num_pad_values/2) for idx in range(0, num_pad_values): if idx % 2 == 0: onnx_pad_width[start_in...
[ "def _make_padding(self, op, padding, input_shape, kernel_shape, output_shape, strides):\n dim = len(padding)\n if dim == 4:\n padding = (padding[3], padding[1], padding[2], padding[0])\n elif dim == 3:\n padding = (False, padding[1], padding[2], padding[0])\n elif ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Helper function to convert a string version of Boolean attributes to integer for ONNX. Takes attribute dictionary and attr_name as parameters.
def get_boolean_attribute_value(attrs, attr_name): return 1 if attrs.get(attr_name, 0) in ["True", "1"] else 0
[ "def convert_to_intbool(val_str):\n return 1 if val_str == 'Yes' else 0", "def _bool_to_int(self, bool_arg):\n if bool_arg == True:\n return 1\n else:\n return 0", "def _convert_bool_to_int(tensor):\n if tensor.dtype == mstype.bool_:\n return tensor.astype(\"int3...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Helper function to create a basic operator node that doesn't contain op-specific attrs
def create_basic_op_node(op_name, node, kwargs):
    name, input_nodes, _ = get_inputs(node, kwargs)
    node = onnx.helper.make_node(
        op_name,
        input_nodes,
        [name],
        name=name
    )
    return [node]
[ "def _generate_binary_expression(operator: BinaryOperator, docstring: str):\n\n class Expr(BinaryExpression):\n __doc__ = docstring\n\n def __init__(\n self, loc: Optional[SourceLocation], left: Expression, right: Expression\n ):\n super().__init__(loc, operator, left, ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Helper function to convert weights and inputs.
def convert_weights_and_inputs(node, **kwargs): name, _, _ = get_inputs(node, kwargs) if kwargs["is_input"] is False: weights = kwargs["weights"] initializer = kwargs["initializer"] np_arr = weights[name] data_type = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[np_arr.dtype] dims...
[ "def _convert_trained_weights(**kwargs):", "def apply_weight(self, inputs):\n assert len(inputs) == self.size_inputs\n W = self.weights\n return [x * w for w, x in zip(inputs, W)]", "def convert(cls, op_name: str, params: Dict, weights: Dict = None):", "def normalize_network_input(self, n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Map MXNet's convolution operator attributes to ONNX's Conv operator and return the created node.
def convert_convolution(node, **kwargs): name, input_nodes, attrs = get_inputs(node, kwargs) kernel_dims = list(parse_helper(attrs, "kernel")) stride_dims = list(parse_helper(attrs, "stride", [1, 1])) pad_dims = list(parse_helper(attrs, "pad", [0, 0])) num_group = int(attrs.get("num_group", 1)) ...
[ "def _create_conv_pool(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n k = [op.handle.kernel_h, op.handle.kernel_w]\n s = [op.handle.stride_h, op.handle.stride_w]\n oddp = op.odd_padding\n p = [\n op.handle.pad_h + oddp[0],\n op.h...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }