query: string (lengths 9 to 9.05k)
document: string (lengths 10 to 222k)
negatives: list (lengths 19 to 20)
metadata: dict
Find a child relationship view matching the supplied non-None arguments.
def find_relationship_view( self, *, relationship: Optional[Relationship] = None, description: Optional[str] = None, response: Optional[bool] = None, ) -> Optional[RelationshipView]: for view in self._relationship_views: rel = view.relationship ...
[ "def get_child(self, graph_id, child_id):\n queryset = self.get_child_qs(graph_id)\n child = get_object_or_404(queryset, id=child_id)\n return child", "def __getChildView(self, parentId, childSeq):\n # child_view = None\n # str_getChildView = \"self.vc.findViewById('\" + parentI...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Ensure that an element can't be added if its parent or children are already in the view.
def check_parent_and_children_not_in_view(self, element: Element) -> None: for view in self.element_views: if view.element in element.child_elements: raise ValueError(f"A child of {element.name} is already in this view.") if view.element is getattr(element, "parent", None...
[ "def hasParent(self):\n\t\traise Exception(\"Abstract method IElement.hasParent not implemented in: \" + str(self))", "def child_invalid(self):\n raise NotImplementedError(\n \"{} does not have implemented `child_invalid`\".format(self)\n )", "def _validate_on_set_parent(self):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Implements bilinear functions using replicated secret shares. Shares are input as ArithmeticSharedTensors and are replicated within this function to perform computations. The protocol used here is that of section 3.2 of ABY3
def __replicated_secret_sharing_protocol(op, x, y, *args, **kwargs): assert op in { "mul", "matmul", "conv1d", "conv2d", "conv_transpose1d", "conv_transpose2d", } x_shares, y_shares = replicate_shares([x.share, y.share]) x1, x2 = x_shares y1, y2 = y_sh...
[ "def fuse_tg_nd(\n image_a: xpArray,\n image_b: xpArray,\n downscale: Optional[int] = 2,\n sharpness: Optional[float] = 24,\n tenengrad_smoothing: Optional[int] = 4,\n blend_map_smoothing: Optional[int] = 10,\n bias_axis: Optional[int] = None,\n bias_exponent: Optional[float] = 3,\n bias_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Adds a new client to the database
def add_client(name): return create_client(name)
[ "def add_client(self, client: Client):\n try:\n query = 'INSERT INTO clients(ID, Name, PublicKey, LastSeen) VALUES(?,?,?,?)'\n params = (client.id, client.name, client.public_key, client.last_seen)\n\n self.cursor.execute(query, params)\n self.db_connection.commit(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Lets public and registered users submit a support ticket
def support_submit_ticket(self, **kw): person_name = "" if http.request.env.user.name != "Public user": person_name = http.request.env.user.name pic = http.request.env['schedule.person.in.charge'].sudo().search([],order = 'end_date desc',limit=1).pic return http.request.r...
[ "def support_submit_ticket(self, **kw):\n first_name = \"\"\n if http.request.env.user.name != \"Public user\":\n first_name = http.request.env.user.name\n\n setting_max_ticket_attachments = request.env['ir.values'].get_default('website.support.settings',\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a new session and persist it according to its username and token values.
def _new_session(self, username_key=None, **attributes): for key in ['username', 'token', 'tenant_id']: if attributes.get(key, None) is None: attributes[key] = key + "_" + text_type(uuid4()) if 'expires' not in attributes: attributes['expires'] = ( ...
[ "def _create_new_session_token(self):\n session_token = self.__generate_session_token()\n payload = {\n 'token' : session_token\n }\n self.encoded_token = jwt.encode(payload, 'secret', algorithm='HS256')\n Token.objects.create(session_token=session_token)", "def creat...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given the name of a region and a mimic internal service ID, get a resource for that service.
def service_with_region(self, region_name, service_id, base_uri): key = (region_name, service_id) if key in self.uri_prefixes: return self.uri_prefixes[key].resource_for_region( self.uri_for_service(region_name, service_id, base_uri))
[ "def __get_resource(service, region):\n if region is None:\n region = aws_region.get_region()\n return boto3.resource(service, region_name=region)", "def service_with_region(self, region_name, service_id, base_uri):\n if service_id in self._uuid_to_api:\n api = self._uuid_to_api[ser...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generate a URI prefix for a given region and service ID.
def uri_for_service(self, region, service_id, base_uri): return str(URLPath.fromString(base_uri) .child("service").child(region).child(service_id).child(""))
[ "def uri_for_service(self, region, service_id, base_uri):\n return str(URLPath.fromString(base_uri)\n .child(b\"mimicking\").child(service_id.encode(\"utf-8\"))\n .child(region.encode(\"utf-8\")).child(b\"\"))", "def service_with_region(self, region_name, service_id, bas...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
set_time_period takes in a data frame, calculates the time period (ThanksGiving, WinterBreak, SummerBreak, or Not Holiday) for each record, and returns the modified data frame
def set_time_period(df): for i in range(0,len(df['Day'])): if (((df.loc[i,'Month']==11) & (df.loc[i,'Day']<=30) & (df.loc[i,'Day']>=27))\ | ((df.loc[i,'Month']==12) & (df.loc[i,'Day']<=3) & (df.loc[i,'Day']>=1)) ): df.loc[i,'Holiday'] = 'ThanksGiving' TGDate = df.loc[i,'w...
[ "def add_time_period(df):\n\n # determine in which half hour period of the day the \n # predicted time of arrival falls\n\n interval = df.iloc[0].planned_arrival // 1800 \n\n # find string representation of period from dict. mapping (top)\n\n inverval_string = interval_map[interval]\n\n # add the ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
anova_analysis takes in a data frame, performs an ANOVA test for hypothesis 1, and prints out the test results
def anova_analysis(df): time_periods = df.groupby(['week_ending','Holiday'],as_index = False)[['seats_sold']].sum() TG = time_periods.loc[time_periods['Holiday'] == 'ThanksGiving','seats_sold'] WB = time_periods.loc[time_periods['Holiday'] == 'WinterBreak','seats_sold'] SB = time_periods.loc[time_period...
[ "def testANOVA():\n\n data = {'Control': [54, 23, 45, 54, 45, 47], 'Treated': [87, 98, 64, 77, 89],\n 'TreatedAntagonist': [45, 39, 51, 49, 50, 55]}\n print(type(data))\n OneWayAnova(dataStruct=data, dataLabel='3 Groups', mode='parametric')\n print ('-'*80)\n print ('Compare to Prism output: ')\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
hypoTest2 takes in the grosses data set and the ratings data set, prepares the data, performs a logistic regression to test hypothesis 2, and prints out the regression results
def hypoTest2(df, rt): from sklearn import preprocessing import statsmodels.api as sm gs = df ratings = rt # limit the time scope to recent 5 years testData2 = gs[gs['year']>=2015] testData2 = testData2[['show','year','month','this_week_gross']] # calculate avg weekly grosses mean (b...
[ "def test_g2(self):\n self.setUp()\n theta = self.data.theta\n beta_0, beta_1 = theta[\"beta_0\"], theta[\"beta_1\"]\n gamma_0, gamma_1 = theta[\"gamma_0\"], theta[\"gamma_1\"]\n g2 = self.E_func.g2(self.S, gamma_0, beta_0, gamma_1, beta_1)\n # values of g2 at first group a...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add model parameters to the Modelica file and create a dictionary of model parameters. This function extracts model and subckt information, along with their parameters, with the help of optionInfo.
def addModel(self,optionInfo): modelName = [] modelInfo = {} subcktName = [] paramInfo = [] transInfo = {} for eachline in optionInfo: words = eachline.split() if words[0] == '.include': name = words[1].split('.') ...
[ "def create_modelinfo(mi):\n from modelinfo import modelcfg\n\n modelinfocfg = {k: mi[k] if k in mi else modelcfg[k] for k in modelcfg.keys()}\n timestamp = common.timestamp()\n modelinfocfg['problem_id'] = 'rld'\n modelinfocfg['rel_num'] = modelinfocfg['timestamp'] = timestamp\n modelinfocfg['model_info'] = ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Separate print and plot statements from component statements
def separatePlot(self,schematicInfo): compInfo = [] plotInfo = [] for eachline in schematicInfo: words = eachline.split() if words[0] == 'run': continue elif words[0] == 'plot' or words[0] == 'print': plotInfo.append(eachline) ...
[ "def visualize():", "def printParticle(self):\n print(self.barcode,',',self.vertices,',',self.mangle,',',self.charge, sep='')", "def display(self):\n print(\"{}, {}\".format(self.label, self.params))", "def view(self):\n import matplotlib.pyplot as plt\n for sp in range(self.nspecies):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Take the subcircuit name, give the info related to parameters in the first line, and initialise it
def getSubParamLine(self,subname, numNodesSub, subParamInfo,dir_name): #nodeSubInterface = [] subOptionInfo_p = [] subSchemInfo_p = [] filename_t = subname + '.sub' filename_t = os.path.join(dir_name, filename_t) data_p = self.readNetlist(filename_t) subOptionInfo...
[ "def append_subcircuit(self, lines: Tuple[int, int], circuit: Circuit, content: str) -> None:", "def __init__(self,\n name,\n vertices_location,\n connectivity,\n connectivity_ids=None,\n connectivity_label=None,\n connectivity_label_metadata...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Separate the node numbers and create nodes in the Modelica file; the nodes in the subckt line should not be inside the protected keyword. pinInit is the one that goes under the protected keyword.
def nodeSeparate(self,compInfo, ifSub, subname, subcktName,numNodesSub): node = [] nodeTemp = [] nodeDic = {} pinInit = 'Modelica.Electrical.Analog.Interfaces.Pin ' pinProtectedInit = 'Modelica.Electrical.Analog.Interfaces.Pin ' protectedNode = [] print "CompInfo ...
[ "def init_nodes(self):\n self.nodes = {}\n app.logger.warning(\"init LogViewer\")\n with open(\"./config\",\"r\") as f:\n self.nodes_addrs = json.loads(f.read().replace('\\n',''))\n for node in self.nodes_addrs:\n self.create_node(node)", "def kernel_main_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Main function of the Ngspice to Modelica converter module
def main(args): if len(sys.argv) == 2: filename = sys.argv[1] else: print "USAGE:" print "python NgspicetoModelica.py <filename>" sys.exit() dir_name = os.path.dirname(os.path.realpath(filename)) file_basename = os.path.basename(filename) obj_NgMoConvert...
[ "def main():\n # type: () -> None\n from .modelinfo import make_model_info\n\n if len(sys.argv) <= 1:\n print(\"usage: python -m sasmodels.generate modelname\")\n else:\n name = sys.argv[1]\n kernel_module = load_kernel_module(name)\n model_info = make_model_info(kernel_modul...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Loads the given filename found at filepath with the preferred options. Accepts CSV, XLS, XLSX, HTML, and XML.
def load(path, directory=None): if directory is not None: path = os.path.join(directory, path) _, ext = os.path.splitext(path) if ext.lower() == '.csv': try: df = pd.read_csv(path, index_col=None, dtype=object) return df except: df = pd.read_csv...
[ "def load_file(self, **kwargs):\n csv.register_dialect('withPipe', delimiter='|', quoting=csv.QUOTE_NONE)\n csv.register_dialect('withSemi', delimiter=';', quoting=csv.QUOTE_NONE)\n\n if self.extension == '.txt' or self.extension == '':\n\n try:\n file = open(self.full...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Converts all xls files at the given path to CSV files and outputs them into a directory named after the file. If the xls file has sheets, those are also outputted to the new directory.
def csv_from_excel(path=os.getcwd()): path = path + '/*.xls*' files = glob.glob(path) for i in files: file = os.path.basename(i) filename = os.path.splitext(file)[0] xls_file = pd.ExcelFile(i, index_col=None, dtype=object) if len(xls_file.sheet_names) > 1: try: ...
[ "def split_excel_files(self):\n for x in self.files:\n if x[-4:] not in [\".xls\", \"xlsx\"]:\n continue\n else:\n files = pd.read_excel(x, sheet_name=None)\n for k, v in files.items():\n #get name with the extension stripp...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Distributes values in ranks column into columns created based on unique values in programs column.
def fix_ranks(df, ranks='Current Ranks', programs='Programs'): # Create columns based on unique program values if programs in df: unique_programs = set(df[programs].unique()) if np.nan in unique_programs: unique_programs.remove(np.nan) for value in unique_programs.copy(): ...
[ "def ranksByPrograms(self, ranks, programs):\n\n if ranks == '' or programs == '':\n self.parent.window().notifyUser(\"One or more of the selections are invalid.\")\n\n elif ranks is None or programs is None:\n pass\n\n else:\n self.df = procedures.fix_ranks(sel...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Distributes the values in the phones column based on the identifier given in parentheses into either Home, Mobile, or Work.
def split_phones(df, column): df['Work'] = df[column].str.extract(r'(...-...-....)\(W\)', expand=True) df['Mobile'] = df[column].str.extract(r'(...-...-....)\(M\)', expand=True) df['Mobile 2'] = df[column].str.extract(r'...-...-....\(M\).*?(...-...-....)\(M\)', expand=True) df['Mobile 3'] = df[column]....
[ "def home_phones(self):\n if \"homePhones\" in self._prop_dict:\n return self._prop_dict[\"homePhones\"]\n else:\n return None", "def align_device_type(connected_devices_df):\n \n connected_devices_df['deviceSubtype_mod'] = connected_devices_df['deviceSubtype']\n conne...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Splits the comma separated values in the emails column into a maximum of 3 different columns.
def split_emails(df, column): df['Email'] = df[column].str.extract(r'(.*?@.*?\....),?', expand=True) df['Email 2'] = df[column].str.extract(r'.*@.*\....,\s?(.*@.*\....)', expand=True) df['Email 3'] = df[column].str.extract(r'.*@.*\....,\s?.*@.*\....,\s?(.*@.*\....)', expand=True) return df
[ "def split_into_columns(s):\n\ts = re.sub(',,,', ',0,0,', s)\n\ts = re.sub(',,', ',0,', s)\n\treturn s.split(',')", "def _parse_emails(self, emails):\n return [e.strip() for e in emails.split(',')]", "def _email_sequence(emails):\n if isinstance(emails, six.string_types):\n return [email_address.stri...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Removes all leading and trailing whitespace. Replaces all newlines, carriage returns, and invisible tab breaks with a space. If a column isn't specified, it acts on the entire dataframe.
def strip_whitespace(df, column=None): if column is None: for x in df.columns: if df[x].dtypes == object: df[x] = pd.core.strings.str_strip(df[x]) df[x] = df[x].str.replace('\n', '') df[x] = df[x].str.replace(r'\r', ' ', regex=True) ...
[ "def strip_columns(df: DataFrame) -> DataFrame:\r\n return df.apply(lambda x: x.str.strip() if x.dtype == 'object' else x)", "def strip_extra_spaces_and_newline_characters_in_column_names(\n self,\n df\n ):\n return df.rename(columns=lambda x: x.strip())", "def StringStrip(df)...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Splits a column of comma separated values into their own rows with values identical to the original.
def tidy_split(df, column='Members', sep=', '): indexes = [] new_values = [] for i, presplit in enumerate(df[column].astype(str)): for value in presplit.split(sep): indexes.append(i) new_values.append(value) new_df = df.iloc[indexes, :].copy() # the .copy() Prevents a wa...
[ "def row_splitter(row: str) -> List[str]:\n return row[:-1].split(',')", "def split_column(df,col_name,reg_ex=',',keep=False):\n # https://stackoverflow.com/a/51680292/5847441\n df = df.select(col_name,posexplode(split(col_name,reg_ex)).alias('pos','val'))\\\n .select(col_name,concat(lit(col_name)...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Iterates over a dataframe and drops all rows that contain quotes as part of the string.
def drop_quote_rows(df): for i in df.columns.values: if df[i].dtype != 'datetime64[ns]' and df[i].dtype != 'float64': df = df[~df[i].str.contains('"', na=False)] return df
[ "def test_replace_quotes():\n df_result_quotes = replace_quotes(output_not_cleaned)\n assert '\"' not in df_result_quotes.values\n print('TEST PASSED, for replace_quotes')", "def StringStrip(df):\n ### Columnas\n\n df.columns = [col.strip() for col in df.columns]\n\n ### Observacion...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Chooses the date closest to the given date in a given pandas series in the given direction.
def closest_date(series, date=pd.to_datetime('today'), period='future'): x = series.copy() x = x.append(pd.Series(date, index=[len(x.index)])) x = x.ix[pd.to_datetime(x).sort_values().index] x = x.reset_index(drop=True) index_today = x[x == date].head(1) if period == 'future': if x.tail...
[ "def nearestBusinessDay(target_date, prefer_forward = True, cal=WEEKEND_CALENDAR):\n if cal.isBusinessDay(target_date):\n return target_date\n \n one_day = timedelta(1)\n forward_date = target_date + one_day\n backward_date = target_date - one_day\n \n while True:\n is_forward_ok ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function called for every request, before Django chooses which view will be called. Asks the user's browser for a Negotiate token.
def process_request(self, request, *args, **kwargs): if not settings.GSSAPI_ENABLED_OPTION: return None unauthorized = False if 'HTTP_AUTHORIZATION' in request.META: kind, initial_client_token = request.META['HTTP_AUTHORIZATION'].split(' ', 1) if kind != 'Nego...
[ "def devpiserver_authcheck_always_ok(request):", "def lesson_auth(request):", "def token_required(view_func):\n\n def _parse_auth_header(auth_header):\n \"\"\"\n Parse the `Authorization` header\n\n Expected format: `WATCHMAN-TOKEN Token=\"ABC123\"`\n \"\"\"\n\n # TODO: Fig...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a dataset from the given parameters. Note that despite the type hinting, ``domain`` is only semi-optional, as a domain is required to create a
async def create_dataset(self, dataset_name: str, category: DataCategory, domain: Optional[DataDomain] = None, **kwargs) -> bool: # If a domain wasn't passed, generate one from the kwargs, or raise an exception if we can't if domain is None: data_format = kwargs...
[ "def synthetic(domain, N):\n arr = [np.random.randint(low=0, high=n, size=N) for n in domain.shape]\n values = np.array(arr).T\n df = pd.DataFrame(values, columns = domain.attrs)\n return Dataset(df, domain)", "def CreateDataset(all_arrays):\n dataset = Dataset()\n\n dataset._add...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Request the status of the provided job, represented in string form.
async def request_job_status(self, job_id: str, *args, **kwargs) -> str: # TODO: implement raise NotImplementedError('{} function "request_job_status" not implemented yet'.format(self.__class__.__name__))
[ "def get_status(job_id):\n job = fetch_data.AsyncResult(job_id, app=app)\n return jsonify({'job_id': job_id, 'status': job.status})", "def job_status(self, job_id):\n return self.api_client.job(job_id).status()", "def get_job_status(jobid, wait=30):\n cmd = \"scontrol show job {0}\".format(jobid...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Request the provided job be stopped; i.e., transitioned to the ``STOPPED`` exec step.
async def request_job_stop(self, job_id: str, *args, **kwargs) -> bool: # TODO: implement raise NotImplementedError('{} function "request_job_stop" not implemented yet'.format(self.__class__.__name__))
[ "def stop_job(self, job_handle: JobHandle, job_runtime_env: JobRuntimeEnv):\n pass", "def stop(self) -> None:\n self._client.terminate_job(jobId = self.id, reason = self.STOP_REASON)", "def stop_training_job(TrainingJobName=None):\n pass", "def stopped(self):\n self.status_changed()\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Read a blockchain file and store blocks into a list
def loadBlockchain(path): list = [] filename = Blockchain(0, '0', 0, 0, 0).getFilename() f = open(path) for line in f: if line == '\n': continue linesplit = line[:-1].split(';') b = Blockchain(int(linesplit[0]), linesplit[1], int(linesplit[2]), int(linesplit[3...
[ "def load_data(self):\n try:\n with open(\"blockchain.txt\", mode=\"r\") as f:\n file_content = f.readlines()\n blockchain = json.loads(file_content[0][:-1])\n # OrderedDict\n updated_blockchain = []\n for block in blockcha...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert a block in string form to a block object
def stringToBlock(strBlock): blocksplit = strBlock.split(';') block = Blockchain(int(blocksplit[0]), blocksplit[1], int(blocksplit[2]), int(blocksplit[3]) ,blocksplit[4]) return block
[ "def _decode_block_string(block_string):\n assert isinstance(block_string, str)\n ops = block_string.split('_')\n options = {}\n for op in ops:\n splits = re.split('(\\\\d.*)', op)\n if len(splits) >= 2:\n key, value = splits[:2]\n opti...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a block for a transaction
def newBlock(preBlock, remitter, number, payee): index = preBlock.index + 1 timestamp = int(round(time.time() * 1000)) data = (remitter, number, payee).__str__() previousHash = preBlock.hash nounce = 0 return Blockchain(index, data, timestamp, nounce, previousHash)
[ "def _create_block(self, data):\n raise NotImplemented()", "def create_block(self):\r\n self.current_block = self.blocks[\r\n random.randint(0, len(self.blocks) - 1)](self.width/2, 0)", "def __create_block(self, blocknode):\n block_name = blocknode.attributes['id'].value\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This function handles the /practices/create endpoint for the blueprint
def practices_create(): practice = Practice() form = PracticeCreateForm() if form.validate_on_submit(): form.populate_obj(practice) db.session.add(practice) db.session.commit() return redirect(url_for('practices.home')) return render_template('practices/create.html', for...
[ "def create_book():\n book_data = request.get_json()\n return flaskify(book_model.create_book(book_data))", "def create_doctor():\n first_name = request.json['first_name']\n second_name = request.json['second_name']\n last_name = request.json['last_name']\n email = request.json['email']\n spe...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generator for sequence. Will return num equally spaced items from the sequence.
def takespread(sequence, num): length = float(len(sequence)) for i in range(num): yield sequence[int(np.ceil(i * length / num))]
[ "def c(sequence):\n Debugger.starts += 1\n for item in sequence:\n Debugger.items += 1\n yield item", "def takespread(sequence, num):\n length = float(len(sequence))\n for i in range(num):\n yield sequence[int(ceil(i * length / num))]", "def c(sequence):\n c.sta...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Adds an object (or an object of the given class) with the given name to the runtime object.
def add_object(self, name, cls_or_object): if self._internal.is_node_registered_within_endpoint(): raise CloudioModificationException('A CloudioRuntimeObject\'s structure can only be modified before' + ' it is registered within the endpoint!') ...
[ "def addObject(self, name, clsOrObject):\n if self.isNodeRegisteredWithinEndpoint():\n raise RuntimeError(u'A CloudioRuntimeNode\\'s structure can only be modified before it is registered within' +\n u' the endpoint!')\n\n # Check if parameter is a class\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Filter the input API request
def _filter_in_request(self): pass
[ "def filter(self, data):\n pass", "def _filter(self):", "def filter(self, inputs):\n pass", "def filter(ctx):\n pass", "def filterRansac():\n pass", "def filter_query(self, query, request, resource):\n raise NotImplementedError()", "def filter(self, filters):", "def extract_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets a QuerySet of current objects related to ``objs`` via the relation ``related``.
def related_objects(self, related, objs): from versions.models import Versionable related_model = related.related_model if issubclass(related_model, Versionable): qs = related_model.objects.current else: qs = related_model._base_manager.all() return qs.us...
[ "def _get_related_objects(obj, parent_class=False):\n foreign_managers = _get_related_managers(obj, parent_class)\n\n related_objects = []\n for manager in foreign_managers:\n related_objects += manager.all()\n\n return related_objects", "def get_related_objects(self):\n result = []\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add a user to the db. User details (name, pass, phone) are in json_details
def add_user_to_db(json_details): return True
[ "def add_user(self, data):\n pwd = generate_password_hash(data['password'], method='sha256')\n sql = \"\"\"INSERT INTO users(username, firstname, lastname, contact, email, password, usertype)VALUES(%s, %s, %s, %s, %s, %s, %s)\"\"\"\n self.cursor.execute(sql, (data['username'], data['firstname']...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Repeats a command a specified number of times.
async def do(ctx, times : int, *, command): msg = copy.copy(ctx.message) msg.content = command for i in range(times): await bot.process_commands(msg)
[ "async def repeat(times : int, content='repeating...'):\r\n for i in range(times):\r\n await bot.say(content)", "async def repeat(ctx, times: int, content='repeating...'):\n for i in range(times):\n await ctx.send(content)", "async def repeat(self,ctx, times: int, content='repeating...'):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gives a URL to the current bot changelog.
async def changelog(): await bot.say('https://discord.gg/y2PcWMM')
[ "def get_changelog_url(repository_url, branch):\n changelog_url = f\"{repository_url}/blob/{branch}/CHANGES.txt\"\n requests_var = requests.get(changelog_url, timeout=30)\n if requests_var.status_code != 200:\n raise RuntimeError(f\"Page at URL {changelog_url} not found\")\n\n return changelog_ur...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Download raw REPY data from Technion, convert it to almost-unicode (numbers are reversed)
def repy_data(): REPY_URI = "http://ug.technion.ac.il/rep/REPFILE.zip" if prefs.options.usecache: t = open("REPFILE.zip") else: try: t = tempfile.TemporaryFile() t.write(urllib.urlopen(REPY_URI).read()) except: ttime.warning(_("Network download of ...
[ "def raw(self, txt_unRaw):", "def decode(self, encoded):", "def download_to_utf_string(url: str) -> str:\n request = get(url)\n content = request.content.decode(\"utf-8\")\n return content", "def Decode(self, encoded_data):", "def _trans_bytes(data):\n if platforms.is_py2():\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
convert time in seconds from 1998.1.1 to fractional year
def sectoFracYear(stime): ltime = convertCtimeToYdate(stime) atemp = re.split(':', ltime) year= int(atemp[0]) ydate = int(atemp[1]) hours = int(atemp[2]) minutes = int(atemp[3]) seconds = int(atemp[4]) chk = 4.0 * int(0.25 * year) if chk == year: base = 366 ...
[ "def convertToYearDate(val):\n\n ntime = tcnv.convertCtimeToYdate(val)\n\n btemp = re.split(':', ntime)\n year = float(btemp[0])\n ydate = float(btemp[1])\n hour = float(btemp[2])\n mins = float(btemp[3])\n\n chk = int(0.25 * year)\n if chk == year:\n base = 366\n else: \n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initialize summary logger (if needed).
def initialize_summary(self): if self.need_logs: self.summary_writer = tf.summary.create_file_writer(self.log_dir) if self.verbose > 0: full_log_path = os.path.abspath(self.log_dir) print('Initialize logs, use: \ntensorboard --logdir={}'.format(full_log_pa...
[ "def __init__(self, log_dir):\n self.writer = SummaryWriter(log_dir)", "def initialize_log():\n logging.basicConfig(\n format='%(asctime)s %(levelname)-8s %(message)s',\n level=logging.INFO,\n datefmt='%Y-%m-%d %H:%M:%S',\n )", "def _init_logs(self):\n log_utils.set_is_d...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get operation profiling info.
def get_profile_op_info(): profiler_dir = get_profiler_dir(request) train_id = get_train_id(request) if not profiler_dir or not train_id: raise ParamValueError("No profiler_dir or train_id.") search_condition = request.stream.read() try: search_condition = json.loads(search_...
[ "def get_profiling(self):\n return self.net.get_profiling()", "def get_profile_op_info():\n profiler_dir = request.args.get('profile')\n train_id = request.args.get('train_id')\n if not profiler_dir or not train_id:\n raise ParamValueError(\"No profiler_dir or train_id.\")\n\n search_con...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get profile device list.
def get_profile_device_list(): profiler_dir = get_profiler_dir(request) train_id = get_train_id(request) if not profiler_dir or not train_id: raise ParamValueError("No profiler_dir or train_id.") profiler_dir_abs = os.path.join(settings.SUMMARY_BASE_DIR, train_id, profiler_dir) try: ...
[ "def get_profile_device_list():\n profiler_dir = request.args.get('profile')\n train_id = request.args.get('train_id')\n if not profiler_dir or not train_id:\n raise ParamValueError(\"No profiler_dir or train_id.\")\n\n profiler_dir_abs = os.path.join(settings.SUMMARY_BASE_DIR, train_id, profiler...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the image without atomic prefixes used to map to skopeo args.
def image(self): image = self._image for remove in ('oci:', 'http:', 'https:'): if image.startswith(remove): image = image.replace(remove, '') return image
[ "def no_bin(image, *args, **kwargs):\n return image", "def no_img(texto):\n return sub_no_img.sub(\"\", texto)", "def image_cleaner(self) -> Optional[pulumi.Input['ManagedClusterSecurityProfileImageCleanerArgs']]:\n return pulumi.get(self, \"image_cleaner\")", "def get_image_hdu():\n raise Not...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Execute an action on the signin API.
def _action(self, action, data=None, api="signin"): if not data: data = {} data['action'] = action # data['redirect_uri'] = self._REDIRECT_URL data['csrf'] = self._csrf_token() print(data) r = self.session()._post( "https://signin.aws.amazon.com...
[ "def signin_handler(self):\n __transaction = self._db.begin_nested()\n output = self.request_handler.handle_signin_request(\n flask.request.data, self.model.standard_lookup,\n self.model.authdata_lookup)\n __transaction.commit()\n return Response(output, 200, {\"Con...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Checksum for an NMEA string if there is no checksum present, for simplified Arduino input. The checksum should be appended to the original NMEA sentence and returned.
def checkSum(nmea_string): # take string after $ nmea_str = re.sub(r'^\$(.*)$', r'\1', nmea_string) # clear whitespace nmea_str = re.sub(r'\s', '', nmea_str) checksum = 0 # initialize for b in nmea_str: checksum ^= ord(b) # xor # need to remove the front '0x' from the import hex...
[ "def calculate_check_sum(self):\n\n checksum = self.encoded_text[0]\n\n for index, char in enumerate(self.encoded_text):\n if index > 0:\n checksum += (index * char)\n\n return checksum % 103", "def checksum (upc):\n\n # check type of input\n # raise TypeError ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
sign(x) = 1 when x >= 0, sign(x) = -1 when x < 0
def sign(x): if x >= 0: return 1 else: return -1
[ "def sign(x):\n if x >= 0:\n return 1\n return -1", "def sign( x ):\n if( x >= 0 ):\n return 1\n else:\n return -1", "def sign(x):\n if x == 0:\n return 0\n elif x > 0:\n return 1\n else:\n return -1", "def sign(a):\n if a > 0:\n return 1\n elif ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Vector dot product, returns a scalar. x and y are two vectors with the same length.
def vector_dot(x, y): if(len(x) != len(y)): raise ValueError("vector lengths differ") else: # return x1*y1+x2*y2+...xn*yn return sum([x[i] * y[i] for i in range(len(x))])
[ "def dotproduct(x, y):\n return sum(vector_apply(lambda a, b: a * b, x, y))", "def dot_product(vec_a: Vector, vec_b: Vector) -> float:\n return vec_a * vec_b", "def dot(vec1, vec2):\n #return sum([i*j for i, j in zip(vec1, vec2)])\n # Faster if we know it is 3d only\n return vec1[0]*vec2[0] + vec...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Vector cross product, returns a vector. x and y are two vectors with the same length; the returned vector z has the same length as well. This time only dim=3 is supported.
def vector_cross(x, y): if(len(x) != len(y)): raise ValueError("vector lengths differ") elif(len(x) > 3): raise ValueError("vector is more than 3D") else: s = [x[1] * y[2] - x[2] * y[1], x[2] * y[0] - x[0] * y[2], x[0] * y[1] - x[1] * y[0]] return s
[ "def cross_product(v1, v2):\r\n x3 = v1[1] * v2[2] - v2[1] * v1[2]\r\n y3 = -(v1[0] * v2[2] - v2[0] * v1[2])\r\n z3 = v1[0] * v2[1] - v2[0] * v1[1]\r\n return [x3, y3, z3]", "def cross(x, y):\n x = x.reshape(3)\n y = y.reshape(3)\n z = np.cross(x, y)\n z = z.reshape((3, 1))\n return z",...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Data acquisition of accelerometer and magnetometer data, per the LSM303D datasheet
def LSM_acquisition(add): # control register CTRL0 = 0x1f # p.34, accelerator CTRL1 = 0x20 CTRL2 = 0x21 CTRL5 = 0x24 # p.36, magnetic CTRL6 = 0x25 CTRL7 = 0x26 FIFO_CTRL = 0x2e # p.40 # accelerater OUT_X_L_A = 0x28 OUT_X_H_A = 0x29 OUT_Y_L_A = 0x2a OUT_Y_H_A = 0x2b...
[ "def do_acquisition(self):\n self.arm()\n\n self.daq_module.execute()\n\n self.fn_start()\n\n while not self.daq_module.finished():\n time.sleep(0.01)\n\n results = self.daq_module.read(True)\n data = np.array([results[str.lower(signal)][0]['value'][0] for signal...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Read the LSM value for a certain time, return the value, and write it to the offset register. Make sure the chip is leveled and calm. Based on the LSM Arduino library.
def LSM_offset(add, timer_out=1000): # initialize timer = 0 m_min = [32767, 32767, 32767] m_max = [-32768, -32768, -32768] # lsm reading lsm_result = LSM_acquisition(add) # loop to update min and max while timer <= timer_out: m = lsm_result.next()[3:6] m_min = [min(m_min...
[ "def LSM_acquisition(add):\n # control register\n CTRL0 = 0x1f # p.34, accelerator\n CTRL1 = 0x20\n CTRL2 = 0x21\n CTRL5 = 0x24 # p.36, magnetic\n CTRL6 = 0x25\n CTRL7 = 0x26\n FIFO_CTRL = 0x2e # p.40\n # accelerater\n OUT_X_L_A = 0x28\n OUT_X_H_A = 0x29\n OUT_Y_L_A = 0x2a\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Delete URI and return the number of bytes deleted.
def delete_uri( self, uri: str, logger: Optional[logging.Logger] = default_logger ) -> int: local_dir = get_local_dir_from_uri(uri, self._resources_dir) local_dir_size = get_directory_size_bytes(local_dir) deleted = delete_package(uri, self._resources_dir) if not deleted: ...
[ "def delete(self, uri):\n cache_key = self._build_cache_key(uri)\n self._delete(cache_key)", "def delete(self, uri):\n return self.Delete(uri = uri)", "def delete(name):\n conn = _getcloudnetconnection()\n\n resp = conn.send_request(_file_delete_query, {'name': name})\n deleted = r...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Download a jar URI.
async def _download_jars( self, uri: str, logger: Optional[logging.Logger] = default_logger ): try: jar_file = await download_and_unpack_package( uri, self._resources_dir, self._gcs_aio_client, logger=logger ) except Exception as e: raise R...
[ "def _download(self):\n self._system.download_file(\"http://curl.haxx.se/download/\" + self._tar_name)", "def download_url(url, fd, handle=None):\n return _librepo.download_url(handle, url, fd)", "def download_file(download_url, save_path):\n url = \"https://www.encodeproject.org/\" + download_url\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Implements a polynomial learning rate of the form (1/n^w)
def polynomial_learning_rate(n, w=0.5): assert n > 0, "Make sure the number of times a state action pair has been observed is always greater than 0 before calling polynomial_learning_rate" return 1./n**w
[ "def lr_poly(base_lr, iteration, max_iter, power):\n return base_lr * ((1 - iteration * 1.0 / max_iter) ** power)", "def get_learning_rate():\n return 0.00001", "def __getAndUpdateLearningRate(self):\n rate = self.gamma_0 / (1 + (self.gamma_0 / self.d) * self.t)\n self.t += 1\n return...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Query client to get the practitioner's patient list
def get_patient_list(self, client): self._patient_list = client.get_patient_list(self.id)
[ "def doctors_patients_get(self, request):\n q = Doctor.all()\n q.filter('__key__ =', Key.from_path('Doctor', request.email))\n doctor = q.get()\n\n if doctor != None:\n return doctor.get_patients() \n else:\n return PatientListResponse(patients=[])", "def g...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
For each patient in the practitioner's monitored patient list, retrieve their data from the server
def get_patient_data(self, client): for patient in self._monitored_patients.get_patient_list(): # print("Requesting data for " + patient.first_name+" "+patient.last_name+"...") patient.update_data(client.get_patient_data(patient.id))
[ "def get_patients(self):\r\n return self.__patients_list", "def get_patient_list(self, client):\n self._patient_list = client.get_patient_list(self.id)", "def getPatientInfo(self, patientIds):\n pass", "def getPatientInfo(self, patientIds):\n self.send_getPatientInfo(patientIds)\n retur...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add a patient to the monitored list from the all-patients list
def add_patient_monitor(self, patient_name): for patient in self._patient_list.get_patient_list(): if patient_name == patient.first_name + " " + patient.last_name: self._monitored_patients.add_patient(patient)
[ "def add_patients(self, patient_id, patient):\r\n self.__patients_list[patient_id] = patient\r\n self.__occupied_beds += 1", "def add_patient(self, patient):\n if isinstance(patient, Patient):\n self._patient_list.append(patient)\n self.calculate_avg_cholesterol()", "d...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Remove patient from monitor list
def remove_patient_monitor(self, patient_name): self._monitored_patients.remove_patient(patient_name)
[ "def clear_monitor(self):\n self._monitored_patients = PatientList()", "def remove_monitor(monitor_id):\n g = mongo.db[app.config['GLOBAL_COLLECTION']]\n gdata = g.find_one(dict(), {'_id': 0})\n print(monitor_id)\n ga = GoogleAlerts(gdata['email'], gdata['password'])\n ga.authenticate()\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Reset the monitored patients list
def clear_monitor(self): self._monitored_patients = PatientList()
[ "def reset(self):\n self._list = []\n self.speaker = None", "def clean_patients(self):\n sp.verify(sp.sender == sp.self_address)\n self.data.patients = sp.set([])", "def clear_device_list(self):\r\n\t\tself.device_list = []", "def reset(self):\n self.ids = []", "def Reset(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add the patient to the patient list and recalculate avg cholesterol
def add_patient(self, patient): if isinstance(patient, Patient): self._patient_list.append(patient) self.calculate_avg_cholesterol()
[ "def add_patients(self, patient_id, patient):\r\n self.__patients_list[patient_id] = patient\r\n self.__occupied_beds += 1", "def avg_num_visits_patient(self):\n pass", "def calculate_avg_cholesterol(self):\n total = 0\n no_of_valid_patients = 0\n for patient in self._p...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Remove patient with the given name
def remove_patient(self, patient_name): for i in range(len(self)): selected_patient = self._patient_list[i] if patient_name == selected_patient.first_name + " " + selected_patient.last_name: self._patient_list.pop(i) self.calculate_avg_cholesterol() ...
[ "def remove(name):\n del person_database[name]", "def rm_person():\n # get person name from user\n responses = accept_inputs([\"Person name\"])\n person_name = responses[\"Person name\"]\n # check for existence of person\n results = query_with_results(\"select id from person where name = ?\", [person_name...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Getter method for the list of patients
def get_patient_list(self): return self._patient_list
[ "def get_patients(self):\r\n return self.__patients_list", "def get_patient_list(self, client):\n self._patient_list = client.get_patient_list(self.id)", "def get_patient(self, patient_id):\r\n return self.__patients_list.get(patient_id)", "def getPatientInfo(self, patientIds):\n pass"...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Find patient with the given name
def select_patient(self, patient_name): for i in range(len(self)): selected_patient = self._patient_list[i] if patient_name == selected_patient.first_name + " " + selected_patient.last_name: return selected_patient return None
[ "def _find_person_by_name(self, name):\n return Person.objects.filter(name=name).first()", "def get_patient_by_name(name):\n patient = model.get_patient_by_name(name)\n return model.patient_schema.jsonify(patient)", "def findPersonByName(self, **kwargs):\n return self._people.findPeople(**kw...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculate the average cholesterol of all patients. If a patient has no cholesterol data, they are ignored.
def calculate_avg_cholesterol(self): total = 0 no_of_valid_patients = 0 for patient in self._patient_list: try: total += patient.get_cholesterol_data()[0] no_of_valid_patients += 1 except AttributeError: continue ...
[ "def averageDominationCount(leaf):\n averageDominationCount = np.nanmean(leaf.calDominationCount())\n return averageDominationCount", "def avg_fuel_per_hour(iterable):\n return mean(row.fuel_per_hour for row in iterable)", "def _get_means_across_days(df_byday):\n\n # loop over each day, component, a...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Helper to create a Variable stored on CPU memory.
def _variable_on_cpu(name, shape, initializer, trainable = True): with tf.device('/cpu:0'): var = tf.get_variable(name, shape, initializer=initializer, trainable = trainable) return var
[ "def _variable_on_cpu(name, shape, initializer):\n with tf.device('/cpu:0'):\n var = tf.get_variable(name, shape, initializer=initializer)\n return var", "def create_variable() -> tf.Variable:", "def create_cpu():\n return CPU()", "def to_var(x):\n if torch.cuda.is_available():\n x = x.cuda(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add a leaky ReLU layer
def _add_leaky_relu(hl_tensor, leaky_param): return tf.maximum(hl_tensor, tf.mul(leaky_param, hl_tensor))
[ "def addReLU(self, **kwargs):\n\n input_layer = self.input_layer if not self.all_layers \\\n else self.all_layers[-1]\n\n self.n_relu_layers += 1\n name = \"relu%i\" % self.n_relu_layers\n\n new_layer = ReLU(input_layer, name=name, **kwargs)\n\n self.all_layers += (new_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
3d average pool layer
def _avg_pool3(x, ksize, strides, name): pool = tf.nn.avg_pool3d(x, ksize = ksize, strides = strides, padding = 'VALID', name = name) return pool
[ "def avg_pool3d(input, kernel_size, stride=1, padding=0, ceil_mode=False):\n return _pool('AVG', utils._triple, **locals())", "def _avg_pool3(x, ksize, strides, name):\n pool = tf.nn.avg_pool3d(x, ksize=ksize, strides=strides,\n padding='VALID', name=name)\n return pool", "de...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given a directory path to start, looks for filenames in the directory, and then each parent directory successively, until found. Returns tuple (candidates, path).
def find_candidates_in_parent_dirs(filenames, path): candidates = [filename for filename in filenames if os.path.exists(os.path.join(path, filename))] if not candidates: parent_dir = os.path.join(path, '..') if os.path.abspath(parent_dir) != os.path.abspath(path): ...
[ "def IteratePathParents(start_path):\n path = os.path.abspath(start_path)\n yield path\n while path.strip('/'):\n path = os.path.dirname(path)\n yield path", "def search_for_parent_dir(start_at: str=None, with_files: set=None,\n with_dirs: set=None) -> str:\n if not start_at:\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Removes any words containing digits, like flight numbers, phone numbers, etc.
def remove_flight_numbers(text): return ' '.join(word for word in text.split() if not any(char.isdigit() for char in word))
[ "def remove_words_digits(text):\n return \" \".join([word for word in str(text).split() if not any(c.isdigit() for c in word) ])", "def sanitize_text(text):\n return re.sub(r\"\\d+\", \"\", text)", "def remove_numbers(tweet):\n words = re.split(r'\\s+', tweet)\n new_words = []\n\n for word in w...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
pushshift.io's Reddit archives are compressed in different formats over time. Find the correct filename given the date.
def find_reddit_filename(wildcards): yearmonth = wildcards.year + '-' + wildcards.month if yearmonth <= '2017-11': ext = '.bz2' elif yearmonth <= '2018-10': ext = '.xz' else: ext = '.zst' return DATA + "/downloaded/reddit/" + yearmonth + ext
[ "def get_zip_file_url_for_specific_date(date):\n date_format = date.strftime('%d%m%y')\n return __class__.href_pattern.format(date_format)", "def get_archive_filename():\r\n today = datetime.date.today()\r\n return str(today)", "def get_radar_archive_file(date, rid: int) -> str:\n try:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get all the sources of word counts we have in a language.
def language_count_sources(lang): return [ DATA + "/counts/{source}/{lang}.txt".format(source=source, lang=lang) for source in LANGUAGE_SOURCES[lang] ]
[ "def multisource_counts_to_merge(multisource, lang):\n result = [\n _count_filename(source, lang)\n for source in MERGED_SOURCES[multisource]\n if lang in SOURCE_LANGUAGES[source]\n ]\n return result", "def language_text_sources(lang):\n return [\n DATA + \"/tokenized/{sour...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get all the sources of tokenized text we have in a language.
def language_text_sources(lang): return [ DATA + "/tokenized/{source}/{lang}.txt".format(source=source, lang=lang) for source in LANGUAGE_SOURCES[lang] if source in FULL_TEXT_SOURCES ]
[ "def get_source_tags(self):\n return ['en:' + self.tag_manager.normalize_tag_wtokenization(t, self.tries['en'], prefixed=False) for t in self.tag_manager.unprefixed_source_tags]", "def tokenize_english(self, text):\n return [tok.text for tok in self.english_model.tokenizer(text)]", "def sentences(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given a multisource name like 'news' and a language code, find which sources of counts should be merged to produce it.
def multisource_counts_to_merge(multisource, lang): result = [ _count_filename(source, lang) for source in MERGED_SOURCES[multisource] if lang in SOURCE_LANGUAGES[source] ] return result
[ "def language_count_sources(lang):\n return [\n DATA + \"/counts/{source}/{lang}.txt\".format(source=source, lang=lang)\n for source in LANGUAGE_SOURCES[lang]\n ]", "def get_source_labels_and_names_including_dynamic():\n sources = get_source_labels_and_names() + get_direct_sharing_sources()...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given a language code in ParaCrawl, we find the "paired" file that contains monolingual tokenized data from that language. ParaCrawl is parallel data, so its input files refer to language pairs. In practice, each language pair is English and a non-English language. So the result for most languages is that they are paire...
def paracrawl_language_pair_source(lang): if lang == 'en': other = 'fr' else: other = 'en' lang1, lang2 = sorted([lang, other]) langpair = '{}_{}'.format(lang1, lang2) return DATA + "/tokenized/paracrawl-paired/{langpair}.{lang}.txt".format(langpair=langpair, lang=lang)
[ "def merge(filename):\n target_language = filename[:2]\n fname = filename[3:-4]\n\n res = {}\n\n with open(filename, \"r\") as fp:\n res = []\n for line in fp:\n res.append(line.strip())\n\n #p = re.compile('noindent.*') #\\\\newline')\n\n res = \"\".join(res)\n\n #rege...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get trigger by trigger id and start git workflow associated.
def trigger_workflow(self, trigger_id, commit_sha='', status_url=None, collab_url=None): # Note: self.context will be None at this point as this is a # non-authenticated request. db_obj = objects.registry.Assembly.get_by_trigger_id(None, ...
[ "def get_trigger_fire(self, trigger_id, trigger_run_id):\n trigger_fire = self.client._perform_json(\n \"GET\", \"/projects/%s/scenarios/trigger/%s/%s\" % (self.project_key, self.id, trigger_id), params={\n 'triggerRunId' : trigger_run_id\n })\n return DSSTriggerFi...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return all assemblies, based on the query provided.
def get_all(self): return objects.registry.AssemblyList.get_all(self.context)
[ "def package_all(q):\n\n query = (q.dict_of_lists())[\"q\"][0]\n datasets = p.toolkit.get_action(\"package_search\")(\n {}, data_dict={\"q\": query, \"include_private\": True}\n )\n\n result = datasets[\"results\"]\n results = []\n for res in result:\n results.append(res)\n return...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns True iff a chatter can be activated on the model's form views, i.e. if it is a custom model (since we can make it inherit from mail.thread), or it already inherits from mail.thread.
def is_chatter_allowed(self, model): Model = request.env[model] return Model._custom or isinstance(Model, type(request.env['mail.thread']))
[ "def is_form_for_object(self, obj):\n return isinstance(obj, self.get_target_model())", "def has_view_permissions(self, obj):\n queryset = self.model.objects.filter(pk=obj.pk)\n if hasattr(queryset, 'has_view_permissions'):\n return queryset.has_view_permissions( PyFormsMiddleware...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Open a view for translating the field(s) of the record (model, id).
def _get_studio_action_translations(self, model, **kwargs): domain = ['|', ('name', '=', model.model), ('name', 'ilike', model.model + ',')] # search view + its inheritancies views = request.env['ir.ui.view'].search([('model', '=', model.model)]) domain = ['|', '&', ('name', '=', 'ir.ui...
[ "def _render_modal_foreign_key(self, model, app, model_lower, field_name):\n\n try:\n content = self._get_snippet(\n os.path.join(\n self.path_core,\n \"management/commands/snippets/modal_form.txt\"))\n # Interpolando o conteúdo\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a new menu, linked to a new action associated with the model_id
def create_new_menu(self, name, model_id, is_app=False, parent_id=None, icon=None): # create the action model = request.env['ir.model'].browse(model_id) new_action = request.env['ir.actions.act_window'].create({ 'name': name, 'res_model': model.model, 'help': ...
[ "def create_new_menu(request):\n form = MenuForm()\n if request.method == \"POST\":\n form = MenuForm(request.POST)\n if form.is_valid():\n menu = form.save()\n menu.created_date = timezone.now()\n menu.save()\n return redirect('menu_detail', pk=menu.p...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Exports a zip file containing the 'studio_customization' module gathering all customizations done with Studio (customizations of existing apps and freshly created apps).
def export(self, token): studio_module = request.env['ir.module.module'].get_studio_module() data = request.env['ir.model.data'].search([('studio', '=', True)]) content = export.generate_archive(studio_module, data) return request.make_response(content, headers=[ ('Content-D...
[ "def release_diazo(data):\n if not os.path.exists(SETUP_CONFIG_FILE):\n return\n\n config = _check_config(data)\n if not config:\n return\n\n if not utils.ask('Create a zip file of the Diazo Theme?', default=True):\n return\n\n package_name = data.get('name')\n\n if config.has...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the singular name defined in the prototype. However, if the name is defined in the NPC (PNJ) itself (the _nom attribute is not empty), returns that one instead.
def _get_nom(self): nom = self._nom if not nom: nom = self.prototype.nom_singulier return nom
[ "def getNom(self):\r\n if ( self.Valeur > 1 and self.Valeur < 11):\r\n return str( self.Valeur)\r\n elif self.Valeur == 11:\r\n return \"Valet\"\r\n elif self.Valeur == 12:\r\n return \"Dame\"\r\n elif self.Valeur == 13:\r\n return \"Roi\"\r\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the name and the state (singular or plural).
def get_nom_etat(self, personnage, nombre=1): if nombre == 1: nom = self.get_nom_pour(personnage) + " " if self.etats: nom += Personnage.get_etat(self) else: nom += self.etat_singulier return nom else: return str...
[ "def getNom(self):\r\n if ( self.Valeur > 1 and self.Valeur < 11):\r\n return str( self.Valeur)\r\n elif self.Valeur == 11:\r\n return \"Valet\"\r\n elif self.Valeur == 12:\r\n return \"Dame\"\r\n elif self.Valeur == 13:\r\n return \"Roi\"\r\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Awards XP to the character.
def gagner_xp(self, niveau=None, xp=0, retour=True):
    ancien_niveau = self.niveau
    res = Personnage.gagner_xp(self, niveau, xp, retour)
    importeur.hook["pnj:gagner_xp"].executer(self, niveau, xp, retour)
    if self.niveau > ancien_niveau:
        importeur.hook["pnj:gagner_niveau"].execute...
[ "def gagner_xp(self, xp, niv_monstre, vie_monstre):\n if niv_monstre < self.niveau :\n self.xp = self.xp\n\n if vie_monstre == 0 :\n self.xp = self.xp + xp\n self.level_up()", "def get_xp(self, argv: tp.Sequence[str]) -> XP:\n raise NotImplementedError()", "...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The character self has just killed the victim.
def tuer(self, victime):
    self.script["tue"].executer(personnage=victime, pnj=self)
[ "def wall_time(self):", "def _self_time(self):\r\n return self.duration() - sum([child.duration() for child in self.children])", "def get_survival_time(self):\r\n # return survival time only if the patient has died\r\n if not self.get_if_alive():\r\n return self._survivalTime\r\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns True if the character can drown, False otherwise.
def noyable(self):
    return False
[ "def is_person_empty(self, pos):\n if not self._is_empty('BUILDING', pos) and not self.data['BUILDING'][pos].permeable:\n return False\n if not self._is_empty('FURNITURE', pos) and not self.data['FURNITURE'][pos].permeable:\n return False\n if not self._is_empty('PEOPLE', ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Accept-reject sample from a probability distribution.
def accept_reject_sample(prob: Callable, n: int, limits: Space,
                         sample_and_weights_factory: Callable = UniformSampleAndWeights,
                         dtype=ztypes.float, prob_max: Union[None, int] = None,
                         efficiency_estimation: float = 1.0) -> tf.Tensor:
    multiple...
[ "def rejection_sampling(func, rv, k, n):\r\n assert hasattr(rv, \"draw\"), \"the distribution has no method to draw random samples\"\r\n sample = []\r\n while len(sample) < n:\r\n sample_candidate = rv.draw()\r\n accept_proba = func(sample_candidate) / (k * rv.pdf(sample_candidate))\r\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return all extended pdfs that are daughters.
def extract_extended_pdfs(pdfs: Union[Iterable[ZfitPDF], ZfitPDF]) -> List[ZfitPDF]:
    from ..models.functor import BaseFunctor
    pdfs = convert_to_container(pdfs)
    indep_pdfs = []
    for pdf in pdfs:
        if not pdf.is_extended:
            continue
        elif isinstance(pdf, BaseFunctor):
            i...
[ "def get_all_pdfs():\n\n return filter(lambda f: fnmatch.fnmatch(f, '*.pdf'), os.listdir(cwd))", "def get_pdf_names(self) -> tuple:\n return [f for f in os.listdir(self.path_name) if os.path.isfile(os.path.join(self.path_name, f))]", "def get_extra_files(self):\n return []", "def as_pdf(self,...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a sample from extended pdfs by sampling a Poissonian count using the yield.
def extended_sampling(pdfs: Union[Iterable[ZfitPDF], ZfitPDF], limits: Space) -> tf.Tensor:
    samples = []
    pdfs = convert_to_container(pdfs)
    pdfs = extract_extended_pdfs(pdfs)
    for pdf in pdfs:
        n = tf.random.poisson(lam=pdf.get_yield(), shape=(), dtype=ztypes.float)
        sample = pdf._single_ho...
[ "def sample(self, samps=100, infty=default_infty, using=None, vb=True):\n def sample_helper(i):\n # with open(self.logfilename, 'wb') as logfile:\n # logfile.write('sampling pdf '+str(i)+'\\n')\n return self.pdfs[i].sample(N=samps, infty=infty, using=using, vb=False)\n\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Find all the subscribers in the SuggestBot database for the current language version of Wikipedia, check if any of them are due to receive suggestions, and then post suggestions to their user talk page (or userspace subpage if one is set).
def post_suggestions(self):
    # today is?
    # Note: We use UTC as the basis for our calculations, because
    # the Wikipedia API also returns timestamps as UTC, thus allowing
    # us to correctly post suggestions to new subscribers who saw
    # SuggestBot post to their user talk page earlier...
[ "def _qa__get_instant_notification_subscribers(\n self,\n potential_subscribers = None,\n mentioned_users = None,\n exclude_list = None,\n ):\n #print '------------------'\n #print 'in content function'\n subscriber_set = set()\n #pr...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Mapper to convert a JSON row into a row containing only comments and caption as strings
def mapRow(row):
    commentsRow = row.comments
    captionRow = row.caption
    comments = commentsRow.data  # select comments
    textComments = " ".join([x.text for x in comments])  # remove metadata from comments
    if hasattr(captionRow, "edges"):
        captions = captionRow.edges
        textCaptions = " ".joi...
[ "def process_wiki_row(row):\n out = row.copy()\n # Split comma separated fields\n out['aliases'] = row['aliases'].split(',')\n out['descriptions'] = row['descriptions'].split(',')\n # Convert JSON string to dict\n out['claims'] = json.loads(row['claims'])\n return out", "def format_row(self, ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Writes the processed RDD to the output file
def writeToFile(rdd, parallelized, output, user, format, features):
    fileEnd = "." + format
    output_path = output + "/ig/" + user + "/" + user + fileEnd
    if parallelized:
        rdd.saveAsTextFile(output_path)
    else:
        arr = np.array(rdd.collect())
        if not os.path.exists(os.path.dirname(output...
[ "def saveRDD(self, path, rdd: DataFrame):\n\n rdd.write.csv(path, sep=\"|\")", "def write_to_file(self):\n self._file_writer.write(self._reconstructed_sentences)", "def write_output(self, result_path):\n with open(result_path, 'w') as f:\n for output in self.output:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Cleans the output directory to make room for new outputs
def cleanOutputDir(output):
    if os.path.exists(output) and os.path.isdir(output):
        shutil.rmtree(output)
[ "def cleanup():\n path = \"./outputs\"\n try:\n shutil.rmtree(path, ignore_errors = True, onerror = None)\n except:\n print('Error while deleting directory')\n os.mkdir(path)\n os.chdir(path)", "def devclean():\n click.echo(\"start clean your output folder...\")\n rm(OUTPUTDIR, ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Appends corpus files for all users to a single corpus file
def append_corpus(output):
    files = []
    output_path = output + "/ig/" + "ig_corpus.txt"
    for root, directories, filenames in os.walk(output + "/ig/"):
        for filename in filenames:
            files.append(os.path.join(root, filename))
    corpusfiles = filter(lambda x: ".txt" in x, files)
    if not os.p...
[ "def write_corpus_to_file(output_file, corpus): \n \n file = open(output_file, 'w')\n for line in corpus: \n file.write(line)\n print ('Corpus has been writted in file')\n file.close()", "def combine_documents(path=os.path.join(os.curdir, \"data/processed\"), name='corpus.txt'):\n outn...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Computes some basic statistics about the output corpus
def corpora_stats(output):
    igFiles = []
    for root, directories, filenames in os.walk(output + "/ig/"):
        for filename in filenames:
            igFiles.append(os.path.join(root, filename))
    igFiles = filter(lambda x: ".txt" in x, igFiles)
    words = []
    for file in igFiles:
        fileH = open(file...
[ "def corpus_statistics(corpus, d_corp):\n print('There are {} types of a total of {} tokens in the corpus.\\n' .format(number_types(corpus), corpus_length(corpus)))\n print('There average token length is {}.\\n' .format(average_length(corpus)))\n print('The longest token is {}.\\n' .format(longest_token(co...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Main function; orchestrates the pipeline. Creates the Spark context, parses arguments, and parses all users.
def main():
    sc = pyspark.SparkContext(conf=sparkConf())
    sql = pyspark.SQLContext(sc)
    args = parse_args()
    cleanOutputDir(args.output)
    users = os.listdir(args.input)
    map(lambda user: parseUser(user, args, sql, args.partitions), users)
    corpora_stats(args.output)
    append_corpus(args.output)
[ "def main():\n # start Spark application and get Spark session, logger and config\n spark, log, config = start_spark(\n app_name='lol_etl_job',\n files=['configs/etl_config.json'])\n\n\n\n\n # log that main ETL job is starting\n log.warn('etl_job is up-and-running')\n\n # execute ETL pipe...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Checks if a session exists for the given sender id, else creates a new one
def find_session(sender_id):
    session = db.sessions.find_one({'sender_id': sender_id})
    if session is None:
        session_id = str(uuid.uuid4())
        db.sessions.insert_one({'createdAt': datetime.datetime.utcnow(),
                                'sender_id': sender_id,
                                'session_id': session_id})
    else:
        session_id = session[...
[ "def create_session(self):\n if not self.request.session.session_key:\n self.request.session.save()", "def has_session(self, id):\n return self.sessions.has_key(id)", "def _createSession(self, pid, dir):\n # TODO\n session = ServerSession(self, dir, pid=pid)\n uid =...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Concatenates all messages in the session
def concatenate_session(session_id):
    conversation = ""
    for msg in db.messages.find({'session_id': session_id}):
        conversation += (msg['message'] + "\n")
    return conversation
[ "def get_all_message(): \n return \"<br>\".join(messages)", "def sent_messages(short_messages):\n while short_messages:\n send = short_messages.pop()\n print(f\"\\nThe following text has been sent:\\n\\t{send}\")\n sent_message.append(send)\n print(\"\\nAll messages have been sent....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Takes conversation text and calculates the confidence score using the Watson Tone Analyzer
def analyze_tone(conversation):
    tone_analyzer = ToneAnalyzerV3Beta(username=WATSON_USERNAME,
                                       password=WATSON_PASSWORD,
                                       version=WATSON_API_VERSION)
    tone_response = tone_analyzer.tone(conversation)
    confidence = tone_response['document_tone']['tone_categories'][1]['tones'][1]['score']
    return confidence
[ "def analyze(self, text):\n tokens = TweetTokenizer() #创建一个推特分词器的实例\n self.score = 0 #每条推特的分数 \n for word in tokens.tokenize(text): #对一条推特(str)进行分词后遍历\n if word.lower() in self.positives: \n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }