query: string (lengths 9 to 9.05k)
document: string (lengths 10 to 222k)
negatives: list (lengths 19 to 20)
metadata: dict
Handles an incoming event by invoking a specific action according to the request type.
def __handle(event, context) -> Tuple[Optional[Dict[Any, Any]], Optional[str]]: serialized_event = json.dumps(event, default=lambda o: "<not serializable>") logger.info(f"Got new request. Event: {serialized_event}.") action = Action(event) if event["RequestType"] == "Create": return action.cre...
[ "def dispatch(self, request, attendees, action, event, **kwargs):\n action_func = getattr(self, action, None)\n if callable(action_func):\n return action_func(request, attendees, event, **kwargs)", "def handle_request(self, tpe, obj_dict):\n if tpe == 'DataRequest':\n re...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Email the administrator a success message noting the need for manual attachments.
def email_success_attachments(dirname, attachments, addresses, smtp_server, smtp_user, smtp_password): # Set up multipart message msg = MIMEMultipart() msg['Subject'] = '%s requires manual intervention' % dirname msg['To'] = ', '.join(addresses) msg['From'] = "p2b@localhost" msg.preamble = 'You ...
[ "def email_sent():\n tkMessageBox.showinfo(\"Email Sent\", \"Email was sent successfully.\",icon='info')", "def send_alert_attached(subject, flist):\n msg = MIMEMultipart()\n msg['Subject'] = subject\n msg['From'] = mailsender\n msg['To'] = mailreceip\n #message = \"Thank you\"\n msg.atta...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get value from mapping with handling special cases.
def get_value(self, value): if pd.isna(value): return None if value not in self.mapping: return value return self.mapping[value]
[ "def try_map(m, value):\n if value in m:\n return m[value]\n else:\n return value", "def mapping_extract_value(mapping: Dict[str, Any], traverse: List[str]):\n return {\"value\": traverse_get(mapping, *traverse)}", "def get_value_for(self, instance):\n value = super(ValueMapFullTextAtt...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the previous hash of block.
def previous_hash(self): return self.__previous_hash
[ "def getPreviousBlockHeaderHash(self) -> str:\n return self.blockHeader.prevBlockHeaderHash", "def getPreviousTransactionHash(self) -> str:\n return self.__previousTransactionHash", "def getPriorBlockHash(self):\n return self.parentBlockHash", "def get_previous_block(self):\n # Ret...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the transactions of block.
def transactions(self): return self.__transactions
[ "def get_transactions():\n response = {\n 'transaction': list(node.blockchain.current_transactions),\n }\n return jsonify(response), 200", "def getTransactionList(self) -> list:\n return self.__transactions", "def get_internal_transaction_by_block(self, block_number):\n\n # TODO ha...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the proof of block.
def proof(self): return self.__proof
[ "def __create_proof(self):\n\n # Create the block base on which the salt will be concatenated\n base_block_str = ''\n for transaction in self.__transactions:\n base_block_str += str(transaction)\n base_block_str += self.__previous_hash\n\n # Find a salt that creates the...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Identify a matching untriaged crash in the db
def check_untriaged_crash(lasttest, crashtrigger, crashfunction, crashbt, fullcrash, testlogs, DBCONN=None): newid = 0 numreports = 0 if not crashfunction: crashfunction = None if not lasttest: lasttest = None dbconn = DBCONN try: if not dbconn: dbconn = psyco...
[ "def match_reason(self):\n conn, cursor = sqlite_base.sqlite_connect()\n # Read today crashes reasons.\n _td_reasons = self.get_specific_range_crashes() #[table_id, ROWID, CRASH_ID, PROJECT, REASON]\n # Remove duplicate data.\n _uniqueness_l = self.make_uniquenesss_list(_t...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check if we have a matching crash and add to it; if we have a new one, add a new record.
def add_new_crash(lasttest, crashtrigger, crashfunction, crashbt, fullcrash, testlogs, link, CREATETIME=None, DBCONN=None): if not crashfunction: crashfunction = None if not lasttest: lasttest = None dbconn = DBCONN newid, numreports = check_untriaged_crash(lasttest, crashtrigger, cras...
[ "def get_specific_range_crashes(self):\n _tables_id = self.get_day_from_statistics()\n if _tables_id:\n for table_id in _tables_id:\n conn, cursor = sqlite_base.sqlite_connect()\n # conn, cursor = sqlite_base.sqlite_connect(sql_abs_path=self.statistic_sql)\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Automatically generate formatted inputs and outputs from input_shapes
def make_inputs_outputs(input_shapes, dtype, is_bfloat16=False): input_list = [ np.random.random(shape).astype(dtype) for shape in input_shapes ] output_shape = find_output_shape(input_list) output_list = [ x + np.zeros(output_shape).astype(x.dtype) for x in input_list ] if is_b...
[ "def test_inputs(self):\n assert list(self._iter_input_shapes())", "def infer_shapes(nlp: Pipeline, framework: str) -> Tuple[List[str], List[str], Dict, BatchEncoding]:\n\n def build_shape_dict(name: str, tensor, is_input: bool, seq_len: int):\n if isinstance(tensor, (tuple, list)):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Upload a collection of events to an InfluxDB instance. This uses a POST to upload data to a named database at a URL with a particular timestamp resolution. If present, the user and password are used to do the upload. A batch size of events is also used. Events have to be in InfluxDB Line Protocol format, in the collecti...
def upload(upload_spec, collection, context=None): assert upload_spec['type'].lower() == "influxdb" raise_exceptions = upload_spec.get('raise-exceptions', False) user = upload_spec.get('user', None) password = upload_spec.get('password', None) timestamp_resolution = upload_spec.get('timestamp-resolu...
[ "def upload_to_google_calendar(events):\n batch = MyCalendarBatchInsert()\n\n for event in events:\n batch.add(event.to_gcal_event())\n\n return batch.execute()", "def influx_upload(self, config, data_filename):\n points = '%s-data-points.json' % config['metric']\n jq_point = config....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test for parsing a sentence with no walls and no period.
def test_parse_no_period_if_no_period(self): options = 0 options |= BIT_STRIP | BIT_NO_PERIOD | BIT_RWALL tokens = parse_tokens(self.tokens_no_walls_no_period, options)[0] self.assertTrue(self.cmp_lists(tokens, ['###LEFT-WALL###', 'eagle', 'has', 'wing']))
[ "def check_sentence(text):\n result = re.search(r\"^[A-Z][a-z\\s]*[.?!]$\", text)\n return result != None", "def filter_paragraph(p):\n # Expect a minimum number of words.\n tokens = p.split()\n if len(tokens) < 6:\n return True\n\n # Require some letters.\n if not re.search(_SOME_ALPHA_RE, p):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test for parsing links when LEFT-WALL and period are present.
def test_parse_links(self): links = parse_links(self.link_str, ['###LEFT-WALL###', 'dad', 'was', 'not', 'a', 'parent', 'before', '.'], 0) # [0 7 2 (Xp)][0 1 0 (Wd)][1 2 0 (Ss*s)][2 5 1 (Osm)][2 3 0 (EBm)][4 5 0 (Ds**c)][5 6 0 (Mp)][7 8 0 (RW)] self.assertTrue(self.cmp_lists(links, [ (0, 7), ...
[ "def test_unquoted_link(self):\n txt=\"\"\"hello <a href=www.co.uk>slashdot.org/?a=c&f=m</a> \"\"\"\n\n uris=self.candidate.extractor.extracturis(txt)\n self.assertTrue('www.co.uk' in uris)\n self.assertTrue('slashdot.org/?a=c&f=m' in uris)", "def is_valid_link(link):\n index = lin...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test for parsing postscript with both walls included.
def test_parse_postscript_all_walls(self): options = 0 options |= (BIT_RWALL | BIT_CAPS) options &= ~BIT_STRIP tokens, links = parse_postscript(self.post_all_walls, options) pm = parse_metrics(tokens) self.assertEqual(1.0, pm.completely_parsed_ratio) self.assertEq...
[ "def is_body(part):\n if get_content_type(part) == 'text/plain':\n if not is_attachment(part):\n return True\n return False", "def isMeaningfulPost(post):\n # nothing for reshares.\n if post.get('verb', '') == 'share':\n return False\n\n return isMeaningfulContent(re.sub('<.*?>', '',...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test for parsing postscript with no walls included.
def test_parse_postscript_no_walls(self): options = 0 options |= (BIT_RWALL | BIT_CAPS) options &= ~BIT_STRIP tokens, links = parse_postscript(self.post_no_walls, options) pm = parse_metrics(tokens) self.assertEqual(1.0, pm.completely_parsed_ratio) self.assertEqu...
[ "def isMeaningfulPost(post):\n # nothing for reshares.\n if post.get('verb', '') == 'share':\n return False\n\n return isMeaningfulContent(re.sub('<.*?>', '', post['object']['content']))", "def is_image_post(submission):\n return (not submission.is_self) and submission.url.endswith((\".png\", \".jpg\", \...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test for parsing postscript with no links
def test_parse_postscript_no_links(self): options = 0 options |= (BIT_RWALL | BIT_CAPS) options &= ~BIT_STRIP tokens, links = parse_postscript(self.post_no_links, options) self.assertEqual(0, len(links))
[ "def parse_postscript(text: str, options: int) -> ([], []):\n p = re.compile('\\[(\\(.+?\\)+?)\\]\\[(.*?)\\]\\[0\\]', re.S)\n\n m = p.match(text.replace(\"\\n\", \"\"))\n\n if m is not None:\n tokens, offset = parse_tokens(m.group(1), options)\n links = parse_links(m.group(2), tokens, offset)...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Apply a soft harmonic restraint to the given atoms. This modifies the ``ThermodynamicState`` object.
def restrain_atoms(thermodynamic_state, sampler_state, restrained_atoms, sigma=3.0*unit.angstroms): K = thermodynamic_state.kT / sigma**2 # Spring constant. system = thermodynamic_state.system # This is a copy. # Check that there are atoms to restrain. if len(restrained_atoms) == 0: raise Val...
[ "def harmonic_bond(conf, params, box, bond_idxs, param_idxs):\n ci = conf[bond_idxs[:, 0]]\n cj = conf[bond_idxs[:, 1]]\n dij = distance(ci, cj, box)\n kbs = params[param_idxs[:, 0]]\n r0s = params[param_idxs[:, 1]]\n energy = np.sum(kbs/2 * np.power(dij - r0s, 2.0))\n return energy", "def _f...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the current market value of the bond.
def market_value(self) -> float: return self._market_dirty_price
[ "def _get_val(self):\n return self.stock_owned.dot(self.stock_price) + self.cash_in_hand", "def value(self):\n return self.shares() * self.price()", "def value_current(self):\n # get current value from Stockexchange\n #TODO: Transform to € if $\n value = self.history.iloc[-1]\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the current market value of the bond.
def market_dirty_price(self, value: float): self._market_dirty_price = value
[ "def market_price(self, market_price):\n\n self._market_price = market_price", "def market_close(self, market_close):\n\n self._market_close = market_close", "def place_bet(self, amount):\n self.bet = amount", "def market_value(self) -> float:\n return self._market_dirty_price", ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculates the theoretical dirty value of a bond given a particular discount curve. The theoretical dirty value is V = \sum_i c_i d_i, where c_i is the ith coupon and d_i is the discount factor associated with that coupon, calculated as discount_curve(coupon_payment_date).
def calculate_dirty_value(self, discount_curve: Callable[[date], float], today: date) -> float: future_coupons = (c for c in self.coupons if c.ex_date > today) discounted_coupon_value = sum( discount_curve(c.payment_date) * c.coupon_amount for c in future...
[ "def bond_price(maturity, principal=100, coupon_rate=.03, coupons_per_year=12, discount_rate=.03):\n\n cash_flows = bond_cash_flows(maturity, principal, coupon_rate, coupons_per_year)\n pv = present_value(cash_flows, discount_rate / coupons_per_year)\n\n return pv", "def discount_curve(self, currency):\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Converts a date to the first available business date.
def _business_date(reference_date: date) -> date: if reference_date.weekday() > 4: return FixedRateACGB._business_date( reference_date + timedelta(days = 1)) return reference_date
[ "def next_bday(date):\n add_day = 1\n next_bday = datetime.strptime(date, '%Y-%m-%d') + BDay(add_day)\n # check it is not holiday\n while is_holiday(date=next_bday.strftime('%Y-%m-%d')) \\\n or is_offdays(date=next_bday.strftime('%m/%d/%y')):\n add_day += 1\n next_bday = datetim...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Constructs a series of annual coupons starting on the first pay date and ending on or before the last pay date.
def construct_yearly_coupon_series(first_pay_date: date, last_pay_date: date, rate: float, principle: float) -> List[Coupon]: coupons = [] coupon_date = first_pay_date amount = principle * rate while coupon_date <= last_pay_date: ex_date = FixedRateA...
[ "def construct(first_coupon_pay_date: date, \n second_coupon_pay_date: date, maturity_date: date, \n coupon_rate: float, principle: float):\n coupons_a = FixedRateACGB.construct_yearly_coupon_series(\n first_coupon_pay_date, maturity_date, \n coupon_rate / 2.0, principle)\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates a FixedRateACGB based on some minimal parameters which describe the bond.
def construct(first_coupon_pay_date: date, second_coupon_pay_date: date, maturity_date: date, coupon_rate: float, principle: float): coupons_a = FixedRateACGB.construct_yearly_coupon_series( first_coupon_pay_date, maturity_date, coupon_rate / 2.0, principle) co...
[ "def make_periodic(*args):\n\tif len(args) == 0:\n\t\tnu.warn(\"No BGF file or instance specified.\")\n\t\treturn 0;\n\telif len(args) >= 1:\n\t\tmybgf = args[0]\n\t\tif isinstance(mybgf, str):\n\t\t\tmybgf = bgf.BgfFile(mybgf)\n\n\t\tmybgf.PERIOD = \"111\"\n\t\tmybgf.AXES = \"ZYX\"\n\t\tmybgf.SGNAME = \"P 1 ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Breaks an AudioSegment into chunks that are chunk_length milliseconds long. If chunk_length is 50, you'll get a list of 50-millisecond audio segments back (except the last one, which can be shorter).
def make_chunks(audio_segment, chunk_length): number_of_chunks = ceil(len(audio_segment) / float(chunk_length)) return [audio_segment[i * chunk_length:(i + 1) * chunk_length] for i in range(int(number_of_chunks))]
[ "def chunk_audio_pieces(self, pieces, chunk_size):\n left_over = np.array([])\n for piece in pieces:\n if left_over.size == 0:\n combined = piece\n else:\n combined = np.concatenate([left_over, piece], axis=-1)\n for chunk in chunk_audio(c...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests for correct electron density parsing of a VASP directory.
def test_parse_electrondensity(): # Parse envisionpy.hdf5parser.charge(PATH_TO_HDF5, PATH_TO_VASP_CALC) envisionpy.hdf5parser.unitcell(PATH_TO_HDF5, PATH_TO_VASP_CALC) # Test if the generated HDF5-file contains correct information if os.path.isfile(PATH_TO_HDF5): with h5py.File(PATH_TO...
[ "def _check_density(self,density, num_electrons):\n\n FLOAT_PRECISION = 0.01\n #integrate the density over the spherical space\n #s = float(np.sum(density))\n #s = 4*np.pi * float(np.sum(density * self.grid.gridvec**2 ))\n s = 4*np.pi * integrate.simps(density * self.grid.gridvec*...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that the badge is initialized.
def test_init(): _badgegen = badgegen.BadgeGenerator() assert isinstance(_badgegen, badgegen.BadgeGenerator)
[ "def test_init(self):\n assert self.registration_behaviour.is_registered is False\n assert self.registration_behaviour.registration_in_progress is False\n assert self.registration_behaviour.failed_registration_msg is None\n assert self.registration_behaviour._nb_retries == 0", "def tes...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Imports a CSV of DNA sequences and inserts them into arrays.
def array(file): sequences = [] recSite = [] freq = [] with open(file, 'r') as csv_file: fileReader = csv.reader(csv_file, delimiter = "|") next(fileReader) # throwaway header row for row in fileReader: strippedRow = row[0].strip(",").split(',') sequences.append(strippedRow[1]) recSite.append(strip...
[ "def load_sequence(filename):\n with open(filename) as f:\n data = []\n for line in f:\n data += [int(n) for n in line.strip(',\\n').split(',')]\n return data", "def importData(filename):\n df = pd.DataFrame(columns = ['LocID', 'Location', 'Biotype', 'nuclA', 'nuclT',\n 'n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Takes a DNA sequence and calculates the running average GC content, with a default window of 3 basepairs, over the length of the sequence.
def GC_content(sequence, recLength = 5, overhang = 12, window = 3, ymax = 1, ymin = -1): GC_array = [] maxGC = 100 minGC = 0 # GC percentages for bp in sequence: if bp.capitalize() in ['G', 'C']: GC_array.append(100) else: GC_array.append(0) # window weighting weights = np.repeat(1.0, window)/f...
[ "def GC(seq):\n\tA = seq.count('A')\n\tT = seq.count('T')\n\tC = seq.count('C')\n\tG = seq.count('G')\n\treturn float(C+G) / float(A+T+G+C)", "def GC_content(dna):\n g = dna.count('G')\n c = dna.count('C')\n ret = (g+c)/len(dna)\n return ret", "def get_at_gc_ratio(sequence):\n return get_at_content(sequence)...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Takes a DNA sequence and converts it into a list of floats based on dinucleotide pairs, mapping each pair to a four-element vector of plus/minus ones.
def dinucleotide(sequence): frog = [] for i in range(0,(len(sequence)-1)): bp = sequence[i] bp_next = sequence[i+1] bp = bp.capitalize() bp_next = bp_next.capitalize() if bp == 'A': if bp_next == 'A': frog.append([-1,-1,-1,-1]) elif bp_next == 'C': frog.append([-1,-1,-1,1]) elif bp_next =...
[ "def GC(seq):\n\tA = seq.count('A')\n\tT = seq.count('T')\n\tC = seq.count('C')\n\tG = seq.count('G')\n\treturn float(C+G) / float(A+T+G+C)", "def _convert_to_floats(line, start_index=0):\n return [float(f) for f in line.strip().split(' ')[start_index:] if f != '']", "def encode_DNA(seq):\n\tseq2bin_dict = {...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The most basic lineal hash is SHA(SHA(name) + version)
def test_basic(self): a = linealHash('name', 'version') expected = sha1(sha1('name').hexdigest() + 'version').hexdigest() self.assertEqual(a, expected)
[ "def getHash(name):\n return hashlib.md5(name).hexdigest()", "def hash(bytes):\n return unpack(sha256(bytes).digest())", "def test_args(self):\n sample_hash1 = sha1('foo').hexdigest()\n sample_hash2 = sha1('bar').hexdigest()\n \n a = linealHash('name', 'version', [sample_hash1,...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
If a data point has inputs, the lineal hash of the data point is SHA(SHA(SHA(name) + version) + input1_lineal_hash + input2_lineal_hash)
def test_args(self): sample_hash1 = sha1('foo').hexdigest() sample_hash2 = sha1('bar').hexdigest() a = linealHash('name', 'version', [sample_hash1, sample_hash2]) expected = sha1(linealHash('name', 'version') + sample_hash1 \ + sample_hash2).hexdigest() ...
[ "def hash_input(self, input_data: Union[bytes, str]) -> bytes:\n return cast(bytes, self.hash_method(self.bytes_from_input(input_data)).digest()) # We know this is always a hashlib hash that returns bytes", "def __get_hash(self, user_input, number_of_lines):\r\n hash_number = 0\r\n for i in ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
You can convert a Data object into an IWorkInput.
def test_IWorkInput(self): d = Data('joe', 'a', '1', 'xxxx', 'value') i = IWorkInput(d) self.assertEqual(i.name, 'a') self.assertEqual(i.version, '1') self.assertEqual(i.lineage, 'xxxx') self.assertEqual(i.value, 'value') self.assertEqual(i.hash, sha1('value').hex...
[ "def test_inputs(self):\n w = Work('bob', 'a', '1', 'xxxx', [\n ('b', '1', 'xxxx', 'val', 'hash'),\n ['c', '1', 'xxxx', 'val', 'hash'],\n WorkInput('d', '1', 'xxxx', 'val', 'hash'),\n ])\n self.assertEqual(w.inputs, (\n WorkInput('b', '1', 'xxxx', 'va...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
You can easily convert an IWorkInput to an IResultInput
def test_IResultInput(self): i = WorkInput('a', '1', 'xxxx', 'val', 'hash') r = IResultInput(i) self.assertEqual(r.name, 'a') self.assertEqual(r.version, '1') self.assertEqual(r.lineage, 'xxxx') self.assertEqual(r.hash, 'hash')
[ "def test_toResult(self):\n w = Work('bob', 'a', '1', 'xxxx', [\n ('a', '1', 'xxxx', 'val', 'hash'),\n ])\n r = w.toResult('the result')\n self.assertEqual(r, Result('bob', 'a', '1', 'xxxx', 'the result', [\n ('a', '1', 'xxxx', 'hash'),\n ]))", "def test_in...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initializing with tuples/lists/WorkInputs should work for the inputs argument.
def test_inputs(self): w = Work('bob', 'a', '1', 'xxxx', [ ('b', '1', 'xxxx', 'val', 'hash'), ['c', '1', 'xxxx', 'val', 'hash'], WorkInput('d', '1', 'xxxx', 'val', 'hash'), ]) self.assertEqual(w.inputs, ( WorkInput('b', '1', 'xxxx', 'val', 'hash'),...
[ "def _populate_inputs(self):\n\n self.inputs = Bunch(outfile=None,\n infile=None)", "def __init__(self, inputs=None, outputs=None, data = None):\n if inputs == None:\n self.inputs = []\n else:\n self.inputs = inputs\n\n if outputs == Non...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
You can easily convert to a Result
def test_toResult(self): w = Work('bob', 'a', '1', 'xxxx', [ ('a', '1', 'xxxx', 'val', 'hash'), ]) r = w.toResult('the result') self.assertEqual(r, Result('bob', 'a', '1', 'xxxx', 'the result', [ ('a', '1', 'xxxx', 'hash'), ]))
[ "def test_toResultError(self):\n w = Work('bob', 'a', '1', 'xxxx', [\n ('a', '1', 'xxxx', 'val', 'hash'),\n ])\n r = w.toResultError('the err')\n self.assertEqual(r, ResultError('bob', 'a', '1', 'xxxx', 'the err', [\n ('a', '1', 'xxxx', 'hash'),\n ]))", "de...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
You can convert to a ResultError
def test_toResultError(self): w = Work('bob', 'a', '1', 'xxxx', [ ('a', '1', 'xxxx', 'val', 'hash'), ]) r = w.toResultError('the err') self.assertEqual(r, ResultError('bob', 'a', '1', 'xxxx', 'the err', [ ('a', '1', 'xxxx', 'hash'), ]))
[ "def check_result(res, msg=None):\n if not res.status:\n return\n\n # If there was an error, it should be the last operation.\n if res.resarray:\n resop = res.resarray[-1].resop\n else:\n resop = None\n raise BadCompoundRes(resop, res.status, msg)", "def get_error_code(result):...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initializing with tuples/lists/ResultInputs should work for the inputs argument.
def test_inputs(self): r = Result('bob', 'a', '1', 'xxxx', 'val', [ ('b', '1', 'xxxx', 'hash'), ['c', '1', 'xxxx', 'hash'], ResultInput('d', '1', 'xxxx', 'hash'), ]) self.assertEqual(r.inputs, ( ResultInput('b', '1', 'xxxx', 'hash'), Re...
[ "def test_inputs(self):\n r = ResultError('bob', 'a', '1', 'xxxx', 'val', [\n ('b', '1', 'xxxx', 'hash'),\n ['c', '1', 'xxxx', 'hash'],\n ResultInput('d', '1', 'xxxx', 'hash'),\n ])\n self.assertEqual(r.inputs, (\n ResultInput('b', '1', 'xxxx', 'hash'...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
You can easily convert an IResult to IData
def test_IData(self): r = Result('bob', 'a', '1', 'xxxx', 'val', []) d = IData(r) self.assertEqual(d.entity, 'bob') self.assertEqual(d.name, 'a') self.assertEqual(d.version, '1') self.assertEqual(d.lineage, 'xxxx') self.assertEqual(d.value, 'val')
[ "def getResultData(self):\n return self.result", "def test_IResultInput(self):\n i = WorkInput('a', '1', 'xxxx', 'val', 'hash')\n r = IResultInput(i)\n self.assertEqual(r.name, 'a')\n self.assertEqual(r.version, '1')\n self.assertEqual(r.lineage, 'xxxx')\n self.ass...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initializing with tuples/lists/ResultInputs should work for the inputs argument.
def test_inputs(self): r = ResultError('bob', 'a', '1', 'xxxx', 'val', [ ('b', '1', 'xxxx', 'hash'), ['c', '1', 'xxxx', 'hash'], ResultInput('d', '1', 'xxxx', 'hash'), ]) self.assertEqual(r.inputs, ( ResultInput('b', '1', 'xxxx', 'hash'), ...
[ "def test_inputs(self):\n r = Result('bob', 'a', '1', 'xxxx', 'val', [\n ('b', '1', 'xxxx', 'hash'),\n ['c', '1', 'xxxx', 'hash'],\n ResultInput('d', '1', 'xxxx', 'hash'),\n ])\n self.assertEqual(r.inputs, (\n ResultInput('b', '1', 'xxxx', 'hash'),\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Update or create a contact address and location pair. If the location does not exist it will be automatically created. If the server already has a location assigned with the same name, the contact address specified will be added if it doesn't already exist (Management and Log Server can have multiple address for a sing...
def update_or_create( self, location, contact_addresses, with_status=False, overwrite_existing=False, **kw ): updated, created = False, False location_ref = location_helper(location) if location_ref in self: for loc in self: if loc.location_ref == location...
[ "def set_device_location(self, name=None, location: str = None, lat: float = None, lng: float = None):\n loc = self.get_location(location)\n if loc:\n location_id = loc.id\n # location exist, check if data is modified\n data = AttrDict()\n if loc.lng != lng:...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Provides a reference to contact addresses used by this server. Obtain a reference to manipulate or iterate existing contact addresses.
def contact_addresses(self): return MultiContactAddress( href=self.get_relation("contact_addresses"), type=self.typeof, name=self.name )
[ "def accessContacts(self):\n\n self.db.execute(\"SELECT ALL id FROM contacts\")\n contacts = self.db.fetchall()\n\n for id in range(1, len(contacts)+1):\n self.info(id)\n\n return", "def get_contacts(self):\n logger.info(\"Retrieve Phonebook\")\n ready = yield ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Remove a contact address by the name of its location. You can obtain all contact addresses through the contact_addresses reference.
def remove_contact_address(self, location): return self.contact_addresses.delete(location)
[ "def remove_contact(self, contact):\n self.contacts.remove(contact)", "def remove_contact(self, contact_id):\n pass", "def remove_address(self, address_id):\n pass", "def remove_street(estreet={}, estreet_name=[]):\n del estreet[estreet_name]", "def del_contact(self, contact_name):\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Restart Web Access on the Management Server.
def restart_web_access(self): session = _get_session(SMCRequest._session_manager) response = session.session.put( url=self.href+"/restart_web_access" ) if response.status_code != 200: raise SMCOperationFailure(response) return 0
[ "def restart_appserver():\n\n require('hosts')\n \n run(\"invoke restart\")", "def restart_webserver(site='localhost'):\n with hide('output'), settings(host_string=site):\n status = sudo('supervisorctl restart %s' % APP_NAME)\n if 'RUNNING' in status:\n print 'Server restarted...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The Port that is used for log forwarding. The default port used by IPFIX/NetFlow data collectors is 2055. Note! If you have to define an Access rule that allows traffic to the target host, make sure that the Port you select is also used as the Port in the Access rule.
def netflow_collector_port(self): return self.data["netflow_collector_port"]
[ "def port(self):\n return self._val.port or DEFAULT_PORTS.get(self._val.scheme)", "def port(self):\n\n return self.server_address[1]", "def get_fluentd_syslog_src_port():\n for port in range(25229, 25424):\n try:\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Field/value pair used to ensure server identity when connecting to a syslog server using TLS. Optional; if not provided, server identity is not checked. This is ignored if the service is not tcp_with_tls.
def tlsIdentity(self): return self.data["tlsIdentity"]
[ "def tls(self) -> Optional['outputs.ClusterClientAuthenticationTls']:\n return pulumi.get(self, \"tls\")", "def tls_config(self) -> Optional[pulumi.Input['PrometheusSpecApiserverConfigTlsConfigArgs']]:\n return pulumi.get(self, \"tls_config\")", "def tls_config(self) -> Optional[pulumi.Input['Prom...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
A collection of NetflowCollector instances.
def netflow_collector(self): return [NetflowCollector(**nc) for nc in self.data.get("netflow_collector", [])]
[ "def add_netflow_collector(self, netflow_collectors):\n if \"netflow_collector\" not in self.data:\n self.data[\"netflow_collector\"] = {\"netflow_collector\": []}\n\n for p in netflow_collectors:\n self.data[\"netflow_collector\"].append(p.data)\n self.update()", "def _...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add one or more netflow collectors to this log server.
def add_netflow_collector(self, netflow_collectors): if "netflow_collector" not in self.data: self.data["netflow_collector"] = [] for p in netflow_collectors: self.data["netflow_collector"].append(p.data) self.update()
[ "def netflow_collector(self):\n return [NetflowCollector(**nc) for nc in self.data.get(\"netflow_collector\", [])]", "def remove_netflow_collector(self, netflow_collector):\n _netflow_collector = []\n changed = False\n for nf in self.netflow_collector:\n if nf != netflow_col...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Remove a netflow collector from this log server.
def remove_netflow_collector(self, netflow_collector): _netflow_collector = [] changed = False for nf in self.netflow_collector: if nf != netflow_collector: _netflow_collector.append(nf.data) else: changed = True if changed: ...
[ "def disconnect_env_collector(self, clname, exc=True):\r\n found = None\r\n foundi = None\r\n for i, co in enumerate(self._added_collectors):\r\n if clname == co.__class__.__name__:\r\n found = co\r\n foundi = i\r\n break\r\n if fou...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Export the certificate request for the component when working with an External PKI. This can return None if the component does not have a certificate request.
def pki_export_certificate_request(self, filename=None): result = self.make_request( CertificateExportError, raw_result=True, resource="pki_export_certificate_request" ) if filename is not None: save_to_file(filename, result.content) return return re...
[ "def requestCertificate(self):\n # Get Cert from the request's environment\n if \"CLIENT_RAW_CERT\" in request.environ:\n return request.environ[\"CLIENT_RAW_CERT\"]\n if \"SSL_CLIENT_CERT\" in request.environ:\n return request.environ[\"SSL_CLIENT_CERT\"]\n return ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Import a valid certificate. Certificate can be either a file path or a string of the certificate. If string certificate, it must include the BEGIN CERTIFICATE string.
def pki_import_certificate(self, certificate): self.make_request( CertificateImportError, method="create", resource="pki_import_certificate", headers={"content-type": "multipart/form-data"}, files={ # decode certificate or use it as it ...
[ "def import_certificate(cert): # pylint: disable=unused-argument\n pass", "def test_import_cert(cert_path, thumbprint, certs, json_certs):\n kwargs = {\"name\": cert_path}\n mock_value = MagicMock(return_value=cert_path)\n with patch.dict(win_pki.__salt__, {\"cp.cache_file\": mock_value}), patch(\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Start the renewal process on the component when using external PKI mode. It generates a new private key and prepares a new certificate request.
def pki_renew_certificate(self): self.make_request( CertificateError, method="update", resource="pki_start_certificate_renewal", )
[ "def _initkeycertificate(self):\n\n if self.privatekey and self.fingerprint and self.certificate and self.publickeyxml:\n return # all set up\n\n filename = sanatizefilename(self.options['STAGECERTIFICATEFILE']['Value'])\n\n self.privatekey = syhelpers.tls.load_privatekey(filename)\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Delete the certificate request if any is defined for this component.
def pki_delete_certificate_request(self): self.make_request(method="delete", resource="pki_delete_certificate_request")
[ "def test_delete_certificate_signing_request(self):\n pass", "def test_certificate_delete(self):\n response = self.client.open(\n '/api/v1.0/domain/{domainName}/certificate/{certificateId}'.format(domainName='domainName_example', certificateId='certificateId_example'),\n method...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a new HTTP Proxy service. Proxy must define at least one primary address but can optionally also define a list of secondary addresses.
def create( cls, name, address, proxy_port=8080, username=None, password=None, secondary=None, comment=None, ): json = { "name": name, "address": address, "comment": comment, "http_proxy_port": pr...
[ "def create_proxy(self, addr='127.0.0.1', port=0, proxy_config=None, options=None):\n if self._proxy_mgr_addr is not None and self._proxy_mgr_port is not None:\n # TODO: ask the proxy manager to create a proxy and return that\n pass\n\n if options is None:\n options = ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The proxy service for this proxy server configuration
def proxy_service(self): return self.data.get("http_proxy")
[ "def getRemoteConfigServiceProxy(self):", "def get_proxy(self):\n address = next(self._address_pool_cycle) # pick random address\n proxy = {\"http\": address, \"https\": address}\n return proxy", "def proxy(self):\n if self._proxy is not None:\n if self._proxy[:7] == \"ht...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The specified services for inspection. An inspected service is a reference to a protocol that can be forwarded for inspection, such as HTTP, HTTPS, FTP and SMTP.
def inspected_services(self): return [ InspectedService(**service) for service in self.make_request(resource="inspected_services") ]
[ "def get_servicesinfo(ns):\n tf = TableFormatter(stdout, 0, True, {0: FIRST_COLUMN_MIN_SIZE})\n\n # Firewall\n try:\n fw = ''\n firewalld = get_service(ns, 'firewalld')\n if firewalld and firewalld.Status == 'OK':\n fw = 'on (firewalld)'\n else:\n iptables ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The NTP address (Required)
def address(self): return self.data.get("ntp_host_name")
[ "def ntp_host_name(self):\n return self.data.get(\"ntp_host_name\")", "def getNTPSetup(self):\n return NTPSetup(self.__screen)", "def get_tilemill_server_address(self):\n return Common.prepare_config_address(self.tilemill_server_address)", "def get_ntp_cfg(self):\n self.navigate_to(s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The NTP Host Name (Not Required)
def ntp_host_name(self): return self.data.get("ntp_host_name")
[ "def address(self):\n return self.data.get(\"ntp_host_name\")", "def hostname(self):\n return \"host%d\" % (self.host_id)", "def host_name(self):\n return self.__host_name", "def hostname(self) -> str:\n return typing.cast(\n str,\n self._properties.get(\"host...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The NTP Authentication Key Type (Required)
def ntp_auth_key_type(self): return self.data.get("ntp_auth_key_type")
[ "def ntp_auth_key(self):\n return self.data.get(\"ntp_auth_key\")", "def ntp_auth_key_id(self):\n return self.data.get(\"ntp_auth_key_id\")", "def type(self) -> Optional[pulumi.Input['PublicKeyType']]:\n return pulumi.get(self, \"type\")", "def keytype(authkey):\n if authkey is None:\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The NTP Authentication Key ID (Not Required), a value between 1 and 65534.
def ntp_auth_key_id(self): return self.data.get("ntp_auth_key_id")
[ "def ntp_auth_key(self):\n return self.data.get(\"ntp_auth_key\")", "def ntp_auth_key_type(self):\n return self.data.get(\"ntp_auth_key_type\")", "def _make_private_key(self):\n\t\treturn int(binascii.hexlify(os.urandom(16)), 16)", "def _new_session_id(self):\n return os.urandom(32).encod...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The NTP Authentication Key (Not Required)
def ntp_auth_key(self): return self.data.get("ntp_auth_key")
[ "def ntp_auth_key_id(self):\n return self.data.get(\"ntp_auth_key_id\")", "def ntp_auth_key_type(self):\n return self.data.get(\"ntp_auth_key_type\")", "def get_ntp_enabled(self):\n return None", "def getNTPSetup(self):\n return NTPSetup(self.__screen)", "def dns_api_key(self):\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Imports a log from a string
def __import_log_from_string(log_string, parameters=None, variant=DEFAULT_VARIANT): temp_file = string_to_file.import_string_to_temp_file(log_string, "xes") return apply(temp_file, parameters=parameters, variant=variant)
[ "def __init__(self, log_string: str):\n if (groups := balsa_log_regex.match(log_string)) is None:\n self.time_stamp = datetime.now()\n self.name = \"\"\n self.file_name = \"\"\n self.line_number = 0\n self.function_name = \"\"\n self.log_level...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Filter the queryset to all instances matching the given value on the specified lookup field.
def filter_queryset(self, value, queryset, field_name): filter_kwargs = { "%s__%s" % (self.lookup_field or field_name, self.lookup): value } return qs_filter(queryset, **filter_kwargs)
[ "def filterRecsByField(self, field_name, value):\n assert 0, u'Empty method'", "def do_field_filtering(self, request, queryset):\n fields = set(self.get_api_fields(queryset.model)).union({'id'})\n\n for field_name, value in request.GET.items():\n if field_name in fields:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The specified process is not running.
def test_no_such_process(self): pass
[ "def _is_running(process):\n with hide('output'):\n s = run('ps auwx')\n for x in s.split('\\n'):\n if re.search(process, x):\n print '%s running' % process\n return True\n\n return False", "def _check_process_is_running(self, name: str):\n for proc in psutil.pr...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Send messages to the status window (the only window)
def test_status_window(self): pass
[ "def _update_status_bar(self, message):\n self.window().status_bar = message", "def display_window(instrument, window_num=1, status='ON'):\n status.upper()\n if window_num != 1:\n command = ':DISPlay:WINDow%d:STATe %s' % (window_num, status)\n instrument.write(command)", "def OnStatus...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the time left until the fort battle starts.
def _getFortBattleTimer(self): if self.fortState.getStateID() == CLIENT_FORT_STATE.HAS_FORT: fortBattle = self.fortCtrl.getFort().getBattle(prb_getters.getBattleID()) if fortBattle is not None: return fortBattle.getRoundStartTimeLeft() return 0
[ "def get_timeAvailable(self):\r\n\r\n return self._timeAvailable", "def time_left(self):\r\n if self._timestamp is None:\r\n return 0\r\n else:\r\n return self.timeout-(time.time()-self._timestamp)", "def start_time(self):\n # if this hunt is configured for ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Shows fort battle unit window
def _showWindow(self): pInfo = self._entity.getPlayerInfo() if pInfo.isInSlot and not pInfo.isReady: g_eventDispatcher.showFortWindow()
[ "def show1(self,win):\n\n # display invader\n # -------------\n win.addstr(self.yPos, self.xPos,\"-o-\")\n\n win.refresh()", "def mgs_ur_show_launcher():\n window = pm.window(title='Launch Game of Ur')\n\n pm.columnLayout()\n pm.text(\"Click to launch the game,\\nor drag (with...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a class from a package.module.class string
def create_class(pkg_class: str): splits = pkg_class.split(".") clfclass = splits[-1] pkg_module = splits[:-1] class_ = getattr(import_module(".".join(pkg_module)), clfclass) return class_
[ "def get_class_from_string(class_name: str) -> Type[Any]:\n\n parts = class_name.split(\".\")\n module_name = \".\".join(parts[:-1])\n cls: Type[Any] = __import__(module_name)\n for comp in parts[1:]:\n cls = getattr(cls, comp)\n return cls", "def stringToClass(cls_str):\n import_stg1 = cls...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a function from a package.module.function string
def create_function(pkg_func: str): splits = pkg_func.split(".") pkg_module = ".".join(splits[:-1]) cb_fname = splits[-1] pkg_module = __import__(pkg_module, fromlist=[cb_fname]) function_ = getattr(pkg_module, cb_fname) return function_
[ "def _get_function_from_str(path: str) -> Callable:\n module_name, _, function_name = path.rpartition(\".\")\n module = importlib.import_module(module_name)\n function = getattr(module, function_name)\n return function", "def get_callable_from_string(f_name):\n try:\n mod_name, func_name = get_mod...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Downloads the current mission and returns it in a list. It is used in save_mission() to get the file information to save.
def download_mission(): print(" Download mission from vehicle") missionlist = list() cmds = vehicle.commands cmds.download() cmds.wait_ready() for cmd in cmds: missionlist.append(cmd) return missionlist
[ "def download_mission():\n # print \" Download mission from vehicle\"\n missionlist=[]\n cmd_list = vehicle.commands\n cmd_list.download()\n cmd_list.wait_ready()\n for cmd in cmd_list:\n missionlist.append(cmd)\n return missionlist", "def get_latest_mission_from_github():\n if core...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
append_teachyourai_format_example() is a method implemented in the ArtiSet class; it takes an example dict (which must contain "phrase" and "answer" keys) and converts it to BooleanQA format.
def append_teachyourai_format_example(self, example, do_print=False, append_to_list=None): if 'context' not in example: example['context'] = '' if 'id' not in example: example['id'] = self.create_qid(example) if do_print: print('a:%s d1:%s d2:%s || Q:%s' % ...
[ "def build_examples(to_predict):\n examples = []\n\n for row in to_predict:\n context = row[\"context\"]\n for qa in row[\"qas\"]:\n qa[\"answers\"] = [{\"text\": \" \", \"answer_start\": 0}]\n qa[\"is_impossible\"] = False\n example = {\"context\": context, \"qas\":...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
save_dataset() automatically saves the artiset. If the config output_file contains the string _sample.jsonl, it will be saved in a more readable format; otherwise the examples in self.artiset_data are split into train, dev, and test and saved to S3.
def save_dataset(self): # Move non-required columns to metadata: artiset_data_with_metadata = [] for example in self.artiset_data: if 'metadata' not in example: new_example = {'metadata':{}} else: new_example = {'metadata': example['metada...
[ "def save_datasets(self,fname='default',ftype=None):\n if not ftype: \n ftype = self.config['datasets_ftype']\n \n # Check if data are in dictionaries (sparse matrices)\n if (\n hasattr(self,'train') or \n hasattr(self,'val') or \n hasattr(self...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Adds the user to the rooms associated with its groups.
async def _set_user_in_group_rooms( app: web.Application, user_id: UserID, socket_id: SocketID ) -> None: primary_group, user_groups, all_group = await list_user_groups(app, user_id) groups = [primary_group] + user_groups + ([all_group] if bool(all_group) else []) sio = get_socket_server(app) for g...
[ "def add_group_users():\n group_id = request.args.get('group_id', 0)\n group = (db_session.query(Group)\n .filter(Group.id == group_id)\n .first())\n return render_template('add_group_users.html',\n group=group)", "def add_player_to_ro...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Based on the counts and the input file, calculate the log likelihood. Write each trigram and its log likelihood to output_file.
def write_output(Count_trigram, Count_bigram, input_file, output_name): output_file = open(output_name, "w") input_file.seek(0) l = input_file.readline() while l: line = l.strip() fields = line.split(" ") assert len(fields)==3 log_pr = cal_trigram_param(Count_trigram, Cou...
[ "def process_files(args, logger):\n writer = get_open_function(args.output)\n logger.info(\"Writing outputs to {0}\".format(args.output))\n\n with writer(args.output, \"wt\") as o:\n # Write header row as comment\n o.write(\"#\" + \"\\t\".join(COLUMN_NAMES) + \"\\n\")\n if len(args.inp...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initializes an instance of the ReservadorDeVuelos class.
def __init__(self): self.__vuelos = {}
[ "def __init__(self, nome, vagas=0):\n self.nome = nome\n self.vagas = vagas\n self.alunos = [] # self.alunos é um atributo global criado automaticamente", "def regice_init(self):\n for peripheral_name in self.svd.peripherals:\n peripheral = self.svd.peripherals[peripheral_n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
If this is an airflow run, generate a name to reflect it still awaits a sync. Otherwise, generate a human friendly name for the run.
def _generate_run_name(self, af_context: Optional[AirflowTaskContext]) -> str: if af_context is not None: return f"Airflow-run-await-sync_{self.run_uid}" return get_random_name(seed=self.run_uid)
[ "def _get_task_name(task):\n\n if task.is_generate_resmoke_task:\n return task.generated_task_name\n\n return task.name", "def taskName(self, name):\n\n return self.uniqueName(name)", "def get_name(self):\n try:\n return self.task.split('.')[-1]\n except NotImplement...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Loads the result of the run by name, defaulting to the default result name.
def load_from_result(self, name=RESULT_PARAM, value_type=None): # type: (Optional[str], Optional[str]) -> Any return self.run_executor.result.load(name, value_type)
[ "def load_by_name(self, name):\n return self.load(self.names.get(name, 0))", "def get_result(self, output_name):\n return self._results[output_name]", "def load(self):\n results_fn = os.path.join(self.full_path, self.output_filename)\n self.results = rlpy.Tools.results.load_single(re...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add object to end of the array.
def append(self, obj): if self._n == self._capacity: # not enough room self._resize(2 * self._capacity) # so double capacity self._A[self._n] = obj self._n += 1
[ "def append(self, obj):\n if self._n == self._capacity: # Not enough room, double capacity\n self._resize(2 * self._capacity)\n self._A[self._n] = obj\n self._n += 1", "def append(self, object) :\n self.objects.append(object)", "def append(self, value: ob...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get access to optimizer object
def optimizer(self) -> OptimizerBase: return self._optimizer
[ "def optimizer(self):\n return self.optimizers[0]", "def optimizer(self):\n return self._optimizer.__class__.__name__", "def get_optimizer(optimizer, **kwargs):\n return optimizers[optimizer](**kwargs)", "def make_optimizer(self):\n raise NotImplementedError", "def get_optimizer(self...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the output encoder decoder.
def output_encoder_decoder(self) -> Optional[LabelEncoderDecoder]: return self._output_encoder_decoder
[ "def _create_decoder(self):\n params = self.params['decoder_params']\n return self.params['decoder'](params=params, mode=self.mode, model=self)", "def getRightEncoder(self) -> wpilib.Encoder:\n return self.right_encoder", "def _get_conv_decoder(self, decoder_phase):\n if decoder_phase == \"past\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
A context manager that puts the network into training mode. While in training mode, the network enables regularization.
def training_mode(self) -> ContextManager: if self._verbose_logging: logger.debug(f"Entering training mode, enabling regularization") self._enabled_regularization = True yield self._enabled_regularization = False if self._verbose_logging: logger.debug(f"Le...
[ "def train(self) -> None:\r\n\r\n self.training = True", "def set_train(self):\n BaseModule.train_flag = True", "def set_train(self):\n # global setting in dygraph\n # NOTE(chenweihang): nn.Layer also can be used in static mode,\n # but _dygraph_tracer() can not be called in stati...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Method preprocesses raw tweets to strip unnecessary info, along with some aggregation operations.
def preprocess_raw_tweets(self, raw_tweets): def wait_for_awhile(): wait = 10 time.sleep(wait) twts = list() for user_data in raw_tweets: try: recent_tweets = [twt for twt in user_data['tweets']] # Aggregate the tweets to cre...
[ "def cleaner(tweet):\n\n cleaned_tweet = []\n cleaned_text = process(tweet.text)\n\n cleaned_tweet.append(tweet.id)\n cleaned_tweet.append(tweet.date)\n cleaned_tweet.append(tweet.text)\n cleaned_tweet.append(cleaned_text)\n cleaned_tweet.append(tweet.retweets)\n\n\n # Use hashtags and add t...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generates a spike train of a marked inhomogeneous Poisson process
def generate_spikes(model, parameter, trajectory_data): n_neurons = len(model['spike_rates']); spike_rates = model['spike_rates']; position_centers = model['position_centers']; position_std = model['position_std']; mark_centers = model['mark_centers']; mark_std = model['mark_std']; times = trajector...
[ "def poisson_train(ISI, time):\r\n# -----------------------------------------------------------------------------\r\n train_length = 0\r\n while train_length < time:\r\n candidate = np.random.poisson(ISI, (time/ISI)*2)\r\n train_length = sum(candidate)\r\n i = 0; train_length = 0\r\n while...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Computes (smoothed) variance within groups of tensor x.
def compute_grouped_variance(x, grouped_dim, smoothing_delta=0.0): grouped_shape = (x.shape[0], grouped_dim, -1) x_centered = center_data(x.view(grouped_shape)) return ((torch.norm(x_centered, dim=-1, p=2.0)) ** 2 + smoothing_delta) / x_centered.shape[-1]
[ "def calculate_variance(X):\n return np.var(X,axis=0)", "def ts_var(self, x):\r\n x.name = 'variance'\r\n return x.rolling(self.window).var()", "def prod_variance(X, varX, Y, varY):\n z = X**2.0 * varY + varX * varY + varX * Y**2.0\n return z", "def variance_filter(data, windowsize):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Groups layer activation with some power of smoothed variance.
def group_neurons_with_variance(x, grouped_dim=64, group_conv=False, group_fc=False, grouping_power=0.5, smoothing_delta=0.0): if group_conv and x.dim() == 4: assert x.shape[1] > 3, 'tried to group the input to the network' assert np.prod(list(x.shape[1:])) // grouped...
[ "def _smoothing_update(self):\n gain = cho_solve(cho_factor(self.x_cov_pr), self.xx_cov).T\n self.x_mean_sm = self.x_mean_fi + gain.dot(self.x_mean_sm - self.x_mean_pr)\n self.x_cov_sm = self.x_cov_fi + gain.dot(self.x_cov_sm - self.x_cov_pr).dot(gain.T)", "def __init__(self, layer: MultiLaye...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parses explore selectors with the format 'model_name/explore_name'.
def parse_selectors(selectors: List[str]) -> DefaultDict[str, set]: selection: DefaultDict = defaultdict(set) for selector in selectors: try: model, explore = selector.split("/") except ValueError: raise SpectaclesException( f"E...
[ "async def _query_explore(\n self, session: aiohttp.ClientSession, model: Model, explore: Explore\n ) -> str:\n dimensions = [dimension.name for dimension in explore.dimensions]\n query_task_id = await self._run_query(\n session, model.name, explore.name, dimensions\n )\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Queries selected explores and returns any errors.
def validate(self, mode: str = "batch") -> List[SqlError]: explore_count = self._count_explores() printer.print_header( f"Testing {explore_count} " f"{'explore' if explore_count == 1 else 'explores'} " f"[{mode} mode]" ) loop = asyncio.get_event_loop(...
[ "def test_query_grants_fail(cb):\n query = cb.select(Grant)\n with pytest.raises(ApiError):\n list(query)", "def do_query(self):\n print(\"[*] Beginning HackerTarget Query\")\n try:\n res = requests.get(self.ENDPOINT, verify=False, proxies=self.proxies)\n self._pri...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates and executes a query with a single explore.
async def _query_explore( self, session: aiohttp.ClientSession, model: Model, explore: Explore ) -> str: dimensions = [dimension.name for dimension in explore.dimensions] query_task_id = await self._run_query( session, model.name, explore.name, dimensions ) self.q...
[ "def query(self, sql):", "def _run_query (self, query):\n self._login()\n return self.api_obj.query(query)", "def query(self, **kwargs):\n return self.iterate('query', **kwargs)", "def do(self, **kwargs):\n self._check_query_input(**kwargs)\n return Query(dag=self.dag, given...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates and executes a query with a single dimension.
async def _query_dimension(
    self,
    session: aiohttp.ClientSession,
    model: Model,
    explore: Explore,
    dimension: Dimension,
) -> str:
    query_task_id = await self._run_query(
        session, model.name, explore.name, [dimension.name]
    )
    self.query_tasks[...
[ "def query(session, Dim, **kwargs):\n return session.query(Dim).filter_by(**kwargs).first()", "def run(self):\n return self.cdb.db.query(\"dataset\", self.query)", "def _Dynamic_RunQuery(self, request, response):\n runquery_response = datastore_pb.QueryResult()\n self.__call('datastore_v3', 'Run...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Counts the explores in the LookML project hierarchy.
def _count_explores(self) -> int:
    explore_count = 0
    for model in self.project.models:
        explore_count += len(model.explores)
    return explore_count
[ "def count_explores(self) -> int:\n return len([explore for explore in self.iter_explores() if not explore.skipped])", "def _num_root(self) -> int:\n return sum(\n int(lineage.root.item.status != \"broken\") for lineage in self.lineages\n )", "def n_experiences(self):\n\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Conform a dictionary of variables onto a new set of variables reindexed with dimension positional indexers and possibly filled with missing values. Not public API.
def reindex_variables(
    variables: Mapping[Any, Variable],
    dim_pos_indexers: Mapping[Any, Any],
    copy: bool = True,
    fill_value: Any = dtypes.NA,
    sparse: bool = False,
) -> dict[Hashable, Variable]:
    new_variables = {}
    dim_sizes = calculate_dimensions(variables)
    masked_dims = set()
    unch...
[ "def _normalize_indexes(\n self,\n indexes: Mapping[Any, Any],\n ) -> tuple[NormalizedIndexes, NormalizedIndexVars]:\n if isinstance(indexes, Indexes):\n xr_variables = dict(indexes.variables)\n else:\n xr_variables = {}\n\n xr_indexes: dict[Hashable, Inde...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Normalize the indexes/indexers used for reindexing or alignment. Return dictionaries of xarray Index objects and coordinate variables such that we can group matching indexes based on the dictionary keys.
def _normalize_indexes(
    self,
    indexes: Mapping[Any, Any],
) -> tuple[NormalizedIndexes, NormalizedIndexVars]:
    if isinstance(indexes, Indexes):
        xr_variables = dict(indexes.variables)
    else:
        xr_variables = {}

    xr_indexes: dict[Hashable, Index] = {}
    ...
[ "def align_indexes(self) -> None:\n\n aligned_indexes = {}\n aligned_index_vars = {}\n reindex = {}\n new_indexes = {}\n new_index_vars = {}\n\n for key, matching_indexes in self.all_indexes.items():\n matching_index_vars = self.all_index_vars[key]\n d...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check for uniqueness of both coordinate and dimension names across all sets of matching indexes. We need to make sure that all indexes used for reindexing or alignment are fully compatible and do not conflict with each other.
def assert_no_index_conflict(self) -> None:
    matching_keys = set(self.all_indexes) | set(self.indexes)

    coord_count: dict[Hashable, int] = defaultdict(int)
    dim_count: dict[Hashable, int] = defaultdict(int)
    for coord_names_dims, _ in matching_keys:
        dims_set: set[Hashable] = set...
[ "def _need_reindex(self, dims, cmp_indexes) -> bool:\n if not indexes_all_equal(cmp_indexes):\n # always reindex when matching indexes are not equal\n return True\n\n unindexed_dims_sizes = {}\n for dim in dims:\n if dim in self.unindexed_dim_sizes:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Whether or not we need to reindex variables for a set of matching indexes.
def _need_reindex(self, dims, cmp_indexes) -> bool:
    if not indexes_all_equal(cmp_indexes):
        # always reindex when matching indexes are not equal
        return True

    unindexed_dims_sizes = {}
    for dim in dims:
        if dim in self.unindexed_dim_sizes:
            sizes = ...
[ "def check_index_consistency(self):\n dfs = [self._y, self._X_extra, self._X_extra_base, self._X_extra_unenc,\n self._X_select, self._X_select_base, self._X_select_unenc]\n\n indexes = [df.index for df in dfs if df is not None]\n\n for i in range(len(indexes)-1):\n idx1...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute all aligned indexes and their corresponding coordinate variables.
def align_indexes(self) -> None:
    aligned_indexes = {}
    aligned_index_vars = {}
    reindex = {}
    new_indexes = {}
    new_index_vars = {}

    for key, matching_indexes in self.all_indexes.items():
        matching_index_vars = self.all_index_vars[key]
        dims = {d for co...
[ "def computeAllIndex(self):\n indexes = self.klucb_vect(self.rewards / self.pulls, self.c * np.log(self.t_for_each_arm) / self.pulls, self.tolerance)\n indexes[self.pulls < 1] = float('+inf')\n self.index[:] = indexes", "def calc_indices(self, x, y):\n i1, i2 = self.calc_fractional_ind...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given any number of Dataset and/or DataArray objects, returns new objects with aligned indexes and dimension sizes. Arrays from the aligned objects are suitable as input to mathematical operators, because along each dimension they have the same index and size. Missing values (if ``join != 'inner'``) are filled with ``fi...
def align(
    *objects: T_Alignable,
    join: JoinOptions = "inner",
    copy: bool = True,
    indexes=None,
    exclude=frozenset(),
    fill_value=dtypes.NA,
) -> tuple[T_Alignable, ...]:
    aligner = Aligner(
        objects,
        join=join,
        copy=copy,
        indexes=indexes,
        exclude_dims=exc...
[ "def deep_align(\n objects: Iterable[Any],\n join: JoinOptions = \"inner\",\n copy=True,\n indexes=None,\n exclude=frozenset(),\n raise_on_invalid=True,\n fill_value=dtypes.NA,\n):\n from xarray.core.coordinates import Coordinates\n from xarray.core.dataarray import DataArray\n from xa...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Align objects for merging, recursing into dictionary values. This function is not public API.
def deep_align(
    objects: Iterable[Any],
    join: JoinOptions = "inner",
    copy=True,
    indexes=None,
    exclude=frozenset(),
    raise_on_invalid=True,
    fill_value=dtypes.NA,
):
    from xarray.core.coordinates import Coordinates
    from xarray.core.dataarray import DataArray
    from xarray.core.dataset ...
[ "def _merge_dicts(self, x, y):\n z = x.copy() # start with x's keys and values\n z.update(y) # modifies z with y's keys and values & returns None\n\n return z", "def _merge_asized(base: Asized, other: Asized, level: int = 0) -> None:\n base.size += other.size\n base.flat += other.flat...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the channel_order_no of this ChannelOrderRequest.
def channel_order_no(self, channel_order_no):
    if self.local_vars_configuration.client_side_validation and channel_order_no is None:  # noqa: E501
        raise ValueError("Invalid value for `channel_order_no`, must not be `None`")  # noqa: E501
    if (self.local_vars_configuration.client_side_validatio...
[ "def customer_order_number(self, customer_order_number):\n\n self._customer_order_number = customer_order_number", "def vendor_order_id(self, vendor_order_id):\n\n self._vendor_order_id = vendor_order_id", "def set_order(self, order_key: str) -> None:\n if order_key not in self.orders:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the is_business_order of this ChannelOrderRequest.
def is_business_order(self, is_business_order):
    self._is_business_order = is_business_order
[ "def business_info(self, business_info):\n\n self._business_info = business_info", "def business_name(self, business_name: str):\n if business_name is None:\n raise ValueError(\"Invalid value for `business_name`, must not be `None`\") # noqa: E501\n\n self._business_name = busines...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the key_is_merchant_product_no of this ChannelOrderRequest.
def key_is_merchant_product_no(self, key_is_merchant_product_no):
    self._key_is_merchant_product_no = key_is_merchant_product_no
[ "def set_product_id(self, **kwargs):\n if self.is_quicklook():\n self._product_id = f'{self._obs_id}.quicklook'\n else:\n self._product_id = f'{self._obs_id}.continuum_imaging'", "def job_product_id(self, job_product_id):\n\n self._job_product_id = job_product_id", "de...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the company_registration_no of this ChannelOrderRequest.
def company_registration_no(self, company_registration_no):
    if (self.local_vars_configuration.client_side_validation and
            company_registration_no is not None and len(company_registration_no) > 50):
        raise ValueError("Invalid value for `company_registration_no`, length must be less than...
[ "def channel_order_no(self, channel_order_no):\n if self.local_vars_configuration.client_side_validation and channel_order_no is None: # noqa: E501\n raise ValueError(\"Invalid value for `channel_order_no`, must not be `None`\") # noqa: E501\n if (self.local_vars_configuration.client_side...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the vat_no of this ChannelOrderRequest.
def vat_no(self, vat_no):
    if (self.local_vars_configuration.client_side_validation and
            vat_no is not None and len(vat_no) > 50):
        raise ValueError("Invalid value for `vat_no`, length must be less than or equal to `50`")  # noqa: E501
    if (self.local_vars_configuration.client_si...
[ "def vat_details(self, vat_details):\n\n self._vat_details = vat_details", "def setKnotInV(*args, **kwargs):\n \n pass", "def ucapv(self, ucapv):\n\n self._ucapv = ucapv", "def setNonce(self, nonce):\n self[Header.PARAM_NONCE] = nonce", "def vif_uuid(self, vif_uuid):\n\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }