query: string (length 9 to 9.05k)
document: string (length 10 to 222k)
negatives: list (length 19 to 20)
metadata: dict
Accepts the server challenge and sends the signed nonce. The protocols the client is using are also sent, so the server can verify they were not downgraded.
def accept_challenge(self,nonce2): logger.info("Sending POST to accept Challenge") if self.state=='START_CHALLENGE': snonce2 = self.sign_message(nonce2) self.challenge_nonce2 = snonce2 key, salt = self.derive_key(self.shared_key) if self.session_id != None: headers = { 'Content-Type': '...
[ "def start_challenge(self):\r\n\t\tif self.state=='KEY_EXCHANGE':\r\n\r\n\t\t\tlogger.info(\"Starting Challenge\")\r\n\t\t\tnonce = os.urandom(16)\r\n\t\t\tself.challenge_nonce = nonce\r\n\t\t\tkey, salt = self.derive_key(self.shared_key)\r\n\t\t\tif self.session_id != None:\r\n\t\t\t\theaders = {\r\n\t\t\t\t\t'Con...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Derives a key from the given data.
def derive_key(self, data, salt=None): digest=None if salt==None: salt=os.urandom(16) if self.digest == 'SHA-512': digest = hashes.SHA512() elif self.digest == 'SHA-256': digest =hashes.SHA256() key_size = 32 if self.cipher=='3DES': key_size = 16 # derive kdf = PBKDF2HMAC( ...
[ "def get_key_from_data(data):\n if 'key' in data:\n item_key = data['key']\n return item_key\n return None", "def _create_key(self):\n return uuid.uuid4().hex", "def key( self, digram ):\n\t\ta,b = digram.refdigram()\n\t\treturn str( a ) + self.keyseparator + str( b )"...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Maps a value from 0 to 0xffffff to an (r, g, b) tuple.
def naivecolormap(value):
    # value2pixel(0.5) -> (0.5,0.5,0.5)
    red = (value & 0x00ff0000) >> 16
    green = (value & 0x0000ff00) >> 8
    blue = (value & 0x000000ff) >> 0
    return (int(red), int(green), int(blue))  # rgb
[ "def colorUpdate(self):\n if self.value ==0:\n self.color = [255,255,255]\n return\n k = 0\n V = self.value\n while V>0:\n k += 18\n V //= 2\n self.color = [k,255-k,0]", "def hass_to_wilight_position(value: int) -> int:\n return min...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Prints out all residues (mod N.Z_p) of rotations of the form, along with the image mod N.Z_p of h. It then enumerates all lattice cosets of the type Nlattice + h' which are p-adically equivalent to Nlattice + h. Finally, it prints out the orbit of these Nlattice + h' cosets under the isometries of the (global) form form ...
def enumerate_pAdic_class(form, h, N, p, opfile = None, rotations_only = True, debug = False): N = p**amf.order(N,p) if opfile == None: adjective = "proper " if rotations_only else '' opfile = 'output %d-adic %scls - form=(%d,%d,%d) h=(%d,%d,%d) N=%d.txt'%(p, adjective, form[0,0], form[1,1], form[2,2], h[0...
[ "def impmat(structure, freq):\n center = Matrix(structure.center)\n center_ = Matrix(structure.center_)\n edge_length = structure.edge_length\n\n edges_total = structure.edges_total\n triangles_total = structure.triangles_total\n speed_of_light = structure.speed_of_light\n\n wn = 2*PI*freq/spee...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Display a pass message to the user, for a given amount of time. timeout: Time to display the message, in seconds
def DisplayPass(self, message='PASS', timeout=0):
    self.DisplayMessage(message, fgcolor=colorama.Fore.GREEN)
    time.sleep(timeout)
[ "def DisplayFail(self, message = 'FAIL', timeout=0):\n self.DisplayMessage(message, fgcolor=colorama.Fore.RED)\n time.sleep(timeout)", "def DisplayError(self, message = 'ERROR', timeout=0):\n self.DisplayMessage(message, fgcolor=colorama.Fore.RED)", "def display_timeout(self, timeout=600):\n\n w...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Display a failure message to the user, for a given amount of time. timeout: Time to display the message, in seconds
def DisplayError(self, message='ERROR', timeout=0):
    self.DisplayMessage(message, fgcolor=colorama.Fore.RED)
[ "def DisplayFail(self, message = 'FAIL', timeout=0):\n self.DisplayMessage(message, fgcolor=colorama.Fore.RED)\n time.sleep(timeout)", "def time_out():", "def DisplayPass(self, message = 'PASS', timeout=0):\n self.DisplayMessage(message, fgcolor=colorama.Fore.GREEN )\n time.sleep(timeout)", "def h...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Display a failure message to the user, for a given amount of time. timeout: Time to display the message, in seconds
def DisplayFail(self, message='FAIL', timeout=0):
    self.DisplayMessage(message, fgcolor=colorama.Fore.RED)
    time.sleep(timeout)
[ "def DisplayError(self, message = 'ERROR', timeout=0):\n self.DisplayMessage(message, fgcolor=colorama.Fore.RED)", "def time_out():", "def DisplayPass(self, message = 'PASS', timeout=0):\n self.DisplayMessage(message, fgcolor=colorama.Fore.GREEN )\n time.sleep(timeout)", "def handler(signum, frame):\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the number of target qubits
def num_target_qubits(self): return self._num_target_qubits
[ "def num_targets(self):", "def nqudits(self) -> int:\n return self._nqudits", "def target_sizes(self):\n return Counter(self.targets.values())", "def num_evaluation_qubits(self) -> int:\n return self._num_evaluation_qubits", "def num_targets(self) -> int:\n return len(self.target...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Adds inverse of corresponding subcircuit to given circuit
def build_inverse(self, qc, q, q_ancillas=None, params=None): qc_ = QuantumCircuit(*qc.qregs) self.build(qc_, q, q_ancillas, params) try: qc_.data = [gate.inverse() for gate in reversed(qc_.data)] except Exception as exc: raise AquaError('Irreversible circuit! Ga...
[ "def build_controlled_inverse(self, qc, q, q_control, q_ancillas=None, params=None):\n qc_ = QuantumCircuit(*qc.qregs)\n\n self.build_controlled(qc_, q, q_control, q_ancillas, params)\n try:\n qc_.data = [gate.inverse() for gate in reversed(qc_.data)]\n except AquaError:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Adds controlled inverse of corresponding subcircuit to given circuit
def build_controlled_inverse(self, qc, q, q_control, q_ancillas=None, params=None): qc_ = QuantumCircuit(*qc.qregs) self.build_controlled(qc_, q, q_control, q_ancillas, params) try: qc_.data = [gate.inverse() for gate in reversed(qc_.data)] except AquaError: prin...
[ "def build_inverse(self, qc, q, q_ancillas=None, params=None):\n qc_ = QuantumCircuit(*qc.qregs)\n\n self.build(qc_, q, q_ancillas, params)\n try:\n qc_.data = [gate.inverse() for gate in reversed(qc_.data)]\n except Exception as exc:\n raise AquaError('Irreversible...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Adds inverse power of corresponding circuit. May be overridden if a more efficient implementation is possible
def build_inverse_power(self, qc, q, power, q_ancillas=None, params=None):
    for _ in range(power):
        self.build_inverse(qc, q, q_ancillas, params)
[ "def inverse_differential_power(cls, power, tangent_vec, base_point):\n (\n eigvectors,\n transconj_eigvectors,\n numerator,\n denominator,\n temp_result,\n ) = cls._aux_differential_power(power, tangent_vec, base_point)\n power_operator = ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add a file to be uploaded.
def add_file(self, fieldname, filename, content, mimetype=None):
    if mimetype is None:
        mimetype = mimetypes.guess_type(filename)[0] or 'application/octet-stream'
    self.files.append((fieldname, filename, mimetype, content))
[ "def add_file(self, fpath):\n if not os.path.isfile(fpath):\n print(\"cloudtalker: cannot find file\", fpath)\n return None\n #try to parse filename\n parsed = self.parse_filename(fpath)\n print(\"after parsing:\", parsed)\n if parsed is not None:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a string representing the form data, including attached files.
def __str__(self): # Build a list of lists, each containing "lines" of the # request. Each part is separated by a boundary string. # Once the list is built, return a string where each # line is separated by '\r\n'. parts = [] part_boundary = '--' + self.boundary ...
[ "def document_form_data():\n\n return {\"document\": SimpleUploadedFile(\"myimage.png\", b\"file_content\")}", "def encode_multipart_formdata(\n self,\n ) -> Tuple[Optional[str], Optional[bytes]]:\n if not (self._fields or self._files):\n return None, None\n\n NEWLINE = b'\\r...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Find the (mode, count) of a set of data, including a tolerance window of +/- window if > 0
def find_mode(data, window=0):
    vals = np.unique(data)
    counts = [len([x for x in data if abs(x - val) <= window]) for val in vals]
    bestix = np.argmax(counts)
    return (vals[bestix], counts[bestix])
[ "def mode_detrend(data, window=500, bins=None, threshold=3.0):\n d1 = data.view(np.ndarray)\n ends = [d1[:window], d1[-window:]]\n y = [float_mode(w, bins=bins) for w in ends]\n \n x0 = window / 2.0\n x1 = len(data) - x0\n m = (y[1] - y[0]) / (x1 - x0)\n b0 = y[1] - m * x1\n b1 = b0 +...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Match audio against the fingerprint hash table. Return the top N matches as (id, filtered matches, time offsets, raw matches). If hashesfor is specified, return the actual matching hashes for that hit (0=top hit).
def match_hashes(ht, hashes, hashesfor=None, window=1): # find the implicated id, time pairs from hash table hits = ht.get_hits(hashes) # Sorted list of all the track ids that got hits idlist = np.r_[-1, sorted([id for id, time, hash, otime in hits]), -1] # Counts of unique entries in the sorted lis...
[ "def return_matches(self, hashes: List[Tuple[str, int]],\n batch_size: int = 1000) -> Tuple[List[Tuple[int, int]], Dict[int, int]]:\n # Create a dictionary of hash => offset pairs for later lookups\n mapper = {}\n for hsh, offset in hashes:\n if hsh.upper() in m...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Read in an audio file, calculate its landmarks, and query against the hash table. Return the top N matches as (id, filtered match count, time offset, raw match count), along with the length of the input file in seconds and the count of raw query hashes extracted.
def match_file(ht, filename, density=None, sr=11025, n_fft=512, n_hop=256, window=1, shifts=4, verbose=False): hq = audfprint.wavfile2hashes(filename, sr=sr, density=density, n_fft=n_fft, n_hop=n_hop, shifts=shifts) # Fake durations as largest hash time if len(hq) == 0: ...
[ "def analyze_file(filename):\n bps = 0\n seqs = 0\n input_iter = khmer.ReadParser(filename)\n unique = {}\n for k in (21, 31, 51):\n unique[k] = khmer.HLLCounter(ksize=k)\n for record in input_iter:\n bps += len(record.sequence)\n seqs += 1\n# for hll in unique.values()...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Show the query fingerprints and the matching ones plotted over a spectrogram
def illustrate_match(ht, filename, density=None, sr=11025, n_fft=512, n_hop=256, window=1, shifts=4): # Make the spectrogram d, sr = librosa.load(filename, sr=sr) S = np.abs(librosa.stft(d, n_fft=512, hop_length=256, window=np.hanning(512+2)[1:-1])) S = 20.0*np.log10(np.maxi...
[ "def plotSpectrogram(self):\n\n\t\t#max freq represetnedf (nyquist constrained)\n\t\timgHeight = self.sampleRate/2\n\n\t\tself.p.y_range.end = imgHeight * self.numChannels\n\n\t\timgWidth = self.signalDuration\n\t\tself.p.x_range.end = imgWidth\n\n\t\tfor channelNum in self.activeChannels:\n\t\t\tchannelSignal = s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Interpret a byte as an unsigned int
def as_unsigned_char(byte): return unsigned_char.unpack(byte)[0]
[ "def __convert_to_unsigned_num(self, bytestring:str):\n return BitArray(bin=bytestring).uint", "def bytes_to_uint(raw_bytes):\n if not builtins.is_bytes(raw_bytes):\n raise TypeError(\"argument must be raw bytes: got %r\" %\n type(raw_bytes).__name__)\n # binascii.b2a_hex is writt...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Read a (monochrome) PNG image and convert to 1bpp raw data This should work with any 8 bit PNG. To ensure compatibility, the image can be processed with Imagemagick first using the monochrome flag.
def read_png(path): buf = bytearray() # State for bit packing bit_cursor = 8 byte = 0 # Read the PNG image reader = png.Reader(filename=path) width, height, rows, metadata = reader.asRGB() # Loop over image and pack into 1bpp buffer for row in rows: for pixel in range(0, ...
[ "def read_raw(filename, height=979, width=1312, bayer = False):\r\n\r\n raw_file = open(filename,'rb')\r\n image = (np.fromfile(raw_file, count = height*width, dtype='uint16'))/256\r\n image = np.reshape(image, (height,width), 'C')\r\n\r\n if bayer == True:\r\n image = cv2.cvtColor(image, cv2.CO...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function that takes in the domain x, y (2D meshgrids) and a list of 2D arrays eta_list and creates an animation of all eta images. To get an updating title one also needs to specify the time step dt between each frame in the simulation, the number of time steps between each eta in eta_list and, finally, a filename for the video.
def eta_animation(X, Y, eta_list, frame_interval, filename): fig, ax = plt.subplots(1, 1) #plt.title("Velocity field $\mathbf{u}(x,y)$ after 0.0 days", fontname = "serif", fontsize = 17) plt.xlabel("x [m]", fontname = "serif", fontsize = 12) plt.ylabel("y [m]", fontname = "serif", fontsize = 12) pme...
[ "def create_animation(env, images):\n\n # We keep the borders, but remove top padding\n og_width = env.width\n og_height = env.height-4\n\n width = og_width * IMAGE_RESCALE\n height = og_height * IMAGE_RESCALE\n\n file_name = './videos/video_exp' + str(EXP) + '_seed' + str(SEED) + '.avi'\n co...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function that takes in the domain x, y (2D meshgrids) and lists of 2D arrays u_list, v_list and creates a quiver animation of the velocity field (u, v). To get an updating title one also needs to specify the time step dt between each frame in the simulation, the number of time steps between each eta in eta_list and finally, a ...
def velocity_animation(X, Y, u_list, v_list, frame_interval, filename): fig, ax = plt.subplots(figsize = (8, 8), facecolor = "white") plt.title("Velocity field $\mathbf{u}(x,y)$ after 0.0 days", fontname = "serif", fontsize = 19) plt.xlabel("x [km]", fontname = "serif", fontsize = 16) plt.ylabel("y [km]...
[ "def eta_animation(X, Y, eta_list, frame_interval, filename):\n fig, ax = plt.subplots(1, 1)\n #plt.title(\"Velocity field $\\mathbf{u}(x,y)$ after 0.0 days\", fontname = \"serif\", fontsize = 17)\n plt.xlabel(\"x [m]\", fontname = \"serif\", fontsize = 12)\n plt.ylabel(\"y [m]\", fontname = \"serif\", ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function that makes a quiver plot of (U, V) at points (X, Y).
def quiver_plot(X, Y, U, V, plot_title): plt.figure() plt.title(plot_title, fontname = "serif", fontsize = 17) plt.xlabel("x [m]", fontname = "serif", fontsize = 12) plt.ylabel("y [m]", fontname = "serif", fontsize = 12) Q = plt.quiver(X[::4, ::4], Y[::4, ::4], U[::4, ::4], V[::4, ::4], unit...
[ "def quiver(\n ds: Dataset,\n x: Hashable,\n y: Hashable,\n ax: Axes,\n u: Hashable,\n v: Hashable,\n **kwargs: Any,\n) -> Quiver:\n import matplotlib as mpl\n\n if x is None or y is None or u is None or v is None:\n raise ValueError(\"Must specify x, y, u, v for quiver plots.\")\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function that generates a Hovmuller diagram of eta as a function of x and t at a chosen y-coordinate
def hovmuller_plot(x, t, eta): X, T = np.meshgrid(x, np.array(t)) X = np.transpose(X) # Transpose for plotting T = np.transpose(T) # Transpose for plotting eta_hm = np.transpose(np.array(eta)) # Transpose for plotting plt.figure(figsize = (5, 8)) plt.pcolormesh(X, T, eta_hm, vm...
[ "def h( self , x , u , t ):\n \n #y = np.zeros(self.p) # Output vector\n \n y = self.sys.h( x , self.sys.ubar , t )\n \n return y", "def y_exact(t):\n return -epsilon*cos(omega*t)", "def HenonHeiles_Hamiltonian(t, u):\n points_positions = u.T[:2]\n points_mom...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function that takes a signal and its corresponding time array. Then plots the time signal as well as its Fourier transform.
def plot_time_series_and_ft(t, signal): t = np.array(t) signal = np.array(signal) # Plotting the time series. plt.figure(figsize = (8, 7)) plt.subplot(2, 1, 1) plt.plot(t, signal, linewidth = 2) plt.title("Time series of $\eta$ at center of domain", fontname = "serif", fontsize = 17) pl...
[ "def plot_inverse_fourier_transform(fs, wave, time, title):\n plt.figure(num=title+\" - \"+filename[:-4], figsize=(8, 5))\n plt.plot(time, wave, color=\"blue\", label=\"ifft(t)\")\n plt.legend(loc=1)\n plt.xlim(time[0], time[-1])\n plt.xlabel('Time (s)')\n plt.ylabel('ifft(t)')\n plt.title(titl...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Read all ATL08 CSVs of a given year (produced by extract_filter_atl08.py), write them to a pickle file by year, and return a GeoDataFrame.
def atl08_io( self, atl08_csv_output_dir, year_search, do_pickle=True, filename_regex='ATL08*.csv' ): dir_pickle = atl08_csv_output_dir filename_regex = os.path.join( atl08_csv_output_dir, year_search, filename_regex...
[ "def load_year(observatory=None, year=None, path=None):\n dates_in_year = pd.date_range(\n start=f'{year}-01-01', end=f'{year}-12-31', freq='D'\n )\n df = pd.DataFrame()\n for date in dates_in_year:\n ymd = date.strftime('%Y%m%d')\n file_name = f'{observatory}{ymd}dmin.min'\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Filter raster filenames list by year.
def filter_raster_filenames_by_year( self, filenames: list, start_year: int, end_year: int ): new_list = [] years = [str(year) for year in range(start_year, end_year+1)] for f in filenames: date_match = re.search( ...
[ "def filter_years():\n years = sys.argv[1:]\n for year in years:\n infile = os.path.join(BASE_DIR, CLASSIFICATION_EXP % year, FILENAME1)\n outfile1 = os.path.join(BASE_DIR, CLASSIFICATION_EXP % year, FILENAME2)\n outfile2 = os.path.join(BASE_DIR, CLASSIFICATION_EXP % year, FILENAME3)\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add new field to config
def add_field(self, field):
    config = configparser.ConfigParser()
    config.read(self.path)
    config.set(self.section, field, '')
    with open(self.path, 'w') as config_file:
        config.write(config_file)
[ "def set_value(self, config_field, include_doc=False):\n raise NotImplementedError", "def add_field(self, field):\n # lots of stuff left, needs to be done here\n if not field.get('name'):\n field['name'] = reduce_to_alphanumeric(unicode(field.get('label')).lower())\n \n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Recursively retrieves the current path, given a (potentially) old path.
def retrieve_current_path(self, old_path): path = old_path detect_endless_loop = 0 while path is not None and path not in self.repo_files_path and detect_endless_loop < 50: if path in self.old_to_new_path: path = self.old_to_new_path[path] else: ...
[ "def parseNewPath(fs, directory, path):\n parentDir = directory\n if path.startswith(\"/\"):\n path = path[1:]\n parentDir = fs.rootDir\n if parentDir.absolutePath == directory.absolutePath:\n parentDir = directory\n if \"/\" in path:\n name = path[path.rindex(\"/\")+1:]\n parentDir = getFile...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Checks whether repo is a local or remote path to a repo.
def _is_remote_repository(repo: str) -> bool: return repo.startswith("git@") or repo.startswith("https://")
[ "def is_repo(repo_dir: Path): \n try:\n r = git.Repo(repo_dir)\n except git.InvalidGitRepositoryError:\n return False \n return True", "def has_local_repo(self):\n return all((self.local_path, self.local_file, self.local_repo))", "def test_missing_git_and_slash_url(self, _, __):\n s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Clones the remote repo to path_to_folder.
def _clone_remote_repository(self, path_to_folder: str, repo: str) -> str:
    repo_folder = os.path.join(path_to_folder, self._get_repo_name_from_url(repo))
    git.Repo.clone_from(url=repo, to_path=repo_folder)
    return repo_folder
[ "def clone(dirpath, git_url):\n purge_folder(dirpath)\n Repo.clone_from(git_url, dirpath, progress=Progress())", "def clone_to_folder(destination, endpoint):\n click.echo('... cloning ' + endpoint + ' to ' + destination)\n execute('git clone -q ' + endpoint)", "def sync(self):\n if not os.pat...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parses repo url to get its name.
def _get_repo_name_from_url(url: str) -> str: last_slash_index = url.rfind("/") last_suffix_index = url.rfind(".git") if last_suffix_index < 0: last_suffix_index = len(url) if last_slash_index < 0 or last_suffix_index <= last_slash_index: raise Exception("Badly ...
[ "def _repo_name_from_url(url_decode: str):\n github_project_name = os.path.split(url_decode.path)[-1]\n return github_project_name.replace('.git', '')", "def repo_name(repo):\n\n if \"name\" not in repo or repo[\"name\"] is None:\n repo[\"name\"] = os.path.basename(repo[\"upstream\"])\n if ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Loads a commit graph stored in the pickle format.
def load_commit_graph(self, path): self.commit_graph = nx.readwrite.gpickle.read_gpickle(path)
[ "def load_commit_graph_lines(self, path):\n\n self.commit_graph_lines = nx.readwrite.gpickle.read_gpickle(path)", "def pickle_to_graph(pickle):\n\treturn pickle.loads(pickle)", "def loadGraph(filename):\n with open(filename,\"rb\") as fp:\n return pickle.load(fp)", "def _load_graph(filename):\r\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Loads a commit graph for lines stored in the pickle format.
def load_commit_graph_lines(self, path): self.commit_graph_lines = nx.readwrite.gpickle.read_gpickle(path)
[ "def __grab_patch__(self):\n\n patch = []\n line = self.buffer or self.fd.readline()\n\n while line:\n m = patterns['commit'].match(line)\n if m:\n patch = [line]\n break\n line = self.fd.readline()\n\n if not line:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Find the lines related to a given function and print them.
def find_lines_related_to_function(self, function_name, path):
    modified_in_commits = self.get_commits_that_modified_function(function_name, path)
    self.find_related_lines(path, modified_in_commits)
[ "def find_next_function(lines, line_number):\n assert False, \"Unimplemented!\"", "def show_device_functions(source):\n # type: (str) -> str\n for match in FUNCTION_PATTERN.finditer(source):\n print(match.group('qualifiers').replace('\\n', r'\\n'),\n match.group('function'), '(')\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Find lines in other files that are related to lines in a given file, based on commit history.
def find_lines_related_to_lines(self, start_line, end_line, path, concurrent=False): cwd = os.getcwd() os.chdir(self.repo_folder) modified_in_commits = self.get_commits_that_modified_line(start_line, end_line, path) modified_in_commits = [commit[1:-1] for commit in modified_in_commits] ...
[ "def find_lines_related_to_function(self, function_name, path):\n\n modified_in_commits = self.get_commits_that_modified_function(function_name, path)\n self.find_related_lines(path, modified_in_commits)", "def get_commits_that_modified_line(self, start_line, end_line, path):\n\n # history = ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get a list of commits in which the given lines of a given file were modified.
def get_commits_that_modified_line(self, start_line, end_line, path): # history = self.git_repo2.git.log('-L', f'{start_line},{end_line}:{path}').split('\n') history = subprocess.run(['git', 'log', '-L', f'{start_line},{end_line}:{path}', '--format=\"%H\"', '-s'], capture_output=True, encoding='utf_8')...
[ "def get_commits_that_modified_function(self, function_name, path):\n\n\n history = subprocess.run(['git', 'log', '-L', f':{function_name}:{path}', '--format=\\\"%H\\\"', '-s'], capture_output=True, encoding='utf_8').stdout.split('\\n')\n modified_in_commits = [line for line in history if len(line) > ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get a list of commits in which a function was modified.
def get_commits_that_modified_function(self, function_name, path): history = subprocess.run(['git', 'log', '-L', f':{function_name}:{path}', '--format=\"%H\"', '-s'], capture_output=True, encoding='utf_8').stdout.split('\n') modified_in_commits = [line for line in history if len(line) > 0] ...
[ "def get_commits():\n repo = git.Repo(\".\")\n commits = list(repo.iter_commits())\n return commits", "def get_commits(self):\n return get_commits(self.old, self.new, self.ref)", "def get_commits_that_modified_line(self, start_line, end_line, path):\n\n # history = self.git_repo2.git.log(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Checks if an interval is contained in a list of intervals.
def interval_contained_in_list(list_intervals, interval):
    for (a, b) in list_intervals:
        if a <= interval[0] and interval[1] <= b:
            return True
    return False
[ "def __contains__(self, interval):\n return interval in self._driver", "def _intersects_with(cls, intervals: CommonIntervals, ci: CommonInterval) -> IntervalList:\n return [other for other in intervals\n if CommonInterval.intersect(ci, other) and ci.first_end <= other.first_end]", "...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Inserts an interval in a list of intervals.
def insert_interval_in_list(list_intervals, interval): merge_left, merge_right = False, False for (a, b) in list_intervals: if b == interval[0] - 1: merge_left = True merge_left_pair = (a, b) if a == interval[1] + 1: merge_right = ...
[ "def insert_interval(intervals, new_interval):\n length = len(intervals)\n if length < 1:\n return [new_interval]\n\n i, start, end, merged = 0, new_interval.start, new_interval.end, []\n\n while i < length and intervals[i].end < start:\n merged.append(intervals[i])\n i += 1\n\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Find files/folders that are modified together (i.e. in the same commit). Create an edge between them, and update its value.
def analyze_correlation_treecommit_graph(self, pairs_of_modified_files): for (node1, node2) in pairs_of_modified_files: if node1 in self.repo_files_path and node2 in self.repo_files_path: # Find common prefix path_prefix = os.path.commonpath([node1, nod...
[ "def test_change_two_files(self):\n tree = self.make_example_branch()\n self.build_tree_contents([\n ('goodbye', 'baz2\\n'),\n ('hello', 'foo2\\n\\n')])\n output = self.run_bzr('diff --stat-dir', retcode=1)[0]\n self.assertEqualDiff(output, '''\\\n . | 5 +++--\n 1...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Same as analyze_correlation_commit_lines_graph() but performs the computations concurrently.
def analyze_correlation_commit_lines_graph_concurent(self, single_line=None): cwd = os.getcwd() os.chdir(self.repo_folder) commit_to_lines = {} # Print analyzing all the lines of the repo print('Print analyzing all the lines of the repo') file_lines = [] ...
[ "def analyze_correlation_treecommit_graph(self, pairs_of_modified_files):\n\n for (node1, node2) in pairs_of_modified_files:\n \n if node1 in self.repo_files_path and node2 in self.repo_files_path:\n\n # Find common prefix\n path_prefix = os.path.commonpath...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute the correlation between a file and another one in the commit graph, based on the value of their edge. Correlation = value of edge / max value of edge for this node
def compute_correlation(self, node_name, commit_graph, method='basic', alpha=0.5): number_modifications = commit_graph.nodes[node_name]["number_modifications"] neighbors_correlation = [] for neighbor in commit_graph.neighbors(node_name): number_modifications_same_commit = commit_g...
[ "def analyze_correlation_treecommit_graph(self, pairs_of_modified_files):\n\n for (node1, node2) in pairs_of_modified_files:\n \n if node1 in self.repo_files_path and node2 in self.repo_files_path:\n\n # Find common prefix\n path_prefix = os.path.commonpath...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parses the neighbors_correlation object created in compute_correlation() to merge and remove useless intervals.
def parse_neighbors_correlation(self, neighbors_correlation): correlation_intervals = {} for neighbor, correlation, num_mod in neighbors_correlation: filepath, line = neighbor.split(':') line = int(line) if filepath not in correlation_intervals: co...
[ "def remove_redundancies(self):\n start = timeit.default_timer()\n nrows_before = len(self.all_geometries.index)\n df = self.all_geometries.copy()\n df = df.round(10)\n og_cols = df.columns.tolist()\n # sort interatomic distance columns according to alphabetized bond types\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute the correlation between a file/folder and another one in the commit TreeGraph, based on the value of their edge. Correlation = value of edge / max value of edge for this node
def compute_same_level_correlation(self, node_path): def compute_same_level_correlation_iteration(tree_graph, splitted_path): if len(splitted_path) == 1 and splitted_path[0] in tree_graph.kids: self.compute_correlation(splitted_path[0], tree_graph.graph) elif len(splitt...
[ "def analyze_correlation_treecommit_graph(self, pairs_of_modified_files):\n\n for (node1, node2) in pairs_of_modified_files:\n \n if node1 in self.repo_files_path and node2 in self.repo_files_path:\n\n # Find common prefix\n path_prefix = os.path.commonpath...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a dictionary containing, for each file, a score saying whether it should be included in a given commit.
def compute_files_that_should_be_in_commit(self, commit_hash): similar_commits = {} potential_nodes = set() # Get list of files modified in commit modified_files = [] modified_files_dict = {} for commit in pydriller.Repository(self.repo_folder, single=commit_hash).trave...
[ "def git_annotate_file_order(commits):\n file_commits = collections.defaultdict(list)\n\n for k, c in commits.items():\n if 'order' in c:\n for fname in c['files']:\n file_commits[fname].append((c['order'], k))\n c['file_order'] = {} # Use this as oppty to track ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a dataframe, with files as rows, commits as columns. The value in a cell is 0 if a file was not in a commit, 1 otherwise.
def create_commits_dataframe(self): files_commits = {} current_length = 0 columns = [] pbar = tqdm.tqdm(total=self.total_commits) for commit in self.repository_mining.traverse_commits(): current_length += 1 columns.append(commit.hash) for m...
[ "def create_commits_dataframe_lines(self):\n\n columns = []\n\n pbar = tqdm.tqdm(total=self.total_commits)\n for commit in self.repository_mining.traverse_commits():\n\n columns.append(commit.hash)\n\n pbar.update(1)\n pbar.close()\n\n\n dataframe_list = []\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Same as create_commits_dataframe() but with lines as rows instead of files.
def create_commits_dataframe_lines(self): columns = [] pbar = tqdm.tqdm(total=self.total_commits) for commit in self.repository_mining.traverse_commits(): columns.append(commit.hash) pbar.update(1) pbar.close() dataframe_list = [] index = [] ...
[ "def create_commits_dataframe(self):\n\n files_commits = {}\n current_length = 0\n columns = []\n\n pbar = tqdm.tqdm(total=self.total_commits)\n for commit in self.repository_mining.traverse_commits():\n\n current_length += 1\n columns.append(commit.hash)\n\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a list of the names of all the methods included in a python file.
def find_methods_in_python_file(self, file_path): methods = [] o = open(file_path, "r", encoding='utf-8') text = o.read() p = ast.parse(text) for node in ast.walk(p): if isinstance(node, ast.FunctionDef): methods.append(node.name) print(metho...
[ "def listMethods(self):\n methodNames = self.funcs.keys()\n methodNames.sort()\n return methodNames", "def _listOfMethods(self, lookinside):\n\t\ttry:\n\t\t\tif lookinside:\n\t\t\t\treturn dir(__import__(lookinside, globals={}, locals={}, fromlist=[], level=-1))\n\t\texcept ImportError:\n\t\t...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Same as create_commits_dataframe() but with functions instead of files as rows.
def create_commits_dataframe_functions(self): columns = [] pbar = tqdm.tqdm(total=self.total_commits) for commit in self.repository_mining.traverse_commits(): columns.append(commit.hash) pbar.update(1) pbar.close() dataframe_list = [] index =...
[ "def create_commits_dataframe(self):\n\n files_commits = {}\n current_length = 0\n columns = []\n\n pbar = tqdm.tqdm(total=self.total_commits)\n for commit in self.repository_mining.traverse_commits():\n\n current_length += 1\n columns.append(commit.hash)\n\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Performs a dimensionality reduction on a given dataframe, using the given method.
def dimensionality_reduction(self, df, method='tSNE'): if method == 'tSNE': tsne = sklearn.manifold.TSNE(n_components=2, perplexity=5, metric='precomputed') embedded_data = tsne.fit_transform(df) elif method == 'MCA': df.replace({0: "False", 1: "True"}, inp...
[ "def applyier(df, *args, **kwargs):\n # pandas default implementation doesn't know how to handle `dtypes` keyword argument\n kwargs.pop(\"dtypes\", None)\n df = cls.frame_wrapper(df)\n result = fn(df, *args, **kwargs)\n\n if (\n not isinstance(re...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Computes a distance matrix using the Jaccard distance on the input dataframe.
def get_distance_matrix(self, df): dist = sklearn.neighbors.DistanceMetric.get_metric('jaccard') distance_matrix = dist.pairwise(df.iloc[:,:].to_numpy()) print(f'Distance matrix : {distance_matrix}') print(f'{len(distance_matrix)}, {len(distance_matrix[0])}') distance_df = pd.D...
[ "def calculate_distance_matrix(data_frame, columns):\n\n # Initialise empty distance matrix\n num_rows = len(data_frame)\n distance_matrix = np.zeros((num_rows, num_rows))\n\n x_inds, y_inds = np.triu_indices(len(data_frame))\n\n for c in columns:\n distance_matrix[x_inds, y_inds] += calculate...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Clusters a dataframe using a given method.
def cluster_dataframe(self, df, method='HDBSCAN', distance_matrix=True, min_size=2, max_eps=None, join_clusterless_samples=True): if method == 'HDBSCAN': clusterer = hdbscan.HDBSCAN(min_cluster_size=2, cluster_selection_epsilon=0.5) clusterer.fit(df) elif method == 'OP...
[ "def get_cluster(df, cluster_method):\r\n df_sub = df.iloc[0:,\r\n [df.columns.get_loc(\"Sharpe\"),\r\n df.columns.get_loc(cluster_method)]]", "def dynamicTreeCut(distance_df, func='hybrid', method='average', **cluster_kws):\n stats = importr('stats')\n dynamicT...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Counts the number of common commits between two clusters. Takes a dataframe containing the commits as columns and the files/lines/... as rows. Takes a dict containing the clusters.
def count_clusters_common_commits(self, df, clusters, lines=False): clusters_extended = {} for key, value in clusters.items(): number_common_commits = 0 for column in df: number_common_files_commit = 0 for filename in value: ...
[ "def count_clusters(dsetclusters, dsetreads):\n\n countdict = {\n 'All': sum(dsetclusters['morethan1']['All']['info_dict'].values()),\n 'PAS': sum(dsetclusters['morethan1']['wPAS']['info_dict'].values())}\n\n return countdict", "def _count_cluster_reannotations(operons: List[genes.Operon],\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Print all the commits of a repo.
def print_commits(self):
    for commit in self.repository_mining.traverse_commits():
        print(f'Commit : {commit.hash}')
        print(f'Parents : {commit.parents}')
[ "def all_commits(repo: Optional[str] = None) -> List[str]:\n with Repo.open(repo) as _repo:\n return _repo.scm.list_all_commits()", "def collect_commits(self, args):\n\t\t# call a get_repo function\n\t\trepo_list = self.get_repo(args)\n\t\tprint(\"\\n\\tRepositories:\\n \", repo_list)\n\t\ttry:\n\t\t\tf...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute the entropy of a commit graph.
def compute_entropy(self, commit_graph): # Entropy computation is not perfect # * New size won't be the sum of old sizes exactly # * We have to take into account the times when node1 and node2 were modified # together with one of their neighbor entropy = 0 for node in ...
[ "def compute_entropy(node):\r\n total = len(node)\r\n appearance = sum(node)\r\n not_appearance = len(node) - sum(node)\r\n entropy = 0\r\n if appearance > 0:\r\n entropy -= (appearance / total) * math.log(appearance / total, 2)\r\n if not_appearance > 0:\r\n entropy -= (not_appearan...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Merge nodes of commit graph.
def merge_nodes(self, node1, node2, initial_commit_graph, df): new_commit_graph = copy.deepcopy(initial_commit_graph) # Etapes pour merger les nodes # 1. Get list of out connections with a dict # eg. {node3 : 5, node4 : 6} # 2. Get list of in connections with a dict # 3...
[ "def _merge_cfgnodes(self, cfgnode_0, cfgnode_1):\n\n assert cfgnode_0.addr + cfgnode_0.size == cfgnode_1.addr\n new_node = cfgnode_0.merge(cfgnode_1)\n\n # Update the graph and the nodes dict accordingly\n self._model.remove_node(cfgnode_1.block_id, cfgnode_1)\n self._model.remov...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Prints the clusters contained in the file 'name'.
def display_interesting_clusters_extended(self, name): with open(name, "rb") as fp: clusters_extended = pickle.load(fp) interesting_clusters = 0 for cluster, value in clusters_extended.items(): modified_files = [] for function in value[1]: ...
[ "def handle_cluster(file_path):\n # create the output directory if it doesn't exist\n try:\n os.mkdir(\"output\")\n except OSError as e:\n print \"INFO: directory 'output' already exists, will probably overwrite the old data\"\n\n # the paths to the created lonely cluster files gonna be st...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute the tf values for each file in repo.
def compute_tf(voc_to_index, file_to_identifiers): tf = {} for file_path in file_to_identifiers.keys(): tf[file_path] = [0 for _ in range(len(voc_to_index))] for word in file_to_identifiers[file_path]: tf[file_path][voc_to_index[word]] += 1 num_i...
[ "def _compute_tf(self, filename):\n total_terms = len(self._hash_map.keys())\n\n tf_dict = {filename: {}}\n\n for key, value in self._hash_map.iteritems():\n tf = value / float(total_terms)\n tf_dict[filename][key] = float(\"{0:.6f}\".format(tf))\n\n # save tf_dict ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Split a snake or camel case string into its composing words.
def split_sentence(word): # Snake split splitted_snake_sentence = word.split('_') # camel_word = re.sub(r'_(.)', lambda m: m.group(1).upper(), word) splitted_sentence = [] for snake_word in splitted_snake_sentence: camel_words = re.findall(r'.+?(?:(?<=[a-z])(?=[A-Z...
[ "def wordSplit(cls, s, lower=False):\n words = []\n for word in s.split(' '):\n if lower:\n word = word.lower()\n words.append(word.strip())\n return words", "def string_split_2():\n s = 'dog lion snake elephant cow donkey goat duck'\n return s.split...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Runs a semantic analysis on a repo to get a distance matrix containing the cosine distance between each file.
def semantic_analysis(self): file_to_identifiers = self.get_corpus() self.preprocess_words(file_to_identifiers) print(file_to_identifiers) voc_size, voc_to_index = self.compute_voc(file_to_identifiers) tf = self.compute_tf(voc_to_index, file_to_identifiers) idf = sel...
[ "def compute_similarity():\n movie_data = pd.read_csv(\"movie_recsys/datasets/movie_data.csv\")\n\n # Compute TF-IDF representation.\n tfidf = TfidfVectorizer(stop_words=\"english\")\n tfidf_matrix = tfidf.fit_transform(movie_data[\"story\"])\n\n # Compute Cosine Similarity.\n cosine_sim_scores = ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return list of all docs in database.
def find_all(self): return self.documents
[ "def get_all_docs(self):\n docs = []\n cursor = self.db.scores.find({})\n for document in cursor:\n docs.append(document)\n # print(docs)\n return(docs)", "def getAllDocuments():\n return [Document(d) for d in HopperLowLevel.allDocuments()]", "def get_docs():...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the first matching doc. If none is found, return None.
def find_one(self, where_dict):
    for document in self.documents:
        if self.check_document(document, where_dict):
            return document
[ "def one(self, *args, **kwargs):\n bson_obj = self.find(*args, **kwargs)\n count = bson_obj.count()\n if count > 1:\n raise MultipleResultsFound(\"%s results found\" % count)\n elif count == 1:\n try:\n doc = next(bson_obj)\n except StopIte...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a list of matching doc(s).
def find(self, where_dict):
    matching_list = []
    for document in self.documents:
        if self.check_document(document, where_dict):
            matching_list.append(document)
    print(matching_list)
    return matching_list
[ "def matches(self):\n return (SuperfastDocmatch(self, row_dct=row)\n for row in self.documents['rows'])", "def find_all(self):\n return self.documents", "def _get_documents(tx: Transaction, **parameters) -> List[Node]:\n params_strings = []\n if len(parameters) == 0:\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the number of matching docs.
def count(self, where_dict):
    # return len(self.find(where_dict))
    count = 0
    for document in self.documents:
        if self.check_document(document, where_dict):
            count += 1
    return count
[ "def docids_count():", "def document_count(self):\n #return len(self.fake_index_storage.keys())\n raise NotImplementedError()", "def __len__(self):\n with self._index.reader() as reader:\n return reader.doc_count()", "def doc_count(es_client, search_app):\n response = es_cli...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Delete matching doc(s) from the collection.
def delete(self, where_dict):
    remove_list = self.find(where_dict)
    for document in remove_list:
        self.documents.remove(document)
[ "def delete_document(self, collection, query, multiple=False):\n if multiple:\n return self.connection[collection].delete_many(query)\n else:\n return self.connection[collection].delete_one(query)", "def delete(self, where_dict):\n def checker(document):\n ret...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Applies a map_function to each document, collating the results. Then applies a reduce function to the set, returning the result.
def map_reduce(self, map_function, reduce_function):
    map_results = []
    for document in self.documents:
        map_results.append(map_function(document))
    return reduce_function(map_results)
[ "def map_reduce(self, map_function, reduce_function):\n return reduce_function(list(map(map_function, self.collection)))", "def reduce_func(self, reduce_function):\n return reduce_function(self.collection)", "def reduce_function(word_maps):\n # Reduce all the data by combining all the parts tha...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a list of the sorted names of the collections in the database.
def get_names_of_collections(self): return sorted(self.collections.keys())
[ "def get_collections(db):\n res = None\n if db:\n res = db.list_collection_names()\n return res", "def __list_collection__(dbname):\n coll_str = run(\"\"\"mongo %s --eval \"printjson(db.getCollectionNames())\" --quiet\"\"\" % dbname)\n if coll_str:\n collections = json.loads(coll_str)...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert a vote object into minimal CSV line.
def _vote_csv_row(vote):
    rep = rep_log10(vote['reputation'])
    return "%s,%s,%s,%s" % (vote['voter'], vote['rshares'], vote['percent'], rep)
[ "def to_csv(self):\n pass", "def to_csv(self) -> str:\n return \"Movie\\n\\\"{}\\\",{},{},{},{},{}\".format(\n self.get_name(), self.get_runtime(),\n self.get_provider(), self.get_person(),\n self.is_started(), self.is_finished()\n )", "def as_csv(self):\n\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Output message of backtesting engine.
def output(self, msg): print(f"{datetime.now()}\t{msg}")
[ "def output(msg):\r\n sys.stdout.write(msg+\"\\n\")", "def output(msg):\n print(f\"{datetime.now()}\\t{msg}\")", "def output(message):\n sys.stdout.write(message + \"\\n\")\n sys.stdout.flush()", "def output(self,code,msg):\n self.sendLine(\"%s %s\" % (code, msg))", "def print_output(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Clear all data of last backtesting.
def clear_data(self): self.strategy = None self.tick = None self.bar = None self.datetime = None self.algo_count = 0 self.algos.clear() self.active_algos.clear() self.trade_count = 0 self.trades.clear() self.logs.clear() self.dai...
[ "def clear_current_data(self):\n self.current_acc_data.clear()\n self.current_bvp_data.clear()\n self.current_eda_data.clear()\n self.current_hr_data.clear()\n self.current_ibi_data.clear()\n self.current_temp_data.clear()\n self.current_eye_tracking_data.clear()\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Cancel order by vt_orderid.
def cancel_order(self, strategy: SpreadStrategyTemplate, vt_orderid: str): pass
[ "def cancel_order(self, order_id: int) -> None:\n raise NotImplementedError(\"Should implement cancel_order()\")", "def cancel_order(self, **params):\n return self._delete('order', True, data=params)", "async def cancel_order(self, **params):\r\n return await self.client_helper(\"cancel_ord...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Put an event to update strategy status.
def put_strategy_event(self, strategy: SpreadStrategyTemplate): pass
[ "def update(self, event):\n raise NotImplementedError('update event is not implemented')", "def process_status_update(self, event: StrategyStatusChangeEvent):\n self._callnext = (event.status != StrategyStatus.PAUSED.value)\n if event.status == StrategyStatus.PAUSED.value:\n self.o...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute correlation of the model with human similarity judgments. `pairs` is a filename of a dataset where lines are 3-tuples, each consisting of a word pair and a similarity value, separated by `delimiter`. An example dataset is included in Gensim (test/test_data/wordsim353.tsv). More datasets can be found at
def evaluate_word_sims(model, name, pairs, delimiter='\t', restrict_vocab=300000, case_insensitive=True, dummy4unknown=False): ok_vocab = list(model.vocabulary.items()) #restrict_vocab ok_vocab = {w.lower(): v for w, v in ok_vocab} if case_insensitive else dict(ok_vocab) simila...
[ "def pearson_corr(pairs: Sequence[Pair]) -> float:\n X = tuple(p.x for p in pairs)\n Y = tuple(p.y for p in pairs)\n return Chapter04.ch04_ex4.corr(X, Y)", "def match_pair(pair, sim_method):\r\n doc1, doc2 = get_texts(pair)\r\n ents1 = extract_ents(nlp(doc1))\r\n ents2 = extract_ents(nlp(doc2))\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculate score by section, helper for
def log_evaluate_word_analogies(name, section): correct, incorrect = len(section['correct']), len(section['incorrect']) if correct + incorrect > 0: score = correct / (correct + incorrect) logger.info("{} {}: {:.1f}% ({}/{})".format(name, section['section'], 100.0 * score, correct...
[ "def findSectionScore(soup):\n\n #wordCount and score are used for each section, totalScore and totalWords are for the entire article\n wordCount = 0\n sectionScores = {}\n currentSection = ''\n score = 0\n totalScore = 0\n totalWords = 0\n\n # we go through all of the text in the article\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
returns the number of bank accounts
def count_accounts(): return BankAccount.__no_of_accounts
[ "def get_account_count(self):\n self.hive.rpc.set_next_node_on_empty_reply(False)\n if self.hive.rpc.get_use_appbase():\n ret = self.hive.rpc.get_account_count(api=\"condenser\")\n else:\n ret = self.hive.rpc.get_account_count()\n return ret", "def num_withdrawals...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Find a constant that scales F to E.
def findConst(F, E):
    for k in range(2):
        for l in range(2):
            if E[k, l] != 0:
                return F[k, l] / E[k, l]
[ "def as_constant(self, eps=1e-14):\n if self.is_scalar_field():\n maxval = self.f.vector().max() # global (!) maximum value\n minval = self.f.vector().min() # global (!) minimum value\n if (maxval - minval) < eps:\n return maxval\n else:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Process an alarm and return additional data to be added to the alarm data. Client-side configuration must be pushed as the "config" key.
def get_data(self, alarm, config): return {}
[ "def create_alarm(self, config_alarm_info):\n mon_plugin = MonPlugin()\n plugin_uuid = mon_plugin.configure_rest_plugin()\n alarm_uuid = mon_plugin.configure_alarm(config_alarm_info)\n return alarm_uuid", "def _get_alarm_data(self):\n return self._replace_id(self.request.get_dat...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Simple put/get on ranges of rows, hitting multiple sstables
def rangeputget_test(self): cluster = self.cluster cluster.populate(3).start() [node1, node2, node3] = cluster.nodelist() cursor = self.cql_connection(node1).cursor() self.create_ks(cursor, 'ks', 2) self.create_cf(cursor, 'cf') tools.range_putget(cluster, curs...
[ "def range(self, row, start, end):\n row_str = idkey_as_str(row)\n start_str = start.strftime(_TIME_FORMAT)\n end_str = end.strftime(_TIME_FORMAT)\n fmt = u\"Range(%s=%s,%s,%s)\"\n return PQLQuery(fmt % (self.name, row_str, start_str, end_str),\n self.index)...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test wide row slices
def wide_row_test(self): cluster = self.cluster cluster.populate(3).start() [node1, node2, node3] = cluster.nodelist() cursor = self.cql_connection(node1).cursor() self.create_ks(cursor, 'ks', 1) self.create_cf(cursor, 'cf') key = 'wide' for x in xrang...
[ "def test_fixed_axes_count_slice(self):\n self.assert_tensor_equal(\n Selection()[:, :, :, 2:4].apply(sample_tensor()),\n sample_tensor()[:, :, :, 2:4])", "def rowCheck(self, i):\n #row is list of tuples\n #row represents a row of pixels of a photo\n row = self.ar...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initialize a new race registry with no runners entered.
def __init__(self) -> None:
    self.groups = {}
    self.runners = {}
    for c in Registry.CATEGORIES:
        self.groups[c] = []
[ "def initialize(self):\n if self.initialized:\n raise RuntimeError(\"The registry is already initialized\")\n\n for specifier, serializer in self._prematurely.items():\n model = apps.get_model(specifier)\n self._serializers[model] = self._get_serializer(model, serializ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Register runner with email and category.
def register(self, email: str, category: str) -> None: # remove the runner from all categories they are # currently in. for c in Registry.CATEGORIES: if email in self.groups[c]: self.groups[c].remove(email) self.groups[category].append(email) self.grou...
[ "def add_runner(self, runner):\n category = int(runner.get_time() / 10)\n if runner not in self._runners:\n self._runners.append(runner)\n\n if runner.get_time() < 40:\n if category not in self._categories:\n self._categories[category] = []\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
register the student into ClassList
def register(self, student: list) -> None: self.students[student[0]] = student[1:]
[ "def addStudent(self,student):\n self.__classlist.append(student)", "def add_student_to_class(self, student, start_date=None):\n #student_key = student.key()\n #try:\n #self.students_list.index(student_key)\n #except:\n #self.students_list.append(student_key)\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
return the average score of last n times
def calculate_average(self, n: int) -> int:
    total = 0
    counter = 0
    i = 0
    while counter != n:
        total += self.history[i]
        i += 1
        counter += 1
    return total / n
[ "def average_six(n):\n total=0\n for i in range(n):\n total=total+(six_heads())\n return (total/n)", "def average_loss(stats: Stats, n: int = 10) -> float:\n loss = pd.DataFrame(stats[\"loss\"])[\"singleton\"]\n return loss.iloc[-n:].mean()", "def get_mean_score_nth_test():\n cursor = c...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Modify the price with its discount
def discount(self, discount: float) -> None: self.price = self.price * discount
[ "def discount_update(self, discount, actor):\n\n finance = self.cart['finance']\n try:\n # validate discount value\n try:\n discount = Decimal(discount)\n except:\n discount = Decimal(0)\n\n subtotal = finance['prod_cost'] + fin...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Spins off a process that runs as a daemon.
def StartDaemon(self): # To spin off the process, use what seems to be the "standard" way to spin # off daemons: fork a child process, make it the session and process group # leader, then fork it again so that the actual daemon process is no longer # a session leader. # # This is a very simplifi...
[ "def daemonize_start():\n\n if _detach:\n if _fork_and_wait_for_startup() > 0:\n # Running in parent process.\n sys.exit(0)\n\n if sys.platform != 'win32':\n # Running in daemon or monitor process.\n os.setsid()\n\n if _monitor:\n saved_daemoniz...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create household_structures before dispatch, if they don't exist.
def pre_dispatch(self, plot, **kwargs):
    survey = kwargs.get('survey', None)
    if survey:
        surveys = [survey]
    else:
        surveys = self.get_surveys()
    HouseholdStructure = get_model('bcpp_household', 'householdstructure')
    for survey in surveys:
        for house...
[ "def create_structures(dont_load_entities: bool = False) -> object:\n\n if not dont_load_entities:\n load_entities()\n\n default_webhooks = Webhook.objects.filter(is_default=True)\n for corporation in EveCorporationInfo.objects.all():\n EveEntity.objects.get_or_create(\n id=corpora...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Dispatches all member status instances for this subject, e.g. SubjectAbsentee, SubjectUndecided, ...
def dispatch_member_status_instances(self, app_label, registered_subject, user_container, **kwargs):
    member_status_models = self.get_member_status_models(app_label)
    for member_status_cls in member_status_models:
        member_status = member_status_cls.objects.filter(registered_subject=registered_s...
[ "def transition(status_to, ticket_data):\n if status_to == \"In Progress\":\n email_address, result = get_group_details(ticket_data)\n action_group_membership_change(email_address, result[0], ticket_data)", "def UserStatus(self, Status):", "async def _check_registered_sub_status(self):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get labelled preds by thresholding the raw probability values and joining them up
def threshold_predictions(preds, classes, prediction_threshold=0.5):
    labelled_preds = [' '.join([classes[i] for i, p in enumerate(pred) if p > prediction_threshold]) for pred in preds]
    return labelled_preds
[ "def __predict_label(self, label_probs):\n def driver(prob):\n candidate = np.argmax(prob)\n if candidate == 0 and prob[0] > self.model_paras['threshold_positive']:\n return 0\n elif candidate == 2 and prob[2] > self.model_paras['threshold_negative']:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Click button to close the dialog
def click_button_close(self):
    # AutoGen method click_link: None
    self.click_element(self.BUTTON_CLOSE)
[ "def close(self):\n\n Dialog.close(self)\n gui.no_modal_dialog=True", "def on_action_close(self, content):\n self.widget().close()", "def close_alert(self):\n self.nottreal.view.wizard_window.close_alert()", "def close_window(self):\r\n Window.close()", "def closePopup(sel...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Verify comment with given text
def verify_comment_text(self, text=None):
    self.element_text_should_be(self.COMMENT, text)
[ "def test_comment_check_spam(self):\n check_kwargs = {\n # Akismet guarantees this will be classified spam.\n \"comment_author\": \"viagra-test-123\",\n **self.base_kwargs,\n }\n self.assertTrue(self.api.comment_check(**check_kwargs))", "def testComment(self):...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test we fail setup when no dir found.
async def test_setup_fails_on_no_dir(
    hass: HomeAssistant, caplog: pytest.LogCaptureFixture
) -> None:
    with patch(
        "homeassistant.components.python_script.os.path.isdir", return_value=False
    ):
        res = await async_setup_component(hass, "python_script", {})

    assert not res
    assert "Folder...
[ "def test_check_dir_existence_sub_dir_not_found(self):\n self.assertFalse(self.existing_dirs.append('unexpected_dir'))", "def test_default_output_dir_exists():\n\n assert os.path.exists(\"corems_output\")", "def test_DataDirPresent(self):\n\t\tself.assertEqual(os.path.isdir('Data'), True)", "def tes...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that enumerate is accepted and executed.
async def test_using_enumerate(hass: HomeAssistant) -> None:
    source = """
for index, value in enumerate(["earth", "mars"]):
    hass.states.set('hello.{}'.format(index), value)
    """
    hass.async_add_job(execute, hass, "test.py", source, {})
    await hass.async_block_till_done()
    assert hass.states.is_sta...
[ "def test_my_enumerate(self):\r\n self.assertTrue(list(my_enumerate([\"ejona\"])) == list(enumerate([\"ejona\"])))\r\n self.assertFalse(list(my_enumerate([\"ejona\"])) == list(enumerate([\"ejonaejona\"])))\r\n self.assertTrue(list(my_enumerate([3, 33, 11, 44, 67])) == list(enumerate([3, 33, 11,...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test datetime and time modules exposed.
async def test_exposed_modules(
    hass: HomeAssistant, caplog: pytest.LogCaptureFixture
) -> None:
    caplog.set_level(logging.ERROR)
    source = """
hass.states.set('module.time', time.strftime('%Y', time.gmtime(521276400)))
hass.states.set('module.time_strptime', time.strftime('%H:%M', time.strpti...
[ "def test_setup_datetime(self):\n\n # Build the package\n self.run_setup('build')\n\n import stsci.testpackage\n\n assert hasattr(stsci.testpackage, '__setup_datetime__')\n prev = stsci.testpackage.__setup_datetime__\n now = datetime.now()\n # Rebuild\n # So t...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test time.sleep warns once.
async def test_sleep_warns_one(
    hass: HomeAssistant, caplog: pytest.LogCaptureFixture
) -> None:
    caplog.set_level(logging.WARNING)
    source = """
time.sleep(2)
time.sleep(5)
    """
    with patch("homeassistant.components.python_script.time.sleep"):
        hass.async_add_executor_job(execute, hass, "test.py", ...
[ "def _sleep(self):\n self.kill()", "def rand_sleep():\n time.sleep(random.uniform(0.75, 1.5))", "def test_dummy_timer (self):\n with Timeout(None):\n sleep(DELAY)\n sleep(DELAY)", "def test_sleep():\n job = Sleep()\n\n current = {\"index\": 0}\n\n def update_sta...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Retrieve our generic cookie for Subscene. This cookie defines which subtitle language will be returned to us.
def _get_cookie(cls, referer):
    return {"Cookie" : "LanguageFilter=" + cls.SELECTED_LANGUAGE + "; "
                       "ShowSubtitleDetails=true; " +
                       "ShowSubtitlePreview=false;",
            "Referer" : referer}
[ "def get_lang_from_cookie(request, supported):\n from django.conf import settings\n lang_code = request.COOKIES.get(settings.LANGUAGE_COOKIE_NAME)\n if lang_code and lang_code in supported:\n return lang_code\n else:\n return None", "def insert_lang_cookie(response):\n\n if 'lang' in ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Build a referer URL for Subscene. page should start with a single slash, i.e. "/".
def _build_referer(cls, page):
    return "http://" + SUBSCENE_PAGES.DOMAIN + page
[ "def get_page_url(self):\n mission_part = 'mission=' + self.mission\n id_num = self.image_id.split('-')[2]\n id_part = 'roll=E&frame=' + id_num\n page_url = infopage + mission_part + '&' + id_part\n return page_url", "def create_wiki_url(page: str) -> str:\n return f\"https:/...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Performs a simple request, but adds the cookies needed for Subscene. If remove_tabs is true, the function removes all the "\t"s in the returned content.
def _my_perform_request(cls, page, remove_tabs = True):
    referer = cls._build_referer(page)
    cookie = cls._get_cookie(referer)
    content = Utils.PerformRequest(
        # Subscene's server will return 301 code (Redirection) when we
        # request pages with the HOST set to www.subsc...
[ "def main(request, response):\n\n token = \"ArQvBL/jhDJ62HaUm/ak0dIUYDjZAfeCQTXwa92cOrHZbL7R+bhb3qrVO2pHWkgJPgvIzvLX5m3wfaUJfOKY0Q4AAABqeyJvcmlnaW4iOiAiaHR0cHM6Ly93d3cud2ViLXBsYXRmb3JtLnRlc3Q6ODQ0NCIsICJmZWF0dXJlIjogIk9yaWdpbklzb2xhdGlvbkhlYWRlciIsICJleHBpcnkiOiAyMDAwMDAwMDAwfQ==\"\n\n header_order = request....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Will try to locate the series number string in the movie name and extract it, returning the movie name without it, and also a representation of the season number in the format of "SXX".
def _extract_season_number(cls, movie_name):
    # The regex will return the season string. We'll remove it from the
    # movie_name.
    for season, code in SUBSCENE_CONSTS.TV_SERIES_SEASONS.iteritems():
        # Concatenate the "Season" to the number.
        season = ' '.join([season, SUBS...
[ "def _getSeason(self):\n # TODO: Add a RegEx for matching out the Season Number\n pass", "def get_singlename_year(name, sex, year):\n\n #identify filename for that year\n filename = \"names/yob\" + str(year) + \".txt\"\n\n #open the file and search for that name and sex, extracting th...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Removes the year from the movie name. The year is located inside parentheses. Returns None on failure.
def _remove_year(cls, movie_name):
    result = Utils.getregexresults(
        SUBSCENE_REGEX.YEAR_REMOVER, movie_name, False)
    if not result:
        return None
    return result[0]
[ "def extract_year(title: str):\n year = -1\n match = regex.search('\\((\\d{4})\\)$', title.strip())\n if match:\n year = int(match.group(1).strip())\n title = title[:match.start()].strip()\n return title, year", "def get_filename_year(filename):\n new_filename = filename\n filename...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Adds a single movie to the list as a MovieSubStage. Removes the season numbering and the year from the movie_name field.
def _add_movie(movie):
    movie_name = movie['MovieName']
    movie_code = movie['MovieCode']
    # Try to extract the season numbering (it might be a season result).
    movie_name = cls._extract_season_number(movie_name)
    # Remove the year.
    movie_name = ...
[ "def add_movie(movies):\n new_title = get_valid_selection(\"Title\")\n new_year = get_valid_year()\n new_category = get_valid_selection(\"Category\")\n movies.add_movie(Movie(new_title, new_year, new_category, False))\n print(\"{} ({} from {}) added to movie list\".format(new_title, new_category, new...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }