query: string (lengths 9 to 9.05k)
document: string (lengths 10 to 222k)
negatives: list (lengths 19 to 20)
metadata: dict
Initialize class for FontAnalyzer
def __init__(self):
    self.numCharacter = 0
    self.fontStartIdx = 0
    self.fontEndIdx = 0
    self.charIntensityValDic = {}
    self.orderedIdxBasedOnIntensity = []
    self.uKoreanCharList = []
    self.fontName = ''
    self.fontSize = 0.0
    #self.face = freetype.Face()
    ...
[ "def _init(self):\n self.stylesheet = self._get_stylesheet()\n self._register_fonts()", "def __init__(self):\n this = _coin.new_SoFont()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self):\n sel...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set up the Korean characters during the initialization step.
def init_setuKoreanCharacterList(self, uKoreanString):
    encodeList = uKoreanString.encode('cp949')
    self.numCharacter = len(encodeList)/2
    for i in range(self.numCharacter):
        char = encodeList[i*2] + encodeList[i*2+1]
        u_char = char.decode('cp949')
        self.uKoreanChar...
[ "def _init_unicode():\n global _unicode_properties\n global _unicode_key_pattern\n _unicode_properties = _build_unicode_property_table((0x0000, 0x10FFFF))\n _unicode_key_pattern = _build_unicode_key_pattern()", "def __init__(self, encoding):\n self.trans = {}\n for char in 'ÀÁÂẦẤẪẨẬÃĀĂẰẮ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set up the font start and end index during the initialization step.
def init_setFontStartEndIdx(self, startIdx, endIdx):
    self.fontStartIdx = startIdx
    self.fontEndIdx = endIdx
    self.numCharacter = endIdx-startIdx+1
[ "def _init_font(self):\n for i, c in enumerate(C8_FONT):\n self.ram[i * 5: i * 5 + 5] = c", "def __init__(self):\n self.numCharacter = 0\n\n self.fontStartIdx = 0\n self.fontEndIdx = 0\n self.charIntensityValDic = {}\n self.orderedIdxBasedOnIntensity = []\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates DATA_DIRECTORY & OUTPUT_DIRECTORY if they do not exist
def create_directories():
    if not os.path.exists(DATA_DIRECTORY):
        os.makedirs(DATA_DIRECTORY)
    if not os.path.exists(OUTPUT_DIRECTORY):
        os.makedirs(OUTPUT_DIRECTORY)
[ "def check_or_create_output_dir(self):\n if not os.path.exists(self.output_dir):\n os.makedirs(self.output_dir)", "def create_data_directories(self):\r\n\r\n try:\r\n self.dir_variant_raw.mkdir(exist_ok=True, parents=True)\r\n self.dir_variant_effects.mkdir(exist_ok=...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Writes repo issue data to DATA_DIRECTORY, one file per repository. Note that the data files saved are temporary and will be deleted at the end of the script.
def store_issue_data():
    print "iterating over repos and saving closed issue data to data files..."
    repos = get_repos()
    for repo in repos:
        issue_data = get_issue_data(repo)
        with open(DATA_DIRECTORY + "/" + repo['owner'] + "_" + repo['name'], 'w') as outfile:
            json.dump(issue_data,...
[ "def write_issue_report():\n print \"preparing report...\"\n report = open(OUTPUT_DIRECTORY + \"/report-\" + time.strftime(\"%Y-%m-%dT%H:%M:%SZ\") + \".txt\", 'w')\n report_weeks = get_report_weeks()\n report_end_date = get_report_end_date()\n for week_number in range(0, report_weeks):\n week_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets issue data for the given repo
def get_issue_data(repo):
    headers = { 'Authorization': 'token ' + get_settings()["github_access_token"] }
    issues_url = GITHUB_BASE_URL + "repos/" + repo['owner'] + "/" + repo['name'] + "/issues?state=closed&per_page=100&since=" + get_report_start_date()
    json_data = []
    while True:
        try:
            ...
[ "def collect_issues(self, args):\n\t\t# call a get_repo function\n\t\trepo_list = self.get_repo(args)\n\t\tprint(\"\\n\\tRepositories\\n\", repo_list)\n\t\ttry:\n\t\t\tfor repo_name in repo_list:\n\t\t\t\tprint(\"\\n\\t\" + repo_name + \" Repository\")\n\t\t\t\trepo = self.organization.get_repo(repo_name)\n\n\t\t\t...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Loads repository data from repos.json into JSON object
def get_repos():
    try:
        with open("repos.json") as data_file:
            repos = json.load(data_file)
        return repos
    except:
        print "Error loading repos.json"
        sys.exit()
[ "def getGitData(username):\n\n # fetch access token for given username\n conn = create_connection('test.db')\n query = f\"SELECT token from Token WHERE g_username='{username}';\"\n result = execute_read_query(conn, query)\n token = (result[0])[0]\n \n # appropriate header for GitHub API '/usr' ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parses the link header in a GitHub request
def parse_link_header(link):
    links = {}
    linkHeaders = link.split(", ")
    for linkHeader in linkHeaders:
        (url, rel) = linkHeader.split("; ")
        url = url[1:-1]
        rel = rel[5:-1]
        links[rel] = url
    return links
[ "def parse_link_header(header):\n rval = {}\n # split on unbracketed/unquoted commas\n entries = re.findall(r'(?:<[^>]*?>|\"[^\"]*?\"|[^,])+', header)\n if not entries:\n return rval\n r_link_header = r'\\s*<([^>]*?)>\\s*(?:;\\s*(.*))?'\n for entry in entries:\n match = re.search(r_l...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Prepares GitHub issue data into a single-file report, which is timestamped and saved to the OUTPUT_DIRECTORY
def write_issue_report():
    print "preparing report..."
    report = open(OUTPUT_DIRECTORY + "/report-" + time.strftime("%Y-%m-%dT%H:%M:%SZ") + ".txt", 'w')
    report_weeks = get_report_weeks()
    report_end_date = get_report_end_date()
    for week_number in range(0, report_weeks):
        week_end_date = report_e...
[ "def main(git_log):\n df = pd.read_csv(git_log, sep = '|', names = ['commit', 'message', 'author', 'email'])\n df['area'] = df['message'].apply(define_area)\n df['message'] = df['message'].apply(delete_prefix)\n\n # Split commits by areas\n core = df[df['area']==Area.core.value]\n tests = df[df['a...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Deletes all files in the data directory
def cleanup_data_dir():
    print "cleaning up data directory..."
    file_list = [ f for f in os.listdir(DATA_DIRECTORY) ]
    for f in file_list:
        os.remove(DATA_DIRECTORY + "/" + f)
[ "def purge_all_data() -> None:\n _confirm_intent('purge cardbuilder\\'s database and all downloaded data')\n with InDataDir():\n for file in glob.glob('*'):\n os.remove(file)", "def clear_data():\n directory_list = [\"generated/*\", \"pub/static/*\", \"var/cache/*\", \"var/page_cache/*\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a dictionary of settings from settings.txt
def get_settings():
    settings = {}
    try:
        with open('settings.txt', 'r') as settings_file:
            for line in settings_file:
                kv = line.partition("=")
                settings[kv[0]] = kv[2].replace("\n", "")
        return settings
    except:
        print "settings.txt missing or not...
[ "def load_settings():\n settings = {}\n with open('settings.ini', 'r') as f:\n lines = f.readlines()\n for line in lines:\n if '//' not in line and line != '\\n': # ignoring comments and blank lines\n setting = line.split(\"=\")\n settings[setting[0]] = ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets all the arrays
def get_arrays():
    store = Store(array_config_path(), current_app.logger)
    array_dict = store.load_arrays()
    return [a.get_json() for a in array_dict.values()]
[ "def get_array(self): # real signature unknown; restored from __doc__\n pass", "def array(self):\n raise NotImplementedError", "def array_data(self):\r\n self.INC_array=[]\r\n self.local_IEN_array=[]\r\n self.KV_xi_array=[]\r\n self.KV_eta_array=[]\r\n \r\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add an array to the system. The array is specified in the body.
def add_array(json_body=None):
    error_data = validate_array_input(json_body)
    if error_data:
        return make_rest_response(error_data, 400)
    try:
        apitoken, array_id, array_name, purity_version = get_array_info(json_body[HOST], json_body[USERNAME], json_body[PASSWORD])
    except Exception as e:
        ...
[ "def addElementArray(*args, **kwargs):\n \n pass", "def addArray(self, array):\n raise RuntimeError(\"Calling an abstract method\")", "def add_array(self, value):\n if isinstance(value, str):\n self._data += value.encode(\"utf-8\")\n elif isinstance(value, (bytes, bytea...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Update an array in the system. The properties specified in the body will be merged into the array
def update_array(array_id, json_body=None):
    # We don't allow changing the array id in put.
    if ArrayContext.ID in json_body:
        del json_body[ArrayContext.ID]
    store = Store(array_config_path(), current_app.logger)
    array_dict = store.load_arrays()
    if array_id not in array_dict:
        return m...
[ "def update(self, new_body_values):\n self.data = merge_dicts(self.data, new_body_values)\n return self", "def update(self, body):\n self.body = body", "def push_elastic_updates(object_class_name, id_list, recursion_depth):\n myclass = ObjectInfoAPI.get_class_object_from_name(object_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
build a library of available styles
def _build_style_lib(template_dirs):
    template_files = _build_templates_list(template_dirs)
    style_lib = {}
    for style_file in template_files.values():
        style_tpl = open(style_file)
        style_xml = style_tpl.read()
        style_tpl.close()
        style = _build_style(style_xml)
        if not styl...
[ "def generate_style(python_style, ui_style):\n\n return merge_styles([python_style, ui_style])", "def init_style() -> str:\n with open(os.path.join(os.getcwd(), \"gui\", \"style.css\")) as styles:\n return styles.read()", "def _get_stylesheets(settings, start=-1, end=-1):\n\tstylesheets = []\t\t# s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
A reflection is a linear transformation which reflects a vector x with respect to a hyperplane through the origin represented by its normal vector v of unit length. Concretely, the reflection can be considered a linear transformation represented by a matrix P, i.e. `x' = Px`. Params
def reflection(x, u, apply=False):
    # grab dimension of column vector
    N = x.shape[0]
    # then normalize u
    v = u / l2_norm(u)
    if apply:
        # compute projection of x onto v
        proj = projection(x, v, norm=True)
        # and finally reflect
        refl = x - 2*proj
        return refl
    ...
[ "def point_reflector( mesh, **kwargs):\n\n # Setup the defaults\n model_config = dict(reflector_position=[(0.35, 0.42), (0.65, 0.42)], # as percentage of domain size\n reflector_radius=[0.05, 0.05],\n reflector_amplitude=[1.0, 1.0],\n backgr...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The projection of b onto a is the orthogonal projection of b onto a straight line parallel to a. The projection is parallel to a, i.e. it is the product of a constant called the scalar projection with a unit vector in the direction of a.
def projection(b, a, norm=False):
    if norm:
        proj = np.dot(np.dot(a, a.T), b)
    else:
        c = np.dot(a.T, b) / np.dot(a.T, a)
        proj = c * a
    return proj
[ "def proj(a,b):\n return np.dot(a,b) * b / (np.linalg.norm(b)**2)", "def proj(v,u):\r\n prefc = v.dot(u)/u.dot(u)\r\n return prefc*u", "def proj(A,B):\n return A - (A*B).sum()*B/(B**2).sum()", "def projection_matrix(B):\n # return np.eye(B.shape[0]) # <-- EDIT THIS to compute the projec...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
L2 or "euclidean" norm.
def l2_norm(x): return np.sqrt(np.dot(x.T, x))
[ "def l2_norm(x):\n return np.linalg.norm(x)", "def L2Norm(ds):\n if len(ds) != 2:\n raise Exception(\"Expected 2 data sets, got %d.\" % (len(ds),))\n return numpy.sqrt(numpy.sum([x.dot(x) for x in ds[0]-ds[1]]))", "def L2_norm_dist(l1: Union[list, np.ndarray],\n l2: Union[list, n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns True if A is symmetric.
def is_symmetric(A): return np.allclose(A, A.T)
[ "def is_symmetric(self):\n matrix = Matrix(self.array)\n\n if len(self.array) == len(self.array[0]) and matrix == matrix.transposition():\n print(\"Matrix is symmetric\")\n return True\n else:\n print(\"Matrix is not symmetric\")\n return False", "d...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Grabs the diagonal elements of a square matrix A.
def diag(A):
    N = len(A)
    D = np.zeros([N, 1])
    for i in range(N):
        D[i] = A[i, i]
    return D
[ "def diagonal_matrix(adjacency_matrix):\n return(np.diag(sum(adjacency_matrix)))", "def diagonal(a, *parms):\n return a.diagonal()", "def offdiag(A):\n mask = ((th.eye(A.shape[1])) == 0).type(th.float)\n offdiag = A * mask\n norm = th.sqrt(th.sum(offdiag ** 2))\n return norm", "def DiagonalM...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a square matrix whose diagonal elements are the elements of x.
def create_diag(x):
    N = x.shape[0]
    D = np.zeros([N, N])
    for i in range(N):
        D[i, i] = x[i]
    return D
[ "def design_matrix(X):\n return np.c_[np.ones(X.shape[0]), X]", "def euclidean_distance_matrix(x):\n r = np.sum(x*x, 1)\n r = r.reshape(-1, 1)\n distance_mat = r - 2*np.dot(x, x.T) + r.T\n return distance_mat", "def _off_diagonal(x: torch.Tensor) -> torch.Tensor:\n n, m = x.shape\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Fills the diagonal elements of a square matrix A with 1's.
def unit_diag(A):
    m = len(A)
    for i in range(m):
        A[i, i] = 1
    return A
[ "def fill_diagonal_(self, fill_value, wrap=False): # real signature unknown; restored from __doc__\n pass", "def fill_diagonal(a, val):\n if a.ndim < 2:\n raise ValueError(\"array must be at least 2-d\")\n if a.ndim == 2:\n # Explicit, fast formula for the common case. For 2-d arrays, ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Performs consecutive dot products of the arrays in the list l from left to right. For example, given l = [A, B, C], returns `np.dot(np.dot(A, B), C)`.
def multi_dot(l): return reduce(np.dot, l)
[ "def _list_product(l):\n return reduce(lambda x, y: x*y, l, 1)", "def dot(L, K):\n if L == [] or K == []:\n return 0.0\n return L[0] * K[0] + dot(L[1:], K[1:])", "def vec_dot(a, b):\n return sum([va * vb for va, vb in zip(a, b)])", "def apply_consecutive_elements(l, fn):\n return [fn(i, ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates the k'th standard basis vector in R^n.
def basis_vec(k, n):
    error_msg = "[!] k cannot exceed {}.".format(n)
    assert (k < n), error_msg
    b = np.zeros([n, 1])
    b[k] = 1
    return b
[ "def get_unitvector(n,k):\r\n temp = np.zeros(n)\r\n temp[k] = 1\r\n return temp", "def basis(n: int, N: int): # Numpy does provide a method that does this but it's very slow\n vec = np.zeros([N, 1])\n vec[n] = 1.0\n return vec", "def victor_miller_basis(k, prec=10, cusp_only=False, var='q'):\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates an array of k'th standard basis vectors in R^n according to each k in ks.
def basis_arr(ks, n):
    error_msg = "[!] ks cannot exceed {}.".format(n)
    assert (np.max(ks) < n), error_msg
    b = np.zeros([n, n])
    for i, k in enumerate(ks):
        b[i, k] = 1
    return b
[ "def generatekey(k, n):\n mean = [0 for x in range(0, n)]\n cov = np.matrix(np.identity(n), copy=False)\n key = []\n for i in range(0, k):\n tmp = np.random.multivariate_normal(mean, cov)\n key.append(tmp)\n return key", "def x_vec(\n K): # wave numb...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
log into Dribbble and fetch all of your likes using Selenium
def download_dribbble_likes(username, password, output_folder=None, bwd=None):
    # make a new webdriver by default
    should_clean_webdriver = False
    if bwd is None:
        bwd = BaseWebDriver()
        should_clean_webdriver = True
    # save to the current folder by default
    if output_folder is None:
        ...
[ "def log_in(self):\n print(\"Logging in.........\")\n self.driver.get('https://www.instagram.com/accounts/login/')\n time.sleep(1) # pause\n self.driver.find_element_by_xpath(\"//input[@name='username']\").send_keys(self.username)\n self.driver.find_element_by_xpath(\"//input[@nam...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test the reading of Mar345 images
def test_read(self):
    for line in TESTIMAGES.split('\n'):
        vals = line.strip().split()
        name = vals[0]
        dim1, dim2 = [int(x) for x in vals[1:3]]
        mini, maxi, mean, stddev = [float(x) for x in vals[3:]]
        obj = mar345image()
        obj.read(UtilsTest.geti...
[ "def test_read(self):\n for line in TESTIMAGES.split(\"\\n\"):\n vals = line.split()\n name = vals[0]\n dim1, dim2 = [int(x) for x in vals[1:3]]\n mini, maxi, mean, stddev = [float(x) for x in vals[3:]]\n obj = adscimage()\n obj.read(os.path.j...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
insert multiple plant_history records into the plant_history table
def insert_plant_history_record_list(plant_list):
    sql = """ INSERT INTO plant_history(plant_id, humidity, time_recorded) VALUES (%s,%s,%s)"""
    conn = None
    try:
        # read database configuration
        params = config()
        # connect to the PostgreSQL database
        conn = psycopg2.connect(**params...
[ "def insert_log_data(database, parsed_rows):\n session = sessionmaker(bind=database.engine)()\n\n for row in parsed_rows:\n insert_statement = insert(activity_log_table).values(\n activity_id=row.activity_id,\n activity_name=row.activity_name,\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Window the data by angle (i.e., 'Measured Angle (deg)') as specified by key.
def window(data_dict, key='Y', window_size=5, interval=[0, 270]):
    ida = iterable_data_array(data_dict, key)
    angle_ida = iterable_data_array(data_dict, 'Measured Angle (deg)')
    angle_centers = [window_size*i + window_size/2 - interval[0] for i in range(int((interval[1]-interval[0])/window_size))]
    windows = [(c...
[ "def average_over_same_angle(data_dict, key, centers_every = 10, tolerance = 2, ignore_first_n = 100, ignore_end_n = 0):\n\tida = iterable_data_array(data_dict, key)\n\tangle_ida = iterable_data_array(data_dict, 'Measured Angle (deg)')\n\t\n\tcenters = [i*centers_every for i in range(int(360/centers_every) + 1)]\n\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Center the data specified by key to ~zero. This operates by subtracting mean(top_percentile(data), bottom_percentile(data)) from each data point. It is recommended you use symmetric top and bottom percentiles (i.e., 90, 10 or 80, 20), though this is not required.
def center_yaxis(data_dict, key='Y', top_percentile=90, bottom_percentile='symmetric'):
    ida = iterable_data_array(data_dict, key)
    out = data_array_builder()
    if bottom_percentile == 'symmetric':
        bottom_percentile = 100 - top_percentile
    else:
        pass
    for row in ida:
        center = np.mean((np.percentile(row, top...
[ "def calculate_percentiles(self):\n self.percentile_low = np.percentile(self.data, 25)\n self.percentile_high = np.percentile(self.data, 75)", "def preprocess_test_data(test_data, mean_image):\n scale_pixels(test_data)\n #zero centre the data using the mean_image\n test_data -= mean_image",...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Fit data to sine wave with specified periodicity.
def fit_sine(data_dict, anglekey='angle', key='Y', periodicity=1, units='degrees'):
    if units != 'degrees' and units != 'radians':
        raise ValueError('units must be either degrees or radians. Not {}'.format(units))
    ang_ida = iterable_data_array(data_dict, anglekey)
    ida = iterable_data_array(data_dict, key)
    ...
[ "def fit_sin(tt, yy):\n tt = np.array(tt)\n yy = np.array(yy)\n # assume uniform spacing\n ff = np.fft.fftfreq(len(tt), (tt[1] - tt[0]))\n Fyy = abs(np.fft.fft(yy))\n # excluding the zero frequency\n guess_freq = abs(ff[np.argmax(Fyy[1:]) + 1])\n guess_amp = np.std(yy) * 2.**0.5\n guess_o...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Invert the data specified by key.
def invert(data_dict, key):
    out_dict = data_dict.copy()
    ida = iterable_data_array(data_dict, key)
    out = data_array_builder()
    for i in ida:
        out.append(-1*i)
    out_dict.update({key: out.build()})
    return out_dict
[ "def invert_model(self,data):\n if self._type == 'pca':\n return self._invert_pca(data)", "def is_inverted(key):\n ops = parse(key)\n return isinstance(ops[0], el.Invert)", "def dec(self, key: str) -> None:\n if key in self.data:\n if self.data[key]>1:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Average data specified by key at angles (key must be 'Measured Angle (deg)') specified by centers. This is typically used if you are dwelling at each angle from a specified set of angles for a long period of time in the measurement.
def average_over_same_angle(data_dict, key, centers_every=10, tolerance=2, ignore_first_n=100, ignore_end_n=0):
    ida = iterable_data_array(data_dict, key)
    angle_ida = iterable_data_array(data_dict, 'Measured Angle (deg)')
    centers = [i*centers_every for i in range(int(360/centers_every) + 1)]
    windows = [(ce...
[ "def average_by_angle(data):\n\t\n\t# Floors the angle data\n\tfloored_data = data.copy()\n\tlength = len(floored_data)\n\ti = 0\n\twhile i < length:\t\n\t\tfloored_data[i][0] = numpty.floor(floored_data[i][0])\n\t\ti += 1\t\n\t\n\t# Finds the average AR value for each value of floor\n\t\t\n\ttemp_array = numpty.ar...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Invoke checkDocument service with provided text and optional key. If no key is provided, a default key is used. Returns list of Error objects.
def checkDocument(text, key=None):
    global _key
    if key is None:
        if _key is None:
            raise Exception('Please provide key as argument or set it using setDefaultKey() first')
        key = _key
    params = urllib.parse.urlencode({
        'key': key,
        'data': text,
        ...
[ "def checkDocument(text, key=None):\r\n \r\n global _key\r\n if key is None:\r\n if _key is None:\r\n raise Exception('Please provide key as argument or set it using setDefaultKey() first')\r\n key = _key\r\n \r\n params = urllib.urlencode({\r\n 'key': key,\r\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Invoke stats service with provided text and optional key. If no key is provided, a default key is used. Returns list of Metric objects.
def stats(data, key=None):
    global _key
    if key is None:
        if _key is None:
            raise Exception('Please provide key as argument or set it using setDefaultKey() first')
        key = _key
    params = urllib.parse.urlencode({
        'key': key,
        'data': data,
    })
    serv...
[ "def stats(data, key=None):\r\n \r\n global _key\r\n if key is None:\r\n if _key is None:\r\n raise Exception('Please provide key as argument or set it using setDefaultKey() first')\r\n key = _key\r\n\r\n params = urllib.urlencode({\r\n 'key': key,\r\n 'data': data...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Filter a list leaving only Metric objects whose type matches 't'
def filterByType(metrics, t): return [m for m in metrics if m.type == t]
[ "def filter_by_type(list_, filter_type):\n filtered_list = []\n\n for item in list_:\n if type(item) is filter_type:\n filtered_list.append(item)\n\n return filtered_list", "def filter_type_amount(Type,amount,l):\n sol = []\n for item in l:\n item.type_of = item.type_of.str...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Filter a list leaving only Metric objects whose key matches 'k'
def filterByKey(metrics, k): return [m for m in metrics if m.key == k]
[ "def filterByType(metrics, t):\r\n return [m for m in metrics if m.type == t]", "def _filter_observations(self, observations):\n filter_out = set(observations.keys()).difference(\n self._observations_allowlist\n )\n # Remove unwanted keys from the observation list.\n for filter_key in fi...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Easy wrapper for sending a single message to a recipient list. All members of the recipient list will see the other recipients in the 'To' field. Note that the message parameter can be either text or one of the SafeMIMExxx methods listed above. The "extra" argument is used for additional required message settings, with...
def send_mail(subject, message, from_email, recipient_list, cc_list=[], extra={}, fail_silently=False,
              auth_user=settings.EMAIL_HOST_USER, auth_password=settings.EMAIL_HOST_PASSWORD,
              tls=getattr(settings, 'EMAIL_TLS', False), encoding=settings.DEFAULT_CHARSET):
    return send_mass_mail([[subject, messa...
[ "def send_message(from_email, to_list, mime_multipart_mixed_message, settings=None):\n if not settings:\n raise Exception(\"no settings object provided\")\n\n mta = smtplib.SMTP(settings.email_host)\n mta.ehlo()\n mta.starttls()\n mta.login(settings.email_host_user, settings.email_host_passwor...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sends a message to each recipient in the list. Given a datatuple of (subject, message, from_email, recipient_list), sends each message to each recipient list. Returns the number of emails sent. If from_email is None, the DEFAULT_FROM_EMAIL setting is used. If auth_user and auth_password are set, they're used to log in. No...
def send_mass_mail(datatuple, extra={}, fail_silently=False, auth_user=settings.EMAIL_HOST_USER,
                   auth_password=settings.EMAIL_HOST_PASSWORD,
                   tls=getattr(settings, 'EMAIL_TLS', False), encoding=settings.DEFAULT_CHARSET):
    try:
        SMTP = smtplib.SMTP
        if settings.EMAIL_DEBUG:
            SM...
[ "def send_mail(subject, message, from_email, recipient_list, cc_list=[], extra={}, fail_silently=False,\n auth_user=settings.EMAIL_HOST_USER, auth_password=settings.EMAIL_HOST_PASSWORD,\n tls=getattr(settings, 'EMAIL_TLS', False), encoding=settings.DEFAULT_CHARSET):\n return send_mass_mail([[subjec...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Save email data to tempfile
def save_email_data(emaildata):
    f = open(filename, 'w')  # overwritten old data
    f.write(emaildata.encode())
    f.close()
[ "def to_file(self):\n logging.info(\"About to persist %d mails of total %d bytes.\" % (len(self._mailq), self._mailq_bytes))\n \n for (acct_id, mail, bytes) in self._mailq:\n try:\n \n with tempfile.NamedTemporaryFile(\n prefix = \"%s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Perform forced photometry on random positions within a chip. The run method is borrowed directly from ProcessImageForcedTask. It pulls a SourceCatalog containing random points from fetchReferences(), attaches footprints from the exposure, then runs the measurement plugins on those footprints.
def run(self, dataRef):
    exposure = dataRef.get()
    refWcs = exposure.getWcs()
    refCat = self.fetchReferences(exposure)
    measCat = self.measurement.generateMeasCat(exposure, refCat, refWcs)
    self.log.info("Performing forced measurement on %s" % dataRef.dataId)
    print(self.mea...
[ "def run(self, dataRef, coord_file=None, dataset=None, out_root=None):\n lsst.log.debug('start run ,'+time.ctime())\n self.dataset = dataset\n if self.dataset == \"diff\":\n self.dataPrefix = \"deepDiff_\"\n elif self.dataset[:8] == \"deepDiff\":\n self.dataPrefix =...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Write forced source table. dataRef: Data reference from butler; the forced_src dataset (with self.dataPrefix included) is all that will be modified. sources: SourceCatalog to save.
def writeOutput(self, dataRef, sources): dataRef.put(sources, self.dataPrefix + "forced_src")
[ "def writeOutput(self, dataRef, sources):\n dataRef.put(sources, self.dataPrefix + \"forcedRaDec_src\")", "def getSrcData(butler, dataId):\n srcCat = butler.get('deepCoadd_forced_src', dataId, immediate=True,\n flags=lsst.afw.table.SOURCE_IO_NO_FOOTPRINTS)\n calExp = butler.get(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Takes two binary sequences and an offset and returns the number of matching entries and the number of compared entries. d1 & d2: sequences. offset: offset of d2 relative to d1. sample_indices: a list of indices to use for the comparison.
def compare_sequences(d1, d2, offset, sample_indices=None):
    max_index = min(len(d1), len(d2)+offset)
    if sample_indices is None:
        sample_indices = range(0, max_index)
    correct = 0
    total = 0
    for i in sample_indices:
        if i >= max_index:
            break
        if d1[i] == d2[i-offset]:
            ...
[ "def align_sequences(d1, d2,\n num_samples=def_num_samples,\n max_offset=def_max_offset,\n correct_cutoff=def_correct_cutoff,\n seed=None,\n indices=None):\n max_overlap = max(len(d1), len(d2))\n if indices is None:...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a set of random integers between 0 and (size-1). The set contains no more than num_samples integers.
def random_sample(size, num_samples=def_num_samples, seed=None):
    random.seed(seed)
    if num_samples > size:
        indices = set(range(0, size))
    else:
        if num_samples > size/2:
            num_samples = num_samples/2
        indices = set([])
        while len(indices) < num_samples:
            index...
[ "def RandomSmallSet(set_, size_):\n\tprint(\"Selecting a subset of \"+str(set_)+\" of size \"+str(size_))\n\ta=MSet(set_)\n\ta.Load()\n\tb=MSet(set_+\"_rand\")\n\tmols = random.sample(range(len(a.mols)), size_)\n\tfor i in mols:\n\t\tb.mols.append(a.mols[i])\n\tb.Save()\n\treturn b", "def sample_results(results, ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Takes two sequences and finds the offset at which the two sequences best match. It returns the fraction correct, the number of entries compared, and the offset. d1 & d2: sequences to compare. num_samples: the maximum number of entries to compare. max_offset: the maximum offset between the sequences that is checked. correct_cuto...
def align_sequences(d1, d2,
                    num_samples=def_num_samples,
                    max_offset=def_max_offset,
                    correct_cutoff=def_correct_cutoff,
                    seed=None,
                    indices=None):
    max_overlap = max(len(d1), len(d2))
    if indices is None:
        ind...
[ "def compare_sequences(d1, d2, offset, sample_indices=None):\n max_index = min(len(d1), len(d2)+offset)\n if sample_indices is None:\n sample_indices = range(0, max_index)\n correct = 0\n total = 0\n for i in sample_indices:\n if i >= max_index:\n break\n if d1[i] == d...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Verify that postupdate hook was installed (or not, if installed=False)
def assert_postupdate_hooks(path, installed=True, flat=False):
    from glob import glob
    if flat:
        # there is no top level dataset
        datasets = glob(opj(path, '*'))
    else:
        ds = Dataset(path)
        datasets = [ds.path] + ds.subdatasets(result_xfm='paths', recursive=True, state='present')
    ...
[ "def test_post_installs(self):\n pass", "def _CheckForHookApproval(self):\n if self._ManifestUrlHasSecureScheme():\n return self._CheckForHookApprovalManifest()\n else:\n return self._CheckForHookApprovalHash()", "def enable_post_hooks(self) -> bool:\n return se...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Builds GeoJSON with one Feature for each of the "files". Writes the resulting GeoJSON file to the "out_file_name".
def toc_example(layer_cnt, infiles, out_file_name):
    # "toc" is the dictionary that will be encoded to GeoJSON
    toc = {}
    toc["name"] = "NewFeatureType"
    toc["type"] = "FeatureCollection"
    toc["crs"] = {"type": "name",
                  # "properties" : {"name":"urn:ogc:def:crs:OGC:1.3:CRS83"}
                  ...
[ "def kml_multiple_to_geojson(infile_path, outdir_path, geojson_properties={}):\n data = __read_file(infile_path)\n coord_dict = __get_all_coords(data)\n if not os.path.exists(outdir_path):\n os.makedirs(outdir_path) \n for section_id, coords in list(coord_dict.items...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Validates a repository against a room
def validate_repo(room, path, settings):
    log = logging.getLogger(__name__)
    violations = []
    log.info('Validating: ' + path + " with room: " + room.title)
    # Debug, show all required_files before filtering
    log.debug('Listing all required_files before filtering:')
    for rf in room.required_files:
        ...
[ "def test_repository_with_invalid_user_and_repo(self):\n self.instance.repository(None, None)\n\n assert self.session.get.called is False", "def test_repository_with_invalid_repo(self):\n self.instance.repository(\"user\", None)\n\n assert self.session.get.called is False", "def vali...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test `QDateTime.toPython` and `QDateTime.toPyDateTime`
def test_QDateTime_toPython_and_toPyDateTime(method):
    q_datetime = QtCore.QDateTime(NOW)
    py_datetime = getattr(q_datetime, method)()
    assert isinstance(py_datetime, datetime)
    assert py_datetime == NOW
[ "def test_QDate_toPython_and_toPyDate(method):\n q_date = QtCore.QDateTime(NOW).date()\n py_date = getattr(q_date, method)()\n assert isinstance(py_date, date)\n assert py_date == NOW.date()", "def test_QTime_toPython_and_toPyTime(method):\n q_time = QtCore.QDateTime(NOW).time()\n py_time = geta...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test `QDate.toPython` and `QDate.toPyDate`
def test_QDate_toPython_and_toPyDate(method):
    q_date = QtCore.QDateTime(NOW).date()
    py_date = getattr(q_date, method)()
    assert isinstance(py_date, date)
    assert py_date == NOW.date()
[ "def test_QDateTime_toPython_and_toPyDateTime(method):\n q_datetime = QtCore.QDateTime(NOW)\n py_datetime = getattr(q_datetime, method)()\n assert isinstance(py_datetime, datetime)\n assert py_datetime == NOW", "def test_QTime_toPython_and_toPyTime(method):\n q_time = QtCore.QDateTime(NOW).time()\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test `QTime.toPython` and `QTime.toPyTime`
def test_QTime_toPython_and_toPyTime(method):
    q_time = QtCore.QDateTime(NOW).time()
    py_time = getattr(q_time, method)()
    assert isinstance(py_time, time)
    assert py_time == NOW.time()
[ "def test_QDateTime_toPython_and_toPyDateTime(method):\n q_datetime = QtCore.QDateTime(NOW)\n py_datetime = getattr(q_datetime, method)()\n assert isinstance(py_datetime, datetime)\n assert py_datetime == NOW", "def as_qtime(iso_time):\n return QTime.fromString(iso_time, Qt.ISODate)", "def test_Q...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test `QEventLoop.exec_` and `QEventLoop.exec`
def test_qeventloop_exec(qtbot):
    assert QtCore.QEventLoop.exec_ is not None
    assert QtCore.QEventLoop.exec is not None
    event_loop = QtCore.QEventLoop(None)
    QtCore.QTimer.singleShot(100, event_loop.quit)
    event_loop.exec_()
    QtCore.QTimer.singleShot(100, event_loop.quit)
    event_loop.exec()
[ "def test_qthread_exec():\n assert QtCore.QThread.exec_ is not None\n assert QtCore.QThread.exec is not None", "def test_qtextstreammanipulator_exec():\n assert QtCore.QTextStreamManipulator.exec_ is not None\n assert QtCore.QTextStreamManipulator.exec is not None", "def test_basic_run(daq, sig):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test `QThread.exec_` and `QThread.exec`
def test_qthread_exec():
    assert QtCore.QThread.exec_ is not None
    assert QtCore.QThread.exec is not None
[ "def test_qeventloop_exec(qtbot):\n assert QtCore.QEventLoop.exec_ is not None\n assert QtCore.QEventLoop.exec is not None\n event_loop = QtCore.QEventLoop(None)\n QtCore.QTimer.singleShot(100, event_loop.quit)\n event_loop.exec_()\n QtCore.QTimer.singleShot(100, event_loop.quit)\n event_loop.e...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test `QLibraryInfo.location` and `QLibraryInfo.path`
def test_QLibraryInfo_location_and_path():
    assert QtCore.QLibraryInfo.location is not None
    assert (
        QtCore.QLibraryInfo.location(QtCore.QLibraryInfo.PrefixPath)
        is not None
    )
    assert QtCore.QLibraryInfo.path is not None
    assert QtCore.QLibraryInfo.path(QtCore.QLibraryInfo.PrefixPath) i...
[ "def test_QLibraryInfo_LibraryLocation_and_LibraryPath():\n assert QtCore.QLibraryInfo.LibraryLocation is not None\n assert QtCore.QLibraryInfo.LibraryPath is not None", "def _is_system_installed( self ):\n return self._system.test_library(self._library, self._headers)", "def test_get_operating_sys...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test `QLibraryInfo.LibraryLocation` and `QLibraryInfo.LibraryPath`
def test_QLibraryInfo_LibraryLocation_and_LibraryPath():
    assert QtCore.QLibraryInfo.LibraryLocation is not None
    assert QtCore.QLibraryInfo.LibraryPath is not None
[ "def test_QLibraryInfo_location_and_path():\n assert QtCore.QLibraryInfo.location is not None\n assert (\n QtCore.QLibraryInfo.location(QtCore.QLibraryInfo.PrefixPath)\n is not None\n )\n assert QtCore.QLibraryInfo.path is not None\n assert QtCore.QLibraryInfo.path(QtCore.QLibraryInfo.P...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test `QTextStreamManipulator.exec_` and `QTextStreamManipulator.exec`
def test_qtextstreammanipulator_exec():
    assert QtCore.QTextStreamManipulator.exec_ is not None
    assert QtCore.QTextStreamManipulator.exec is not None
[ "def test_qthread_exec():\n assert QtCore.QThread.exec_ is not None\n assert QtCore.QThread.exec is not None", "def test_non_translated_messages_qt(qilinguist_action):\n build_worktree = TestBuildWorkTree()\n _project = build_worktree.add_test_project(\"translateme/qt\")\n qilinguist_action(\"updat...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test scoped and unscoped enum access for qtpy.QtCore.
def test_enum_access():
    assert (
        QtCore.QAbstractAnimation.Stopped
        == QtCore.QAbstractAnimation.State.Stopped
    )
    assert QtCore.QEvent.ActionAdded == QtCore.QEvent.Type.ActionAdded
    assert QtCore.Qt.AlignLeft == QtCore.Qt.AlignmentFlag.AlignLeft
    assert QtCore.Qt.Key_Return == QtCore.Qt....
[ "def test_enum_access():\n assert (\n QtWidgets.QFileDialog.AcceptOpen\n == QtWidgets.QFileDialog.AcceptMode.AcceptOpen\n )\n assert (\n QtWidgets.QMessageBox.InvalidRole\n == QtWidgets.QMessageBox.ButtonRole.InvalidRole\n )\n assert QtWidgets.QStyle.State_None == QtWidget...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test included elements (mightBeRichText) from module QtGui.
def test_qtgui_namespace_mightBeRichText(): assert QtCore.Qt.mightBeRichText is not None
[ "def has_richtext_widget(self):\n return self.has_field([self.rich_text_heading, strip_tags(self.rich_text)])", "def test_texts(self):\n self.assertEqual(self.dlg.texts(), ['WPF Sample Application'])", "def test_remove_disabled_parts_include(self):\n text = 'text <nowiki>tag</nowiki> te...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test existence of `QFlags` typedef `ItemFlags` that was removed from PyQt6
def test_itemflags_typedef():
    assert QtCore.Qt.ItemFlags is not None
    assert QtCore.Qt.ItemFlags() == QtCore.Qt.ItemFlag(0)
[ "def flags(self, index):\n if index.column() == 0:\n # The First Column is just a label and unchangable\n flags = Qt.ItemIsEnabled | Qt.ItemIsSelectable\n elif not index.isValid():\n flags = Qt.ItemFlag(0)\n else:\n childPref = self.index2Pref(index)\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Build a TranslatableModelForm for given model. Returned form class will enforce given language.
def translatable_modelform_factory(language, model, form=TranslatableModelForm, *args, **kwargs):
    if not issubclass(model, TranslatableModel):
        raise TypeError('The model class given to translatable_modelform_factory '
                        'must be a subclass of hvad.forms.TranslatableModel. '
                        ...
[ "def _create_model_form(self):\n global Model\n Model = self.model\n class _ModelForm(ModelForm):\n class Meta:\n model = Model\n \n return _ModelForm", "def translatable_modelformset_factory(language, model, form=TranslatableModelForm,\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Build a TranslatableModelFormSet for given model. Returned formset class will enforce given language.
def translatable_modelformset_factory(language, model, form=TranslatableModelForm,
                                      formfield_callback=None, formset=BaseModelFormSet,
                                      extra=1, can_delete=False, can_order=False,
                                      max_num=None, fields=None, e...
[ "def translatable_modelform_factory(language, model, form=TranslatableModelForm, *args, **kwargs):\n if not issubclass(model, TranslatableModel):\n raise TypeError('The model class given to translatable_modelform_factory '\n 'must be a subclass of hvad.forms.TranslatableModel. '\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Build an inline ModelFormSet for given translatable model. Returned formset class will enforce given language.
def translatable_inlineformset_factory(language, parent_model, model,
                                       form=TranslatableModelForm, formset=BaseInlineFormSet,
                                       fk_name=None, fields=None, exclude=None,
                                       extra=3, can_order=False, can_delete=...
[ "def translatable_modelformset_factory(language, model, form=TranslatableModelForm,\n formfield_callback=None, formset=BaseModelFormSet,\n extra=1, can_delete=False, can_order=False,\n max_num=None, fields...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Defines how to order the given translation queryset. Intended for overriding in user forms. Default behavior is to order lexicographically by language_code.
def order_translations(self, qs): return qs.order_by('language_code')
[ "def apply_ordering(self, query):\n ordering = request.args.get('ordering') or ''\n if ordering:\n order_list = []\n for keyword in ordering.split(','):\n desc, column = keyword.startswith('-'), keyword.lstrip('-')\n if column in self.model._meta.fie...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Cross-validate instance with each of its translations in turn. Also check at least one translation would remain after saving the form.
def clean(self):
    super().clean()  # Trigger combined instance validation
    master = self.instance
    stashed = get_cached_translation(master)
    for form in self.forms:
        set_cached_translation(master, form.instance)
        exclusions = form._get_validation_exclusions()
        ...
[ "def clean(self):\n if any(self.errors):\n return\n\n languages = []\n proficiencies = []\n language_duplicates = False\n\n for form in self.forms:\n if form.cleaned_data:\n print(form.cleaned_data)\n if form.cleaned_data['langua...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Save translation for given translation form. Do it by loading it onto the master object and saving the master object so custom save() behavior is properly triggered.
def _save_translation(self, form, commit=True):
    obj = form.save(commit=False)
    assert isinstance(obj, BaseTranslationModel)
    if commit:
        # We need to trigger custom save actions on the combined model
        stashed = set_cached_translation(self.instance, obj)
        self.inst...
[ "def _save_translation(resource, target_language, user, content):\r\n fb = FormatsBackend(resource, target_language, user)\r\n return fb.import_translation(content)", "def save_data(self, translation, remarks, xmlid, langid, pagenum, audiofile, status=1):\n data = translation.upper()\n Transla...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generate an adaptive lasso prior using y_ols
def build_lasso_prior(nodes, y_ols, E, DT):
    y_sym_ols = unvectorize_matrix(DT @ E @ vectorize_matrix(y_ols), (nodes, nodes))
    adaptive_lasso = np.divide(1.0, np.power(np.abs(make_real_vector(E @ vectorize_matrix(y_sym_ols))), 1.0))
    prior = SparseSmoothPrior(smoothness_param=0.00001, n=len(E @ vectorize_matr...
[ "def build_lasso(alp,X_train,y_train):\n # Training model\n lasso_reg = Lasso(alpha=alp)\n lasso_reg.fit(X_train, y_train)\n\n calculate_rmse(X_train,y_train,lasso_reg)\n \n # Return the model\n return lasso_reg", "def lasso_regression(self, X, y):\n \n self.reg = Lasso().fit(X,...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculate nodal spatial positions based on CLI specs. INPUTS: xyz, a (xmin, xmax, ymin, ymax, zmin, zmax) tuple; numElem, an (xEle, yEle, zEle) int tuple. OUTPUT: pos, a list of lists containing x, y, and z positions.
def calc_node_pos(xyz, numElem):
    import numpy as n
    import warnings as w
    import sys
    if xyz.__len__() != 6:
        sys.exit("ERROR: Wrong number of position range limits input.")
    pos = []
    for i, j in enumerate(range(0, 5, 2)):
        minpos = xyz[j]
        maxpos = xyz[j + 1]
        if maxpo...
[ "def xyz_to_coords_and_element_numbers(xyz: dict) -> Tuple[list, list]:\n coords = xyz_to_coords_list(xyz)\n z_list = [qcel.periodictable.to_Z(symbol) for symbol in xyz['symbols']]\n return coords, z_list", "def position(self):\n\t\t\n\t\tx_all,y_all,z_all = list(),list(),list()\n\t\tfor ant in self.ante...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
write node file using calculated position data. INPUTS: pos, a list of lists of x, y, z positions; nodefile, nodes.dyn; header_comment, what version / syntax of calling command. OUTPUTS: nodes.dyn written (or specified filename).
def writeNodes(pos, nodefile, header_comment):
    nodesTotal = pos[0].__len__() * pos[1].__len__() * pos[2].__len__()
    NODEFILE = open(nodefile, 'w')
    NODEFILE.write("%s\n" % (header_comment))
    NODEFILE.write("*NODE\n")
    NodeID = 0
    for z in pos[2]:
        for y in pos[1]:
            for x in pos[0]...
[ "def write(s,filename,header=\"Opacity file written by optool.particle.write\"):\n\n if (s.np>1):\n raise TypeError('Writing is not supported for multi-particle objects')\n try:\n wfile = open(filename, 'w')\n except:\n raise RuntimeError('Cannot write to file: ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
write element file using calculated position data. INPUTS: pos, a list of lists of x, y, z positions; elefile, elems.dyn; header_comment, what version / syntax of calling command. OUTPUTS: elems.dyn written (or specified filename).
def writeElems(numElem, partid, elefile, header_comment):
    # calculate total number of expected elements
    elemTotal = numElem[0] * numElem[1] * numElem[2]
    ELEMFILE = open(elefile, 'w')
    ELEMFILE.write("%s\n" % (header_comment))
    ELEMFILE.write('*ELEMENT_SOLID\n')
    # defining the elements with outwa...
[ "def _write_type_position_elements(type_element, position_element, filename,\n atomtypes=None):\n type_element.text = \"\\n\"\n position_element.text = \"\\n\"\n coords = open(filename,'r').readlines()\n for line in coords:\n line = line.rstrip()\n atomtype_index, xyz = line.split()...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
check to make sure that nodes exist at (x, y) = (0, 0) so that the focus / peak of an ARF excitation is captured by the mesh
def check_x0_y0(pos):
    import warnings as w
    if not 0.0 in pos[0] and not 0.0 in pos[1]:
        w.warn("Your mesh does not contain nodes at (x, y) = (0, 0)! This "
               "could lead to poor representation of your ARF focus.")
[ "def test_faces_refrence_valid_nodes(st: SpaceTime):\n for f in st.faces:\n for n in f:\n assert n in st.nodes", "def outside_arena():\r\n return not (0 < node.x < bounds[0] and 0 < node.y < bounds[1])", "def _check_that_node_from_body(node):\n n_ports = len(node.out_edges())\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return alphabetanew and alpha from a normal distribution as specified by sd. Default is beta = 0.03. 'alphabeta' is the alpha/beta ratio. If a negative value is returned, it is resampled until positive.
def alphacalc_normal(alphabeta, sd):
    beta = 0.03  # fixed beta in function
    ## get alpha beta to use from normal distribution
    if sd == 0:
        alphabetanew = alphabeta
    else:
        alphabetanew = np.random.normal(loc=alphabeta, scale=sd)
    ## make sure a positive value is returned
    ...
[ "def alphacalc_lognormal(alphabeta, sd_perc,set_beta=None):\n if set_beta==None:\n beta = 0.03 # fixed beta in function\n else:\n beta = set_beta\n #print('beta was set to:',beta)\n \n ## convert sd from percentage to absolute\n sd = alphabeta*sd_perc/100\n \n alphabeta_log...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return alphabetanew and alpha from a lognormal distribution as specified by sd. Default is beta = 0.03. 'alphabeta' is the mean alpha/beta ratio; the sd is supplied as a percentage.
def alphacalc_lognormal(alphabeta, sd_perc, set_beta=None):
    if set_beta == None:
        beta = 0.03  # fixed beta in function
    else:
        beta = set_beta
    #print('beta was set to:', beta)
    ## convert sd from percentage to absolute
    sd = alphabeta*sd_perc/100
    alphabeta_lognormal = np.log...
[ "def alphacalc_normal(alphabeta, sd):\n \n beta = 0.03 # fixed beta in function\n \n ## get alpha beta to use from normal distribution\n if sd == 0:\n alphabetanew = alphabeta\n else:\n alphabetanew=np.random.normal(loc = alphabeta, scale = sd)\n \n ## make sure a positive valu...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return dose_actual from a normal distribution around dose (Gy) as specified by sd (%) and shift (%). Default is dose = 2 Gy, shift = 0%, and sd = 0%. If a negative value is returned it is resampled until positive (use lognormal?). The standard deviation is of the nominal dose.
def fracdose(dose, shift, sd):
    ## get actual dose to use from normal distribution based on shift
    dose_shift = dose + (dose*shift/100)
    ## if sd is zero, then no change to dose
    if sd == 0:
        dose_actual = dose_shift
        return dose_actual
    dose_actual = np.random.normal(loc ...
[ "def get_sd(self):\n variable = self.get_y_variable_name()\n y_values = self.line_data[variable]\n sd = np.std(y_values)\n\n return sd", "def get_sd(df,**kwargs):\n logger.debug(\"Get Standard Deviation...\")\n return df.std()", "def sd(x, na_rm=False):\n # =================...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the SF with input values. Note this is for a single dose delivery. The product of multiple fractions should be taken to give the overall SF.
def SFcalc(alpha, beta, dose):
    SF = np.exp(-(alpha*dose) - (beta*(dose**2)))
    return SF
[ "def fullness_factor(self):\n#Calories must be min 30\n CAL = max(30, self.kcal)\n#PR proteins max 30\n PR = min(30, self.protein)\n#DF fiber 12 max\n fiber = 0 if self.fiber is None else self.fiber\n DF = min(12, fiber)\n#TF total fat 50 max\n TF = min(50, self.lipid)\n FF...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the TCP with input values. Based on cumulative SF and N0
def TCPcalc(sf, n0):
    TCP = np.exp(-n0*sf)
    return TCP
[ "def _build_cip_forward_open(self):\n cip_path_size = 0x02\n cip_class_type = 0x20\n\n cip_class = 0x06\n cip_instance_type = 0x24\n\n cip_instance = 0x01\n cip_priority = 0x0A\n cip_timeout_ticks = 0x0e\n cip_ot_connection_id = 0x20000002\n cip_to_conn...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
A function to create a list of values of length 2n+1, or with set spacing. n is the number of values either side of the mean to return. The values are centred around the mean, m, and have a range extending +/- perc of m. Values returned will not exceed the m +/- perc specified.
def range_list(m, perc=None, dif=None, n=None, spacing=None):
    ## ensure required parameters are passed
    if perc == None and dif == None:
        raise Exception('Need to specify a range with perc or dif')
    if n == None and spacing == None:
        raise Exception('Need to specify number or spacing of output')...
[ "def calculate_mean_on_range(start, end, list) -> float64:\n return float(sum(list[start:end]) / (end - start))", "def get_means(gap =.1, k=5):\n\n means = []\n mu = .9\n for _ in range(k):\n means.append(mu)\n mu = mu-gap\n return means", "def proportion_range(n):\n return [i/n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns (index, value) of the closest match in a list of values. Useful for getting values for a specified dose, i.e. look up the index of the closest dose to that supplied.
def closest_val(mylist,match): return min(enumerate(mylist), key=lambda x:abs(x[1]-match))
[ "def nearest (list, value):\n list = remove_out_of_domain(list)\n array = np.asarray(list)\n\n # find index of nearest list to value\n i = (np.abs(array-value)).argmin()\n return array[i]", "def find_closest(arr, val):\n diff = abs(arr-val)\n ind = int(diff.argmin())\n closest_val = float(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This is used for getting a set of data which is normally distributed but truncated to the range [lim_low, lim_high]. This is useful if I want to use a normal distribution but limit its values. This wrapper function is simpler to use for my purpose than the scipy function directly.
def norm_trunc(lim_low, lim_high, mean, std, size):
    results = sp.stats.truncnorm.rvs((lim_low-mean)/std, (lim_high-mean)/std,
                                     loc=mean, scale=std, size=size)
    return res...
[ "def get_truncated_normal(self,mean=0, sd=1, low=0, upp=10):\n return(truncnorm((low - mean) / sd, (upp - mean) / sd, loc=mean, scale=sd))", "def value_from_trunc_norm(loc, scale, trim_low, trim_high):\n # Determine the trim limits in the altered Gaussian space\n a = (trim_low - loc) / scale\n b =...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
function to fit the NTCP model to supplied data and return the parameters. At some point in the process, if parameter values are not supplied, this function will need calling to determine them, i.e. if data is supplied, then fit the values; if not, then use supplied vals. Function should only return fitted params, not do a...
def ntcp_data_fit(dose_data, ntcp_data, initial_params, ntcp_params):
    #plt.close()  # close any open plots
    ## some example data to fit to and plot
    dose_data = dose_data  #[55,60, 62, 67, 72, 65]
    ntcp_data = ntcp_data  #[0.1,0.15,0.1,0.2,0.3, 0.19]
    ## specify some initial starting values
    initia...
[ "def fit(\n self,\n Xs: List[List[TParamValueList]],\n Ys: List[List[float]],\n Yvars: List[List[float]],\n parameter_values: List[TParamValueList],\n outcome_names: List[str],\n ) -> None:\n pass", "def fit_params(data_file, country, N, plot_fit=False):\n # ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Plot the TCP/NTCP curves. Select n random curves to plot. Can also plot the population with pop_plot=True (default)
def plot_TCP_NTCP(resultsTCP=None, resultsNTCP=None, TCP=True, NTCP=True, n=100,
                  colors={'TCP': 'green', 'NTCP': 'red'}, dark_color=True,
                  pop_plot=True, xlabel='Nominal Dose (Gy)', ylabel='TCP / NTCP',
                  alpha=0.03, plot_points=True, plot_percentiles=(5,...
[ "def plotting(eigenval, eigenvec, population_tsv):\n val = pd.read_csv(eigenval, header= None)\n vec = pd.read_csv(eigenvec, delimiter=' ',header=None, names= ['Sample name','pop','x','y'])\n codes= pd.read_csv(population_tsv,sep='\\t')[['Sample name', 'Sex', 'Superpopulation code']]\n eigen_pop= vec.jo...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This is the lambda that sends the notification email to the user once the dashboard is deployed; it contains the URL to the landing page SageMaker notebook.
def make_dashboard_ready_email_inline_code(self): inline_code_str = dedent(""" import os import re import json import boto3 import textwrap def lambda_handler(event, context): landing_page_url = "https://" + re.sub(r"^(https*://)", "", event["landing...
[ "def index(integration):\n request = app.current_request\n validate_signature(request)\n\n try:\n event = request.headers[\"X-GitHub-Event\"]\n except KeyError:\n raise BadRequestError()\n\n sns_topics = SNS.list_topics()[\"Topics\"]\n topic_arns = {t[\"TopicArn\"].rsplit(\":\")[-1]:...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Loads the pedigree details for a proband.
def load_ped(ped_path, proband_id): families = load_families(ped_path) families = [ f for f in families for x in f.children if x.get_id() == proband_id ] family = families[0] to_line = lambda x: '{}\t{}\t{}\t{}\t{}\t{}\t{}\n'.format(x.family_id, x.get_id(), x.dad_id, x.mom_id, x.get_ge...
[ "def load_family_tree():\n log_info('. loading familyTree data')\n project_dir = get_project_directory()\n family_tree = load_familyTree_dictionary(project_dir)\n for_all_nodes(family_tree, update_node_name)\n for_all_nodes(family_tree, update_node_gender)\n for_all_nodes(family_tree, update_node_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Computes atan(x) with a truncated series expansion of n terms. Runs in the background allowing the machin function to execute.
def atan_series(x, n): xpower = x my_sum = x sign = 1 for i in range(1, n): xpower = xpower * x * x sign = -1 * sign term = sign * xpower / (2 * i + 1) my_sum = my_sum + term #print("Pi is: ", my_sum) return my_sum
[ "def atan(x):\n\n return math.degrees(math.atan(x))", "def _do_atan_taylor(data):\n dtype = data.dtype\n\n tensor_offset = tvm.const(TAN_PI_BY_EIGHT, dtype)\n deno = topi.multiply(data, tvm.const(TAN_PI_BY_EIGHT, dtype))\n deno = topi.add(deno, dc.one_const(dtype))\n molecule = topi.subtract(dat...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Computes pi using Machin's formula. Utilises atan_series.
def machin(n): pi = 4 * (4 * atan_series(0.2,n) - atan_series(1.0/239,n)) return pi
[ "def pi():\n getcontext().prec += 2 # extra digits for intermediate steps\n three = Decimal(3) # substitute \"three=3.0\" for regular floats\n lasts, t, s, n, na, d, da = 0, three, 3, 1, 0, 0, 24\n while s != lasts:\n lasts = s\n n, na = n+na, na+8\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
n is a positive integer; return the calculated cycle length for n.
def cycle_length (n) : assert n > 0 if cache[n] != None : return cache[n] m = n count = 1 while m > 1 : if m < 600000 : if cache[m] != None : cache[n] = cache[m] + count - 1 return cache[n] if (m % 2) == 0 : m = (m // 2)...
[ "def reciprocal_cycles(n):\n res = [0]*(n+1)\n max_length = 0\n d = 0\n for i in range(1, n + 1):\n cycle_length = find_cycle_length(i)\n if cycle_length > max_length:\n max_length = cycle_length\n d = i\n res[i] = d\n return res", "def cycle_length(number...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Retrieves a table in a dataset with the specified schema, and creates it if not found. Returns the table.
def _get_table(self, dataset_name, table_name, schema): dataset_ref = client.dataset(dataset_name) try: client.get_dataset(dataset_ref) except NotFound: logger.info('Dataset {} not found. Creating.'.format(dataset_name)) client.create_dataset(bigquery.Dataset(...
[ "async def ensure_table(schema: dict):\n table_name = schema.get('TableName')\n if not table_name:\n return\n\n exists = await table_exists(table_name)\n if exists:\n return\n\n async with DynamoClientManager() as dynamodb:\n await dynamodb.create_table(**schema)\n waiter ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parse list of filters.
def parse_filters(cls, l_filters): new_filters = [] for fil in l_filters: (fil_id, fil_params), = fil.items() if fil_params != None: new_filters.append(cls.f_create(fil_id, **fil_params)) else: new_filters.append(cls.f_create(fil_id)) ...
[ "def alarms_cmd_parse_filters(filters_args):\n filters = []\n for item in [item for sublist in filters_args for item in sublist if sublist and item]:\n if len(item.split('=',1))==2:\n filters.append(( item.split('=',1)[0], item.split('=',1)[1] ))\n else:\n raise ValueError(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parse list of filter tuples.
def parse_tuples(cls, tuples): tup_filters = [] filters = None for tup_f in tuples: parameters = dict() for (key_id, val) in tup_f.items(): for (param_id, param_val) in val.items(): if param_id == "Filters" or param_id == "Readers": ...
[ "def parse_filter(filter_str):\n filter_lst = []\n op_lst = []\n for filter_elem in filter_str.split('|'):\n for item in filter_elem.split('&'):\n key, _, value = item.partition(':')\n filter_lst.append({key.strip():value.strip().split(',')})\n fo...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Attempts to get the content at `url` by making an HTTP GET request. If the content-type of the response is some kind of HTML/XML, return the text content; otherwise return None.
def simple_get(url): try: with closing(get(url, stream=True)) as resp: if is_good_response(resp): return resp.content else: return None except RequestException as e: log_error('Error during requests to {0} : {1}'.format(url, str(e))) return None
[ "def simple_get(url):\r\n\ttry:\r\n\t\twith closing(get(url, stream=True)) as resp:\r\n\t\t\tif is_good_response(resp):\r\n\t\t\t\treturn resp.content\r\n\t\t\telse:\r\n\t\t\t\treturn None\r\n\texcept RequestException as e:\r\n\t\tlog_error('Error during requests to {0} : {1}'.format(url, str(e)))\r\n\t\treturn Non...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
It is always a good idea to log errors. This function just prints them, but you can make it do anything.
def log_error(e): print(e)
[ "def log_error(e):\r\n\tprint(e)", "def error(what,say):\n print 'ERROR: ', what, say", "def print_api_error(error):\n sys.stderr.write('\\nERROR: %s\\n' % error)", "def error_print():\n print(\"ERROR: Invalid Entry!\")", "def print_error(self):\n print('\\n'.join(self.error_buffer))", ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given the compressed file path as a string, the data is decompressed back to its original form and written to the output file path if provided. If no output file path is provided, the decompressed data is returned as a string.
def decompress(self, input_file_path, output_file_path=None): data = bitarray(endian='big') output_buffer = [] # read the input file try: with open(input_file_path, 'rb') as input_file: data.fromfile(input_file) except IOError: print('Coul...
[ "def decompress(self, compressed_path, reconstructed_path, original_file_info=None):\n shutil.copyfile(compressed_path, reconstructed_path)", "def compress_string(uncompressed_string):\n bytes_buffer = BytesIO()\n with GzipFile(mode='wb', fileobj=bytes_buffer) as f:\n f.write(uncompressed_stri...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Finds the longest match to a substring starting at the current_position in the lookahead buffer from the history window
def findLongestMatch(self, data, current_position): end_of_buffer = min(current_position + self.lookahead_buffer_size, len(data) + 1) best_match_distance = -1 best_match_length = -1 for j in range(current_position + 1, end_of_buffer): start_index = max(0, current_position ...
[ "def longest(string, matches):\n try :return max([m for m in matches if fnmatch(string, m)], key=len)\n except: return None", "def _find_max(self, phrase, string):\n max_index = 0\n regex = self._make_re_from_phrase(phrase)\n matches = regex.finditer(string)\n for match in matche...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Decorator to add migration for task definition dictionary to the ParseTasks class. The function should only take the dict of task definitions as an argument.
def register_migration(base_version, target_version): def migration_decorator(func): """ Return decorated ParseTasks object with _migrations dict attribute Here all registered migrations are inserted """ @wraps(func) def migration(*args): """Decorator fo...
[ "def migration_decorator(func):\n\n @wraps(func)\n def migration(*args):\n \"\"\"Decorator for migration function\"\"\"\n return func(*args)\n\n if not hasattr(ParseTasks, '_migrations'):\n ParseTasks._migrations = {} # pylint: disable=protected-access\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return decorated ParseTasks object with _migrations dict attribute Here all registered migrations are inserted
def migration_decorator(func): @wraps(func) def migration(*args): """Decorator for migration function""" return func(*args) if not hasattr(ParseTasks, '_migrations'): ParseTasks._migrations = {} # pylint: disable=protected-access if not base_version...
[ "def register_migration(base_version, target_version):\n\n def migration_decorator(func):\n \"\"\"\n Return decorated ParseTasks object with _migrations dict attribute\n Here all registered migrations are inserted\n \"\"\"\n\n @wraps(func)\n def migration(*args):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Decorator for migration function
def migration(*args): return func(*args)
[ "def migration_decorator(func):\n\n @wraps(func)\n def migration(*args):\n \"\"\"Decorator for migration function\"\"\"\n return func(*args)\n\n if not hasattr(ParseTasks, '_migrations'):\n ParseTasks._migrations = {} # pylint: disable=protected-access\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Decorator to add parse type for task definition dictionary.
def register_parsing_function(parse_type_name, all_attribs_keys=False): def parse_type_decorator(func): """ Return decorated ParseTasks object with _parse_functions dict attribute Here all registered migrations are inserted """ @wraps(func) def parse_type(*args, **k...
[ "def conversion_function(func):\n\n @wraps(func)\n def convert_func(*args, **kwargs):\n \"\"\"Decorator for parse_type function\"\"\"\n return func(*args, **kwargs)\n\n if not hasattr(ParseTasks, '_conversion_functions'):\n ParseTasks._conversion_functions = {} # pylint: disable=prote...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Decorator for parse_type function
def parse_type(*args, **kwargs): return func(*args, **kwargs)
[ "def conversion_function(func):\n\n @wraps(func)\n def convert_func(*args, **kwargs):\n \"\"\"Decorator for parse_type function\"\"\"\n return func(*args, **kwargs)\n\n if not hasattr(ParseTasks, '_conversion_functions'):\n ParseTasks._conversion_functions = {} # pylint: disable=prote...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Marks a function as a conversion function, which can be called after performing a parsing task. The function can be specified via the _conversions control key in the task definitions.
def conversion_function(func): @wraps(func) def convert_func(*args, **kwargs): """Decorator for parse_type function""" return func(*args, **kwargs) if not hasattr(ParseTasks, '_conversion_functions'): ParseTasks._conversion_functions = {} # pylint: disable=protected-access Pa...
[ "def set_conv_funct(self, function):\n self.convertion_function = function", "def register_converter(self, converter, func):\n self.converter_mapping[converter] = func", "def _convert(value, conversion_function):\n if conversion_function is not None:\n value = conversion_function(value)\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert specified forms from form name strings to objects.
def get_forms(data): forms = {} for datum in data: # Should streamline setUps. Currently in both tuple and dict. try: file = datum[0]['file'] except KeyError: file = datum['inputs']['file'] if file not in forms: ...
[ "def formFactory(form_name):\n try:\n return globals()[form_name]\n except KeyError:\n return None", "def gen_forms(options, logger, args):\n\n if len(args) != 1 or not os.path.isfile(args[0]):\n print \"Please specify one correct file for parse\"\n\n models = parse(options, logge...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests for media fields.
def test_media_fields_in_prompts(): def asserts(item_dict): """Iterate through asserts.""" for key, val in item_dict.items(): for media_type in OdkPromptTest.media_types: if key.startswith(media_type) and val: ...
[ "def test_post_media(self):\n pass", "def test_get_embedded_media_validate_rest_of_fields(self):\n\n self.app.config[\"MULTIPART_FORM_FIELDS_AS_JSON\"] = True\n resource_with_media = {\n \"image_file\": {\"type\": \"media\"},\n \"some_text\": {\"type\": \"string\"},\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }