query
stringlengths
9
9.05k
document
stringlengths
10
222k
negatives
listlengths
19
20
metadata
dict
Add a temporary directory for modules to sys.path.
def modules_tmpdir(tmpdir, monkeypatch): tmp = tmpdir.mkdir('tmp_modules') monkeypatch.syspath_prepend(str(tmp)) return tmp
[ "def setup_sys_path():\n par_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n lnk = os.path.join(par_dir, 'reverse-debugger')\n if not os.path.isdir(lnk): # possibly a symlink\n raise AssertionError(\n \"these tests require ../reverse-debugger to point to \"\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a module for a fake extension.
def test_module(modules_tmpdir, test_app): fake_extension = modules_tmpdir.join('fake_extension.py') fake_extension.write('\n'.join(( 'from henson import Extension', 'class FakeExtension(Extension):', ' def register_cli(self): pass', )))
[ "def _create_module(file_path, file_content):\n # create dummy module import os\n with open(file_path, 'w') as mod_file:\n mod_file.write(file_content)\n\n spec = util.spec_from_file_location(\n os.path.basename(file_path[:-3]), file_path)\n module_obj = util.module...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that HensonCLIDirective.prepare_autoprogram doesn't change prog.
def test_hensonclidirective_doesnt_change_prog(test_directive): test_directive.options['prog'] = 'testing' test_directive.prepare_autoprogram() assert test_directive.options['prog'] == 'testing'
[ "def test_hensonclidirective_sets_prog(test_directive):\n test_directive.prepare_autoprogram()\n assert test_directive.options['prog'] == 'henson --app APP_PATH'", "def test_hensonclidirective_sets_parser(test_directive):\n test_directive.prepare_autoprogram()\n assert test_directive.arguments == ('he...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that HensonCLIDirective.prepare_autoprogram sets the parser.
def test_hensonclidirective_sets_parser(test_directive): test_directive.prepare_autoprogram() assert test_directive.arguments == ('henson.cli:parser',)
[ "def test_hensonclidirective_sets_prog(test_directive):\n test_directive.prepare_autoprogram()\n assert test_directive.options['prog'] == 'henson --app APP_PATH'", "def test_hensonclidirective_doesnt_change_prog(test_directive):\n test_directive.options['prog'] = 'testing'\n test_directive.prepare_aut...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that HensonCLIDirective.prepare_autoprogram sets prog.
def test_hensonclidirective_sets_prog(test_directive): test_directive.prepare_autoprogram() assert test_directive.options['prog'] == 'henson --app APP_PATH'
[ "def test_hensonclidirective_doesnt_change_prog(test_directive):\n test_directive.options['prog'] = 'testing'\n test_directive.prepare_autoprogram()\n assert test_directive.options['prog'] == 'testing'", "def test_hensonclidirective_sets_parser(test_directive):\n test_directive.prepare_autoprogram()\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that HensonCLIDirective.register_cli doesn't fail.
def test_hensonclidirective_register_cli(test_directive): # This will only test that it runs without raising an exception. test_directive.register_cli()
[ "def test_cli_fix():\n assert Cli is BaseCl", "def test_cli_invalid_option(self):\n returncode, output = run_cli(main, \"-x\", merged=True)\n assert returncode != 0\n assert \"Error:\" in output", "def test_cli_usage(self):\n for options in [], [\"-h\"], [\"--help\"]:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that _import_extension returns the extension.
def test_import_extension(test_module): import_path = 'fake_extension:FakeExtension' extension = sphinx._import_extension(import_path) assert issubclass(extension, Extension)
[ "def verify_extension_and_import():\n\n file_path = read_input_file_path()\n file_name, file_extension = os.path.splitext(file_path)\n file_extension = file_extension.replace(\"'))\", \"\")\n print(\"File Extension: \", file_extension)\n if file_extension == \".xes\":\n log = import_xes(file_p...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that setup registers the directive.
def test_setup(): class SphinxApplication: def add_directive(self, directive, cls): self.directive = directive self.cls = cls app = SphinxApplication() sphinx.setup(app) assert app.directive == 'hensoncli' assert app.cls is sphinx.HensonCLIDirective
[ "def setUp(self):\r\n\r\n self.DUT = Component()", "def setUp(self):\r\n\r\n self.DUT = Allocation()", "def setUp(self):\n self.isa = ISA()", "def setUp(self):\n SelTestBase.setUp(self)\n self.addDevice()", "def setUp(self):\n bed_patient = PatientsGenerator(0, 1, 0...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
deserialize callback data into Action, CommandType, data
def deserialize_data(data): deserialized = json.loads(data) action = Action(deserialized[CallbackData.ACTION.value]) command = CommandType(deserialized[CallbackData.COMMAND.value]) data = deserialized[CallbackData.DATA.value] return { CallbackData.ACTION: action, CallbackData.COMMAND...
[ "def deserialize(transition_params):", "def unwrap_callback_data(encoded_data=\"\"):\n delimiter_position = encoded_data.find(\":\")\n\n hash = None\n data = None\n\n if delimiter_position > -1:\n hash = encoded_data[:delimiter_position]\n data = encoded_data[deli...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check if borders intersect
def border_intersection(border, avg_border): # to do: adjustable parameter? return intersected(border[0], border[1], avg_border[0], avg_border[1], 0.6)
[ "def is_inter(inter: Point, border0: Point, border1: Point):\n inter_in_AB = point_in_seg(inter, Point(x=xa, y=ya), Point(x=xb, y=yb))\n if not inter_in_AB:\n return False\n inter_in_border = point_in_seg(inter, border0, border1)\n if not inter_in_border:\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the fraction of the shortest interval that is covered by the intersection of the two
def intersection(begin1, end1, begin2, end2): lower = (end1 <= end2) and (end1 > begin2) bigger = (end1 > end2) and (end2 > begin1) if lower: intersection = end1 - max([begin1, begin2]) smallest = min((end1 - begin1, end2 - begin2)) ans = (intersection / smallest) elif bigger: ...
[ "def calculate_intersection(p1, p2, q1, q2):\n num_s = float((p1[0] - q1[0]) * (q1[1] - q2[1]) - (p1[1] - q1[1]) * (q1[0] - q2[0]))\n den_s = float((p1[0] - p2[0]) * (q1[1] - q2[1]) - (p1[1] - p2[1]) * (q1[0] - q2[0]))\n if (den_s == 0):\n # Intersection undefined\n return (float(\"nan\"), fl...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Correct borders based on averaged borders
def border2average_correction(borders, averaged_borders): if len(borders) == 0: return averaged_borders #if len(averaged_borders) == 0: # return borders # Use the fractional overlap between borders and averaged_borders to construct # a mapping matrix between the two (a possible fix for ...
[ "def _borders(self):\r\n nx, ny = self.ncols-1, self.nrows-1\r\n options = self.options\r\n for ix, col in enumerate(self.worktable):\r\n for iy, cell in enumerate(col):\r\n cell.reformat(**self._cellborders(ix,iy,nx,ny,options))", "def testDetectBorder(self):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Collapse features from the same component based on peaks similarities
def collapse_mzrtgroup(mzrtgroup, code): label2idx = defaultdict(list) for idx, feature in enumerate(mzrtgroup): label2idx[feature.similarity_group].append(idx) unique_labels = list(set(label for label in label2idx)) # to do: not the best way :) # find most intense peaks in each feature id...
[ "def feature_collapsing(features):\n new_features = []\n group_number = 0\n mzrtgroup = []\n for feature in features:\n if feature.mzrtgroup == group_number:\n mzrtgroup.append(feature)\n else:\n # assert feature.mzrtgroup == group_number + 1 # to do: there are a cas...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Collapse features from the same component based on peaks similarities with the use of 'collapse_mzrtgroup'
def feature_collapsing(features): new_features = [] group_number = 0 mzrtgroup = [] for feature in features: if feature.mzrtgroup == group_number: mzrtgroup.append(feature) else: # assert feature.mzrtgroup == group_number + 1 # to do: there are a case, when borde...
[ "def collapse_mzrtgroup(mzrtgroup, code):\n label2idx = defaultdict(list)\n for idx, feature in enumerate(mzrtgroup):\n label2idx[feature.similarity_group].append(idx)\n unique_labels = list(set(label for label in label2idx)) # to do: not the best way :)\n\n # find most intense peaks in each fea...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Point the dish at a particular azimuth and elevation. Blocks until the dish has stopped moving.
def point(self, az,el): print('Moving to position ' + str(az)+' , '+str(el)+'.') while True: self.client.set_azimuth_position(az) self.client.set_elevation_position(el) time.sleep(1) status = self.client.status if status.get("ShutdownError") =...
[ "def move_arm(self):\n target_dx = np.random.uniform(-self._hp.x_range, self._hp.x_range) - self._previous_target_qpos[0]\n target_dy = np.random.uniform(0.12, self.high_bound[2]) - self._previous_target_qpos[1]\n self.step(np.array([target_dx, target_dy, -1]))", "def move(self):\n sel...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test creating user when same user already exist
def test_create_user_already_exists(self): # user data payload = { 'email': 'test@test.com', 'password': 'password12345', 'name': 'Test User' } # create user self.create_user(**payload) # try to create duplicate user by # sendin...
[ "def test_create_duplicated_user(self):\n c = Client()\n c_request = {\n 'email': 'test2@yahoo.com',\n 'username': 'test2@yahoo.com',\n 'password': '2faf3sf3',\n 'first_name': 'firstN',\n 'last_name': 'lastN',\n }\n response = c.post...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that token is not created if user does not exist
def test_create_token_user_does_not_exist(self): payload = {'email': "test@test.com", 'password': "wrongpassword"} # send auth request res = self.client.post(self.TOKEN_URL, payload) # verify response code self.assertEqual(res.s...
[ "def test_create_token_not_existed_user(self):\n payload = {\n \"email\": \"test@gmail.com\",\n \"password\": \"Test1234\"\n }\n\n # we are trying to generate token for not created user\n response = self.client.post(TOKEN_URL, payload)\n self.assertNotIn(\"to...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Task 3 Check if X is divisible by Y (do it in one line of code), print 'X is divisible by Y' or 'X is not divisible by Y'. (1p)
def task3(x: typing.Optional[str] = None, y: typing.Optional[str] = None): try: print(f'x = {x} is divisible by y = {y}' if int(x) % int(y) == 0 else f'x = {x} is not divisible y = {y}') except ValueError: print(f'Invalid input arguments: ({x}, {y})')
[ "def is_divisible(x,y):\n if x % y ==0:\n return true\n else:\n return false", "def get_is_divisible(num1, num2):\n if num1 % num2 == 0:\n return \"Yes\"\n return \"No\"", "def divisible(number):\n\n if number % 2 == 0:\n\n return \"it is divisible by 2\"\n\n elif n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
convert kitti locations, dimensions and angles to corners
def center_to_corner_box2d(centers, dims, angles=None, origin=0.5): # 'length' in kitti format is in x axis. # xyz(hwl)(kitti label file)<->xyz(lhw)(camera)<->z(-x)(-y)(wlh)(lidar) # center in kitti format is [0.5, 1.0, 0.5] in xyz. corners = corners_nd(dims, origin=origin) # corners: [N, 4, 2] ...
[ "def box_label_to_corners(chosen_obj):\n # get 3D bounding box corners\n w = chosen_obj.box.length # for x\n h = chosen_obj.box.width # for y\n height = chosen_obj.box.height\n z = chosen_obj.box.center_z\n\n # corners in non-rotated\n obj_corners = np.array([[-w/2,-h/2],[w/2,-h/2],[-w/2,h/2],[...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This function removes the background from a given image, resulting in a background solely containing the horse head. The background is removed by using a convex hull. The image will be saved in the cropped directory if a photonumber is specified.
def remove_background(img, landmarks, photonumber = None): #check the number of landmarks to define the head pose and the given contour landmarks if len(landmarks) == 44: start = landmarks[0:10] end = landmarks[24:44] elif len(landmarks) == 45: start = landmarks[0:6] e...
[ "def remove_image_background(image):\n image2 = np.copy(image)\n kernel = np.ones((1, 5), np.uint8)\n lines1 = np.copy(image)\n lines1 = cv2.dilate(lines1, kernel, iterations=17)\n lines1 = cv2.erode(lines1, kernel, iterations=17)\n\n kernel = np.ones((5, 1), np.uint8)\n lines2 = np.copy(image)...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Print a sample config file, to pipe into a file
def print_config_file(): print(CONFIG_FILE_CONTENT, end="")
[ "def print_config_file(output_dir, args):\n with open(os.path.join(output_dir, 'config.cfg'), 'w') as f:\n for k, v in vars(args).items():\n f.write(f'{k}={v}\\n')\n f.write(f'device={get_device()}')", "def cc_print_cmd(yaml_file):\n data = yaml.safe_load(yaml_file)\n with open(c...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the category of the practice
def category(self) -> PracticeCategory: return self._arpeggio.category
[ "def category (self):\n return self.__category", "def get_category(product):\n category = product.category\n return category", "def get_category(s):\n\n list_of_cat = ['food', 'clothings', 'gas', 'groceries', 'medical']\n possible_mapping = {'food':{'mcdonalds','orange'}, 'clothings':{'shirts...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Before hook for setting default response properties. This hook simply sets the the response body, status, and headers to the `_default_status`, `_default_body`, and `_default_headers` attributes that are assumed to be defined on the resource object.
def set_resp_defaults(req, resp, resource, params): if resource._default_status is not None: resp.status = resource._default_status if resource._default_body is not None: resp.text = resource._default_body if resource._default_headers is not None: resp.set_headers(resource._defaul...
[ "def mocked_head_requests(self, override_status_code=None):\n resp_mock = create_autospec(requests.Response)\n if override_status_code is not None:\n resp_mock.status_code = override_status_code\n else:\n resp_mock.status_code = self.expected_enum\n self.doc_r_sessi...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Render an incoming mapping using context provided in context using Jinja2. Returns a dict containing rendered mapping.
def render_values(mapping=None, context=None, allow_undefined=False): if not context or not mapping: return mapping # Add in special __context variable that provides an easy way to get access to entire context. # This mean __context is a reserve key word although backwards compat is preserved by m...
[ "def jinja_render(context, template):\n jinja_environment = Environment(undefined=StrictUndefined,\n lstrip_blocks=True,\n trim_blocks=True,\n extensions=['jinja2.ext.loopcontrols',\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function which very simplisticly detect if the provided value contains or is a Jinja expression.
def is_jinja_expression(value): if not value or not isinstance(value, six.string_types): return False for marker in JINJA_EXPRESSIONS_START_MARKERS: if marker in value: return True return False
[ "def _needs_expansion(value):\n return Config.RE_HAS_VAR_REF.match(value) is not None", "def testElif(self):\n template = \"\"\"\n {{ if [var] == 1 }}a\n {{ elif [var] == 2 }}b\n {{ elif [var] == 3 }}c\n {{ elif [var] == 4 }}d\n {{ endif }}\"\"\"\n self.assertEqual(self...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
trains SVR with different gamma and C param and chose the combinantion that results in lowest error on cross validation set
def tune(features, Cexp=1000.0, gammaexp=0.001): performance = {} #vals = [0.1, 0.3, 1.0, 3.0, 10.0] vals = [0.1, 1.0, 10.0] X = XAlltr Xcv = XAllcv for prog_i, feature in enumerate(features): for C in vals: C = C*Cexp for gamma in vals: gamma = ga...
[ "def fast_opt_svr_hyperparams(x, y, cs, epsilons, gammas, validation_method, parameter):\r\n \r\n if validation_method != 'cv' and validation_method != 'midknn':\r\n# print('\\'{0}\\' is unknown. Please check \\'validation_method\\'.'.format(validation_method))\r\n# return 0, 0, 0\r\n sys...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Apply the assertions in the check_every_quote method to all quotes.
def test_all(self): for q in self.quotes: self.check_every_quote(q)
[ "def test_normalize_quotes_1(self):\n text = 'This is a test that shoudln\\'t change anything.'\n clean_text = normalize_quotes(text, default_quote='\"', quotes=None)\n self.assertEquals(clean_text, text)", "def test_quote_picker(self):\n\n self.REQUESTPARSER.quote_picker()\n se...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Plots variables in pairs for pair plots.
def pair_plot(data, label, var_columns, figsize=None): plt.figure(figsize=figsize) tmp = data tmp['label'] = label sns.pairplot(tmp, vars=var_columns, hue='label') plt.show()
[ "def plot_variable_pairs(df):\n plt.figure(figsize=(10,10))\n sns.pairplot(df, kind=\"reg\", plot_kws={\"line_kws\":{\"color\":\"purple\"}, \"scatter_kws\":{\"alpha\": 0.5}})", "def plot_all_two_params(self):\n display_names = list(self._params['optimization_definitions'].keys())\n for display...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Plots a precisionrecall curve.
def precision_recall_plot(prediction_prob, y, figsize=(15, 7)): precision, recall, thresholds = precision_recall_curve(y, prediction_prob) average_precision = average_precision_score(y, prediction_prob) plt.figure(figsize=figsize) step_kwargs = ({'step': 'post'} if 'step' in sig...
[ "def show_precision_recall_curve(classifier, x_test: pd.DataFrame, y_test: pd.DataFrame):\n \n # Get prediction probability estimate\n if hasattr(classifier,\"predict_proba\"):\n y_score = pd.DataFrame(classifier.predict_proba(x_test))[1]\n elif hasattr(classifier,\"decision_function\"):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Helper method to calculate mean of list of Movie namedtuples, round the mean to 1 decimal place
def calc_mean_score(movies): return round(sum(movie.score for movie in movies) /len(movies),1)
[ "def calc_mean_score(movies):\r\n list_of_scores = []\r\n for m in movies:\r\n list_of_scores.append(m.score)\r\n return round(mean(list_of_scores), 1)", "def calc_mean_score(movies):\n ratings = [m.score for m in movies] # accessing the score values in list m\n mean = sum(ratings) / max(1,...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Iterate through the directors dict (returned by get_movies_by_director), return a list of tuples (director, average_score) ordered by highest score in descending order. Only take directors into account with >= MIN_MOVIES
def get_average_scores(directors): result = [] for director,movies in directors.items(): if len(movies) >= MIN_MOVIES: mean_score = calc_mean_score(movies) result.append((director,mean_score)) return sorted(result,key=lambda x: x[1],reverse=True)
[ "def get_average_scores(directors):\r\n return sorted(\r\n [\r\n (director, calc_mean_score(movies))\r\n for director, movies in directors.items()\r\n if len(movies) >= MIN_MOVIES\r\n ],\r\n key=lambda x: x[1],\r\n reverse=True,\r\n )", "def get_m...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Performs the mcastshow command
def show(self, use_json=False, **kwargs): show = self.mcast_show+" -j" if use_json else self.mcast_show show_response = self.send_and_receive(show, self.onos_cli_running) if use_json: return json.loads(show_response[0]) else: return show_response[0]
[ "def tvshow(self, irc, msg, args, options, query):\n # prefer manually passed options, then saved user options\n # this merges the two possible dictionaries, prefering manually passed\n # options if they already exist\n user_options = self.db.get(msg.prefix) or dict()\n options = ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates the header of any show command for any particular s, g
def _get_show_header(self, source, group): header = self.mcast_show_header.replace("<source>", source) header = header.replace("<group>", group) return header
[ "def show_headers():\n return (\n 'Name',\n 'Port',\n 'Scheme',\n 'Certificate file',\n 'Key file',\n 'Indication call',\n 'Indication file',\n 'Log file',\n 'PID',\n 'Start PID',\n 'Creat...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generate color wheel according Middlebury color code
def make_color_wheel(): RY = 15 YG = 6 GC = 4 CB = 11 BM = 13 MR = 6 ncols = RY + YG + GC + CB + BM + MR colorwheel = np.zeros([ncols, 3]) col = 0 # RY colorwheel[0:RY, 0] = 255 colorwheel[0:RY, 1] = np.transpose(np.floor(255*np.arange(0, RY) / RY)) col += RY # YG...
[ "def make_color_wheel():\n RY = 15\n YG = 6\n GC = 4\n CB = 11\n BM = 13\n MR = 6\n\n ncols = RY + YG + GC + CB + BM + MR\n\n colorwheel = np.zeros([ncols, 3])\n\n col = 0\n\n # RY\n colorwheel[0:RY, 0] = 255\n colorwheel[0:RY, 1] = np.transpose(np.floor(255 * np.arange(0, RY) / ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
word segmentation using jieba
def word_segmentation(input): new_input = ','.join(jieba.cut(input)) return new_input
[ "def segment():\n\n\t# read input images from 'in' directory\n\timg = \"data/in.jpg\"\n\n\t# read image, prepare it by resizing it to fixed height and converting it to grayscale\n\timg = prepareImg(cv2.imread(img), 50)\n\t\n\t# execute segmentation with given parameters\n\t# -kernelSize: size of filter kernel (odd ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Runs inference on the image in the given `req`uest. Returns the predicted class as a string and its confidence level. If the confidence level is below a given threshold (defined in the constructor), the predicted class is set to 'bg'. The confidence level is a number between 0 and 1 and is the one that corresponds to t...
def predict(self, req: dict) -> (str, float): # The base-64 string is converted into an image object but this object # cannot be passed to Keras directly. The object is first dumped into a # temp file and then the filename is passed to Keras. try: image_obj = base64_to_image_...
[ "def http_classify(self, req):\n\n if len(req.files) != 0:\n img = np.fromstring(req.files['file'].read(), np.uint8)\n else:\n img = np.fromstring(req.data, np.uint8)\n\n img = cv2.imdecode(img, cv2.IMREAD_UNCHANGED)\n img = cv2.resize(img, (self.Helpers.confs[\"dat...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Decorator for registering a custom ``__subclasscheck__`` method for ``cls``
def register_subclasscheck(cls): def _fn(fn): _subclasscheck_registry[cls] = fn return fn return _fn
[ "def runtime_checkable(cls):\n if not issubclass(cls, Generic) or not cls._is_protocol:\n raise TypeError('@runtime_checkable can be only applied to protocol classes,'\n ' got %r' % cls)\n cls._is_runtime_protocol = True\n return cls", "def __subclasshook__(cls, klass):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Multiplies two binary polynomials.
def BinMult(x: BinPoly, y: BinPoly) -> BinPoly: res = 0 while x: if x & 1: res ^= y x >>= 1 y <<= 1 return res
[ "def multiply(self, poly1, poly2):\n i = self.poly_to_power[str(poly1)]\n j = self.poly_to_power[str(poly2)]\n return self.power_to_poly[(i+j) % (self.field_characteristic - 1)]", "def mulPoly(poly1,poly2):\n if isinstance(poly1,Zero) or isinstance(poly2,Zero):\n return mkZero()\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sequences with leading zeros are degenerate cases. Especially the sequence 0, 0, 0, ..., 1 leads to a feedback polynomial of maximal degree.
def testLeadingZeros(self): for seq in range(1, 130): for zeroes in (1, seq.bit_length(), 2 * seq.bit_length()): self.CompareImplementations(seq, seq.bit_length() + zeroes)
[ "def testLeadingAndTrailingZeros(self):\n for seq in range(1, 130):\n for zeroes in (seq.bit_length(), 2 * seq.bit_length()):\n self.CompareImplementations(seq << zeroes,\n seq.bit_length() + 2 * zeroes)", "def testTrailingZeros(self):\n for seq in range(1, 130...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests sequences with trailing zeroes.
def testTrailingZeros(self): for seq in range(1, 130): for zeroes in (seq.bit_length(), 2 * seq.bit_length(), 3 * seq.bit_length()): self.CompareImplementations(seq << zeroes, seq.bit_length() + zeroes)
[ "def testLeadingAndTrailingZeros(self):\n for seq in range(1, 130):\n for zeroes in (seq.bit_length(), 2 * seq.bit_length()):\n self.CompareImplementations(seq << zeroes,\n seq.bit_length() + 2 * zeroes)", "def test_is_trim_trailing_zeros(self):\n self.asse...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests sequences with leading and trailing zeroes.
def testLeadingAndTrailingZeros(self): for seq in range(1, 130): for zeroes in (seq.bit_length(), 2 * seq.bit_length()): self.CompareImplementations(seq << zeroes, seq.bit_length() + 2 * zeroes)
[ "def testLeadingZeros(self):\n for seq in range(1, 130):\n for zeroes in (1, seq.bit_length(), 2 * seq.bit_length()):\n self.CompareImplementations(seq, seq.bit_length() + zeroes)", "def testTrailingZeros(self):\n for seq in range(1, 130):\n for zeroes in (seq.bit_length(), 2 * seq.bit_leng...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Handle uncheck all button to mark all check boxes as unchecked.
def uncheck_all(self, sender, args): self._set_states(state=False)
[ "def uncheck_all(self):\n self._checked = [False] * len(self._aovs)", "def deselect_all(self):\n for c in self.check_boxes:\n if c.isChecked() is True:\n c.click()", "def clear_all_checkboxes(self, check_box_listception):\n for check_box_list in check_box_listcepti...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Mark selected checkboxes as checked.
def check_selected(self, sender, args): self._set_states(state=True, selected=True)
[ "def set_checked(self, value):\n self.checkbox.setChecked(value)", "def select_all(self):\n for c in self.check_boxes:\n if c.isChecked() is False:\n c.click()", "def setChkBox(self, checked, *locator):\n element = self.driver.find_element(*locator)\n if (el...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Mark selected checkboxes as unchecked.
def uncheck_selected(self, sender, args): self._set_states(state=False, selected=True)
[ "def deselect_all(self):\n for c in self.check_boxes:\n if c.isChecked() is True:\n c.click()", "def uncheck_all(self):\n self._checked = [False] * len(self._aovs)", "def uncheck_all(self, sender, args):\n self._set_states(state=False)", "def unselect_checkbox(se...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generate a facsimile of a nystagmus signal In this helper, all amplitude are measured as the standard deviation of the signal.
def generate_signal(n_times=5000, s_freq=1000, nystagmus_type="pendular", nystagmus_freq=4, curv=0, saccad_freq=.5, dt_sigm=0.1, std_noise=.3, nystagmus_amp=MEAN_AMPLITUDES['nystagmus'], saccad_amp=MEAN_AMPLITUDES['saccad'], low_freq_amp=ME...
[ "def artificial():\n Ns = 5 # Number of sines.\n Amin = 1 # Minimum/Maximum amplitude for the sines.\n Amax = 2\n fs = 1000 # Sampling frequency.\n Tmes = 5 # Measurement time.\n Ttot = 100 # Total time.\n\n Nmes = int(fs...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Simulate a dataset of n_trials oculo signal with facsimile nystagmus.
def load_data(n_trials, n_times, std_noise=.3, display=False, random_state=None): rng = check_random_state(random_state) # Sampling frequency s_freq = 1000 # Mean saccades frequency saccad_freq = .5 # Duration of the saccads in second dt_sigm = 0.1 trends = np.zeros((n_t...
[ "def simulate(self, n_time_steps):\n # simulate all patients\n for patient in self._patients:\n # simulate\n patient.simulate(n_time_steps)\n # record survival time\n value = patient.get_survival_time()\n if not (value is None):\n s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Move to the next entry in the playlist and return the entry
def next(self) -> (PlaylistEntry, None): if isinstance(self._current, PlaylistEntry): self.push_back_complete(self._current) self._current = None if len(self._queue): self._current = self._queue.popleft() elif len(self._complete_queue) > 0 and self._playlist.should_loop(): self.reload_complete() s...
[ "def up_next():\n return jsonify({\"Next\": playlist.pop() })", "def next(self):\n\t\tself.item = next(self.list)", "def getNextItem(self):\n if len(self.items) > 0:\n return self.items.pop(0)\n return None", "def next_song(self):\n # Get current song tuple from listbox\n next_s = ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Verifies that class directory contains all images only. Only supports JPG and PNG images.
def verify_class(self, directory): valid_image_ext = ['.jpg', '.jpeg', '.png'] files = os.listdir(os.path.join(self.datapath, directory)) if len(files) > 0: for file in files: _, ext = os.path.splitext(file) if ext not in valid_image_ext: ...
[ "def _validate_images(node):\n images = ix.api.OfObjectArray()\n node.get_attribute(\"images_and_layers\").get_values(images)\n out_paths = PathList()\n if not images.get_count():\n ix.log_error(\"No render images. Please reference one or more image items\")\n\n for image in images:\n i...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return all ideas of the user ordered by date
def get_queryset(self): return Idea.objects.filter(owner=self.request.user).order_by('-update_date')
[ "def list_all_ideas(request, orderby='latest'):\n \n # Choose the correct ordering type\n if orderby not in IDEA_ORDER_BY:\n model_orderby = IDEA_ORDER_BY[DEFAULT_ORDER_BY]\n else:\n model_orderby = IDEA_ORDER_BY[orderby]\n \n if type(model_orderby) is tuple:\n call_string...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Reshapes and interpolates policy functions for housing adjusters on endogenous wealth grid
def interp_adj(a_adj,c_adj, wealth_endgrid, extrap = True): a_adj_uniform = np.zeros((grid_size_Q*grid_size_M, grid_size_A)) H_adj_uniform = np.zeros((grid_size_Q*grid_size_M, grid_size_A)) c_adj_uniform = np.zeros((grid_size_Q*grid_size_M, grid_size_A)) a_adj_bar ...
[ "def get_endogenous_wealth_grid(\n current_period_policy: np.ndarray, exog_savings_grid: np.ndarray\n) -> np.ndarray:\n endog_wealth_grid = exog_savings_grid + current_period_policy\n\n return endog_wealth_grid", "def interp_no_adj(assets_endgrid_1,cons_1,etas_1): \n\n assets_endgrid ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Reshapes and interps the policy functions for housing nonadjusters on endogenous a assett grid
def interp_no_adj(assets_endgrid_1,cons_1,etas_1): assets_endgrid = assets_endgrid_1.reshape(grid_size_A,\ grid_size_H\ *grid_size_Q*grid_size_M) assets_endgrid = np.transp...
[ "def update_average_policies_sampling(self):\n infos0 = []\n infos1 = []\n self._info_sets_inputs0 = []\n self._info_sets_targets0 = []\n self._info_sets_inputs1 = []\n self._info_sets_targets1 = []\n self._average_policy_tables = [{} for _ in range(self._num_players...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
returns unconstrained next period mortgage m_t+1 as function of a_t+1, h_t and q_t
def eval_mort_policy(t, t_prime_funcs): m_prime_func = np.empty(grid_size_A*grid_size_H\ *grid_size_Q) # loop over values of A_t+1, H_t, Q_t for i in range(len(X_RC_contgp)): # pull out state values for i x_prime,h,q ...
[ "def demand(p, a=200, b=10, d=10, t=np.linspace(1,10,10)):\n\n return 1.0 / b * ( a - p * ( d + t ) / d )", "def momentum_resolution(p) :\n return 0.005", "def final_amt(p, r, n, t):\r\n a = p*(1+r/n)**(n*t)\r\n return a", "def schedule(t, k=20, lam=0.005, limit=10000):\n #return (k * np.exp(-l...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This function generates a graph of the frequencies of the elements in the input array, arr This function utilizes matplotlib and will produce errors if the package is not installed To install matplotlib, use 'pip install matplotlib'
def graph_frequency_histogram(arr, bar_color='green', title='Graph of Frequencies'): plt.style.use('ggplot') dictionary = bf.frequency(arr) keys = dictionary.keys() values = [dictionary[i] for i in keys] x_pos = [i for i in range(len(keys))] plt.bar(x_pos, values, color=bar_color) plt.titl...
[ "def freq(self, freq_arr):\r\n if freq_arr is not None:\r\n self._freq = np.array(freq_arr)\r\n\r\n if self._freq.size is not len(self.tipper):\r\n self._logger.info('length of freq list/array not correct' + \\\r\n ' (%ii instead of %ii)' % (self._fre...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This function returns a scatter plot of the given data Note that input array must be an array of array with each subarray
def graph_scatter(arr, color='green', title='Scatter Plot of Given Points', x_label='X', y_label='Y'): plt.style.use('ggplot') x, y = [], [] for point in arr: x.append(point[0]) y.append(point[1]) fig = plt.figure() axes = fig.add_axes([0,0,1,1]) axes.scatter(x, y, color=color)...
[ "def show_scatterplot(self, array, name=None, *args, **kwargs):\n\n figure = super().show_scatterplot(array, name, show=False, *args, **kwargs)\n figure_image = figure_to_image(figure)\n\n return figure_image", "def display_2D_scatter_plot(dataset, title, xlabel, ylabel, labels = None):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check DUT memory usage and process cpu usage are within threshold.
def test_cpu_memory_usage(duthosts, enum_rand_one_per_hwsku_hostname, setup_thresholds): duthost = duthosts[enum_rand_one_per_hwsku_hostname] # Wait until all critical services is fully started pytest_assert(wait_until(360, 20, 0, duthost.critical_services_fully_started), "All critical ser...
[ "def cpu_check():\n cpu_usage = psutil.cpu_percent(interval=5, percpu=False)\n if cpu_usage > 80:\n subject = \"Error - CPU usage is over 80%\"\n message = email.generate_error_report(subject)\n emails.send(message)", "def _get_cpuunits_usage(self):\n try:\n out, err =...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check DUT memory usage and process cpu usage are within threshold. Disable all counterpoll types except tested one Collect memory and CPUs usage for 60 secs Compare the memory usage with the memory threshold Compare the average cpu usage with the cpu threshold for the specified progress Restore counterpolls status
def test_cpu_memory_usage_counterpoll(duthosts, enum_rand_one_per_hwsku_hostname, setup_thresholds, restore_counter_poll, counterpoll_type, # noqa F811 counterpoll_cpu_threshold, disable_pfcwd): duthost = duthosts[enum_rand_one_per_hwsk...
[ "def cpu_check():\n cpu_usage = psutil.cpu_percent(interval=5, percpu=False)\n if cpu_usage > 80:\n subject = \"Error - CPU usage is over 80%\"\n message = email.generate_error_report(subject)\n emails.send(message)", "def test_cpu_memory_usage(duthosts, enum_rand_one_per_hwsku_hostname...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This method it to extract the valid cpu usage data according to the poll_interval 1. Find the index for the max one for every poll interval, 2. Discard the data if the index is on the edge(0 o the length of program_to_check_cpu_usage 1) 3. If the index is closed in the neighbour interval, only keep the former one 4. Re...
def extract_valid_cpu_usage_data(program_to_check_cpu_usage, poll_interval): valid_cpu_usage_center_index_list = [] poll_number = len(program_to_check_cpu_usage) // poll_interval def find_max_cpu_usage(cpu_usage_list, poll_times): max_cpu_usage = cpu_usage_list[0] max_cpu_usage_index = 0 ...
[ "def test_cpu_memory_usage_counterpoll(duthosts, enum_rand_one_per_hwsku_hostname,\n setup_thresholds, restore_counter_poll, counterpoll_type, # noqa F811\n counterpoll_cpu_threshold, disable_pfcwd):\n duthost = duthosts[enum_rand_one_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initialize a CreateValue object from a json dictionary.
def _from_dict(cls, _dict): args = {} if 'value' in _dict: args['value'] = _dict['value'] else: raise ValueError( 'Required property \'value\' not present in CreateValue JSON') if 'metadata' in _dict: args['metadata'] = _dict['m...
[ "def from_json(cls, json_str: str) -> SampleCreateRequest:\n return cls.from_dict(json.loads(json_str))", "def from_json(self, value):\n raise NotImplementedError", "def from_dict(cls, _dict: Dict) -> 'TemplateValues':\n args = {}\n if 'values_metadata' in _dict:\n args['v...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initialize a DialogNodeOutput object from a json dictionary.
def _from_dict(cls, _dict): args = {} xtra = _dict.copy() if 'generic' in _dict: args['generic'] = [ DialogNodeOutputGeneric._from_dict(x) for x in (_dict.get('generic')) ] del xtra['generic'] if 'modifiers' in ...
[ "def _from_dict(cls, _dict):\r\n args = {}\r\n if 'response_type' in _dict:\r\n args['response_type'] = _dict.get('response_type')\r\n else:\r\n raise ValueError(\r\n 'Required property \\'response_type\\' not present in DialogNodeOutputGeneric JSON'\r\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initialize a DialogNodeOutputGeneric object from a json dictionary.
def _from_dict(cls, _dict): args = {} if 'response_type' in _dict: args['response_type'] = _dict.get('response_type') else: raise ValueError( 'Required property \'response_type\' not present in DialogNodeOutputGeneric JSON' ) if...
[ "def _from_dict(cls, _dict):\r\n args = {}\r\n xtra = _dict.copy()\r\n if 'generic' in _dict:\r\n args['generic'] = [\r\n DialogNodeOutputGeneric._from_dict(x)\r\n for x in (_dict.get('generic'))\r\n ]\r\n del xtra['generic']\r\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initialize a DialogNodeOutputOptionsElement object from a json dictionary.
def _from_dict(cls, _dict): args = {} if 'label' in _dict: args['label'] = _dict.get('label') else: raise ValueError( 'Required property \'label\' not present in DialogNodeOutputOptionsElement JSON' ) if 'value' in _dict: ...
[ "def _from_dict(cls, _dict):\r\n args = {}\r\n if 'response_type' in _dict:\r\n args['response_type'] = _dict.get('response_type')\r\n else:\r\n raise ValueError(\r\n 'Required property \\'response_type\\' not present in DialogNodeOutputGeneric JSON'\r\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initialize a InputData object from a json dictionary.
def _from_dict(cls, _dict): args = {} xtra = _dict.copy() if 'text' in _dict: args['text'] = _dict.get('text') del xtra['text'] else: raise ValueError( 'Required property \'text\' not present in InputData JSON') args.up...
[ "def from_dict(cls, _dict: Dict) -> 'DataIntgFlowJson':\n args = {}\n if 'attachments' in _dict:\n args['attachments'] = PipelineJson.from_dict(_dict.get('attachments'))\n if 'entity' in _dict:\n args['entity'] = DataIntgFlowEntity.from_dict(_dict.get('entity'))\n i...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
toma un conj maximal de un dia num_dia y genera la restriccion acorde
def armar_restr_de_conj_dia(num_dia,conj): global NUM_RESTR restr = "" for evento in conj: if restr != "": restr += " + " dia = str(num_dia-DISPLACEMENT) #pprint(evento) idx_depo =int(evento[K_DEP])-1 depo = deportes[idx_depo] h_in = evento[K_IN] ...
[ "def algoGlouton(self):\n jeuApprentissage = 0.5 # 50% du nombre de coups max sera utilisé pour connaitre le meilleur bras. \n if self.nbCoupsJoue < jeuApprentissage*self.nbCoupsMax:\n res = self.nbCoupsJoue % len(self.listBras)\n else:\n numeroMeilleurBras=-1\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This is used to construct an {tt atmosphere} tool.
def __init__(self, *args, **kwargs): self._swigobj = kwargs.get('swig_object',None) if self._swigobj is None: self._swigobj = _atmosphere()
[ "def atmosphere(self) -> AtmosphereData:\n pass", "def get_atmosphere(self):\n return self.lib.get_atmosphere()", "def makeTooltar(self):\n retval = self.run()\n if retval:\n print('## Run failed. Cannot build yet. Please fix and retry', file=sys.stderr)\n sys.e...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the version of ATM library implemented to this tool.
def getAtmVersion(self): _getAtmVersion_result = _str_dc(self._swigobj.getAtmVersion()) return _getAtmVersion_result
[ "def libVersion(self) -> str:\n return self._libVersion", "def get_version() -> str:\n return VERSION", "def get_version():\n import pkg_resources # part of setuptools\n return pkg_resources.require(\"mbed-ls\")[0].version", "def get_acm_version():\n return float(\".\".join(acm.ShortVersio...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a list of index numbers and corresponding atmosphere types used by the ATM library.
def listAtmosphereTypes(self): _listAtmosphereTypes_result = [_str_dc(_x) for _x in self._swigobj.listAtmosphereTypes()] return _listAtmosphereTypes_result
[ "def _nativeAtmospherics( self ):\r\n\t\tunique_id \t= mxs.blurUtil.uniqueId\r\n\t\tatm\t\t\t= list(self.metaData().value('linkedAtmos'))\r\n\t\tget_atmos\t= mxs.getAtmospheric\r\n\t\tget_effect\t= mxs.getEffect\r\n\t\toutput\t\t= []\r\n\r\n\t\t# collect the atmospherics\r\n\t\tfor i in range( mxs.numAtmospherics )...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
An atmospheric profile is composed of 4 quantities as a function of
def initAtmProfile(self, altitude={'value': float(5000.), 'unit': 'm'}, temperature={'value': float(270.0), 'unit': 'K'}, pressure={'value': float(560.0), 'unit': 'mbar'}, maxAltitude={'value': float(48.0), 'unit': 'km'}, humidity=float(20.0), dTem_dh={'value': float(-5.6), 'unit': 'K/km'}, dP={'value': float(10.0), 'u...
[ "def porosity_profile(img, axis = None, sample_type = None, void_fraction = 1):\n \n \n img = img.copy();\n phi = [];\n \n \n if (sample_type == '3_Phase'):\n for i in img:\n n = i[i > 0].size;\n phi.append(100*np.sum(i[i>0])/n/255);\n \n elif (sample_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add a new spectral window, uniformly sampled, this spectral window having no sideband.
def addSpectralWindow(self, fCenter={'value': float(350), 'unit': 'GHz'}, fWidth={'value': float(0.008), 'unit': 'GHz'}, fRes={'value': float(0.002), 'unit': 'GHz'}): schema = {'fCenter': {'type': 'cDoubleQuant'}, 'fWidth': {'type': 'cDoubleQuant'}, 'fRes': {'type': 'cDoubleQuant'}} doc = {'fCenter': fC...
[ "def windowed(sample: Sample) -> Sample:\n hamming_window = (1 / 2) * (1 + np.cos(2 * pi * (NSV - N_2 / 2) / N_2))\n return Sample(\n phoneme=sample.phoneme,\n file_name=sample.file_name,\n data=hamming_window * sample.data\n )", "def _create_spectral_windows(self, spw_defs):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the reference channel of the given spectral window
def getRefChan(self, spwid=int(0)): schema = {'spwid': {'type': 'cInt'}} doc = {'spwid': spwid} assert _pc.validate(doc,schema), str(_pc.errors) _getRefChan_result = self._swigobj.getRefChan(_pc.document['spwid']) return _getRefChan_result
[ "def get_power_spectrogram_channel(self, n):\n self._verify_get_channel(n)\n\n # np.array helps with duck typing\n return utils._get_axis(np.array(self.power_spectrogram_data),\n constants.STFT_CHAN_INDEX, n)", "def spectrum_window(self):\n window_type = s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the channel frequency for a given grid point for the specified spectral window.
def getChanFreq(self, chanNum=int(0), spwid=int(0)): schema = {'chanNum': {'type': 'cInt'}, 'spwid': {'type': 'cInt'}} doc = {'chanNum': chanNum, 'spwid': spwid} assert _pc.validate(doc,schema), str(_pc.errors) _getChanFreq_result = _quant_dc(self._swigobj.getChanFreq(_pc.document['chanN...
[ "def gen_frequency(cube):\n if not isinstance(cube, fits.hdu.image.ImageHDU):\n try:\n cube = cube['image']\n except:\n raise ValueError('func : ``gen_frequency`` var : ``cube`` must '\n 'have ImageHDU.')\n\n header = cube.header\n return WCS(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get lowest frequency channel for the specified spectral window.
def getMinFreq(self, spwid=int(0)): schema = {'spwid': {'type': 'cInt'}} doc = {'spwid': spwid} assert _pc.validate(doc,schema), str(_pc.errors) _getMinFreq_result = _quant_dc(self._swigobj.getMinFreq(_pc.document['spwid'])) return _getMinFreq_result
[ "def first_channel(self):\n self.current = self.ch[0]\n return self.current_channel()", "def basic_frequency(freqs, spectrum):\n return freqs[np.argmax(spectrum[1:])]", "def get_min_beam_fwhm(self):\n return np.nanmin(self.get_pixels().resolution)", "def _get_window_function(window_typ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the integrated Dry Opacity for one channel in a band.
def getDryOpacity(self, nc=int(-1), spwid=int(0)): schema = {'nc': {'type': 'cInt'}, 'spwid': {'type': 'cInt'}} doc = {'nc': nc, 'spwid': spwid} assert _pc.validate(doc,schema), str(_pc.errors) _getDryOpacity_result = self._swigobj.getDryOpacity(_pc.document['nc'], _pc.document['spwid'])...
[ "def getWetOpacity(self, nc=int(-1), spwid=int(0)):\n schema = {'nc': {'type': 'cInt'}, 'spwid': {'type': 'cInt'}}\n doc = {'nc': nc, 'spwid': spwid}\n assert _pc.validate(doc,schema), str(_pc.errors)\n _getWetOpacity_result = _quant_dc(self._swigobj.getWetOpacity(_pc.document['nc'], _pc...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the integrated Dry Continuum Opacity for one channel in a band.
def getDryContOpacity(self, nc=int(-1), spwid=int(0)): schema = {'nc': {'type': 'cInt'}, 'spwid': {'type': 'cInt'}} doc = {'nc': nc, 'spwid': spwid} assert _pc.validate(doc,schema), str(_pc.errors) _getDryContOpacity_result = self._swigobj.getDryContOpacity(_pc.document['nc'], _pc.docume...
[ "def getDryOpacity(self, nc=int(-1), spwid=int(0)):\n schema = {'nc': {'type': 'cInt'}, 'spwid': {'type': 'cInt'}}\n doc = {'nc': nc, 'spwid': spwid}\n assert _pc.validate(doc,schema), str(_pc.errors)\n _getDryOpacity_result = self._swigobj.getDryOpacity(_pc.document['nc'], _pc.document[...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the integrated O2 Lines Opacity for one channel in a band.
def getO2LinesOpacity(self, nc=int(-1), spwid=int(0)): schema = {'nc': {'type': 'cInt'}, 'spwid': {'type': 'cInt'}} doc = {'nc': nc, 'spwid': spwid} assert _pc.validate(doc,schema), str(_pc.errors) _getO2LinesOpacity_result = self._swigobj.getO2LinesOpacity(_pc.document['nc'], _pc.docume...
[ "def getH2OLinesOpacity(self, nc=int(-1), spwid=int(0)):\n schema = {'nc': {'type': 'cInt'}, 'spwid': {'type': 'cInt'}}\n doc = {'nc': nc, 'spwid': spwid}\n assert _pc.validate(doc,schema), str(_pc.errors)\n _getH2OLinesOpacity_result = self._swigobj.getH2OLinesOpacity(_pc.document['nc']...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the integrated O3 Lines Opacity for one channel in a band.
def getO3LinesOpacity(self, nc=int(-1), spwid=int(0)): schema = {'nc': {'type': 'cInt'}, 'spwid': {'type': 'cInt'}} doc = {'nc': nc, 'spwid': spwid} assert _pc.validate(doc,schema), str(_pc.errors) _getO3LinesOpacity_result = self._swigobj.getO3LinesOpacity(_pc.document['nc'], _pc.docume...
[ "def getO2LinesOpacity(self, nc=int(-1), spwid=int(0)):\n schema = {'nc': {'type': 'cInt'}, 'spwid': {'type': 'cInt'}}\n doc = {'nc': nc, 'spwid': spwid}\n assert _pc.validate(doc,schema), str(_pc.errors)\n _getO2LinesOpacity_result = self._swigobj.getO2LinesOpacity(_pc.document['nc'], _...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the integrated CO Lines Opacity for one channel in a band.
def getCOLinesOpacity(self, nc=int(-1), spwid=int(0)): schema = {'nc': {'type': 'cInt'}, 'spwid': {'type': 'cInt'}} doc = {'nc': nc, 'spwid': spwid} assert _pc.validate(doc,schema), str(_pc.errors) _getCOLinesOpacity_result = self._swigobj.getCOLinesOpacity(_pc.document['nc'], _pc.docume...
[ "def getO3LinesOpacity(self, nc=int(-1), spwid=int(0)):\n schema = {'nc': {'type': 'cInt'}, 'spwid': {'type': 'cInt'}}\n doc = {'nc': nc, 'spwid': spwid}\n assert _pc.validate(doc,schema), str(_pc.errors)\n _getO3LinesOpacity_result = self._swigobj.getO3LinesOpacity(_pc.document['nc'], _...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the integrated N2O Lines Opacity for one channel in a band.
def getN2OLinesOpacity(self, nc=int(-1), spwid=int(0)): schema = {'nc': {'type': 'cInt'}, 'spwid': {'type': 'cInt'}} doc = {'nc': nc, 'spwid': spwid} assert _pc.validate(doc,schema), str(_pc.errors) _getN2OLinesOpacity_result = self._swigobj.getN2OLinesOpacity(_pc.document['nc'], _pc.doc...
[ "def getO2LinesOpacity(self, nc=int(-1), spwid=int(0)):\n schema = {'nc': {'type': 'cInt'}, 'spwid': {'type': 'cInt'}}\n doc = {'nc': nc, 'spwid': spwid}\n assert _pc.validate(doc,schema), str(_pc.errors)\n _getO2LinesOpacity_result = self._swigobj.getO2LinesOpacity(_pc.document['nc'], _...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the integrated zenith Wet Opacity for one channel in a band.
def getWetOpacity(self, nc=int(-1), spwid=int(0)): schema = {'nc': {'type': 'cInt'}, 'spwid': {'type': 'cInt'}} doc = {'nc': nc, 'spwid': spwid} assert _pc.validate(doc,schema), str(_pc.errors) _getWetOpacity_result = _quant_dc(self._swigobj.getWetOpacity(_pc.document['nc'], _pc.document...
[ "def GetAlpha(self) -> \"double\":\n return _itkSigmoidImageFilterPython.itkSigmoidImageFilterIF3IF3_GetAlpha(self)", "def getDryOpacity(self, nc=int(-1), spwid=int(0)):\n schema = {'nc': {'type': 'cInt'}, 'spwid': {'type': 'cInt'}}\n doc = {'nc': nc, 'spwid': spwid}\n assert _pc.valid...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the integrated zenith H2O Lines Opacity for one channel in a band.
def getH2OLinesOpacity(self, nc=int(-1), spwid=int(0)): schema = {'nc': {'type': 'cInt'}, 'spwid': {'type': 'cInt'}} doc = {'nc': nc, 'spwid': spwid} assert _pc.validate(doc,schema), str(_pc.errors) _getH2OLinesOpacity_result = self._swigobj.getH2OLinesOpacity(_pc.document['nc'], _pc.doc...
[ "def getO2LinesOpacity(self, nc=int(-1), spwid=int(0)):\n schema = {'nc': {'type': 'cInt'}, 'spwid': {'type': 'cInt'}}\n doc = {'nc': nc, 'spwid': spwid}\n assert _pc.validate(doc,schema), str(_pc.errors)\n _getO2LinesOpacity_result = self._swigobj.getO2LinesOpacity(_pc.document['nc'], _...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the integrated zenith H2O Continuum Opacity for one channel in a band.
def getH2OContOpacity(self, nc=int(-1), spwid=int(0)): schema = {'nc': {'type': 'cInt'}, 'spwid': {'type': 'cInt'}} doc = {'nc': nc, 'spwid': spwid} assert _pc.validate(doc,schema), str(_pc.errors) _getH2OContOpacity_result = self._swigobj.getH2OContOpacity(_pc.document['nc'], _pc.docume...
[ "def getH2OLinesOpacity(self, nc=int(-1), spwid=int(0)):\n schema = {'nc': {'type': 'cInt'}, 'spwid': {'type': 'cInt'}}\n doc = {'nc': nc, 'spwid': spwid}\n assert _pc.validate(doc,schema), str(_pc.errors)\n _getH2OLinesOpacity_result = self._swigobj.getH2OLinesOpacity(_pc.document['nc']...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the integrated zenith H2O Atmospheric Phase Delay (Dispersive part) for the current conditions, for channel number nc of spectral window spwid.
def getDispersivePhaseDelay(self, nc=int(-1), spwid=int(0)): schema = {'nc': {'type': 'cInt'}, 'spwid': {'type': 'cInt'}} doc = {'nc': nc, 'spwid': spwid} assert _pc.validate(doc,schema), str(_pc.errors) _getDispersivePhaseDelay_result = _quant_dc(self._swigobj.getDispersivePhaseDelay(_p...
[ "def getH2OContOpacity(self, nc=int(-1), spwid=int(0)):\n schema = {'nc': {'type': 'cInt'}, 'spwid': {'type': 'cInt'}}\n doc = {'nc': nc, 'spwid': spwid}\n assert _pc.validate(doc,schema), str(_pc.errors)\n _getH2OContOpacity_result = self._swigobj.getH2OContOpacity(_pc.document['nc'], _...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Retrieve the integrated wet Atmospheric Path length (Dispersive part) along the atmospheric path corresponding to the 1st guess water column for channel nc in spectral window spwid.
def getDispersiveWetPathLength(self, nc=int(-1), spwid=int(0)): schema = {'nc': {'type': 'cInt'}, 'spwid': {'type': 'cInt'}} doc = {'nc': nc, 'spwid': spwid} assert _pc.validate(doc,schema), str(_pc.errors) _getDispersiveWetPathLength_result = _quant_dc(self._swigobj.getDispersiveWetPath...
[ "def getNonDispersiveWetPathLength(self, nc=int(-1), spwid=int(0)):\n schema = {'nc': {'type': 'cInt'}, 'spwid': {'type': 'cInt'}}\n doc = {'nc': nc, 'spwid': spwid}\n assert _pc.validate(doc,schema), str(_pc.errors)\n _getNonDispersiveWetPathLength_result = _quant_dc(self._swigobj.getNo...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Retrieve the integrated wet Atmospheric Path length (NonDispersive part) along the atmospheric path corresponding to the 1st guess water column for channel nc in spectral window spwid.
def getNonDispersiveWetPathLength(self, nc=int(-1), spwid=int(0)): schema = {'nc': {'type': 'cInt'}, 'spwid': {'type': 'cInt'}} doc = {'nc': nc, 'spwid': spwid} assert _pc.validate(doc,schema), str(_pc.errors) _getNonDispersiveWetPathLength_result = _quant_dc(self._swigobj.getNonDispersi...
[ "def getDispersiveWetPathLength(self, nc=int(-1), spwid=int(0)):\n schema = {'nc': {'type': 'cInt'}, 'spwid': {'type': 'cInt'}}\n doc = {'nc': nc, 'spwid': spwid}\n assert _pc.validate(doc,schema), str(_pc.errors)\n _getDispersiveWetPathLength_result = _quant_dc(self._swigobj.getDispersi...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Retrieve the integrated dry Atmospheric Path length (NonDispersive part) along the atmospheric path corresponding to the 1st guess water column for channel nc in spectral window spwid.
def getNonDispersiveDryPathLength(self, nc=int(-1), spwid=int(0)): schema = {'nc': {'type': 'cInt'}, 'spwid': {'type': 'cInt'}} doc = {'nc': nc, 'spwid': spwid} assert _pc.validate(doc,schema), str(_pc.errors) _getNonDispersiveDryPathLength_result = _quant_dc(self._swigobj.getNonDispersi...
[ "def getDispersiveWetPathLength(self, nc=int(-1), spwid=int(0)):\n schema = {'nc': {'type': 'cInt'}, 'spwid': {'type': 'cInt'}}\n doc = {'nc': nc, 'spwid': spwid}\n assert _pc.validate(doc,schema), str(_pc.errors)\n _getDispersiveWetPathLength_result = _quant_dc(self._swigobj.getDispersi...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Retrieve the integrated Atmospheric Path length (due to O2 Lines) along the atmospheric path corresponding to the 1st guess water column for channel nc in spectral window spwid.
def getO2LinesPathLength(self, nc=int(-1), spwid=int(0)): schema = {'nc': {'type': 'cInt'}, 'spwid': {'type': 'cInt'}} doc = {'nc': nc, 'spwid': spwid} assert _pc.validate(doc,schema), str(_pc.errors) _getO2LinesPathLength_result = _quant_dc(self._swigobj.getO2LinesPathLength(_pc.documen...
[ "def getN2OLinesPathLength(self, nc=int(-1), spwid=int(0)):\n schema = {'nc': {'type': 'cInt'}, 'spwid': {'type': 'cInt'}}\n doc = {'nc': nc, 'spwid': spwid}\n assert _pc.validate(doc,schema), str(_pc.errors)\n _getN2OLinesPathLength_result = _quant_dc(self._swigobj.getN2OLinesPathLength...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Retrieve the integrated Atmospheric Path length (due to O3 Lines) along the atmospheric path corresponding to the 1st guess water column for channel nc in spectral window spwid.
def getO3LinesPathLength(self, nc=int(-1), spwid=int(0)): schema = {'nc': {'type': 'cInt'}, 'spwid': {'type': 'cInt'}} doc = {'nc': nc, 'spwid': spwid} assert _pc.validate(doc,schema), str(_pc.errors) _getO3LinesPathLength_result = _quant_dc(self._swigobj.getO3LinesPathLength(_pc.documen...
[ "def getDispersiveWetPathLength(self, nc=int(-1), spwid=int(0)):\n schema = {'nc': {'type': 'cInt'}, 'spwid': {'type': 'cInt'}}\n doc = {'nc': nc, 'spwid': spwid}\n assert _pc.validate(doc,schema), str(_pc.errors)\n _getDispersiveWetPathLength_result = _quant_dc(self._swigobj.getDispersi...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Retrieve the integrated Atmospheric Path length (due to N2O Lines) along the atmospheric path corresponding to the 1st guess water column for channel nc in spectral window spwid.
def getN2OLinesPathLength(self, nc=int(-1), spwid=int(0)): schema = {'nc': {'type': 'cInt'}, 'spwid': {'type': 'cInt'}} doc = {'nc': nc, 'spwid': spwid} assert _pc.validate(doc,schema), str(_pc.errors) _getN2OLinesPathLength_result = _quant_dc(self._swigobj.getN2OLinesPathLength(_pc.docu...
[ "def getO2LinesPathLength(self, nc=int(-1), spwid=int(0)):\n schema = {'nc': {'type': 'cInt'}, 'spwid': {'type': 'cInt'}}\n doc = {'nc': nc, 'spwid': spwid}\n assert _pc.validate(doc,schema), str(_pc.errors)\n _getO2LinesPathLength_result = _quant_dc(self._swigobj.getO2LinesPathLength(_p...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get H2O continuum Absorption Coefficient at layer nl, spectral window spwid and frequency channel nf
def getAbsH2OCont(self, nl, nf=int(0), spwid=int(0)): schema = {'nl': {'type': 'cInt'}, 'nf': {'type': 'cInt'}, 'spwid': {'type': 'cInt'}} doc = {'nl': nl, 'nf': nf, 'spwid': spwid} assert _pc.validate(doc,schema), str(_pc.errors) _getAbsH2OCont_result = _quant_dc(self._swigobj.getAbsH2O...
[ "def calculate_co_column_density():\n # Build up all the constants\n # Already defined in astropy.constants\n # const.k_B, const.eps0, const.h\n #\n B0 = 55101.01 * u.MHz\n Eu = 5.28880 * u.K\n mu = 0.11046 * u.Debye\n nu = 110.20135400 * u.GHz\n Ju = 1.\n g = 2.*Ju + 1\n S = Ju/g\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get N2O lines Absorption Coefficient at layer nl, spectral window spwid and frequency channel nf
def getAbsN2OLines(self, nl, nf=int(0), spwid=int(0)): schema = {'nl': {'type': 'cInt'}, 'nf': {'type': 'cInt'}, 'spwid': {'type': 'cInt'}} doc = {'nl': nl, 'nf': nf, 'spwid': spwid} assert _pc.validate(doc,schema), str(_pc.errors) _getAbsN2OLines_result = _quant_dc(self._swigobj.getAbsN...
[ "def measureLines(self):\n #create a dictionary for all the corresponding wavelengths of the absorption features\n indexDict = {}\n #list the indices for each important absorption feature: numlo, numhi, denomlo, denomhi \n # THESE ARE ALL IN VACUUM and angstroms!!\n indexDict['CaK...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Setter for air mass in SkyStatus without performing water vapor retrieval.
def setAirMass(self, airmass): schema = {'airmass': {'type': 'cFloat', 'coerce': _coerce.to_float}} doc = {'airmass': airmass} assert _pc.validate(doc,schema), str(_pc.errors) _setAirMass_result = self._swigobj.setAirMass(_pc.document['airmass']) return _setAirMass_result
[ "def targetObservability(time, airmass):\n airmassCutoff = 1.8\n\n # When the target sets the \"airmass\" goes negative. That's bad\n if (airmass > 1.0) and (airmass < airmassCutoff):\n return 1\n else:\n return 0", "def setAirRes( self, airres ):\n for body in self.bodies:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Accessor to get airmass.
def getAirMass(self): _getAirMass_result = self._swigobj.getAirMass() return _getAirMass_result
[ "def get_airmass(self,t,lon='9.9158', lat='51.5413', elevation=100):\n\t\tgottingen = ephem.Observer()\n\t\tgottingen.lon = lon\n\t\tgottingen.lat = lat\n\t\tgottingen.elevation = elevation # units? m?\n\t\tgottingen.date = Time(t, format='jd', scale='utc').iso\n\t\tsun = ephem.Sun(gottingen)\n\t\taltitude = sun.al...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set sky background temperature in SkyStatus without performing water vapor retrieval
def setSkyBackgroundTemperature(self, tbgr={'value': float(2.73), 'unit': 'K'}): schema = {'tbgr': {'type': 'cDoubleQuant'}} doc = {'tbgr': tbgr} assert _pc.validate(doc,schema), str(_pc.errors) _setSkyBackgroundTemperature_result = self._swigobj.setSkyBackgroundTemperature(_quant_ec(_pc...
[ "def temperature(self, temperature):\n self.transite_light_state(color_temp=temperature)", "async def colourtemp_set(self, ctx, ct: int = 500, *, name=None):\n if not await self.get_bridge():\n await ctx.send(\"No IP has been set.\")\n return\n ct = await self.max_min_ch...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the sky background temperature
def getSkyBackgroundTemperature(self):
    """Return the sky background temperature.

    The raw swig return value is passed through ``_quant_dc`` — presumably
    converting it to a ``{'value': ..., 'unit': ...}`` quantity dict, as the
    other quantity-valued getters in this file do; verify against ``_quant_dc``.
    """
    return _quant_dc(self._swigobj.getSkyBackgroundTemperature())
[ "def color_temp(self):\n return self.device.state.get('temperature_mireds')", "def getHeatTemp(self):\r\n return self.heat.sensors[0].getTemp()", "def sky_temperature(frequency: u.Quantity = 50*u.MHz) -> u.Quantity:\n wavelength = frequency.to(\n u.m,\n equivalencies=u.spectral()\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the average Equivalent Blackbody Temperature in spectral window spwid, for the current conditions and a perfect sky coupling.
def getAverageTebbSky(self, spwid=int(0), wh2o={'value': float(-1), 'unit': 'mm'}): schema = {'spwid': {'type': 'cInt'}, 'wh2o': {'type': 'cDoubleQuant'}} doc = {'spwid': spwid, 'wh2o': wh2o} assert _pc.validate(doc,schema), str(_pc.errors) _getAverageTebbSky_result = _quant_dc(self._swi...
[ "def getAverageTrjSky(self, spwid=int(0), wh2o={'value': float(-1), 'unit': 'mm'}):\n schema = {'spwid': {'type': 'cInt'}, 'wh2o': {'type': 'cDoubleQuant'}}\n doc = {'spwid': spwid, 'wh2o': wh2o}\n assert _pc.validate(doc,schema), str(_pc.errors)\n _getAverageTrjSky_result = _quant_dc(se...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the Equivalent Blackbody Temperatures in a spectral window spwid for the current (user) Water Vapor Column wh2o, the current Air Mass, and perfect Sky Coupling to the sky.
def getTebbSkySpec(self, spwid=int(0), wh2o={'value': float(-1), 'unit': 'mm'}, tebbSky={'value': float(0), 'unit': ''}): schema = {'spwid': {'type': 'cInt'}, 'wh2o': {'type': 'cDoubleQuant'}, 'tebbSky': {'type': 'cDoubleQuant'}} doc = {'spwid': spwid, 'wh2o': wh2o, 'tebbSky': tebbSky} assert _p...
[ "def temp_water(self):\n # eq7\n th_swir2 = 0.03\n water = self.water_test()\n clear_sky_water = water & (self.swir2 < th_swir2)\n\n # eq8\n clear_water_temp = self.tirs1.copy()\n clear_water_temp[~clear_sky_water] = np.nan\n clear_water_temp[~self.mask] = np....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the average Rayleigh-Jeans Temperature in spectral window spwid, for the current (user) Water Vapor Column wh2o, the current Air Mass, and perfect Sky Coupling to the sky.
def getAverageTrjSky(self, spwid=int(0), wh2o={'value': float(-1), 'unit': 'mm'}): schema = {'spwid': {'type': 'cInt'}, 'wh2o': {'type': 'cDoubleQuant'}} doc = {'spwid': spwid, 'wh2o': wh2o} assert _pc.validate(doc,schema), str(_pc.errors) _getAverageTrjSky_result = _quant_dc(self._swigo...
[ "def wind_stress(uw, vw):\n \n nx = len(uw[:,0])\n ny = len(uw[0,:])\n nz = 2 \n Fx = numpy.zeros(((nz,nx,ny)))\n Fy = numpy.zeros(((nz,nx,ny)))\n k = 0.001\n Fx[1,:,:]= k*uw[:,:]*numpy.sqrt((uw[:,:]**2)+(vw[:,:]**2))\n Fy[1,:,:]= k*vw[:,:]*numpy.sqrt((uw[:,:]**2)+(vw[:,:]**2))\n re...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Invite a user to a channel.
async def cmd_invite(self, user, obj): for i in [isinstance(chan, str) for chan in obj["args"]]: assert i assert len(obj["args"]) > 1 chan = obj["args"][0] users = obj["args"][1:] for u in users: r = { "user": user.username, ...
[ "async def invite(self, ctx: commands.Context) -> discord.Message:\n return await ctx.send(discord.utils.oauth_url(self.bot.user.id))", "async def invite(self, ctx, *args):\n await ctx.channel.send(\n embed = await Macro.send(\"Add me to your server [here]({})\".format(\n o...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Send an error object to a user with msg
async def error(self, user, msg):
    """Send an ERROR object carrying *msg* to *user*, logging it as a warning.

    Args:
        user: The recipient passed through to ``self.send_obj``.
        msg: The error text; logged locally and sent in the object's ``args``.
    """
    # Fixed typo in the log prefix ("ERRROR" -> "ERROR").
    self.logger.warning("ERROR: {}".format(msg))
    payload = {"command": "ERROR", "args": [msg]}
    await self.send_obj(user, payload)
[ "def do_error(self, message=None):\n message = message or 'An error is an error.'\n self.send(\n msg=':{server} ERROR :{message}',\n message=message,\n )", "def error(msg):\n message(msg, flag='e')", "def error_msg(self, value):\n self._error_msg = value", ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generator that deserializes each input line as JSON, skipping lines that fail to parse.
def deserialize_tweets(line_generator):
    """Lazily deserialize each line as JSON, skipping lines that fail to parse.

    Yields the decoded object for every line that is valid JSON; malformed
    lines are silently dropped.
    """
    for raw in line_generator:
        try:
            parsed = json.loads(raw)
        except ValueError:
            # Malformed line: drop it and continue with the next one.
            continue
        yield parsed
[ "def gen_fjson(filename):\n with open(filename) as f:\n for line in f:\n try:\n yield json.loads(line)\n except:\n pass", "def from_json(cls: AnyClass, /, obj: Any) -> AnyClass:\n raise NotImplementedError", "def json(json_file):\n if isins...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Call to Audience API happens here. All we ask from the caller are user IDs, a results object, and (optionally) a grouping.
def analyze_user_ids(user_ids, results, groupings = None): import gnip_insights_interface.audience_api as api # set up groupings if groupings is not None: use_groupings = groupings else: grouping_dict = {"groupings": { "gender": {"group_by": ["user.gender"]} , ...
[ "def test_audiences_get_audience_results(self):\n pass", "def test_audiences_get_audience_hit_for_audience(self):\n pass", "def get_activities_response(self, user_id=None, group_id=None, app_id=None,\n activity_id=None, start_index=0, count=0,\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }