query: string, lengths 9 to 9.05k
document: string, lengths 10 to 222k
negatives: list, lengths 19 to 20
metadata: dict
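Read as a retrieval-training corpus, each row pairs a natural-language query with its matching code document plus 19 to 20 negative snippets. A minimal sketch of loading and inspecting such rows, assuming the Hugging Face datasets format; the dataset path below is a hypothetical placeholder, not the real identifier:

    from datasets import load_dataset

    # Hypothetical path -- substitute the actual dataset identifier.
    ds = load_dataset("org/code-search-triplets", split="train")
    row = ds[0]
    print(row["query"])           # natural-language docstring
    print(row["document"][:80])   # the matching code snippet
    print(len(row["negatives"]))  # 19 or 20 non-matching snippets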
Deals two cards each to the player and the house; the house receives one open and one closed card. If cards have already been dealt, this method just returns None.
def first_deal(self) -> None:
    if len(self.house.hand.cards) == 0 and len(self.user.hand.cards) == 0:  # Check if cards are already dealt.
        print(self.deal_card(self.user))
        print(self.deal_card(self.house))
        print(self.deal_card(self.user))
        print(self.deal_card(self....
[ "def deal(self):\n\n if self.dealer: # Has cards in hand\n self.dealer.reset()\n\n if self.player: # Has cards in hand\n self.player.reset()\n\n dealer_first = self.deck.draw()\n dealer_second = self.deck.draw()\n dealer_second.flip()\n self.dealer.t...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Deals cards to house, returns None
def house_deal(self) -> None:
    if not self.has_game_ending_hand:
        while max(self.house.hand.value) < 17:
            print(f"{self.deal_card(self.house)}")
[ "def first_deal(self) -> None:\n if len(self.house.hand.cards) == 0 and len(self.user.hand.cards) == 0: # Check if cards are already dealt.\n print(self.deal_card(self.user))\n print(self.deal_card(self.house))\n print(self.deal_card(self.user))\n print(self.deal_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Checks for the different kinds of hands the player and house have, returns bool.
def get_game_ending_hands(self) -> bool:
    end = False
    if 10 in self.house.hand.value:  # Check if house's first card is a 10
        if self.action_peek_cards() == 1:  # Peek the card to check for an ace. CardValue.ACE has a value of 1
            self.event_house_blackjack()
            end...
[ "def full_house(hand):\r\n s = [n for n,h in hand]\r\n if three_of_a_kind(hand)and len(set(s))==2:\r\n return True\r\n else:\r\n return False", "def isFullHouse(hand):\n if isPair(hand) and isThree(hand):\n if getPair(hand) != getThree(hand):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Asks user if the game should be ended or not, returns None.
def round_end(self) -> None:
    input_ = self.validate_input("\nDo you want to play another round?[y/n]", ("y", "n"))
    if input_ == "n":
        self.has_ended = True
    else:
        self.user.bet = 0
[ "def EndGame(self):\n check_endgame = not self.player.getPlayer().isGeneralExist()\n\n return check_endgame", "def end_game(self) -> None:\n pass", "def _check_whether_game_end(self):\n flag = False\n if len(self.asteroids_list) == 0:\n self.__screen.show_message(\"...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Event for when house has blackjack, returns None.
def event_house_blackjack(self) -> None:
    if 21 in self.user.hand.value:
        self.event_player_push()
    else:
        print("The house has blackjack")
        self.event_house_wins()
[ "def check_for_blackjack(self):\n if (self.dealer.hand.value + self.dealer.face_down.value) == 21:\n if self.player.hand.blackjack:\n return self.blackjack_push()\n else:\n return self.blackjack_dealer_win()\n\n if self.player.hand.blackjack():\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Event for when user wins, returns None.
def event_player_wins(self) -> None:
    win_amount = self.user.bet
    print("Congratulations, you win:", win_amount)
    self.user.win_balance(self.user.bet)
[ "def event_house_wins(self) -> None:\n print(\"You lose\")\n self.user.lose_balance(self.user.bet)", "def player_win(self):\r\n\r\n self.summary = (\" \" * 83) + \"YOU WIN\"\r\n print(\"Player wins against opponent.\\n\")\r\n self.player_wins += 1", "def handle_win(context: Ga...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Event for when house wins, returns None.
def event_house_wins(self) -> None:
    print("You lose")
    self.user.lose_balance(self.user.bet)
[ "def event_house_bust(self) -> None:\n print(f\"The house's hand contains {min(self.house.hand.value)}, they're bust\")\n self.event_player_wins()", "def event_house_blackjack(self) -> None:\n if 21 in self.user.hand.value:\n self.event_player_push()\n else:\n pri...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Event for when house goes bust, returns None.
def event_house_bust(self) -> None:
    print(f"The house's hand contains {min(self.house.hand.value)}, they're bust")
    self.event_player_wins()
[ "def event_player_bust(self) -> None:\n print(f\"Your hand contains {min(self.user.hand.value)}, you're bust\")\n self.event_house_wins()", "def event_house_blackjack(self) -> None:\n if 21 in self.user.hand.value:\n self.event_player_push()\n else:\n print(\"The ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Event for when player goes bust, returns None.
def event_player_bust(self) -> None:
    print(f"Your hand contains {min(self.user.hand.value)}, you're bust")
    self.event_house_wins()
[ "def event_house_bust(self) -> None:\n print(f\"The house's hand contains {min(self.house.hand.value)}, they're bust\")\n self.event_player_wins()", "def player_busts(chip):\n chip.lose_bet()\n print(\"you bust!\")", "def update(self):\n if self.bunker_health == 0:\n self.k...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
list of urls of a user's repos
def urls(gh, user):
    return [repo.url for repo in getuserrepos(gh, user)]
[ "def repositories():\n return user.repos()", "def get_repos_for_user(username):\n\n def get_repos(url, attribs=FILTER_ATTRIBS):\n \"\"\"\n Get the repositories at the given URL and return a list\n of dictionaries with the given keys.\n \"\"\"\n r = requests.get(url)\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
number of public repositories of a user
def n_public_repos(gh, user):
    return getuser(gh, user).public_repos
[ "def get_public_images_count(self, user_settings=None, user_id=None, login=None, email=None):\n return objects_module.users.get_public_images_count(self.khoros_object, user_settings, user_id,\n login, email)", "def get_counts(self, obj: Use...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Applying the same augmentation to image and its corresponding mask
def augment(image, masks):
    # Random horizontal flipping
    if random.random() > 0.5:
        image = TF.hflip(image)
        masks = TF.hflip(masks)
    # Random vertical flipping
    if random.random() > 0.5:
        image = TF.vflip(image)
        masks = TF.vflip(masks)
    ...
[ "def apply(self, img, mask):\n self._setup()\n\n # Apply both\n img = self._mask_and_images.augment_images(img)\n mask = self._mask_and_images.augment_images(mask)\n\n # Apply image only step\n img = self._images_only.augment_images(img)\n\n # Apply mask only step\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function to read the image and mask and return a sample of the dataset when needed.
def __getitem__(self, image_id):
    # read the image
    image_path = (os.path.join(self.dataset_dir, self.list_dir[image_id], "images/{}.png".format(self.list_dir[image_id])))
    image = io.imread(image_path)
    # read the mask
    mask_dir = os.path.join(self.dataset_dir, self.list_dir[image_id], '...
[ "def read_data(image_path, mask_path):\n img = cv2.imread(image_path, 0) # /255.#read the gray image\n img = cv2.resize(img, (IMAGE_WIDTH, IMAGE_HEIGHT))\n\n try:\n msk = cv2.imread(mask_path, 0) # /255.#read the gray image\n msk = cv2.resize(msk, (IMAGE_WIDTH, IMAGE_HEI...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
finds columns in the dataframe with zero variance, i.e. those with the same value in every observation.
def find_zero_var(df):
    toKeep = []
    toDelete = []
    for col in df:
        if len(df[col].value_counts()) > 1:
            toKeep.append(col)
        else:
            toDelete.append(col)
    ##
    return {'toKeep': toKeep, 'toDelete': toDelete}
[ "def find_zero_var(df):\n toKeep = []\n toDelete = []\n for col in df:\n if len(df[col].value_counts())>1:\n toKeep.append(col)\n else:\n toDelete.append(col)\n #\n return {'toKeep':toKeep, 'toDelete':toDelete}", "def find_zero_var(df): \n toKeep = []\n toD...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
finds columns that are either positively or negatively perfectly correlated (with correlations of +1 or -1), and creates a dict that includes which columns to drop so that each remaining column is independent
def find_perfect_corr(df):
    corrMatrix = df.corr()
    corrMatrix.loc[:,:] = numpy.tril(corrMatrix.values, k = -1)
    already_in = set()
    result = []
    for col in corrMatrix:
        perfect_corr = corrMatrix[col][abs(numpy.round(corrMatrix[col], 10)) == 1.00].index.tolist()
        if perfect_corr and col n...
[ "def find_perfect_corr(df): \n corrMatrix = df.corr()\n corrMatrix.loc[:,:] = numpy.tril(corrMatrix.values, k = -1)\n already_in = set()\n result = []\n for col in corrMatrix:\n perfect_corr = corrMatrix[col][abs(numpy.round(corrMatrix[col],10)) == 1.00].index.tolist()\n if perfect_corr and ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
get the text output
def get_text(self):
    return self.output.getvalue()
[ "def get_text():", "def text(self):\n text = ''\n for run in self.runs:\n text += run.text\n return text", "def Text(self) -> str:", "def _text_command(self, request):\n response = self._send(request)\n self._check_response(response)\n return response.text"...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Displays statistics on the most popular stations and trip.
def station_stats(data):
    print('\nCalculating The Most Popular Stations and Trip...\n')
    start_time = time.time()
    # display most commonly used start station
    popular_ss = data['Start Station'].mode()[0]
    print('Most popular Start Station:', popular_ss)
    # display most commonly used end station
    po...
[ "def station_stats():\r\n\r\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\r\n start_time = time.time()\r\n\r\n # TO DO: display most commonly used start station\r\n print(\"Most commonly used start station: \" + local_data_frame['Start Station'].value_counts().idxmax())\r\n\r\n #...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Displays statistics on the total and average trip duration.
def trip_duration_stats(data):
    print('\nCalculating Trip Duration...\n')
    start_time = time.time()
    # display total travel time
    total_trip_time = data['Trip Duration'].sum()
    print('The Total Travel Time is {} Hours'.format(total_trip_time/3600))
    # display mean travel time
    avg_trip = data['Trip ...
[ "def trip_duration_stats():\r\n\r\n print('\\nCalculating Trip Duration...\\n')\r\n start_time = time.time()\r\n\r\n # TO DO: display total travel time\r\n print(format_travel_time(\"Total\", local_data_frame['Trip Duration'].sum()))\r\n\r\n # TO DO: display mean travel time\r\n print(format_trave...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a list containing the names of all installed FSLeyes plugins.
def listPlugins():
    plugins = []
    for dist in pkg_resources.working_set:
        if dist.project_name.startswith('fsleyes-plugin-'):
            plugins.append(dist.project_name)
    return list(sorted(plugins))
[ "def _list_plugins_on_fs(cls):\n return os.listdir(settings.PLUGINS_PATH)", "def listExistingPlugins():\n return map(lambda x: x.__name__, plugins.PluginBase.PluginBase.__subclasses__())", "def list_plugins():\n plugins_info = get_plugins_info()\n\n for plugin_info in plugins_info:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Loads the given Python file as a FSLeyes plugin.
def loadPlugin(filename):
    name = op.splitext(op.basename(filename))[0]
    modname = 'fsleyes_plugin_{}'.format(name)
    distname = 'fsleyes-plugin-{}'.format(name)
    if distname in listPlugins():
        log.debug('Plugin %s is already in environment - skipping', distname)
        return
    log.debug('...
[ "def load_hooks_plugin(file_path, module_name='mgstest.plugin'):\n if os.path.exists(file_path):\n return Plugin(module=load_module_file(file_path, module_name))\n else:\n return Plugin()", "def load_plugin(self):\n en_data = self.receive(3) # Max plugin name length 999 chars\n e...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Copies the given Python file into the FSLeyes settings directory, within a subdirectory called ``plugins``. After the file has been
def installPlugin(filename):
    basename = op.splitext(op.basename(filename))[0]
    dest = 'plugins/{}.py'.format(basename)
    log.debug('Installing plugin %s', filename)
    with open(filename, 'rt') as inf, \
            fslsettings.writeFile(dest) as outf:
        outf.write(inf.read())
    dest = fsl...
[ "def copy_settings():\n new_filename = 'settings.ini'\n if os.path.isfile(new_filename):\n error_msg = '{} already exists'.format(new_filename)\n raise Exception(error_msg)\n\n # determine the path of the example settings in the package\n pkgdir = os.path.dirname(thief_snapshot.__file__)\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Handles a filesystem delete based on UUID.
def handle_delete(uuid):
    location = os.path.join(app.config['UPLOAD_DIRECTORY'], uuid)
    print(uuid)
    print(location)
    shutil.rmtree(location)
[ "def handle_delete(uuid):\n location = os.path.join(current_app.config['UPLOAD_DIRECTORY'], secure_filename(uuid))\n current_app.logger.info(\"Deletion Task Started for %s\", uuid)\n shutil.rmtree(location)\n return \"removed\"", "def handle_delete(uuid):\n location = os.path.join(UPLOAD_DIRECTORY,...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Save an upload. Uploads are stored in media/uploads
def save_upload(f, path):
    if not os.path.exists(os.path.dirname(path)):
        os.makedirs(os.path.dirname(path))
    with open(path, 'wb+') as destination:
        destination.write(f.read())
[ "def upload(self, post):\n # TODO: handle filename conflicts\n directory = \".\".join(self.filename.split(\".\")[:-1])\n\n self.abspath = os.path.join(self.root_dir, directory)\n self.localpath = os.path.join(\"/static/gallery\", directory)\n if not os.path.exists(self.abspath):\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
A POST request. Validate the form and then handle the upload based on the POSTed data. Does not handle extra parameters yet.
def post(self):
    if validate(request.form):
        handle_upload(request.files['qqfile'], request.form)
        return make_response(200, {"success": True})
    else:
        return make_response(400, {"error": "Invalid request"})
[ "def post(self, request, *args, **kwargs):\n form = self.get_form()\n if form.validate():\n return self.form_valid(form)\n else:\n return self.form_invalid(form)", "def post(self, request, *args, **kwargs):\n form = self.get_form()\n if form.is_valid():\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
if we downscale the image, the intrinsic matrix also needs to be changed.
def rescale_intrinsic(self):
    # scale focal length and principal points wrt image resizing
    if self.downscale > 1:
        self.K = self.K_orig.copy()
        self.K[0, 0] /= float(self.downscale)
        self.K[1, 1] /= float(self.downscale)
        self.K[0, 2] /= float(self.downscale)
        ...
[ "def _change_scale(self, image):\n # if specified to preserve aspect ratio\n if self.aspect_aware:\n h, w = image.shape[:2]\n dW = 0\n dH = 0\n # if width is smaller than height\n if w < h:\n image = imutils.resize(\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Loads a set of images to self.imgs list
def load_images(self):
    self.img_paths = sorted(glob(self.img_pattern))
    self.imgs = []
    for idx, this_path in enumerate(self.img_paths):
        try:
            this_img = cv2.imread(this_path)
            if self.downscale > 1:
                this_img = cv2.resize(this_img, (0, ...
[ "def load_images(self):\n for image in self.gltf.images:\n self.images.append(image.load(self.path.parent))", "def _load_images(self):\n if not self.test_set:\n images = []\n masks = []\n for item in self.image_names:\n image = nrrd.read(os....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This function visualizes the epipolar lines
def visualize_epipolar_lines(self, img1, img2, p1, p2, E, save_path):
    # get fundamental matrix
    F, mask_fdm = cv2.findFundamentalMat(p1, p2, cv2.RANSAC)
    p1_selected = p1[mask_fdm.ravel() == 1]
    p2_selected = p2[mask_fdm.ravel() == 1]
    # draw lines
    lines1 = cv2.computeCorres...
[ "def visualize():", "def plotEcliptic(maptype=Projection()):\n\n\n ra = np.empty(360)\n dec = np.empty(360)\n for i in np.arange(360):\n ra[i] = i + 2.45*np.sin (2 * i * np.pi/180.)\n dec[i] =23.5*np.sin( i*np.pi/180.)\n\n maptype.plotLine(ra, dec, 'r-', lw=4, label=\"Ecliptic\")", "de...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Look up the process: tasklist | find "node.exe". Kill the process: taskkill -F -PID node.exe
def kill_server(self):
    # list of matching processes
    server_list = self.dos.excute_cmd_result('tasklist | find "node.exe"')
    if len(server_list) > 0:
        self.dos.excute_cmd('taskkill -F -PID node.exe')
[ "def kill_all_process(nodes):\n plist = []\n for node in nodes:\n proc = Process(target=run_linux_cmd, args=(\"ps aux | grep esgdata\\ | grep \\\"\" + sheet_name + \"\\\" | grep -v grep | grep -v ssh | awk \\\"{print \\\\$2}\\\" | xargs kill\", node, True))\n plist.append(proc)\n\n for proc in plist:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Evaluates the baseline predictor.
def evalBaseline(self, df=None):
    if (df is None):
        self.r_b = self.df.merge(self.df_user[["user ind", "b_u"]], on="user ind")
        self.r_b = self.r_b.merge(self.df_item[["item ind", "b_i"]], on="item ind")
        self.r_b["baseline"] = self.r_mean + self.r_b["b_u"] + self....
[ "def baseline(data):\n\tdum = DummyClassifier(strategy='most_frequent', random_state=1)\n\tdum.fit(data[0]['posts'], data[0]['label'])\n\tpred = dum.predict(data[1]['posts'])\n\n\tprint(\"----------BASELINE-----------\")\n\tprint(\"Accuracy score: {}\\n\".format(accuracy_score(data[1]['label'], pred)))\n\tprint(\"-...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Estimates model coefficients from calculated shifts.
def initialize_model(self, positions, shifts_y, shifts_x):
    shifts_y = list(map(lambda x: x*-1, shifts_y))
    shifts_x = list(map(lambda x: x*-1, shifts_x))

    def list_shift(pos, c):
        return np.array([DeformationModel.calculate_shifts_from_coeffs(p[0], p[1], p[2...
[ "def coefficients(self) :\n raise NotImplementedError", "def test_Shift_model_set_linear_fit():\n\n init_model = models.Shift(offset=[0, 0], n_models=2)\n\n x = np.arange(10)\n yy = np.array([x + 0.1, x - 0.2])\n\n fitter = fitting.LinearLSQFitter()\n fitted_model = fitter(init_model, x, yy)...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Randomly generates model with reasonable coefficients.
def initialize_model_randomly(self, shape=(2048, 2048), tn=50):
    self.coeffs = self.generate_random_coeffs(shape, tn)
[ "def randomize_coefficients(self):\n coeff1 = round(uniform(0.00, 0.98), 2)\n coeff2 = round(uniform(0.00, (1 - coeff1)), 2)\n coeff3 = round(uniform(0.00, (1 - (coeff1 + coeff2))), 2)\n coeff4 = 1 - (coeff1 + coeff2 + coeff3)\n self.coefficient = {'frequency': coeff1, 'precedence...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generates a vector of reasonable random model coefficients a_i. shape is a (height, width) tuple. Generated coefficients are in interval with c_0 in .
def generate_random_coeffs(shape, tn):
    res = np.zeros((2, 9))
    # reasonable space-dependent part
    width = shape[1]
    height = shape[0]
    min_val = 1e-4
    for i in range(2):
        c = res[i]
        # generate quadratic coefficients
        c[2] = np.random.uniform...
[ "def constructModel(bvals,coeffs,xc,size):\n model_img=np.dot(bvals,coeffs)\n model_img=np.reshape(model_img,size)\n return model_img", "def random_coefficients(self, n=3, max_range = 10):\n return np.random.uniform(-1*max_range, max_range, n)", "def polynomial_model(z, c0, c1, c2, c3, c4, c5):\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Defines the base unit of this sensor
def base_unit() -> ureg:
    return ureg.meter
[ "def base_unit(unit):\n return __units[unit]['baseUnit']", "def get_base_unit(self, obj: Dimension) -> Unit:\n return obj.base_unit", "def set_base_uint(self, base_unit):\n self.base_unit = base_unit\n self.decimal_point = util.base_unit_name_to_decimal_point(self.base_unit)\n sel...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that the byteps_push_pull correctly sums 1D, 2D, 3D tensors.
def test_byteps_push_pull(self):
    dtypes = ['float16', 'float32', 'float64']
    dims = [1, 2, 3]
    count = 0
    ctx = self._current_context()
    shapes = [(), (17), (17, 17), (17, 17, 17)]
    for dtype, dim in itertools.product(dtypes, dims):
        # MXNet uses gpu_id as part of t...
[ "def test_byteps_push_pull_inplace(self):\n size = bps.size()\n dtypes = ['float16', 'float32', 'float64']\n dims = [1, 2, 3]\n count = 0\n ctx = self._current_context()\n shapes = [(), (17), (17, 17), (17, 17, 17)]\n for dtype, dim in itertools.product(dtypes, dims)...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that the byteps_push_pull correctly sums 1D, 2D, 3D tensors.
def test_byteps_push_pull_inplace(self):
    size = bps.size()
    dtypes = ['float16', 'float32', 'float64']
    dims = [1, 2, 3]
    count = 0
    ctx = self._current_context()
    shapes = [(), (17), (17, 17), (17, 17, 17)]
    for dtype, dim in itertools.product(dtypes, dims):
        ...
[ "def test_byteps_push_pull(self):\n dtypes = ['float16', 'float32', 'float64']\n dims = [1, 2, 3]\n count = 0\n ctx = self._current_context()\n shapes = [(), (17), (17, 17), (17, 17, 17)]\n for dtype, dim in itertools.product(dtypes, dims):\n # MXNet uses gpu_id ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
scan the pzt_motor (e.g., pzt_dcm_th2), detectors can be any signal or motor (e.g., Andor, dcm.th2)
def pzt_scan(pzt_motor, start, stop, steps, detectors=[Vout2], sleep_time=1, md=None):
    if Andor in detectors:
        exposure_time = yield from bps.rd(Andor.cam.acquire_time)
        yield from mv(Andor.cam.acquire, 0)
        yield from mv(Andor.cam.image_mode, 0)
        yield from mv(Andor.cam.num_images, 1)
        ...
[ "def pzt_scan_multiple(\n moving_pzt,\n start,\n stop,\n steps,\n detectors=[Vout2],\n repeat_num=2,\n sleep_time=1,\n fn=\"/home/xf18id/Documents/FXI_commision/DCM_scan/\",\n):\n\n det = [det.name for det in detectors]\n det_name = \"\"\n for i in range(len(det)):\n det_name...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Repeat scanning the pzt (e.g. pzt_dcm_ch2, pzt_dcm_th2), and read the detector outputs. Images and .csv data file will be saved
def pzt_scan_multiple(
    moving_pzt,
    start,
    stop,
    steps,
    detectors=[Vout2],
    repeat_num=2,
    sleep_time=1,
    fn="/home/xf18id/Documents/FXI_commision/DCM_scan/",
):
    det = [det.name for det in detectors]
    det_name = ""
    for i in range(len(det)):
        det_name += det[i]
    det_...
[ "def test():\n\n for data_path in tqdm(opt.DATA_PATH_LIST):\n \n # make save folder for each test dataset\n SAVE_PATH = os.path.join(opt.SAVE_PATH,os.path.basename(data_path))\n os.makedirs(SAVE_PATH,exist_ok=True)\n\n # get image data \n Img_paths = sorted(glob.g...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
With given energy list, scan the pzt multiple times and record the signal from various detectors, file will be saved to local folder.
def pzt_energy_scan(
    moving_pzt,
    start,
    stop,
    steps,
    eng_list,
    detectors=[dcm.th2, Vout2],
    repeat_num=1,
    sleep_time=1,
    fn="/home/xf18id/Documents/FXI_commision/DCM_scan/",
):
    det = [det.name for det in detectors]
    det_name = ""
    for i in range(len(det)):
        det_name +=...
[ "def readenergy(self, filelist):\r\n \r\n energy=[]\r\n tmpenergy=[]\r\n for filename in filelist:\r\n if not(os.path.exists(filename)):\r\n if self._resultfile: self._resultfile.write('Output file: \"'+filename+'\" does not exist. Restart your calculation. \\n')\r\n else: print 'Outp...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculate which row to update, factoring in the header row placed every $hdr_span years.
def year_span(target_year: int, base_year: int, yr_span: int, hdr_span: int, logger: lg.Logger = None) -> int:
    if logger:
        logger.debug(F"target year = {target_year}; base year = {base_year}; year span = {yr_span}; header span = {hdr_span}")
    year_diff = target_year - base_year
    hdr_adjustment = 0
    if hdr_sp...
[ "def compute_header_score(self, row, next_row, row_num):\n score = 0.0\n\n if next_row is None:\n return score\n\n for cell, next_cell in zip(row, next_row):\n if cell.is_empty():\n score -= 1\n if next_cell.is_empty():\n co...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert the string representation of a quarter to an int.
def get_int_quarter(p_qtr: str, logger: lg.Logger = None) -> int:
    if logger:
        logger.debug(F"quarter to convert = {p_qtr}")
    msg = "Input MUST be a String of 0..4!"
    if not p_qtr.isnumeric() or len(p_qtr) != 1:
        if logger:
            c_frame = inspect.currentframe().f_back
            logger.err...
[ "def dec2int(r: str) -> int:", "def _check_quarter(quarter: int) -> int:\n if MIN_QUARTER <= quarter <= MAX_QUARTER:\n return quarter\n else:\n raise ValueError(f\"quarter {quarter} is out of range\")", "def month_for_quarter(quarter):\n if quarter == \"Q1\":\n return 1\n elif q...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the year and month that start the FOLLOWING quarter.
def next_quarter_start(start_year: int, start_month: int, logger: lg.Logger = None) -> (int, int):
    if logger:
        logger.debug(F"start year = {start_year}; start month = {start_month}")
    # add number of months for a Quarter
    next_month = start_month + QTR_MONTHS
    # use integer division to find out if the ...
[ "def prev_quarter_boundaries(now):\n first_of_month = datetime.datetime(now.year, now.month, 1)\n\n # 75 days before the 1st is always in the previous quarter\n date_in_prev_q = first_of_month - datetime.timedelta(days=75)\n\n q_y = date_in_prev_q.year\n q_start_m = int((date_in_prev_q.month-1) / 3)*...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the date that ends the CURRENT quarter.
def current_quarter_end(start_year: int, start_month: int, logger: lg.Logger = None) -> date:
    if logger:
        logger.info(F"start year = {start_year}; start month = {start_month}")
    end_year, end_month = next_quarter_start(start_year, start_month)
    # end date is one day back from the start of the next period
    ...
[ "def get_quarter_end(x: Optional[Date] = None) -> Date:\n return get_quarter_start(x or get_today()) + relativedelta(months=+3, days=-1)", "def get_last_quarter(today: Optional[date] = None) -> str:\n if today is None:\n today = date.today()\n year = today.year\n last_quarter = (today.month - 1...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generate the start and end dates for the quarters in the submitted range.
def generate_quarter_boundaries(start_year: int, start_month: int, num_qtrs: int, logger: lg.Logger = None) -> (date, date):
    if logger:
        logger.debug(F"start year = {start_year}; start month = {start_month}; num quarters = {num_qtrs}")
    for i in range(num_qtrs):
        yield date(start_year, start_month, 1),...
[ "def quarter(dt):\n quarters = rrule.rrule(\n rrule.MONTHLY,\n bymonth = (1, 4, 7, 10),\n bysetpos = -1,\n dtstart = datetime(dt.year, 1, 1),\n count = 8\n )\n first_day = quarters.before(dt, True)\n last_day = quarters.after(dt) - relativedelta.relativedelta(days=1)\n r...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sum values over given axis
def sum(self, axis: int = 0):
    self.values = self.values.sum(axis=axis)
    self.layers = [None]
    return self.copy()
[ "def sum_values(\n data: np.array,\n axis: Union[int, Tuple[int,...]] = -1,\n squeeze: bool = True\n ) -> np.array:\n return np.sum(data, axis=axis, keepdims=not squeeze)", "def sum(tensor, axis=None):\n raise NotImplementedError", "def sum(self, axis=None):\n return sel...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Config file for the main trainer factory (a scheduler to train multiple models in a row, i.e. for a specified set of horizons). To ensure that we store the model settings in only one place, the Trainer configs will take the default values given in the model configs file (that must be specified in the file_name_model_configs argume...
def __init__(self, config_file_name: str):
    configs_trainer = io.read_yaml(PATH_CONFIG, config_file_name)
    configs_model = configs_trainer[configs_trainer['model']]
    # Add trainer configs attributes
    horizons = configs_trainer['forecasting_horizons_trainer']
    self.forecasting_horizon...
[ "def config():\n experiment_dir = './experiments'\n simulation_steps = 1000\n device = 'cpu'\n path_to_molecules = os.path.join(experiment_dir, 'data/ethanol.xyz')\n simulation_dir = os.path.join(experiment_dir, 'simulation')\n training_dir = os.path.join(experiment_dir, 'training')\n model_pat...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Looks in build_dir for log_file in a folder that also includes the junit file.
def find_log_junit(build_dir, junit, log_file):
    tmps = [f.filename for f in view_base.gcs_ls('%s/artifacts' % build_dir) if '/tmp-node' in f.filename]
    for folder in tmps:
        filenames = [f.filename for f in view_base.gcs_ls(folder)]
        if folder + junit in filenames:
            path = fol...
[ "def test_log_dir(self):\n false_dir = '/tmp/any'\n self.test_config['LOG_DIR'] = false_dir\n self.write_config_to_file()\n self.log = nginx_log_generator()\n self.generate_report()\n # Check our log\n path_to_log = '{}/assets/{}'.format(self.test_dir, 'analyzer.log'...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns list of files named log_file from values in all_logs
def find_log_files(all_logs, log_file):
    log_files = []
    for folder in all_logs.itervalues():
        for log in folder:
            if log_file in log:
                log_files.append(log)
    return log_files
[ "def get_log_files(self):\n pattern = r'.*[a-zA-Z_]{}\\.log'\n log_files = []\n files = os.scandir(self.cwd)\n for file in files:\n match = re.fullmatch(pattern.format(self.testnum), file.name)\n if match:\n log = path.abspath(path.join(self.cwd, file...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
returns a dictionary for the given artifacts folder, with the keys being the folders and the values being the log files within the corresponding folder
def get_all_logs(directory, artifacts):
    log_files = {}
    if artifacts:
        dirs = [f.filename for f in view_base.gcs_ls('%s/artifacts' % directory) if f.is_dir]
    else:
        dirs = [directory]
    for d in dirs:
        log_files[d] = []
        for f in view_base.gcs_ls(d):
            l...
[ "def collect_logs(self):\n logs = glob.glob(f\"{self.production.rundir}/*.err\") #+ glob.glob(f\"{self.production.rundir}/*/logs/*\")\n logs += glob.glob(f\"{self.production.rundir}/*.out\")\n messages = {}\n for log in logs:\n with open(log, \"r\") as log_f:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Based on make_dict, either returns the objref_dict or the parsed log file
def parse_log_file(log_filename, pod, filters=None, make_dict=False, objref_dict=None):
    log = gcs_async.read(log_filename).get_result()
    if log is None:
        return {}, False if make_dict else None
    if pod:
        bold_re = regex.wordRE(pod)
    else:
        bold_re = regex.error_re
    if objref_dict is...
[ "def __init__(self, logfilename):\n\n self.Standards = defaultdict(lambda: defaultdict(list))\n self.Targets = defaultdict(lambda: defaultdict(list))\n self.Flats = defaultdict(list)\n self.Arcs = defaultdict(list)\n self.WorkingDirectory = os.path.dirname(logfilename)\n\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Load Disaster Declarations and County information into the database, as two separate tables that are created simultaneously. Database information is pulled from the FEMA API Disaster Declarations Summaries data set. Data populates both the disasters and counties tables. The API returns 1k records max ($top set to variable res...
def load_disasters():
    print "Disasters"
    # deletes any data within the table before seeding
    Disaster.query.delete()
    result_count = 1000
    iteration = 0
    records_returned = 1000
    # makes payload requests from FEMA API
    while records_returned == 1000:
        payload = {'$top': result_count,...
[ "def get_crime_info():\r\n #url to all king county restaurant health inspections since 2006\r\n\r\n offset = 0\r\n count = 0\r\n pk = 0\r\n\r\n while offset < 10000:\r\n url = \"https://data.seattle.gov/resource/pu5n-trf4.json?$limit=10000&$offset=\" + str(offset) + \"&$where=event_clearance_d...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Load States into database from a text file.
def load_states():
    print "States and Territories"
    State.query.delete()
    for row in open("data/states_and_territories.txt"):
        row = row.rstrip()
        # can't seem to get rid of "\r" character other than doing a .split
        piped_rows = row.split("\r")
        for i in piped_rows:
            ...
[ "def LoadStates(self):\n\t\tstates_file = open(self.states_file_path, 'r')\n\t\tprint(\"States:\")\n\t\tfor line in states_file:\n\t\t\tif line[0] == '#':\n\t\t\t\tcontinue\n\n\t\t\tcolon_index = line.find(':')\n\t\t\tif colon_index != -1:\n\t\t\t\tidentifier = str(line[:colon_index])\n\t\t\t\tvalue = line[colon_in...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a tuple representing the upper and lower values of the price range at the given index. If there is one data series then return the tuple (value, None).
def get_value_at_index(self, index, cc):
    high = cc.dsget('high')
    low = cc.dsget('low')
    return (high[index], low[index])
[ "def get_bounds(self, value = None, index = None):\n\n if self._data is None or 0 in self._data.shape:\n return (0.0, 0.0)\n\n if type(value) == types.IntType:\n if self.value_dimension == 0:\n maxi = nanmax(self._data[value, ::])\n mini = nanmin(sel...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Trains one elastic logistic classifier per review group. Saves the trained classifiers within self.models.
def train(self, x_train, y_train):
    # convert input to format for classifier
    list_of_embeddings = list(x_train[self.embeddings_col])
    x_train = np.array([[float(i) for i in embedding.strip('[]').split()] for embedding in list_of_embeddings])
    # discard fold ID column from labels
    r...
[ "def train(self):\n for classifier in self.classifiers:\n classifier.train()\n if self.config.mode == 'cv':\n self.test()", "def apply_classifier(self):\n for detected_object in self.detected_objects:\n detected_object.predict_class(self.original_image)", "d...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the admin visual data for a specific assignment. Currently the data passed back feeds into the radial and passed time scatter graphs.
def public_visuals_assignment_id(assignment_id: str):
    # Get the assignment object
    assignment = Assignment.query.filter(
        Assignment.id == assignment_id
    ).first()
    # If the assignment does not exist, then stop
    req_assert(assignment is not None, message='assignment does not exist')
    # Asse...
[ "def get_admin_assignment_visual_data(assignment_id: str) -> List[Dict[str, Any]]:\n\n # Get all the assignment tests for the specified assignment\n assignment_tests = AssignmentTest.query.filter(\n AssignmentTest.assignment_id == assignment_id\n ).all()\n\n # Build a list of visual data for each...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the visual history for a specific student and assignment. lightly cached per assignment and user
def visual_history_assignment_netid(assignment_id: str, netid: str):
    # Get the assignment object
    assignment = Assignment.query.filter(
        Assignment.id == assignment_id
    ).first()
    # If the assignment does not exist, then stop
    req_assert(assignment is not None, message='assignment does not exis...
[ "def read_history(self):\r\n cursor = connection.cursor()\r\n cursor.execute(\"\"\"\r\n SELECT id, created, student_module_id FROM courseware_studentmodulehistory\r\n \"\"\")\r\n return cursor.fetchall()", "def list_history(request):\n history = History.objects\n\n if not ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the summary sundial data for an assignment. This endpoint is ridiculously IO intensive, so it is heavily cached.
def visual_sundial_assignment(assignment_id: str):
    # Get the assignment object
    assignment = Assignment.query.filter(
        Assignment.id == assignment_id
    ).first()
    # If the assignment does not exist, then stop
    req_assert(assignment is not None, message='assignment does not exist')
    # Assert t...
[ "def get_assignment_sundial(assignment_id):\n\n # Get the assignment\n assignment = Assignment.query.filter(\n Assignment.id == assignment_id\n ).first()\n\n # Create base sundial\n sundial = {\n 'children': [\n\n # Build Passed\n {\n 'name': 'build ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Reshapes an input variable without copy.
def reshape(x, shape):
    if x.shape == shape:
        return chainer.as_variable(x)
    y, = Reshape(shape).apply((x,))
    return y
[ "def reshape(x, shape):\n return Reshape(shape)(x)", "def reshape_var(var):\n dims = np.shape(var)\n nx = dims[0]\n ny = dims[1]\n nz = dims[2]\n\n var_2d = var.reshape(nx * ny, nz)\n return var_2d", "def _reshape_X(X):\n if len(X.shape) != 3:\n return X.reshape((X.shape[0...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convolve image [img] with [kernel].
def convolution(img, kernel, padding='fill'):
    kernel = np.rot90(kernel, 2)
    h, w = kernel.shape[:2]
    t, b, l, r = (h-1)//2, h//2, (w-1)//2, w//2
    # Use numpy padding because it works for >2d
    padshape = [(t, b), (l, r)] + [(0, 0)]*(len(img.shape[2:]))
    padded_img = np.pad(img, padshape, mode={'fill':'constant',...
[ "def convolve(self,image, kernel):\n im_out = convolve2d(image, kernel, mode='same', boundary='symm')\n return im_out", "def convolve(image, kernel):\n # grab the spatial dimensions of the image and kernel\n (image_height, image_width) = image.shape[:2]\n (_, kernel_width) = kernel.shape[:2...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given two lists of keypoint locations and descriptors, compute the correspondences.
def find_correspondences(pts1, pts2, desc1, desc2, match_score_type='ratio'):
    N = pts1.shape[0]
    X = np.sum(desc1**2, axis=1, keepdims=True)
    Y = np.sum(desc2**2, axis=1, keepdims=True).T
    XY = np.dot(desc1, desc2.T)
    L = X + Y - 2*XY
    D = (np.maximum(L, 0))
    scores = np.min(D, axis=1)
    indic...
[ "def apply_feature_matching(desc1: np.ndarray, desc2: np.ndarray,\n match_calculator: Callable[[list, list], float]) -> list:\n\n # Check descriptors dimensions are 2\n assert desc1.ndim == 2, \"Descriptor 1 shape is not 2\"\n assert desc2.ndim == 2, \"Descriptor 2 shape is not 2\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute the Symmetrical Epipolar Distance.
def sym_epipolar_dist(corr, F):
    corrs_temp = np.zeros(4)
    corrs_temp[1] = corr[0]
    corrs_temp[0] = corr[1]
    corrs_temp[2] = corr[3]
    corrs_temp[3] = corr[2]
    corr = corrs_temp
    p1 = np.hstack([corr[:2], 1])
    p2 = np.hstack([corr[2:], 1])
    first_term = (F @ p1)[:-1]
    second_term = (F.T @ p2)...
[ "def find_hypotenuse(self):\n return sqrt(pow(self.opposite, 2) + pow(self.adjacent, 2))", "def euclidean_degree(self):\n if self.is_zero():\n raise ValueError(\"euclidean degree not defined for the zero element\")\n from sage.rings.all import ZZ\n return ZZ....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Main function that, with the assistance of helper functions, finds all variable declarations matching regex_string inside the file and returns them in a list.
def parse_file(self, file_name):
    with open(file_name, "r") as input_file:
        file_contents = input_file.read()
    """
    Regex is done on line by line basis - to ensure that irrespective of
    the formatting all docstrings are identified and all variable
    specifications are foun...
[ "def regex_findall_variables(content):\n try:\n vars_list = []\n for var_tuple in variable_regex_compile.findall(content):\n vars_list.append(\n var_tuple[0] or var_tuple[1]\n )\n return vars_list\n except TypeError:\n return []", "def identif...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Attempts to find the specified regex_string in a docstring. Returns True if matched, False otherwise.
def docstring_contains_variable_declaration(self, docstring):
    found = re.search(self.regex_string, docstring)
    if found == None:
        return False
    return True
[ "def check_found_regex(self, string, regex):\n if string != None:\n if re.search(regex, string) != None:\n return True\n return False", "def regex_found(my_string:str, pattern:str=r\".*\") -> bool:\n ans = match(pattern, my_string)\n logger.debug(f'Results of {pattern} within {my_s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
ultratio = chrlength * uniqueratio / (chrtotalreads - frcount)
def ultratio(chrlength, uniqueratio, chrtotalreads, frcount):
    ultratio = chrlength * uniqueratio / (chrtotalreads - frcount)
    return ultratio
[ "def overhead(readings):\n return 100.0 * (int(readings[0]) + int(readings[1])) / (int(readings[2]) + int(readings[3]))", "def automated_readablitity_index(self) -> float:\n l = self.count_letters()\n w = self.count_words()\n s = self.count_sentences()\n return 4.71 * (l / w) + ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
For a single string, checks if it is one of the FlowFrame's channel names. For a collection, checks if all its items are a channel name.
def __contains__(self, item):
    if isinstance(item, basestring):
        return item in self._channels
    elif hasattr(item, '__iter__'):
        return all(ch in self._channels for ch in item)
    else:
        return False
[ "def is_channel(self, channel_name):\n if ',' in channel_name or ' ' in channel_name:\n return False\n\n if len(channel_name) > self.maximum_channel_length:\n return False\n\n for prefix in self['chantypes']:\n if channel_name.startswith(prefix):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates a new FlowFrame with copy of this one's data. The copy will not be linked to the same FCS file.
def copy(self, ID=None):
    if ID is None:
        match = re.match(r'^(.*-copy)(\d*)$', self._ID)
        if match is not None:
            ID = match.group(1) + str(int(match.group(2) or 1) + 1)
        else:
            ID = self._ID + '-copy'
    return FlowFrame(self.data.copy(), ID=ID)
[ "def flow_new(self, flow, frame):\n pass", "def clone(self, new_fsource):\n return Stream(self._identifier, new_fsource, self._name, self._size, self._stream_type)", "def __copy__(self):\n return NodeFlows(self.T, self.f_to, self.f_from, self.ratio, self.intra)", "def copy(self):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates a new FlowFrame from a subset of this one's events. The data is copied (changing the new data won't affect the old) and the new FlowFrame won't be linked to the same FCS file.
def filter(self, which, **kwargs):
    # Get data (if lazy loading, want to avoid accessing this attribute twice)
    data = self.data
    # Get filtered data frame
    df = data.iloc[which]
    # Some indexing methods return a *view* on the original data, meaning
    # changes to one will affect the other. We don't want thi...
[ "def flow_new(self, flow, frame):\n pass", "def copy_events(self, fsock, input, new_wgt):\n \n\n new_wgt = self.get_fortran_str(new_wgt)\n old_line = \"\"\n nb_evt =0 \n for line in open(input):\n if old_line.startswith(\"<event>\"):\n nb_evt+=1\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
cast to str instead of HttpUrl model instance
def url_to_string(cls, v, values, **kwargs):
    return str(v)
[ "def to_url(value):\n return str(value)", "def test_model(self):\n url = Urls('https://blog.gds-gov.tech/terragrunt-in-retro-i-would-have-done-these-few-things-e5aaac451942', 'http://172.104.63.163/n4lm9')\n self.assertEqual(url.long,'https://blog.gds-gov.tech/terragrunt-in-retro-i-would-have...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert the docs into a feature matrix of length max_length.
def get_features(docs, max_length):
    docs = list(docs)
    Xs = numpy.zeros((len(docs), max_length), dtype='int32')
    for i, doc in enumerate(docs):
        j = 0
        for token in doc:
            vector_id = token.vocab.vectors.find(key=token.orth)
            if vector_id >= 0:
                Xs[i, j] = vec...
[ "def get_feature_matrix(self) -> torch.Tensor:\n if self.feature_extractor.doc_features:\n x_docs = []\n for doc in sorted(self.iterate_indexed_docs(), key=lambda doc: doc.doc_id):\n # TODO: support more features\n # TODO: use DictVectorizer\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
add two numbers of different bases and return the sum
def flexibase_add(str1, str2, base1, base2):
    n1 = base_to_int(str1, base1)
    n2 = base_to_int(str2, base2)
    #result = int_to_base(tmp, base1)
    return n1 + n2
[ "def flexibase_add(str1, str2, base1, base2):\n result = int_to_base(tmp, base1)\n return result", "def add(b1, b2):\n n1=bin_to_dec(b1)\n n2 = bin_to_dec(b2)\n b_sum = dec_to_bin(n1+n2)\n return b_sum", "def add_numbers(a,b):\r\n return a+ b", "def add_wo_carry(n1, n2):\n l1 = [int(x) for...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a new drink.
def drinks_new():
    return render_template('drinks_new.html', drink={})
[ "def add_drink(self, drink_name, beans_cost, water_cost, milk_cost):\r\n self.available_drinks.append(Drinks(drink_name,\r\n beans_cost,\r\n water_cost,\r\n milk_cost\r\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Submit a new drink.
def drinks_submit():
    drink = {
        'name': request.form.get('name'),
        'price': request.form.get('price'),
        'description': request.form.get('description'),
        'images': request.form.get('images').split()
    }
    drink_id = drinks_collection.insert_one(drink).inserted_id
    return redirect(u...
[ "def stock_submit():\n item = {\n 'title': request.form.get('title'),\n 'description': request.form.get('description'),\n 'cost': request.form.get('cost')\n }\n stock.insert_one(item)\n return redirect(url_for('stock_index'))", "def test_create_one_drink(self):\n\n response...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Show a single drink.
def drinks_show(drink_id):
    drink = drinks_collection.find_one({'_id': ObjectId(drink_id)})
    return render_template('drinks_show.html', drink=drink)
[ "def viewDrink(bar_id, drink_id):\n drink = session.query(Drink).filter_by(id=drink_id).one()\n return render_template('view_drink.html', drink=drink)", "def drink_detail_view(request, pk):\n try:\n drink = Drink.objects.get(pk=pk)\n except Drink.DoesNotExist:\n return redirect(to=revers...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Show the edit form for a drink.
def drinks_edit(drink_id):
    drink = drinks_collection.find_one({'_id': ObjectId(drink_id)})
    return render_template('drinks_edit.html', drink=drink)
[ "def editDrink(bar_id, drink_id):\n editedDrink = session.query(Drink).filter_by(id=drink_id).one()\n if editedDrink.user_id != login_session['user_id']:\n flash('You are not authorized to edit this drink!')\n return redirect(url_for('showMenu', bar_id=bar_id))\n if request.method == 'POST':\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Submit an edited drink.
def drinks_update(drink_id):
    updated_drink = {
        'name': request.form.get('name'),
        'price': request.form.get('price'),
        'description': request.form.get('description'),
        'images': request.form.get('images').split()
    }
    drinks_collection.update_one(
        {'_id': ObjectId(drink_id)...
[ "def editDrink(bar_id, drink_id):\n editedDrink = session.query(Drink).filter_by(id=drink_id).one()\n if editedDrink.user_id != login_session['user_id']:\n flash('You are not authorized to edit this drink!')\n return redirect(url_for('showMenu', bar_id=bar_id))\n if request.method == 'POST':\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert pydantic object to pandas dataframe with 1 row.
def to_df(self):
    return pd.DataFrame([dict(self)])
[ "def to_df(self):\n return pd.DataFrame([dict(self)])", "def to_dataframe(self):\n return pd.DataFrame(self.to_dict())", "def to_df(self) -> pd.DataFrame:\n return self.stream(DataFrame())", "def to_dataframe(self) -> pd.DataFrame:\n # noinspection PyTypeChecker\n return sup...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute the entropic regularized Wasserstein barycenter of distributions A. (Adapted from the ot.bregman.barycenter_sinkhorn() function; the only difference is that this function returns transport plans in addition to the barycenter.)
def barycenter_sinkhorn(A, M, reg, weights=None, numItermax=1000, stopThr=1e-4, verbose=False, log=False):
    if weights is None:
        weights = np.ones(A.shape[1]) / A.shape[1]
    else:
        assert(len(weights) == A.shape[1])
    if log:
        log = {'err': []}
    # M = M/np.media...
[ "def barycenter(particles, w, n_particles, particles_sr=np.zeros(0)):\n # Calculate covariances\n covs = np.zeros((n_particles, 2, 2))\n covs[:, 0, 0] = particles[:, 2]\n covs[:, 0, 1] = particles[:, 3]\n covs[:, 1, 0] = particles[:, 3]\n covs[:, 1, 1] = particles[:, 4]\n\n # Calculate Barycent...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test the internal _parse_image_meta method. Feed it an 'orphan' image as we get it from imgadm list -j.
def test_parse_image_meta_orphan(image_orphan):
    ret = {"Error": "This looks like an orphaned image, image payload was invalid."}
    assert _parse_image_meta(image_orphan, True) == ret
[ "def test_list_image_metadata(self):\n pass", "def test_parse_image_meta_native(image_native):\n ret = {\n \"description\": (\"A SmartOS image pre-configured for building pkgsrc packages.\"),\n \"name\": \"pkgbuild\",\n \"os\": \"smartos\",\n \"published\": \"2018-04-09T08:25...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test the internal _parse_image_meta method. Feed it a 'native' image as we get it from imgadm list -j.
def test_parse_image_meta_native(image_native):
    ret = {
        "description": ("A SmartOS image pre-configured for building pkgsrc packages."),
        "name": "pkgbuild",
        "os": "smartos",
        "published": "2018-04-09T08:25:52Z",
        "source": "https://images.joyent.com",
        "version": "18.1.0...
[ "def test_parse_image_meta_lx(image_lx):\n ret = {\n \"description\": (\n \"Container-native Ubuntu 16.04 64-bit image. Built to run on \"\n \"containers with bare metal speed, while offering all the \"\n \"services of a typical unix host.\"\n ),\n \"name\": ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test the internal _parse_image_meta method. Feed it an 'lx' image as we get it from imgadm list -j.
def test_parse_image_meta_lx(image_lx):
    ret = {
        "description": (
            "Container-native Ubuntu 16.04 64-bit image. Built to run on "
            "containers with bare metal speed, while offering all the "
            "services of a typical unix host."
        ),
        "name": "ubuntu-16.04",
        ...
[ "def test_list_image_metadata(self):\n pass", "def test_parse_image_meta_native(image_native):\n ret = {\n \"description\": (\"A SmartOS image pre-configured for building pkgsrc packages.\"),\n \"name\": \"pkgbuild\",\n \"os\": \"smartos\",\n \"published\": \"2018-04-09T08:25...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test the internal _parse_image_meta method. Feed it a 'zvol' image as we get it from imgadm list -j.
def test_parse_image_meta_zvol(image_zvol):
    ret = {
        "description": (
            "Ubuntu 18.04 LTS (20180808 64-bit). Certified Ubuntu Server "
            "Cloud Image from Canonical. For kvm and bhyve."
        ),
        "name": "ubuntu-certified-18.04",
        "os": "linux",
        "published": "2018-...
[ "def test_parse_image_meta_lx(image_lx):\n ret = {\n \"description\": (\n \"Container-native Ubuntu 16.04 64-bit image. Built to run on \"\n \"containers with bare metal speed, while offering all the \"\n \"services of a typical unix host.\"\n ),\n \"name\": ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test the internal _parse_image_meta method. Feed it a 'docker' image as we get it from imgadm list -j.
def test_parse_image_meta_docker(image_docker):
    ret = {
        "description": (
            "Docker image imported from "
            "busybox42/zimbra-docker-centos:latest on "
            "2019-03-23T01:32:25.320Z."
        ),
        "name": "busybox42/zimbra-docker-centos:latest",
        "os": "linux",
        ...
[ "def test_parse_image_meta_lx(image_lx):\n ret = {\n \"description\": (\n \"Container-native Ubuntu 16.04 64-bit image. Built to run on \"\n \"containers with bare metal speed, while offering all the \"\n \"services of a typical unix host.\"\n ),\n \"name\": ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a valid filename for the image. If there are multiple files in the database, they are appended with ',{number}', for example image.png,0. This method will put the number between the filename and the extension, so 'image.png,0' becomes 'image0.png'. Returns: str - the converted filename.
def get_filename(self) -> str:
    fname = self.url.split("/")[-1]
    if "," in fname:
        _fname, _i = fname.split(",")
        _split_fname = _fname.split(".")
        _name = _split_fname[0]
        _extension = _split_fname[-1]
        return _name + _i + "." + _extension
    el...
[ "def filename(self) -> str:\n return \"{:0>8}.jpg\".format(self.id)", "def imId2name(self, im_id):\n \n if isinstance(im_id, int):\n name = str(im_id).zfill(self.STR_ID_LEN) + '.jpg'\n elif isinstance(im_id, str):\n name = im_id + '.jpg'\n else:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Loads image from url. Returns a PIL Image object.
def from_url(self) -> PngImagePlugin.PngImageFile:
    response = requests.get(self.url)
    img = Image.open(BytesIO(response.content))
    return img
[ "def load_image(url):\n\tfd = urllib2.urlopen(url)\n\treturn StringIO.StringIO(fd.read())", "def download_pil_image(self, url):\r\n return Image.open(urlopen(url))", "def fetch_image(url: str) -> Image.Image:\n r = httpx.get(url)\n if not r.status_code == httpx.codes.OK:\n raise HTTPExceptio...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Reduce the amount of whitespace around an image.
def reduce_whitespace(self, border: int = 5) -> None:
    if self.img is None:
        raise FileExistsError("Load an image first with from_url.")
    pix = np.asarray(self.img)
    pix = pix[:, :, 0:3]  # Drop the alpha channel
    idx = np.where(pix - 255)[0:2]  # Drop the color when finding edg...
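The snippet is cut off while locating the image edges. A self-contained sketch of the whole whitespace trim, assuming the intent is to crop to the bounding box of non-white pixels plus a small border (the function name and return-a-copy behaviour are assumptions; the original appears to work on self.img in place):

import numpy as np
from PIL import Image

def trim_whitespace(img: Image.Image, border: int = 5) -> Image.Image:
    pix = np.asarray(img.convert("RGB"))           # drop any alpha channel
    rows, cols = np.where((pix - 255).any(axis=2))  # indices of non-white pixels
    if rows.size == 0:
        return img                                  # blank image: nothing to crop
    top = max(int(rows.min()) - border, 0)
    bottom = min(int(rows.max()) + border, pix.shape[0] - 1)
    left = max(int(cols.min()) - border, 0)
    right = min(int(cols.max()) + border, pix.shape[1] - 1)
    # PIL crop boxes are (left, upper, right, lower), upper-bound exclusive.
    return img.crop((left, top, right + 1, bottom + 1))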
[ "def alt_trim_whitespace(image, delta=80):\n width, height = image.size\n\n x1 = left_line(image, delta)\n x2 = right_line(image, delta)\n y1 = top_line(image, delta)\n y2 = bottom_line(image, delta)\n\n # y1 = max(y1 - 10, 0)\n # y2 = min(y2 + 10, height)\n # x1 = max(x1 - 10, 0)\n # x2 ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
When creating a log, pass all essential information about the instance of the climb; the style will need to be corrected.
def __init__(self, date: dt_date, style: str, partners: list, notes: str, climb: Climb):
    self._date = date
    self._styles = {
        'Lead RP': 'read point',
        'AltLd O/S': 'onsight',
        'Solo O/S': 'onsight',
        'Lead rpt': 'no log',
        'Lead O/S':...
[ "def __init__(self, logarea):\n self.logarea = logarea\n if not logarea:\n return\n logarea.tag_config('info', foreground='green')\n logarea.tag_config('cmd', foreground='blue')\n logarea.tag_config('output', foreground='grey')\n logarea.tag_config('error', foreg...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
If the found style is not in the dictionary of styles then add the style to the dictionary.
def add_style_to_styles(self, style_key: str, style_value: str):
    self._styles[style_key] = style_value
[ "def get_or_add_style(document, style_name, style_type):\n # `get_by_id` returns default style if a style with defined name not found\n\n try:\n style_id = document.styles._get_style_id_from_name(style_name, style_type)\n return document.styles.get_by_id(style_id, style_type), False\n except ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Correct the UKC style to a more readable type of style, using the already created dictionary of styles. If the style isn't already in the dictionary, ask the user what style it is, then add it.
def match_style(self, input_style: str) -> str:
    try:
        # Try to get from the dictionary
        return self.get_style_from_styles(input_style)
    except KeyError:
        # If you get a key error, it is not in the dictionary
        new_style = input(input_style + '\nWhat style is this?')  # Ask the us...
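The except branch is cut off. Given the docstring ("ask the user what style it is, then add"), a plausible completion, written as a stand-alone function over a plain dict (the original is a method using get_style_from_styles and add_style_to_styles):

def match_style(styles: dict, input_style: str) -> str:
    try:
        return styles[input_style]            # try to get from the dictionary
    except KeyError:
        # Not in the dictionary: ask the user, remember the answer, return it.
        new_style = input(input_style + '\nWhat style is this?')
        styles[input_style] = new_style
        return new_style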
[ "def _set_style(style):\n if isinstance(style, (str, dict)):\n return Style(style)\n elif isinstance(style, Style):\n return style\n else:\n return Style()", "def style(style_def):\n if not style_def:\n return {}\n if isinstance(style_def, dict):\n return style_de...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the first Sunday of the given month of the given year. >>> GetFirstSundayOfMonth(2016, 2) 7 >>> GetFirstSundayOfMonth(2016, 3) 6 >>> GetFirstSundayOfMonth(2000, 1) 2
def GetFirstSundayOfMonth(year, month):
    weeks = calendar.Calendar().monthdays2calendar(year, month)
    # Return the first day in the first week that is a Sunday.
    return [date_day[0] for date_day in weeks[0] if date_day[1] == 6][0]
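Why `date_day[1] == 6` picks Sunday: monthdays2calendar yields weeks as lists of (day-of-month, weekday) pairs, where weekday runs Monday=0 .. Sunday=6 and padding days from the neighbouring month get day 0. A quick check against the doctest above:

import calendar

weeks = calendar.Calendar().monthdays2calendar(2016, 2)
print(weeks[0])  # [(1, 0), (2, 1), (3, 2), (4, 3), (5, 4), (6, 5), (7, 6)]
# February 2016 starts on a Monday, so the first Sunday is the 7th,
# matching GetFirstSundayOfMonth(2016, 2) == 7.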
[ "def first_saturday_on_month(today_date=None):\n today_date = today_date or date.today()\n first_day_of_month = date(today_date.year, today_date.month, 1)\n month_range = calendar.monthrange(today_date.year, today_date.month)\n delta = (calendar.SATURDAY - month_range[0]) % 7\n return first_day_of_mo...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the approximate build date given the specific build type. >>> GetBuildDate('default', datetime.datetime(2016, 2, 6, 1, 2, 3))
def GetBuildDate(build_type, utc_now):
    day = utc_now.day
    month = utc_now.month
    year = utc_now.year
    if build_type != 'official':
        first_sunday = GetFirstSundayOfMonth(year, month)
        # If our build is after the first Sunday, we've already refreshed our build
        # cache on a quiet day, so just use that day.
        ...
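The tail of the function is truncated. A hedged sketch of one plausible completion, reusing GetFirstSundayOfMonth from the previous row; the fallback for builds before the first Sunday and the return format are assumptions, not the verified original:

import datetime

def GetBuildDate(build_type, utc_now):
    day, month, year = utc_now.day, utc_now.month, utc_now.year
    if build_type != 'official':
        first_sunday = GetFirstSundayOfMonth(year, month)
        if day >= first_sunday:
            # Already past this month's quiet day: pin the date to it.
            day = first_sunday
        else:
            # Assumption: before this month's first Sunday, fall back to the
            # previous month's first Sunday.
            month -= 1
            if month == 0:
                month, year = 12, year - 1
            day = GetFirstSundayOfMonth(year, month)
    return datetime.date(year, month, day).strftime('%b %d %Y')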
[ "def GetBuildDate(build_type, utc_now):\n day = utc_now.day\n month = utc_now.month\n year = utc_now.year\n if build_type != 'official':\n first_sunday = GetFirstSundayOfMonth(year, month)\n # If our build is after the first Sunday, we've already refreshed our build\n # cache on a quiet day, so just us...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
List current checks on given repo ref.
async def list(app: AppIdentity, repo: str, ref: str):
    repo = RepoName.parse(repo)
    async with aiohttp.ClientSession(
            headers=await app.installation_headers(repo.owner)) as sesh:
        fetch = checks.GetRuns(owner=repo.owner, repo=repo.repo, ref=ref)
        print(await fetch.execute(sesh))
[ "def list(self, repo, ref, user=None):\n return self._get(\n self.make_request('statuses.list', user=user, repo=repo, ref=ref))", "def list_refs(refname = None):\r\n argv = ['git', 'show-ref', '--']\r\n if refname:\r\n argv += [refname]\r\n p = subprocess.Popen(argv, preexec_fn =...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a list of stops, sorted by distance from the given point.
def get_stops_sorted( latitude, longitude ):
    returnvalue = []
    stops_file = open( 'google_transit/stops.txt' )
    stops_iter = DictReader( stops_file )
    for stop in stops_iter:
        distance = angular_distance( latitude, longitude, float( stop[ 'stop_lat' ] ), float( stop[ 'stop_lon' ]))
        stop[ 'distance' ] = dis...
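The loop body is truncated just as the computed distance is stored. A sketch of the complete function, leaning on the angular_distance helper defined in the next row; the sort key is an assumption consistent with the docstring:

from csv import DictReader

def get_stops_sorted(latitude, longitude):
    stops = []
    with open('google_transit/stops.txt') as stops_file:
        for stop in DictReader(stops_file):
            # Attach each stop's distance from the query point, then sort on it.
            stop['distance'] = angular_distance(
                latitude, longitude,
                float(stop['stop_lat']), float(stop['stop_lon']))
            stops.append(stop)
    return sorted(stops, key=lambda s: s['distance'])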
[ "def sortDistance(self, point = (-1, -1)):\n return FeatureSet(sorted(self, key = lambda f: f.distanceFrom(point)))", "def order_by_distance(center, points):\n pass", "def sort_points(point, cloud):\n minsq = [distance_point_point_sqrd(p, point) for p in cloud]\n return sorted(zip(minsq, cloud, ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the angular distance between two points
def angular_distance( lat1, lon1, lat2, lon2 ):
    pi_180 = pi / 180
    return acos( cos( lat1 * pi_180 ) * cos( lon1 * pi_180 ) * cos( lat2 * pi_180 ) * cos( lon2 * pi_180 )
               + cos( lat1 * pi_180 ) * sin( lon1 * pi_180 ) * cos( lat2 * pi_180 ) * sin( lon2 * pi_180 )
               + sin( lat1 * pi_180 ) * sin( lat2 * pi_180 ))
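The three-term sum is the dot product of the two points as unit vectors, so by cos(a)cos(b) + sin(a)sin(b) = cos(a - b) it collapses to the spherical law of cosines. A numeric sanity check against that closed form, reusing angular_distance from above (the sample coordinates are arbitrary):

from math import acos, cos, sin, radians, isclose

def central_angle(lat1, lon1, lat2, lon2):
    # acos(sin(p1)sin(p2) + cos(p1)cos(p2)cos(l1 - l2)), inputs in degrees.
    p1, l1, p2, l2 = map(radians, (lat1, lon1, lat2, lon2))
    return acos(sin(p1) * sin(p2) + cos(p1) * cos(p2) * cos(l1 - l2))

# Boston vs. New York: both forms agree to floating-point precision.
assert isclose(angular_distance(42.36, -71.06, 40.71, -74.01),
               central_angle(42.36, -71.06, 40.71, -74.01), rel_tol=1e-9)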
[ "def angular_dist(p1, p2): # theta1, theta2, phi1, phi2):\n return np.arccos(p1['coszen'] * p2['coszen'] + p1['sinzen'] * p2['sinzen'] * np.cos(p1['az'] - p2['az']))", "def angular_distance(r1, d1, r2, d2):\n\n if np.isreal(r1):\n r1u = u.deg\n else:\n r1u = u.hourangle\n\n if np.isreal(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add the close image to this button.
def __add_icon_to_button(self):
    self.set_relief(gtk.RELIEF_NONE)
    icon_box = gtk.HBox(False, 0)
    image = gtk.Image()
    image.set_from_stock(gtk.STOCK_CLOSE, gtk.ICON_SIZE_MENU)
    settings = gtk.Widget.get_settings(self)
    width, height = gtk.icon_size_l...
[ "def click_close_button(self):\n self.click_img(target_img=SETTINGS['img_paths']['buttons']['close'])", "def make_close_button (self):\n debug (\"In VizObject::make_close_button ()\")\n but = Tkinter.Button (self.root, text=\"Close\", underline=0,\n command=self....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Run redmapper on a single healpix pixel. This method will check if files already exist, and will skip any steps whose output files already exist. The border radius will automatically be calculated based on the richest possible cluster at the lowest possible redshift. All files will be placed in self.config.outpath (see self.__init__...
def run(self):

    # need to think about outpath

    # Make sure all files are here and okay...

    if not self.config.galfile_pixelized:
        raise ValueError("Code only runs with pixelized galfile.")

    self.config.check_files(check_zredfile=True, check_bkgfile=True, check_bkgfile_componen...
[ "def run(self):\n if not self.config.galfile_pixelized:\n raise ValueError(\"Code only runs with pixelized galfile.\")\n\n self.config.check_files(check_zredfile=False, check_bkgfile=True, check_bkgfile_components=False, check_parfile=True, check_zlambdafile=True)\n\n # Compute the b...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Run runcat on a single healpix pixel. All files will be placed in self.config.outpath (see self.__init__)
def run(self):
    if not self.config.galfile_pixelized:
        raise ValueError("Code only runs with pixelized galfile.")

    self.config.check_files(check_zredfile=False, check_bkgfile=True, check_bkgfile_components=False, check_parfile=True, check_zlambdafile=True)

    # Compute the border size
    ...
[ "def run(self):\n\n # need to think about outpath\n\n # Make sure all files are here and okay...\n\n if not self.config.galfile_pixelized:\n raise ValueError(\"Code only runs with pixelized galfile.\")\n\n self.config.check_files(check_zredfile=True, check_bkgfile=True, check_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Run zmask on a single healpix pixel. This method will check if files already exist, and will skip any steps whose output files already exist. The border radius will automatically be calculated based on the richest possible cluster at the lowest possible redshift. All files will be placed in self.config.outpath (see self.__init__)
def run(self):
    if not self.config.galfile_pixelized:
        raise ValueError("Code only runs with pixelized galfile.")

    self.config.check_files(check_zredfile=False, check_bkgfile=True,
                            check_parfile=True, check_randfile=True)

    # Compute the border size
    ...
[ "def run(self):\n if not self.config.galfile_pixelized:\n raise ValueError(\"Code only runs with pixelized galfile.\")\n\n self.config.check_files(check_zredfile=True, check_bkgfile=True, check_bkgfile_components=True, check_parfile=True, check_zlambdafile=True)\n\n # Compute the bor...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Run zscan on a single healpix pixel. All files will be placed in self.config.outpath (see self.__init__)
def run(self):
    if not self.config.galfile_pixelized:
        raise ValueError("Code only runs with pixelized galfile.")

    self.config.check_files(check_zredfile=True, check_bkgfile=True, check_bkgfile_components=True, check_parfile=True, check_zlambdafile=True)

    # Compute the border size
    ...
[ "def run(self):\n if not self.config.galfile_pixelized:\n raise ValueError(\"Code only runs with pixelized galfile.\")\n\n self.config.check_files(check_zredfile=False, check_bkgfile=True,\n check_parfile=True, check_randfile=True)\n\n # Compute the bor...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Load a DataFrame (df) to a TD table.
def load_td_table(tab_df_list, if_exists='append'):
    try:
        dest_table, dataframe, client = tab_df_list
        if dataframe.empty:
            print(f'Table {dest_table} has no new data to load...')
        else:
            # Converting 'NaN' to NULL
            dataframe = dataframe.where(pd.notnull(datafra...
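The NaN-to-NULL conversion is cut off mid-line and the actual load call never appears. If the client is a pytd.Client (an assumption; the snippet never shows its type), the rest might look like this sketch:

import pandas as pd

def load_td_table(tab_df_list, if_exists='append'):
    dest_table, dataframe, client = tab_df_list
    if dataframe.empty:
        print(f'Table {dest_table} has no new data to load...')
        return
    # Convert NaN to None so the values land as NULLs in TD.
    dataframe = dataframe.where(pd.notnull(dataframe), None)
    # Assumption: client is a pytd.Client, whose load_table_from_dataframe
    # accepts an if_exists keyword ('error', 'overwrite', 'append', 'ignore').
    client.load_table_from_dataframe(dataframe, dest_table, if_exists=if_exists)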
[ "def load_df(df, table_name, dbapi='sqlite:///insurance.db', \n if_exists='replace'):\n con = create_engine(dbapi, echo=False)\n print('Inserting {} rows into {}'.format(len(df.index), table_name))\n df.to_sql(table_name, con=con, if_exists=if_exists, index=False)", "def import_from_dataframe(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
From complete_routes.txt generated by fetch_mbta_routes, outputs a list of possible MBTA route_ids.
def mbta_route_list():
    f = open('complete_routes.txt', 'r')
    complete_routes = ast.literal_eval(f.read())
    #creates list of all route_ids in MBTA system
    subway_route_list = []
    for x in range(len(complete_routes['mode'])):
        if complete_routes['mode'][x]['mode_name'] == 'Subway':
            for...
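The inner loop is truncated. A sketch of a likely completion, assuming each Subway mode entry carries a 'route' list with 'route_id' fields (the MBTA v2 routes shape; an assumption here):

import ast

def mbta_route_list():
    with open('complete_routes.txt', 'r') as f:
        complete_routes = ast.literal_eval(f.read())
    subway_route_list = []
    for mode in complete_routes['mode']:
        if mode['mode_name'] == 'Subway':
            for route in mode['route']:
                subway_route_list.append(route['route_id'])
    return subway_route_list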
[ "def get_routes():\n\n return Db().get_line_ids()", "def get_active_routes():\n\n # call api to get all current routes\n #url = 'http://restbus.info/api/agencies/sf-muni/routes'\n url = 'http://webservices.nextbus.com/service/publicJSONFeed?command=routeList&a=sf-muni'\n r = requests.get(url)\n\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }