Dataset columns:

| Column          | Type   | Lengths / values    |
|-----------------|--------|---------------------|
| query           | string | lengths 9 to 3.4k   |
| document        | string | lengths 9 to 87.4k  |
| metadata        | dict   |                     |
| negatives       | list   | lengths 4 to 101    |
| negative_scores | list   | lengths 4 to 101    |
| document_score  | string | lengths 3 to 10     |
| document_rank   | string | 102 distinct values |
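Each record pairs a natural-language query with a matching code snippet (`document`) and a list of mined hard negatives with aligned retrieval scores. As a quick orientation, the sketch below loads a split with the Hugging Face `datasets` library and inspects one row; the repository id `org/code-search-triplets` and the `train` split name are placeholders, not this dataset's real identifiers.

```python
# Minimal sketch, assuming the dataset is published on the Hugging Face Hub.
# "org/code-search-triplets" and the "train" split are hypothetical names.
from datasets import load_dataset

ds = load_dataset("org/code-search-triplets", split="train")

row = ds[0]
print(row["query"])                 # natural-language description of the function
print(row["document"])              # the positive code snippet
print(len(row["negatives"]))        # between 4 and 101 hard-negative snippets
print(len(row["negative_scores"]))  # one retrieval score per negative
print(row["document_score"], row["document_rank"])
```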
Example row 1

query: Returns true if the current user is following the desired user

document: def is_following_by_username(self, id): return self.followed.filter(followers.c.followed_id == id).count() > 0

metadata: { "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }

negatives (followed by negative_scores):
[ "def is_user_following(self, user_id):\n return user_id in self.following", "def is_following(self, you, them):\n if self.filter(from_user=you, to_user=them).count() > 0:\n return True\n return False", "def is_following(self, user):\n return self.followed.filter_by(\n followed_id=user.id).first() is not None", "def is_following(self, user):\n return self.followed.filter(followers.c.followed_id == user.id).count() > 0", "def is_following(self, user):\n return self.followed.filter(followers.c.followed_id == user.id).count() > 0", "def is_follower(self, you, them):\n if self.filter(from_user=them, to_user=you).count() > 0:\n return True\n return False", "def is_followed_by(self, user):\n return self.followers.filter_by(\n follower_id=user.id).first() is not None", "def is_authenticated_user_following(self, username=None,id=None):\n if not self.is_authenticated:\n raise PicplzError(\"is_authenticated_user_following requires an authenticated API instance\")\n \n return None", "def user_follow(self, user_id: int) -> bool:\n assert self.user_id, \"Login required\"\n user_id = int(user_id)\n if user_id in self._users_following.get(self.user_id, []):\n self.logger.debug(\"User %s already followed\", user_id)\n return False\n data = self.with_action_data({\"user_id\": user_id})\n result = self.private_request(f\"friendships/create/{user_id}/\", data)\n if self.user_id in self._users_following:\n self._users_following.pop(self.user_id) # reset\n return result[\"friendship_status\"][\"following\"] is True", "def belongs_to_user(self) -> bool:\n return flask.g.user is not None and flask.g.user.id == getattr(\n self, 'user_id', False\n )", "def follows(self, other):\n\t\treturn self.followed.filter(followers.c.followed_by == other.id).count() > 0", "def follow_user(cls, user, following):\r\n pass", "def test_is_following(self):\n\n self.u1.following.append(self.u2)\n db.session.commit()\n\n self.assertTrue(self.u1.is_following(self.u2))\n self.assertFalse(self.u2.is_following(self.u1))", "def is_user_player(self, user):\n return self.user == user", "async def is_following(self, TargetId: int):\n\n data = {\"targetUserIds\": [TargetId]}\n e = await self.request.request(url=f'https://friends.roblox.com/v1/user/following-exists',\n method='post',\n data=data)\n return e['followings'][0]['isFollowing']", "def follow_user(cls, user, following):\n pass", "def test_is_followed_by(self):\n\n self.u1.followers.append(self.u2)\n db.session.commit()\n\n self.assertTrue(self.u1.is_followed_by(self.u2))\n self.assertFalse(self.u2.is_followed_by(self.u1))", "def same_user_or_shiftleader(self, user):\n try:\n return (\n self.get_object().userid == user\n or user.is_superuser\n or user.userprofile.has_shift_leader_rights\n )\n except UserProfile.DoesNotExist:\n return False", "def is_following(source_id, destination_id):\n if Forward.objects.filter(source_id=source_id,\n destination_id=destination_id):\n return True\n return False", "def has_user(self, user): # pylint: disable=unused-argument\r\n return False", "def author_following(self):\n\t\tpass", "def follow(self, user_index, following_index):\n if user_index >= self.num_users or following_index >= self.num_users:\n raise ValueError(\n f\"Number of users is {self.num_users}, but indices \"\n f\"{user_index} and {following_index} were requested.\"\n )\n if self.users_hat[following_index, user_index] == 0:\n self.users_hat[following_index, user_index] = 1\n elif self.is_verbose():\n self.log(f\"User {following_index} was already following user {user_index}\")", 
"def user_playlist_is_following(self, playlist_id, user_ids, **kwargs):\n return self._get(\n API.PLAYLIST_FOLLOWERS_CONTAINS.value.format( # pylint: disable=no-member\n playlist_id=playlist_id\n ),\n ids=\",\".join(user_ids),\n **kwargs,\n )", "def follow(self, user):\n if not self.is_following(user):\n self.followed.append(user)\n return self", "def follow(self, user):\n if not self.is_following(user):\n self.followed.append(user)\n return self", "def user_present(ctx: Context, channel: TextChannel) -> bool:\n for member in channel.members:\n if member.id == ctx.author.id:\n return True\n\n return False", "def _has_following_ratio_of(self, user: _InstagramUser, ratio: float) -> bool:\n follower_count = len(self._user_follower_info(uid=user.uid))\n following_count = len(self._user_following_info(uid=user.uid))\n\n if follower_count == 0:\n return True\n\n return (following_count / follower_count) > ratio", "def current_user_has_access(self):\n return self.user_has_access(users.get_current_user())", "def user_follow():\n data = request.get_json(force=True)\n follower = User.query.get(data['follower'])\n following = User.query.get(data['following'])\n follower.followcheck.append(following)\n db.session.commit()\n return {'followed': True}", "def checkIfUserIsCurrent(self,userId : str) -> bool:\n\n if userId == userId[0]:\n return True\n else:\n return False", "def is_user_playing(self, user):\n return user in self.active_games", "def followed_by(self):\r\n return relationships.FollowedBy(self)", "def doesfollow(user):\n return jsonify({\n 'follows': isFollowed(g.user,user)\n })", "def followed_by(self):\n return relationships.FollowedBy(self)", "def is_owned_by(self, user):\n return user and user.id == self.user_id", "def user_is_same(current_user_number, past_user_numbers):\n if current_user_number == past_user_numbers:\n return True\n else:\n return False", "def ref_user_flag(self):\n try:\n ref = User.objects.get(\n associated_emails__email__iexact=self.reference_email,\n associated_emails__is_verified=True)\n return True\n except ObjectDoesNotExist:\n return False", "def follow(self, user):\n if not self.is_following(user):\n f = Follow(follower=self, followed=user)\n db.session.add(f)", "def test_following_existing_user(self):\n response = self.client.post(\n reverse(\n 'follow',\n kwargs={'username': self.followed['user'].get('username')}\n )\n )\n\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)", "def is_user_change_required(self):\n return self.__running_user != self.__desired_user", "def follow_user(self, user):\n self.nav_user(user)\n follow_button = self.driver.find_element_by_xpath(\n \"//button[contains(text(), 'Follow')]\")\n follow_button.click()\n time.sleep(1)\n self.driver.get(self.base_url)", "def following(self):\n return self.data.get(\"following\")", "def check_active(self, user):\r\n if not self.require_active:\r\n # Ignore & move on.\r\n return True\r\n\r\n return user.is_active", "def is_me(self, m):\n return m.author == self.client.user", "def test_func(self):\n answer = self.get_object()\n return True if self.request.user == answer.author or self.request.user.is_superuser else False", "def is_user_already_followed(followed_user_id, user_id):\n result = Follow.objects.filter(followed_user=followed_user_id,\n user=user_id).exists()\n return result", "def follow(request, usertofollow):\n to_follow = Member.objects.get(user__username=usertofollow)\n user = Member.objects.get(user=request.user)\n user.following.add(to_follow)\n user.save()\n return 
redirect(request.META['HTTP_REFERER'])", "def _is_follow_request(environ, result):\n r = Request(environ)\n if r.params.get(\"action\") == \"follow\":\n return True\n else:\n return False", "def was_followed(sender, instance, created, **kwargs):\n\n sendr = User.objects.get(id=instance.user_id)\n followed = User.objects.get(id=instance.followed_user_id)\n if created:\n notify.send(sender=sendr, recipient=followed, verb='followed',\n description=\"{} followed you.\".format(sendr.username))", "def is_visible_to(self, user):\n return True", "def user_unfollow(self, user_id: int) -> bool:\n assert self.user_id, \"Login required\"\n user_id = int(user_id)\n data = self.with_action_data({\"user_id\": user_id})\n result = self.private_request(f\"friendships/destroy/{user_id}/\", data)\n if self.user_id in self._users_following:\n self._users_following[self.user_id].pop(user_id, None)\n return result[\"friendship_status\"][\"following\"] is False", "def get_is_self(self, obj: Profile) -> bool:\n request: HttpRequest = self.context.get('request')\n if request:\n if request.user.is_authenticated:\n return obj == request.user.profile\n return False", "def save_following(self, user, path=None):\n # Redis has an end_cursor if we've collected this data before\n end_cursor = self.redis.get(''.join(['gh:', user.login, ':following:endCursor']))\n if end_cursor:\n end_cursor = end_cursor.decode('utf-8')\n end_cursor = ''.join(['\"', end_cursor, '\"'])\n following = u.following(first=100, after=end_cursor)\n else:\n following = u.following(first=100)\n if not following:\n return False\n while True:\n if following['data']['user']['following']['edges']:\n index = ''.join(['gh_following-', self.timestamp])\n self._write_to_datastore(index=index,\n doc_type='GithubFollowing',\n document=following,\n login=user.login,\n path=path)\n has_next_page = following['data']['user']['following']['pageInfo']['hasNextPage']\n end_cursor = following['data']['user']['following']['pageInfo']['endCursor']\n if has_next_page:\n following = u.following(first=100, after=end_cursor)\n else:\n # Cache the end_cursor where we last collected data\n self.redis.set(''.join(['gh:', u.login, ':following:endCursor']), end_cursor)\n break\n else:\n break\n\n return True", "def test_person_is_following(self):\n person = Person.objects.create(\n username='tom', email='tom@gmail.com', password='fake_password'\n )\n self.assertFalse(person.is_following(1))\n show = FollowedShows.objects.create(\n user=person,\n show_name='show1',\n show_id=1,\n air_days='monday, tuesday',\n air_time='10:00',\n summary='summary here',\n network='network here'\n )\n self.assertTrue(person.is_following(show.show_id))", "def check_inflight_already_running(self, user: Identifier) -> bool:\n with self._lock:\n for flow in self.in_flight:\n if flow.requestor == user:\n return True\n return False", "def test_user_already_followed(self):\n self.authorize_user(self.user)\n with self.settings(\n EMAIL_BACKEND='django.core.mail.backends.locmem.EmailBackend'):\n self.register_user(self.user1)\n self.client.post(self.follow_url, format='json')\n response2 = self.client.post(self.follow_url, format='json')\n self.assertEqual(response2.content,\n b'{\"detail\": {\"error\": \"user already followed\"}}')\n self.assertEqual(response2.status_code, status.HTTP_400_BAD_REQUEST)", "def can_add_player(self, user):\n user_profile = user.get_profile()\n if user_profile.credits < self.entrance_fee:\n return False\n if self.is_user_playing(user):\n return False\n return True", "def 
follow(self, other):\n\t\tif not self.follows(other):\n\t\t\tself.followed.append(other)", "def test_func(self):\n if not self.request.user.is_authenticated:\n return False\n if self.request.user.is_staff:\n return True\n return self.get_user() == self.request.user", "def follow_user(self:'InstaClient', user:str, nav_to_user:bool=True):\n # Check User Vadility\n profile = self.get_profile(user)\n if not profile:\n raise InvalidUserError(user)\n\n # Navigate to User Page\n self._nav_user(user, check_user=False)\n \n if self._check_existence(EC.presence_of_element_located((By.XPATH, Paths.MESSAGE_USER_BTN))):\n # User already followed\n pass\n else:\n follow_button = self._find_element(EC.presence_of_element_located((By.XPATH, Paths.FOLLOW_BTN)), url=ClientUrls.NAV_USER.format(user))\n self._press_button(follow_button)\n profile.requested_by_viewer = True\n return profile", "def logged_in(self):\n return self.user is not None", "def is_registered(self):\n if self.user == getpass.getuser():\n return True\n else:\n return False", "def is_participant(self):\n if self.user is None:\n return False\n if unicode(self.user._id) in self.barcamp.event.participants:\n return True\n return False", "def authorizes(self, user):\n return self.owner == user or self.workers.filter(pk=user.id).exists()", "def accept_follow_request(request):\n username = request.POST[\"user\"]\n requester = get_user_from_username(request.user, username)\n\n try:\n follow_request = models.UserFollowRequest.objects.get(\n user_subject=requester, user_object=request.user\n )\n except models.UserFollowRequest.DoesNotExist:\n # Request already dealt with.\n return redirect(request.user.local_path)\n follow_request.accept()\n\n return redirect(request.user.local_path)", "def can_update_user(cls, db_tuple, target, actor):\n return target.user_id == actor.user_id", "def test_following(self):\n\n follow1 = Follows(\n user_being_followed_id = self.testuser2.id,\n user_following_id = self.testuser.id\n )\n follow2 = Follows(\n user_being_followed_id = self.testuser3.id,\n user_following_id = self.testuser.id\n )\n\n db.session.add_all((follow1, follow2))\n db.session.commit()\n\n with self.client as c:\n with c.session_transaction() as session:\n session[CURR_USER_KEY] = self.testuser.id\n \n response = c.get(f\"/users/{self.testuser.id}/following\")\n data = str(response.data)\n\n self.assertEqual(response.status_code, 200)\n self.assertIn(\"@bob\", data)\n self.assertIn(\"@carl\", data)\n self.assertNotIn(\"@alvin\", data)", "def following_and_storing(self, user_obj):\n if self.following(user_obj['user']):\n self.monitored_users.append({'user': user_obj['user'], 'username': user_obj['username'],\n 'followDate': datetime.now().timestamp()})", "def check_user(self):\n try:\n if self.get_customer()[0][0] == self.dni:\n return True\n else:\n return False\n except:\n return False", "def user_appears(self, user):\n pass", "def follow(self, follower, followee):\n pass", "def test_func(self):\n member_to_finish = self.get_object()\n return self.request.user.rfid == member_to_finish.rfid", "def test_user_follow_self(self):\n with self.settings(\n EMAIL_BACKEND='django.core.mail.backends.locmem.EmailBackend'):\n self.authorize_user(self.user)\n response = self.client.post(self.follow_self_url, format='json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def follows(self, name):\r\n url = '{0}/{1}/{2}'.format(self.get_url(), 'following', name)\r\n\r\n return http.Request('GET', url), resource.parse_boolean", "def 
following_changed(sender, action, instance, *args, **kwargs):\n\n # m2mchanged.connect specified in apps.py\n\n following = instance.following.all()\n creator = instance.user\n\n if creator in following:\n raise ValidationError (\"can't like own post\")", "def test_func(self):\n post = self.get_object()\n if self.request.user == post.author:\n return True\n return False", "def is_active_user(self):\n\n return self.is_active", "def is_user(self, user='') -> int:\n try:\n if user in self.users:\n return(1)\n else:\n return(0)\n except Exception as error:\n print(f\"Error: self.is_user({user}) -> {error}\")", "def author_is_me(message: discord.Message) -> bool:\n return message.author == config.bot.user", "def liked_by(self, user):\n return Likes.objects.filter(recipe=self, chef=user).exists()", "def test_get_list_of_following_users_with_auth(self):\n with self.settings(\n EMAIL_BACKEND='django.core.mail.backends.locmem.EmailBackend'):\n self.authorize_user(self.user)\n self.register_user(self.user1)\n self.client.post(self.follow_url, format='json')\n response = self.client.get(self.following_list_url)\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def test_func(self):\r\n post = self.get_object()\r\n if self.request.user == post.author:\r\n return True\r\n return False", "def test_func(self):\r\n post = self.get_object()\r\n if self.request.user == post.author:\r\n return True\r\n return False", "def test_func(self):\r\n post = self.get_object()\r\n if self.request.user == post.author:\r\n return True\r\n return False", "def logged_in(self):\n return self.auth.get_user_by_session() is not None", "def is_profile_complete(self):\n return bool(self.fullname and self.username and self.email)", "def user_has_access(self, user):\n if self.visibility == self.PUBLIC:\n return True\n elif self.visibility == self.PRIVATE and self.created_by == user:\n return True\n elif self.visibility in (self.ORG_ONLY, self.ORG_ONLY_NO_EXTERNAL):\n if user.external and self.visibility == self.ORG_ONLY_NO_EXTERNAL:\n return False\n elif self.organization.memberships.filter(user=user).count() >= 1:\n return True\n return False", "def has_access(self, user):\n if user.is_superuser:\n return True\n return self.user_objects(user).filter(id=self.id).exists()", "def is_active(self):\n return self.status == ACTIVE_USER", "def gatekeeper():\n\n if user.name in GATEKEEPERS:\n return True\n\n return False", "def is_leader(self):\n return self.__is_leader", "def early_return(bot:Bot, ctx:Context):\n return ctx.message.author.bot or ctx.message.author.id == bot.user.id", "def can_be_viewed_by(self,user):\n return True", "def show_following(user_id):\n\n\n user = User.query.get_or_404(user_id)\n return render_template('users/following.html', user=user)", "def test_follow_manually_approved(self):\n activity = {\n \"@context\": \"https://www.w3.org/ns/activitystreams\",\n \"id\": \"https://example.com/users/rat/follows/123\",\n \"type\": \"Follow\",\n \"actor\": \"https://example.com/users/rat\",\n \"object\": \"https://example.com/user/mouse\",\n }\n\n self.local_user.manually_approves_followers = True\n self.local_user.save(\n broadcast=False, update_fields=[\"manually_approves_followers\"]\n )\n\n with patch(\"bookwyrm.models.activitypub_mixin.broadcast_task.apply_async\"):\n views.inbox.activity_task(activity)\n\n # notification created\n notification = models.Notification.objects.get()\n self.assertEqual(notification.user, self.local_user)\n self.assertEqual(notification.notification_type, 
\"FOLLOW_REQUEST\")\n\n # the request should exist\n request = models.UserFollowRequest.objects.get()\n self.assertEqual(request.user_subject, self.remote_user)\n self.assertEqual(request.user_object, self.local_user)\n\n # the follow relationship should not exist\n follow = models.UserFollows.objects.all()\n self.assertEqual(list(follow), [])", "def is_logged_in(self) -> bool:\n return self.id is not None and self.username is not None", "def is_participant(self,user):\n if user.is_superuser:\n return True\n\n if user.groups.filter(name=self.participants_group_name).count() > 0:\n return True\n else:\n return False", "def still_deciding(self):\n for player in self.players:\n if isinstance(player, user.User):\n if not player.has_played:\n return True\n return False", "def follows(self):\r\n return relationships.Follows(self)", "def is_eligible(self, myself):\n if self.author().screen_name == myself.screen_name:\n log_.debug(\"Not replying to my own tweets\")\n return False\n if self.is_retweet():\n log_.debug(\"Not processing pure retweets\")\n return False\n return True" ]
[ "0.7629707", "0.75313014", "0.74600714", "0.7424218", "0.7424218", "0.73141986", "0.7265916", "0.7074228", "0.7049804", "0.6913712", "0.6912288", "0.682332", "0.6789204", "0.6776134", "0.67695856", "0.67149085", "0.65792125", "0.6536938", "0.6522163", "0.6520679", "0.6476891", "0.64407915", "0.6424886", "0.63474077", "0.63474077", "0.63367516", "0.6318876", "0.63111293", "0.6305456", "0.6287174", "0.62690854", "0.6268289", "0.62357277", "0.61720717", "0.6105842", "0.6089034", "0.6071211", "0.60619164", "0.60472775", "0.6033834", "0.6024139", "0.5991299", "0.5983718", "0.5971912", "0.5963616", "0.59623706", "0.59582907", "0.59547925", "0.5938019", "0.5934896", "0.593432", "0.5921433", "0.5916775", "0.59057456", "0.5865513", "0.5857874", "0.58383226", "0.5822138", "0.5812362", "0.5787508", "0.57803625", "0.5776359", "0.5769329", "0.57536346", "0.57487625", "0.57467365", "0.5730519", "0.5728986", "0.5726049", "0.5725832", "0.57164764", "0.5713108", "0.57070947", "0.57057047", "0.5704866", "0.5685519", "0.56850195", "0.56831187", "0.5681126", "0.5679174", "0.5675616", "0.567203", "0.567203", "0.567203", "0.5668842", "0.56632", "0.5659024", "0.5655514", "0.56492716", "0.563577", "0.5634555", "0.56317145", "0.56135416", "0.56096953", "0.5607123", "0.55957603", "0.5591321", "0.5585159", "0.55849856", "0.55842966" ]
document_score: 0.70594585
document_rank: 8
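The `metadata` field marks each row for a triplet objective over (`query`, `document`, `negatives`). Below is a minimal sketch of expanding one row into (anchor, positive, negative) training triplets; the exact downstream trainer (e.g. a sentence-transformers contrastive setup) is an assumption, not something the dataset states.

```python
# Hedged sketch: expand one row into (anchor, positive, negative) string triplets,
# following the metadata objective {"triplet": [["query", "document", "negatives"]]}.
def row_to_triplets(row):
    """One triplet per hard negative in the row."""
    return [
        (row["query"], row["document"], negative)
        for negative in row["negatives"]
    ]
```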
Example row 2

query: Returns the username of the desired user

document: def get_username_by_id(self, id): return User.query.get(id).username

metadata: { "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }

negatives (followed by negative_scores):
[ "def username(self) -> str:", "def username(self) -> str:", "def get_username(self):\r\n return self.username", "def get_username(self):\n return self.username", "def get_username(self, request):\r\n try:\r\n return request.user.username\r\n except AttributeError:\r\n return ''", "def user_name(self) -> str:\n return pulumi.get(self, \"user_name\")", "def username(self):\n return self.user.username", "def get_username(self):\n return str(getattr(self, self.USERNAME_FIELD))", "def get_username(self, obj):\n return obj.user.username", "def get_username(self, obj):\n return obj.user.username", "def username(self) -> str:\n return pulumi.get(self, \"username\")", "def username(self) -> str:\n return pulumi.get(self, \"username\")", "def username(self) -> str:\n return pulumi.get(self, \"username\")", "def user():\n\treturn request.authorization.username if zk.get_http_login() else zk.get_username()", "def GetUsername(self):\n pass", "def username(self, instance):\r\n return instance.user.username", "def get_username(self) -> str:\n return self._username", "def _get_username():\n username = request.args.get(\"username\")\n if not username:\n raise NoUserError()\n else:\n return username", "def get_username(self):\n raise NotImplementedError('get_username')", "def get_name(self):\n return self.user.username if self.user.username else self.user.email", "def get_username(self):\r\n raise NotImplementedError", "def user_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"user_name\")", "def _get_user_name(self):\n if self.runtime.get_real_user is None:\n return 'staff'\n else:\n return self.runtime.get_real_user(self.runtime.anonymous_student_id).username", "def username(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"username\")", "def username(self) -> undefined.UndefinedOr[str]:", "def username(self) -> str:\n raise NotImplementedError", "def username(self):\n return self._username()", "def getUserName(self):\n userType = self.env['res.users']\n \n uiUser = userType.browse(self._uid)\n return uiUser.name", "def user_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"user_name\")", "def user_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"user_name\")", "def user_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"user_name\")", "def getUsername(self):\n\t\treturn self.Username.lower()", "def usernameFind(self):\r\n return self.username()", "def username(self) :\n\t\ttry :\n\t\t\treturn self._username\n\t\texcept Exception as e:\n\t\t\traise e", "def getUserName(self):\n user = User.by_id(self.user_id)\n return user.name", "def user_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"user_name\")", "def get_username(self) -> str:\n try:\n return self[\"user\"]\n except KeyError:\n raise MarathonNotConfigured(\n \"Could not find marathon user in system marathon config\"\n )", "def username(self, inst):\r\n return inst.user.username", "def user_name(self):\n return self._user_name", "def get_username(self):\n full_name = '%s %s' % (self.user.first_name.strip(), self.user.last_name.strip()[0:1])\n if len(full_name.strip()) == 0:\n full_name = self.user.username\n return full_name.strip()", "def username(user_id):\n return UserIndex.instance().name(user_id)", "def username(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"username\")", "def username(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"username\")", "def username(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, 
\"username\")", "def username(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"username\")", "def username(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"username\")", "def username(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"username\")", "def username(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"username\")", "def username(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"username\")", "def username(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"username\")", "def username(self) -> str:\n return self._username", "def username(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"username\")", "def username(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"username\")", "def username(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"username\")", "def username(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"username\")", "def username(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"username\")", "def username(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"username\")", "def username(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"username\")", "def username(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"username\")", "def username(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"username\")", "def username(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"username\")", "def username(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"username\")", "def username(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"username\")", "def get_username(self, tg_user_id):\n\n data = {\n 'user_id': tg_user_id\n }\n result = self._send_data('getUser', data)\n if result.update:\n return result.update.get('username','')", "async def get_user_name(self, user_target: str) -> str:\n user = await self.get_user(user_target=user_target)\n if user is None:\n return user_target\n return user.display_name", "def get_username(self):\n return self.browser.find_element(*locators.USER_NAME_TEXT).text", "def get_full_name(self):\n return self.username", "def get_full_name(self):\n return self.username", "def username(self):\n return self._username", "def username(self):\n return self._username", "def username(self):\n return self._username", "def username(self):\n return self._query_config()['username']", "def get_username_for_active_connection(self):\n user_info = self.get_user_info()\n return getattr(user_info, 'user_name', None)", "def get_username():\n\n if session.get(\"user_id\") is None:\n username = \"\"\n else:\n user_id = session.get(\"user_id\")\n user = User.query.filter(User.id==user_id).first()\n username = user.username\n\n return username", "def GetUsername(self):\n return self._username", "def user_name(self):\n\n return self._user_name", "def username(self):\n if self._username is not None:\n return self._username\n # Try to get a username from the userprofile\n try:\n self._username = self.userprofile.user.username\n except UserProfile.DoesNotExist:\n # User profile does not exist\n return None\n return self._username", "def username(self) -> Optional[str]:\n return self._state.get(\"username\", None)", "def get_user_name(self):\n\t\treturn call_sdk_function('PrlLic_GetUserName', self.handle)", "def get_username():\r\n return get_creds(CREDS_FILE)[1]", "def _get_username(self):\n name = self._get_username_from_cookies()\n if name:\n return name\n if self._oauth and self._login_info[0]:\n return 
self._login_info[0]\n return self._get_username_from_api()", "def get(self, username):\n return username", "def get_user_display_name(self):\n return self.user.get_full_name() or self.user.get_username()", "def username():\n login = 0\n return pwd.getpwuid(os.getuid())[login]", "def _get_username(user_id):\n username = select(u.username for u in UserInformationData if u.user_id == user_id).first()\n\n return username", "def user_name():\n\n # Theoretically, we should be using Kerberos principal name for this.\n # However, Python Kerberos API bindings (both kerberos and krb5 modules)\n # are broken to the extent that one does not return the username, and other\n # has sad API, so we have to use other venues.\n\n if \"ATHENA_USER\" in os.environ:\n return os.environ[\"ATHENA_USER\"]\n return getpass.getuser()", "def display_name(self) -> str:\n return self.requester.username", "def get_short_name(self):\n return self.username", "def get_short_name(self):\n return self.username", "def get_short_name(self):\n return self.username", "def get_username(self):\n if not self.is_valid():\n return None\n try:\n # NOTE: all emails stored in lower-case\n email = self.clean_email().lower()\n return User.objects.get(email=email).username\n except User.DoesNotExist:\n pass\n return None", "def getName(self):\n return self.__username", "def user_name(self):\n return self._stub.List(self._message).user_name", "def get_username(self, partner):\n user = PartnerUserDetails.objects.filter(partner_id=partner).first()\n if user:\n return RedUser.objects.get(pk=user.user_id).username\n else:\n return False", "def username(self):\n return self._authenticator.username()", "def username(self) -> str:\n return self.get_env_var(self.username_var)", "def username(self) -> str:\n return self.get_env_var(self.username_var)", "def username(self, login_failures):\n return login_failures.user.username", "def get_user_name(_cache_user) -> str:\n try:\n return _cache_user[\"preferred_username\"]\n except KeyError:\n return \"Testing\"\n except TypeError:\n return \"Testing\"", "def user_name(self):\n return lamin_user_settings().name" ]
[ "0.8169498", "0.8169498", "0.8081674", "0.8035194", "0.8018373", "0.7989936", "0.79892623", "0.798525", "0.79793316", "0.79793316", "0.79586726", "0.79586726", "0.79586726", "0.79513943", "0.7948783", "0.7941232", "0.79404753", "0.79382294", "0.7898637", "0.7894659", "0.7890763", "0.7881293", "0.78777444", "0.7849664", "0.7847574", "0.7841217", "0.77928585", "0.7759054", "0.7745586", "0.7745586", "0.7745586", "0.7739494", "0.7737782", "0.7735171", "0.7725124", "0.7721014", "0.77176565", "0.77106047", "0.7710496", "0.7702446", "0.7660066", "0.76334083", "0.76334083", "0.76334083", "0.76334083", "0.76334083", "0.76334083", "0.76334083", "0.76334083", "0.76334083", "0.76308435", "0.76215947", "0.76215947", "0.76215947", "0.76215947", "0.76215947", "0.76215947", "0.76215947", "0.76215947", "0.76215947", "0.76215947", "0.76215947", "0.76215947", "0.76207906", "0.76151025", "0.7600418", "0.7578446", "0.7578446", "0.7576719", "0.7576719", "0.7576719", "0.75663656", "0.7558788", "0.75521713", "0.75491214", "0.7546499", "0.75420517", "0.75390714", "0.7533986", "0.7496638", "0.74916476", "0.747556", "0.74719274", "0.74301636", "0.74193925", "0.7416751", "0.74100786", "0.7399403", "0.7399403", "0.7399403", "0.7388507", "0.7379602", "0.73728526", "0.7360349", "0.7346212", "0.7325293", "0.7325293", "0.7317342", "0.73054206", "0.72758657" ]
document_score: 0.7294774
document_rank: 99
Example row 3

query: Gets the list of banks.

document:

def get_banks() -> List[BankDetails]:
    from paynlsdk.api.transaction.getbanks import Request
    client = APIClient()
    request = Request()
    client.perform_request(request)
    return request.response.banks

metadata: { "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }

negatives (followed by negative_scores):
[ "def banks(self):\n return self.__banks", "def banks(self):\n return self.__banks", "def banks(self):\n return self.__banks", "def banks(self):\n return self.__banks", "def banks(self):\n return self.__banks", "def banks(self):\n return self.__banks", "def banks(self):\n return self.__banks", "def get_list() -> List[BankDetails]:\n from paynlsdk.client.transaction import Transaction\n return Transaction.get_banks().banks", "def returnedbanks(self):\n returned = []\n for bank in self.__banks:\n if bank.returned():\n returned.append(bank)\n return returned", "def returnedbanks(self):\n returned = []\n for bank in self.__banks:\n if bank.returned():\n returned.append(bank)\n return returned", "def returnedbanks(self):\n returned = []\n for bank in self.__banks:\n if bank.returned():\n returned.append(bank)\n return returned", "def returnedbanks(self):\n returned = []\n for bank in self.__banks:\n if bank.returned():\n returned.append(bank)\n return returned", "def returnedbanks(self):\n returned = []\n for bank in self.__banks:\n if bank.returned():\n returned.append(bank)\n return returned", "def returnedbanks(self):\n returned = []\n for bank in self.__banks:\n if bank.returned():\n returned.append(bank)\n return returned", "def returnedbanks(self):\n returned = []\n for bank in self.__banks:\n if bank.returned():\n returned.append(bank)\n return returned", "def list_brands(self, **kwargs):\n url = self.api_url('brands')\n\n return requests.get(\n url,\n headers=self.auth_header,\n params=kwargs,\n ).json()", "def bank(self):\n return self.random_element(self.banks)", "def get_selected_banks(self):\n banks = [self.mainwindow.bplr_bank1_checkbox,\n self.mainwindow.bplr_bank2_checkbox,\n self.mainwindow.bplr_bank3_checkbox,\n self.mainwindow.bplr_bank4_checkbox]\n\n checked_banks = []\n pointer = 0\n\n # Check which banks were chosen, and add them to a list\n for obj in banks:\n if obj.checkState():\n checked_banks.append(self.banks_ids[pointer])\n pointer += 1\n\n # If no banks have been selected, raise a ValueError\n if not checked_banks:\n raise ValueError\n\n return checked_banks", "def getBoogies(self):\n return self.boogies", "def get_all_bank_names() -> List[str]:\n\n # Open a new connection\n db, cursor = db_connector.cursor()\n\n query = \"select name from bank\"\n cursor.execute(query)\n data = cursor.fetchall()\n r_list = [x[0] for x in data]\n db.disconnect()\n return r_list", "def get_banks():\n\n\tbank_list = []\n\tbank_obj = lib.operations.bank.get_bank_list()\n\n\tfor obj in bank_obj:\n\t\tcheckbox = \"<div class='checkbox'>\" \\\n\t\t \"<label><input name='checkbox' type='checkbox' id='bankBox' value='{id}'></label>\" \\\n\t\t \"</div>\".format(id=obj.id)\n\t\tbtn_group = \"<div class='btn-group'>\" \\\n\t\t \"<button type='button' id='bankAdd' class='btn btn-success btn-flat' onclick='loadModal(this,{idCol})'>\" \\\n\t\t \"<i class='fa fa-plus'></i>\" \\\n\t\t \"</button>\" \\\n\t\t \"<button type='button' id='bankDel' class='btn btn-danger btn-flat'><i class='fa fa-trash'></i>\" \\\n\t\t \"</button>\" \\\n\t\t \"<button type='button' id='bankRef' class='btn btn-warning btn-flat'><i class='fa fa-refresh'></i>\" \\\n\t\t \"</button>\" \\\n\t\t \"<button type='button' id='bankUpd' class='btn btn-info btn-flat' onclick='loadModal(this,{idCol})'>\" \\\n\t\t \"<i class='fa fa-reply'></i>\" \\\n\t\t \"</button></div>\".format(idCol=obj.id)\n\t\trow = dict(checkbox=checkbox, id=obj.id, name=obj.name, city=obj.city, address=obj.address, 
options=btn_group)\n\t\tbank_list.append(row)\n\n\treturn send_result(bank_list, status=\"True\", total=len(bank_obj))", "def tank_names(self):\n return self._tanks", "def getBrickList(self):\n return self._bricks", "def get_list_of_bbs(self):\n return self.mfp.get_list_of_bbs()", "def get_deck():\n deck = []\n for suit in Suit:\n for rank in Rank:\n deck.append(Card(suit, rank))\n return deck", "def bank(self) -> str:\n return self.random_element(self.banks)", "def get_branches(self):\n\n # gets all branches in repository\n branches_endpoint = f'/repos/{self.repo}/branches'\n response = self._get_request(branches_endpoint)\n # guard condition\n if response.status_code != STATUS_CODE_OK:\n return None\n # deserialize\n branches_page = response.json()\n\n return [\n {\n 'name': branch['displayId']\n } for branch in branches_page['values']\n ]", "def MatchListToRanks(self):\n return self.ListToRanks(self.matchList)", "def query_ram_banks(sudo: bool = False, **_) -> t.List[t.Mapping[str, t.Any]]:\n try:\n xml_root = parse_lshw(sudo=sudo)\n except subprocess.TimeoutExpired:\n return []\n except subprocess.CalledProcessError:\n return []\n except FileNotFoundError:\n return []\n except ET.ParseError:\n return []\n nodes = xml_root.findall('.//node')\n _LOG.debug('%i nodes', len(nodes))\n ram_banks = []\n for node in nodes:\n node_id = node.attrib['id']\n _LOG.debug('%s', node_id)\n if not node_id.startswith('bank'):\n continue\n ram_banks.append(query_ram_bank(node))\n return ram_banks", "def madebanks(self):\n made = []\n for bank in self.__banks:\n if bank.made():\n made.append(bank)\n return made", "def madebanks(self):\n made = []\n for bank in self.__banks:\n if bank.made():\n made.append(bank)\n return made", "def madebanks(self):\n made = []\n for bank in self.__banks:\n if bank.made():\n made.append(bank)\n return made", "def madebanks(self):\n made = []\n for bank in self.__banks:\n if bank.made():\n made.append(bank)\n return made", "def madebanks(self):\n made = []\n for bank in self.__banks:\n if bank.made():\n made.append(bank)\n return made", "def madebanks(self):\n made = []\n for bank in self.__banks:\n if bank.made():\n made.append(bank)\n return made", "def madebanks(self):\n made = []\n for bank in self.__banks:\n if bank.made():\n made.append(bank)\n return made", "def get_branches(self):\n\n # gets all branches in repository\n branches_endpoint = f'/repositories/{self.owner}/{self.repo}/refs/branches'\n filter_param = {'fields': 'values.name'}\n response = self._get_request(branches_endpoint, filter_param)\n # guard condition\n if response.status_code != STATUS_CODE_OK:\n raise BitbucketRequestSenderExc(\n f'Invalid parameter(s) in: owner: {self.owner},'\n f' repo: {self.repo}')\n # deserialize\n branches_page = response.json()\n\n return [\n {\n 'name': branch['name']\n } for branch in branches_page['values']\n ]", "def _fetch_bills(self, options):\n bill_count = options['max'] or fetch.DEFAULT_BILL_COUNT\n return fetch.bills(per_page=bill_count)", "def get_bank(self):\n return self._bank", "def get_blists(self):\n return self.blists[:]", "def balances(self):\n\t\tif self._session:\n\t\t\treturn self._session.get_account().get('balances', [])\n\n\t\treturn []", "def test_retrieve_all_by_bank(self):\n swa = frontend.SupplyWinApi()\n query_dict = dict(\n dev=\"rrenaud\",\n targets=\"\",\n interaction=\"Bank\",\n unconditional=\"true\",\n )\n\n card_stats = swa.retrieve_data(query_dict)\n\n # Gets 288 entries back, because one for each of the 144\n # cards, plus the 
unconditioned version of each\n self.assertEquals(len(card_stats), 288)\n\n self.assertEquals(card_stats[0]['card_name'], 'Adventurer')\n\n json = swa.readable_json_card_stats(card_stats)\n self.assertEquals(json[0:14], '[{\"card_name\":')", "def tanks(self):\n return self._node_reg.tanks", "def get_budgets(self) -> list:\n return list(self.budgets.values())", "def tobemade(self):\n tobe = []\n for bank in self.__banks:\n if bank.returned() and not bank.made():\n tobe.append(bank)\n return tobe", "def tobemade(self):\n tobe = []\n for bank in self.__banks:\n if bank.returned() and not bank.made():\n tobe.append(bank)\n return tobe", "def tobemade(self):\n tobe = []\n for bank in self.__banks:\n if bank.returned() and not bank.made():\n tobe.append(bank)\n return tobe", "def tobemade(self):\n tobe = []\n for bank in self.__banks:\n if bank.returned() and not bank.made():\n tobe.append(bank)\n return tobe", "def tobemade(self):\n tobe = []\n for bank in self.__banks:\n if bank.returned() and not bank.made():\n tobe.append(bank)\n return tobe", "def tobemade(self):\n tobe = []\n for bank in self.__banks:\n if bank.returned() and not bank.made():\n tobe.append(bank)\n return tobe", "def tobemade(self):\n tobe = []\n for bank in self.__banks:\n if bank.returned() and not bank.made():\n tobe.append(bank)\n return tobe", "def get_bank_accounts(self):\n spec = {'owner': DBRef(self.collection_name, self._id)}\n return BankAccount.collection.find(spec)", "def index():\n from managers import banks_manager, rates_manager\n\n # acquiring number of all banks in DB\n banks_count = banks_manager.get_banks_count()\n\n # acquiring last update time\n updated = rates_manager.get_last_update_time()\n last_update = None if updated is None else updated.strftime('%Y %d %b, %H:%M')\n\n # acquiring list of banks with best USD selling rate\n usd_selling_rate = rates_manager.get_usd_selling_min_rate()\n usd_selling_banks = banks_manager.get_usd_selling_best_rate_banks()\n\n # acquiring list of banks with best EUR selling rate\n eur_selling_rate = rates_manager.get_eur_selling_min_rate()\n eur_selling_banks = banks_manager.get_eur_selling_best_rate_banks()\n\n # acquiring list of banks with best USD buying rate\n usd_buying_rate = rates_manager.get_usd_buying_max_rate()\n usd_buying_banks = banks_manager.get_usd_buying_best_rate_banks()\n\n # acquiring list of banks with best EUR buying rate\n eur_buying_rate = rates_manager.get_eur_buying_max_rate()\n eur_buying_banks = banks_manager.get_eur_buying_best_rate_banks()\n\n # initializing banks data map\n data_map = {\n 'usd_selling_rate': usd_selling_rate,\n 'eur_selling_rate': eur_selling_rate,\n 'usd_buying_rate': usd_buying_rate,\n 'eur_buying_rate': eur_buying_rate,\n 'usd_selling_banks': usd_selling_banks,\n 'eur_selling_banks': eur_selling_banks,\n 'usd_buying_banks': usd_buying_banks,\n 'eur_buying_banks': eur_buying_banks\n }\n return render_template(\"index.html\", title='Home', banks_count=banks_count, last_updated=last_update, data=data_map)", "def findRanks(toBeRanked, values):\n\treturn list(map(lambda e: findRank(e, values), toBeRanked))", "def list(self, request, *args, **kwargs):\n queryset = BankConnections.objects.filter(user=self.request.user)\n \n response = [{\n \"connected\": True if connection.isTokenValid else False,\n \"bank\": connection.bank_branch.bank.id\n } for connection in queryset]\n\n return Response(response)", "def branches(self):\r\n url = self.base_url + 'branches/'\r\n return json.loads(self.bb.load_url(url))", "def 
get_vendor_bills(self, count: int = 10) -> list:\n return list(\n itertools.islice(self.client.vendor_bills.get_all_generator(), count)\n )", "def list_branches(self) -> List[str]:\n self.__verify_repo_initialized()\n branches = heads.get_branch_names(self._env.branchenv)\n return branches", "def notreturnedbanks(self):\n notreturned = []\n for bank in self.signedin():\n if not bank.returned():\n notreturned.append(bank)\n return notreturned", "def notreturnedbanks(self):\n notreturned = []\n for bank in self.signedin():\n if not bank.returned():\n notreturned.append(bank)\n return notreturned", "def notreturnedbanks(self):\n notreturned = []\n for bank in self.signedin():\n if not bank.returned():\n notreturned.append(bank)\n return notreturned", "def notreturnedbanks(self):\n notreturned = []\n for bank in self.signedin():\n if not bank.returned():\n notreturned.append(bank)\n return notreturned", "def notreturnedbanks(self):\n notreturned = []\n for bank in self.signedin():\n if not bank.returned():\n notreturned.append(bank)\n return notreturned", "def notreturnedbanks(self):\n notreturned = []\n for bank in self.signedin():\n if not bank.returned():\n notreturned.append(bank)\n return notreturned", "def notreturnedbanks(self):\n notreturned = []\n for bank in self.signedin():\n if not bank.returned():\n notreturned.append(bank)\n return notreturned", "def account_balances(self):\n return self.get('balances', auth=True)", "def get_balances(self):\r\n balances = self.api.find(\"tokens\", \"balances\", query={\"account\": self.account})\r\n return balances", "def scrape_billboard(self):\n\t\t# url of hot 100 on billboard\n\t\turl = \"http://www.billboard.com/charts/hot-100\"\n\t\thttp = urllib3.PoolManager()\n\t\trequest_main = http.request('GET', url)\n\t\tsoup_main = BeautifulSoup(request_main.data, \"lxml\")\n\t\tblocks = soup_main.find_all(\"div\", class_=\"chart-row__title\")\n\t\tfor i, block in enumerate(blocks):\n\t\t\ttitle = block.find(\"h2\", class_=\"chart-row__song\").text\n\t\t\ta_tag = block.find(\"a\")\n\t\t\tartist = None\n\t\t\tif a_tag is not None: \n\t\t\t\tartist = a_tag.text.lstrip().rstrip() \n\t\t\telse: \n\t\t\t\tartist = block.find(\"span\").text.lstrip().rstrip()\n\t\t\tpTitle, pArtist = self.process_title(title), self.process_artist(artist)\n\t\t\tself._songs.append(Song(pTitle, pArtist, i+1))", "def bands(self):\n\t\treturn self._bands", "def get_bank(self):\n return self._i2c_read(_BANK_ADDRESS)", "def list_all_branches(self) -> dict:\n try:\n branches_response = self.repo.get_branches()\n branches_list = []\n for branch in branches_response:\n branches_list.append(branch.raw_data.get('name'))\n return make_success_response(200, branches_list)\n except GithubException as github_exc:\n return make_error_response(github_exc.status, github_exc.data)", "def get_bbands(data):\n if data is None:\n raise EmptyDataError('[!] 
Invalid data value')\n\n result = TA.BBANDS(data)\n if result is None:\n raise IndicatorException\n return result", "def getBalances (self):\n\n return [self.nodes[i].getbalance () for i in range (2)]", "def tank_name_list(self):\n return list(self._node_reg.tank_names)", "def list_branches(self) -> PagingList[Branch]:\n return PagingList(lambda offset, limit: self._generate_branches(None, offset, limit), 128)", "def bands(self):\n return self._bands", "def get_fb_ind_rankings(self):\n\n ranks = []\n self._logger.debug(\"Getting foosball individual rankings\")\n\n try:\n self.check_if_db_connected()\n cursor = self._db_conn.cursor()\n cursor.execute(\"SELECT player_id, first_name, last_name, \\\nnickname FROM player\")\n players = cursor.fetchall()\n\n for player_id, first_name, last_name, nickname in players:\n cursor.execute(\"SELECT fb_offense_rating, fb_defense_rating FROM \\\nplayer WHERE player_id = {0}\".format(player_id))\n offense_rating, defense_rating = cursor.fetchall()[0]\n\n cursor.execute(\"SELECT mu, sigma FROM rating WHERE rating_id \\\n= {0}\".format(offense_rating))\n mu, sigma = cursor.fetchall()[0]\n\n offense_rank = float(mu) - (3 * float(sigma))\n cursor.execute(\"SELECT mu, sigma FROM rating WHERE rating_id \\\n= {0}\".format(defense_rating))\n mu, sigma = cursor.fetchall()[0]\n\n defense_rank = float(mu) - (3 * float(sigma))\n\n cursor.execute(\"SELECT COUNT(result_id) FROM fb_result WHERE \\\noffense_winner = {0}\".format(player_id))\n offense_win_count = cursor.fetchone()[0]\n\n cursor.execute(\"SELECT COUNT(result_id) FROM fb_result WHERE \\\ndefense_winner = {0}\".format(player_id))\n defense_win_count = cursor.fetchone()[0]\n\n cursor.execute(\"SELECT COUNT(result_id) FROM fb_result WHERE \\\noffense_loser = {0}\".format(player_id))\n offense_lose_count = cursor.fetchone()[0]\n\n cursor.execute(\"SELECT COUNT(result_id) FROM fb_result WHERE \\\ndefense_loser = {0}\".format(player_id))\n defense_lose_count = cursor.fetchone()[0]\n\n intermediate_rank = (first_name, last_name, nickname,\n 'Offense', round(offense_rank, 4), offense_win_count,\n offense_lose_count)\n ranks.append(intermediate_rank)\n del intermediate_rank\n intermediate_rank = (first_name, last_name, nickname,\n 'Defense', round(defense_rank, 4), defense_win_count,\n defense_lose_count)\n ranks.append(intermediate_rank)\n del intermediate_rank\n\n except MySQLdb.OperationalError:\n self._logger.error(\"MySQL operational error occured\")\n traceback.print_exc()\n raise exceptions.DBConnectionError(\"Cannot connect to MySQL server\")\n\n except MySQLdb.ProgrammingError:\n self._logger.error(\"MySQL programming error\")\n traceback.print_exc()\n raise exceptions.DBSyntaxError(\"MySQL syntax error\")\n\n else:\n return ranks", "def get_budgets(self) -> list:\n return self.budget_manager.get_budgets()", "def get_all_stocks():\n url = r\"https://brapi.ga/api/quote/list\"\n response = requests.get(url)\n return [stock[\"stock\"] for stock in response.json()[\"stocks\"]]", "def branches(self):\r\n url = '{0}/{1}'.format(self.get_url(), 'branches')\r\n\r\n return http.Request('GET', url), parsers.parse_json", "def get_branches(self, repo_name, cred_hash):\n branch_names = []\n\n url = f'{self.code_cloud_api.branch_api}/{repo_name}/branches?start=0&limit=30'\n response = self.code_cloud_api.get(url=url, cred_hash=cred_hash)\n if not response['status']:\n return response\n \n for item in response.get('data', {}).get('values', {}):\n branch_names.append(item.get('displayId', ''))\n\n return {'status': True, 
'data': branch_names}", "def list(self, request, *args, **kwargs):\n return super(BalanceBillsViewSet, self).list(\n request,\n *args,\n **kwargs\n )", "def getBuddies(self, start=None, length=10):\n if start == None:\n start = len(self.achievements)\n doc = minidom.parse(urllib.urlopen(\"%s/rest/users/buddies/%s/%i/%i\" % (serverString, self.name, start, length)))\n if int(doc.getElementsByTagName(\"status\")[0].firstChild.data) != 1:\n raise ServerError(doc.getElementsByTagName(\"status\")[0].firstChild.data)\n for element in doc.getElementsByTagName(\"buddy\"):\n name = element.getElementsByTagName(\"name\")[0].firstChild.data\n id = element.getElementsByTagName(\"id\")[0].firstChild.data\n self.buddies += [Author(name, id)]", "def get_all_habits(self):\n return self.habits", "def bank_account_bban(self):\n return self.__bank_account_bban", "def list(self, request, *args, **kwargs):\n return super(BalanceHistoryBillsViewSet, self).list(\n request,\n *args,\n **kwargs\n )", "def returnBalances(self):\n pass", "def branches(self):\r\n url = '{0}/branches/'.format(self.get_url())\r\n request = http.Request('GET', url)\r\n\r\n return request, parsers.parse_json", "def get_all_boards():\n return [board for board in GRAPH_DB.find(\"board\")]", "def do_bay_list(cs, args):\n bays = cs.bays.list(marker=args.marker, limit=args.limit,\n sort_key=args.sort_key,\n sort_dir=args.sort_dir)\n columns = ['uuid', 'name', 'node_count', 'master_count', 'status']\n columns += utils._get_list_table_columns_and_formatters(\n args.fields, bays,\n exclude_fields=(c.lower() for c in columns))[0]\n utils.print_list(bays, columns,\n {'versions': magnum_utils.print_list_field('versions')},\n sortby_index=None)", "def get_banned(self):\n return self.execute(TABELLE['id_users']['select']['banned'])", "def test_fetch_banks_length(self):\n banks = self.api.fetch_banks()\n bank = banks[0]\n\n self.assertGreater(len(banks), 0)\n self.assertIn('id', bank)\n self.assertIn('code', bank)\n self.assertIn('name', bank)", "def get(self):\n return {'bills': [bill.json() for bill in BillModel.find_all()]}", "def _balances(self) -> Dict[str, int]:\n\n return self.client.get(self._resources(\"balance\"))", "def bands(self):\n if self._bands is None:\n self._bands = self._compute_bands()\n return self._bands", "def get_next_objective_banks(self, n=None):\n if n > self.available():\n # !!! This is not quite as specified (see method docs) !!!\n raise IllegalState('not enough elements available in this list')\n else:\n next_list = []\n x = 0\n while x < n:\n try:\n next_list.append(next(self))\n except Exception: # Need to specify exceptions here!\n raise OperationFailed()\n x = x + 1\n return next_list", "def _sub_bags(self) -> List[Bag]:\n return [Bag(name, amount) for name, amount in self.rules[self.name].items()]", "def get_bus_list():\n\n\tbuses = db.session.query(Bus.bus_name).all()\n\n \n\treturn buses", "def ranks(cls):\n ranked = []\n for team in sorted(dbsession.query(cls).order_by(desc(cls.money)).all()):\n if not team.locked:\n ranked.append(team)\n return ranked", "def get_user_rankings(self, req):\n return msgs.UserRanks(ranks=[user.getRank() for user in models.User.query().fetch()])" ]
[ "0.83688354", "0.83688354", "0.83688354", "0.83688354", "0.83688354", "0.83688354", "0.83688354", "0.76620173", "0.664025", "0.664025", "0.664025", "0.664025", "0.664025", "0.664025", "0.664025", "0.6498705", "0.6401011", "0.63876766", "0.6261225", "0.6159119", "0.6064427", "0.60622555", "0.60158557", "0.59559435", "0.5937878", "0.59333014", "0.59222174", "0.5918928", "0.5913982", "0.5898898", "0.5898898", "0.5898898", "0.5898898", "0.5898898", "0.5898898", "0.5898898", "0.5895676", "0.58878624", "0.58547187", "0.5826322", "0.5817494", "0.57561815", "0.5749588", "0.5743868", "0.57165265", "0.57165265", "0.57165265", "0.57165265", "0.57165265", "0.57165265", "0.57165265", "0.5693034", "0.5654393", "0.5642607", "0.5635654", "0.5635232", "0.5628946", "0.5615819", "0.5614886", "0.5614886", "0.5614886", "0.5614886", "0.5614886", "0.5614886", "0.5614886", "0.55965024", "0.5591252", "0.5579065", "0.55778027", "0.5575743", "0.55734897", "0.55475366", "0.5529067", "0.5526248", "0.5485021", "0.5472338", "0.5457923", "0.54406947", "0.543927", "0.54247355", "0.54115033", "0.5404555", "0.5398249", "0.5390522", "0.5388992", "0.5378353", "0.5373642", "0.536161", "0.53596157", "0.5354793", "0.53537524", "0.5350712", "0.5347277", "0.5342832", "0.533502", "0.53200793", "0.52983075", "0.5288203", "0.5279747", "0.5278561" ]
document_score: 0.7611785
document_rank: 8
Example row 4

query: Refund (part of) a transaction

document: def refund(transaction_id: str, amount: int=None, description: str=None, process_date: datetime=None): return Transaction.refund_response(transaction_id, amount, description, process_date)

metadata: { "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }

negatives:
[ "def refund_payment(self, **kwargs):", "def refund_payment(self, **kwargs):", "def refund(self, amount_in_cents=None):\r\n self.require_item()\r\n\r\n url = self.get_url()\r\n params = base.get_params(('amount_in_cents',), locals())\r\n if params:\r\n url = url + '?' + http.urlencode_any(params)\r\n\r\n request = http.Request('DELETE', url)\r\n\r\n return request, parsers.parse_empty", "def refund(self, **params):\n self.__requireParams(params, ['id'])\n return self.__req('refund_charge', params)", "def refund(self, callerReference, transactionId, refundAmount=None,\r\n callerDescription=None):\r\n params = {}\r\n params['CallerReference'] = callerReference\r\n params['TransactionId'] = transactionId\r\n if(refundAmount != None):\r\n params['RefundAmount'] = refundAmount\r\n if(callerDescription != None):\r\n params['CallerDescription'] = callerDescription\r\n \r\n response = self.make_request(\"Refund\", params)\r\n body = response.read()\r\n if(response.status == 200):\r\n rs = ResultSet()\r\n h = handler.XmlHandler(rs, self)\r\n xml.sax.parseString(body, h)\r\n return rs\r\n else:\r\n raise FPSResponseError(response.status, response.reason, body)", "def __refund_entry(self, entry):\n\n buyin = entry.contest_pool.prize_structure.buyin\n bm = BuyinManager(entry.user)\n transaction = None\n\n # Create a cash or ticket deposit as a refund,\n # based on what the user used to get into the contest\n if bm.entry_did_use_ticket(entry):\n tm = TicketManager(entry.user)\n tm.deposit(buyin)\n transaction = tm.transaction\n refund = self.__create_refund(transaction, entry)\n else:\n ct = CashTransaction(entry.user)\n ct.deposit(buyin)\n transaction = ct.transaction\n refund = self.__create_refund(transaction, entry)\n\n # Create refund transaction from escrow\n escrow_ct = CashTransaction(self.get_escrow_user())\n escrow_ct.withdraw(buyin, trans=transaction)\n return refund", "def refund_payment(self, payment, reason=None):\n self.refund(payment, payment.amount, reason)\n payment.state = 'REFUND'\n payment.save()", "def refund(payment: Payment, amount: Decimal, **connection_params):\n error = check_payment_supported(payment=payment)\n capture_txn = payment.transactions.filter(\n kind=TransactionKind.CHARGE, is_success=True).first()\n\n if error:\n response = get_error_response(amount)\n elif capture_txn is not None:\n razorpay_client = get_client(**connection_params)\n razorpay_amount = get_amount_for_razorpay(amount)\n try:\n response = razorpay_client.payment.refund(\n capture_txn.token, razorpay_amount)\n clean_razorpay_response(response)\n except RAZORPAY_EXCEPTIONS as exc:\n error = get_error_message_from_razorpay_error(exc)\n response = get_error_response(amount)\n else:\n error = errors.ORDER_NOT_CHARGED\n response = get_error_response(amount)\n\n transaction = _generate_transaction(\n payment=payment, kind=TransactionKind.REFUND, **response)\n return transaction, error", "def refund(refund, bucket=None):\n if not isinstance(refund, _Refund):\n raise TypeError(\"The Refund must be of type Refund\")\n\n if refund.is_null():\n return _TransactionRecord()\n\n if bucket is None:\n bucket = _login_to_service_account()\n\n # return value from the credit to debit accounts\n debit_account = _Account(uid=refund.debit_account_uid(),\n bucket=bucket)\n credit_account = _Account(uid=refund.credit_account_uid(),\n bucket=bucket)\n\n # remember that a refund debits from the original credit account...\n # (and can only refund completed (DIRECT) transactions)\n debit_note = _DebitNote(refund=refund, 
account=credit_account,\n bucket=bucket)\n\n # now create the credit note to return the value into the debit account\n try:\n credit_note = _CreditNote(debit_note=debit_note,\n refund=refund,\n account=debit_account,\n bucket=bucket)\n except Exception as e:\n # delete the debit note\n try:\n debit_account._delete_note(debit_note, bucket=bucket)\n except:\n pass\n\n # reset the transaction to its original state\n try:\n _TransactionRecord.load_test_and_set(\n refund.transaction_uid(),\n _TransactionState.REFUNDING,\n _TransactionState.DIRECT,\n bucket=bucket)\n except:\n pass\n\n raise e\n\n try:\n paired_notes = _PairedNote.create(debit_note, credit_note)\n except Exception as e:\n # delete all records...!\n try:\n debit_account._delete_note(debit_note, bucket=bucket)\n except:\n pass\n\n try:\n credit_account._delete_note(credit_note, bucket=bucket)\n except:\n pass\n\n # reset the transaction to the pending state\n try:\n _TransactionRecord.load_test_and_set(\n refund.transaction_uid(),\n _TransactionState.REFUNDING,\n _TransactionState.DIRECT,\n bucket=bucket)\n except:\n pass\n\n raise e\n\n # now record the two entries to the ledger. The below function\n # is guaranteed not to raise an exception\n return Ledger._record_to_ledger(paired_notes, refund=refund,\n bucket=bucket)", "def undo_transaction(self):\n transaction = self.context\n entries = transaction.entries()\n\n # check if we can undo\n if not transaction.canUndoOrReverse():\n raise AccessControl_Unauthorized('No permission to create transactionentries, or there are no entries to reverse')\n \n # force a remove from the balances and update the references\n for transactionEntry in entries:\n transactionEntry.removeTransactionEntryFromAccount()\n\n # remove transaction\n transaction.getTransactionFolder().manage_delObjects(ids=transaction.getId())", "def refunded(payment_id):\n EpayPayment = apps.get_model('epay', 'EpayPayment')\n epay = PaymentProcessor.epay\n with transaction.atomic():\n epay_payment = EpayPayment.objects.select_related('payment').get(payment_id=payment_id)\n payment = epay_payment.payment\n epay.refund(\n payment_id, payment.amount, epay_payment.approval_code,\n epay_payment.reference, currency=payment.currency)\n\n payment.change_status(\"cancelled\")\n\n return epay_payment", "def cancel_stripe(self):\n TransactionLog = Pool().get('payment_gateway.transaction.log')\n\n if self.state != 'authorized':\n self.raise_user_error('cancel_only_authorized')\n\n stripe.api_key = self.gateway.stripe_api_key\n\n try:\n charge = stripe.Charge.retrieve(\n self.provider_reference\n ).refund(idempotency_key=('refund_%s' % self.uuid))\n except (\n stripe.error.InvalidRequestError,\n stripe.error.AuthenticationError, stripe.error.APIConnectionError,\n stripe.error.StripeError\n ), exc:\n TransactionLog.serialize_and_create(self, exc.json_body)\n else:\n self.state = 'cancel'\n self.save()\n TransactionLog.create([{\n 'transaction': self,\n 'log': unicode(charge),\n }])", "def refundMarketOrder(self, marketOrder):\n try:\n mySystem = self.systems[marketOrder.system]\n if marketOrder.type == 'sell':\n # refund system resource\n mySystem.modifyResource(marketOrder.value, marketOrder.amount)\n else:\n # refund empire credits\n mySystem.payResources(-(marketOrder.amount * marketOrder.max),0,0,0)\n \n return 1\n except:\n return 'galaxy->refundMarketOrder error'", "def test_refund_with_bank_refund(self):\n debit_jobs([(self.job, A(600), Entry.WORK_DEBIT)])\n credit_jobs([(self.job, A(650), A(0), A(0))], D(650))\n 
refund_jobs([(self.job, A(50), A(0))])\n self.assert_balances(\n bank=A(600, 0, 0),\n balance=A(0),\n promised=A(0),\n debited=A(\n 650\n ), # invoice debit (600) + refund debit (50) = total debited (650)\n invoiced=A(600), # invoice debit (600) = total invoiced (600)\n paid=A(-600), # payment credit (-650) + refund (50) = paid (-600)\n credited=A(\n -650\n ), # payment credit (-650) + adjustment (0) = credited (-650)\n partial=A(600).net_amount,\n tax=A(600).tax_amount,\n )", "def cancel_order(self, **kwargs):\n return self.client.execute(\"order/refund\", \"POST\", kwargs)", "def refund(self, contest, force=False, admin_force=False):\n self.validate_arguments(contest)\n\n if admin_force == False:\n #\n if contest.gpp == True:\n return\n # if its already been cancelled, we cant do it again\n if contest in HistoryContest.objects.all():\n raise ContestCanNotBeRefunded()\n\n # if we are not forcing the refund, then check if the contest is live first\n if not force:\n\n if contest not in LiveContest.objects.all():\n raise ContestCanNotBeRefunded()\n\n #\n # get all entries for a contest\n entries = Entry.objects.filter(contest=contest)\n\n #\n # For all entries create a refund transaction and deposit the\n # cash or ticket back into the user's account\n for entry in entries:\n self.__refund_entry(entry)\n\n #\n # after all set the contest to cancelled\n contest.status = Contest.CANCELLED\n contest.save()", "def RefundOrder(capture_id, refund_amount=0, currency_code=\"EUR\"):\n\tsale = Sale.find(capture_id)\n\n\trefund = sale.refund({\n\t\"amount\": {\n\t\t\"total\": refund_amount,\n\t\t\"currency\": currency_code\n\t}\n\t})\n\n\tif refund.success():\n\t\tprint(\"Refund[%s] Success\" % (refund.id))\n\t\treturn True # Return True if the Refund was successfull\n\telse:\n\t\tprint(refund.error)\n\t\treturn False # Return False if the Refund failed", "def test_refund_with_applied_refund(self):\n # Invoice 700.00\n debit_jobs(\n [(self.job, A(680), Entry.WORK_DEBIT), (self.job2, A(20), Entry.WORK_DEBIT)]\n )\n\n # Payment of 700.00 is incorrectly applied to first job\n credit_jobs([(self.job, A(700), A(0), A(0))], D(700))\n\n self.assert_balances(\n bank=A(700, 0, 0),\n balance=A(-20),\n promised=A(0),\n debited=A(\n 680\n ), # invoice debit (680) + refund debit (0) = total debited (680)\n invoiced=A(\n 680\n ), # invoice debit (680) + adjustment (0) = total invoiced (680)\n paid=A(-700), # payment credit (-700) + refund debit (0) = paid (-700)\n credited=A(\n -700\n ), # payment credit (-700) + adjustment (0) = total credited (-700)\n partial=A(700).net_amount,\n tax=A(700).tax_amount,\n )\n\n self.assert_balances(\n bank=A(700, 0, 0),\n balance=A(20),\n promised=A(0),\n debited=A(20), # invoice debit (20) + refund debit (0) = total debited (20)\n invoiced=A(20), # invoice debit (20) + adjustment (0) = total invoiced (20)\n paid=A(0), # payment credit (0) + refund debit (0) = paid (0)\n credited=A(0), # payment credit (0) + adjustment (0) = total credited (0)\n partial=A(700).net_amount,\n tax=A(700).tax_amount,\n switch_to_job=self.job2,\n )\n\n # Refund 20.00 from first job and apply to second job\n refund_jobs([(self.job, A(20), A(0)), (self.job2, A(0), A(20))])\n\n self.assert_balances(\n bank=A(700, 0, 0),\n balance=A(0),\n promised=A(0),\n debited=A(\n 700\n ), # invoice debit (680) + refund debit (20) = total debited (700)\n invoiced=A(\n 680\n ), # invoice debit (680) + adjustment (0) = total invoiced (680)\n paid=A(-680), # payment credit (-700) + refund debit (20) = paid (-680)\n 
credited=A(\n -700\n ), # payment credit (-700) + adjustment (0) = total credited (-700)\n partial=A(700).net_amount,\n tax=A(700).tax_amount,\n )\n\n self.assert_balances(\n bank=A(700, 0, 0),\n balance=A(0),\n promised=A(0),\n debited=A(\n 20\n ), # invoice debit (20) + refund debit (20) = total debited (70)\n invoiced=A(20), # invoice debit (20) + adjustment (0) = total invoiced (20)\n paid=A(-20), # payment credit (-20) + refund debit (0) = paid (-20)\n credited=A(\n -20\n ), # payment credit (-20) + adjustment (0) = total credited (-20)\n partial=A(700).net_amount,\n tax=A(700).tax_amount,\n switch_to_job=self.job2,\n )", "def create_refundtransfer_for(self, transfer):\n lock = transfer.lock\n\n if lock.hashlock not in self.our_state.locked:\n raise ValueError('Unknow hashlock')\n\n locked_transfer = self.create_lockedtransfer(\n lock.amount,\n lock.expiration,\n lock.hashlock,\n )\n\n cancel_transfer = locked_transfer.to_refundtransfer()\n\n return cancel_transfer", "def rollback_transaction(self, event=None):\n assert self._current_transaction\n\n # Store stacks\n undo_stack = list(self._undo_stack)\n\n erroneous_tx = self._current_transaction\n self._current_transaction = None\n try:\n with Transaction(self.event_manager):\n try:\n erroneous_tx.execute()\n except Exception as e:\n logger.error(\"Could not roolback transaction\")\n logger.error(e)\n finally:\n # Discard all data collected in the rollback \"transaction\"\n self._undo_stack = undo_stack\n\n self._action_executed()", "def test_refund_with_applied_refund_and_bank_refund(self):\n # Invoice 600.00\n debit_jobs(\n [(self.job, A(580), Entry.WORK_DEBIT), (self.job2, A(20), Entry.WORK_DEBIT)]\n )\n\n # Payment of 700.00 is incorrectly applied to first job\n credit_jobs([(self.job, A(700), A(0), A(0))], D(700))\n\n one = A(n=\"-0.01\", t=\"0.01\")\n\n self.assert_balances(\n bank=A(700, 0, 0),\n balance=A(\"-120\") + one,\n promised=A(-100) + one,\n debited=A(\n 580\n ), # invoice debit (680) + refund debit (0) = total debited (680)\n invoiced=A(\n 580\n ), # invoice debit (680) + adjustment (0) = total invoiced (680)\n paid=A(-700), # payment credit (-700) + refund debit (0) = paid (-700)\n credited=A(\n -700\n ), # payment credit (-700) + adjustment (0) = total credited (-700)\n partial=A(700).net_amount,\n tax=A(700).tax_amount,\n )\n\n self.assert_balances(\n bank=A(700, 0, 0),\n balance=A(20),\n promised=A(-100) + one,\n debited=A(20), # invoice debit (20) + refund debit (0) = total debited (20)\n invoiced=A(20), # invoice debit (20) + adjustment (0) = total invoiced (20)\n paid=A(0), # payment credit (0) + refund debit (0) = paid (0)\n credited=A(0), # payment credit (0) + adjustment (0) = total credited (0)\n partial=A(700).net_amount,\n tax=A(700).tax_amount,\n switch_to_job=self.job2,\n )\n\n # Refund 20.00 from first job and apply to second job\n refund_jobs([(self.job, A(120) - one, A(0)), (self.job2, A(0), A(20))])\n\n self.assert_balances(\n bank=A(600, 0, 0),\n balance=A(0),\n promised=A(0),\n debited=A(\n 700\n ), # invoice debit (680) + refund debit (20) = total debited (700)\n invoiced=A(\n 580\n ), # invoice debit (680) + adjustment (0) = total invoiced (680)\n paid=A(-580), # payment credit (-700) + refund debit (20) = paid (-680)\n credited=A(\n -700\n ), # payment credit (-700) + adjustment (0) = total credited (-700)\n partial=A(600).net_amount,\n tax=A(600).tax_amount,\n )\n\n self.assert_balances(\n bank=A(600, 0, 0),\n balance=A(0),\n promised=A(0),\n debited=A(\n 20\n ), # invoice debit (20) + refund 
debit (20) = total debited (70)\n invoiced=A(20), # invoice debit (20) + adjustment (0) = total invoiced (20)\n paid=A(-20), # payment credit (-20) + refund debit (0) = paid (-20)\n credited=A(\n -20\n ), # payment credit (-20) + adjustment (0) = total credited (-20)\n partial=A(600).net_amount,\n tax=A(600).tax_amount,\n switch_to_job=self.job2,\n )", "def rollback(self, project_id, transaction):\n request_pb = _datastore_pb2.RollbackRequest(\n project_id=project_id, transaction=transaction\n )\n # Response is empty (i.e. no fields) but we return it anyway.\n return _rpc(\n self.client._http,\n project_id,\n \"rollback\",\n self.client._base_url,\n self.client._client_info,\n request_pb,\n _datastore_pb2.RollbackResponse,\n )", "def abort(self, transaction):\n raise NotImplementedError", "def rollback(self):\n raise TransactionRollback('rollback called outside of transaction')", "def initiate_refund(self, order: Order) -> OrderRefund:\n raise NotImplementedError", "def rollback(self) -> None:\n if self._transaction is None:\n pass\n else:\n self._transaction.rollback(_to_root=True)", "def abort_transaction(self) -> None:\n pass", "def abort(self):\n if self.transaction:\n token = self.transaction\n self.transaction = None\n self.client.abort(self.creds, token, self.environment)", "def Rollback(self, request, global_params=None):\n config = self.GetMethodConfig('Rollback')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Rollback(self, request, global_params=None):\n config = self.GetMethodConfig('Rollback')\n return self._RunMethod(\n config, request, global_params=global_params)", "def rollback_transaction(self):\n cursor = self._cursor()\n cursor.close()\n self._db.rollback()\n self._end_transaction()", "def _Dynamic_Rollback(self, transaction, transaction_response,\n request_id=None):\n transaction.set_app(self.project_id)\n\n try:\n del self.__tx_actions[transaction.handle()]\n except KeyError:\n pass\n\n self._RemoteSend(transaction, transaction_response, \"Rollback\", request_id)\n \n return transaction_response", "def refund(self, cr, uid, ids, context=None):\n clone_list = []\n line_obj = self.pool.get('pos.order.line')\n \n for order in self.browse(cr, uid, ids, context=context):\n current_session_ids = self.pool.get('pos.session').search(cr, uid, [\n ('state', '!=', 'closed'),\n ('user_id', '=', uid)], context=context)\n if not current_session_ids:\n raise osv.except_osv(_('Error!'), _('To return product(s), you need to open a session that will be used to register the refund.'))\n\n clone_id = self.copy(cr, uid, order.id, {\n 'name': order.name + ' REFUND', # not used, name forced by create\n 'session_id': current_session_ids[0],\n 'date_order': time.strftime('%Y-%m-%d %H:%M:%S'),\n 'parent_id': order.id,\n }, context=context)\n clone_list.append(clone_id)\n\n for clone in self.browse(cr, uid, clone_list, context=context):\n for order_line in clone.lines:\n print order_line.available_qty\n line_obj.write(cr, uid, [order_line.id], {\n 'return_qty': 0.0,\n 'qty': -(order_line.parent_id.available_qty),\n }, context=context)\n\n abs = {\n 'name': _('Return Products'),\n 'view_type': 'form',\n 'view_mode': 'form',\n 'res_model': 'pos.order',\n 'res_id':clone_list[0],\n 'view_id': False,\n 'context':context,\n 'type': 'ir.actions.act_window',\n 'nodestroy': True,\n 'target': 'current',\n }\n return abs", "def rollback_block(self, block):\n # TODO: 0.007-12c changes\n current_length = self.db.get('length')\n if block['length'] != current_length:\n # 
Block is not at the top the chain\n return False\n\n for tx in block['txs']:\n tx_owner_address = tools.tx_owner_address(tx)\n owner_account = self.get_account(tx_owner_address)\n if tx['type'] == 'mint':\n owner_account['amount'] -= tools.block_reward(block['length'])\n self.db.put(tx_owner_address, owner_account)\n elif tx['type'] == 'spend':\n owner_account['amount'] += tx['amount']\n owner_account['count'] -= 1\n owner_account['tx_blocks'].remove(block['length'])\n\n receiver_account = self.db.get(tx['to'])\n receiver_account['amount'] -= tx['amount']\n receiver_account['tx_blocks'].remove(block['length'])\n\n self.db.put(tx_owner_address, owner_account)\n self.db.put(tx['to'], receiver_account)", "def refund_unmatched_entry(self, entry):\n\n # This entry should be one that was not matched, make sure it is not in a contest.\n if entry.contest is not None:\n raise UnmatchedEntryIsInContest(entry)\n\n # Refund the entry.\n return self.__refund_entry(entry)", "def abort_transaction(self,xid):\n modlogger.debug( \"abort:%s\"%xid)\n opid = self.new_opid()\n xaction = AbortTxOperation(opid,xid)\n self._add_operation(xid,xaction)\n try:\n self.tx.rollback()\n finally:\n self.tx = None", "def ConcludeTransaction(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "def exit_transaction():\n _state.transactions = max(get_transactions() - 1, 0)", "def payment_cancel(request):\r\n\ttransaction = Transaction.objects.get(pk=request.GET['pid'])\r\n\tgame = transaction.game\r\n\ttransaction.delete()\r\n\tprint(\"about to call redirect\")\r\n\treturn redirect('/' + str(game.id))", "def cancel_unstake(self, trx_id):\r\n contract_payload = {\"txID\":trx_id}\r\n json_data = {\"contractName\":\"tokens\",\"contractAction\":\"cancelUnstake\",\r\n \"contractPayload\":contract_payload}\r\n tx = self.steem.custom_json(self.ssc_id, json_data, required_auths=[self.account])\r\n return tx", "def abort(payment):\n if isinstance(payment, resources.Payment):\n payment = payment.id\n\n http_client = HttpClient()\n response, __ = http_client.patch(routes.url(routes.PAYMENT_RESOURCE, resource_id=payment), {'abort': True})\n return resources.Payment(**response)", "def rollback(self):\n self._connection.execute_nonquery(\"sql\", \"ROLLBACK\", True)", "def tpc_abort(self, transaction):\n raise NotImplementedError", "def withdraw(self, amount):\r\n self.balance = self.balance - amount\r\n self.transactions.append(-amount)\r\n return amount", "def mark_refunded(self):\n order = self.clone()\n order.status = Order.STATUS_REFUNDED\n order.save()\n return order", "def withdraw(self, amount):\n self.deposit(-amount)", "def test_initiate_refund_another_pending_refund(\n bambora_provider_base_config: dict, order: Order\n):\n request = RequestFactory().request()\n order.status = OrderStatus.PAID\n order.lease.status = LeaseStatus.PAID\n order.lease.save()\n order.save()\n OrderRefundFactory(order=order, status=OrderRefundStatus.PENDING)\n\n OrderToken.objects.create(\n order=order, token=\"98765\", valid_until=now() - relativedelta(hours=1)\n )\n OrderToken.objects.create(\n order=order, token=\"12345\", valid_until=now() - relativedelta(days=7)\n )\n\n products = [\n {\n \"id\": \"123123123\",\n \"product_id\": 1123,\n \"title\": order.product.name,\n \"count\": 1,\n \"pretax_price\": 100,\n \"tax\": 24,\n \"price\": 100,\n \"type\": 1,\n }\n ]\n\n assert OrderRefund.objects.count() == 1\n\n payment_provider = create_bambora_provider(bambora_provider_base_config, request)\n with mock.patch(\n 
\"payments.providers.bambora_payform.requests.post\",\n side_effect=mocked_refund_response_create,\n ), mock.patch(\n \"payments.providers.bambora_payform.BamboraPayformProvider.get_payment_details\",\n side_effect=mocked_refund_payment_details(products=products),\n ), pytest.raises(\n ValidationError\n ) as exception:\n payment_provider.initiate_refund(order)\n\n assert \"Cannot refund an order that has another pending refund\" in str(exception)\n assert OrderRefund.objects.count() == 1", "def unlink(self, cr, uid, ids, context=None):\n payenrich = self.read(cr, uid, ids, ['state'], context=context)\n for s in payenrich:\n if s['state'] not in ['draft', 'cancel']:\n raise osv.except_osv(_('Invalid Action Error'), _('In Order To Delete A Service Request Order(s), It Must Be Cancelled First!'))\n return super(payment_enrich, self).unlink(cr, uid, ids, context=context)", "def test_refund_with_applied_refund_and_bank_refund_and_recognized_revenue(self):\n\n # Invoice 600.00\n debit_jobs(\n [\n (self.job, A(580), Entry.WORK_DEBIT),\n (self.job2, A(20), Entry.WORK_DEBIT),\n ],\n recognize_revenue=True,\n )\n\n # Payment of 700.00 is incorrectly applied to first job\n credit_jobs([(self.job, A(700), A(0), A(0))], D(700))\n\n one = A(n=\"-0.01\", t=\"0.01\")\n\n self.assert_balances(\n bank=A(700, 0, 0),\n balance=A(\"-120\") + one,\n debited=A(\n 580\n ), # invoice debit (680) + refund debit (0) = total debited (680)\n invoiced=A(\n 580\n ), # invoice debit (680) + adjustment (0) = total invoiced (680)\n paid=A(-700), # payment credit (-700) + refund debit (0) = paid (-700)\n credited=A(\n -700\n ), # payment credit (-700) + adjustment (0) = total credited (-700)\n income=A(600).net_amount,\n tax=A(600).tax_amount,\n )\n\n self.assert_balances(\n bank=A(700, 0, 0),\n balance=A(20),\n debited=A(20), # invoice debit (20) + refund debit (0) = total debited (20)\n invoiced=A(20), # invoice debit (20) + adjustment (0) = total invoiced (20)\n paid=A(0), # payment credit (0) + refund debit (0) = paid (0)\n credited=A(0), # payment credit (0) + adjustment (0) = total credited (0)\n income=A(600).net_amount,\n tax=A(600).tax_amount,\n switch_to_job=self.job2,\n )\n\n # Refund 20.00 from first job and apply to second job\n refund_jobs([(self.job, A(120) - one, A(0)), (self.job2, A(0), A(20))])\n\n self.assert_balances(\n bank=A(600, 0, 0),\n balance=A(0),\n debited=A(\n 700\n ), # invoice debit (680) + refund debit (20) = total debited (700)\n invoiced=A(\n 580\n ), # invoice debit (680) + adjustment (0) = total invoiced (680)\n paid=A(-580), # payment credit (-700) + refund debit (20) = paid (-680)\n credited=A(\n -700\n ), # payment credit (-700) + adjustment (0) = total credited (-700)\n income=A(600).net_amount,\n tax=A(600).tax_amount,\n )\n\n self.assert_balances(\n bank=A(600, 0, 0),\n balance=A(0),\n debited=A(\n 20\n ), # invoice debit (20) + refund debit (20) = total debited (70)\n invoiced=A(20), # invoice debit (20) + adjustment (0) = total invoiced (20)\n paid=A(-20), # payment credit (-20) + refund debit (0) = paid (-20)\n credited=A(\n -20\n ), # payment credit (-20) + adjustment (0) = total credited (-20)\n income=A(600).net_amount,\n tax=A(600).tax_amount,\n switch_to_job=self.job2,\n )", "def destroy(self, request, *args, **kwargs):\n\n instance = self.get_object()\n user = instance.user\n refund_policy = None\n\n if self.request.user.is_staff:\n refund_policy = request.data.get('refund_policy', None)\n if self.request.user.id != user.id:\n cancel_reason = 
Reservation.CANCELATION_REASON_ADMIN_CANCELLED\n else:\n cancel_reason = Reservation.CANCELATION_REASON_USER_CANCELLED\n\n refund_data = instance.process_refund(cancel_reason, refund_policy)\n if refund_data:\n Reservation.send_refund_confirmation_email(refund_data)\n\n return Response(status=status.HTTP_204_NO_CONTENT)", "def withdraw(self, amount):\n self.balance -= amount", "def refund_cert_callback(sender, course_enrollment=None, **kwargs):\r\n\r\n # Only refund verified cert unenrollments that are within bounds of the expiration date\r\n if not course_enrollment.refundable():\r\n return\r\n\r\n target_certs = CertificateItem.objects.filter(course_id=course_enrollment.course_id, user_id=course_enrollment.user, status='purchased', mode='verified')\r\n try:\r\n target_cert = target_certs[0]\r\n except IndexError:\r\n log.error(\"Matching CertificateItem not found while trying to refund. User %s, Course %s\", course_enrollment.user, course_enrollment.course_id)\r\n return\r\n target_cert.status = 'refunded'\r\n target_cert.refund_requested_time = datetime.now(pytz.utc)\r\n target_cert.save()\r\n target_cert.order.status = 'refunded'\r\n target_cert.order.save()\r\n\r\n order_number = target_cert.order_id\r\n # send billing an email so they can handle refunding\r\n subject = _(\"[Refund] User-Requested Refund\")\r\n message = \"User {user} ({user_email}) has requested a refund on Order #{order_number}.\".format(user=course_enrollment.user,\r\n user_email=course_enrollment.user.email,\r\n order_number=order_number)\r\n to_email = [settings.PAYMENT_SUPPORT_EMAIL]\r\n from_email = microsite.get_value('payment_support_email', settings.PAYMENT_SUPPORT_EMAIL)\r\n try:\r\n send_mail(subject, message, from_email, to_email, fail_silently=False)\r\n except Exception as exception: # pylint: disable=broad-except\r\n err_str = ('Failed sending email to billing to request a refund for verified certificate'\r\n ' (User {user}, Course {course}, CourseEnrollmentID {ce_id}, Order #{order})\\n{exception}')\r\n log.error(err_str.format(\r\n user=course_enrollment.user,\r\n course=course_enrollment.course_id,\r\n ce_id=course_enrollment.id,\r\n order=order_number,\r\n exception=exception,\r\n ))\r\n\r\n return target_cert", "def rollback(self):\n conn = self.threadingLocal.connection\n if isinstance(conn, Transaction) and not conn._obsolete:\n self.threadingLocal.connection.rollback()", "def rollback(self):\n try:\n if self._cur_batch:\n self._cur_batch.rollback()\n except ValueError:\n # ignore \"Batch must be in progress to rollback\" error\n pass\n self._cur_batch = None\n self._num_mutations = 0", "def undo(checksum: str):\n if check_transaction(\"undo\", checksum) is False:\n abort(403)\n transaction = Transaction.query \\\n .filter(Transaction.undone == False) \\\n .order_by(Transaction.timestamp.desc()).first_or_404()\n # don't undo changes that happened more than 20s in the past\n if transaction.timestamp + timedelta(0, 20) < datetime.now():\n abort(404)\n transaction.undone = True\n db.session.add(transaction)\n db.session.commit()\n return \"ok\"", "async def test_refund_request(app, session, stan_server, event_loop, client_id, events_stan, future):\n # Call back for the subscription\n from account_mailer.worker import cb_subscription_handler\n\n # vars\n invoice_id = '1'\n events_subject = 'test_subject'\n events_queue = 'test_queue'\n events_durable_name = 'test_durable'\n\n # register the handler to test it\n await subscribe_to_queue(events_stan,\n events_subject,\n events_queue,\n 
events_durable_name,\n cb_subscription_handler)\n\n # add an event to queue\n mail_details = {\n 'identifier': 'NR 123456789',\n 'orderNumber': '1',\n 'transactionDateTime': '2020-12-12 14:10:20',\n 'transactionAmount': 50.00,\n 'transactionId': 'REG1234'\n }\n await helper_add_ref_req_to_queue(events_stan, events_subject, invoice_id=invoice_id, mail_details=mail_details)\n\n assert True # If no errors, we assumed test passed.\n\n # Test drawdown refund\n mail_details = {\n 'identifier': 'NR 123456789',\n 'orderNumber': '1',\n 'transactionDateTime': '2020-12-12 14:10:20',\n 'transactionAmount': 50.00,\n 'transactionId': 'REG1234',\n 'refunDate': '2000-01-01',\n 'bcolAccount': '12345',\n 'bcolUser': '009900'\n }\n await helper_add_ref_req_to_queue(events_stan, events_subject, invoice_id=invoice_id, mail_details=mail_details,\n pay_method='drawdown')", "def rollback(self):\n if self._transaction is None:\n raise TransactionNotStartedError(\"Cannot call rollback without a transaction\")\n else:\n def _resetTxn(result):\n self._transaction = None\n d = self._config.rollback(self._transaction)\n d.addCallback(_resetTxn)\n return d", "def rollback(self) -> None:\n with self.lock:\n self.wait(self._rollback_gen())", "def rollback(self):\n pass", "def endTransaction(self, transactionID: int) -> None:\n ...", "def make_payment(self, payment):\n self._balance -= payment", "def withdrawal(cls, amount):\n if amount >= 0 and cls.is_logged_in():\n cls.__current_acct.__transaction(-amount)\n else:\n print('withdrawal error')", "def rollback_object(self, obj):\n attribute_manager.rollback(obj)\n try:\n self.dirty.remove(obj)\n except KeyError:\n pass\n try:\n self.deleted.remove(obj)\n except KeyError:\n pass", "def test_initiate_refund_refunded_amount_does_not_match(\n bambora_provider_base_config: dict, order: Order\n):\n request = RequestFactory().request()\n order.status = OrderStatus.PAID\n order.lease.status = LeaseStatus.PAID\n order.lease.save()\n order.save()\n\n OrderToken.objects.create(\n order=order, token=\"98765\", valid_until=now() - relativedelta(hours=1)\n )\n OrderToken.objects.create(\n order=order, token=\"12345\", valid_until=now() - relativedelta(days=7)\n )\n place_price = order.total_price + 10\n\n products = [\n {\n \"id\": \"123123123\",\n \"product_id\": 1123,\n \"title\": order.product.name,\n \"count\": 1,\n \"pretax_price\": price_as_fractional_int(\n convert_aftertax_to_pretax(place_price, order.product.tax_percentage)\n ),\n \"tax\": int(order.product.tax_percentage),\n \"price\": price_as_fractional_int(place_price),\n \"type\": 1,\n }\n ]\n\n payment_provider = create_bambora_provider(bambora_provider_base_config, request)\n with mock.patch(\n \"payments.providers.bambora_payform.requests.post\",\n side_effect=mocked_refund_response_create,\n ), mock.patch(\n \"payments.providers.bambora_payform.BamboraPayformProvider.get_payment_details\",\n side_effect=mocked_refund_payment_details(products=products),\n ), pytest.raises(\n RefundPriceError\n ) as exception:\n payment_provider.initiate_refund(order)\n\n assert (\n f\"The amount to be refunded ({currency_format(place_price)}) \"\n f\"does not match the amount paid ({currency_format(order.total_price)})\"\n ) in str(exception)\n assert not OrderRefund.objects.exists()", "def create(payment, **data):\n if isinstance(payment, resources.Payment):\n payment = payment.id\n\n http_client = HttpClient()\n response, _ = http_client.post(routes.url(routes.REFUND_RESOURCE, payment_id=payment), data)\n return 
resources.Refund(**response)", "def svn_fs_abort_txn(*args):\r\n return _fs.svn_fs_abort_txn(*args)", "def rollback(self):\r\n self.db.rollback()", "def revert(self, *args, **kwargs):", "def make_payment(self,amount):\n self._balance-=amount", "def reversed(payment_id):\n EpayPayment = apps.get_model('epay', 'EpayPayment')\n epay = PaymentProcessor.epay\n with transaction.atomic():\n epay_payment = EpayPayment.objects.select_related('payment').get(payment_id=payment_id)\n payment = epay_payment.payment\n epay.cancel(\n payment_id, payment.amount, epay_payment.approval_code,\n epay_payment.reference, currency=payment.currency)\n\n payment.change_status(\"cancelled\")\n\n return epay_payment", "def post(invoice_id):\n current_app.logger.info(f'<Refund.post : {invoice_id}')\n request_json = request.get_json(silent=True)\n try:\n valid_format, errors = schema_utils.validate(request_json, 'refund') if request_json else (True, None)\n if not valid_format:\n return error_to_response(Error.INVALID_REQUEST, invalid_params=schema_utils.serialize(errors))\n\n response = RefundService.create_refund(invoice_id, request_json)\n\n except BusinessException as exception:\n return exception.response()\n current_app.logger.debug(f'>Refund.post : {invoice_id}')\n return jsonify(response), HTTPStatus.ACCEPTED", "def RollBack(self):\r\n self.conn.rollback()", "def terminate(self, refund=None):\r\n self.require_item()\r\n\r\n url = '{0}/terminate'.format(self.get_url())\r\n params = {\r\n 'refund': refund if refund else 'none'\r\n }\r\n url = url + '?' + http.urlencode_any(params)\r\n\r\n request = http.Request('PUT', url)\r\n\r\n return request, parsers.parse_empty", "def rollback(self):\n raise NotImplementedError", "def cancel_or_refund_request(payload):\n response = requests.post(url, data=payload)\n return response.json()", "def withdraw(self, amount, budget):\r\n if budget != \"Total Balance\":\r\n assert budget in self.budgets, \"Specified budget doesn't exist\"\r\n self.budgets[budget] -= float(amount)\r\n self.balance -= float(amount)", "def rollback(self):\n self._rollback = True", "def subtract(self, amount: float, reason: str = \"\") -> \"Bank\":\n\n if amount == 0: # Pointless, do nothing.\n return 0\n\n self.__record_ledger__(-amount, reason)\n self.balance -= amount\n return self", "def cancel_account_payment(self, payment_txn, user):\n order = payment_txn.order\n with transaction.atomic():\n payment_txn.status = Transaction.STATUS_FAILED\n payment_txn.save()\n\n order.order_status = Order.ORDER_CANCELLED\n order.payment_status = Order.PAYMENT_VOID\n order.shipping_status = Order.SHIPPING_NOT_REQUIRED\n order.updated_by = unicode(user)\n order.save()", "def rollback(self, snapshot):\n\n if not self.__settings:\n return\n\n _logger.debug('Performing rollback of data into '\n 'subsequent harvest period. 
Metric data and transaction events'\n 'will be preserved and rolled into next harvest')\n\n self.merge_metric_stats(snapshot)\n self._merge_transaction_events(snapshot, rollback=True)\n self._merge_synthetics_events(snapshot, rollback=True)\n self._merge_error_events(snapshot)\n self._merge_custom_events(snapshot, rollback=True)\n self._merge_span_events(snapshot, rollback=True)", "def retrieve(payment, refund_id):\n if isinstance(payment, resources.Payment):\n payment = payment.id\n\n http_client = HttpClient()\n response, _ = http_client.get(routes.url(routes.REFUND_RESOURCE, resource_id=refund_id, payment_id=payment))\n return resources.Refund(**response)", "def unlink(self, cr, uid, ids, context=None):\n allowances_archive = self.read(cr, uid, ids, ['transfer','state'], context=context)\n unlink_ids = []\n for record in allowances_archive:\n if record['transfer'] == False and record['state'] in ['draft','cancel']:\n unlink_ids.append(record['id'])\n else:\n raise osv.except_osv(_('Invalid action !'), _('Sorry you can not Delete this record(s), Because The request is in Process , You have To cancelled Firest or It already Transtered To account Voucher!'))\n for id in unlink_ids:\n allowances_archive_name = self.browse(cr, uid, id, context=context).name\n message = _(\"Env and Safety allowances archive '%s' has been deleted.\") % allowances_archive_name\n self.log(cr, uid, id, message)\n return super(env_and_safety_allowances_archive, self).unlink(cr, uid, unlink_ids, context=context)", "def _undo(self):\n if not self._executed:\n raise TransactionNotExecuted(self)\n # The default implementation is to return the inverse of this\n # transaction.\n return Inverse(self)", "def refund_chq(self, refund_chq):\n\n self._refund_chq = refund_chq", "def transaction_delete(request, transaction_id, model_class=Transaction, template_name='budget/transactions/delete.html'):\n transaction = get_object_or_404(Transaction.active.all(), pk=transaction_id)\n if request.POST:\n if request.POST.get('confirmed'):\n transaction.delete()\n return HttpResponseRedirect(reverse('budget_transaction_list'))\n return render_to_response(template_name, {\n 'transaction': transaction,\n }, context_instance=RequestContext(request))", "def pay_fee(self, fee):\n self.wallet -= fee", "def unreturnbank(self):\n pass", "def reverse_transaction(self):\n portal_types = getToolByName(self.context, 'portal_types')\n\n transaction = self.context\n entries = transaction.entries()\n\n # check if there are transaction entries\n if not transaction.canUndoOrReverse():\n raise AccessControl_Unauthorized('No permission to create transactionentries, or there are no entries to reverse')\n\n # add the new reversal transaction\n transaction_folder = transaction.getTransactionFolder()\n new_transactionid = transaction_folder.generateUniqueId('Transaction')\n\n portal_types.constructContent('Transaction',\n transaction_folder,\n new_transactionid,)\n\n new_transaction = transaction_folder[new_transactionid]\n # create all the reverse transactions\n for transactionEntry in entries:\n entryid = new_transaction.generateUniqueId('TransactionEntry')\n portal_types.constructContent('TransactionEntry',\n new_transaction,\n entryid,)\n debit_credit = DEBIT\n if transactionEntry.getDebitCredit() == DEBIT:\n debit_credit = CREDIT\n new_transaction[entryid].edit(Account=transactionEntry.getAccount(),\n DebitCredit=debit_credit,\n Amount=transactionEntry.getAmount())\n\n self.request.response.redirect(new_transaction.absolute_url()+ '/view')", "def 
rollback(self):\n self.db.rollback()", "def Rollback(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "def safeWithdrawal(self):\n if self._after_dead_line():\n # each contributor can withdraw the amount they contributed if the goal was not reached\n if not self._funding_goal_reached.get():\n amount = self._balances[self.msg.sender]\n self._balances[self.msg.sender] = 0\n if amount > 0:\n if self.icx.send(self.msg.sender, amount):\n self.FundTransfer(self.msg.sender, amount, False)\n Logger.debug(f'FundTransfer({self.msg.sender}, {amount}, False)', TAG)\n else:\n self._balances[self.msg.sender] = amount\n\n # The sales target has been met. Owner can withdraw the contribution.\n if self._funding_goal_reached.get() and self._addr_beneficiary.get() == self.msg.sender:\n if self.icx.send(self._addr_beneficiary.get(), self._amount_raised.get()):\n self.FundTransfer(self._addr_beneficiary.get(), self._amount_raised.get(), False)\n Logger.debug(f'FundTransfer({self._addr_beneficiary.get()},'\n f'{self._amount_raised.get()}, False)', TAG)\n # reset amount_raised\n self._amount_raised.set(0)\n else:\n # if the transfer to beneficiary fails, unlock contributors balance\n Logger.debug(f'Failed to send to beneficiary!', TAG)\n self._funding_goal_reached.set(False)", "def enqueue_refund(self, status, user, refund_reason=None,\n rejection_reason=None):\n from mkt.prices.models import Refund\n refund, c = Refund.objects.safer_get_or_create(contribution=self,\n user=user)\n refund.status = status\n\n # Determine which timestamps to update.\n timestamps = []\n if status in (mkt.REFUND_PENDING, mkt.REFUND_APPROVED_INSTANT,\n mkt.REFUND_FAILED):\n timestamps.append('requested')\n if status in (mkt.REFUND_APPROVED, mkt.REFUND_APPROVED_INSTANT):\n timestamps.append('approved')\n elif status == mkt.REFUND_DECLINED:\n timestamps.append('declined')\n for ts in timestamps:\n setattr(refund, ts, datetime.datetime.now())\n\n if refund_reason:\n refund.refund_reason = refund_reason\n if rejection_reason:\n refund.rejection_reason = rejection_reason\n refund.save()\n return refund", "def _rollback_context(self, persister):\n try:\n # Rollback the job transactional context.\n persister.rollback()\n\n except _errors.DatabaseError as error:\n _LOGGER.error(\n \"Error in %s rolling back job's context.\",\n self.__action.__name__, exc_info=error\n )\n\n # Update the job status.\n self.__result = False\n message = \"Tried to execute action ({0}).\".format(\n self.__action.__name__)\n self._add_status(Job.ERROR, Job.COMPLETE, message, True)\n\n # Finish context which means mark the job as finished\n # and update procedure's information.\n self._finish_context(False)", "def rollback(self):\n self.conn.rollback()", "def rollback(self):\n self.success = False\n self.close()", "def withdraw_money(transaction):\n conn = create_connection(database)\n\n sql = ''' UPDATE card\n SET balance = balance - ?\n WHERE number = ?'''\n\n with conn:\n cur = conn.cursor()\n cur.execute(sql, transaction)\n\n conn.commit()", "def test_initiate_refund_success(bambora_provider_base_config: dict, order: Order):\n request = RequestFactory().request()\n order.status = OrderStatus.PAID\n order.lease.status = LeaseStatus.PAID\n order.lease.save()\n order.save()\n\n OrderToken.objects.create(\n order=order, token=\"98765\", valid_until=now() - relativedelta(hours=1)\n )\n valid_token = OrderToken.objects.create(\n order=order, token=\"12345\", valid_until=now() - relativedelta(days=7)\n )\n if hasattr(order.product, 
\"price_for_tier\"):\n place_price = order.product.price_for_tier(order.lease.berth.pier.price_tier)\n area = order.lease.berth.pier.harbor\n else:\n # Winter products are priced per m2\n place_price = rounded(\n order.product.price_value\n * order.lease.place.place_type.width\n * order.lease.place.place_type.length,\n )\n area = order.lease.place.winter_storage_section.area\n\n products = [\n {\n \"id\": get_talpa_product_id(order.product.id, area, False),\n \"product_id\": 1123,\n \"title\": order.product.name,\n \"count\": 1,\n \"pretax_price\": price_as_fractional_int(\n convert_aftertax_to_pretax(place_price, order.product.tax_percentage)\n ),\n \"tax\": int(order.product.tax_percentage),\n \"price\": str(price_as_fractional_int(place_price)),\n \"type\": 1,\n }\n ]\n\n payment_provider = create_bambora_provider(bambora_provider_base_config, request)\n with mock.patch(\n \"payments.providers.bambora_payform.requests.post\",\n side_effect=mocked_refund_response_create,\n ) as mock_call, mock.patch(\n \"payments.providers.bambora_payform.BamboraPayformProvider.get_payment_details\",\n side_effect=mocked_refund_payment_details(products=products),\n ):\n refund = payment_provider.initiate_refund(order)\n\n assert refund.refund_id == \"123456\"\n assert refund.order == order\n assert refund.status == OrderRefundStatus.PENDING\n assert refund.amount == order.total_price\n\n args = mock_call.call_args.kwargs.get(\"json\")\n assert (\n args.get(\"order_number\")\n == f\"{order.order_number}-{valid_token.created_at.timestamp()}\"\n )", "def rollback_action(args, kwargs, was_interrupted, result=None):\n raise NotImplementedError()", "def transferfunds(self):", "def _do_rollback(self):\n self.backend.rollback()" ]
[ "0.8025384", "0.8025384", "0.72545916", "0.7059862", "0.6983477", "0.69578123", "0.6895562", "0.68423915", "0.65162545", "0.64691937", "0.6408193", "0.6375899", "0.63735026", "0.6373101", "0.6268878", "0.6206176", "0.61452", "0.6065676", "0.606385", "0.60265565", "0.5964484", "0.59596354", "0.5942356", "0.5896965", "0.58796966", "0.5820681", "0.58144075", "0.57555", "0.56643736", "0.56643736", "0.5630709", "0.56230015", "0.5621838", "0.56120396", "0.5577625", "0.5553781", "0.5530479", "0.55229753", "0.5519964", "0.55189615", "0.55144036", "0.5490747", "0.54850864", "0.54833037", "0.5442767", "0.5441117", "0.5432171", "0.5428056", "0.5418114", "0.5402348", "0.5396348", "0.53838825", "0.53772306", "0.5375173", "0.5364461", "0.53521276", "0.53501195", "0.5334987", "0.5329123", "0.53246355", "0.5314837", "0.52964264", "0.52854425", "0.52741486", "0.52580583", "0.52543074", "0.52458143", "0.5236213", "0.5232906", "0.5230857", "0.5228705", "0.5227218", "0.5226912", "0.5222822", "0.52211845", "0.5218666", "0.5200222", "0.51979315", "0.5197451", "0.5191122", "0.51898485", "0.51833296", "0.51832354", "0.5178602", "0.5178487", "0.51676327", "0.5167576", "0.5166881", "0.51540434", "0.5153867", "0.5153826", "0.5149501", "0.5149008", "0.513053", "0.512865", "0.5126713", "0.51158017", "0.51100063", "0.5109187", "0.51076174" ]
0.7412963
2
Show portfolio of stocks
def index():
    rows=db.execute("SELECT * FROM portofolio WHERE user_id=:s",s=session["user_id"])
    row=db.execute("SELECT * FROM users WHERE id=:s",s=session["user_id"])
    overall=0
    for line in rows:
        overall+=line["total"]
    overall+=row[0]["cash"]
    return render_template("portofolio.html",rows=rows,cash=usd(row[0]["cash"]),overall=usd(overall))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def index():\n stocks = []\n username = session.get(\"username\")\n symbol_list = db.execute(\"SELECT stock_symbol FROM history WHERE username=:username GROUP BY stock_symbol\", username=username)\n cash_balance = db.execute(\"SELECT cash FROM users WHERE username=:username\", username=username)[0][\"cash\"]\n total_value = cash_balance\n\n for sym in symbol_list:\n symbol = sym[\"stock_symbol\"]\n new_stock = Stock(username, symbol)\n stocks.append(new_stock)\n total_value += new_stock.quantity * new_stock.price\n\n\n return render_template(\"portfolio.html\", stocks = stocks, cash_balance=usd(cash_balance), total_value=usd(total_value))", "def index():\n # Use a place holder ':curr_id' to call the session id which is the user's id\n rows = db.execute(\"SELECT stocks.symbol, stocks.name, portfolio.shares FROM portfolio JOIN users ON users.id = portfolio.user_id JOIN stocks ON portfolio.stock_id = stocks.id WHERE users.id==:curr_id\", curr_id=session[\"user_id\"])\n # Make a select query only on cash to be able to display it in portfolio's table\n row_cash = db.execute(\"SELECT cash FROM users WHERE id==:curr_id\", curr_id=session[\"user_id\"])\n\n # gets the current price of each stock queried\n if rows:\n for r in rows:\n r_shares = r[\"shares\"]\n r_symbol = r[\"symbol\"]\n # run lookup function to get current price\n dict_2 = lookup(r_symbol)\n # Adds the key \"price\" and its value to the dictionary \"rows\"\n r[\"price\"] = dict_2[\"price\"]\n # Calculates the grand total (stocks’ total value plus cash)\n total = sum([r[\"price\"]*r[\"shares\"] for r in rows]) + row_cash[0][\"cash\"]\n return render_template(\"portfolio.html\", rows=rows, row_cash=row_cash, total=total)", "def portfolio():\n #Query transactions by user id\n trans = Transactions.query.filter_by(owner=session['user_id']).all()\n \n #Create list of comanies user owns stock in\n companies = []\n for t in trans:\n if t.symbol not in companies:\n companies.append(t.symbol)\n\n #Create list of current stock dictionaries and total their values\n total = 0\n stocks = []\n for company in companies:\n trans = Transactions.query.filter_by(owner=session['user_id'], symbol=company).all()\n stock = {}\n stock['shares'] = 0\n for t in trans:\n stock['shares'] += t.shares\n if stock['shares'] > 0:\n stock['symbol'] = company\n stock['name'] = lookup(company)['name']\n stock['price'] = lookup(company)['price']\n stock['total'] = stock['shares'] * stock['price']\n stock['price'] = usd(stock['price'])\n stock['total'] = usd(stock['total'])\n total += float(stock['total'][1:].replace(',', ''))\n stocks.append(stock)\n\n #Set user cash and total values\n value = {}\n value['cash'] = usd(Users.query.filter_by(id=session['user_id']).first().cash)\n value['total'] = usd(total + float(value['cash'][1:].replace(',', '')))\n\n #Add values to list\n stocks.append(value)\n\n #Return list of dictionaries\n return stocks", "def index():\n user_stocks_list = db.execute(\"SELECT stock FROM transactions WHERE id = :current_id\", current_id=session[\"user_id\"])\n user_stocks = []\n for stock in user_stocks_list:\n if stock['stock'] not in user_stocks:\n user_stocks.append(stock['stock'])\n\n stock_portfolio = []\n\n for possible_stock in user_stocks:\n bought_shares_list = db.execute(\"SELECT SUM(units) FROM transactions WHERE (id = :current_id AND stock = :stock AND type = :t)\",\n current_id=session[\"user_id\"], stock=possible_stock, t='B')\n bought_shares = 0\n bought_shares = bought_shares_list[0][\"SUM(units)\"]\n sold_shares_list = 
db.execute(\"SELECT SUM(units) FROM transactions WHERE (id = :current_id AND stock = :stock AND type = :t)\",\n current_id=session[\"user_id\"], stock=possible_stock, t='S')\n sold_shares = 0\n sold_shares = sold_shares_list[0][\"SUM(units)\"]\n if sold_shares == None:\n sold_shares = 0\n\n available_shares = 0\n if bought_shares != None and (bought_shares - sold_shares) > 0:\n available_shares = bought_shares - sold_shares\n current_price = int(lookup(possible_stock)[\"price\"])\n market_value = current_price * available_shares\n dict_stock = {}\n dict_stock['name_stock'] = possible_stock\n dict_stock['shares_quantity'] = available_shares\n dict_stock['current_price'] = current_price\n dict_stock['market_value'] = market_value\n stock_portfolio.append(dict_stock)\n else:\n pass\n\n available_money_list = db.execute(\"SELECT cash FROM users WHERE id = :current_id\", current_id=session[\"user_id\"])\n available_money = usd(available_money_list[0]['cash'])\n\n username_list = db.execute(\"SELECT username FROM users WHERE id = :current_id\", current_id=session[\"user_id\"])\n username = username_list[0][\"username\"]\n\n sum_market_values = 0\n for collection in stock_portfolio:\n sum_market_values += int(collection['market_value'])\n\n total_value = usd(available_money_list[0]['cash'] + sum_market_values)\n\n return render_template(\"index.html\", stock_portfolio=stock_portfolio, user_stocks=user_stocks, money=available_money, name=username, total_value=total_value)", "def history():\n \"\"\"Show portfolio of stocks\"\"\"\n all_rows = []\n rows = db.execute(\"SELECT * FROM history WHERE id = :id\",\n id=session['user_id'])\n if rows==None or len(rows) < 1:\n return render_template(\"history.html\", all_rows=all_rows)\n else:\n for row in rows:\n share_row = []\n share_row.append(row[\"symbol\"])\n share_row.append(row[\"shares\"])\n share_row.append(usd(row[\"price\"]))\n share_row.append(row[\"transacted\"])\n all_rows.append(share_row)\n return render_template(\"history.html\", all_rows=all_rows)", "def index():\n\n # Get user's cash\n user = db.execute(\"SELECT cash FROM users WHERE id = ?\", session[\"user_id\"])\n\n # Get portfolio\n portfolios = db.execute(\"SELECT * FROM portfolios WHERE user_id = ?\", session[\"user_id\"])\n\n # Get symbol for each stock\n length = len(portfolios)\n for i in range(length):\n symbol = portfolios[i]['stocks']\n\n # Lookup stock price and add to portfolio information\n portfolios[i]['price'] = lookup(symbol)['price']\n portfolios[i]['total'] = float(portfolios[i]['price']) * portfolios[i]['shares']\n\n # Calculate total value of stocks\n value = 0\n for j in range(length):\n value += portfolios[j]['price']\n\n # Calculate grand total of stocks plus cash\n g_total = user[0][\"cash\"] + value\n\n return render_template(\"index.html\", portfolios=portfolios, cash=user[0][\"cash\"], g_total=g_total)", "def __display_portfolio(self, p, w):\n\n global st_sort_key\n global st_reverse_sort\n\n line = 1\n total_assets = 0\n total_change = 0\n\n p.assets.sort(key=st_sort_key, reverse=st_reverse_sort)\n\n for s in p.assets:\n # Make sure we have space to write the portfolio totals.\n if line >= (curses.LINES - 3):\n break\n\n total_assets += (p.asset_counts[s.symb()] * s.price())\n total_change += (p.asset_counts[s.symb()] * s.change())\n\n # Color red/green for stocks going up/down.\n change_color = curses.color_pair(0)\n if s.change() > 0:\n change_color = curses.color_pair(1)\n elif s.change() < 0:\n change_color = curses.color_pair(2)\n\n direction = ''\n if 
s.change() > 0:\n direction = u'\\u25b2'\n elif s.change() < 0:\n direction = u'\\u25bc'\n\n w.addstr(line, 0, '%-15s' % s.name()[0:14])\n w.addstr(line, 16, '%-5s' % s.symb(), curses.A_BOLD)\n w.addstr(line, 22, '%9.2f' % s.price())\n w.addstr(line, 32, direction.encode('utf-8'), change_color)\n w.addstr(line, 33, '%6.2f %5.2f%%' % (abs(s.change()),\n abs(s.change_percent()) *\n 100),\n change_color)\n w.addstr(line, 47, '|')\n w.addstr(line, 49, '%-6d' % p.asset_counts[s.symb()])\n w.addstr(line, 56, '%11.2f' % (p.asset_counts[s.symb()] *\n s.price()))\n w.addstr(line, 68, '%10.2f' % (p.asset_counts[s.symb()] *\n s.change()),\n change_color)\n\n line += 1\n\n line += 1\n\n # Get overall change (of assets) for the portfolio.\n overall_change = total_assets - p.cost_basis()\n overall_color = curses.color_pair(0)\n if overall_change > 0:\n overall_color = curses.color_pair(1)\n elif overall_change < 0:\n overall_color = curses.color_pair(2)\n\n # Color red/green for assets changing.\n change_color = curses.color_pair(0)\n if total_change > 0:\n change_color = curses.color_pair(1)\n elif total_change < 0:\n change_color = curses.color_pair(2)\n\n # Print accumulated stats for the portfolio.\n w.addstr(line, 0, 'Daily:')\n w.addstr(line, 8, '$%.2f' % total_change,\n curses.A_BOLD | change_color)\n w.addstr(line, 23, 'Total:')\n w.addstr(line, 30, '$%.2f' % overall_change,\n curses.A_BOLD | overall_color)\n w.addstr(line + 1, 0, 'Assets:')\n w.addstr(line + 1, 8, '$%.2f' % total_assets)\n w.addstr(line + 1, 23, 'Cash: $%.2f' % p.cash)\n w.addstr(line + 1, 44, 'Total value:')\n w.addstr(line + 1, 58, '$%.2f' % (p.cash + total_assets),\n curses.A_BOLD)", "def portfolio_view(request):\n\n try:\n query = request.dbsession.query(Stock)\n user_entries = query.filter(Stock.account_id == request.authenticated_userid)\n except DBAPIError:\n return DBAPIError(DB_ERR_MSG, content_type='text/plain', status=500)\n\n return {'stocks': all_entries}", "def index():\n user_id = session[\"user_id\"]\n portfolio_table = port(user_id, db)\n \n if not isinstance(portfolio_table, dict): \n return apology(\"Error in portfolio\")\n \n return render_template(\"portfolio.html\",\n shares_list = portfolio_table[\"shares\"],\n cash = portfolio_table[\"cash\"],\n total = portfolio_table[\"total\"])", "def index():\n\n #select user's portfolio\n rows = db.execute(\"SELECT * FROM portfolio WHERE userid=:id\", id=session[\"user_id\"])\n\n #set temporary holding place for cash to zero\n tcash = 0\n\n #update the stock information in user's portfolio\n for row in rows:\n stock = row[\"stock\"]\n number = row[\"number\"]\n quote = lookup(stock)\n total = float(number) * float(quote[\"price\"])\n tcash += total\n db.execute(\"UPDATE portfolio SET price=:price, total=:total WHERE userid=:id AND stock=:stock AND number=:number\", price=usd(quote[\"price\"]), total=total, id=session[\"user_id\"], stock=stock, number=number)\n\n #select user's cash and updated portfolio\n updated_cash = db.execute(\"SELECT cash FROM users WHERE id=:id\", id=session[\"user_id\"])\n tcash += updated_cash[0][\"cash\"]\n updated_stock = db.execute(\"SELECT stock, SUM(number) AS number, price, SUM(total) AS stock_total FROM portfolio WHERE userid=:id GROUP BY stock HAVING SUM(number) > 0\", id=session[\"user_id\"])\n\n return render_template(\"index.html\", stocks=updated_stock, cash=usd(updated_cash[0][\"cash\"]), all_total=usd(tcash))", "def display_portfolio(self, p):\n\n if self.terminate:\n return\n\n w = self.windows['MAIN']\n\n self.clear_main()\n 
self.__display_portfolio(p, w)\n self.clear_header()\n self.set_header(p)\n\n self.refresh()", "def index():\n\n user_id = session.get('user_id')\n table_name = f'stocks_user{user_id}'\n db.execute(\"CREATE TABLE IF NOT EXISTS ? (stock_symbol TEXT NOT NULL, shares NUMBER NOT NULL, price NUMBER NOT NULL, time TEXT NOT NULL)\", table_name)\n money = db.execute(\"SELECT dinheiro FROM users WHERE id = ?\", user_id)[0]['dinheiro']\n total_value_in_stocks = 0\n\n rows = db.execute('SELECT DISTINCT stock_symbol FROM ? WHERE NOT stock_symbol=\"DINHEIRO\" GROUP BY stock_symbol HAVING SUM(shares) >= 1', table_name)\n for row in rows:\n row[\"company_name\"] = lookup(row[\"stock_symbol\"])['name']\n row[\"price_stock\"] = lookup(row[\"stock_symbol\"])['price']\n row[\"shares\"] = db.execute(\"SELECT SUM(shares) FROM ? WHERE stock_symbol = ?\", table_name, row[\"stock_symbol\"])[0][\"SUM(shares)\"]\n total_value_in_stocks += row[\"shares\"] * row[\"price_stock\"]\n\n portfolio_value = total_value_in_stocks + money\n\n return render_template('index.html', rows=rows, money=money, portfolio_value=portfolio_value)", "def portfolio_detail():\n return render_template('portfolio/portfolio.html')", "def index():\n def getListOfCompanies(username, symbolOrPriceOrNumber):\n if symbolOrPriceOrNumber == \"symbol\" or symbolOrPriceOrNumber == \"price\" or symbolOrPriceOrNumber == \"number\":\n rows = db.execute(\"SELECT {0} FROM portfolio WHERE username=:username\".format(symbolOrPriceOrNumber), username=username)\n if symbolOrPriceOrNumber == \"symbol\" and len(rows) >= 1:\n namesList = []\n for row in rows:\n namesList.append(lookup(row[symbolOrPriceOrNumber])[\"name\"])\n return namesList\n elif symbolOrPriceOrNumber == \"price\" and len(rows) >= 1:\n pricseList = []\n for row in rows:\n pricseList.append(row[symbolOrPriceOrNumber])\n return pricseList\n elif symbolOrPriceOrNumber == \"number\" and len(rows) >= 1:\n numbersList = []\n for row in rows:\n numbersList.append(row[symbolOrPriceOrNumber])\n return numbersList\n else:\n return None\n else:\n return None\n\n def getTotalValueHolding(username):\n priceRow = db.execute(\"SELECT price FROM portfolio WHERE username=:username\", username=username)\n numberRow = db.execute(\"SELECT number FROM portfolio WHERE username=:username\", username=username)\n\n if len(priceRow) >= 1 and len(numberRow) >= 1 and len(priceRow) == len(numberRow):\n totalList = []\n for i in range(len(priceRow)):\n totalList.append(float(priceRow[i][\"price\"]) * float(numberRow[i][\"number\"]))\n\n return totalList\n\n username = db.execute(\"SELECT username FROM users WHERE id=:userId\", userId=session[\"user_id\"])[0][\"username\"]\n companiesNames = getListOfCompanies(username, \"symbol\")\n numberOfShares = getListOfCompanies(username, \"number\")\n prices = getListOfCompanies(username, \"price\")\n totalValueHolding = getTotalValueHolding(username)\n\n currentCashBalance = db.execute(\"SELECT cash FROM users WHERE id=:userId\", userId=session[\"user_id\"])[0][\"cash\"]\n total = 0\n if totalValueHolding:\n for totalValue in totalValueHolding:\n total = total + totalValue\n\n cashAndStocksTotalValue = float(currentCashBalance) + total\n\n return render_template(\"index.html\", username=username, companiesNames=companiesNames, numberOfShares=numberOfShares,\n prices=prices, totalValueHolding=totalValueHolding, currentCashBalance=currentCashBalance, cashAndStocksTotalValue=cashAndStocksTotalValue)", "def index():\n\n rows = db.execute(\"SELECT * FROM portfolio WHERE id = :id\", 
id=session[\"user_id\"])\n users = db.execute(\"SELECT * FROM users WHERE id = :id\", id=session[\"user_id\"])\n cash = users[0][\"cash\"]\n total = 0\n\n for row in rows:\n symbol = row[\"symbol\"]\n shares = row[\"shares\"]\n stock = lookup(symbol)\n price_t = float(stock[\"price\"]) * shares\n db.execute(\"UPDATE portfolio SET price=:price WHERE id=:id AND symbol=:symbol\",\n price=float(stock[\"price\"]), id=session[\"user_id\"], symbol=row[\"symbol\"])\n total += price_t\n\n TOTAL = total + cash\n return render_template(\"index.html\", rows=rows, cash=usd(cash), TOTAL=usd(TOTAL))", "def index():\n\n # obtain cash info from users database\n cash = db.execute(\"SELECT cash FROM users WHERE id = :id\", id = session[\"user_id\"])\n grandtotal = cash[0][\"cash\"]\n \n # obtain stock info from portfolio database\n stocks = db.execute(\"SELECT symbol, shares FROM portfolio WHERE id = :id\", id=session[\"user_id\"])\n \n # for every stock in the user's portfolio, assign dict key/values for use in html/jinja\n for stock in stocks:\n symbol = str(stock[\"symbol\"])\n shares = int(stock[\"shares\"])\n name = \"\"\n price = \"\"\n total = \"\"\n quote = lookup(symbol)\n stock[\"name\"] = quote[\"name\"]\n stock[\"price\"] = \"{:.2f}\".format(quote[\"price\"])\n stock[\"total\"] = \"{:.2f}\".format(quote[\"price\"] * shares)\n stock[\"grandtotal\"] = quote[\"price\"] * shares\n grandtotal += stock[\"grandtotal\"]\n\n # format grandtotal to force 2 decimal places\n grandtotal = \"{:.2f}\".format(grandtotal)\n \n # render index page with some given values\n return render_template(\"index.html\", stocks = stocks, cash = cash, grandtotal = grandtotal)", "def index():\n stocks = db.execute(\"SELECT Symbol, Company, SUM(NumberOfShares) AS Shares, UnitPrice, SUM(TotalPrice) AS TotalPrice FROM \"\n \"portfolio WHERE UserID = :userid GROUP BY Symbol\", userid=session.get(\"user_id\"))\n\n symbol = db.execute(\"SELECT Symbol FROM portfolio WHERE UserID = :userid\", userid=session.get(\"user_id\"))\n\n cash = db.execute(\"SELECT cash FROM users WHERE id = :userid\", userid=session.get(\"user_id\"))\n\n balance = cash[0][\"cash\"]\n grandTotal = 0\n for stock in stocks:\n grandTotal = grandTotal + stock[\"TotalPrice\"]\n\n grandTotal = grandTotal + balance\n\n return render_template(\"index.html\", stockList=stocks, cash=balance, totalAssets=grandTotal, currentUser=session.get(\"user_id\"))", "def index():\n # Establish userID.\n userID = session[\"user_id\"]\n # Isolate all results from portfolio table for the current user.\n portfolio = db.execute(\"SELECT * FROM portfolio WHERE id=:userID\", userID=session[\"user_id\"])\n # Cash for current user (first row, cash column)\n cash = db.execute(\"SELECT cash FROM users WHERE id=:userID\", userID=userID)[0][\"cash\"]\n # Empty list to store stock data as iterating through rows.\n stockData = []\n # Set total for combined stoc value to 0.\n totalAllStocks = 0\n\n # Iterate over rows from portfolio and allocate a row for each stock that has more than 0 owned.\n for row in portfolio:\n if row[\"numOwned\"] != 0:\n stockData.append(row)\n\n # Iterate over rows in stock data and provide value for each column. 
Other values for use in html are already in list from previous loop.\n # Had to play around with usd, once in usd is a str rather than float so usd always has to be post calculations.\n for row in stockData:\n stock = lookup(row[\"symbol\"])\n row[\"name\"] = stock[\"name\"]\n row[\"currentPrice\"] = usd(stock[\"price\"])\n row[\"total\"] = usd(row[\"numOwned\"] * stock[\"price\"])\n totalAllStocks += row[\"numOwned\"] * stock[\"price\"]\n # Grand Total is combined stock values and cash value.\n grandTotal = totalAllStocks + cash\n # Return index.html input sources.\n return render_template(\"index.html\", stockData=stockData, cash=usd(cash), totalAllStocks = usd(totalAllStocks), grandTotal=usd(grandTotal))", "def history():\n \n #select user's portfolio\n rows = db.execute(\"SELECT stock, number, trans_price, transaction_stamp FROM portfolio WHERE userid=:id\", id=session[\"user_id\"])\n return render_template(\"history.html\", rows=rows)", "def stock(request, *args, **kwargs):\n\n mode = 'lines'\n xaxis_title = 'Years'\n date_list = []\n open_list = []\n close_list = []\n low_list = []\n high_list = []\n ticker = request.GET.get('ticker', '')\n year = request.GET.get('year', '')\n month = request.GET.get('month', '')\n\n if month.isdigit():\n month = int(month)\n\n data = Stock.objects.filter(ticker__iexact=ticker).order_by('date')\n if year and year.isdigit():\n if month and month in MONTHS:\n data = data.filter(Q(date__year=year,\n date__month=month))\n xaxis_title = f'{MONTHS[month]} {year}'\n else:\n data = data.filter(Q(date__year=year))\n xaxis_title = year\n\n if not ticker or not data.exists():\n return HttpResponseRedirect('/stocks')\n title = f'{ticker} ({year})' if year else f'{ticker}'\n if data.exists():\n xy_data = data.values('date', 'oopen', 'close', 'low', 'high')\n for item in xy_data:\n date_list.append(item['date'])\n open_list.append(item['oopen'])\n close_list.append(item['close'])\n low_list.append(item['low'])\n high_list.append(item['high'])\n\n figure = {'data': [\n Scatter(x=date_list, y=high_list, mode=mode, name='high',\n opacity=0.8, marker_color='green'),\n Scatter(x=date_list, y=low_list, mode=mode, name='low',\n opacity=0.8, marker_color='red', visible='legendonly'),\n Scatter(x=date_list, y=open_list, mode=mode, name='open',\n opacity=0.8, marker_color='blue', visible='legendonly'),\n Scatter(x=date_list, y=close_list, mode=mode, name='close',\n opacity=0.8, marker_color='orange', visible='legendonly'),\n ], 'layout': {'title': {'text': title, 'y': 0.9, 'x': 0.5,\n 'xanchor': 'center', 'yanchor': 'top'},\n 'yaxis_title': \"Value\", 'xaxis_title': xaxis_title\n }}\n\n plot_div = plot(figure, output_type='div')\n return render(request, \"index.html\", context={'plot_div': plot_div})", "def history():\n # Select stock info for every single stock transaction for the respective user\n rows = db.execute(\"SELECT symbol, shares, price, transacted FROM portfolio WHERE userid = :userid\", userid=session[\"user_id\"])\n # Return template with the list that has each stock transaction info\n return render_template(\"history.html\", rows=rows)", "def render_investip():\n\tlinewidth = 2\n\n\tst.sidebar.markdown('# Dashboard')\n\tstock = st.sidebar.selectbox('Stock:', stocks)\n\n\tstartdd = datetime.datetime(2020, 3, 1)\n\tstartdd = st.sidebar.date_input('start-date', value=startdd)\n\n\tendd = datetime.datetime.now()\n\tendd = st.sidebar.date_input('end-date', value=endd)\n\n\tt0 = stock\n\tt0_ohlc = extract(ticker=t0, start_date=startdd, end_date=endd)\n\tt0_df = 
pd.DataFrame({f'{t0}-Close': t0_ohlc.Close})\n\n\t# st.write(t0_ohlc)\n\tmpf.plot(t0_ohlc, type='candle',volume=True,show_nontrading=False, title=t0, figscale=1.)\n\t# tdf = plot_ticker(t0, df=t0_df, start_date=startdd, end_date=endd)\n\tst.pyplot()\n\n\n\tst.sidebar.markdown('## Stock Correlation')\n\tstock_returns = st.sidebar.checkbox('Enable', value=True, key='cb_corrs')\n\tif stock_returns:\n\t\tst.markdown('## Stock Correlation')\n\t\tstock_selection = st.sidebar.multiselect('Stocks', stocks, def_stocks)\n\t\tplot_stock_correlations(stock_selection, startdd, endd)\n\t\tst.pyplot()\n\n\t# trading_context = True\n\tst.sidebar.markdown('## Returns')\n\tstock_returns = st.sidebar.checkbox('Enable', value=True, key='cb_returns')\n\tif stock_returns:\n\t\tst.markdown('## Stock Returns')\n\t\tst.markdown('''### Daily Stock returns\n[EWMA](https://www.investopedia.com/articles/07/ewma.asp)''')\n\t\tspan = st.sidebar.slider('span', 2, 21, value=5)\n\t\tplot_historical(t0, t0_ohlc, span=span, linewidth=linewidth)\n\t\tst.pyplot()\n\n\n\t# trading_context = True\n\tst.sidebar.markdown('## Volatility')\n\ttrading_context = st.sidebar.checkbox('Enable', value=False, key='cb_volatility')\n\tif trading_context:\n\t\tst.markdown('## Volatility & Risk')\n\t\tst.markdown('''### Daily differences between High & Low\nWe model these ranges with [Inverse Gamma PDF](https://en.wikipedia.org/wiki/Inverse-gamma_distribution).\nGreen lines denote +/- 1 stdev.\n''')\n\t\tf, ax = plt.subplots(1, 2, figsize=(14,6), sharex=False)\n\t\tf.suptitle(f'{t0} High-Low Daily')\n\t\tmmd = t0_ohlc.High - t0_ohlc.Low\n\t\t# mmd.dropna(inplace=True)\n\t\tmmd.plot(color='r', ax=ax[0], lw=linewidth)\n\n\t\tmu, sigma = mmd.dropna().mean(), mmd.dropna().std()\n\t\tzval = 1.#96\n\t\t# TODO: try one-tail limit to get outliers\n\t\t_=ax[0].axhline(y=mu, color='k', lw=linewidth)\n\t\t_=ax[0].axhline(y=mu-zval*sigma, color='g', lw=linewidth)\n\t\t_=ax[0].axhline(y=mu+zval*sigma, color='g', lw=linewidth)\n\n\t\tp95 = mmd.dropna().quantile(.95)\n\t\t_=ax[0].axhline(y=p95, color='b', lw=linewidth, label='p95')\n\t\t_=ax[1].axvline(p95, color='b', lw=linewidth, label='p95')\n\n\t\twith warnings.catch_warnings():\n\t\t warnings.filterwarnings(\"ignore\", category=RuntimeWarning)\n\t\t print(invgamma.fit(mmd))\n\t\t sns.distplot(mmd, fit=invgamma, kde=False, ax=ax[1])\n\t\t_=ax[1].axvline(mmd.values[-1], color='r', label='last', lw=linewidth)\n\t\t_=ax[1].axvline(mu, color='k', label='mean', lw=linewidth)\n\t\t_=ax[1].legend()\n\t\tst.pyplot()\n\n\t\tst.markdown('''### Daily Average True Range (ATR)\nImplementation follows [ATR](https://kodify.net/tradingview/indicators/average-true-range/).\nCheck [Investopedia](https://www.investopedia.com/terms/a/atr.asp) for more info.''')\n\n\t\tatr_df = pd.DataFrame({\n\t\t\tf'{t0}-High-Low': t0_ohlc.High - t0_ohlc.Low,\n\t\t\tf'{t0}-High-PrevCloseAbs': abs(t0_ohlc.High - t0_ohlc.Close.shift(1)),\n\t\t\tf'{t0}-Low-PrevCloseAbs': abs(t0_ohlc.Low - t0_ohlc.Close.shift(1)),\n\t\t}).max(axis=1)\n\t\tatr_df = pd.DataFrame({\n\t\t\tf'{t0}-true-range': atr_df,\n\t\t})\n\t\tatr_df[f'{t0}-ATR14'] = atr_df.iloc[:, 0].rolling(14).mean()\n\t\t# st.write(atr_df)\n\n\t\tf, ax = plt.subplots(1, 2, figsize=(14,6), sharex=False)\n\t\tf.suptitle(f'{t0} True Range & SMA14')\n\t\tatr_df.plot(ax=ax[0], lw=linewidth)\n\n\t\twith warnings.catch_warnings():\n\t\t warnings.filterwarnings(\"ignore\", category=RuntimeWarning)\n\t\t #print(invgamma.fit(f'{t0}-true-range'))\n\t\t sns.distplot(atr_df[f'{t0}-true-range'], 
fit=invgamma, kde=False, ax=ax[1])\n\t\t_=ax[1].axvline(atr_df[f'{t0}-true-range'].values[-1], color='b', label='last', lw=linewidth)\n\t\t_=ax[1].axvline(atr_df[f'{t0}-ATR14'].values[-1], color='r', label='last', lw=linewidth)\n\t\t_=ax[1].legend()\n\t\tst.pyplot()\n\n\n\n\t# do_strategy_analysis = True\n\tst.sidebar.markdown('## Trading Strategy')\n\tdo_strategy_analysis = st.sidebar.checkbox('Enable', value=False, key='cb_stra')\n\tif do_strategy_analysis:\n\t\tst.markdown('## Trading Strategy')\n\t\tst.markdown('[investopedia](https://www.investopedia.com/articles/active-trading/052014/how-use-moving-average-buy-stocks.asp)')\n\t\tshort_window = st.sidebar.slider('short_window', 2, 21, 3)\n\t\tlong_window = st.sidebar.slider('long_window', 3, 50, 5)\n\t\tplot_strategy(t0, t0_df, short_window, long_window)\n\t\tst.pyplot()\n\n\t# do_corr_analysis = False\n\tst.sidebar.markdown('## Correlation analysis')\n\tdo_corr_analysis = st.sidebar.checkbox('Enable', value=False, key='cb_corr')\n\tif do_corr_analysis:\n\t\tst.markdown('## Correlation analysis')\n\t\tt1= 'GC=F' # # SP500 'GC=F'\n\t\tt2 = 'CL=F' # '^GSPC' # '^DJI' # DJ30 'CL=F'\n\t\tt1 = st.sidebar.selectbox('REF1:', stocks, index=stocks.index(t1))\n\t\tt2 = st.sidebar.selectbox('REF2:', stocks, index=stocks.index(t2))\n\t\tif st.sidebar.button('Reset'):\n\t\t\tt1 = 'GC=F' # # SP500 'GC=F'\n\t\t\tt2 = 'CL=F' # '^GSPC' # '^DJI' # DJ30 'CL=F'\n\t\t\t# t1 = st.sidebar.selectbox('ref1:', stocks, index=stocks.index(t1))\n\t\t\t# t2 = st.sidebar.selectbox('ref2:', stocks, index=stocks.index(t2))\n\n\t\t@st.cache(persist=True, show_spinner=False)\n\t\tdef get_dataframes(t1, t2, startdd, endd):\n\t\t\tt1_ohlc = extract(ticker=t1, start_date=startdd, end_date=endd)\n\t\t\tt2_ohlc = extract(ticker=t2, start_date=startdd, end_date=endd)\n\t\t\treturn t1_ohlc, t2_ohlc\n\n\t\tt1_ohlc, t2_ohlc = get_dataframes(t1, t2, startdd, endd)\n\t\tt1_df = pd.DataFrame({f'{t1}-Close': t1_ohlc.Close})\n\t\tt2_df = pd.DataFrame({f'{t2}-Close': t2_ohlc.Close})\n\n\t\t#print(t0_ohlc.shape)\n\t\t#t0_ohlc.head()\n\t\t# print(t1_ohlc.shape)\n\t\t# ticker_ohlc.head()\n\t\t# ticker_ohlc.info()\n\n\t\ttdf = t0_df.join(t1_df).join(t2_df).interpolate().dropna()\n\t\t# tdf.head(10)\n\n\t\t# t0_ohlc.corr(t1_ohlc)\n\t\t#ax = t0_ohlc.Close.plot()\n\t\t#t1_ohlc.Close.plot(ax=ax)\n\n\t\timport numpy as np\n\t\tprint('glocal corrleation1: ', t0_ohlc.Close.corr(t1_ohlc.Close))\n\t\tprint('glocal corrleation2: ', t0_ohlc.Close.corr(t2_ohlc.Close))\n\n\t\tp_window_size = 5\n\t\tr_window_size = 5\n\t\tcentering = False\n\n\n\t\tmodf = lambda x: x\n\t\t#modf = np.log10\n\n\n\t\tmain_stat = f'[{t0}]-mean-roll{p_window_size}'\n\t\talt_stat_1 = f'[{t1}]-mean-roll{p_window_size}'\n\t\talt_stat_2 = f'[{t2}]-mean-roll{p_window_size}'\n\t\t# df_rc = pd.DataFrame({\n\t\t# main_stat : tdf.iloc[:, 0].apply(modf).rolling(window=p_window_size,center=centering).mean(),\n\t\t# alt_stat_1: tdf.iloc[:, 1].apply(modf).rolling(window=p_window_size,center=centering).mean(),\n\t\t# alt_stat_2: tdf.iloc[:, 2].apply(modf).rolling(window=p_window_size,center=centering).mean(),\n\t\t# })\n\t\tcom_val = 0.2\n\t\tdf_rc = pd.DataFrame({\n\t\t main_stat : tdf.iloc[:, 0].apply(modf).ewm(span=p_window_size, adjust=False).mean(),\n\t\t alt_stat_1: tdf.iloc[:, 1].apply(modf).ewm(span=p_window_size, adjust=False).mean(),\n\t\t alt_stat_2: tdf.iloc[:, 2].apply(modf).ewm(span=p_window_size, adjust=False).mean(),\n\t\t})\n\n\t\tdf_rc = df_rc.interpolate()\n\t\tdf_rc[f'[{t0}]-[{t1}]-corr-roll{r_window_size}'] = 
df_rc[main_stat].rolling(window=r_window_size, center=centering).corr(df_rc[alt_stat_1])\n\t\tdf_rc[f'[{t0}]-[{t2}]-corr-roll{r_window_size}'] = df_rc[main_stat].rolling(window=r_window_size, center=centering).corr(df_rc[alt_stat_2])\n\n\t\tf, ax = plt.subplots(3,1,figsize=(16,10),sharex=True)\n\t\t#df_rc.iloc[:,0].plot(ax=ax[0], legend=True)\n\t\tdf_rc.iloc[:,1].plot(ax=ax[0], legend=True, color='gold')\n\t\tdf_rc.iloc[:,2].plot(ax=ax[1], legend=True, color='darkred')\n\t\tdf_rc.iloc[:,3].plot(ax=ax[2], legend=True, color='gold')\n\t\tdf_rc.iloc[:,4].plot(ax=ax[2], legend=True, color='darkred')\n\t\tax[2].axhline(y=0, lw=1, color='black')\n\t\t#t0_ohlc.Close.rolling(window=r_window_size,center=True).mean().plot(ax=ax[0])\n\t\t#t1_ohlc.Close.rolling(window=r_window_size,center=True).mean().plot(ax=ax[1])\n\t\t# ax[0].set(xlabel='Frame',ylabel='Smiling Evidence')\n\t\t# ax[1].set(xlabel='Frame',ylabel='Pearson r')\n\t\t_=plt.suptitle(f\"{t0} Close rolling correlation to {t1}, {t2}\")\n\n\t\tst.pyplot()\n\n\n\t\tf,ax=plt.subplots(1, 2, figsize=(16,8),sharex=False)\n\n\t\t_= df_rc.plot.scatter(x=df_rc.columns[1],\n\t\t y=df_rc.columns[2],\n\t\t c=df_rc.columns[0],\n\t\t colormap='viridis',\n\t\t # legend=None,\n\t\t ax=ax[0])\n\n\t\tprint(df_rc.columns)\n\t\tnewr_p = df_rc.iloc[-1, 0]\n\t\tt1_p = df_rc.iloc[-1, 1]\n\t\tt2_p = df_rc.iloc[-1, 2]\n\t\tt1_c = df_rc.dropna().iloc[-1, 3]\n\t\tt2_c = df_rc.dropna().iloc[-1, 4]\n\t\tprint('current_corr:', (t1_c, t2_c))\n\n\t\t# figure out circle size\n\t\taaaa = df_rc.iloc[:, 1].aggregate([np.max, np.min])\n\t\txrange = np.ceil(aaaa.values[0] - aaaa.values[1])\n\t\tprint(aaaa.values[0], aaaa.values[1], xrange)\n\t\txradius = xrange / 20.\n\n\t\tcircle = plt.Circle((t1_p, t2_p), xradius, color='r', fill=False)\n\t\tax[0].add_artist(circle)\n\t\t#ax[0].set_xlabel(f'GOLD Price {t1_p:.4f}')\n\t\t#ax[0].set_ylabel(f'OIL Price {t2_p:.4f}')\n\t\t# ax[0].legend().set_visible(False)\n\n\t\t_= df_rc.plot.scatter(x=df_rc.columns[-2],\n\t\t y=df_rc.columns[-1],\n\t\t c=df_rc.columns[0],\n\t\t colormap='viridis',\n\t\t # legend=True,\n\t\t #linestyle=\n\t\t ax=ax[1])\n\n\t\t# figure out circle size\n\t\taaaa = df_rc.iloc[:, -2].aggregate([np.max, np.min])\n\t\txrange = np.ceil(aaaa.values[0] - aaaa.values[1])\n\t\tprint(aaaa.values[0], aaaa.values[1], xrange)\n\t\txradius = xrange / 20.\n\n\t\tcircle1 = plt.Circle((t1_c, t2_c), xradius, color='r', fill=False)\n\t\tax[1].add_artist(circle1)\n\t\t#ax[1].set_ylabel('OIL Correlation')\n\t\t#_= ax[1].set_xlabel('GOLD Correlation')\n\n\n\t\tst.pyplot()", "def stock_view(request):\n if request.method == 'GET':\n try:\n symbol = request.GET['symbol']\n except KeyError:\n return {}\n try:\n response = requests.get(API_URL + '/stock/{}/company'.format(symbol))\n data = response.json()\n return {'company': data}\n except ValueError:\n raise HTTPNotFound()\n if request.method == 'POST':\n try:\n symbol = request.POST['symbol']\n except KeyError:\n raise HTTPBadRequest()\n\n try:\n response = requests.get(API_URL + '/stock/{}/company'.format(symbol))\n data = response.json()\n except ValueError:\n raise HTTPNotFound()\n\n isntance = Stock(**data)\n\n try:\n request.dbsession.add(instance)\n except DBAPIError:\n return Response(DB_ERR_MSG, content_type='text/plain', status=500)\n \n return HTTPFound(location=request.route_url('portfolio'))", "def history():\n\n # obtain stock info from portfolio database\n history = db.execute(\"SELECT symbol, shares, price, date FROM history WHERE id = :id ORDER BY date DESC\", 
id=session[\"user_id\"])\n \n # for every stock in the user's portfolio, assign dict key/values for use in html/jinja\n for transaction in history:\n symbol = transaction[\"symbol\"]\n shares = transaction[\"shares\"]\n price = transaction[\"price\"]\n date = transaction[\"date\"]\n\n return render_template(\"history.html\", history = history)", "def index():\n # Selects stock that user actually has\n stockuserhas = db.execute(\n \"SELECT symbol, shares FROM portfolio WHERE userid = :userid GROUP BY symbol HAVING SUM(shares) > 0\", userid=session[\"user_id\"])\n # Finds the amount of money user has to spend on stocks\n amount = db.execute(\"SELECT cash FROM users WHERE id = :userid\", userid=session[\"user_id\"])\n # The virst value in the array is the amount of money user can spend\n money = amount[0][\"cash\"]\n # If the user does not have any stocks, return index using with just money as input\n if not stockuserhas:\n return render_template(\"index.html\", money=money, completetotal=money)\n\n # Selects summarative information for each symbol\n stocks = db.execute(\n \"SELECT SUM(total), symbol, SUM(shares), name FROM portfolio WHERE userid = :userid GROUP BY symbol\", userid=session[\"user_id\"])\n # For each symbol, add the current price of the stock to the end of the dictionary\n for stock in stocks:\n # Looks up current price of stock based on symbol\n stockinfo = lookup(stock[\"symbol\"])\n # Finds current value of stock\n currentprice = float(stockinfo[\"price\"])\n # Adds the price to the dictionary\n stock.update({\"price\": currentprice})\n\n # The total value of stocks user owns\n totalstockvalue = db.execute(\"SELECT SUM(total) FROM portfolio WHERE userid = :userid\", userid=session[\"user_id\"])\n # Total amount a user owns is the cash they have plus the sum of the stocks\n completetotal = float(money + float(totalstockvalue[0]['SUM(total)']))\n # Return index.html with all of the information put together above\n return render_template(\"index.html\", completetotal=completetotal, money=money, stocks=stocks)", "def portfolio(self):\n self.update_portfolio()\n return self._immutable_portfolio", "async def list(self, ctx, user=None, date=None):\n if not user:\n user = ctx.message.author\n else:\n user = util.GetUserFromNameStr(ctx.message.server.members, user)\n change = GetPortfolioChange(user.id)\n portfolio = GetPortfolio(user.id, util.GetTimestamp(date))\n await self.bot.say(\n '```%s\\'s portfolio:\\n'\n 'Total Value: $%s (%.2f%s) \\n'\n '%s```' % (user, portfolio.Value(), change, \"%\", portfolio.AsTable()))", "def history():\n\n #Get the current data of the stock.\n\n #SUM all similar stock values from Portfolio.\n ports = db.execute(\"SELECT * FROM history WHERE id = :id\", id=session[\"user_id\"])\n\n #Get the remaining cash of the user from the users table.\n get_cash = db.execute(\"SELECT cash FROM users WHERE id = :id\", id=session['user_id'])\n\n #Convert the get_cash dict to float so it can be displayed to index.html\n remaining_cash = get_cash[0]['cash']\n\n #SUM the stocks' total value plus the remaining cash.\n get_grand_total = db.execute(\"SELECT *, SUM(total) as grand_total FROM portfolio where id = :id\", id=session[\"user_id\"])\n grand_total_fl = get_grand_total[0]['grand_total']\n\n\n\n return render_template(\"history.html\", ports=ports)", "def index():\n\n rows = db.execute(\n 'SELECT symbol, SUM(CASE WHEN operation = \"SELL\" THEN -shares ELSE shares END) shares FROM transactions WHERE id = :id GROUP BY symbol;', id=session['user_id'])\n\n cash = 
db.execute('SELECT cash FROM users WHERE id = :id', id=session['user_id'])[0]['cash']\n\n grand_total = cash\n\n for row in rows:\n stock = lookup(row['symbol'])\n\n row['name'] = stock['name']\n row['price'] = stock['price']\n row['total'] = row['shares'] * stock['price']\n\n grand_total += row['shares'] * stock['price']\n\n rows.append({\n 'symbol': 'CASH',\n 'cash': cash,\n 'total': grand_total\n })\n\n return render_template('index.html', stocks=rows)", "def print_portfolio(self):\n self.__validate_google_credentials()\n sheet = self.service.spreadsheets()\n result = sheet.values().get(spreadsheetId=self.google_spreadsheet_id,\n range=SAMPLE_RANGE_NAME).execute()\n values = result.get('values', [])\n\n if not values:\n print('No data found.')\n else:\n print('pulled data:')\n print('----------------')\n print('ALL VALUES\\n', '-----------------\\n', values)\n print('ONLY PRICES\\n', '----------------')\n print('{:25} {}'.format('name', 'price'))\n \n for row in values:\n if len(row) < 8 or row[2] != '[OWN]':\n continue\n else:\n print('{:25} {}'.format(row[0], row[6]))", "def portfolio():\n projects = get_projects()\n for project in projects:\n unicode_body = project[\"description\"].decode(\"utf-8\")\n html_body = markdown.markdown(unicode_body)\n safe_html_body = Markup(html_body)\n project[\"description\"] = safe_html_body\n context = {\n \"projects\": projects\n }\n return render_template(\"portfolio.html\", **context)", "def index():\n#Get the current data of the stock.\n\n #SUM all similar stock values from Portfolio.\n ports = db.execute(\"SELECT *, SUM(quantity) as sharetotal FROM portfolio WHERE id = :id GROUP BY symbol\", id=session[\"user_id\"])\n\n #Get the remaining cash of the user from the users table.\n get_cash = db.execute(\"SELECT cash FROM users WHERE id = :id\", id=session['user_id'])\n\n #Convert the get_cash dict to float so it can be displayed to index.html\n remaining_cash = get_cash[0]['cash']\n\n #SUM the stocks' total value plus the remaining cash.\n get_grand_total = db.execute(\"SELECT *, SUM(total) as grand_total FROM portfolio where id = :id\", id=session[\"user_id\"])\n grand_total_fl = get_grand_total[0]['grand_total']\n\n\n\n #Hold value is the sum of the shares * price of each shares in the portfolios PLUS the remaining cash.\n if grand_total_fl != None:\n hold_value = grand_total_fl + remaining_cash\n #Update hte current hold value of the user\n db.execute(\"UPDATE users SET hold_value = :hold_value WHERE id = :id\", id=session[\"user_id\"], hold_value=hold_value)\n else:\n hold_value = remaining_cash\n\n\n #Query for the symbol in the database for the specific user.\n rows = db.execute(\"SELECT symbol, stock_price FROM portfolio WHERE id = :id GROUP by symbol\", id=session[\"user_id\"])\n\n #Initiate a list for all the open prices of stocks of a certain user.\n price_open = []\n num_stocks = []\n symbol_list = []\n avg_open_list = []\n profit_loss_list = []\n price_today_list = []\n\n\n for i in range(len(rows)):\n print(rows[i]['symbol'])\n symbol = rows[i]['symbol']\n open_price = rows[i]['stock_price']\n print(rows[i]['stock_price'])\n stock = lookup(rows[i]['symbol'])\n price_today = stock['price']\n\n #Insert data into the price_open list\n price_open.insert(i, open_price)\n\n #Count the number of stocks in posession\n share_total = ports[i]['sharetotal']\n\n #Insert data into the num_stocks list\n num_stocks.insert(i, share_total)\n\n #Insert data into the symbol_list list\n symbol_list.insert(i, symbol)\n\n #Insert data into the 
price_today_list\n price_today_list.insert(i, price_today)\n\n #Compute for the average open price of all stocks of a certain user.\n total_price = ports[i]['total']\n avg_open = total_price/share_total\n avg_open_list.insert(i, avg_open)\n\n profit_loss = ((price_today - avg_open)/avg_open)*100\n\n profit_loss_list.insert(i, (profit_loss))\n\n\n db.execute(\"UPDATE portfolio SET price_today = :price_today, profit_loss = :profit_loss, avg_open = :avg_open WHERE symbol = :symbol AND id = :id\", price_today=price_today, symbol=symbol,profit_loss=profit_loss, avg_open=avg_open, id=session[\"user_id\"])\n\n\n print(\"The symbols are:\", symbol_list)\n print(\"The quantity are: \", num_stocks)\n print(\"The open prices are: \", price_open)\n print(\"The average open prices are: \", avg_open_list)\n print(\"The prices today are: \", price_today_list)\n print(\"The profit and loss are: \", profit_loss_list)\n\n return render_template(\"index.html\", ports=ports, remaining_cash = remaining_cash, hold_value=hold_value,)", "def get_stock(self, investor):\n\n # Find out the stock details \n sym, qty, price = investor.portfolios[0].portfolios[0]\n # p = investor.portfolios[0]\n \n # Check if broker has a portfolio\n if self.portfolios[0]:\n self.portfolios[0].add_stock(sym, qty, price)\n else:\n # Broker doesn't have a portfolio\n p = Portfolio()\n #logging.info(\"p is: %s\" % p)\n p.add_stock(sym, qty, price)\n self.add_portfolio(p)\n logging.info(\"Broker's portfolios AFTER addition: %s\" % self)\n # logging.info(\"WHAT ARE YOU\")\n logging.info(\"Investor portfolio BEFORE removal: %s\" % investor.portfolios[0].portfolios)\n investor.portfolios[0].remove_stock(sym, qty)\n logging.info(\"Investor portfolio AFTER removal: %s\" % investor.portfolios[0])\n # investor.portfolios[0].portfolios.remove( (sym, qty, price) )\n \n # investor.portfolios[0].remove(sym, qty, price)\n total_price = qty * price\n investor.portfolios[0].value -= total_price\n investor.cash += qty * float(price)", "def stocks(request):\n\n try:\n stocks = StockList.objects.all()\n except StockList.DoesNotExist:\n stocks = None\n\n context = {\n 'title': 'Filter Stocks',\n 'year': datetime.now().year,\n 'user': request.user,\n 'stocks': stocks,\n }\n\n return render(\n request,\n 'app/stocksview.html',\n context,\n )", "def display_artist_available_portfolio(artist_name):\n if controls_utils.artist_has_work_in_db(artist_name):\n results = artwork_db.get_available_artwork_from_one_artist(artist_name)\n if results:\n for piece in results:\n print(piece)\n else:\n print('Sorry this artist does not have any available art at this time ')\n else:\n print('Sorry, no artwork from this artist to display ')", "def stock():\n stock=stock_data('AAPL',start(2019,12,1))\n return stock", "def index():\n symbols = db.execute(\"SELECT symbol FROM History WHERE id = :id GROUP BY symbol\", id=session['user_id'])\n companies = db.execute(\"SELECT company FROM History WHERE id = :id GROUP BY symbol\", id=session['user_id'])\n get_shares = db.execute(\"SELECT SUM(shares) FROM History WHERE id = :id GROUP BY symbol\", id=session['user_id'])\n shares = [share['SUM(shares)'] for share in get_shares]\n cash = db.execute(\"SELECT cash FROM users WHERE id = :id\", id=session['user_id'])\n\n return render_template(\"index.html\", symbols_companies_shares=zip(symbols, companies, shares), lookup=lookup, cash=cash)", "def index():\n\n # Get user\n user = session[\"user_id\"]\n\n # Query infos from database\n rows = db.execute(\"SELECT * FROM stocks WHERE user_id = 
:user\", user=user)\n cash = db.execute(\"SELECT cash FROM users WHERE id = :user\", user=user)[0]['cash']\n total_cash = cash\n\n # Populate stocks list wit hstock data\n stocks = []\n for index, row in enumerate(rows):\n stock_data = lookup(row['symbol'])\n stock_data['amount'] = row['amount']\n stock_data['quantity'] = round(stock_data['price'] * stock_data['amount'], 2)\n\n # Generate index table data\n stocks.append(list((\n stock_data['symbol'],\n stock_data['name'],\n stock_data['amount'],\n stock_data['price'],\n stock_data['quantity']\n )))\n total_cash += stocks[index][4]\n\n return render_template(\"index.html\", stocks=stocks, cash=round(cash, 2), total=round(total_cash, 2))", "def portfolio(request):\n projects = Project.objects.all()\n categories = None\n\n if request.GET:\n if 'category' in request.GET:\n categories = request.GET['category'].split(',')\n projects = projects.filter(category__name__in=categories)\n categories = ProjectCategory.objects.filter(name__in=categories)\n\n context = {\n 'projects': projects,\n 'current_categories': categories,\n }\n\n return render(request, 'portfolio/portfolio.html', context)", "def backtest_portfolio(self):\n raise NotImplementedError(\"Should implement backtest_portfolio()!\")", "def portfolio_table(self):\n idx = set(name.split('-')[0].split('.')[0] for name, etf in self.etfs.items() if not etf.sold())\n table = pd.DataFrame({'Invested': 0, 'Shares':0, 'Share Price':0, 'Present Value':0, 'P/L':0, 'P/L%':0},index=idx)\n for name, etf in self.etfs.items():\n if not etf.sold():\n table.loc[name.split('-')[0].split('.')[0], 'Invested'] += etf.initial_investment()\n table.loc[name.split('-')[0].split('.')[0], 'Shares'] += etf.n_shares\n table.loc[name.split('-')[0].split('.')[0], 'Share Price'] = etf.stock_price()\n table.loc[name.split('-')[0].split('.')[0], 'Present Value'] += etf.present_value()\n table.loc[name.split('-')[0].split('.')[0], 'P/L'] += etf.profit_loss()\n table.insert(1, 'PMA', round(table['Invested'] / table['Shares'], 2))\n table.insert(3, 'Initial Weight', round(table['Invested'] / table['Invested'].sum() * 100, 2))\n table.insert(4, 'Present Weight', round(table['Present Value'] / table['Present Value'].sum() * 100, 2))\n table['P/L%'] = round(table['P/L'] / table['Invested'] * 100, 2)\n table['P/L'] = round(table['P/L'], 2)\n table['Present Value'] = round(table['Present Value'], 2)\n return table.sort_values('Invested', 0, ascending=False)", "def get_stocks():\n print(\"fetching remote...\")\n code_dataframes = pd.read_html(\n 'http://kind.krx.co.kr/corpgeneral/corpList.do?method=download&searchType=13', header=0)[0]\n # 우리가 필요한 것은 회사명과 종목코드이기 때문에 필요없는 column들은 제외해준다.\n print(\"parsing and filtering data...\")\n code_dataframes.종목코드 = code_dataframes.종목코드.map('{:06d}'.format)\n # 한글로된 컬럼명을 영어로 바꿔준다.\n code_dataframes = code_dataframes[['회사명', '종목코드']]\n code_dataframes = code_dataframes.rename(\n columns={'회사명': 'name', '종목코드': 'code'})\n codes = code_dataframes['code']\n names = code_dataframes['name']\n stocks = []\n for i in range(len(names)):\n stocks.append({\n 'name': names[i],\n 'code': codes[i]\n })\n return stocks", "def history():\n\n user = session.get(\"user_id\")\n rows = db.execute(\"Select TransDate as Date, Stock, Price, case when Num < 0 then 'Sell' else 'Buy' end as Type, Num as Quantity from portfolio where User = :User order by Date asc\", User = session.get(\"user_id\"))\n\n\n return render_template(\"hist.html\", rows = rows)", "def graph(stock):\n output=stock_price(stock)\n return 
plt.plot(output)", "def stock():\n # Grab the inputs arguments from the URL\n # This is automated by the button\n args = flask.request.args\n\n # Get all the form arguments in the url with defaults\n if 'company' in args.keys() and args['company']:\n company = args['company']\n else:\n company = 'GOOG'\n\n cl = requests.get(\"https://www.quandl.com/api/v3/datasets/WIKI/%s.json?order=asc&rows=31&start_date=2015-07-01&end_date=2015-09-03\" % (company))\n if cl.status_code == 200:\n \tc2=cl.content\n \tstock=simplejson.loads(c2)\n \tabb=stock['dataset']['dataset_code']\n \tdatanames=stock['dataset']['column_names']\n \tdata=stock['dataset']['data']\n \tdataorg=pd.DataFrame(data,columns=datanames)\n \tdataorg['Date']=pd.to_datetime(dataorg['Date'])\n else:\n ######## THIS IS NOT RECOMMENDED, because now it just returns an error message if not find the ticker.\n return 'Error! Ticker does not exist!'\n\n\n # Create a graph\n fig = figure(x_axis_type=\"datetime\")\n fig.line(dataorg.Date,dataorg.Close)\n fig.title=\"Stock closing price (%s), from 07-01-2015 \" % (company)\n # fig.xaxis_axis_label='Date'\n # fig.yaxis_axis_label='Price'\n\n # Configure resources to include BokehJS inline in the document.\n # For more details see:\n # http://bokeh.pydata.org/en/latest/docs/reference/resources_embedding.html#module-bokeh.resources\n plot_resources = RESOURCES.render(\n js_raw=INLINE.js_raw,\n css_raw=INLINE.css_raw,\n js_files=INLINE.js_files,\n css_files=INLINE.css_files,\n )\n\n # For more details see:\n # http://bokeh.pydata.org/en/latest/docs/user_guide/embedding.html#components\n script, div = components(fig, INLINE)\n html = flask.render_template(\n 'embed.html',\n plot_script=script, plot_div=div, plot_resources=plot_resources,\n # color=color,\n company=company\n )\n return encode_utf8(html)", "def stock(request, stock_id):\n stock= Stock.objects.get(id=stock_id)\n entries= stock.entry_set.order_by('-date_added')\n context= {'stock': stock, 'entries': entries}\n return render(request, 'stock_trackers/stock.html', context)", "def home():\n stocks = preprocess()\n\n return render_template(\"main.html\",stocks=stocks)", "def get_portfolio_prices(stocks: list, funds: list, etfs: list, start_date: str, end_date=today) -> pd.DataFrame:\r\n data_frames_stocks = get_assets_data_frames(\r\n stocks, inv.get_stock_historical_data, 'brazil', start_date=start_date, end_date=end_date)\r\n data_frames_funds = get_assets_data_frames(\r\n funds, inv.get_fund_historical_data, 'brazil', start_date=start_date, end_date=end_date)\r\n data_frames_etfs = get_assets_data_frames(\r\n etfs, inv.get_etf_historical_data, 'brazil', start_date=start_date, end_date=end_date)\r\n\r\n data_frames = [*data_frames_stocks, *data_frames_funds, *data_frames_etfs]\r\n\r\n assets = [*stocks, *funds, *etfs]\r\n\r\n portfolio_prices = build_multi_index_data_frame(\r\n data_frames, assets, ['Close', 'Open', 'High', 'Low'])\r\n\r\n return portfolio_prices", "def stock_volume_history(stock_values):\n ticker = stock_values.name\n dates = stock_values.index\n \n # stock volume plot \n p2hover = HoverTool(tooltips=[(\"volume\", \"$y\"),])\n\n p = figure(x_axis_type = \"datetime\")\n\n p.title = \"{} Daily Volume\".format(ticker)\n p.title_text_font_size = '12'\n p.title_text_font_style = 'bold'\n\n # x axis\n p.xaxis.axis_label = 'Date'\n p.xaxis.axis_label_text_font_size = '9'\n\n # y axis\n p.yaxis.axis_label = 'Kilo Transactions'\n p.yaxis.axis_label_text_font_size = '9'\n p.yaxis[0].formatter = PrintfTickFormatter(format=\"%3d\")\n\n 
p.quad(top=stock_values['Volume'], bottom=0, left=dates, right=dates,\n fill_color=\"#036564\", line_color=\"#033649\")\n\n p.line(np.array(dates, 'M64'), stock_values['Volume 30'],\n color='#dfbd4d', **line_style)\n\n p.line(np.array(dates, 'M64'), stock_values['Volume 300'],\n color='#df1b06', **line_style)\n\n # set plot style\n p.plot_width = 800\n p.plot_height = 200\n p.grid.grid_line_alpha=0.3\n\n # set grid\n # change just some things about the x-grid\n p.xgrid.grid_line_color = None\n\n # change just some things about the y-grid\n p.ygrid.grid_line_alpha = 0.5\n p.ygrid.grid_line_dash = [6, 4]\n\n return p", "def stockButtonClicked(self):\n # Clear text edit box and get the stock symbol from combobox.\n self.central.text3.clear()\n stocksymbol = self.central.combobox.currentText()\n\n URL = 'https://finance.yahoo.com/quote/{0}/profile?p={0}'.format(stocksymbol)\n\n # Safely get the web page using the above URL.\n try:\n r = requests.get(URL)\n except:\n logging.error(\"Failed to get the web page: \" + URL)\n self.central.text3.setText(\"Failed to get the web page: \" + URL)\n return\n\n # Safely turn the response from requests into soup.\n try:\n html = r.text.encode('utf-8')\n soup = bs4.BeautifulSoup(html, 'lxml')\n except:\n logging.error(\"Failed on the soup\")\n self.central.text3.setText(\"Failed on the soup\")\n return\n\n # Safely extract data from the table.\n try:\n table = soup.find_all(\"table\")\n rows = table[0].find_all('tr')\n data = []\n for row in rows:\n cols = row.find_all('td')\n cols = [str.text.strip() for str in cols]\n data.append([str for str in cols if str])\n\n textdisplay = ''\n\n for x in data:\n for y in x:\n print(y)\n textdisplay += y\n textdisplay += '\\n'\n if y.isdigit():\n textdisplay += '\\n'\n self.central.text3.setText(textdisplay)\n\n except:\n logging.error(\"Failed to extract data from the table\")\n self.central.text3.setText(\"Failed to extract data from the table\")\n return\n\n self.updateGraph(symbol=stocksymbol)", "def get_portfolio(username):\n user_obj = User.query.filter(User.username == username).first()\n date = request.args.get('date')\n\n if user_obj is None:\n return util.build_json_response('User does not exist')\n\n if not util.is_valid_date_string(date):\n return util.build_json_response(\"Not a valid date of the form YYYY-MM-DD\")\n\n following_date = util.add_days_to_date(date, 1)\n equities = db.session.query(Portfolio.ticker, func.sum(Portfolio.quantity))\\\n .filter(Portfolio.user_id == user_obj.id) \\\n .filter(Portfolio.transaction_date <= following_date) \\\n .group_by(Portfolio.ticker).all()\n\n result = dict()\n for equity in equities:\n result[equity[0]] = equity[1]\n\n return util.build_json_response(\"Portfolio retrieved\", equities=result)", "def index():\n user_name = db.execute(\"SELECT username FROM users WHERE id = ?\", session[\"user_id\"])\n check = db.execute(\"SELECT name FROM main.sqlite_master WHERE type='table'\")\n #print(check)\n #print('stocks' not in check[0]['name'])\n if not any(c['name'] == 'stocks' for c in check):\n return render_template(\"index.html\", user_name=user_name)\n\n stocks = db.execute(\"SELECT * FROM stocks WHERE user_id = ?\", session[\"user_id\"])\n cash = db.execute(\"SELECT cash FROM users WHERE id = ?\", session[\"user_id\"])\n\n user_name = db.execute(\"SELECT username, cash FROM users WHERE id = ?\", session[\"user_id\"])\n total_value = user_name[0][\"cash\"]\n sum_stocks = db.execute(\"SELECT symbol, ammount FROM stocks WHERE user_id = ?\", session[\"user_id\"])\n\n 
for stock in sum_stocks:\n total_value += stock[\"ammount\"] * lookup(stock[\"symbol\"])['price']\n \n #print(stocks)\n return render_template(\"index.html\", stocks=stocks, user_name=user_name, cash=usd(cash[0]['cash']), total_value=usd(total_value))", "def index():\n\n if request.method == \"GET\":\n\n current_user = session[\"user_id\"]\n current_cash=db.execute(\"SELECT cash FROM users WHERE id = :id\", id=current_user)\n\n # portfolio_table=\"\"\n table_symbols=[]\n table_volumes=[]\n table_share_price=[]\n table_stock_name=[]\n table_total_value=[]\n\n rows=db.execute(\"SELECT stock_symbol,volume FROM portfolio WHERE id = :id\", id=current_user)\n for row in rows:\n symbol=row[\"stock_symbol\"]\n table_symbols.append(str(symbol))\n\n table_volumes.append(row[\"volume\"])\n\n lookedup=lookup(row[\"stock_symbol\"])\n table_share_price.append(lookedup.get(\"price\"))\n table_stock_name.append(lookedup.get(\"name\"))\n\n table_total_value.append(int(lookedup.get(\"price\"))*int(row[\"volume\"]))\n\n # at this point we have lists with stock_symbols, amounts, prices and stock names just need to generate the code for portfolio table\n\n # for row in table_symbols:\n # y=0\n # portfolio_table+=\"<tr><td>\"+str(table_stock_name[y])+\"</td><td>\"+str(table_symbols[y])+\"</td><td>\"+str(table_volumes[y])+\"</td><td>\"+str(table_share_price[y])+\"</td></tr>\"\n # y+=1\n # not sure if this is going to insert into index.html correctly\n\n current_cash=int(current_cash[0][\"cash\"])\n current_total_value=current_cash\n\n for i in range(len(table_volumes)):\n\n volume=int(table_volumes[i])\n price=int(table_share_price[i])\n current_total_value+= volume*price\n\n return render_template(\"index.html\", current_cash=current_cash, table_symbols=table_symbols,table_volumes=table_volumes,table_share_price=table_share_price,table_stock_name=table_stock_name, table_total_value=table_total_value,current_total_value=current_total_value)\n\n else:\n # dont think ill be posting with index\n return apology(\"Should this even exist, how did u get here?\")", "def index():\n userid = session[\"user_id\"]\n stocks = db.execute(\"SELECT symbol FROM purchase WHERE userid = :userid GROUP BY symbol\",\n userid=userid)\n cash = db.execute(\"SELECT cash FROM users WHERE id = :userid\", userid=userid)\n grand_total = cash[0][\"cash\"]\n if stocks != []:\n storages = list()\n for symbol in stocks:\n stock_data = lookup(symbol[\"symbol\"])\n current_price = stock_data[\"price\"]\n stock_info = dict()\n shares_info = db.execute(\"SELECT SUM(shares) AS shares_sum FROM purchase WHERE userid = :userid\\\n GROUP BY symbol HAVING symbol = :symbol\", userid=userid, symbol=symbol[\"symbol\"])\n current_shares = shares_info[0][\"shares_sum\"]\n if current_shares > 0:\n stock_info[\"symbol\"] = symbol[\"symbol\"]\n stock_info[\"name\"] = stock_data[\"name\"]\n stock_info[\"price\"] = usd(current_price)\n stock_info[\"shares\"] = current_shares\n total = current_price * current_shares\n grand_total += total\n stock_info[\"total\"] = usd(total)\n storages.append(stock_info)\n return render_template(\"index.html\", storages=storages, cash=usd(cash[0][\"cash\"]), grand_total=usd(grand_total))\n else:\n return render_template(\"index.html\", cash=usd(cash[0][\"cash\"]), grand_total=usd(grand_total))\n return render_template(\"index.html\")", "def display_artist_complete_portfolio(artist_name):\n if controls_utils.artist_has_work_in_db(artist_name):\n results = artwork_db.get_all_artwork_from_one_artist(artist_name)\n for piece in 
results:\n print(piece)\n else:\n print('Sorry, no artwork from this artist to display ')", "def index():\n rows = db.execute(\"SELECT Symbol, SUM(Shares) as totalShares FROM cash WHERE id=:id GROUP BY Symbol HAVING totalShares > 0\", id=session[\"user_id\"])\n transactions=[]\n grand_total = 0\n for row in rows:\n stock = lookup(row[\"Symbol\"])\n transactions.append({\n \"Symbol\": stock[\"symbol\"],\n \"Name\": stock[\"name\"],\n \"Shares\": row[\"totalShares\"],\n \"Price\": usd(stock[\"price\"]),\n \"Total\": usd(stock[\"price\"] * row[\"totalShares\"])\n })\n grand_total += stock[\"price\"] * row[\"totalShares\"]\n rows = db.execute(\"SELECT cash FROM users WHERE id=:id\", id=session[\"user_id\"])\n cash = rows[0][\"cash\"]\n return render_template(\"table.html\", transactions=transactions, cash=usd(cash), grand_total=usd(grand_total))", "def index():\n # query database to get cash on hand\n user_cash = db.execute(\"SELECT cash FROM users WHERE id = :user_id\", user_id=session[\"user_id\"])[0][\"cash\"]\n\n # query database to get current holdings from transactions list\n stocks = db.execute(\n \"SELECT symbol, SUM(shares) AS shares, price FROM transactions WHERE user_id = :user_id GROUP BY symbol\", user_id=session[\"user_id\"])\n\n # assign names and totals for stocks\n for stock in stocks:\n stock_lookup = lookup(stock[\"symbol\"])\n stock[\"name\"] = stock_lookup[\"name\"]\n stock[\"total\"] = stock[\"shares\"] * stock_lookup[\"price\"]\n\n stocks[:] = [stock for stock in stocks if stock.get(\"shares\") > 0]\n\n totals = user_cash + sum([stock[\"total\"] for stock in stocks])\n\n return render_template(\"index.html\", user_cash=user_cash, stocks=stocks, total=totals, usd=usd)", "def sell():\n \n if request.method == \"POST\":\n if not request.form.get('symbol'):\n return apology('must provide symbol')\n \n if not request.form.get('shares'):\n return apology('must provide shares')\n \n symbol = (request.form.get(\"symbol\")).upper()\n \n row = db.execute(\"SELECT * FROM users WHERE id=:id\", id=session['user_id'])\n username = row[0]['username']\n \n result = db.execute(\"SELECT * FROM portfolio WHERE symbol=:symbol AND username=:username\", symbol=symbol, username=username)\n if not result:\n return apology('no symbol available')\n \n shares = int(request.form.get('shares'))\n \n if shares <= 0:\n return apology('shares not positive')\n \n row = db.execute(\"SELECT * FROM portfolio WHERE symbol=:symbol AND username=:username\", symbol=symbol, username=username)\n old_shares = row[0]['shares']\n \n if shares > old_shares:\n return apology('number exceeds available shares')\n \n new_shares = old_shares - shares\n \n if new_shares == 0:\n db.execute(\"DELETE FROM portfolio WHERE symbol=:symbol AND username=:username\", symbol=symbol, username=username)\n else:\n db.execute(\"UPDATE portfolio SET shares=:shares WHERE symbol=:symbol AND username=:username\", shares=new_shares, symbol=symbol, username=username)\n \n quote = lookup(symbol)\n price = quote['price']\n total_p = price * shares\n \n row = db.execute(\"SELECT * FROM users WHERE id=:id\", id=session['user_id'])\n old_cash = row[0]['cash']\n \n new_cash = old_cash + total_p\n \n db.execute(\"UPDATE users SET cash=:cash WHERE id=:id\", cash=new_cash, id=session['user_id'])\n \n #current_time = time.strftime(time.localtime(\"%H:%M:%S %m/%d/%Y\"))\n current_time = time.asctime( time.localtime(time.time()) )\n db.execute(\"INSERT INTO history (username, time, symbol, shares) VALUES (:username, :time, :symbol, :shares)\", 
username=username,time=current_time,symbol=symbol,shares=0-shares)\n \n # redirect user to home page\n return redirect(url_for(\"index\"))\n \n # else if user reached route via GET (as by clicking a link or via redirect)\n else:\n return render_template(\"sell.html\")", "def history():\n # extract history of operation for a particular user\n historical_data = db.execute(\"SELECT Symbol, Company, Shares, Price, Total, Timestamp FROM portfolio WHERE id = :id\", id=session[\"user_id\"])\n\n return render_template(\"history.html\", historical=historical_data)", "def analyze(request, *args, **kwargs):\n\n mode = 'lines+markers'\n\n tickers = Stock.objects.distinct(\n 'ticker').values_list('ticker', flat=True)\n tickers_dict = {ticker: [] for ticker in tickers}\n tickers_count = tickers.count()\n\n actual_dates = Stock.objects.values('date').annotate(\n dcount=Count('date')).filter(dcount=tickers_count).values_list(\n 'date', flat=True).order_by('date')\n date_list = list(actual_dates)\n\n data = Stock.objects.filter(date__in=actual_dates).order_by('date')\n\n for item in data.values('ticker', 'close', 'oopen'):\n tickers_dict[item['ticker']].append(\n round((item['close']-item['oopen'])*100/item['oopen'], 2)\n )\n\n scatters = [Scatter(x=date_list, y=tickers_dict[obj], mode=mode, name=obj,\n opacity=0.8, visible='legendonly') for obj in tickers_dict]\n figure = {'data': scatters, 'layout': {\n 'title': {\n 'text': 'Open-Closed comparision', 'y': 0.9, 'x': 0.5,\n 'xanchor': 'center','yanchor': 'top'},\n 'yaxis_title': \"Daily percent\",\n 'xaxis_title': \"Years\",\n }}\n\n return render(request, \"analyze.html\", context={\n 'plot_div': plot(figure, output_type='div')})", "async def stocks(self, ctx):\n\t\tpass", "def index():\n holdings = db.execute(\"SELECT symbol, amount FROM stocks WHERE stocks.user_id = :userid AND amount != 0\", userid = session[\"user_id\"])\n total = 0\n\n for row in holdings:\n sDict = lookup(row['symbol'])\n row['name'] = sDict['name']\n row['share_total'] = sDict['price'] * row['amount']\n total += row['share_total']\n\n row['price'] = usd(sDict['price'])\n row['share_total'] = usd(row['share_total'])\n\n cash = db.execute(\"SELECT cash FROM users WHERE id = :userid\", userid = session[\"user_id\"])\n cash = cash[0][\"cash\"]\n total += cash\n\n return render_template(\"index.html\", holdings=holdings, cash=usd(cash), total=usd(total))", "def summary(self):\n print '%s Portfolio\\'s %s Strategy' % (self.portfolio.name, self.name)\n print '-' * COL_DASH_WIDTH\n\n self.display_trades()\n\n for symbol in self.portfolio.assets.keys():\n perf = self.performance[symbol]\n\n print '\\nSummary for %s from %s (first trade) to %s (last trade)' % (symbol, perf['start'], perf['end'])\n print '.' 
* COL_DASH_WIDTH\n print 'Summary:'\n data = [[fmtn(perf['trades']), fmtn(perf['wins']), fmtn(perf['losses']), fmtn(perf['washes'])]]\n print tabulate.tabulate(data, headers=['Total Trades', '# Wins', '# Losses', '# Washes'])\n\n print '\\nPerformance:'\n data = [[\n fmtn(perf['profit']), fmtn(perf['loss']), fmtn(perf['net_profit']),\n fmtp(perf['profit_factor']), fmtp(perf['percent_profitable']), fmtn(perf['average_trade_net_profit'])\n ]]\n print tabulate.tabulate(data, headers=['Profit', 'Loss', 'Net Profit', 'Profit Factor', 'Percent Profitable', 'Average Net Profit per Trade'])\n\n print '\\nDrawdown:'\n data = [[fmtn(perf['max_drawdown']), fmtn(perf['average_drawdown']), fmtn(perf['max_drawdown_days']), fmtn(perf['average_drawdown_days'])]]\n print tabulate.tabulate(data, headers=['Max', 'Average', 'Max Days', 'Average Days'])\n\n print '\\nRisk:'\n data = [[fmtn(perf['volatility_risk']), fmtn(perf['beta']), fmtn(perf['lower_partial_moment_risk']), fmtn(perf['t_r']), fmtn(perf['s_r'])]]\n print tabulate.tabulate(data, headers=['Volatility', 'Beta', 'Lower Partial Moment', 'Treynor Ratio', 'Sharpe Ratio'])", "def index():\n inventory = db.execute(\"SELECT symbol,quantity FROM inventory WHERE userid = :uid\", uid=session[\"user_id\"])\n cash = float(db.execute(\"SELECT cash FROM users WHERE id = :userid\", userid=session[\"user_id\"])[0][\"cash\"])\n total = cash\n for i in inventory:\n stock = lookup(i[\"symbol\"])\n i[\"price\"] = stock[\"price\"]\n i[\"name\"] = stock[\"name\"]\n i[\"total\"] = usd(stock[\"price\"] * i[\"quantity\"])\n total += stock[\"price\"] * i[\"quantity\"]\n return render_template(\"index.html\", context={\"inventory\":inventory,\"total\":usd(total),\"cash\":usd(cash)})", "def action(self, history_to_date, stocks, portfolio):\n return {}", "def index():\n\n stocks_owned = db.execute(\"SELECT DISTINCT stock FROM transaction WHERE id = :id;\", id=session['user_id'])\n\n number_of_rows= len(stocks_owned) - 1\n\n i = 0\n\n total_value=0\n\n for stock in stocks_owned:\n\n stock_list=[]\n stock_list[i]=stock\n\n value = db.execute(\"SELECT SUM(total_amount) FROM transaction WHERE id = :id GROUP BY stock HAVING stock=:stock\", id=session['usestockr_id'], stock=stocks_owned[\"stock\"])\n value_list=[]\n value_list[i] = value\n\n amount_owned = db.execute(\"SELECT SUM(amount) FROM transaction WHERE id = :id GROUP BY stock HAVING stock=:stock\", id=session['user_id'], stock = stocks_owned[\"stock\"])\n amount_list=[]\n amount_list[i]= amount_owned\n\n quote_input = stocks_owned[i]\n quote_info = lookup(quote_input)\n price = quote_info['price']\n price_list=[]\n price_list[i] = price\n\n\n total_value+=value\n\n i+=1\n\n cash = db.execute(\"SELECT cash FROM users WHERE id = :id;\", id=session['user_id'])\n\n grand_total = total_value + cash\n\n ###(\"SELECT stock, SUM(total_amount) FROM transaction WHERE id = :id;, id=session['user_id'] GROUP BY stock\")####\n\n\n return render_template(\"index.html\", number_of_rows=number_of_rows, stock_list=stock_list, amount_list=amount_list, value_list=value_list, price_list=price_list, total_value=total_value, grand_total=grand_total)", "def new_portfolio_flow(portfolio_list):\n while True:\n portfolio_name = prompt.shortcuts.input_dialog(\n title=\"Portfolio Name\", text=\"Please type the portfolio name:\"\n ).run()\n if portfolio_name is not None:\n portfolio_id: int = len(portfolio_list)\n stock_list = []\n stock_list = add_stock_flow(stock_list)\n portfolio_list.append(Portfolio(portfolio_name, portfolio_id, stock_list))\n 
return portfolio_list\n        if portfolio_name is None:\n            return None", "def history():\n    rows = db.execute(\"SELECT stock_id, stocks.symbol, price, shares, date FROM history JOIN stocks ON history.stock_id=stocks.id WHERE user_id=:user_id\", user_id=session[\"user_id\"])\n    return render_template(\"history.html\", rows=rows)", "def index():\n    #if request.method == \"GET\":\n    #Select the stock symbol and the number of shares that belong to the id\n    #stocks_shares = db.execute(\"SELECT symbol, shares FROM total WHERE id=:id ORDER BY symbol\",\n    #id=session[\"user_id\"])\n    #return render_template(\"index.html\")\n    #return redirect(url_for(\"index.html\"))\n    return apology(\"TODO\")", "def select_stock_object(portfolio_list, portfolio_selected):\n    portfolio_stock_choice = prompt.shortcuts.radiolist_dialog(\n        values=[(x, x.name) for x in\n                [x.stock_list for x in portfolio_list if x.portfolio_id == portfolio_selected][0]],\n        title=\"Portfolio Overview\",\n        text=\"Please select a stock:\",\n    ).run()\n    return portfolio_stock_choice", "def history():\n\n    rows = db.execute('SELECT operation, symbol, shares, price, date FROM transactions WHERE id = :id',\n                      id=session['user_id'])\n\n    return render_template('history.html', stocks=rows[::-1])", "def history():\n    if request.method == \"GET\":\n        \n        user_id = int(session.get('user_id'))\n        user_data = db.execute('''SELECT * FROM history WHERE user_id = :user_id''', user_id = user_id)\n        \n        if not user_data:\n            return render_template('quote.html')\n        \n        #create lists of values for sake of returning them to F2E\n        portfolio = []\n        \n        for i in user_data:\n            #getting data from table\n            date = i.get('date')\n            symbol = i.get('symbol')\n            name = i.get('stock_name')\n            quantity = i.get('quantity')\n            price = round(float(i.get('price')), 2)\n            action = str(i.get('deal'))\n            \n            #inserting data into a list\n            a_dict = {\n                'date': date, 'symbol': symbol, \n                'name': name, 'price': price, \n                'quantity': quantity, 'action': action\n            }\n            portfolio.append(a_dict)\n        \n        return render_template('history.html',\n                               portfolio=portfolio)\n    else:\n        return render_template('index.html')", "async def stock(self, ctx, ticker: str):\n        symbols = await self.bot.aiojson(\"https://api.robinhood.com/quotes/\"\\\n                                         f\"?symbols={ticker.upper()}\")\n        if not symbols:\n            await ctx.send(\"Stock not found. 
This stock is probably not tradeable on robinhood.\")\n return\n symbols_result = symbols[\"results\"][0]\n instrument = await self.bot.aiojson(symbols_result[\"instrument\"])\n fundamentals = await self.bot.aiojson(\n f\"https://api.robinhood.com/fundamentals/{ticker.upper()}/\")\n\n current_price = (symbols_result[\"last_trade_price\"] if\n \"last_extended_hours_trade_price\" in symbols_result\n else symbols_result[\"last_extended_hours_trade_price\"])\n diff = Decimal(Decimal(current_price) -\n Decimal(symbols_result[\"previous_close\"]))\n percentage = str(100 * diff / Decimal(current_price))[:6]\n\n if not percentage.startswith(\"-\"):\n percentage = \"+\" + percentage\n\n current_price_string = self.format_currency(current_price)\n diff_string = self.format_currency(diff)\n bid_price_string = self.format_currency(Decimal(symbols_result[\"bid_price\"]))\n ask_price_string = self.format_currency(Decimal(symbols_result[\"ask_price\"]))\n tradeable_string = (\n \":white_check_mark:\" if instrument[\"tradeable\"] else \":x:\")\n\n update_timestamp = parser.parse(symbols_result[\"updated_at\"])\n\n symbol = symbols_result[\"symbol\"]\n change_color = await self.get_stock_change_color(symbol)\n\n embed = discord.Embed(title=f\"{symbol}'s stocks info\",\n color=change_color,\n timestamp=update_timestamp)\n\n embed.add_field(name=\"Name\", value=instrument[\"name\"])\n embed.add_field(name=\"Current Price\", value=current_price_string)\n embed.add_field(name=\"Change from yesterday\", value=f\"{diff_string} ({percentage}%)\")\n embed.add_field(name=\"Bid size\", value=f\"{symbols_result['bid_size']} ({bid_price_string})\")\n embed.add_field(name=\"Ask size\", value=f\"{symbols_result['ask_size']} ({ask_price_string})\")\n embed.add_field(name=\"Current Volume\", value=fundamentals[\"volume\"])\n embed.add_field(name=\"Average Volume\", value=fundamentals[\"average_volume\"])\n embed.add_field(name=\"Tradeable on Robinhood\", value=tradeable_string)\n embed.add_field(name=\"Country\", value=f\":flag_{instrument['country'].lower()}:\")\n\n await ctx.send(embed=embed)", "def sell():\n \n # if user reached route via POST, check all fields are filled\n if request.method == \"POST\":\n if not request.form.get(\"symbol\") or not request.form.get(\"shares\"):\n return apology(\"must provide symbol and number of shares\")\n \n # use lookup function to get stock info\n quote = lookup(request.form.get(\"symbol\"))\n \n # ensure validity of form\n if quote == None:\n return apology(\"invalid symbol\")\n if not request.form.get(\"shares\").isdigit():\n return apology(\"must provide positive integer\")\n \n # initiate variables\n shares = int(request.form.get(\"shares\"))\n stocks = []\n \n # obtain user's stock information from portfolio database\n stocks = db.execute(\"SELECT shares FROM portfolio WHERE id = :id AND symbol = :symbol\", id=session[\"user_id\"], symbol = quote[\"symbol\"])\n \n # check that user actually owns enough stock, or any stock at all\n if stocks == []:\n return apology(\"you don't own any of this stock\")\n if shares > stocks[0][\"shares\"]:\n return apology(\"invalid number of shares\")\n \n # calculate price per share and cost of all shares\n price = round(float(quote[\"price\"]),2)\n cost = round(float(shares * price),2)\n \n # update user's cash balance\n db.execute(\"UPDATE users SET cash = cash + :cost WHERE id = :id\", cost = cost, id=session[\"user_id\"])\n \n # if there are still shares leftover after sale, update row\n if shares < stocks[0][\"shares\"]:\n 
db.execute(\"UPDATE portfolio SET shares = shares - :shares WHERE id = :id AND symbol = :symbol\", id=session[\"user_id\"], shares = shares, symbol = quote[\"symbol\"])\n \n # otherwise, if not shares leftover, remove row from portfolio entirely\n elif shares == stocks[0][\"shares\"]:\n db.execute(\"DELETE FROM portfolio WHERE id = :id AND symbol = :symbol\", id=session[\"user_id\"], symbol = quote[\"symbol\"])\n \n db.execute(\"INSERT INTO history (id,symbol,shares,price,date) VALUES (:id,:symbol,:shares,:price,datetime('now'))\",id=session[\"user_id\"], symbol=quote[\"symbol\"],shares=-shares,price=price)\n \n flash('Sold!')\n return redirect(url_for(\"index\"))\n \n # else if user reached route via GET\n else:\n return render_template(\"sell.html\")", "def add_portfolio(self, portfolio):\n self.portfolios.append(portfolio)", "def add_portfolio(self, portfolio):\n self.portfolios.append(portfolio)", "def get_portfolio_pnl(self):\n\n return self._portfolio", "def stock_value_history(stock_values, value_name='Close'):\n ticker = stock_values.name\n dates = stock_values.index\n \n # hover tool\n phover = HoverTool(tooltips=[(\"price\", \"$y\"),])\n\n # plot\n p = figure(x_axis_type = \"datetime\", tools=[\"pan,wheel_zoom,box_zoom,reset,resize\", phover])\n\n p.title = \"{} Closing Prices\".format(ticker)\n p.title_text_font_size = '12'\n p.title_text_font_style = 'bold'\n\n # x axis\n p.xaxis.axis_label = 'Date'\n p.xaxis.axis_label_text_font_size = '9'\n\n # y axis\n p.yaxis.axis_label = 'Price (US$)'\n p.yaxis.axis_label_text_font_size = '9'\n\n line1_name = value_name\n p.line(np.array(dates, 'M64'), stock_values[value_name], legend=value_name,\n color='#182b8b', **line_style)\n\n line1_name = 'SMA 30'\n p.line(np.array(stock_values.index, 'M64'), stock_values[line1_name], legend=line1_name,\n color='#5477a0', **line_style)\n\n line2_name = 'SMA 100'\n p.line(np.array(stock_values.index, 'M64'), stock_values[line2_name], legend=line2_name,\n color='#dfbd4d', **line_style)\n\n line3_name = 'SMA 300'\n p.line(np.array(stock_values.index, 'M64'), stock_values[line3_name], legend=line3_name,\n color='#df1b06', **line_style)\n\n # set plot style\n p.plot_width = 800\n p.plot_height = 300\n p.grid.grid_line_alpha=0.3\n\n # set grid\n # change just some things about the x-grid\n p.xgrid.grid_line_color = None\n\n # change just some things about the y-grid\n p.ygrid.grid_line_alpha = 0.5\n p.ygrid.grid_line_dash = [6, 4]\n\n # legend\n p.legend.orientation = \"bottom_left\"\n p.legend.label_text_font_size = '3'\n \n return p", "def display_stock(stock):\n print(\"Stock contains:-\")\n for typ in stock:\n print(typ + f\" {stock[typ]}\")", "def evaluate_portfolio(username):\n user_obj = User.query.filter(User.username == username).first()\n date = request.args.get('date')\n\n if user_obj is None:\n return util.build_json_response('User does not exist')\n\n if not util.is_valid_date_string(date):\n return util.build_json_response(\"Not a valid date of the form YYYY-MM-DD\")\n\n following_date = util.add_days_to_date(date, 1)\n equities = db.session.query(Portfolio.ticker, func.sum(Portfolio.quantity))\\\n .filter(Portfolio.user_id == user_obj.id) \\\n .filter(Portfolio.transaction_date <= following_date) \\\n .group_by(Portfolio.ticker).all()\n\n e_total = 0\n for equity in equities:\n price = equity[1] * market_data.get_stock_price(equity[0], date, 'low')\n e_total += price\n\n total = round(e_total + user_obj.balance, 2)\n cash = round(user_obj.balance, 2)\n e_total = round(e_total, 2)\n\n return 
util.build_json_response(\"Portfolio totals retrieved\", equity_total=e_total, cash_balance=cash, account_total=total)", "def initialize_portfolio(self):\n\n raise NotImplementedError('''\n Must implement initialize_portfolio. Call help() for details.\n ''')", "def list(self):\n\n for name in self.projects:\n self.projects[name].show()\n print(\"\\n\")", "def index_view(request):\n\n\t# Create blank form instances.\n\tform = TickerForm()\n\tcrypto_form = CryptoTickerForm()\n\t\n\t# Check if the request method == POST\n\tif request.method == 'POST':\n\t\tpost_data = request.POST or None\n\t\t# Check that ther is data on the request.\n\t\tif post_data != None:\n\t\t\t# Check if the user enters data and the stock ticker form.\n\t\t\tif request.POST.get(\"form_type\") == 'stock_form':\n\t\t\t\tform = TickerForm(request.POST)\n\t\t\t\t# Check if form is valid.\n\t\t\t\tif form.is_valid():\n\t\t\t\t\t# Get the 'ticker' value from the form and store it the ticker variable.\n\t\t\t\t\tticker = form.cleaned_data.get('ticker')\n\t\t\t\t\t# If the variable ticker exists in the users portfolio send error message.\n\t\t\t\t\ttry: \n\t\t\t\t\t\tif request.user.stocks_set.get(ticker=ticker) != None:\n\t\t\t\t\t\t\tmessages.info(request, 'Stock ticker already exists in portfolio.')\n\t\t\t\t\t# Create the Stock Object in the database and link it to the current user.\n\t\t\t\t\texcept Stocks.DoesNotExist:\n\t\t\t\t\t\tStocks.objects.create(\n\t\t\t\t\t\t\tticker = ticker, \n\t\t\t\t\t\t\tuser=request.user)\n\t\t\t\t\t\t# Get the stock that was created from the database.\n\t\t\t\t\t\tcurrent_stock = Stocks.objects.get(ticker=ticker, user=request.user)\n\t\t\t\t\t\t# Get the meta and price data\n\t\t\t\t\t\tcurrent_stock_meta_dict = current_stock.get_meta_data()\n\t\t\t\t\t\tcurrent_stock_price_dict = current_stock.get_price_data()\n\t\t\t\t\t\t# Add the highest price for the stock to the meta data dict\n\t\t\t\t\t\tcurrent_stock_meta_dict['high'] = current_stock_price_dict.get('high')\n\t\t\t\t\t\t# Add a ticker variable to meta data incase user enters incorrect ticker and there is no data.\n\t\t\t\t\t\tcurrent_stock_meta_dict['ticker'] = current_stock.ticker\n\t\t\t\t\t\t# Add the meta and price data to the current session\n\t\t\t\t\t\trequest.session['meta_data'][current_stock.ticker] = current_stock_meta_dict\n\t\t\t\t\t\trequest.session['price_data'][current_stock.ticker] = current_stock_price_dict\n\t\t\t\t\t\t# Explicitly save the session\n\t\t\t\t\t\trequest.session.modified = True\n\t\t\t\t\t\t# Reset the form instance.\n\t\t\t\t\t\tform = TickerForm()\n\n\n\t\t\t# Check wether the user enters data on the crypto currency ticker form.\n\t\t\telif request.POST.get(\"form_type\") == 'crypto_form':\n\t\t\t\tcrypto_form = CryptoTickerForm(request.POST)\n\t\t\t\tif crypto_form.is_valid():\n\t\t\t\t\tcrypto_ticker = request.POST['crypto_ticker']\n\t\t\t\t\t# If the variable crypto_ticker exists in the users portfolio send error message.\n\t\t\t\t\ttry:\n\t\t\t\t\t\tif request.user.crypto_set.get(crypto_ticker=crypto_ticker) != None:\n\t\t\t\t\t\t\tmessages.info(request, 'Crypto ticker already exists in portfolio.')\n\t\t\t\t\t# Else create the Crypto Object in the database and link it to the current user.\n\t\t\t\t\texcept Crypto.DoesNotExist:\n\t\t\t\t\t\tCrypto.objects.create(\n\t\t\t\t\t\t\tcrypto_ticker = crypto_ticker, \n\t\t\t\t\t\t\tuser=request.user)\n\t\t\t\t\t\t# Get the currently created cryptocurrency ticker\n\t\t\t\t\t\tcurrent_crypto = Crypto.objects.get(crypto_ticker = crypto_ticker, user = 
request.user)\n\t\t\t\t\t\t# Get the meta data and price data for the current cryptocurrency\n\t\t\t\t\t\tcurrent_crypto_meta_dict = current_crypto.get_crypto_meta_data()\n\t\t\t\t\t\tcurrent_crypto_price_dict = current_crypto.get_crypto_price_data()\n\t\t\t\t\t\t# Add a crypto_ticker variable to meta data incase user enters incorrect ticker and there is no data.\n\t\t\t\t\t\tcurrent_crypto_meta_dict['crypto_ticker'] = current_crypto.crypto_ticker\n\t\t\t\t\t\t# Handle Error for no data on creation of invalid cryptocurrency object\n\t\t\t\t\t\tif len(current_crypto_price_dict) == 0:\n\t\t\t\t\t\t\tcurrent_crypto_price_dict.append({'topOfBookData':[{'lastPrice':'No_Data'}]})\n\n\t\t\t\t\t\t# Add the meta data and price data to the current session\n\t\t\t\t\t\trequest.session['crypto_meta_data'][current_crypto.crypto_ticker] = current_crypto_meta_dict\n\t\t\t\t\t\trequest.session['crypto_price_data_dict'][current_crypto.crypto_ticker] = current_crypto_price_dict\n\t\t\t\t\t\t# Save the session\n\t\t\t\t\t\trequest.session.modified = True\n\t\t\t\t\t\t# Reset the crypto_form\n\t\t\t\t\t\tcrypto_form = CryptoTickerForm()\n\t\t\t\t\t\n\n\n\t#Call a list of the users stocks and store it to be passed into the context.\n\tstock_list = request.user.stocks_set.all()\n\tcrypto_list = request.user.crypto_set.all()\n\n\t# Initialse dictionaries to store meta data and price data.\n\tstock_metadata_dict = {}\n\tstock_price_data_dict = {}\n\n\tcrypto_metadata_dict = {}\n\tcrypto_price_data_dict = {}\n\n\t# Loop through users stock and crypto portfolios and add meta and price data to respective dictionaries. \n\n\t# Only do this the first time the user logs into the site.\n\tif request.session.get('meta_data') == None:\n\t\tfor stock in stock_list:\n\t\t\tstock_metadata_dict[stock.ticker] = stock.get_meta_data()\n\t\t\tstock_price_data_dict[stock.ticker] = stock.get_price_data()\n\t\t\t# Add stocks highest price data to meta data dict for use on index page.\n\t\t\tstock_metadata_dict[stock.ticker]['high'] = stock_price_data_dict[stock.ticker].get('high')\n\t\t\t# Add a ticker to metadata dict incase user enters incorrect ticker and there is no data returned.\n\t\t\tstock_metadata_dict[stock.ticker]['ticker'] = stock.ticker\n\n\t\tfor crypto in crypto_list:\n\t\t\tcrypto_metadata_dict[crypto.crypto_ticker] = crypto.get_crypto_meta_data()\n\t\t\tcrypto_price_data_dict[crypto.crypto_ticker] = crypto.get_crypto_price_data()\n\t\t\t# Add a crypto_ticker to metadata dict incase user enters incorrect ticker and there is no data returned.\n\t\t\tcrypto_metadata_dict[crypto.crypto_ticker]['crypto_ticker'] = crypto.crypto_ticker\n\t\t\t# Handle error when there is no data recieved for an incorrect ticker.\n\t\t\tif len(crypto_price_data_dict[crypto.crypto_ticker]) == 0:\n\t\t\t\tcrypto_price_data_dict[crypto.crypto_ticker] = [{'topOfBookData':[{'lastPrice':'No Data'}]}]\n\t\n\t\t# Set session variables for meta and price data to be used throughout site.\n\t\trequest.session['meta_data'] = stock_metadata_dict\n\t\trequest.session['price_data'] = stock_price_data_dict\n\n\t\trequest.session['crypto_meta_data'] = crypto_metadata_dict\n\t\trequest.session['crypto_price_data_dict'] = crypto_price_data_dict\n\t\n\tcontext = {\n\t\t'form' : form,\n\t\t'crypto_form' : crypto_form,\n\t}\n\n\treturn render(request, 'index.html', context)", "def sell():\n if request.method == \"POST\":\n\n #test for selection of stocks\n if request.form.get(\"symbol\") == \"\" or request.form.get(\"shares\") == \"\":\n return apology(\"Please 
fill in all fields\")\n\n #test for positive integer\n if str.isdigit(request.form.get(\"shares\")) == False:\n return apology(\"Please select a positive number of shares\")\n\n # does the user have enough shares of that stock\n user_stock = request.form.get(\"symbol\")\n user_number = int(request.form.get(\"shares\"))\n owned = db.execute(\"SELECT SUM(number) FROM portfolio WHERE userid=:id AND stock=:stock\", stock = user_stock, id=session[\"user_id\"])\n owned = int(owned[0]['SUM(number)'])\n if user_number > owned:\n return apology(\"You don't have enough shares\")\n\n #in the portfolio table, add a negative to the number field of the purchased stock\n #in the cash table, lookup the current price and add the cash to the user's cash balanace\n else:\n pay = lookup(request.form.get(\"symbol\"))\n user_number = int(request.form.get(\"shares\"))\n db.execute(\"UPDATE users SET cash=cash+:total WHERE id=:userid\", total=(pay['price'] * user_number), userid=session[\"user_id\"])\n\n user_number = int(request.form.get(\"shares\")) * -1\n db.execute(\"INSERT INTO portfolio (stock, number, price, trans_price, userid) VALUES (:stock, :number, :price, :trans_price, :userid)\", stock=user_stock, number=user_number, price=(pay['price'] * user_number), trans_price=usd(pay['price']), userid=session[\"user_id\"])\n\n user_id=session[\"user_id\"]\n return redirect(url_for('index'))\n\n if request.method == \"GET\":\n #get stocks from portfolio and return to html form\n stocks = db.execute(\"SELECT stock FROM portfolio WHERE userid=:id GROUP BY stock\", id=session[\"user_id\"])\n return render_template(\"sell.html\", stocks=stocks)", "def plot_stock_prices(self, ins_id):\n # creating api-object\n # using api-object to get stock prices from API\n stock_prices = self._borsdata_api.get_instrument_stock_prices(ins_id)\n # calculating/creating a new column named 'sma50' in the table and\n # assigning the 50 day rolling mean to it\n stock_prices['sma50'] = stock_prices['close'].rolling(window=50).mean()\n # filtering out data after 2015 for plot\n filtered_data = stock_prices[stock_prices.index > dt.datetime(2015, 1, 1)]\n # plotting 'close' (with 'date' as index)\n plt.plot(filtered_data['close'], color='blue', label='close')\n # plotting 'sma50' (with 'date' as index)\n plt.plot(filtered_data['sma50'], color='black', label='sma50')\n # show legend\n plt.legend()\n # show plot\n plt.show()", "def simulate_trading(self):\n self._generate_trading_instances()\n self._run_backtest()\n self.portfolio.output_equity()\n res=self.portfolio.get_statistics()\n self.plot.plot_equity()\n return res", "def index():\n\n # Create lists containing values for the table\n symbols = []\n names = []\n shares = []\n totals = []\n prices = []\n\n # Query database for the current amount of cash and stocks\n cash = db.execute(\"SELECT cash FROM users WHERE id = :username\", username=session[\"user_id\"] )[0][\"cash\"]\n stocks = db.execute(\"SELECT * FROM summary WHERE id = :username\", username=session[\"user_id\"] )\n grand = cash\n\n # Append to the lists from the database\n for item in stocks:\n symbol = item[\"symbol\"]\n symbols.append(symbol)\n names.append(lookup(symbol)[\"name\"])\n share = db.execute(\"SELECT shares FROM summary WHERE id = :username AND symbol= :symbol\", username=session[\"user_id\"], symbol=symbol)[0][\"shares\"]\n shares.append(share)\n prices.append(lookup(symbol)[\"price\"])\n total = int(share) * lookup(symbol)[\"price\"]\n totals.append(total)\n grand += total\n\n # Obtain list length\n length = 
len(symbols)\n\n # Direct users to the index page\n return render_template(\"index.html\", symbols = symbols, length = length, cash=cash, names = names, shares = shares, totals = totals, prices = prices, grand = grand)", "def index(request):\n # Get biggest movers\n stock_mover = top_movers()\n\n # Get latest data\n stock_mover_quotes = {}\n for stock in stock_mover:\n all_of_quote = get_current_quote(stock.ticker)\n # Get jUut the fields you need from the result\n stock_mover_quotes[stock.ticker] = {\n k: all_of_quote.get(k, None) for k in ('Symbol', 'Name', 'Bid', 'Change', 'PercentChange')}\n\n # XXX messages should be a list of messages of the biggest movers\n messages = list(Message.objects.filter(source=\"twitter\"))[:33]\n messages += list(Message.objects.filter(source=\"stocktwits\"))[:33]\n messages += list(Message.objects.filter(source=\"reddit\"))[:33]\n random.shuffle(messages)\n\n return render(\n request,\n 'index.html',\n {\"streamer\": messages, \"stock_list\": stock_mover_quotes.values()}\n )", "def layout(symbols):\n periods = [\n ('1 day', 0),\n ('1 week', 1),\n ('1 month', 2),\n ('3 months', 3),\n ('1 year', 4),\n ('5 years', 5)\n ]\n return Div([\n H3('Stock prices'),\n Div([\n Div([_symbol_selector_dropdown(symbols)],\n style={\n 'width': '45%',\n 'float': 'left',\n 'display': 'inline-block'\n }),\n Div([_period_selector_radio(periods)],\n style={\n 'width': '45%',\n 'float': 'right',\n 'display': 'inline-block'\n })\n ], style={'display': 'inline-block', 'width': '100%'}),\n Graph(\n id='plot-stock',\n config={'displayModeBar': False}\n )\n ])", "def index():\n\n rows = db.execute(\"SELECT cash FROM users WHERE id = :id\", id=session[\"user_id\"])\n print(\"rows= \" , rows)\n\n cash = rows[0] [\"cash\"]\n\n stocks = db.execute(\"SELECT * FROM transactions WHERE user_id = :user_id\", user_id=session[\"user_id\"])\n print(\"Stocks= \", stocks)\n\n holdings = 0\n for stock in stocks:\n print(stock[\"stock_code\"])\n stockDetail = lookup(stock[\"stock_code\"])\n print(\"StockDetail: \", stockDetail)\n stock_name = stockDetail[\"name\"]\n print(\"Stock Name: \", stock_name)\n\n if stockDetail == None:\n return apology(\"Not able to determine stock value\", 403)\n\n else:\n stockPrice = stockDetail[\"price\"]\n print(\"price of stock\", stockPrice)\n stock_name = stockDetail[\"name\"]\n # total value of each stock the user owns\n stock_value = stock[\"stock_quantity\"] * stockPrice\n holdings = holdings + stock_value\n stock[\"stock_name\"] = stock_name\n stock[\"stock_price\"] = usd(stockPrice)\n stock[\"stock_value\"] = usd(stock_value)\n print(\"Total value of each stock: \", stock_value)\n\n return render_template(\"index.html\", stocks=stocks,cash=usd(cash),total=usd(holdings+cash))", "def get_portfolio_object(self):\n return self.__get_portfolio_object(self.portfolio_name, self.portfolio_user)", "def generate_portfolio_data(self):\n self.__load_portfolio_historical_prices()\n self.__populate_historical_trade_data()\n self.__calculate_portfolio_returns()\n self.__calculate_portfolio_performance()", "def update_portfolio(self, portfolio: PortfolioController):\n now = portfolio.get_history(seconds_back=0)\n future = portfolio.get_history(seconds_back=-self.update_interval)\n\n for fund in portfolio.funds:\n best_currency = max(portfolio.currencies, key=lambda currency: future_value(fund, currency, now, future))\n if best_currency != fund.currency:\n portfolio.request_transfer(fund, best_currency)", "def sell():\n rows = db.execute(\"SELECT stock_id, shares, 
stocks.symbol FROM portfolio JOIN stocks ON portfolio.stock_id = stocks.id WHERE user_id==:user_id\", user_id=session[\"user_id\"])\n if request.method==\"GET\":\n return render_template(\"sell.html\", rows=rows)\n else:\n symbol = request.form.get(\"symbol\")\n if symbol==\"None\":\n return apology(\"You must select a symbol\")\n # shares sold will be stored in history table with negative value\n shares = int(request.form.get(\"shares\"))*(-1)\n if abs(shares) > rows[0][\"shares\"]:\n return apology(\"You don't own enough shares\")\n # run lookup function\n dict_4 = lookup(symbol)\n price = dict_4[\"price\"]\n # Insert new transaction in 'history' table\n db.execute(\"INSERT INTO history(user_id, stock_id, price, shares, buy) VALUES(:user_id, :stock_id, :price, :shares, :buy)\", user_id=session[\"user_id\"], stock_id=rows[0][\"stock_id\"], price=price, shares=shares, buy=0)\n # UPDATE shares in 'portfolio' table\n new_shares = (rows[0][\"shares\"])+shares\n db.execute(\"UPDATE portfolio SET shares==:shares WHERE user_id==:user_id and stock_id==:stock_id\", user_id=session[\"user_id\"], stock_id=rows[0][\"stock_id\"], shares=new_shares)\n # Update cash in 'users' table\n row_cash = db.execute(\"SELECT cash FROM users WHERE id==:user_id\", user_id=session[\"user_id\"])\n new_cash = row_cash[0][\"cash\"]-(price*shares)\n db.execute(\"UPDATE users SET cash==:cash WHERE id==:user_id\", user_id=session[\"user_id\"], cash=new_cash)\n # message to be retrieved in portfolio.html when user sells stock\n flash('Sold!')\n return redirect(\"/\")", "def history():\n \n # selection of name, symbol, shares and cash of user stocks\n hist = db.execute(\"SELECT * FROM history WHERE id=:id\", id = session[\"user_id\"])\n return render_template(\"history.html\", hist=hist)", "def sell():\n\n if request.method == \"GET\":\n\n #Query for all the stocks in posession.\n ports = db.execute(\"SELECT *, SUM(quantity) as sharetotal FROM portfolio WHERE id = :id GROUP BY name\", id=session[\"user_id\"])\n\n return render_template(\"sell.html\", ports=ports)\n if request.method == \"POST\":\n #Access the form data\n symbol = request.form.get(\"symbol\")\n\n #Check if the shares was an integer\n try:\n shares = int(request.form.get(\"shares\"))\n except:\n return apology (\"Please enter a whole number\", 400)\n\n #Query for the total quantity of that stock in posession\n get_quantity = db.execute(\"SELECT quantity FROM portfolio WHERE id = :id AND symbol = :symbol\", id=session['user_id'], symbol=symbol)\n #Convert the quantity dict to int\n get_quantity_int = int(get_quantity[0]['quantity'])\n\n #Check if the user input a positive number.\n if shares < 0:\n return apology (\"Please enter a positive value\", 403)\n\n #Get the current date and time\n now = datetime.now()\n\n date_time = now.strftime(\"%d/%m/%Y %H:%M:%S\")\n\n if shares < 0:\n return apology (\"Please enter a positive value\", 403)\n #Lookup the stock symbol data (price, symbol, company name)\n if shares > get_quantity_int:\n return apology (\"Selling more than you own?\", 400)\n stock = lookup(symbol)\n\n stock_price = stock['price']\n\n #Created a new table using CREATE TABLE 'portfolio' ('user' text, 'quantity' integer, 'price' numeric(15, 2), 'symbol' text)\n\n #Get the total cash value of the user from the database\n get_cash = db.execute(\"SELECT cash FROM users WHERE id = :id\", id=session['user_id'])\n\n #Convert the get_cash dict to float\n check_cash = float(get_cash[0]['cash'])\n\n if not stock:\n return apology (\"Please enter a valid stock\", 
403)\n\n #Compute the total amount of the shares sold (One company stock only)\n total = stock_price * float(shares)\n\n #Update the total amount of cash in hand by adding the sold stocks.\n db.execute(\"UPDATE users SET cash = cash + :total WHERE id = :id\", id=session[\"user_id\"], total=total)\n\n #Check if the total quantity of shares is equal to the quantity the user is trying to sell.\n #Add the stock in the history table\n history = db.execute(\"INSERT INTO history (symbol, quantity, price, transacted, id) VALUES (?, ?, ?, ?, ?)\", symbol, int(shares) * -1, float(stock_price), date_time, session[\"user_id\"] )\n\n #If it's equal then delete the stock in the portfolio. #Else, Update the quantity of that stock in the portfolio.\n if shares == get_quantity_int:\n db.execute(\"DELETE FROM portfolio WHERE id = :id AND symbol = :symbol\", id=session['user_id'], symbol=symbol)\n flash('You successfully sold the stock!')\n else:\n db.execute(\"UPDATE portfolio SET quantity = quantity - :shares, total = total -:total WHERE id = :id AND symbol = :symbol\", id=session[\"user_id\"], symbol=symbol, shares=shares, total=total)\n flash('You successfully sold the stock!')\n return redirect (url_for('index'))", "def _fetch_stock_page(*markets) -> bs4.BeautifulSoup:\n\n if len(markets) == 0:\n raise ValueError('No markets given')\n\n params = {\n 'Exchange' : 'NMF',\n 'SubSystem': 'Prices',\n 'Action' : 'GetMarket',\n 'app' : '/osakkeet',\n 'Market' : ','.join([x.value for x in markets]),\n # 'ext_xslt': '/nordicV3/inst_table_shares.xsl'\n }\n\n r = requests.get(_API_URL, params)\n response_text = r.text\n soup = bs4.BeautifulSoup(response_text, 'lxml')\n\n return soup", "def get_stock_price_df(info, symbols):\n\n df_l = []\n\n for num, i in enumerate(info):\n df = pd.DataFrame.from_dict(i, orient='index')\n df['Symbol'] = symbols[num]\n df_l.append(df)\n\n df_full = pd.concat(df_l)\n df_full = df_full.rename(columns={'1. open': 'Open',\n '2. high': 'High',\n '3. low': 'Low',\n '4. close': 'Close',\n '5. 
volume': 'Volume'})\n\n return df_full", "def sell():\n if request.method == \"POST\":\n if not request.form.get(\"symbol\"):\n return apology(\"must select a stock\", 400)\n elif not request.form.get(\"shares\"):\n return apology(\"must provide number of shares\", 400)\n elif float(request.form.get(\"shares\")) <= 0:\n return apology(\"number of shares must be greater than one\", 400)\n elif float(request.form.get(\"shares\")) > db.execute(\"SELECT number FROM portfolio WHERE username=:username AND symbol=:symbol\",\n username=db.execute(\"SELECT username FROM users WHERE id=:userId\",\n userId=session[\"user_id\"])[0][\"username\"],\n symbol=request.form.get(\"symbol\"))[0][\"number\"]:\n return apology(\"you don't own enough shares\", 400)\n\n numberOfShares = float(request.form.get(\"shares\"))\n\n priceOfEachShare = db.execute(\"SELECT price FROM portfolio WHERE username=:username AND symbol=:symbol\",\n username=db.execute(\"SELECT username FROM users WHERE id=:userId\",\n userId=session[\"user_id\"])[0][\"username\"],\n symbol=request.form.get(\"symbol\"))[0][\"price\"]\n\n totalValue = numberOfShares * priceOfEachShare\n\n db.execute(\"UPDATE users SET cash = cash + {0} WHERE id=:userId\".format(totalValue), userId=session[\"user_id\"])\n\n db.execute(\"UPDATE portfolio SET number = number - {0} WHERE username=:username AND symbol=:symbol\".format(request.form.get(\"shares\")),\n username=db.execute(\"SELECT username FROM users WHERE id=:userId\", userId=session[\"user_id\"])[0][\"username\"],\n symbol=request.form.get(\"symbol\"))\n\n if db.execute(\"SELECT number FROM portfolio WHERE username=:username AND symbol=:symbol\",\n username=db.execute(\"SELECT username FROM users WHERE id=:userId\", userId=session[\"user_id\"])[0][\"username\"],\n symbol=request.form.get(\"symbol\"))[0][\"number\"] == 0:\n db.execute(\"DELETE FROM portfolio WHERE username=:username AND symbol=:symbol\",\n username=db.execute(\"SELECT username FROM users WHERE id=:userId\", userId=session[\"user_id\"])[0][\"username\"],\n symbol=request.form.get(\"symbol\"))\n\n db.execute(\"INSERT INTO history (username, symbol, buyorsell, number, price, date) VALUES(:username, :symbol, :buyorsell, :number, :price, :date)\",\n username=db.execute(\"SELECT username FROM users WHERE id=:userId\", userId=session[\"user_id\"])[0][\"username\"],\n symbol=request.form.get(\"symbol\"), buyorsell=0, number=float(request.form.get(\"shares\")),\n price=priceOfEachShare, date=datetime.datetime.utcnow())\n\n return redirect(\"/\")\n\n else:\n symbolsList = db.execute(\"SELECT symbol FROM portfolio WHERE username=:username\",\n username=db.execute(\"SELECT username FROM users WHERE id=:userId\", userId=session[\"user_id\"])[0][\"username\"])\n return render_template(\"sell.html\", stocks=symbolsList)", "def get_stock_data_frame(time, stock):\n\n print(\"Getting\", time, \"stock data for\", stock)\n url = 'https://api.iextrading.com/1.0/stock/'+stock+'/chart/'+time\n req = requests.get(url)\n print(url)\n\n print(\"Parsing data.\")\n rjson = req.text\n\n rdata = json.loads(rjson)\n\n dates = []\n openprices = []\n highprices = []\n lowprices = []\n closeprices = []\n volumes = []\n\n for i in rdata:\n date = i['date']\n dates.append(date)\n openprices.append(float(i['open']))\n highprices.append(float(i['high']))\n lowprices.append(float(i['low']))\n closeprices.append(float(i['close']))\n volumes.append(float(i['volume']))\n\n index = pd.DatetimeIndex(dates, dtype='datetime64[ns]')\n _open = pd.Series(openprices, index=index)\n 
high = pd.Series(highprices, index=index)\n low = pd.Series(lowprices, index=index)\n close = pd.Series(closeprices, index=index)\n data_frame_data = {'Open' : _open, 'High' : high, 'Low' : low, 'Close' : close}\n\n return pd.DataFrame(data_frame_data)", "def stocks_history(request):\n\n symbol = request.args.get('symbol')\n\n if symbol is None:\n return jsonify([])\n\n client = bigquery.Client()\n qry = client.query(\"\"\"\n SELECT \n date,\n adj_close,\n symbol,\n sma_20,\n std_20,\n sma_50,\n sma_200,\n bb_perc_20\n FROM `ticker-224822.ticker_test_120718.analytics_view`\n where \n symbol = '{symbol}'\n and extract(year from date) >= 2010\n \"\"\".format(symbol=symbol))\n\n results = qry.result()\n results = [dict(row.items()) for row in results]\n resp = custom_jsonify(results)\n resp.headers.add('Access-Control-Allow-Origin', '*')\n resp.headers.add('Access-Control-Allow-Methods', 'GET')\n return resp" ]
[ "0.7349525", "0.7257509", "0.7092902", "0.70840114", "0.70700926", "0.70322937", "0.70166755", "0.6955458", "0.69515324", "0.6818638", "0.67683095", "0.67541313", "0.6739686", "0.66745", "0.66665316", "0.6584005", "0.65794265", "0.6577333", "0.6556571", "0.65012944", "0.64815927", "0.64735025", "0.6465535", "0.6312532", "0.63112044", "0.62762743", "0.62346", "0.62303627", "0.6182704", "0.61450326", "0.6122251", "0.611445", "0.61025023", "0.6082663", "0.607333", "0.606219", "0.60384244", "0.6035732", "0.60054773", "0.5978382", "0.59634656", "0.5923448", "0.591538", "0.5908193", "0.58903754", "0.58614", "0.58470833", "0.582957", "0.58252543", "0.5823499", "0.58229727", "0.58187795", "0.5812596", "0.58111256", "0.5809224", "0.5803079", "0.5794168", "0.5789423", "0.5779193", "0.5767472", "0.5761658", "0.5754713", "0.5751872", "0.5741388", "0.5727221", "0.5722986", "0.57177275", "0.571762", "0.57147825", "0.57071006", "0.5668939", "0.5649701", "0.56434345", "0.5641363", "0.5619027", "0.5619027", "0.5618465", "0.56122184", "0.5611063", "0.55942863", "0.55919564", "0.5583445", "0.5568468", "0.556346", "0.555581", "0.55501395", "0.55491537", "0.55472136", "0.55404145", "0.55395675", "0.5537525", "0.5537442", "0.55305004", "0.5529658", "0.5527494", "0.552352", "0.5509948", "0.5504186", "0.55028063", "0.54978365", "0.5485333" ]
0.0
-1
Buy shares of stock
def buy():
    if request.method == "POST":
        if not request.form.get("symbol") or not lookup(request.form.get("symbol")):
            return apology("must provide valid symbol", 400)
        if not request.form.get("shares") or int(request.form.get("shares")) <= 0:
            return apology("shares must be positive integer!", 400)
        row = db.execute("SELECT * FROM users WHERE id=:s", s=session["user_id"])
        dict = lookup(request.form.get("symbol"))
        cost = dict["price"] * int(request.form.get("shares"))
        if row[0]["cash"] > cost:
            db.execute("INSERT INTO history(symbol,shares,price,transacted,user_id,status) VALUES (:s,:sh,:p,:t,:u_i,:status)", s=dict["symbol"], sh=int(request.form.get("shares")), p=dict["price"], t=time.asctime(time.localtime(time.time())), u_i=session["user_id"], status='bought')
            row[0]["cash"] = row[0]["cash"] - cost
            db.execute("UPDATE users SET cash = :cash WHERE id=:s", cash=row[0]["cash"], s=session["user_id"])
            exist = db.execute("SELECT * FROM portofolio WHERE symbol=:s AND user_id=:u_i", s=dict["symbol"], u_i=session["user_id"])
            if len(exist) == 0:
                db.execute("INSERT INTO portofolio(symbol,name,shares,price,total,user_id) VALUES (:s,:n,:sh,:p,:t,:u_i)", s=dict["symbol"], n=dict["name"], sh=int(request.form.get("shares")), p=dict["price"], t=cost, u_i=session["user_id"])
            else:
                db.execute("UPDATE portofolio SET shares = shares+:sh, price=:p, total=total+:t WHERE symbol=:s AND user_id=:u_i", sh=int(request.form.get("shares")), p=dict["price"], t=dict["price"] * int(request.form.get("shares")), s=dict["symbol"], u_i=session["user_id"])
        else:
            return apology("Can't afford!", 400)
        return redirect("/")
    else:
        return render_template("buy.html")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def buy(self, ctx, name, shares: int):\n\t\tplural = 's' if shares != 1 else ''\n\t\tcurrency = await bank.get_currency_name(ctx.guild)\n\t\tif shares < 1:\n\t\t\tawait ctx.send('You cannot buy less than one share.')\n\t\t\treturn\n\t\tname = name.upper()\n\t\ttry:\n\t\t\tstock_data = await self._get_stock_data([name])\n\t\texcept ValueError as e:\n\t\t\treturn await ctx.send(e)\n\t\tif name not in stock_data:\n\t\t\tawait ctx.send(f'I couldn\\'t find any data for the stock {name}. Please try another stock.')\n\t\t\treturn\n\t\tprice = stock_data[name]['price']\n\t\ttry:\n\t\t\tbal = await bank.withdraw_credits(ctx.author, shares * price)\n\t\texcept ValueError:\n\t\t\tbal = await bank.get_balance(ctx.author)\n\t\t\tawait ctx.send(\n\t\t\t\tf'You cannot afford {shares} share{plural} of {name}. '\n\t\t\t\tf'It would cost {price * shares} {currency} ({price} {currency} each). '\n\t\t\t\tf'You only have {bal} {currency}.'\n\t\t\t)\n\t\t\treturn\n\t\tasync with self.config.user(ctx.author).stocks() as user_stocks:\n\t\t\tif name in user_stocks:\n\t\t\t\tuser_stocks[name]['count'] += shares\n\t\t\telse:\n\t\t\t\tuser_stocks[name] = {'count': shares, 'total_count': stock_data[name]['total_count']}\n\t\tawait ctx.send(\n\t\t\tf'You purchased {shares} share{plural} of {name} for {price * shares} {currency} '\n\t\t\tf'({price} {currency} each).\\nYou now have {bal} {currency}.'\n\t\t)", "def buy(self, stock, amount):\n self.orders[stock] += amount", "def buy_stock (self, ticker, buy_date, sell_date, amount):\n\n if self.__buy_stock_init__(ticker, buy_date, sell_date, amount) == False:\n return\n\n if self.__get_hist__() == False:\n return\n\n self.__calc_no_shares_to_buy__()\n self.__update_buy_amount__() \n self.__save_buy__()", "def buy(self):\n\n from_symbol = self.symbol\n to_symbol = self.currency\n price = self.data[0].close\n amount = self.portfolio['buy_sell_amount'][self.currency]\n date = self.date\n\n if self.slippage:\n slip_factor = (self.data[-1].high - self.data[-1].close)*self.slippage\n price += np.abs(slip_factor)\n\n self.trade_manager.buy(from_symbol, to_symbol, price, amount, date)", "def buy():\n # User reached route via POST (as by submitting a form via POST)\n if request.method == \"POST\":\n\n # Ensure symbol was submitted\n if not request.form.get(\"symbol\"):\n return apology(\"must provide symbol\", 403)\n \n # Creates dict\n symbol_info = lookup(request.form.get(\"symbol\"))\n \n # Checks that symbol exists\n if symbol_info == None:\n return apology(\"Invalid Symbol\", 403)\n \n # Ensure number of shares was submitted\n if not request.form.get(\"shares\"):\n return apology(\"must provide number of shares\", 403)\n \n # Ensure shares is valid\n try:\n if not int(request.form.get(\"shares\")) > 0:\n return apology(\"invalid value\", 403)\n except ValueError:\n return apology(\"invalid value\", 403)\n \n # Ensure there's enough money to buy share\n user_money = db.execute(\"SELECT cash FROM users WHERE id=:userid\", userid=session[\"user_id\"])\n cash = float(user_money[0][\"cash\"])\n if cash < float(symbol_info[\"price\"]) * float(request.form.get(\"shares\")):\n return apology(\"Not enough money\", 403)\n \n # Update user\n updated_money = cash - (float(symbol_info[\"price\"]) * float(request.form.get(\"shares\")))\n db.execute(\"UPDATE users SET cash = :updated WHERE id=:usid\", updated=updated_money, usid=session[\"user_id\"])\n \n # Update shares table\n symbol_dicts = db.execute(\"SELECT share FROM shares WHERE user_id = :usid\", 
usid=session[\"user_id\"])\n exist = 0\n for i in range(len(symbol_dicts)):\n if symbol_dicts[i][\"share\"].upper() == request.form.get(\"symbol\").upper():\n exist = 1\n break\n \n if exist == 0:\n db.execute(\"INSERT INTO shares (user_id, share, share_count) VALUES (:usid, :symbol, :count)\", usid=session[\"user_id\"], symbol=request.form.get(\"symbol\").upper(), count=int(request.form.get(\"shares\")))\n else:\n db.execute(\"UPDATE shares SET share_count = share_count + :count WHERE share = :symbol AND user_id = :usid\", count=int(request.form.get(\"shares\")), symbol=request.form.get(\"symbol\").upper(), usid=session[\"user_id\"])\n \n # Record transaction\n db.execute(\"INSERT INTO history (user_id, symbol, shares, time, price) VALUES (:usid, :symbol, :shares, :time, :price)\", usid=session[\"user_id\"], symbol=symbol_info[\"symbol\"], shares=request.form.get(\"shares\"), time=str(db.execute(\"SELECT CURRENT_TIMESTAMP\")[0][\"CURRENT_TIMESTAMP\"]), price=str(symbol_info[\"price\"]))\n \n return redirect(\"/\")\n\n # User reached route via GET (as by clicking a link or via redirect)\n else:\n return render_template(\"buy.html\")", "def buy():\n\n\n if request.method == \"GET\":\n return render_template(\"buy.html\")\n else:\n # ensure proper symbol\n stock = lookup(request.form.get(\"symbol\"))\n if not stock:\n return apology(\"Invalid Symbol\")\n\n # ensure that user requests for correct number of shares\n try:\n shares = int(request.form.get(\"shares\"))\n if shares < 0:\n return apology(\"Amount of shares must be greater than 0\")\n except:\n return apology(\"Amount of shares must be greater than 0\")\n\n # Retrieve the cash a user has\n dollars = db.execute(\"SELECT cash FROM users WHERE id = :id\", \\\n id=session[\"user_id\"])\n\n # check if enough cash to buy\n if not dollars or float(dollars[0][\"cash\"]) < stock[\"price\"] * shares:\n return apology(\"You cannot buy shares! 
Please add more cash\")\n\n now = datetime.now()\n date_time = now.strftime(\"%Y-%m-%d %H:%M\")\n\n\n # update history of shares bought\n db.execute(\"INSERT INTO history (symbol, shares, price, id, method, times, totaltrans) \\\n VALUES(:symbol, :shares, :price, :id, :method, :times, :totaltrans)\", \\\n symbol=stock[\"symbol\"], shares=shares, \\\n price=usd(stock[\"price\"]), id=session[\"user_id\"], method = \"Buy\", times= date_time, totaltrans = shares * stock[\"price\"] )\n\n # update user cash\n db.execute(\"UPDATE users SET cash = cash - :purchase WHERE id = :id\", \\\n id=session[\"user_id\"], \\\n purchase=stock[\"price\"] * float(shares))\n\n # Select user shares of that symbol\n user_shares = db.execute(\"SELECT shares FROM portfolio \\\n WHERE id = :id AND symbol=:symbol\", \\\n id=session[\"user_id\"], symbol=stock[\"symbol\"])\n\n # if user doesn't has shares of that symbol, create new stock object\n if not user_shares:\n db.execute(\"INSERT INTO portfolio (id, name, shares, symbol, price, total) \\\n VALUES(:id, :name, :shares, :symbol, :price, :total)\", \\\n id=session[\"user_id\"] , name=stock[\"name\"], \\\n shares=shares, symbol=stock[\"symbol\"], price=usd(stock[\"price\"]), \\\n total=usd(shares * stock[\"price\"]))\n\n # Else increment the shares count\n else:\n shares_total = user_shares[0][\"shares\"] + shares\n db.execute(\"UPDATE portfolio SET shares=:shares \\\n WHERE id=:id AND symbol=:symbol\", \\\n shares=shares_total, id=session[\"user_id\"], \\\n symbol=stock[\"symbol\"])\n\n # return to index\n return redirect(url_for(\"index\"))", "def buy():\n \n user_id = session[\"user_id\"]\n\n if request.method == \"GET\":\n return render_template(\"buy.html\")\n \n if request.method == \"POST\":\n \n # get required symbol\n symbol = request.form.get(\"symbol\").upper()\n try:\n qty = int(request.form.get(\"qty\"))\n except ValueError:\n return apology(\"QTY is empty!\", 400)\n \n # proceed buy function\n buy_result: Tuple[float, str] = buy_share(db, user_id, symbol, qty )\n if buy_result[0] == -1:\n return apology(buy_result[1], 400)\n\n return redirect(\"/\", 200)", "def buy():\n if request.method == \"POST\":\n symbol = request.form.get(\"symbol\")\n shares = request.form.get(\"shares\")\n # handle fractional, negative, and non-numeric share number\n if not symbol or lookup(symbol) == None:\n return apology(\"must provide valid symbol and share number\", 400)\n elif shares.isdigit() == False or int(shares) <= 0:\n return apology(\"must provide valid share number\", 400)\n\n # calculate total price for the buy request\n curr_price = lookup(symbol)[\"price\"]\n total_price = curr_price * int(shares)\n\n # db.execute returns list of dicts (one dict, actually), where key == \"cash\" and value - cash left in user's account\n cash_left = db.execute(\"SELECT cash FROM users WHERE id = :id\", id = session[\"user_id\"])[0][\"cash\"]\n\n #ensure user has enough money to buy the shares\n if total_price > cash_left:\n return apology(\"not enough cash left\")\n\n # add stock to the users portfolio\n db.execute(\"INSERT INTO portfolio (id, Symbol, Company, Shares, Price, Total) VALUES(:id, :Symbol, :Company, :Shares, :Price, :Total)\",\n id=session[\"user_id\"], Symbol=symbol.upper(), Company=lookup(symbol)[\"name\"],\n Shares=shares, Price=curr_price, Total=total_price)\n\n # update cash\n db.execute('UPDATE users SET cash = :cash WHERE id = :id', cash=cash_left - total_price, id=session[\"user_id\"])\n\n flash(\"Bought!\")\n\n return redirect(\"/\")\n\n else:\n return 
render_template(\"buy.html\")", "def marketBuy(self, currency_pair, amount):\n # calcular o rate num 'for'\n asks = self.rOrderBook(currency_pair=currency_pair, field='asks')\n list_resp = []\n for ask in asks:\n if ask[1] < amount:\n bought = self.limitBuy(currency_pair, rate=ask[0], amount=ask[1], ioc=True)\n list_resp.append(bought)\n amount -= ask[1]\n elif ask[1] >= amount:\n bought = self.limitBuy(currency_pair, rate=ask[0], amount=amount, ioc=True)\n list_resp.append(bought)\n amount -= amount\n break\n return list_resp", "async def buy(self, ctx, amount : float, symbol, date=None):\n user = ctx.message.author\n portfolio = GetPortfolio(user.id, util.GetTimestamp(date))\n portfolio.Buy(amount, symbol)\n await self.bot.say('%s\\'s portfolio is now worth $%.2f.' % \n (ctx.message.author, portfolio.Value()))\n portfolio.Save()", "def buy():\n if request.method == \"POST\":\n symbol = request.form.get(\"symbol\")\n if not symbol:\n return apology(\"Must enter a symbol\", 400)\n num_shares = request.form.get(\"shares\")\n if not num_shares:\n return apology(\"Must enter some number of shares to buy\", 400)\n company_quote = lookup(symbol)\n if company_quote == None:\n return apology(\"Invalid Symbol\", 400)\n num_shares = int(num_shares)\n if num_shares <= 0:\n return apology(\"Must enter a positve number of shares to buy\", 400)\n balance = db.execute(\"SELECT cash FROM users WHERE id = :id\",\n id=session['user_id'])\n balance = balance[0][\"cash\"]\n cost = num_shares * company_quote[\"price\"]\n if balance < cost:\n return apology(\"Insufficient cash\", 400)\n else:\n new_balance = balance - cost\n date_time = datetime.now().strftime(\"%d/%m/%Y %H:%M:%S\")\n # Update history in history table\n return_val = db.execute(\"INSERT INTO 'history' (id, symbol, shares, price, transacted) VALUES (:id, :symbol, :shares, :price, :transacted)\",\n id=session[\"user_id\"], symbol=company_quote[\"symbol\"], shares=num_shares, price=company_quote[\"price\"], transacted = date_time)\n if return_val == None:\n return apology(\"something went wrong\", 403)\n\n\n #Update total number and value of each shares (symbol) held in totalshares table\n rows = db.execute(\"SELECT id, symbol, numshares, totalvalue FROM totalshares WHERE id = :id AND symbol = :symbol\",\n id=session[\"user_id\"], symbol=company_quote[\"symbol\"])\n if len(rows) != 1: #if nothing is returned i.e id and symbol combination does not already exist, insert it\n return_val = db.execute(\"INSERT INTO totalshares (id, symbol, numshares, totalvalue) VALUES (:id, :symbol, :numshares, :totalvalue)\",\n id=session[\"user_id\"], symbol=company_quote[\"symbol\"], numshares=num_shares, totalvalue=cost)\n if return_val == None:\n return apology(\"something went wrong\", 403)\n else: #if id, symbol combination exists already, update numshares and totalvalue\n new_numshares = rows[0][\"numshares\"] + num_shares\n new_totalvalue = rows[0][\"totalvalue\"] + cost\n return_val = db.execute(\"UPDATE totalshares SET numshares = :new_numshares, totalvalue = :new_totalvalue WHERE id = :id AND symbol = :symbol\",\n new_numshares=new_numshares, new_totalvalue=new_totalvalue, id=session[\"user_id\"], symbol=company_quote[\"symbol\"])\n if return_val == None:\n return apology(\"something went wrong\", 403)\n\n #Update balance in users table\n return_val = db.execute(\"UPDATE users SET cash = :cash WHERE id = :id\", cash=new_balance, id=session[\"user_id\"])\n if return_val != None:\n return redirect(\"/\")\n else:\n return apology(\"something went wrong\", 
403)\n\n else:\n return render_template(\"buy.html\")", "def buy(self, price, volume):\r\n self.order(\"bid\", price, volume)", "def sell():\n if request.method == \"POST\":\n # Ensure data is inputted\n if not request.form.get(\"symbol\"):\n return apology(\"Insert symbol\", 403)\n \n if not request.form.get(\"shares\"):\n return apology(\"Insert number of shares to sell\", 403)\n \n # Ensure shares value is valid\n try:\n if not int(request.form.get(\"shares\")) > 0:\n return apology(\"invalid value\", 403)\n except ValueError:\n return apology(\"invalid value\", 403)\n \n # Ensure there's enough shares to sell \n share_count_dict = db.execute(\"SELECT share_count FROM shares WHERE user_id=:usid AND share=:share\", usid=session[\"user_id\"], share=request.form.get(\"symbol\").upper())\n share_count = int(share_count_dict[0][\"share_count\"])\n \n if int(request.form.get(\"shares\")) > share_count:\n return apology(\"You don't own enough shares\", 403)\n \n # Create variables\n symbol = request.form.get(\"symbol\").upper()\n quantity = int(request.form.get(\"shares\"))\n \n # Add cash to user data\n new_cash = float(lookup(symbol)[\"price\"]) * quantity\n db.execute(\"UPDATE users SET cash= cash + :cash WHERE id=:usid\", cash=new_cash, usid=session[\"user_id\"]) \n \n # Remove shares of user data\n db.execute(\"UPDATE shares SET share_count = share_count - :shares WHERE user_id=:usid AND share = :share\", shares=quantity,share=symbol, usid=session[\"user_id\"])\n db.execute(\"DELETE FROM shares WHERE user_id=:usid AND share_count = :shares\", usid=session[\"user_id\"], shares=0)\n \n # Record transaction\n db.execute(\"INSERT INTO history (user_id, symbol, shares, time, price) VALUES (:usid, :symbol, :shares, :time, :price)\", usid=session[\"user_id\"], symbol=symbol, shares='-' + str(quantity), time=str(db.execute(\"SELECT CURRENT_TIMESTAMP\")[0][\"CURRENT_TIMESTAMP\"]), price=str(lookup(symbol)[\"price\"]))\n \n return redirect(\"/\")\n \n else:\n # Create list with purchased symbols\n symbol_dicts = db.execute(\"SELECT share FROM shares WHERE user_id=:usid\", usid=session[\"user_id\"])\n symbol_list = [None] * len(symbol_dicts)\n \n # Insert symbols into list\n for i in range(len(symbol_dicts)):\n symbol_list[i] = symbol_dicts[i][\"share\"]\n \n return render_template(\"sell.html\", longitude=len(symbol_dicts), symbols=symbol_list)", "def buy():\n if request.method == \"POST\":\n\n if not request.form.get(\"symbol\"):\n return apology(\"Missing symbol\")\n\n elif not request.form.get(\"shares\"):\n return apology(\"Missing shares\")\n # Проверка поля внутри формы, число или нет.\n elif not request.form.get(\"shares\").isdigit():\n return apology(\"Please chose integer\")\n # проверка числа на позитивность.\n elif int(request.form.get(\"shares\")) < 1:\n return apology(\"number of stocks is less zero\", 400)\n\n # проверка цены по символу\n symbol = request.form.get(\"symbol\")\n quote = lookup(symbol)\n # Проверка на валидность символа\n if quote == None :\n return apology(\"The stock does not exist\", 400)\n # Сохраняем цену данного символа в переменную\n price = quote[\"price\"]\n # Вибираем кеш пользователя из базы данных.\n cash = db.execute(\"SELECT cash FROM users WHERE id=:id\", id=session[\"user_id\"])\n # цену символа умножаем на число пользователя, если оно больше чем бюджет,вернуть apology\n if float(price) * int(request.form.get(\"shares\")) > float(cash[0][\"cash\"]):\n return apology(\"You don't have enough cash\", 400)\n #\n else:\n # обновляем кеш\n rows3 = 
db.execute(\"UPDATE users SET cash =:update_cash WHERE id=:id\", update_cash = float(cash[0][\"cash\"]) - (float(price)*int(request.form.get(\"shares\"))), id=session[\"user_id\"])\n # Вибираем в портфеле все символы, для проверки на наличие shares (кол-во) акций\n rows2 = db.execute(\"SELECT * FROM portfolio WHERE id=:id AND symbol=:symbol\",id=session[\"user_id\"], symbol=symbol )\n # Если нету shares в определенном символе,тогда добавить.\n if len(rows2) == 0:\n db.execute(\"INSERT INTO partfolio ( id, symbol, shares) VALUES (:id, :symbol, :shares)\",id=session[\"user_id\"] )\n else:\n #Если есть уже кол-во акций,тогда обновить старое кол-во на новое кол-во.\n db.execute(\"UPDATE partfolio SET shares= shares + :shares\",shares = shares)\n\n\n else:\n\n return render_template(\"buy.html\")", "def buy():\n if request.method == \"POST\":\n\n sym = request.form.get(\"symbol\").upper()\n shares = request.form.get(\"shares\")\n row = db.execute(\"SELECT * FROM users WHERE id=:id\",\n id=session['user_id'])\n\n if not sym:\n return apology(\"must provide stock's symbol\")\n\n if not shares:\n return apology(\"must provide desired shares\")\n\n query = lookup(sym)\n if not query:\n return apology(\"lookup failed, try again later\")\n\n price = query['price']\n name = query['name']\n cash = row[0]['cash']\n user = row[0]['username']\n\n if cash < price * int(shares):\n return apology(\"Cannot afford \" + shares + \" shares of \" + sym)\n else:\n db.execute(\"INSERT INTO transactions (id, user, symbol, name, price, shares) VALUES(NULL, :user, :symbol, :name, :price, :shares)\",\n user=user, symbol=sym, name=name, price=price, shares=int(shares))\n db.execute(\"UPDATE users SET cash=:cash WHERE id = :id\",\n cash=cash-price*int(shares), id=session['user_id'])\n return redirect(url_for(\"index\"))\n else:\n return render_template(\"buy.html\")", "def sell():\n \n if request.method == \"POST\":\n if not request.form.get('symbol'):\n return apology('must provide symbol')\n \n if not request.form.get('shares'):\n return apology('must provide shares')\n \n symbol = (request.form.get(\"symbol\")).upper()\n \n row = db.execute(\"SELECT * FROM users WHERE id=:id\", id=session['user_id'])\n username = row[0]['username']\n \n result = db.execute(\"SELECT * FROM portfolio WHERE symbol=:symbol AND username=:username\", symbol=symbol, username=username)\n if not result:\n return apology('no symbol available')\n \n shares = int(request.form.get('shares'))\n \n if shares <= 0:\n return apology('shares not positive')\n \n row = db.execute(\"SELECT * FROM portfolio WHERE symbol=:symbol AND username=:username\", symbol=symbol, username=username)\n old_shares = row[0]['shares']\n \n if shares > old_shares:\n return apology('number exceeds available shares')\n \n new_shares = old_shares - shares\n \n if new_shares == 0:\n db.execute(\"DELETE FROM portfolio WHERE symbol=:symbol AND username=:username\", symbol=symbol, username=username)\n else:\n db.execute(\"UPDATE portfolio SET shares=:shares WHERE symbol=:symbol AND username=:username\", shares=new_shares, symbol=symbol, username=username)\n \n quote = lookup(symbol)\n price = quote['price']\n total_p = price * shares\n \n row = db.execute(\"SELECT * FROM users WHERE id=:id\", id=session['user_id'])\n old_cash = row[0]['cash']\n \n new_cash = old_cash + total_p\n \n db.execute(\"UPDATE users SET cash=:cash WHERE id=:id\", cash=new_cash, id=session['user_id'])\n \n #current_time = time.strftime(time.localtime(\"%H:%M:%S %m/%d/%Y\"))\n current_time = time.asctime( 
time.localtime(time.time()) )\n db.execute(\"INSERT INTO history (username, time, symbol, shares) VALUES (:username, :time, :symbol, :shares)\", username=username,time=current_time,symbol=symbol,shares=0-shares)\n \n # redirect user to home page\n return redirect(url_for(\"index\"))\n \n # else if user reached route via GET (as by clicking a link or via redirect)\n else:\n return render_template(\"sell.html\")", "def buy():\n if request.method == \"POST\":\n quote = lookup(request.form.get(\"symbol\"))\n if quote is None:\n return apology(\"Please enter a valid symbol\")\n shares = int(request.form.get(\"Shares\"))\n if shares < 0:\n return apology(\"Please enter a positive value\")\n shares = request.form.get(\"Shares\")\n symbol = request.form.get(\"symbol\")\n rows = db.execute(\"SELECT * FROM cash\")\n cash = db.execute(\"SELECT * FROM cash WHERE id=:id\", id=session[\"user_id\"])\n\n if request.form.get(\"id\") not in rows:\n db.execute(\"INSERT INTO cash (id, symbol, name, shares, cash) VALUES(:id, :symbol, :name, :shares, :cash)\", id=session[\"user_id\"], symbol=symbol, name = quote[\"name\"], shares=shares, cash=10000)\n\n else:\n for row in cash:\n cash = db.execute(\"SELECT * FROM cash WHERE id=:id\", id=session[\"user_id\"])\n if row[\"Symbol\"] == symbol:\n db.execute(\"UPDATE cash SET shares=:shares WHERE Symbol=:Symbol\", shares=cash[row][\"shares\"]+int(shares), Symbol=symbol)\n else:\n db.execute(\"INSERT INTO cash (symbol, name, shares) VALUES(:symbol, :name, :shares)\", symbol=symbol, name = quote[\"name\"], shares=shares)\n\n cash = db.execute(\"SELECT * FROM cash WHERE id=:id\", id=session[\"user_id\"])\n\n current_cash = cash[0][\"Cash\"] - (int(shares)*int(quote[\"price\"]))\n\n if current_cash > 0:\n db.execute(\"UPDATE cash SET cash = :cash WHERE symbol=:symbol\", cash=current_cash, symbol=symbol)\n flash(\"Bought!\")\n else:\n return apology(\"Not enough cash\", 403)\n\n return redirect(\"/\")\n\n else:\n return render_template(\"buy.html\")", "def buy():\n \n if request.method == \"POST\":\n if not request.form.get('symbol'):\n return apology('must provide symbol')\n \n if not request.form.get('shares'):\n return apology('must provide shares')\n \n symbol = (request.form.get(\"symbol\")).upper()\n quote = lookup(symbol)\n \n if not quote:\n return apology(\"Invalid Symbol\")\n \n price = usd(quote[\"price\"])\n \n shares = int(request.form.get('shares'))\n \n if shares <= 0:\n return apology('shares not positive')\n \n row = db.execute(\"SELECT * FROM users WHERE id= :id\", id=session[\"user_id\"])\n cash = row[0]['cash']\n \n total = shares * quote['price']\n \n if cash - total < 0:\n return apology('cannot afford')\n \n db.execute(\"UPDATE users SET cash=:cash WHERE id=:id\", cash=(cash-total), id=session['user_id'])\n \n username = row[0]['username']\n \n #current_time = time.strftime(\"%H:%M:%S %m/%d/%Y\")\n current_time = time.asctime( time.localtime(time.time()) )\n \n result = db.execute(\"SELECT * FROM portfolio WHERE symbol=:symbol AND username=:username\", symbol=symbol, username=username)\n \n if result:\n old_shares = result[0]['shares']\n new_shares = old_shares + shares\n db.execute(\"UPDATE portfolio SET shares=:shares WHERE symbol=:symbol AND username=:username\", shares=new_shares, symbol=symbol, username=username)\n else:\n db.execute(\"INSERT INTO portfolio (username, symbol, shares) VALUES (:username, :symbol, :shares)\", username=username,symbol=symbol,shares=shares)\n \n db.execute(\"INSERT INTO history (username, time, symbol, shares) 
VALUES (:username, :time, :symbol, :shares)\", username=username,time=current_time,symbol=symbol,shares=shares)\n \n # redirect user to home page\n return redirect(url_for(\"index\"))\n \n # else if user reached route via GET (as by clicking a link or via redirect)\n else:\n return render_template(\"buy.html\")", "def buy():\n if request.method == \"POST\":\n symbol = request.form.get(\"symbol\")\n shares = request.form.get(\"shares\")\n stock = lookup(symbol)\n cash = float(db.execute(\"SELECT cash FROM users WHERE id = :userid\", userid=session[\"user_id\"])[0][\"cash\"])\n if symbol == None or stock == None:\n return apology(\"The stock symbol you searched for doesn't exist.\")\n if not shares.isdigit():\n return apology(\"You have not entered a valid quantity of shares to buy.\")\n shares = int(shares)\n if shares < 1:\n return apology(\"You have entered an incorrect value for stock 'quantity'\")\n if (stock[\"price\"] * shares) > cash:\n return apology(\"You don't have enough cash to buy those stocks\")\n cost = round(shares*stock[\"price\"]*(-1), 2)\n db.execute(\"INSERT INTO shares (stock,symbol,value,quantity,cost,userid) VALUES(:st,:sy,:va,:qu,:co,:uid)\",\n {\"st\":stock[\"name\"],\"sy\":stock[\"symbol\"],\"va\":stock[\"price\"],\"qu\":shares,\"co\":cost,\"uid\":session[\"user_id\"]})\n db.execute(\"UPDATE users SET cash = :cash WHERE id = :userid\",{\"cash\":float(cash + cost),\"userid\":session[\"user_id\"]})\n inv = db.execute(\"SELECT quantity FROM inventory WHERE userid = :uid AND symbol = :sy\",\n {\"uid\":session[\"user_id\"],\"sy\":stock[\"symbol\"]})\n if not inv:\n db.execute(\"INSERT INTO inventory (symbol,quantity,userid) VALUES(:sy,:qu,:uid)\",\n {\"sy\":stock[\"symbol\"],\"qu\":shares,\"uid\":session[\"user_id\"]})\n else:\n quan = (shares + inv[0][\"quantity\"])\n db.execute(\"UPDATE inventory SET quantity = :qu WHERE userid =:uid AND symbol = :sy\",\n {\"qu\":quan,\"uid\":session[\"user_id\"],\"sy\":stock[\"symbol\"]})\n flash(\"Purchase completed successfully!\")\n return redirect(\"/\")\n else:\n return render_template(\"buy.html\")", "def sell():\n if request.method == \"POST\":\n if not request.form.get(\"symbol\"):\n return apology(\"must select a stock\", 400)\n elif not request.form.get(\"shares\"):\n return apology(\"must provide number of shares\", 400)\n elif float(request.form.get(\"shares\")) <= 0:\n return apology(\"number of shares must be greater than one\", 400)\n elif float(request.form.get(\"shares\")) > db.execute(\"SELECT number FROM portfolio WHERE username=:username AND symbol=:symbol\",\n username=db.execute(\"SELECT username FROM users WHERE id=:userId\",\n userId=session[\"user_id\"])[0][\"username\"],\n symbol=request.form.get(\"symbol\"))[0][\"number\"]:\n return apology(\"you don't own enough shares\", 400)\n\n numberOfShares = float(request.form.get(\"shares\"))\n\n priceOfEachShare = db.execute(\"SELECT price FROM portfolio WHERE username=:username AND symbol=:symbol\",\n username=db.execute(\"SELECT username FROM users WHERE id=:userId\",\n userId=session[\"user_id\"])[0][\"username\"],\n symbol=request.form.get(\"symbol\"))[0][\"price\"]\n\n totalValue = numberOfShares * priceOfEachShare\n\n db.execute(\"UPDATE users SET cash = cash + {0} WHERE id=:userId\".format(totalValue), userId=session[\"user_id\"])\n\n db.execute(\"UPDATE portfolio SET number = number - {0} WHERE username=:username AND symbol=:symbol\".format(request.form.get(\"shares\")),\n username=db.execute(\"SELECT username FROM users WHERE id=:userId\", 
userId=session[\"user_id\"])[0][\"username\"],\n symbol=request.form.get(\"symbol\"))\n\n if db.execute(\"SELECT number FROM portfolio WHERE username=:username AND symbol=:symbol\",\n username=db.execute(\"SELECT username FROM users WHERE id=:userId\", userId=session[\"user_id\"])[0][\"username\"],\n symbol=request.form.get(\"symbol\"))[0][\"number\"] == 0:\n db.execute(\"DELETE FROM portfolio WHERE username=:username AND symbol=:symbol\",\n username=db.execute(\"SELECT username FROM users WHERE id=:userId\", userId=session[\"user_id\"])[0][\"username\"],\n symbol=request.form.get(\"symbol\"))\n\n db.execute(\"INSERT INTO history (username, symbol, buyorsell, number, price, date) VALUES(:username, :symbol, :buyorsell, :number, :price, :date)\",\n username=db.execute(\"SELECT username FROM users WHERE id=:userId\", userId=session[\"user_id\"])[0][\"username\"],\n symbol=request.form.get(\"symbol\"), buyorsell=0, number=float(request.form.get(\"shares\")),\n price=priceOfEachShare, date=datetime.datetime.utcnow())\n\n return redirect(\"/\")\n\n else:\n symbolsList = db.execute(\"SELECT symbol FROM portfolio WHERE username=:username\",\n username=db.execute(\"SELECT username FROM users WHERE id=:userId\", userId=session[\"user_id\"])[0][\"username\"])\n return render_template(\"sell.html\", stocks=symbolsList)", "def buy():\n if request.method == \"POST\":\n\n if not request.form.get(\"shares\"):\n return apology(\"gimme share\", 400)\n if not lookup(request.form.get(\"symbol\")):\n return apology(\"not correct stock\", 400)\n if not request.form.get(\"shares\").isdigit():\n return apology(\"sorry bro\", 400)\n\n quote = lookup(request.form.get(\"symbol\"))\n\n money_list = db.execute(\"SELECT cash FROM users WHERE id = :current_id\", current_id=session[\"user_id\"])\n available_money = money_list[0][\"cash\"]\n\n total_price = int(request.form.get(\"shares\")) * float(quote[\"price\"])\n\n if available_money < total_price:\n return apology(\"no money bro\", 400)\n\n insertion = db.execute(\"INSERT INTO transactions (id, stock, units, price, time, type) VALUES (:current_id, :stock, :units, :price, :now, :type)\",\n current_id=session[\"user_id\"], stock=request.form.get(\"symbol\"), units=request.form.get(\"shares\"), price=float(quote[\"price\"]), now=datetime.datetime.now(), type=\"B\")\n updating = db.execute(\"UPDATE users SET cash = cash - :upd_price WHERE id = :current_id\",\n upd_price=total_price, current_id=session[\"user_id\"])\n\n money_upd_list = db.execute(\"SELECT cash FROM users WHERE id = :current_id\", current_id=session[\"user_id\"])\n available_money_upd = money_upd_list[0][\"cash\"]\n\n return render_template(\"buy_result.html\",\n shares=request.form.get(\"shares\"),\n symbol=request.form.get(\"symbol\"),\n price=usd(total_price),\n cash=usd(available_money_upd))\n else:\n return render_template(\"buy.html\")", "def buyshares():\n # Initialise buy form\n buyform = BuyShareForm()\n # Validate and process form data\n if(buyform.validate_on_submit()):\n # Buys shares\n issuerID = buyform.buysharecode.data\n quantity = buyform.buyquantity.data\n userID = current_user.userID\n # Call buyshare API\n buyshare = gdb.buyshare(userID, issuerID, quantity)\n if(buyshare):\n # Flash with success message\n flash(\"Share purchase successful!\", category=\"success\")\n else:\n # Flash with warning message\n flash(\"Share purchase unsuccessful!\", category=\"error\")\n # Redirect to reffering page or dashboard\n return redirect(request.referrer or url_for('main.dashboard'))", "def 
buy():\n if request.method == \"POST\":\n if not request.form.get(\"symbol\"):\n return apology(\"must provide symbol\")\n elif not request.form.get(\"sharesnumber\"):\n return apology(\"must provide no of shares\")\n elif '.' in request.form.get(\"sharesnumber\"):\n return apology(\"No of shares is positive integer Invalid!!\")\n elif not request.form.get(\"sharesnumber\").isdigit():\n return apology(\"No of shares is positive integer Invalid!!\")\n elif not int(request.form.get(\"sharesnumber\")) > 0:\n return apology(\"No of shares is positive value Invalid!!\")\n \n result_dict = lookup(request.form.get(\"symbol\"))\n \n if result_dict == None:\n return apology(\"Symbol does not exist\")\n \n result_cash = db.execute(\"SELECT * from users where id = :id\",id=session[\"user_id\"])\n net_cash = result_cash[0][\"cash\"]\n net_required = int(request.form.get(\"sharesnumber\")) * result_dict['price']\n if net_required > net_cash:\n return apology(\"Oops Don't Have enough Cash!!\")\n \n \n #Update Cash\n net_cash = net_cash - net_required\n db.execute(\"UPDATE users SET cash = :cash WHERE id = :id\",cash= net_cash,id=session[\"user_id\"])\n \n #Update History Tables\n \n db.execute(\"INSERT INTO history(user_id,symbol,price,shares) VALUES(:id,:symbol,:price,:shares) \",id=session[\"user_id\"],symbol=result_dict['symbol'],price=result_dict['price'],shares=request.form.get(\"sharesnumber\"))\n \n #Check Whether user has shares for same symbol\n rows = db.execute(\"SELECT * FROM netshares WHERE user_id = :id AND symbol=:symbol\",id=session[\"user_id\"],symbol=result_dict['symbol'])\n #Update NetShares Table\n if len(rows) == 0:\n db.execute(\"INSERT INTO netshares(user_id,symbol,shares) VALUES(:id,:symbol,:shares)\",id=session[\"user_id\"],symbol=result_dict['symbol'],shares=request.form.get(\"sharesnumber\"))\n else:\n db.execute(\"UPDATE netshares SET shares=:shares WHERE user_id = :id AND symbol=:symbol\",shares= int(request.form.get(\"sharesnumber\"))+int(rows[0]['shares']),id=session[\"user_id\"],symbol=result_dict['symbol'])\n return redirect(url_for(\"index\"))\n \n else:\n return render_template(\"buy.html\")\n \n \n #return apology(\"TODO\")", "def sell():\n if request.method == \"GET\":\n return render_template('sell.html')\n \n if request.method == \"POST\":\n symbol = request.form['symbol']\n shares = request.form['shares']\n stock = lookup(symbol)\n \n if not stock:\n return apology('Invalid symbol')\n \n user_shares = db.execute(\"SELECT shares FROM profile \\\n WHERE id = :id AND symbol=:symbol\", \\\n id=session[\"user_id\"], symbol=stock[\"symbol\"])\n if not user_shares or int(user_shares[0][\"shares\"]) < int(shares):\n return apology(\"Not enough shares\")\n db.execute(\"INSERT INTO history (company, shares, value, id, date) \\\n VALUES(:symbol, :shares, :price, :id, :date)\", \\\n symbol=stock[\"symbol\"], shares=-int(shares), \\\n price=stock[\"price\"], id=session[\"user_id\"], date = str(date.today())) \n db.execute(\"UPDATE users SET cash = cash + :purchase WHERE id = :id\", \\\n id=session[\"user_id\"], \\\n purchase=stock[\"price\"] * float(shares))\n \n shares_total = user_shares[0][\"shares\"] - int(shares)\n if shares_total == 0:\n db.execute(\"DELETE FROM profile \\\n WHERE id=:id AND symbol=:symbol\", \\\n id=session[\"user_id\"], \\\n symbol=stock[\"symbol\"])\n \n else:\n db.execute(\"UPDATE profile SET shares=:shares \\\n WHERE id=:id AND symbol=:symbol\", \\\n shares=shares_total, id=session[\"user_id\"], \\\n symbol=stock[\"symbol\"])\n \n return 
redirect(url_for(\"index\"))", "def sell():\n userid = session[\"user_id\"]\n if request.method == \"GET\":\n symbol = db.execute(\"SELECT symbol FROM purchase WHERE id=:uid\",uid=userid)\n # print(symbol)\n symbols = []\n for s in symbol:\n temp = s[\"symbol\"]\n symbols.append(temp)\n # print(symbols)\n return render_template(\"sell.html\", symbols=symbols)\n else:\n symbol_entry = request.form.get(\"symbol\")\n shares_entry = int(request.form.get(\"shares\"))\n if not symbol_entry or not shares_entry:\n return apology(\"Please select both symbol and shares\")\n\n data = db.execute(\"SELECT symbol, shares FROM purchase WHERE id=:uid\",uid=userid)\n share_check = 0\n\n for s in data:\n if(s[\"symbol\"] == symbol_entry):\n share_check = s[\"shares\"]\n # print(share_check)\n if shares_entry > share_check:\n return apology(\"You don't have this many shares of this company\")\n\n current_cash = (db.execute(\"SELECT cash FROM users WHERE id=:uid\", uid=userid))[0].get(\"cash\")\n query = lookup(symbol_entry)\n share_price = query[\"price\"]\n sold_price = share_price * shares_entry\n\n db.execute(\"UPDATE users SET cash=:sold WHERE id=:uid\",sold=sold_price+current_cash, uid=userid)\n if shares_entry == share_check:\n db.execute(\"DELETE FROM purchase WHERE symbol=:symbol AND id=:uid\", symbol=symbol_entry, uid=userid)\n else:\n db.execute(\"UPDATE purchase SET shares=:shares WHERE symbol=:symbol AND id=:uid\",shares=share_check-shares_entry,symbol=symbol_entry, uid=userid)\n\n nshare = -shares_entry\n dt = datetime.now(timezone(timedelta(hours=6)))\n dt = dt.strftime(\"%d-%m-%Y %H:%M:%S\")\n db.execute(\"INSERT INTO history (id, symbol, shares, price, time) VALUES (:userid, :symbol, :shares, :price, :time)\", userid=userid, symbol=symbol_entry,shares=nshare,price=share_price, time=dt)\n return render_template(\"sell.html\", message=\"Sold!\")\n print(data)", "def buy():\n\n if request.method == \"POST\":\n if not request.form.get(\"symbol\"):\n return apology(\"missing symbol\")\n\n if not request.form.get(\"shares\").isdigit():\n return apology(\"must be a positive integer\", 400)\n\n if not request.form.get(\"shares\"):\n return apology(\"missing shares\")\n\n try:\n shares = int(request.form.get(\"shares\"))\n except:\n return apology(\"shares must be a positive integer\", 400)\n\n quote = lookup(request.form.get(\"symbol\"))\n\n if not quote:\n return apology(\"Invalid symbol\")\n\n row = db.execute(\"SELECT cash FROM users WHERE id = :id\", id=session[\"user_id\"])\n cash = int(row[0][\"cash\"])\n\n amount = quote[\"price\"] * float(request.form.get(\"shares\"))\n\n if cash < amount:\n return apology(\"not enough cash\")\n\n # add transaction to history\n db.execute(\"INSERT INTO histories (symbol, shares, price, id) VALUES(:symbol, :shares, :price, :id)\",\n symbol=quote[\"symbol\"], shares=request.form.get(\"shares\"), price=usd(quote[\"price\"]), id=session[\"user_id\"])\n\n # update cash remaining in database\n db.execute(\"UPDATE users SET cash = cash - :amount WHERE id=:id\", amount=amount, id=session[\"user_id\"])\n\n # check if user owns a share of symbol already\n user_shares = db.execute(\"SELECT * FROM portfolio WHERE id=:id AND symbol=:symbol\",\n id=session[\"user_id\"], symbol=quote[\"symbol\"])\n\n # if symbol is new\n if not user_shares:\n db.execute(\"INSERT INTO 'portfolio' ('Symbol','Shares','id','Name','Price') VALUES (:symbol, :shares, :id, :name, :price) \",\n symbol=quote[\"symbol\"], shares=request.form.get(\"shares\"), id=session[\"user_id\"], 
name=quote[\"name\"], price=quote[\"price\"])\n flash(\"Bought\")\n return redirect(\"/\")\n\n # if user already owns some share of the symbol\n else:\n total_shares = user_shares[0][\"shares\"] + int(request.form.get(\"shares\"))\n db.execute(\"UPDATE portfolio SET shares=:total_shares WHERE id=:id AND symbol=:symbol\",\n total_shares=total_shares, id=session[\"user_id\"], symbol=quote[\"symbol\"])\n flash(\"Bought\")\n return redirect(\"/\")\n\n return render_template(\"buy.html\")", "def buy():\n if request.method == \"POST\":\n\n # get share symbol from form\n symb = request.form.get(\"symbol\")\n # check if there is text and that it is a symbol\n if symb is None:\n return apology(\"Invalid symbol\", 400)\n else:\n # retrieve stock price, symbol and stock name via lookup function\n quote = lookup(request.form.get(\"symbol\"))\n\n # retrieve number of shares wanted as an int\n try:\n shares = int(request.form.get(\"shares\"))\n except ValueError:\n # return apology if not int\n return apology(\"Invalid amount\", 400)\n\n # if stock does not exist or is blank or if there is no quantity then apologise\n if quote is not None and shares > 0 :\n # get current user's cash. query session dict for current user logged in\n cash = db.execute(\"SELECT cash FROM users WHERE id=:id\", id=session[\"user_id\"])\n cash = cash[0][\"cash\"]\n\n # check that user has enough cash to purchase shares\n if cash > shares * quote[\"price\"]:\n # insert transaction into portfolio table if user has enough cash\n db.execute(\"INSERT INTO portfolio (name, userid, price, quantity) VALUES (:name, :userid, :price, :quantity)\",name=quote[\"symbol\"],userid=session[\"user_id\"], price=quote[\"price\"], quantity=shares)\n # update user's cash in the users table\n db.execute(\"UPDATE users SET cash = :cash WHERE id = :id\", cash=cash-shares*quote[\"price\"], id=session[\"user_id\"])\n # return user to index summary page after purchase\n return redirect(\"/\")\n else:\n flash(\"Not enough cash!\")\n return redirect(\"/buy\")\n else:\n return apology(\"Stock does not exist or quantity not given\", 400)\n\n # User reached route via GET (as by clicking a link or via redirect)\n else:\n return render_template(\"buy.html\")\n\n return apology(\"Buy failed\", 400)", "def buy():\n if request.method == \"GET\":\n return render_template(\"buy.html\")\n else:\n tick = request.form.get(\"ticker\")\n quote = lookup(tick)\n if not quote:\n return apology(\"Ticker does not exist\")\n shares = int(request.form.get(\"shares\"))\n if shares <= 0:\n return apology(\"Please input a valid number of shares\")\n money = db.execute(\"SELECT cash FROM users WHERE id = :id\", id=session[\"user_id\"])\n if float(money[0][\"cash\"]) < quote[\"price\"] * shares:\n return apology(\"Not enough money\")\n db.execute(\"UPDATE users SET cash = cash - :purchase WHERE id = :id\", id=session[\"user_id\"], purchase=(quote[\"price\"] * float(shares)))\n findshares = db.execute(\"SELECT shares FROM purchases WHERE user_id = :id AND ticker=:ticker\", id=session[\"user_id\"], ticker=quote[\"symbol\"])\n\n if not findshares:\n db.execute(\"INSERT INTO purchases (username, shares, price, total, ticker, user_id) VALUES(:username, :shares, :price, :total, :ticker, :id)\", username=quote[\"name\"], shares=shares, price=usd(quote[\"price\"]), total=usd(shares * quote[\"price\"]), ticker=quote[\"symbol\"], id=session[\"user_id\"])\n else:\n db.execute(\"UPDATE purchases SET shares=:number, total=:total WHERE user_id=:id AND ticker=:ticker\", id=session[\"user_id\"], 
ticker=quote[\"symbol\"], total=(float(quote[\"price\"])*float(shares)), number=int(findshares[0][\"shares\"]) + int(shares))\n return redirect(url_for(\"index\"))", "def buy(self,\n currency_pair,\n rate,\n amount):\n pass", "def buy():\n\n if request.method == 'GET':\n return render_template('buy.html')\n\n elif request.method == 'POST':\n try:\n shares = int(request.form.get('shares'))\n except:\n return apology('Quantidade de ações não inteira')\n\n if shares < 0:\n return apology('Quantidade de ações não positiva')\n elif not lookup(request.form.get('symbol')):\n return apology('Código de ação inválido')\n\n stock_symbol = request.form.get('symbol')\n price = lookup(stock_symbol)['price']\n total_purchase_cost = round((price * shares), 2)\n user_id = session.get('user_id')\n user_money = db.execute('SELECT dinheiro FROM users WHERE id = ?', user_id)[0]['dinheiro']\n\n if total_purchase_cost > user_money:\n return apology(\"Dinheiro insuficiente\")\n\n table_name = f'stocks_user{user_id}'\n db.execute(\"CREATE TABLE IF NOT EXISTS ? (stock_symbol TEXT NOT NULL, shares NUMBER NOT NULL, price NUMBER NOT NULL, time TEXT NOT NULL)\", table_name)\n db.execute(\"INSERT INTO ? (stock_symbol, shares, price, time) VALUES(?, ?, ?, ?)\", table_name, stock_symbol, shares, price, time_date())\n db.execute(\"UPDATE users SET dinheiro = ? WHERE id = ?\", (user_money - total_purchase_cost), user_id)\n\n return redirect('/')", "async def sell(self, ctx, name, shares: int):\n\t\tplural = 's' if shares != 1 else ''\n\t\tif shares < 1:\n\t\t\tawait ctx.send('You cannot sell less than one share.')\n\t\t\treturn\n\t\tname = name.upper()\n\t\ttry:\n\t\t\tstock_data = await self._get_stock_data([name])\n\t\texcept ValueError as e:\n\t\t\treturn await ctx.send(e)\n\t\tif name not in stock_data:\n\t\t\tawait ctx.send(f'I couldn\\'t find any data for the stock {name}. Please try another stock.')\n\t\t\treturn\n\t\tprice = stock_data[name]['price']\n\t\tasync with self.config.user(ctx.author).stocks() as user_stocks:\n\t\t\tif name not in user_stocks:\n\t\t\t\tawait ctx.send(f'You do not have any shares of {name}.')\n\t\t\t\treturn\n\t\t\tif shares > user_stocks[name]['count']:\n\t\t\t\tawait ctx.send(\n\t\t\t\t\tf'You do not have enough shares of {name}. 
'\n\t\t\t\t\tf'You only have {user_stocks[name]} share{plural}.'\n\t\t\t\t)\n\t\t\t\treturn\n\t\t\tuser_stocks[name]['count'] -= shares\n\t\t\tif user_stocks[name]['count'] == 0:\n\t\t\t\tdel user_stocks[name]\n\t\tbal = await bank.deposit_credits(ctx.author, shares * price)\n\t\tcurrency = await bank.get_currency_name(ctx.guild)\n\t\tawait ctx.send(\n\t\t\tf'You sold {shares} share{plural} of {name} for {price * shares} {currency} '\n\t\t\tf'({price} {currency} each).\\nYou now have {bal} {currency}.'\n\t\t)", "def sell():\n\n if request.method == \"POST\":\n symbol = request.form.get(\"symbol\")\n if not symbol:\n return apology(\"Must enter a symbol\", 400)\n num_shares = request.form.get(\"shares\")\n if not num_shares:\n return apology(\"Must enter some number of shares to sell\", 400)\n company_quote = lookup(symbol)\n if company_quote == None:\n return apology(\"Invalid Symbol\", 400)\n num_shares = int(num_shares)\n if num_shares <= 0:\n return apology(\"Must enter a positve number of shares to sell\", 400)\n\n rows = db.execute(\"SELECT id, symbol, numshares FROM totalshares WHERE id = :id AND symbol = :symbol\",\n id=session[\"user_id\"], symbol=company_quote[\"symbol\"])\n if len(rows) != 1:\n return apology(\"You do not have shares of \" + symbol, 400)\n if num_shares > rows[0][\"numshares\"]:\n return apology(\"You cannot sell more shares than you have\", 400)\n\n sale_value = num_shares * company_quote[\"price\"]\n\n balance = db.execute(\"SELECT cash FROM users WHERE id = :id\",\n id=session['user_id'])\n balance = balance[0][\"cash\"]\n new_balance = balance + sale_value\n date_time = datetime.now().strftime(\"%d/%m/%Y %H:%M:%S\")\n # Update history in history table\n return_val = db.execute(\"INSERT INTO 'history' (id, symbol, shares, price, transacted) VALUES (:id, :symbol, :shares, :price, :transacted)\",\n id=session[\"user_id\"], symbol=company_quote[\"symbol\"], shares=num_shares*-1, price=company_quote[\"price\"], transacted = date_time)\n if return_val == None:\n return apology(\"something went wrong\", 403)\n\n #Update total number and value of each shares (symbol) held in totalshares table\n rows = db.execute(\"SELECT id, symbol, numshares, totalvalue FROM totalshares WHERE id = :id AND symbol = :symbol\",\n id=session[\"user_id\"], symbol=company_quote[\"symbol\"])\n new_numshares = rows[0][\"numshares\"] - num_shares\n new_totalvalue = rows[0][\"totalvalue\"] - sale_value\n return_val = db.execute(\"UPDATE totalshares SET numshares = :new_numshares, totalvalue = :new_totalvalue WHERE id = :id AND symbol = :symbol\",\n new_numshares=new_numshares, new_totalvalue=new_totalvalue, id=session[\"user_id\"], symbol=company_quote[\"symbol\"])\n if return_val == None:\n return apology(\"something went wrong\", 403)\n\n #Update balance in users table\n return_val = db.execute(\"UPDATE users SET cash = :cash WHERE id = :id\", cash=new_balance, id=session[\"user_id\"])\n if return_val != None:\n return redirect(\"/\")\n else:\n return apology(\"something went wrong\", 403)\n else:\n rows = db.execute(\"SELECT symbol, numshares FROM totalshares WHERE id = :id\", id=session[\"user_id\"])\n symbol_options = []\n if rows != None and len(rows) > 0:\n for row in rows:\n if row[\"numshares\"] > 0:\n symbol_options.append(row[\"symbol\"])\n return render_template(\"sell.html\", symbol_options=symbol_options)", "def buy():\n if request.method == \"GET\":\n return render_template(\"buy.html\")\n else:\n stock = lookup(request.form.get(\"symbol\"))\n\n if stock == None:\n return 
apology(\"Symbol not found. Please re-check the symbol and try again!\")\n\n shares = int(request.form.get(\"shares\"))\n if not shares or int(shares) <= 0:\n return apology(\"Invalid shares. Please re-check and try again!\")\n\n company_name = stock[\"name\"]\n price = float(stock[\"price\"])\n symbol = stock[\"symbol\"]\n userid = session[\"user_id\"]\n available_cash = (db.execute(\"SELECT cash FROM users WHERE id=:id\", id = userid))[0].get(\"cash\")\n total = shares*price\n if total > available_cash:\n return apology(\"Sorry! You do not have sufficient balance\")\n else:\n check = (db.execute(\"SELECT symbol FROM purchase WHERE symbol=:symbol AND id=:uid\", symbol=symbol, uid=userid))\n dt = datetime.now(timezone(timedelta(hours=6)))\n dt = dt.strftime(\"%d-%m-%Y %H:%M:%S\")\n db.execute(\"INSERT INTO history (id, symbol, shares, price, time) VALUES (:userid, :symbol, :shares, :price, :time)\", userid=userid, symbol=symbol,shares=shares,price=price, time=dt)\n db.execute(\"UPDATE users SET cash=:cash WHERE id=:uid\", cash=available_cash-shares*price, uid=userid)\n\n # check = (db.execute(\"SELECT symbol FROM history WHERE symbol=:symbol\", symbol=symbol))[0].get(\"symbol\")\n print(len(check))\n if len(check) == 0:\n db.execute(\"INSERT INTO purchase (id, symbol, name, shares) VALUES (:userid, :symbol, :name, :shares)\", userid=userid, symbol=symbol, name=company_name, shares=shares)\n else:\n exshares = int((db.execute(\"SELECT shares FROM purchase WHERE symbol=:symbol AND id=:uid\", symbol=symbol,uid=userid))[0].get(\"shares\"))\n # print(exshares+\" \"+type(exshares))\n extotal = float((db.execute(\"SELECT total FROM purchase WHERE symbol=:symbol AND id=:uid\", symbol=symbol,uid=userid))[0].get(\"total\"))\n db.execute(\"UPDATE purchase SET shares=:newshares WHERE symbol=:symbol AND id=:uid\", newshares=shares+exshares, symbol=symbol, uid=userid)\n return render_template(\"bought.html\", company_name=company_name, shares=shares, symbol=symbol, usd=usd(shares*price), balance=usd(available_cash-shares*price))", "def sell():\n\n if request.method == \"GET\":\n return render_template(\"sell.html\")\n else:\n # ensure proper symbol\n stock = lookup(request.form.get(\"symbol\"))\n if not stock:\n return apology(\"Invalid Symbol\")\n\n # ensure proper number of shares\n try:\n shares = int(request.form.get(\"shares\"))\n if shares < 0:\n return apology(\"Amount of shares must be greater than 0\")\n except:\n return apology(\"Amount of shares must be greater than 0\")\n\n # select the symbol shares of that user\n user_shares = db.execute(\"SELECT shares FROM portfolio \\\n WHERE id = :id AND symbol=:symbol\", \\\n id=session[\"user_id\"], symbol=stock[\"symbol\"])\n\n # check if enough shares to sell\n if not user_shares or int(user_shares[0][\"shares\"]) < shares:\n return apology(\"You don't hold enough shares\")\n\n now = datetime.now()\n date_time = now.strftime(\"%Y-%m-%d %H:%M\")\n\n # update history of a sell\n db.execute(\"INSERT INTO history (symbol, shares, price, id, method, times, totaltarns) \\\n VALUES(:symbol, :shares, :price, :id, :method, :times, :totaltrans)\", \\\n symbol=stock[\"symbol\"], shares=-shares, \\\n price=usd(stock[\"price\"]), id=session[\"user_id\"], method= \"sell\", times= date_time, totaltrans = shares * stock[\"price\"])\n\n # update user cash (increase)\n db.execute(\"UPDATE users SET cash = cash + :purchase WHERE id = :id\", \\\n id=session[\"user_id\"], \\\n purchase=stock[\"price\"] * float(shares))\n\n # decrement the shares count\n amountshares = 
user_shares[0][\"shares\"] - shares\n\n # if after decrement is zero, delete shares from portfolio\n if amountshares == 0:\n db.execute(\"DELETE FROM portfolio \\\n WHERE id=:id AND symbol=:symbol\", \\\n id=session[\"user_id\"], \\\n symbol=stock[\"symbol\"])\n # otherwise, update portfolio shares count\n else:\n db.execute(\"UPDATE portfolio SET shares=:shares \\\n WHERE id=:id AND symbol=:symbol\", \\\n shares=amountshares, id=session[\"user_id\"], \\\n symbol=stock[\"symbol\"])\n\n # return to index\n return redirect(url_for(\"index\"))", "def buy():\n if request.method == \"POST\":\n if not request.form.get(\"symbol\"):\n return apology(\"must provide ticker\", 400)\n elif not request.form.get(\"shares\"):\n return apology(\"must provide number of shares\", 400)\n elif not request.form.get(\"shares\").isdigit():\n return apology(\"must enter numbers\", 400)\n elif float(request.form.get(\"shares\")) <= 0 or (float(request.form.get(\"shares\")) % 1 != 0):\n return apology(\"number must be integer greater than one\", 400)\n elif not lookup(request.form.get(\"symbol\")):\n return apology(\"couldn't find company\", 400)\n\n currentSymbols = db.execute(\"SELECT symbol FROM portfolio WHERE username=:username\",\n username=db.execute(\"SELECT username FROM users WHERE id=:userId\", userId=session[\"user_id\"])[0][\"username\"])\n\n for symbol in currentSymbols:\n if symbol[\"symbol\"].lower() == request.form.get(\"symbol\").lower():\n return apology(\"you've already bought that stock\", 403)\n\n currentBalance = db.execute(\"SELECT cash from users WHERE id=:userId\", userId=session[\"user_id\"])[0][\"cash\"]\n buyingPrice = lookup(request.form.get(\"symbol\"))[\"price\"] * float(request.form.get(\"shares\"))\n if currentBalance < buyingPrice:\n return apology(\"not enough cash\", 403)\n else:\n db.execute(\"UPDATE users SET cash = cash - {0} WHERE id=:userId\".format(buyingPrice), userId=session[\"user_id\"])\n username = db.execute(\"SELECT username FROM users WHERE id=:userId\", userId=session[\"user_id\"])[0][\"username\"]\n symbol = lookup(request.form.get(\"symbol\"))[\"symbol\"]\n numberOfShares = float(request.form.get(\"shares\"))\n price = lookup(request.form.get(\"symbol\"))[\"price\"]\n date = datetime.datetime.utcnow()\n db.execute(\"INSERT INTO portfolio (username, symbol, number, price, date) VALUES(:username, :symbol, :number, :price, :date)\",\n username=username, symbol=symbol, number=numberOfShares, price=price, date=date)\n\n db.execute(\"INSERT INTO history (username, symbol, buyorsell, number, price, date) VALUES(:username, :symbol, :buyorsell, :number, :price, :date)\",\n username=db.execute(\"SELECT username FROM users WHERE id=:userId\", userId=session[\"user_id\"])[0][\"username\"],\n symbol=symbol, buyorsell=1, number=float(request.form.get(\"shares\")),\n price=price, date=datetime.datetime.utcnow())\n\n return redirect(\"/\")\n else:\n return render_template(\"buy.html\")", "def sell():\n \n # if user reached route via POST, check all fields are filled\n if request.method == \"POST\":\n if not request.form.get(\"symbol\") or not request.form.get(\"shares\"):\n return apology(\"must provide symbol and number of shares\")\n \n # use lookup function to get stock info\n quote = lookup(request.form.get(\"symbol\"))\n \n # ensure validity of form\n if quote == None:\n return apology(\"invalid symbol\")\n if not request.form.get(\"shares\").isdigit():\n return apology(\"must provide positive integer\")\n \n # initiate variables\n shares = 
int(request.form.get(\"shares\"))\n stocks = []\n \n # obtain user's stock information from portfolio database\n stocks = db.execute(\"SELECT shares FROM portfolio WHERE id = :id AND symbol = :symbol\", id=session[\"user_id\"], symbol = quote[\"symbol\"])\n \n # check that user actually owns enough stock, or any stock at all\n if stocks == []:\n return apology(\"you don't own any of this stock\")\n if shares > stocks[0][\"shares\"]:\n return apology(\"invalid number of shares\")\n \n # calculate price per share and cost of all shares\n price = round(float(quote[\"price\"]),2)\n cost = round(float(shares * price),2)\n \n # update user's cash balance\n db.execute(\"UPDATE users SET cash = cash + :cost WHERE id = :id\", cost = cost, id=session[\"user_id\"])\n \n # if there are still shares leftover after sale, update row\n if shares < stocks[0][\"shares\"]:\n db.execute(\"UPDATE portfolio SET shares = shares - :shares WHERE id = :id AND symbol = :symbol\", id=session[\"user_id\"], shares = shares, symbol = quote[\"symbol\"])\n \n # otherwise, if not shares leftover, remove row from portfolio entirely\n elif shares == stocks[0][\"shares\"]:\n db.execute(\"DELETE FROM portfolio WHERE id = :id AND symbol = :symbol\", id=session[\"user_id\"], symbol = quote[\"symbol\"])\n \n db.execute(\"INSERT INTO history (id,symbol,shares,price,date) VALUES (:id,:symbol,:shares,:price,datetime('now'))\",id=session[\"user_id\"], symbol=quote[\"symbol\"],shares=-shares,price=price)\n \n flash('Sold!')\n return redirect(url_for(\"index\"))\n \n # else if user reached route via GET\n else:\n return render_template(\"sell.html\")", "def sell():\n username = session.get(\"username\")\n if request.method == \"POST\":\n symbol = request.form.get(\"symbol\")\n req_quantity = request.form.get(\"shares\")\n if not req_quantity.isdigit() or int(req_quantity)<=0:\n return apology(\"Quantity must be positive integer\", 400)\n req_quantity = int(req_quantity)\n status = \"sold\"\n\n time = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n\n\n owned_stock = db.execute(\"SELECT SUM(quantity) FROM history WHERE username=:username GROUP BY stock_symbol HAVING stock_symbol=:symbol\",\n username=username, symbol=symbol)\n if owned_stock:\n owned_quantity = owned_stock[0][\"SUM(quantity)\"]\n stock = lookup(symbol)\n price = stock[\"price\"]\n name = stock[\"name\"]\n else:\n owned_quantity = 0\n if owned_quantity>=req_quantity:\n total_value = req_quantity * price\n db.execute(\"INSERT INTO history (username, stock_symbol, unit_price, time, quantity, stock_name, status) VALUES (:username, :symbol, :price, :time, :quantity, :name, :status)\",\n username=username, symbol=symbol, price=price, time=time, quantity=-req_quantity, name=name, status=status)\n db.execute(\"UPDATE users SET cash = cash+:total_value WHERE username=:username\",\n total_value=total_value, username=username)\n cash = db.execute(\"SELECT cash FROM users WHERE username=:username\", username=username)[0][\"cash\"]\n message = f\"Recorded sold {req_quantity} share(s) of {name} total {usd(total_value)}, your new cash balance is {usd(cash)}\"\n return render_template(\"sell.html\", message = message)\n else:\n return apology(\"Insufficient shares\", 400)\n # if db.execute()\n else:\n stock_options = db.execute(\"SELECT stock_symbol FROM history WHERE username=:username GROUP BY stock_symbol\", username=username)\n stock_options = [s[\"stock_symbol\"] for s in stock_options]\n\n # print(f\"Stock options: {stock_options}\")\n return 
render_template(\"sell.html\", options = stock_options)", "def sell():\n if request.method == \"POST\":\n\n if not request.form.get(\"shares\"):\n return apology(\"gimme share\", 400)\n if not lookup(request.form.get(\"symbol\")):\n return apology(\"not correct stock\", 400)\n if not request.form.get(\"shares\").isdigit():\n return apology(\"sorry bro\", 400)\n\n quote = lookup(request.form.get(\"symbol\"))\n\n money_list = db.execute(\"SELECT cash FROM users WHERE id = :current_id\", current_id=session[\"user_id\"])\n available_money = money_list[0][\"cash\"]\n\n total_price = int(request.form.get(\"shares\")) * float(quote[\"price\"])\n\n units_list = db.execute(\"SELECT SUM(units) FROM transactions WHERE id = :current_id AND stock = :stock_code\",\n current_id=session[\"user_id\"], stock_code=request.form.get(\"symbol\"))\n available_units = units_list[0][\"SUM(units)\"]\n\n if available_units < int(request.form.get(\"shares\")):\n return apology(\"no units bro\", 400)\n\n new_cash = available_money + total_price\n\n updating = db.execute(\"UPDATE users SET cash = :upd_cash WHERE id = :current_id\",\n upd_cash=new_cash, current_id=session[\"user_id\"])\n insertion = db.execute(\"INSERT INTO transactions (id, stock, units, price, time, type) VALUES (:current_id, :stock, :units, :price, :now, :type)\",\n current_id=session[\"user_id\"], stock=request.form.get(\"symbol\"), units=request.form.get(\"shares\"), price=float(quote[\"price\"]), now=datetime.datetime.now(), type=\"S\")\n\n money_upd_list = db.execute(\"SELECT cash FROM users WHERE id = :current_id\", current_id=session[\"user_id\"])\n available_money_upd = money_upd_list[0][\"cash\"]\n\n return render_template(\"sell_result.html\", shares=request.form.get(\"shares\"),\n symbol=request.form.get(\"symbol\"),\n price=usd(total_price),\n cash=usd(new_cash))\n else:\n available_stocks_info = db.execute(\"SELECT stock FROM transactions WHERE id = :current_id\", current_id=session[\"user_id\"])\n available_stocks_list = []\n for element in available_stocks_info:\n if element[\"stock\"] not in available_stocks_list:\n available_stocks_list.append(element[\"stock\"])\n\n return render_template(\"sell.html\", available_stocks=available_stocks_list)", "def sell():\n\n if request.method == \"POST\":\n entry = db.execute(\"SELECT * FROM users WHERE id=:id\",\n id=session['user_id'])\n user = entry[0]['username']\n owned = db.execute(\"SELECT * FROM transactions WHERE user=:user GROUP BY symbol HAVING SUM(shares) > 0\",\n user=user)\n symbol = request.form.get(\"symbol\")\n shares = int(request.form.get(\"shares\"))\n\n if not symbol:\n return apology(\"please select a valid symbol\")\n\n target_stock = db.execute(\"SELECT *, sum(shares) FROM transactions WHERE user=:user AND symbol=:symbol\",\n user=user, symbol=symbol)\n print(target_stock)\n if not shares:\n return apology(\"must provide how many shares to sell\")\n\n elif shares > target_stock[0]['sum(shares)'] or shares < 1:\n return apology(\"shares must be more than 0 and less than \" + str(target_stock[0]['shares']))\n\n query = lookup(symbol)\n price = query['price']\n name = query['name']\n cash = entry[0]['cash']\n\n db.execute(\"INSERT INTO transactions (id, user, symbol, name, price, shares) VALUES(NULL, :user, :symbol, :name, :price, :shares)\",\n user=user, symbol=symbol, name=target_stock[0]['name'], price=price, shares=-int(shares))\n db.execute(\"UPDATE users SET cash=:cash WHERE id = :id\",\n cash=cash+price*shares, id=session['user_id'])\n\n return 
redirect(url_for(\"index\"))\n\n else:\n entry = db.execute(\"SELECT * FROM users WHERE id=:id\",\n id=session['user_id'])\n user = entry[0]['username']\n owned = db.execute(\"SELECT * FROM transactions WHERE user=:user GROUP BY symbol HAVING SUM(shares) > 0\",\n user=user)\n\n return render_template(\"sell.html\", stocks=owned)", "def buy():\n if request.method == \"POST\":\n\n stock = lookup(request.form.get(\"symbol\"))\n\n # This took a while for check to confirm. First check that user input is digit.\n if not request.form.get(\"shares\").isdigit():\n return apology(\"Inavalid number of shares\")\n numOfShares = request.form.get(\"shares\")\n\n # If request is POST firstly check if anything has been submitted.\n if not request.form.get(\"symbol\"):\n return apology(\"You haven't typed a symbol\")\n # if stock lookup request is None or if the numOfShares is not a number of 1 or higher return apologies.\n if stock is None:\n return apology(\"This doesn't seem to be a valid symbol, try again\")\n # userID and user serparate in case both are required.\n userID = session[\"user_id\"]\n user = db.execute(\"SELECT * FROM users WHERE id = :id\", id=userID)\n #funds is a float and can be multiplied by number of shares\n funds = float(user[0][\"cash\"])\n purchasePrice = stock[\"price\"] * int(numOfShares)\n\n date_time = datetime.now().strftime('%d-%m-%Y %H:%M:%S')\n\n\n if funds < purchasePrice:\n return apology(\"You don't have sufficient funds to make this purchase\")\n else:\n # Take price off total cash for current user.\n db.execute(\"UPDATE users SET cash = cash - :purchasePrice WHERE id = :userID\", purchasePrice=purchasePrice, userID=userID)\n # Insert into transactions table the id, symbol, number of share bought, price per share, the time,date and the BUY order.\n db.execute(\"INSERT INTO transactions (id, symbol, num_shares, price_ps, date_time, buy_or_sell) VALUES (:id, :symbol, :num_shares, :price_ps, :date_time, :buy_or_sell)\",\n id=userID, symbol=stock[\"symbol\"], num_shares=numOfShares, price_ps=stock[\"price\"], date_time=date_time, buy_or_sell=\"BUY\")\n # stockowned allows search of portfolio table for results that have userID and the bought stock.\n stockOwned = db.execute(\"SELECT * FROM portfolio WHERE symbol=:symbol AND id=:userID\", symbol=stock[\"symbol\"], userID=userID)\n # If there are nor results (not stockowned) then insert into portfolio\n if not stockOwned:\n db.execute(\"INSERT INTO portfolio (id, symbol, numOwned, pricePerShare, totalValue) VALUES (:userID, :symbol, :numOwned, :pricePerShare, :totalValue)\",\n userID=userID, symbol=stock[\"symbol\"], numOwned=numOfShares, pricePerShare=stock[\"price\"], totalValue=purchasePrice)\n # Other wise update the current results. Had to ensuer numOf Share was floas was sotred as a str. 
Using indexes of stockowned for values.\n else:\n newNumOwned = stockOwned[0][\"numOwned\"] + float(numOfShares)\n newTotalValue = stockOwned[0][\"totalValue\"] + purchasePrice\n newPPS = \"%.2f\"%(newTotalValue / newNumOwned)\n db.execute(\"UPDATE portfolio SET numOwned = :newNumOwned, totalValue = :newTotalValue, pricePerShare = :newPPS WHERE symbol=:symbol AND id=:userID\",\n newNumOwned=newNumOwned, newTotalValue=newTotalValue, newPPS=newPPS, symbol=stock[\"symbol\"], userID=userID)\n\n return redirect(\"/\")\n\n # If a GET request, return the buy.html template.\n else:\n return render_template(\"buy.html\")", "def buy():\n\n # User reached route via POST (as by submitting a form via POST)\n if request.method == \"POST\":\n\n # Access form data\n symbol = request.form.get(\"symbol\")\n shares = request.form.get(\"shares\")\n\n # Access user's id\n user_id = session[\"user_id\"]\n\n # Ensure symbol was submitted\n if not symbol:\n return apology(\"must provide symbol\", 400)\n\n # Ensure shares was submitted\n if not shares:\n return apology(\"must provide shares\", 400)\n\n # Check if submitted shares string is a positive integer\n if not shares.isdigit() :\n return apology(\"shares is not a number\", 400)\n # Shares is valid\n else:\n shares = int(shares)\n\n # Obtain quote using lookup function\n QUOTED = lookup(symbol)\n\n # Ensure valid symbol was submitted\n if QUOTED is None:\n return apology(\"invalid symbol\", 400)\n\n # Check if user has enough cash to buy shares\n cash = db.execute(\"SELECT cash FROM users WHERE id = ?\", user_id)[0][\"cash\"]\n cost = QUOTED[\"price\"] * shares\n if cash < cost:\n return apology(\"can't afford\", 400)\n\n # New amount of cash user has after buying shares\n new_cash_total = cash - cost\n\n # Update cash in users table for user\n db.execute(\"UPDATE users SET cash = ? WHERE id = ?\", new_cash_total, user_id)\n\n # Insert buy log into history table\n db.execute(\"INSERT INTO history (user_id, symbol, shares, price, transacted) VALUES (?, ?, ?, ?, datetime('now'))\",\n user_id, QUOTED[\"symbol\"], shares, QUOTED[\"price\"])\n\n # Keep track of shares in shares table\n current_shares = db.execute(\"SELECT shares_count FROM shares WHERE user_id = ? AND symbol = ?\", user_id, QUOTED[\"symbol\"])\n\n # If shares have not been bought before\n if not current_shares:\n db.execute(\"INSERT INTO shares VALUES (?, ?, ?, ?, ?, ?)\",\n user_id, QUOTED[\"symbol\"], QUOTED[\"name\"], shares, QUOTED[\"price\"], QUOTED[\"price\"])\n\n # If shares have been bought before\n else:\n new_shares_total = current_shares[0][\"shares_count\"] + shares\n shares_value_total = new_shares_total * QUOTED[\"price\"]\n db.execute(\"UPDATE shares SET shares_count = ?, price = ?, total = ? WHERE user_id = ? 
AND symbol = ?\",\n new_shares_total, QUOTED[\"price\"], shares_value_total, user_id, QUOTED[\"symbol\"])\n\n # Redirect user to home page\n flash(\"Bought!\", \"info\")\n return redirect(\"/\")\n\n # User reached route via GET (as by clicking a link or via redirect)\n else:\n return render_template(\"buy.html\")", "def buy():\n # Ensure there is proper symbol\n if request.method == \"GET\":\n return render_template(\"buy.html\")\n\n else:\n quote = lookup(request.form.get(\"symbol\"))\n\n if not quote:\n return apology(\"Please enter a valid stock symbol\")\n\n # Ensure proper number of shares\n try:\n share = int(request.form.get(\"shares\"))\n if share < 0:\n return apology(\"Shares must be positive\")\n except:\n return apology(\"Shares msut be positive integer\")\n # Total Amount the user have to pay\n total_amount = quote[\"price\"] * share\n\n # Taking user's cash in account\n cash = db.execute(\"SELECT cash FROM users WHERE id=:id\",id=session[\"user_id\"])\n if float(cash[0][\"cash\"]) >= total_amount:\n # Update history table\n # Update do here bro\n # Update cash of user\n db.execute(\"UPDATE users SET cash = cash - :purchase WHERE id = :id\",id=session[\"user_id\"], purchase=(quote[\"price\"] * float(share)))\n\n # Select the users share of that symbol\n user_share = db.execute(\"SELECT shares FROM portfolio WHERE id=:id\",id=session[\"user_id\"])\n\n # If there is no stock in user's portfolio\n if not user_share:\n db.execute(\"INSERT INTO portfolio(id, name, shares, price, total, symbol) VALUES(:id, :name, :shares, :price, :total, :symbol)\",id=session[\"user_id\"]\n , name=quote[\"name\"], shares=share, price = usd(quote[\"price\"]), total = usd(total_amount), symbol = quote[\"symbol\"])\n #else increment share count\n else:\n total_shares = user_share[0][\"shares\"] + share\n db.execute(\"UPDATE portfolio SET shares = :shares WHERE id = :id AND symbol = :symbol\", shares = total_shares, id = session[\"user_id\"], symbol=quote[\"symbol\"])\n return redirect(\"/\")\n else:\n return apology(\"You Dont have enough cash \", 406)\n # User reach via another route(get)", "def buy():\n\n if request.method == \"POST\":\n symbol = request.form.get('symbol')\n shares = int(request.form.get(\"shares\"))\n quote = lookup(symbol)\n userid = session[\"user_id\"]\n\n if quote is None:\n return apology(\"Incorrect symbol, try again\", 400)\n else:\n rows = db.execute(\"SELECT cash FROM users WHERE id = :userid\",\n userid=userid)\n cash = rows[0][\"cash\"]\n price = quote[\"price\"]\n tot = price * shares\n\n if cash < tot:\n return apology(\"you can't afford this stock\")\n else:\n db.execute(\"UPDATE users SET cash = cash - :tot WHERE id = :userid\", tot=tot, userid=userid)\n db.execute(\"\"\"INSERT INTO purchase (userid, symbol, shares, tot)\n VALUES (:userid, :symbol, :shares, :tot)\"\"\", userid=userid,\n symbol=symbol, shares=shares, tot=tot)\n flash(\"Bought!\")\n return redirect(\"/\")\n else:\n return render_template(\"buy.html\")", "def buy():\n if request.method == \"POST\":\n # Ensures symbol was submitted\n if not request.form.get(\"symbol\"):\n return apology(\"Must provide symbol\", 400)\n # Ensures shares was submitted\n if not request.form.get(\"shares\"):\n return apology(\"Must provide amount of shares\", 400)\n # Ensures what is inputed for shares is numeric\n if not request.form.get(\"shares\").isdigit():\n return apology(\"Must provide a valid amount of shares\", 400)\n\n # Sets quote to the information about symbol inputed by user\n quote = 
lookup(request.form.get(\"symbol\"))\n # Ensures symbol is a valid symbol that has a quote\n if not quote:\n return apology(\"Symbol invalid\", 400)\n # Cost of stock\n cost = quote[\"price\"]\n # Symbol of stock\n symbol = quote[\"symbol\"]\n # Name of stock\n name = quote[\"name\"]\n # Finds the amount of money user has to spend on stocks\n amount = db.execute(\"SELECT cash FROM users WHERE id = :userid\", userid=session[\"user_id\"])\n # The virst value in the array is the amount of money user can spend\n money = amount[0][\"cash\"]\n # Total amount of money needed to buy the amount and type of stock user has inputed\n total = float(request.form.get(\"shares\")) * cost\n # If user is able to afford the stock(s), update the cash colomn and add info to portfolio table\n if money >= total:\n # Remaining is the amount of cash a user has left after buying the stock\n remaining = money - total\n # Inserts amount remaining into the cash field\n db.execute(\"UPDATE users SET cash = ':remaining' WHERE id=:userid\", remaining=remaining, userid=session[\"user_id\"])\n # Logs stock transaction in portfolio\n db.execute(\"INSERT INTO portfolio (userid, symbol, price, shares, TOTAL, transacted, name) VALUES(:userid, :symbol, :price, :shares, :TOTAL, :transacted, :name)\",\n userid=session[\"user_id\"], symbol=symbol, price=cost, shares=request.form.get(\"shares\"), TOTAL=total, transacted=datetime.datetime.now(), name=name)\n\n # If user cannot afford stock(s), return apology\n else:\n return apology(\"You do not have enough money\", 400)\n\n # Return back to index page\n return redirect(\"/\")\n\n # User reached route via GET (as by clicking a link or via redirect)\n else:\n return render_template(\"buy.html\")", "def buy():\n \n # if user reached route via POST, check all fields are filled\n if request.method == \"POST\":\n if not request.form.get(\"symbol\") or not request.form.get(\"shares\"):\n return apology(\"must provide symbol and number of shares\")\n \n # use lookup function to get stock info\n quote = lookup(request.form.get(\"symbol\"))\n \n # ensure validity of form\n if quote == None:\n return apology(\"invalid symbol\")\n if not request.form.get(\"shares\").isdigit():\n return apology(\"must provide positive integer\")\n shares = int(request.form.get(\"shares\"))\n price = round(float(quote[\"price\"]),2)\n if shares < 1:\n return apology(\"must provide a positive integer of shares\")\n \n # compare user's cash amount to calculated cost of shares\n cash = db.execute(\"SELECT cash FROM users WHERE id = :id\", id=session[\"user_id\"])\n cost = round(float(shares * price),2)\n \n # return error if not enough cash\n if cost > cash[0][\"cash\"]:\n return apology (\"insufficient funds\")\n \n # if sufficient cash present, update users, portfolio and history tables with new info\n else:\n db.execute(\"UPDATE users SET cash = cash - :cost WHERE id = :id\", cost=cost, id = session[\"user_id\"])\n db.execute(\"UPDATE portfolio SET shares = shares + :shares WHERE id = :id AND symbol = :symbol\", id=session[\"user_id\"],symbol=quote[\"symbol\"],shares=shares)\n db.execute(\"INSERT OR IGNORE INTO portfolio (id,symbol,shares) VALUES (:id,:symbol,:shares)\",id=session[\"user_id\"],symbol=quote[\"symbol\"],shares=shares)\n db.execute(\"INSERT INTO history (id,symbol,shares,price,date) VALUES (:id,:symbol,:shares,:price,datetime('now'))\",id=session[\"user_id\"], symbol=quote[\"symbol\"],shares=shares,price=price)\n \n flash('Bought!')\n return redirect(url_for(\"index\"))\n \n # else if user reached 
route via GET\n else:\n return render_template(\"buy.html\")", "def sell():\n if request.method == \"POST\":\n if not request.form.get(\"symbol\"):\n return apology(\"must provide symbol\")\n elif not request.form.get(\"sharesnumber\"):\n return apology(\"must provide no of shares to sell\")\n elif '.' in request.form.get(\"sharesnumber\"):\n return apology(\"No of shares is positive integer Invalid!!\")\n elif not request.form.get(\"sharesnumber\").isdigit():\n return apology(\"No of shares is positive integer Invalid!!\")\n elif not int(request.form.get(\"sharesnumber\")) > 0:\n return apology(\"No of shares is positive value Invalid!!\")\n \n result_dict = lookup(request.form.get(\"symbol\"))\n \n if result_dict == None:\n return apology(\"Symbol does not exist\")\n \n \n #Check No of Shares\n no_of_shares = db.execute(\"SELECT * FROM netshares WHERE user_id = :id AND symbol = :symbol\",id=session[\"user_id\"],symbol =request.form.get(\"symbol\"))\n no_of_shares = int(no_of_shares[0]['shares'])\n if int(request.form.get(\"sharesnumber\")) > no_of_shares:\n return apology(\"Sorry!! Don't Have Enough shares\")\n \n result_cash = db.execute(\"SELECT * from users where id = :id\",id=session[\"user_id\"])\n net_cash = result_cash[0][\"cash\"]\n net_worth = int(request.form.get(\"sharesnumber\")) * result_dict['price']\n \n \n \n #Update Cash\n net_cash = net_cash + net_worth\n db.execute(\"UPDATE users SET cash = :cash WHERE id = :id\",cash= net_cash,id=session[\"user_id\"])\n \n #Update History Tables\n \n db.execute(\"INSERT INTO history(user_id,symbol,price,shares) VALUES(:id,:symbol,:price,:shares) \",id=session[\"user_id\"],symbol=result_dict['symbol'],price=result_dict['price'],shares=(-1)*int(request.form.get(\"sharesnumber\")))\n \n #Check Whether user has shares for same symbol\n rows = db.execute(\"SELECT * FROM netshares WHERE user_id = :id AND symbol=:symbol\",id=session[\"user_id\"],symbol=result_dict['symbol'])\n #Update NetShares Table\n if len(rows) == 0:\n db.execute(\"INSERT INTO netshares(user_id,symbol,shares) VALUES(:id,:symbol,:shares)\",id=session[\"user_id\"],symbol=result_dict['symbol'],shares=request.form.get(\"sharesnumber\"))\n else:\n db.execute(\"UPDATE netshares SET shares=:shares WHERE user_id = :id AND symbol=:symbol\",shares= -int(request.form.get(\"sharesnumber\"))+int(rows[0]['shares']),id=session[\"user_id\"],symbol=result_dict['symbol'])\n return redirect(url_for(\"index\"))\n \n else:\n return render_template(\"sell.html\")\n #return apology(\"TODO\")", "def sell():\n if request.method == \"POST\":\n dict=lookup(request.form.get(\"symbol\"))\n if not request.form.get(\"symbol\") or not request.form.get(\"shares\") or not lookup(request.form.get(\"symbol\")):\n return apology(\"Must provide valid symbol and positive integer\",400)\n else:\n row=db.execute(\"SELECT *FROM portofolio WHERE symbol=:s AND user_id=:u_i\",s=request.form.get(\"symbol\"),u_i=session[\"user_id\"])\n if len(row) == 0 or int(request.form.get(\"shares\")) > row[0][\"shares\"]:\n return apology(\"you don't have enough shares of this company\",400)\n else:\n db.execute(\"INSERT INTO history(symbol,shares,price,transacted,user_id,status) VALUES (:s,:sh,:p,:t,:u_i,:status)\",s=dict[\"symbol\"],sh=int(request.form.get(\"shares\")),p=dict[\"price\"],t=time.asctime( time.localtime(time.time())),u_i=session[\"user_id\"],status='sold')\n db.execute(\"UPDATE portofolio SET shares =shares-:sh, price=:p, total=total-:t WHERE symbol=:s AND 
user_id=:u_i\",sh=int(request.form.get(\"shares\")),p=dict[\"price\"],t=dict[\"price\"] * int(request.form.get(\"shares\")),s=dict[\"symbol\"],u_i=session[\"user_id\"])\n db.execute(\"UPDATE users SET cash=cash+:extra WHERE id=:i\",extra=int(request.form.get(\"shares\")) * dict[\"price\"],i=session[\"user_id\"])\n db.execute(\"DELETE FROM portofolio WHERE shares=0\")\n return redirect(\"/\")\n else:\n rows=db.execute(\"SELECT *FROM portofolio where user_id=:u_i \",u_i=session[\"user_id\"])\n arr=[]\n for row in rows:\n arr.append(row['symbol'])\n return render_template(\"selling.html\",arr=arr)", "def sell():\n userid = session[\"user_id\"]\n stocks = db.execute(\"SELECT symbol FROM purchase WHERE userid = :userid GROUP BY symbol\",userid=userid)\n\n if request.method == \"POST\":\n symbol_sell = request.form.get(\"symbol\")\n shares_sell = float(request.form.get(\"shares\"))\n shares_info = db.execute(\"SELECT SUM(shares) AS shares_sum FROM purchase\\\n WHERE userid = :userid GROUP BY symbol HAVING symbol = :symbol\", userid=userid, symbol=symbol_sell)\n if shares_info[0][\"shares_sum\"] < shares_sell:\n return apology(\"You don't have that many shares\", 400)\n else:\n current = lookup(symbol_sell)\n price = current[\"price\"]\n amount = -shares_sell * price\n cash = db.execute(\"SELECT cash FROM users WHERE id =:userid\", userid=userid)\n balance = cash[0][\"cash\"] - amount\n db.execute(\"INSERT INTO purchase (userid, symbol, shares, tot) VALUES(:userid, :symbol, :shares, :tot)\",\n userid=userid, symbol=symbol_sell, shares=-shares_sell, tot=amount)\n db.execute(\"UPDATE users SET cash = :balance WHERE id = :userid\", balance=balance, userid=userid)\n flash(\"SOLD!!\")\n return redirect(\"/\")\n else:\n list_symbol = list()\n for symbol in stocks:\n shares_info = db.execute(\"SELECT SUM(shares) AS shares_sum FROM purchase\\\n WHERE userid = :userid GROUP BY symbol HAVING symbol = :symbol\", userid = userid, symbol=symbol[\"symbol\"])\n current_shares = shares_info[0]\n if shares_info[0][\"shares_sum\"]:\n list_symbol.append(symbol[\"symbol\"])\n return render_template(\"sell.html\", list_symbol=list_symbol)", "def buy():\n if request.method == \"GET\":\n return render_template(\"buy.html\")\n else:\n symbol = request.form.get(\"symbol\")\n shares = int(request.form.get(\"shares\"))\n # run lookup function\n dict_3 = lookup(symbol)\n if not dict_3:\n return apology(\"invalid symbol\")\n else:\n symbol = dict_3[\"symbol\"].upper()\n name = dict_3[\"name\"]\n price = dict_3[\"price\"]\n row_stock = db.execute(\"SELECT id FROM stocks WHERE symbol==:symbol\", symbol=symbol)\n row_cash = db.execute(\"SELECT cash FROM users WHERE id==:user_id\", user_id=session[\"user_id\"])\n new_cash = row_cash[0][\"cash\"] - (price*shares)\n if new_cash < 0:\n return apology(\"Not enough cash\")\n # Add new stock symbol to table stocks\n if not row_stock:\n db.execute(\"INSERT INTO stocks(symbol, name) VALUES(:symbol, :name)\", symbol=symbol, name=name )\n # Get id from new inserted stock\n row_stock = db.execute(\"SELECT id FROM stocks WHERE symbol==:symbol\", symbol=symbol)\n # Insert new transaction in 'history' table\n db.execute(\"INSERT INTO history(user_id, stock_id, price, shares, buy) VALUES(:user_id, :stock_id, :price, :shares, :buy)\", user_id=session[\"user_id\"], stock_id=row_stock[0][\"id\"], price=price, shares=shares, buy=1)\n # INSERT information in 'portfolio' table\n row_portfolio = db.execute(\"SELECT stock_id FROM portfolio WHERE user_id==:user_id and stock_id=:stock_id\", 
user_id=session[\"user_id\"], stock_id=row_stock[0][\"id\"])\n if not row_portfolio:\n db.execute(\"INSERT INTO portfolio(user_id, stock_id, shares) VALUES(:user_id, :stock_id, :shares)\", user_id=session[\"user_id\"], stock_id=row_stock[0][\"id\"], shares=shares)\n else:\n # UPDATE shares in 'portfolio' table\n shares_db = db.execute(\"SELECT shares FROM portfolio WHERE user_id==:user_id and stock_id==:stock_id\", user_id=session[\"user_id\"], stock_id=row_stock[0][\"id\"])\n if shares_db:\n new_shares = shares_db[0][\"shares\"]+shares\n db.execute(\"UPDATE portfolio SET shares==:shares WHERE user_id==:user_id and stock_id==:stock_id\", user_id=session[\"user_id\"], stock_id=row_stock[0][\"id\"], shares=new_shares)\n # Update cash in 'users' table\n db.execute(\"UPDATE users SET cash==:cash WHERE id==:user_id\", user_id=session[\"user_id\"], cash=new_cash)\n # message to be retrieved in portfolio.html when user buys stock\n flash('Bought!')\n return redirect(\"/\")", "async def buy(self, ctx, quantity: int, symbol: str):\r\n symbol = symbol.upper()\r\n author = ctx.author\r\n with DB() as db:\r\n company = await self.get_active_company(ctx, db, author)\r\n await self.market_open_check(ctx)\r\n await self.stock_symbol_check(ctx, db, symbol)\r\n \r\n price = self.iex.price(symbol)\r\n cost = quantity * price\r\n if company.balance < cost:\r\n await ctx.send(f\"{company.name}\\nBalance: {company.balance} USD\\nPurchase cost: {cost} USD\")\r\n raise StonksError()\r\n\r\n value = price * quantity\r\n self.iex.buy(db, company.id, symbol, quantity, price)\r\n await ctx.send(f\"``-{value} {company.name} ⯮ {quantity} {symbol} @ {price}``\")", "def buy():\n\n # if user reached route via GET return them an input form\n if request.method == \"GET\":\n return render_template(\"buy.html\")\n\n # if user reached route via POST (as by submitting a form via POST)\n elif request.method == \"POST\":\n\n # get id as it is used many times\n id = session[\"user_id\"]\n\n # get symbol as it is used many times\n symbol = request.form.get(\"symbol\")\n\n # get share volume requested\n volume = int(request.form.get(\"volume\"))\n\n # ensure stock symbol was submitted\n if not symbol:\n return apology(\"you must provide a stock symbol\")\n\n # ensure positive volume (integer rule handled elsewhere)\n elif volume <= 0:\n return apology(\"volume must be integer greater than 0\")\n\n # lookup stock on yahoo\n stock_info = lookup(symbol)\n\n # if error looking stock up\n if not stock_info:\n return apology(\"that stock symbol doesn't exist\")\n\n # query database for cash balance\n cash = db.execute(\"SELECT cash FROM users WHERE id = :id\", id=id)\n cash = cash[0]['cash']\n\n # cost of requested shares\n purchase_cost = volume * stock_info['price']\n\n # if sufficient cash, make purchase, else return apology\n if purchase_cost <= cash:\n\n # check if user already owns any stock in this company\n existing = db.execute(\"SELECT num_shares FROM portfolio WHERE id = :id AND symbol = :symbol\", id=id, symbol=symbol)\n\n # if no existing shares, add them\n if not existing:\n new = db.execute(\"INSERT INTO portfolio (id, symbol, num_shares) VALUES(:id, :symbol, :num_shares)\", id=id, symbol=symbol, num_shares=volume)\n\n # if there are existing shares, add new volume to them\n else:\n add = db.execute(\"UPDATE portfolio SET num_shares = :num_shares WHERE id = :id AND symbol = :symbol\", num_shares=existing[0]['num_shares'] + volume, id=id, symbol=symbol)\n\n # set date string\n dstring = 
time(str(datetime.datetime.utcnow()))\n\n # update transaction history\n result2 = db.execute(\"INSERT INTO `transaction` (id, symbol, volume, share_price, dtstamp) VALUES(:id, :symbol, :volume, :share_price, :dtstamp)\", id=id, symbol=symbol, volume=volume, share_price=stock_info['price'], dtstamp=dstring)\n\n # reduce cash balance\n result = db.execute(\"UPDATE users SET cash = :cash WHERE id = :id\", cash=cash-purchase_cost, id=id)\n\n # redirect user to home page\n return redirect(url_for(\"index\"))\n else:\n return apology(\"insufficient funds\")", "def sell():\n if request.method == \"POST\":\n bef = db.execute(\"SELECT symbol FROM ind WHERE user_id = ?\", session[\"user_id\"])\n if not request.form.get(\"symbol\"):\n return apology(\"Please specify which valid stock to sell\", 403)\n symbol = request.form.get(\"symbol\")\n p = db.execute(\"SELECT COUNT(symbol) FROM ind WHERE user_id = ?\", session[\"user_id\"])\n q = 0\n\n for i in range(int(p[0][\"COUNT(symbol)\"])):\n if symbol == bef[i][\"symbol\"]:\n q = 1\n if q == 0:\n return apology(\"Please specify which valid stock to sell\", 403)\n if not request.form.get(\"shares\"):\n return apology(\"Please specify how many stocks you want to sell\", 403)\n if int(request.form.get(\"shares\")) < 1:\n return apology(\"Please input a positive integer\", 403)\n if request.form.get(\"shares\").isnumeric() != True:\n return apology(\"Please input a positive integer\", 403)\n hav = db.execute(\"SELECT nos FROM ind WHERE symbol = ? AND user_id = ?\", request.form.get(\"symbol\"), session[\"user_id\"])\n if int(hav[0][\"nos\"]) < int(request.form.get(\"shares\")):\n return apology(\"You do not own that many shares\", 403)\n shares = int(request.form.get(\"shares\"))\n db.execute(\"CREATE TABLE IF NOT EXISTS sells (user_id INTEGER NOT NULL, symbol TEXT NOT NULL, name TEXT NOT NULL, price NUMERIC NOT NULL, shares INTEGER NOT NULL, cost NUMERIC NOT NULL, time datetime NOT NULL, FOREIGN KEY(user_id) REFERENCES users(id))\")\n bro = db.execute(\"SELECT cash FROM users WHERE id = ?\", session[\"user_id\"])\n cost = (lookup(symbol)[\"price\"]) * int(request.form.get(\"shares\"))\n money = bro[0][\"cash\"]\n money = money + cost\n db.execute(\"UPDATE users SET cash = ? WHERE id = ?\", money, session[\"user_id\"])\n db.execute(\"INSERT INTO sells(user_id, symbol, name, price, shares, cost, time) VALUES (:user_id, :symbol, :name, :price, :shares, :cost, :time)\", user_id = session[\"user_id\"], symbol = lookup(symbol)[\"symbol\"], name = lookup(symbol)[\"name\"], price = lookup(symbol)[\"price\"], shares = shares, cost = cost, time = datetime.datetime.now())\n db.execute(\"INSERT INTO hist(user_id, typ, symbol, name, price, nos, cost, time) VALUES (:user_id, :typ, :symbol, :name, :price, :nos, :cost, :time)\", user_id = session[\"user_id\"], typ = \"SOLD\", symbol = lookup(symbol)[\"symbol\"], name = lookup(symbol)[\"name\"], price = lookup(symbol)[\"price\"], nos = shares, cost = cost, time = datetime.datetime.now())\n\n db.execute(\"UPDATE ind SET nos = ? WHERE symbol = ? AND user_id = ?\", int(hav[0][\"nos\"]) - shares, request.form.get(\"symbol\"), session[\"user_id\"])\n hav = db.execute(\"SELECT nos FROM ind WHERE symbol = ? AND user_id = ?\", request.form.get(\"symbol\"), session[\"user_id\"])\n if int(hav[0][\"nos\"]) == 0:\n db.execute(\"DELETE FROM ind WHERE symbol = ? 
AND user_id = ?\", request.form.get(\"symbol\"), session[\"user_id\"])\n return redirect(\"/\")\n\n else:\n stocks = db.execute(\"SELECT * FROM ind WHERE user_id = ?\", session[\"user_id\"])\n\n return render_template(\"sell.html\", stocks = stocks)", "def sell():\n\n table = db.execute(\"SELECT symbol FROM portfolio WHERE id=:id\", id=session[\"user_id\"])\n symbols = []\n for i in range(len(table)):\n symbols.append(table[i][\"symbol\"])\n\n if request.method == \"POST\":\n\n # Ensure symbol was submitted\n if not request.form.get(\"symbol\"):\n return apology(\"must provide symbol\", 403)\n\n elif not request.form.get(\"shares\"):\n return apology(\"missing shares\", 400)\n\n owned_shares = int(db.execute(\"SELECT shares FROM portfolio where id=:id AND symbol=:symbol\",\n id=session[\"user_id\"], symbol=request.form.get(\"symbol\"))[0][\"shares\"])\n\n if owned_shares < int(request.form.get(\"shares\")):\n return apology(\"Too many shares\", 400)\n\n updated_shares = owned_shares - int(request.form.get(\"shares\"))\n\n # update shares in portfolio\n if updated_shares > 0:\n db.execute(\"UPDATE portfolio SET shares=:shares WHERE id=:id AND symbol=:symbol\",\n shares=updated_shares, id=session[\"user_id\"], symbol=request.form.get(\"symbol\"))\n\n else:\n db.execute(\"DELETE FROM portfolio WHERE id=:id AND symbol=:symbol\",\n id=session[\"user_id\"], symbol=request.form.get(\"symbol\"))\n\n # update cash in database\n quote = lookup(request.form.get(\"symbol\"))\n amount = quote[\"price\"] * float(request.form.get(\"shares\"))\n db.execute(\"UPDATE users SET cash = cash + :amount WHERE id=:id\", amount=amount, id=session[\"user_id\"])\n\n db.execute(\"INSERT INTO histories (symbol, shares, price, id) VALUES(:symbol, :shares, :price, :id)\",\n symbol=quote[\"symbol\"], shares=0-int(request.form.get(\"shares\")), price=usd(quote[\"price\"]), id=session[\"user_id\"])\n\n flash(\"Sold!\")\n return redirect(\"/\")\n\n else:\n return render_template(\"sell.html\", symbols=symbols)", "def field_buy(self, symbol):\r\n\r\n end_percent = 150\r\n current_price = 15#self.get_price()\r\n self.log(current_price)\r\n buys = {}\r\n new_price = current_price * 1.05\r\n while (new_price / current_price) > 150:\r\n self.log(\"New sell at: {}\".format(new_price))\r\n new_price *= 1.05\r\n\r\n self.log(buys)\r\n\r\n return buys", "def buy_stock(self, stock, amount, date=None):\n if date is None:\n date = self.date\n\n if type(date) is not datetime.datetime and type(date) is not pd.tslib.Timestamp:\n date = datetime.datetime.strptime(date, \"%Y-%m-%d\")\n\n self.order_stock(stock, self.stock_data[stock].position['Position'][date] + amount, date)", "def sell():\n if request.method == \"POST\":\n symbol = request.form.get('symbol')\n shares = request.form.get('shares')\n\n if not symbol or not shares or symbol == \"Select Stock\":\n return apology(\"Please input a valid symbol and number of shares\")\n elif int(shares) <= 0:\n return apology(\"Please input a positive number for shares\")\n else:\n symbol = symbol.lower()\n shares = int(shares)\n get_cur_shares = db.execute(\n \"SELECT SUM(shares) FROM History WHERE id = :id AND symbol = :symbol GROUP BY symbol\", id=session['user_id'], symbol=symbol)\n try:\n cur_shares = [share['SUM(shares)'] for share in get_cur_shares][0]\n except IndexError:\n return apology(\"Please input a valid number of shares\")\n if shares > cur_shares:\n return apology(\"Sorry, you don't have enough shares to sell\")\n else:\n cur_price = float(lookup(symbol)['price'])\n sell_val = 
cur_price * float(shares)\n sell_val = float(sell_val)\n get_bal = db.execute(\"SELECT cash FROM users WHERE id = :id\", id=session['user_id'])\n balance = [bal['cash'] for bal in get_bal][0]\n balance = float(balance)\n new_balance = balance + sell_val\n company = lookup(symbol)['name']\n new_database_balance = db.execute(\"UPDATE users SET cash = :cash WHERE id = :id\",\n cash=new_balance, id=session['user_id'])\n new_database_transaction = db.execute(\"INSERT INTO History ('symbol', 'company', 'shares', 'price', 'totalprice', 'id', 'transaction_type') VALUES (:symbol, :company, :shares, :price, :totalprice, :id, :transaction_type)\",\n symbol=symbol, company=company, shares=-shares, price=cur_price,\n totalprice=sell_val, id=session['user_id'], transaction_type=\"SELL\")\n return redirect(\"/\")\n else:\n get_symbols = db.execute(\n \"SELECT symbol FROM History WHERE id = :id GROUP BY symbol HAVING SUM(shares) > 0\", id=session['user_id'])\n if not get_symbols:\n return apology(\"Sorry, could not find valid symbol\")\n else:\n symbols = [symbol['symbol'] for symbol in get_symbols]\n return render_template(\"sell.html\", symbols=symbols)", "def sell():\n if request.method == \"POST\":\n symbol = request.form.get(\"Symbol\")\n if symbol is None:\n return apology(\"Enter a symbol\", 403)\n shares = request.form.get(\"Shares\")\n if int(shares) < 0:\n return apology(\"Please enter postive shares\", 403)\n\n stock = lookup(symbol)\n rows = db.execute(\"SELECT Symbol, SUM(Shares) as totalShares FROM cash WHERE id=:id GROUP BY Symbol HAVING totalShares > 0\", id=session[\"user_id\"])\n for row in rows:\n if row[\"Symbol\"] == symbol:\n if int(shares) > row[\"totalShares\"]:\n return apology(\"Too many shares\")\n\n rows = db.execute(\"SELECT Cash FROM cash WHERE id=:id\", id=session[\"user_id\"])\n cash = rows[0][\"Cash\"]\n\n current_cash = cash + int(shares)*stock[\"price\"]\n db.execute(\"UPDATE cash SET Cash=:current_cash WHERE id=:id\", current_cash = current_cash, id=session[\"user_id\"])\n db.execute(\"INSERT INTO cash (id, Symbol, Name, Shares) VALUES (:id, :Symbol, :Name, :Shares)\", id=session[\"user_id\"], Symbol=stock[\"symbol\"], Name=stock[\"name\"], Shares=-1*int(shares))\n\n flash(\"Sold!\")\n return redirect(\"/\")\n\n else:\n rows = db.execute(\"SELECT Symbol FROM cash WHERE id=:id GROUP BY Symbol HAVING SUM(Shares) > 0\", id=session[\"user_id\"])\n # Shorthand for obtaining the symbol for every row in rows. 
So would output AAPL e.g.\n return render_template(\"sell.html\", symbols=[ row[\"Symbol\"] for row in rows ])", "def buy():\n\n if request.method == \"POST\":\n numShares = 0\n try:\n numShares = float(request.form.get(\"shares\"))\n except ValueError:\n return apology(\"Enter a numerical value!\", 400)\n if numShares % 1 != 0:\n return apology(\"Fractional Shares not allowed!\", 400)\n if numShares <= 0:\n return apology(\"Enter a number greater than 0!\", 400)\n if not request.form.get(\"symbol\"):\n return apology(\"Enter a symbol!\", 400)\n if not request.form.get(\"shares\"):\n return apology(\"Enter a number of shares!\", 400)\n\n company = lookup(request.form.get(\"symbol\"))\n if not company:\n return apology(\"Invalid ticker symbol\", 400)\n price = float(company[\"price\"])\n total = float(price * numShares)\n symbl = company[\"symbol\"]\n userRows = db.execute(\"SELECT cash FROM users WHERE id = :userid\", userid=session.get(\"user_id\"))\n remainingCash = float(userRows[0][\"cash\"])\n if total > remainingCash:\n return apology(\"You cannot afford the stock(s)!\", 400)\n else:\n currentUser = session.get(\"user_id\")\n purchased = db.execute(\"INSERT INTO portfolio (UserID, Symbol, Company, NumberOfShares, UnitPrice, TotalPrice) VALUES(:userid, :symbol, :name, :shares, :unitPrice, :totalPrice)\", userid=currentUser, symbol=symbl, name=company['name'], shares=numShares, unitPrice=price, totalPrice=total)\n\n\n if not purchased:\n return apology(\"Unable to purchase\", 400)\n else:\n remainingCash = remainingCash - total\n db.execute(\"UPDATE users set cash=:balance WHERE id=:userid\", balance=remainingCash, userid=currentUser)\n '''Update history'''\n dateNow = datetime.datetime.now()\n historized = db.execute(\"INSERT INTO history (Symbol, Shares, Price, Date, UserID) VALUES(:symbol, :shares, :price, :date, :userid)\", symbol = symbl, shares = numShares, price = total, date = dateNow, userid = session.get(\"user_id\"))\n '''Update history end'''\n return redirect(\"/\")\n\n\n else:\n return render_template(\"buy.html\")", "def buy():\n\n # User reached route via POST (as by submitting a form via POST)\n if request.method == \"POST\":\n\n # Ensure stock symbol and share validity\n if lookup(request.form.get(\"symbol\")) == None:\n return apology(\"invalid stock symbol\", 403)\n elif int(request.form.get(\"shares\")) < 1:\n return apology(\"must purchase at least one stock\", 403)\n\n # Compute the value of the purchase\n price = lookup(request.form.get(\"symbol\"))[\"price\"]\n cash = db.execute(\"SELECT cash FROM users WHERE id = :id\", id = session[\"user_id\"])[0][\"cash\"]\n total = price * int(request.form.get(\"shares\"))\n\n # Ensure the user has enough cash to pay for the stocks\n if total > cash:\n return apology(\"not enough cash to purchase\", 403)\n\n # Insert into database that is used to retrieve history\n db.execute(\"INSERT INTO purchase (id, symbol, shares, price, created_at) VALUES(:id,:symbol,:shares,:value, datetime('now'))\", id=session[\"user_id\"], symbol=request.form.get(\"symbol\"), shares=request.form.get(\"shares\"), value=price)\n\n # Insert into database that is used for the index page\n number = db.execute(\"SELECT COUNT(*) FROM purchase WHERE id=:id AND symbol=:symbol\", id=session[\"user_id\"], symbol=request.form.get(\"symbol\"))[0][\"COUNT(*)\"]\n\n # Insert into database if the current stock has not been purchased before\n if number == 1:\n db.execute(\"INSERT INTO summary (id, symbol, shares, price) VALUES(:id,:symbol,:shares,:value)\", 
id=session[\"user_id\"], symbol=request.form.get(\"symbol\"), shares=request.form.get(\"shares\"), value=price)\n\n # Update database if the stock has been purchased before\n else:\n share = db.execute(\"SELECT SUM(shares) FROM purchase WHERE id = :username AND symbol= :symbol\", username=session[\"user_id\"], symbol=request.form.get(\"symbol\"))[0][\"SUM(shares)\"]\n db.execute(\"UPDATE summary SET shares= :shares WHERE (id = :username AND symbol= :symbol)\", shares=share, username=session[\"user_id\"], symbol=request.form.get(\"symbol\"))\n db.execute(\"UPDATE users SET cash = :new\", new = cash - total)\n\n # Redirect users to the index page\n return redirect(\"/\")\n\n # User reached route via GET (as by submitting a form via GET)\n else:\n return render_template(\"buy.html\")", "def buy():\n username = session.get(\"username\")\n # print(f'username: {username}')\n\n if request.method==\"POST\":\n symbol = request.form.get(\"symbol\")\n quantity = request.form.get(\"shares\")\n if not quantity.isdigit() or int(quantity)<=0:\n return apology(\"Quantity must be a positive integer\", 400)\n quantity = int(quantity)\n price = 0\n message = \"\"\n time = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n response = lookup(symbol)\n if not response:\n return apology(\"Invalid symbol\", 400)\n\n price = response[\"price\"]\n name = response[\"name\"]\n cash = db.execute(\"SELECT cash FROM users WHERE username=:username\", username=username)[0][\"cash\"]\n cost = price * float(quantity)\n status = \"bought\"\n if cash >= cost:\n cash -= cost\n db.execute(\"UPDATE users SET cash=:cash WHERE username=:username\", cash=cash, username=username)\n db.execute(\"INSERT INTO history (username, stock_symbol, unit_price, time, quantity, stock_name, status) VALUES (:username, :stock_symbol, :unit_price, :time, :quantity, :name, :status)\",\n username = username, stock_symbol=symbol, unit_price=price, time=time, quantity=quantity, name=name, status=status)\n message = f'Recorded purchase {quantity} share(s) of {name} for total of {usd(cost)}, your remaining cash is {usd(cash)}'\n return render_template(\"buy.html\", message=message)\n else:\n return apology(\"Not enough cash\", 400)\n else:\n return render_template(\"buy.html\")", "def buy(self, date_idx: int, cash_balance: float, buy_budget: float) -> float:\n todays_price: float = float(self.price_history.iat[date_idx, 1])\n bought_shares: float = buy_budget / todays_price\n self.shares = bought_shares\n new_cash_balance: float = cash_balance - buy_budget\n self.last_bought_at_price = todays_price\n if Helpers.is_verbose_on():\n print(f\"{self.ticker}: buy {self.shares:.2f} shares at {todays_price:.2f} \"\n f\"for ${buy_budget:.2f} on date {date_idx}. 
Cash balance: {new_cash_balance:.2f}\")\n return new_cash_balance", "def sell():\n if request.method == \"GET\":\n return render_template(\"sell.html\")\n else:\n tick = request.form.get(\"ticker\")\n quote = lookup(tick)\n if not quote:\n return apology(\"Ticker does not exist\")\n shares = int(request.form.get(\"shares\"))\n if shares <= 0:\n return apology(\"Please input a valid number of shares\")\n money = db.execute(\"SELECT cash FROM users WHERE id = :id\", id=session[\"user_id\"])\n #if shares < int(money[0][\"shares\"]):\n # return apology(\"You don\"t have those shares >:(\")\n db.execute(\"UPDATE users SET cash = cash + :purchase WHERE id = :id\", id=session[\"user_id\"], purchase=(quote[\"price\"] * float(shares)))\n findshares = db.execute(\"SELECT shares FROM purchases WHERE user_id = :id AND ticker=:ticker\", id=session[\"user_id\"], ticker=quote[\"symbol\"])\n \n \n if not findshares:\n return apology(\"You don\"t have those shares >:(\")\n else:\n if int(findshares[0][\"shares\"]) < int(shares):\n return apology(\"You don\"t have those shares >:(\")\n db.execute(\"UPDATE purchases SET shares=:number, total=:total WHERE user_id=:id AND ticker=:ticker\", id=session[\"user_id\"], ticker=quote[\"symbol\"], total=(float(quote[\"price\"])*float(shares)), number=int(findshares[0][\"shares\"]) - int(shares))\n return redirect(url_for(\"index\"))\n\nif __name__ == \"__main__\":", "def sell():\n if request.method == \"GET\":\n rows = db.execute(text(\n \"SELECT symbol, sum(shares) as shares FROM transactions \"\n \"WHERE user_id=:id GROUP BY symbol\"),\n id=session[\"user_id\"])\n symbols = [row[\"symbol\"] for row in rows if row[\"shares\"]]\n return render_template(\"sell.html\", symbols=symbols,\n symbol=request.args.get(\"symbol\"))\n\n if not request.form.get(\"symbol\"):\n return apology(\"missing symbol\", 400)\n elif not request.form.get(\"shares\"):\n return apology(\"missing shares\", 400)\n\n owned_shares = db.execute(text(\n \"SELECT sum(shares) as shares FROM transactions \"\n \"WHERE user_id=:id AND symbol=:symbol\"),\n id=session[\"user_id\"],\n symbol=request.form.get(\"symbol\")).fetchone()[\"shares\"]\n requested_shares = int(request.form.get(\"shares\"))\n if requested_shares > owned_shares:\n return apology(\"too many shares\", 400)\n\n quote = lookup(request.form.get(\"symbol\"))\n db.execute(text(\n \"INSERT INTO transactions (user_id, symbol, shares, price) \"\n \"VALUES (:u, :sy, :sh, :p)\"),\n u=session[\"user_id\"],\n sy=request.form.get(\"symbol\"),\n sh=-requested_shares,\n p=quote[\"price\"])\n sell_price = int(request.form.get(\"shares\")) * quote[\"price\"]\n db.execute(text(\"UPDATE users SET cash=cash+:c WHERE id=:id\"),\n c=sell_price,\n id=session[\"user_id\"])\n flash(\"Sold!\")\n return redirect(\"/\")", "def sell():\n \n user_id = session[\"user_id\"]\n\n if request.method == \"GET\":\n return render_template(\"sell.html\")\n \n if request.method == \"POST\":\n \n # get required symbol\n symbol = request.form.get(\"symbol\").upper()\n try:\n qty = int(request.form.get(\"qty\"))\n except ValueError:\n return apology(\"QTY is empty!\", 403)\n \n # proceed buy function\n sell_result: Tuple[float, str] = sell_shares(db, user_id, symbol, qty )\n if sell_result[0] == -1:\n return apology(sell_result[1], 403)\n\n return redirect(\"/\")", "def buy_to_open(self, symbol, date, price):\n\n # Figure out how much we are willing to spend\n cash_available = self.cash - self.trade_fee\n cash_to_spend = cash_available / self.free_position_slots\n \n # Calculate 
buy_price and number of shares. Fractional shares allowed.\n purchase_price = (1 + self.percent_slippage) * price\n shares = cash_to_spend / purchase_price\n\n # Spend the cash\n self.cash -= cash_to_spend + self.trade_fee\n assert self.cash >= 0, 'Spent cash you do not have.'\n self.portfolio_history.record_cash(date, self.cash) \n\n # Record the position\n positions_by_symbol = self.active_positions_by_symbol\n assert not symbol in positions_by_symbol, 'Symbol already in portfolio.' \n position = Position(symbol, date, purchase_price, shares)\n positions_by_symbol[symbol] = position", "def buy():\n if request.method == \"GET\":\n return render_template(\"buy.html\", symbol=request.args.get(\"symbol\"))\n\n if not request.form.get(\"symbol\"):\n return apology(\"missing symbol\", 400)\n elif not request.form.get(\"shares\"):\n return apology(\"missing shares\", 400)\n quote = lookup(request.form.get(\"symbol\"))\n if not quote:\n return apology(\"invalid symbol\", 400)\n\n cash = db.execute(text(\"SELECT * FROM users WHERE id = :id\"),\n id=session[\"user_id\"]).fetchone()[\"cash\"]\n purchase_price = int(request.form.get(\"shares\")) * quote[\"price\"]\n # Cast decimal.Decimal (from Postgres numeric) to float.\n if purchase_price > float(cash):\n return apology(\"can't afford\", 400)\n\n db.execute(text(\n \"INSERT INTO transactions (user_id, symbol, shares, price) \"\n \"VALUES (:u, :sy, :sh, :p)\"),\n u=session[\"user_id\"],\n sy=request.form.get(\"symbol\"),\n sh=request.form.get(\"shares\"),\n p=quote[\"price\"])\n db.execute(text(\"UPDATE users SET cash=cash-:c WHERE id=:id\"),\n c=purchase_price,\n id=session[\"user_id\"])\n flash(\"Bought!\")\n return redirect(\"/\")", "def buy():\n\n if request.method == \"GET\":\n return render_template(\"buy.html\")\n\n # User reached route via POST (as by submitting a form via POST)\n shares = int(request.form.get(\"shares\"))\n symbol = request.form.get(\"symbol\")\n quote = lookup(symbol)\n\n if not quote:\n return apology(\"invalid symbol\", 404)\n\n price = quote['price']\n value = round(shares * price, 2)\n user = Users.query.get(session.get(\"user_id\"))\n\n if value > user.cash:\n return apology(\"You don't have enough cash\", 406)\n\n record = Records(symbol=quote['symbol'], company_name=quote['name'],\n transact_type=\"buy\", shares=shares, price=price, user_id=user.id)\n user.cash -= value\n db.session.add(record)\n db.session.commit()\n\n flash(\"Bought\")\n return redirect(url_for('index'))", "def buy(self, amount):\n trades = []\n buy_amount = 0\n precision = pow(10, self.pair.get_quote_token().get_decimals() - self.pair.get_base_token().get_decimals())\n for i in range(len(self.book[Trade.WAY_SELL])):\n offer = self.book[Trade.WAY_SELL][i]\n amount_quote = offer.get_quote_amount()\n amount_base = offer.get_base_amount()\n price = offer.get_price()\n\n if amount_base >= amount:\n tmp = int(\"%d\" % (amount / price * precision))\n trade = Trade(self.pair, Trade.WAY_BUY, price, amount, tmp, time.time(), fee_currency=self.pair.get_exchange().get_fee_token())\n buy_amount = buy_amount + trade.get_amount_quote()\n trades.append(trade)\n return trades, int(buy_amount)\n\n '''\n Is the offered amount less than needed, you can only buy the offered amount and continue with next offer.\n '''\n trade = Trade(self.pair, Trade.WAY_BUY, price, amount_base, amount_quote, time.time(), fee_currency=self.pair.get_exchange().get_fee_token())\n buy_amount = buy_amount + trade.get_amount_quote()\n amount = amount - amount_base\n trades = trades + 
[trade]\n\n '''\n Not enough volume or amount to high\n '''\n raise KeyError(\"Not enough offers in orderbook. Low volume or amount to high.\")", "def buy():\n\n if request.method == \"GET\":\n return render_template(\"buy.html\")\n if request.method == \"POST\":\n #Access the form data\n symbol = request.form.get(\"symbol\")\n\n #Check if the shares was an integer\n try:\n quantity = int(request.form.get(\"shares\"))\n except:\n return apology (\"Please enter a whole number\", 400)\n\n\n if int(quantity) < 0:\n return apology (\"Please enter a positive value\", 400)\n\n #Lookup the stock symbol data (price, symbol, company name)\n stock = lookup(symbol)\n\n if not symbol:\n return apology (\"Invalid ticker symbol\", 400)\n\n if not stock:\n return apology (\"Invalid ticker symbol\", 400)\n\n stock_price = stock['price']\n\n #Get the current percent change of the stock\n changePercent = stock['changePercent']\n\n #Created a new table using CREATE TABLE 'portfolio' ('user' text, 'quantity' integer, 'price' numeric(15, 2), 'symbol' text)\n\n #Get the total cash value of the user from the database\n get_cash = db.execute(\"SELECT cash FROM users WHERE id = :id\", id=session['user_id'])\n\n #Convert the get_cash dict to float\n check_cash = float(get_cash[0]['cash'])\n\n #Get the current date and time\n now = datetime.now()\n\n date_time = now.strftime(\"%d/%m/%Y %H:%M:%S\")\n\n if not stock:\n return apology (\"Please enter a valid stock\", 403)\n\n #Compute the total amount of the shares bought (One company stock only)\n total = stock_price * float(quantity)\n\n if total > check_cash:\n return apology(\"Not enough cash\", 403)\n\n #Check if the cash on hand is enough to purchase the order.\n if check_cash > total:\n #Update the total amount of cash in hand by subtracting the ordered stocks.\n db.execute(\"UPDATE users SET cash = cash - :total WHERE id = :id\", id=session[\"user_id\"], total=total)\n\n\n #Check if the total cash is enough for the stock purchase.\n if total < check_cash:\n #Query if the stock symbol is already in the portfolio.\n rows = db.execute(\"SELECT * FROM portfolio WHERE symbol = :symbol AND id = :id\", id=session[\"user_id\"], symbol=symbol)\n\n #Add the stock in the history table\n history = db.execute(\"INSERT INTO history (symbol, quantity, price, transacted, id) VALUES (?, ?, ?, ?, ?)\", symbol, int(quantity), float(stock_price), date_time, session[\"user_id\"] )\n\n #If the stock already exists in the portfolio. 
Update the quantity.\n if len(rows) == 1:\n db.execute(\"UPDATE portfolio SET quantity = quantity + :quantity, total = total + :total, stock_price = :stock_price WHERE id = :id AND symbol = :symbol\", id=session[\"user_id\"], symbol=symbol, quantity=quantity, total=total, stock_price = float(stock_price))\n flash('You successfuly bought the stock')\n else:\n #Insert the user, shares bought, shares price, and the quantity bought in portfolio table.\n db.execute(\"INSERT INTO portfolio (quantity, total, symbol, id, stock_price, name, percent_change) VALUES (?, ?, ?, ?, ?, ?, ?)\", int(quantity), total, symbol, session['user_id'], float(stock_price), stock['name'], changePercent)\n flash('You successfully bought the stock!')\n\n #return redirect (url_for('index'))\n return render_template(\"buy.html\")", "async def trade(self, ctx, sell_amount : float, sell_symbol, \n buy_amount : float, buy_symbol, date=None):\n user = ctx.message.author\n portfolio = GetPortfolio(user.id, util.GetTimestamp(date))\n portfolio.Sell(sell_amount, sell_symbol)\n portfolio.Buy(buy_amount, buy_symbol)\n await self.bot.say('%s\\'s portfolio is now worth $%.2f.' % \n (user, portfolio.Value()))\n portfolio.Save()", "async def stocks(self, ctx):\n\t\tpass", "def buy():\n if request.method == \"GET\":\n return render_template(\"buy.html\")\n\n elif request.method == \"POST\":\n shares = request.form.get(\"shares\")\n symbol = request.form.get(\"symbol\")\n try:\n float(shares)\n except ValueError:\n return apology(\"please input a valid number of shares\")\n try:\n int(shares)\n except ValueError:\n return apology(\"please input a valid number of shares\")\n shares = int(shares)\n\n if not shares or not float(shares) or not float(shares).is_integer() or float(shares) <= 0:\n return apology(\"input a valid number of shares to buy\")\n\n elif not symbol or not lookup(symbol):\n return apology(\"input a valid symbol\")\n\n elif type(shares) != int:\n return apology(\"How did you even get this error?!\")\n\n else:\n quote = lookup(symbol)\n current_price = float(quote[\"price\"])\n company = quote[\"name\"]\n shares_num = int(request.form.get(\"shares\"))\n shares_tcost = float(shares_num * current_price)\n balance = db.execute(\"SELECT cash FROM users WHERE id = :id\", id=session['user_id'])\n\n # balance[0] b/c the returned value of balance is a dict of multiple lists\n flbal = [float(i) for i in list(balance[0].values())]\n for bal in flbal:\n if bal - shares_tcost < 0:\n return apology(\"Sorry, you don't have enough money\")\n else:\n newshares = bal - shares_tcost\n newbalance = db.execute(\"UPDATE users SET cash = :cash WHERE id = :id\", cash=newshares, id=session['user_id'])\n newpurchase = db.execute(\"INSERT INTO History ('symbol', 'company', 'shares', 'price', 'totalprice', 'id', 'transaction_type') VALUES (:symbol, :company, :shares, :price, :totalprice, :id, :transaction_type)\",\n symbol=symbol, company=company, shares=shares_num, price=current_price, totalprice=shares_tcost, id=session['user_id'], transaction_type=\"BUY\")\n\n return redirect('/')", "def sell():\n\n if request.method == \"POST\":\n sellstock = request.form.get(\"symbol\")\n sellq = int(request.form.get(\"shares\"))\n if sellstock == None:\n return apology(\"Please select a stock symbol to sell.\")\n if sellq < 0:\n return apology(\"Please enter a valid quantity of stocks to sell\")\n invq = db.execute(\"SELECT quantity FROM inventory WHERE userid = :uid AND symbol = :sy\",\n {\"uid\":session[\"user_id\"],\"sy\":sellstock})[0][\"quantity\"]\n 
if sellq > invq:\n return apology(\"You don't have enough shares.\")\n stock = lookup(sellstock)\n cost = round(sellq*stock[\"price\"], 2)\n db.execute(\"INSERT INTO shares (stock,symbol,value,quantity,cost,userid) VALUES(:st,:sy,:va,:qu,:co,:uid)\",\n {\"st\":stock[\"name\"],\"sy\":sellstock,\"va\":stock[\"price\"],\"qu\":sellq,\"co\":cost,\"uid\":session[\"user_id\"]})\n db.execute(\"UPDATE inventory SET quantity = :qu WHERE userid =:uid AND symbol = :sy\",\n {\"qu\":(invq-sellq),\"uid\":session[\"user_id\"],\"sy\":sellstock})\n db.execute(\"UPDATE users SET cash = cash + :cash WHERE id =:uid\", {\"cash\":cost,\"uid\":session[\"user_id\"]})\n flash(\"Shares successfully sold!\")\n return redirect(\"/\")\n inventory = db.execute(\"SELECT symbol FROM inventory WHERE userid = :uid\", uid=session[\"user_id\"])\n return render_template(\"sell.html\", context = inventory)", "def buy():\n if request.method == \"POST\":\n\n # Ensure buy order\n if not request.form.get(\"symbol\"):\n return apology(\"must provide valid order info\", 400)\n\n # Ensure buy order\n elif not request.form.get(\"shares\"):\n return apology(\"must provide valid order info\", 400)\n\n # Ensure stock is balid else display an apology\n elif lookup(request.form.get(\"symbol\")) == None:\n return apology(\"invalid stock\", 400)\n\n try:\n shares = int(request.form.get(\"shares\"))\n except ValueError:\n return apology(\"shares must be a positive integer\", 400)\n\n\n # Check if its negative\n #elif int(request.form.get(\"shares\")) < 1:\n # return apology(\"must provide valid order info\", 400)\n\n\n # Add stock to user's portfolio\n\n stock = lookup(request.form.get(\"symbol\"))['name']\n num = request.form.get(\"shares\")\n price = (lookup(request.form.get(\"symbol\"))['price'])\n user = session.get(\"user_id\")\n amount = (float(request.form.get(\"shares\")) * float(lookup(request.form.get(\"symbol\"))['price']))\n\n # check if they have enough cash\n # Query database for username\n rows = db.execute(\"SELECT * FROM users WHERE id = :id\", id = session.get(\"user_id\"))\n rows = float(rows[0][\"cash\"])\n\n\n # Add trasnaction to portfolio if user has enough cash\n if (float(num) * float(price)) <= rows:\n result = db.execute(\"INSERT INTO portfolio (User, Stock, Price, Num) VALUES(:User, :Stock, :Price, :Num)\", User = session.get(\"user_id\"), Stock = stock, Price = usd(price), Num = num)\n if not result:\n return apology(\"TX did not recrod\", 400)\n# Update cash\n result = db.execute(\"UPDATE users set cash = cash - :amount where id = :User \", User = session.get(\"user_id\"), amount = amount)\n if not result:\n return apology(\"Cash did not update\", 400)\n\n # Redirect user to home page\n return redirect(\"/\")\n else:\n\n return apology(\"Not enough Cash\", 403)\n else:\n return render_template(\"buy.html\")", "def buy():\n if request.method == \"POST\":\n if not request.form.get(\"symbol\"):\n return apology(\"Please specify which stock to buy\", 403)\n if not request.form.get(\"nos\"):\n return apology(\"Please specify how many stocks you want to buy\", 403)\n if int(request.form.get(\"nos\")) < 1:\n return apology(\"Please input a positive integer\", 403)\n if request.form.get(\"nos\").isnumeric() != True:\n return apology(\"Please input a positive integer\", 403)\n symbol = request.form.get(\"symbol\")\n if not lookup(symbol):\n return apology(\"Invalid symbol\", 403)\n cost = (lookup(symbol)[\"price\"]) * int(request.form.get(\"nos\"))\n bro = db.execute(\"SELECT cash FROM users WHERE id = ?\", 
session[\"user_id\"])\n money = bro[0][\"cash\"]\n if cost > money:\n return apology(\"Cannot afford\", 400)\n money = money - cost\n bef = db.execute(\"SELECT COUNT (?) FROM ind WHERE user_id = ?\", lookup(symbol)[\"symbol\"], session[\"user_id\"])\n if len(bef):\n tot = 0\n nob = 0\n tota = cost\n\n else:\n tot = db.execute(\"SELECT total FROM ind where symbol = ?\", lookup(symbol)[\"symbol\"])\n no = db.execute(\"SELECT nos FROM ind where symbol = ?\", lookup(symbol)[\"symbol\"])\n nob = no[0][\"nos\"]\n tota = tot[0][\"total\"] - cost\n\n\n\n\n nos = int(request.form.get(\"nos\"))\n db.execute(\"UPDATE users SET cash = ? WHERE id = ?\", money, session[\"user_id\"])\n db.execute(\"CREATE TABLE IF NOT EXISTS buys (user_id INTEGER NOT NULL, symbol TEXT NOT NULL, name TEXT NOT NULL, price NUMERIC NOT NULL, nos INTEGER NOT NULL, cost NUMERIC NOT NULL, time datetime NOT NULL, FOREIGN KEY(user_id) REFERENCES users(id))\")\n db.execute(\"INSERT INTO hist(user_id, typ, symbol, name, price, nos, cost, time) VALUES (:user_id, :typ, :symbol, :name, :price, :nos, :cost, :time)\", user_id = session[\"user_id\"], typ = \"BOUGHT\", symbol = lookup(symbol)[\"symbol\"], name = lookup(symbol)[\"name\"], price = lookup(symbol)[\"price\"], nos = nos, cost = cost, time = datetime.datetime.now())\n db.execute(\"INSERT INTO buys(user_id, symbol, name, price, nos, cost, time) VALUES (:user_id, :symbol, :name, :price, :nos, :cost, :time)\", user_id = session[\"user_id\"], symbol = lookup(symbol)[\"symbol\"], name = lookup(symbol)[\"name\"], price = lookup(symbol)[\"price\"], nos = nos, cost = cost, time = datetime.datetime.now())\n bef = db.execute(\"SELECT symbol FROM ind WHERE symbol=:symbol AND user_id=:id\", symbol=lookup(symbol)[\"symbol\"], id=session[\"user_id\"])\n\n # add to portfolio database\n # if symbol is new, add to portfolio\n if not bef:\n db.execute(\"INSERT INTO ind (symbol, name, nos, user_id, price, total) VALUES (:symbol, :name, :nos, :id, :price, :total)\",\n name = lookup(symbol)[\"name\"], symbol=lookup(symbol)[\"symbol\"], nos=int(request.form.get(\"nos\")), id = session[\"user_id\"], price = lookup(symbol)[\"price\"], total = cost)\n\n # if symbol is already in portfolio, update quantity of shares and total\n else:\n db.execute(\"UPDATE ind SET nos=nos+:nos WHERE symbol=:symbol AND user_id=:id\",\n nos=int(request.form.get(\"nos\")), symbol=lookup(symbol)[\"symbol\"], id = session[\"user_id\"]);\n return redirect(\"/\")\n\n\n else:\n return render_template(\"buy.html\")", "def buy():\n if request.method == \"POST\":\n symbol = request.form.get('symbol')\n price = lookup(symbol)['price']\n \n if not request.form.get('ammount').isnumeric() or int(request.form.get('ammount')) % 100 != 0:\n return apology(\"The ammount is not a valid number, should be a multiple of 100\", 501)\n\n ammount = int(request.form.get('ammount'))\n cost = price * ammount\n\n current_stock = db.execute(\"SELECT * FROM stocks WHERE user_id = ? 
AND symbol = ?\", session[\"user_id\"], symbol)\n current_cash = db.execute(\"SELECT * FROM users WHERE id = ?\", session[\"user_id\"])\n \n if cost > current_cash[0][\"cash\"]:\n return apology(\"Not enough money\", 999)\n else:\n update_database(session[\"user_id\"], symbol, ammount, price, \"buy\", current_stock, current_cash[0])\n \n return redirect(\"/\")\n else:\n return render_template(\"buy.html\")", "def sell():\n if request.method == \"POST\":\n symbol = request.form.get(\"symbol\")\n sharesToSell = int(request.form.get(\"shares\"))\n if sharesToSell < 0:\n return apology(\"Shares to sell cannot be negative\", 400)\n\n sharesRows = db.execute(\"SELECT * FROM portfolio WHERE UserID = :userid AND Symbol = :enteredSymbol\",\n userid=session.get(\"user_id\"), enteredSymbol = symbol)\n\n numSharesOwned = 0\n for row in sharesRows:\n numSharesOwned += row[\"NumberOfShares\"]\n\n if numSharesOwned < sharesToSell:\n return apology(\"You don't own that many shares!\", 400)\n\n remainingSharesToSell = sharesToSell\n for row in sharesRows:\n numShares = row[\"NumberOfShares\"]\n if remainingSharesToSell >= numShares:\n '''delete row'''\n delete = db.execute(\"DELETE FROM portfolio WHERE id = :rowid\", rowid = row[\"id\"])\n remainingSharesToSell -= numShares\n else:\n '''update row'''\n updatedShares = numShares - remainingSharesToSell\n update = db.execute(\"UPDATE portfolio SET NumberOfShares = :numshares, TotalPrice = :tp WHERE id = :rowid\",\n numshares = updatedShares, tp = updatedShares * row[\"UnitPrice\"], rowid = row[\"id\"])\n remainingSharesToSell = 0\n\n if remainingSharesToSell == 0:\n break;\n\n quote = lookup(symbol)\n cashToReturn = quote[\"price\"] * sharesToSell\n userRows = db.execute(\"SELECT cash FROM users WHERE id = :userid\", userid = session.get(\"user_id\"))\n usersCurrentCash = userRows[0][\"cash\"]\n\n updatedBalance = usersCurrentCash + cashToReturn\n db.execute(\"UPDATE users SET cash = :cash WHERE id = :userid\", cash = updatedBalance, userid = session.get(\"user_id\"))\n '''Update history'''\n dateNow = datetime.datetime.now()\n db.execute(\"INSERT INTO history (Symbol, Shares, Price, Date, UserID) VALUES(:symbl, :shares, :price, :date, :userid)\", symbl = symbol, shares = -1 * sharesToSell, price = -1 * cashToReturn, date = dateNow, userid = session.get(\"user_id\"))\n '''Update history end'''\n return redirect(\"/\")\n\n else:\n symbolRows = db.execute(\"SELECT Symbol FROM portfolio WHERE UserID = :userid GROUP BY Symbol\", userid=session.get(\"user_id\"))\n symbls = []\n for row in symbolRows:\n symbls.append(row[\"Symbol\"])\n\n return render_template(\"sell.html\", symbols=symbls)", "def attempt_market_buy(self, decision: Decision, available_capital: float, state: Series, is_backtest: bool = False, crypto: bool = False) -> Transaction:\n try: latest_price = self.latest_price(decision.symbol, state, is_backtest, crypto)\n except:\n print('Error retrieving latest price')\n return Transaction(False, TransactionType.MarketBuy, 0, 0, decision, state['date'])\n\n # Determine how many shares we can/should purchase given a decision\n share_quantity = 0\n if isinstance(decision.quantity, BuyQuantity):\n diviser = 1 if decision.quantity == BuyQuantity.Max else 2\n share_quantity = ((available_capital * .9) / diviser) / latest_price\n else:\n max_purchase_quantity = (available_capital * .9) / latest_price\n if not self.allow_fractional and not crypto: max_purchase_quantity = math.floor(max_purchase_quantity)\n\n if decision.quantity < max_purchase_quantity: # 
decision is valid quantity\n share_quantity = decision.quantity\n else: # Can't afford requested amount, instead buy as much as possible\n share_quantity = max_purchase_quantity\n\n if not self.allow_fractional and not crypto:\n try: share_quantity = math.floor(share_quantity)\n except: print('Error getting share quantity:', share_quantity, decision.quantity, available_capital, self.latest_price(decision.symbol, state, is_backtest, crypto))\n\n strike_price: float\n succeeded = True\n \n if share_quantity == 0 or (not self.allow_fractional and not crypto and share_quantity < 0):\n print('share_quantity=0 error - returning')\n strike_price = 0\n share_quantity = 0\n succeeded = False\n elif is_backtest:\n c_type = 'crypto' if crypto else 'stock'\n # spread = .01 if c_type == 'stock' else 0\n spread = 0\n buy_fee = state['close'] * self.get_fee_pct(c_type)[0] + self.get_fixed_fee(c_type, state[\"close\"], share_quantity)\n self.total_fees += buy_fee\n self.trade_volume_shares += share_quantity\n print(f'unadjusted price: {state[\"close\"]} | fee: {buy_fee} | trade volume: {self.trade_volume} | total fees: {self.total_fees}')\n strike_price = state['close'] + buy_fee + spread\n else:\n try:\n if crypto:\n try:\n print('attempting crypto market buy @ ', latest_price)\n res = asyncio.get_event_loop().run_until_complete(wait_for_cb_order_fill(self.cb_client, decision.contract, 'buy', share_quantity, latest_price))\n (strike_price, share_quantity, succeeded) = res\n except Exception as e:\n print('asnycio wait_for_cb_order_fill error:', e)\n strike_price = 0\n succeeded = False\n else:\n print(f'attempting {decision.symbol} ib market buy @ {latest_price}')\n # buy_order = MarketOrder('BUY', share_quantity)\n buy_order = LimitOrder('BUY', share_quantity, latest_price)\n res = asyncio.get_event_loop().run_until_complete(wait_for_ib_order_fill(self.ib_client.ib, buy_order, decision.contract))\n \n print('market buy res:', res)\n (strike_price, share_quantity, succeeded) = res\n\n except Exception as e: # Failed to purchase at limit price\n print('market buy error:', e)\n succeeded = False\n strike_price = 0\n share_quantity = 0\n\n self.trade_volume += (strike_price * share_quantity)\n return Transaction(succeeded, TransactionType.MarketBuy, strike_price, share_quantity, decision, state['date'])", "def trade_action(self, BUY_QTY):\n BUY_QTY = 4500\n self.trade(BUY_QTY)\n #self.show()", "def sell():\n userId = session[\"user_id\"]\n\n sharesOwned = db.execute(f\"SELECT symbol, SUM(shares) FROM transactions WHERE user_id={userId} GROUP BY symbol HAVING SUM(shares)>0\")\n\n if request.method == \"GET\":\n\n return render_template(\"sell.html\", sharesOwned=sharesOwned)\n\n elif request.method == \"POST\":\n\n symbolInput = request.form.get(\"symbol\")\n shares = float(request.form.get(\"shares\")) * (-1)\n\n symbolName = lookup(symbolInput)[\"name\"]\n symbolPrice = lookup(symbolInput)[\"price\"]\n symbolTicker = lookup(symbolInput)[\"symbol\"]\n\n shareCount = float(db.execute(f\"SELECT SUM(shares) FROM transactions WHERE user_id={userId} AND symbol='{symbolInput}' GROUP BY symbol HAVING SUM(shares)>0\")[0][\"SUM(shares)\"] * (-1))\n\n if symbolInput != symbolTicker or symbolInput == \"\" or shares == \"\" or shares > 0 or shares < shareCount:\n return apology(\"No sell for you senpai!\")\n\n else:\n totalPrice = shares * symbolPrice\n availableCash = float(db.execute(f\"SELECT cash FROM users WHERE id={userId}\")[0][\"cash\"])\n\n now = datetime.now()\n transTime = now.strftime(\"%d/%m/%Y 
%H:%M:%S\")\n availableCash -= totalPrice\n\n db.execute(f\"UPDATE users SET cash = '{availableCash}' WHERE id = '{userId}'\")\n\n db.execute(f\"INSERT INTO transactions (trans_time, trans_type, user_id, symbol, price, shares, value, name, current_price) VALUES ('{transTime}','SELL','{userId}','{symbolTicker}','{symbolPrice}','{shares}','{totalPrice}','{symbolName}','{symbolPrice}')\")\n\n return redirect(\"/\")", "def buy():\n if request.method == \"GET\":\n return render_template(\"buy.html\")\n elif request.method == \"POST\":\n\n symbolInput = request.form.get(\"symbol\")\n shares = float(request.form.get(\"shares\"))\n\n symbolName = lookup(symbolInput)[\"name\"]\n symbolPrice = lookup(symbolInput)[\"price\"]\n symbolTicker = lookup(symbolInput)[\"symbol\"]\n\n if symbolInput != symbolTicker or symbolInput == \"\" or shares == \"\" or shares < 1:\n return apology(\"No buy for you senpai!\")\n\n else:\n userId = session[\"user_id\"]\n totalPrice = shares * symbolPrice\n availableCash = float(db.execute(f\"SELECT cash FROM users WHERE id={userId}\")[0][\"cash\"])\n\n if totalPrice > availableCash:\n return apology(\"Not enough available tendies\")\n else:\n now = datetime.now()\n transTime = now.strftime(\"%d/%m/%Y %H:%M:%S\")\n availableCash -= totalPrice\n\n db.execute(f\"UPDATE users SET cash = '{availableCash}' WHERE id = '{userId}'\")\n\n db.execute(f\"INSERT INTO transactions (trans_time, trans_type, user_id, symbol, price, shares, value, name, current_price) VALUES ('{transTime}','BUY','{userId}','{symbolTicker}','{symbolPrice}','{shares}','{totalPrice}','{symbolName}','{symbolPrice}')\")\n\n return redirect(\"/\")", "def sellshares():\n # Initialise buy and sell share forms\n sellform = SellShareForm()\n # Validate and process form data\n if(sellform.validate_on_submit()):\n # Buys shares\n issuerID = sellform.sellsharecode.data\n quantity = sellform.sellquantity.data\n userID = current_user.userID\n # Call buyshare API\n sellshare = gdb.sellshare(userID, issuerID, quantity)\n if(sellshare):\n # Flash with success message\n flash(\"Share sale successful!\", category=\"success\")\n else:\n # Flash with warning message\n flash(\"Share sale unsuccessful!\", category=\"error\")\n # Redirect to reffering page or dashboard\n return redirect(request.referrer or url_for('main.dashboard'))", "def buy():\n if request.method == \"POST\":\n\n # Ensure symbol was submitted\n if not request.form.get(\"symbol\"):\n return apology(\"must provide symbol\", 400)\n\n # Ensure shares was submitted\n elif not request.form.get(\"shares\"):\n return apology(\"must provide shares\", 400)\n\n if not request.form.get(\"shares\").isdigit():\n return apology(\"must be integer\",400)\n\n elif int(request.form.get(\"shares\"))<1 :\n return apology(\"must be positive integer\", 400)\n\n elif lookup(request.form.get(\"symbol\"))==None:\n return apology(\"Must be a valid symbol\",400)\n\n #ensure money>price\n quote=lookup(request.form.get(\"symbol\"))\n shares=request.form.get(\"shares\")\n cash=db.execute(\"SELECT cash FROM users WHERE id=?\",session[\"user_id\"])\n if cash[0][\"cash\"]<int(quote[\"price\"])*int(shares):\n return apology(\"You can't affort this/these\",400)\n\n #BUY, STORE DATA IN REPOSITORY AND RECORD\n\n #record this transaction\n db.execute(\"INSERT INTO record(userID,transactions,symbol,price,t1) VALUES(?,?,?,?,strftime('%Y-%m-%d %H:%M:%S','now'))\",session[\"user_id\"],int(shares),quote[\"symbol\"],float(quote[\"price\"]))\n\n #deduct the cash\n total=int(quote[\"price\"])*int(shares)\n 
db.execute(\"UPDATE users SET cash=cash- (?) WHERE id=?\",total,session[\"user_id\"])\n\n return redirect(\"/\")\n\n else:\n return render_template(\"buy.html\")", "def buy():\n \n if request.method == \"POST\":\n \n time = str(datetime.now())\n \n quantity = int(request.form.get(\"quantity\"))\n \n if quantity < 1:\n return apology(\"you need to provide right quantity\")\n \n # get user's cash\n user_id = int(session.get('user_id'))\n \n data_cash = db.execute(\"SELECT cash FROM users WHERE id = :user_id\", user_id = user_id)\n \n convert = data_cash[0]\n cash = convert.get('cash')\n \n # getting stock request data\n quote = session['quote']\n \n symbol, name, price = quote['symbol'], quote['name'], float(quote['price'])\n total = price * quantity\n \n #check if user can afford so much stock\n \n if total > cash:\n return apology('you don\\'t have enough money')\n \n #INSERT bought stock into history table\n db.execute('''INSERT INTO history (date, user_id, stock_name, symbol, quantity, price, deal) \n VALUES (:date, :user_id, :stock_name, :symbol, :quantity, :price, :deal)''',\n date = time,\n user_id = user_id,\n stock_name = name,\n symbol = symbol,\n quantity = quantity,\n price = total,\n deal = 'buy')\n #update portfolio\n #check if user has bought this stock before\n symbol_check = db.execute('''SELECT symbol FROM portfolio WHERE user_id = :user_id''',\n user_id = user_id)\n \n if [x for x in symbol_check if x['symbol'] == symbol]:\n #update stock if user has bought such shares before\n db.execute('''UPDATE portfolio \n SET quantity = quantity + :quantity \n WHERE (user_id = :user_id AND symbol = :symbol)''', \n quantity = quantity, user_id = user_id, symbol = symbol)\n \n else:\n #add new shares to portfolio\n db.execute('''INSERT INTO portfolio VALUES (:user_id, :symbol, :quantity)''',\n user_id = user_id, symbol = symbol, quantity = quantity)\n \n #update cash\n db.execute('UPDATE users SET cash = cash - :total WHERE id = :user_id', total = total, user_id = user_id)\n \n return redirect(url_for(\"index\"))\n \n else:\n return redirect(url_for(\"quote\"))", "def buy():\n\n # User reached route via POST\n if request.method == 'POST':\n\n # Ensure shares is a positive integer:\n try:\n if int(request.form.get('shares')) < 1:\n return apology(\"input isn't a positive integer\", 400)\n except ValueError:\n return apology(\"input isn't an integer\", 400)\n\n # Ensure symbol was provided\n if not request.form.get('symbol'):\n return apology('must provide symbol', 403)\n\n # Ensure symbol exists\n if lookup(request.form.get('symbol')) == None:\n return apology(\"symbol doens't exist\")\n\n shares = int(request.form.get('shares'))\n\n stock_price = lookup(request.form.get('symbol'))['price']\n\n cash = db.execute('SELECT cash FROM users WHERE id = :id', id=session['user_id'])[0]['cash']\n\n # Check if the user can afford the stock\n if stock_price * shares > cash:\n return apology(f\"You don't have enough cash to buy {shares} shares.\", 403)\n\n db.execute('INSERT INTO transactions (id, operation, symbol, shares, price) VALUES(:id, :operation, :symbol, :shares, :stock_price)',\n id=session['user_id'],\n symbol=request.form.get('symbol').upper(),\n operation='BUY',\n shares=shares,\n stock_price=stock_price\n )\n\n db.execute('UPDATE users SET cash = :cash WHERE id = :id',\n cash=cash - shares * stock_price,\n id=session['user_id'])\n\n # Redirect user to home page\n return redirect('/')\n\n # User reached route via GET\n else:\n return render_template('buy.html')", "async def sell(self, 
ctx, amount : float, symbol, date=None):\n user = ctx.message.author\n portfolio = GetPortfolio(user.id, util.GetTimestamp(date))\n portfolio.Sell(amount, symbol)\n await self.bot.say('%s\\'s portfolio is now worth $%.2f.' % \n (ctx.message.author, portfolio.Value()))\n portfolio.Save()", "def sell():\n\n # Access user's id\n user_id = session[\"user_id\"]\n\n # User reached route via POST (as by submitting a form via POST)\n if request.method == \"POST\":\n\n # Access form data\n symbol = request.form.get(\"symbol\")\n shares = int(request.form.get(\"shares\"))\n\n # Ensure symbol was submitted\n if not symbol:\n return apology(\"must provide symbol\", 400)\n\n # Ensure shares was submitted\n if not shares:\n return apology(\"must provide shares\", 400)\n\n # Obtain quote using lookup function\n QUOTED = lookup(symbol)\n\n # Check if user has enough shares to sell as requested\n shares_count = db.execute(\"SELECT shares_count FROM shares WHERE user_id = ? AND symbol = ?\",\n user_id, QUOTED[\"symbol\"])[0][\"shares_count\"]\n if shares > shares_count:\n return apology(\"not enough shares owned\", 400)\n\n # User has enough shares to sell as requested\n else:\n # Calculate new cash amount user has\n cash = db.execute(\"SELECT cash FROM users WHERE id = ?\", user_id)[0][\"cash\"]\n cash_gained = QUOTED[\"price\"] * shares\n new_cash_total = cash + cash_gained\n\n # Update cash in users table for user\n db.execute(\"UPDATE users SET cash = ? WHERE id = ?\", new_cash_total, user_id)\n\n # Insert sell log into history table\n db.execute(\"INSERT INTO history (user_id, symbol, shares, price, transacted) VALUES (?, ?, ?, ?, datetime('now'))\",\n user_id, QUOTED[\"symbol\"], -(shares), QUOTED[\"price\"])\n\n # Keep track of shares in shares table\n current_shares = db.execute(\"SELECT shares_count FROM shares WHERE user_id = ? AND symbol = ?\",\n user_id, QUOTED[\"symbol\"])[0][\"shares_count\"]\n new_shares_total = current_shares - shares\n\n # If 0 shares left of the stock owned\n if new_shares_total == 0:\n db.execute(\"DELETE FROM shares WHERE user_id = ? AND symbol = ?\", user_id, QUOTED[\"symbol\"])\n\n # Redirect user to home page\n flash(\"Sold!\", \"info\")\n return redirect(\"/\")\n\n # User still owns shares of the stock\n else:\n shares_value_total = new_shares_total * QUOTED[\"price\"]\n db.execute(\"UPDATE shares SET shares_count = ?, price = ?, total = ? WHERE user_id = ? 
AND symbol = ?\",\n new_shares_total, QUOTED[\"price\"], shares_value_total, user_id, QUOTED[\"symbol\"])\n\n # Redirect user to home page\n flash(\"Sold!\", \"info\")\n return redirect(\"/\")\n\n # User reached route via GET (as by clicking a link or via redirect)\n else:\n # Select share symbols from shares table for logged in user\n SHARES = db.execute(\"SELECT symbol FROM shares WHERE user_id = ?\", user_id)\n\n return render_template(\"sell.html\", shares=SHARES)", "def buy():\n # User reached route via POST (as by submitting a form via POST)\n if request.method == \"POST\":\n # Ensure the user inputs a symbol\n symbol = request.form.get(\"symbol\").upper()\n if not symbol:\n return apology(\"must provide a symbol\", 403)\n\n # ensure number of shares is submitted\n shares = request.form.get(\"shares\")\n if not shares:\n return apology(\"must provide number of shares\", 403)\n\n\n # do a try except for handling negative values or empty spaces in shares input box\n try:\n shares = int(shares)\n if shares < 0:\n return apology(\"Enter a positive integer for shares\", 403)\n except ValueError:\n return apology(\"No empty spaces allowed enter a positive integer\", 403)\n\n # call lookup in helpers.py to look up a stock’s current price.\n stockPriceDetail = lookup(symbol)\n\n # render apology for invalid symbol input by user\n if stockPriceDetail == None:\n return apology(\"Invalid symbol\", 403)\n else:\n price = stockPriceDetail[\"price\"]\n\n # calculate the total price of the number of shares\n totalCost = price * shares\n print(totalCost)\n\n\n # based on user's input check if they have enough cash to buy stocks\n rows = db.execute(\"SELECT cash FROM users WHERE id = :id\", id=session[\"user_id\"])\n print(\"rows= \" , rows)\n\n cash = rows[0] [\"cash\"]\n\n # Check for sufficient cash\n if cash < totalCost:\n return apology(\"you have insufficient cash balance\", 403)\n\n balance = cash - totalCost\n\n # insert row in transactions table\n result = db.execute(\"\"\"insert into transactions\n (user_id,stock_code,stock_quantity,stock_price,\n start_balance,end_balance,transaction_type)\n values(:userid, :symbol, :shares, :price, :cash,\n :balance,:ttype)\"\"\",\n userid=session[\"user_id\"],shares=shares,\n symbol=symbol,price=price,\n cash=cash,balance=balance,ttype=\"BOUGHT\")\n\n # update users balance\n result = db.execute(\"update users set cash = :balance where id = :userid\",\n userid=session[\"user_id\"],balance=balance)\n\n # Redirect user to index page\n return redirect(\"/\")\n\n else:\n symbol = request.args.get('symbol')\n return render_template(\"buy.html\",symbol=symbol)", "def sell():\n\n # if user reached route via GET return them an input form\n if request.method == \"GET\":\n return render_template(\"sell.html\")\n\n # if user reached route via POST (as by submitting a form via POST)\n elif request.method == \"POST\":\n\n # get id as it is used many times\n id = session[\"user_id\"]\n\n # get symbol input\n symbol = request.form.get(\"symbol\")\n\n # get share volume requested\n volume = int(request.form.get(\"volume\"))\n\n # ensure stock symbol was submitted\n if not symbol:\n return apology(\"you must provide a stock symbol\")\n\n # ensure positive volume (integer rule handled elsewhere)\n elif volume <= 0:\n return apology(\"volume must be integer greater than 0\")\n\n # lookup stock on yahoo\n stock_info = lookup(symbol)\n\n # if error looking stock up\n if not stock_info:\n return apology(\"that stock symbol doesn't exist\")\n\n # check if user already owns any 
stock in this company\n existing = db.execute(\"SELECT num_shares FROM portfolio WHERE id = :id AND symbol = :symbol\", id=id, symbol=symbol)\n\n # if sufficient cash, make purchase, else return apology\n if not existing:\n return apology(\"you don't own this stock\")\n else:\n if existing[0]['num_shares'] < volume:\n return apology('you cannot sell more shares than you own')\n else:\n # query database for\n cash = db.execute(\"SELECT cash FROM users WHERE id = :id\", id=id)\n cash = cash[0]['cash']\n\n minus = db.execute(\"UPDATE portfolio SET num_shares = :num_shares WHERE id = :id AND symbol = :symbol\", num_shares=existing[0]['num_shares'] - volume, id=id, symbol=symbol)\n\n # set date string\n dstring = str(datetime.datetime.utcnow())\n\n # update transaction history\n result2 = db.execute(\"INSERT INTO `transaction` (id, symbol, volume, share_price, dtstamp) VALUES(:id, :symbol, :volume, :share_price, :dtstamp)\", id=id, symbol=symbol, volume=-volume, share_price=stock_info['price'], dtstamp=dstring)\n\n # calculate sale price\n sale_price = stock_info['price'] * volume\n\n # increase cash balance\n result = db.execute(\"UPDATE users SET cash = :cash WHERE id = :id\", cash=cash+sale_price, id=id)\n\n # redirect user to home page\n return redirect(url_for(\"index\"))", "def buy():\n if request.method == \"POST\":\n\n #validate input\n try:\n shares = int(request.form.get(\"shares\"))\n stock = lookup(request.form.get(\"symbol\"))\n except:\n return apology(\"enter a valid ticker\")\n\n #check shares not blank\n if not stock:\n return apology(\"please enter a stock\")\n\n #are shares there and more than 0?\n if not shares or shares <= 0:\n return apology(\"Please fill in all fields\")\n\n #does the user have enough cash\n money = db.execute(\"SELECT cash FROM users WHERE id=:id\", id=session[\"user_id\"])\n money = int(money[0]['cash'])\n if stock[\"price\"] * shares > money:\n return apology(\"You don't have enough money\")\n else:\n db.execute(\"INSERT INTO portfolio (stock, price, trans_price, number, userid) VALUES (:stock, :price, :trans_price, :number, :userid)\", stock=stock['symbol'], price=stock['price'], trans_price=usd(stock['price']), number=shares, userid=session[\"user_id\"])\n db.execute(\"UPDATE users SET cash=cash-:total WHERE id=:userid\", total=(stock['price'] * shares), userid=session[\"user_id\"])\n\n return redirect(\"/\")\n\n if request.method == \"GET\":\n return render_template(\"buy.html\")", "def buy():\n # User reached route via POST (as by submitting a form via POST)\n if request.method == \"POST\":\n # Ensure symbol was submitted\n if not request.form.get(\"symbol\"):\n return apology(\"must provide symbol\", 403)\n\n # Ensure positive number of shares was submitted\n elif not request.form.get(\"shares\") or int(request.form.get(\"shares\")) < 0:\n return apology(\"must provide positive number of shares\", 403)\n\n else:\n returned_quote = lookup(request.form.get(\"symbol\"))\n row = db.execute(\"SELECT * FROM users WHERE id = :user_id\", user_id = session[\"user_id\"])\n if returned_quote == None:\n return apology(\"symbol does not exist\", 403)\n\n elif returned_quote[\"price\"] * int(request.form.get(\"shares\")) > row[0][\"cash\"]:\n return apology(\"cannot afford number of shares at current price\", 403)\n\n else:\n db.execute(\"INSERT INTO 'transaction' ('t_id','u_id','symbol','shares','price') VALUES (NULL,:u_id,:symbol,:shares,:price)\",\n u_id = session[\"user_id\"], symbol = returned_quote[\"symbol\"], shares = int(request.form.get(\"shares\")), 
price = returned_quote[\"price\"])\n db.execute(\"UPDATE users SET cash = cash - :price * :shares WHERE id = :user_id\",\n price = returned_quote[\"price\"], shares = int(request.form.get(\"shares\")), user_id = session[\"user_id\"])\n\n flash(\"Bought\")\n return redirect(\"/\")\n # User reached route via GET (as by clicking a link or via redirect)\n else:\n return render_template(\"buy.html\")", "async def price(self, ctx, name):\n\t\tname = name.upper()\n\t\ttry:\n\t\t\tstock_data = await self._get_stock_data([name])\n\t\texcept ValueError as e:\n\t\t\treturn await ctx.send(e)\n\t\tif name not in stock_data:\n\t\t\tawait ctx.send(f'I couldn\\'t find any data for the stock {name}. Please try another stock.')\n\t\t\treturn\n\t\tprice = stock_data[name]['price']\n\t\treal = str(price)\n\t\treal = ('0' * (3 - max(len(real), 0))) + real\n\t\treal = '$' + real[:-2] + '.' + real[-2:]\n\t\tcurrency = await bank.get_currency_name(ctx.guild)\n\t\tawait ctx.send(f'**{name}:** {price} {currency} per share ({real}).')", "def buy():\n current_cash= db.execute(\"select cash from users where id = \" + str(session[\"user_id\"]))[0]['cash']\n\n if request.method == \"POST\":\n symbol = request.form.get(\"symbol\")\n shares = request.form.get(\"shares\")\n\n\n x=lookup(symbol)\n if x == None:\n return apology(\"invalid symbol\", 400)\n\n price = int(shares)*x['price']\n new_cash = current_cash - price\n\n\n #print(\"insert into users (cash) values (?)\", new_cash + \" where id = \"+ str(session[\"user_id\"]))\n\n db.execute(\"UPDATE users SET cash = \"+ str(new_cash) +\" WHERE id = \"+ str(session[\"user_id\"]) +\";\")\n db.execute(\"insert into purchases (user_id, shares, symbol, price_total, price_per_shares) values (?, ?, ?, ?,? )\", session[\"user_id\"], shares, symbol, price, x['price'])\n db.execute(\"insert into history (user_id, type, amount, time, shares, name) values (?,?,?,?,?,?)\",str(session[\"user_id\"]), \"buy\", str(price), str(datetime.now()), str(shares), symbol)\n return redirect(\"/\")\n\n return render_template(\"buy.html\")", "def transact_shares(self, action, quantity, price, commission, bid=None, ask=None):\n if bid is None: \n bid = price\n if ask is None:\n ask = price\n\n if action is None:\n return\n\n self.total_commission += commission\n\n # Adjust total bought and sold\n if action == \"BOT\":\n self.avg_bot = (self.avg_bot * self.buys + price * quantity) / (self.buys + quantity)\n\n if self.net < 0:\n self.realised_pnl += min(quantity, abs(self.net)) * (self.avg_price - price) - commission # Adjust realised PNL\n commission = 0 # assume commission is all in realised_pnl\n # Increasing long position\n self.avg_price = (self.avg_price * self.net + price * quantity + commission) / (self.net + quantity)\n self.buys += quantity\n self.total_bot = self.buys * self.avg_bot\n\n # action == \"SLD\"\n else:\n self.avg_sld = (self.avg_sld * self.sells + price * quantity) / (self.sells + quantity)\n\n if self.net > 0:\n self.realised_pnl += min(quantity, abs(self.net)) * (price - self.avg_price) - commission # Adjust realised PNL\n commission = 0 # assume commission is all in realised_pnl\n\n self.avg_price = (self.avg_price * self.net - price * quantity - commission) / (self.net - quantity)\n self.sells += quantity\n self.total_sld = self.sells * self.avg_sld\n\n # Adjust net values, including commissions\n self.net = self.buys - self.sells\n self.net_total = self.total_sld - self.total_bot\n self.net_incl_comm = self.net_total - self.total_commission\n self.cost_basis = self.net * 
self.avg_price\n\n self.update_market_value(bid, ask)", "def sell():\n\n # User submits information\n if request.method == \"POST\":\n\n # Ensure user entered a stock\n if not request.form.get(\"symbol\"):\n return apology(\"must choose a stock\")\n\n # Get stock selected\n symbol = request.form.get(\"symbol\")\n \n # Ensure is a valid stock symbol\n if not lookup(symbol):\n return apology(\"Invalid stock symbol\")\n\n # Ensure user owns the stock requested\n test = db.execute(\"SELECT * FROM portfolios WHERE user_id = ? AND stocks = ?\", session[\"user_id\"], symbol)\n\n if not test:\n return apology(\"you have 0 shares of this stock\")\n\n owns = db.execute(\"SELECT * FROM portfolios WHERE user_id = ? AND stocks = ?\", session[\"user_id\"], symbol)\n\n # Ensure user entered a number in shares\n if not request.form.get(\"shares\") or not isinstance(request.form.get(\"shares\"), int):\n return apology(\"must enter postive whole number of shares\")\n\n shares = request.form.get(\"shares\")\n\n # Ensure number is positive\n if shares <= 0:\n return apology(\"must enter a positive number\")\n\n # Ensure user owns the amount of stock entered to sell\n if shares > owns[0]['shares']:\n return apology(\"you don't own that much of this stock\")\n\n # Get date and time for transaction\n day = datetime.now()\n time = datetime.now().time()\n\n # Get total and stock name for transaction\n price = lookup(symbol)['price']\n total = price * shares\n name = lookup(symbol)['name']\n\n # Sell shares of the stock and add to transactions history\n db.execute(\"INSERT INTO transactions (user_id, date, time, price, shares, total, stock, name, type) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)\",\n session[\"user_id\"], day, time, price, shares * -1, total, symbol, name, \"sell\")\n\n # Update portfolios table\n db.execute(\"UPDATE portfolios SET shares = shares - ? WHERE user_id = ? AND stocks = ?\", shares, session[\"user_id\"], symbol)\n\n # If stock shares is 0, delete from portfolio\n db.execute(\"DELETE FROM portfolios WHERE shares = ? \", 0)\n\n return redirect(\"/\")\n\n # If user reached page via link or redirect\n else:\n\n # Get list of stocks owned\n owns = db.execute(\"SELECT stocks FROM portfolios WHERE user_id = ? 
ORDER BY stocks\", session[\"user_id\"])\n\n return render_template(\"sell.html\", owns=owns)", "def buy(self, symbol: str=None, quantity: int=0, in_force: str='gtc', extended: bool=False):\n return self.trader.buy(symbol, quantity, in_force, extended)", "def Buy(self, X, Y):\n if self.money - (int(Y) * self.price[X][0] * (1 + self.taxe)) < 0:\n raise TradeError(\"Not Enough Money\")\n self.share[X] += int(Y)\n self.money -= int(Y) * self.price[X][0] * (1 + self.taxe)\n print(f\"BUY:{str(int(Y))}:{str(X)}\", flush = True)", "def sell():\n\n if request.method == \"POST\":\n\n # define stock variables\n symbol = request.form.get(\"symbol\")\n stock = lookup(request.form.get(\"symbol\"))\n\n # error checking\n if not stock:\n return apology(\"Missing or Incorrect Symbol\", 400)\n\n # check if stock is owned\n try:\n sold_stock = db.execute(\n \"SELECT symbol, SUM(shares) AS shares, price FROM transactions WHERE user_id = :user_id AND symbol = :symbol GROUP BY symbol\", user_id=session[\"user_id\"], symbol=symbol)[0]\n except IndexError:\n return apology(\"Stock not owned\", 400)\n\n # check for shares input\n try:\n shares = int(request.form.get(\"shares\"))\n except ValueError:\n return apology(\"Input at least 1 share\", 400)\n\n if shares < 0:\n return apology(\"Input at least 1 Share\", 400)\n\n if int(sold_stock[\"shares\"]) < shares:\n return apology(\"Not enough shares to sell\", 400)\n\n else:\n # define variables for inserting into transactions table and updating cash\n purchase_date = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n\n # update user cash\n user_cash = db.execute(\"SELECT cash FROM users WHERE id = :user_id\", user_id=session[\"user_id\"])[0][\"cash\"]\n user_cash = user_cash + (stock[\"price\"]*shares)\n db.execute(\"UPDATE users SET cash = :user_cash WHERE id = :user_id\", user_id=session[\"user_id\"], user_cash=user_cash)\n\n # update transactions table with selling transaction\n db.execute(\"\"\"\n INSERT INTO transactions(user_id, date, symbol, shares, price)\n VALUES(:user_id, :date, :symbol, :shares, :price)\n \"\"\",\n user_id=session[\"user_id\"],\n date=purchase_date,\n symbol=stock[\"symbol\"],\n shares=-shares,\n price=stock[\"price\"]\n )\n\n flash(\"You paper-handed that one!\")\n return redirect(\"/\")\n\n else:\n # query db for current holdings\n stocks = db.execute(\n \"SELECT symbol, SUM(shares) AS shares, price FROM transactions WHERE user_id = :user_id GROUP BY symbol\", user_id=session[\"user_id\"])\n stocks[:] = [stock for stock in stocks if stock.get('shares') > 0]\n return render_template(\"sell.html\", stocks=stocks)", "def sell():\n # User reached route via POST (as by submitting a form via POST)\n if request.method == \"POST\":\n # Ensure symbol was submitted\n if not request.form.get(\"symbol\"):\n return apology(\"must provide symbol\", 403)\n\n # Ensure positive number of shares was submitted\n elif not request.form.get(\"shares\") or int(request.form.get(\"shares\")) < 0:\n return apology(\"must provide positive number of shares\", 403)\n\n elif int(request.form.get(\"shares\")) > (db.execute(\"SELECT sum(shares) as shares FROM 'transaction' WHERE u_id = :user_id and symbol = :symbol\", user_id = session[\"user_id\"], symbol = request.form.get(\"symbol\")))[0][\"shares\"]:\n return apology(\"cannot sell more shares than owned\", 403)\n\n else:\n returned_quote = lookup(request.form.get(\"symbol\"))\n row = db.execute(\"SELECT * FROM users WHERE id = :user_id\", user_id = session[\"user_id\"])\n\n db.execute(\"INSERT INTO 'transaction' 
('t_id','u_id','symbol','shares','price') VALUES (NULL,:u_id,:symbol,:shares,:price)\",\n u_id = session[\"user_id\"], symbol = returned_quote[\"symbol\"], shares = -1*int(request.form.get(\"shares\")), price = returned_quote[\"price\"])\n db.execute(\"UPDATE users SET cash = cash + :price * :shares WHERE id = :user_id\",\n price = returned_quote[\"price\"], shares = int(request.form.get(\"shares\")), user_id = session[\"user_id\"])\n\n flash(\"Sold\")\n return redirect(\"/\")\n\n # User reached route via GET (as by clicking a link or via redirect)\n else:\n rows = db.execute(\"SELECT symbol, SUM(shares) as shares FROM 'transaction' WHERE u_id = :user_id GROUP BY symbol\", user_id = session[\"user_id\"])\n\n if len(rows) > 0:\n return render_template(\"sell.html\", rows = rows)\n else:\n return apology(\"no shares to sell\", 403)", "def buy():\n lookedup = []\n if request.method == \"POST\":\n if not request.form.get(\"buy_symbol\"):\n return apology(\"Must provide stock symbol\", 403)\n shares_to_buy = request.form.get(\"buy_amount\")\n if not shares_to_buy:\n return apology(\"Must provide number of shares to buy\", 403)\n\n shares_to_buy = int(shares_to_buy)\n\n if shares_to_buy <= 0:\n return apology(\"Must provide positive number of shares to buy\", 403)\n\n else:\n lookedup = lookup(request.form.get(\"buy_symbol\"))\n\n if not lookedup:\n return apology(\"Not a stock symbol\", 403)\n\n\n current_user = session[\"user_id\"]\n user_cash = db.execute(\"SELECT cash FROM users WHERE id = :id\", id=current_user)\n\n # see if properly selecting cash amount\n if not user_cash:\n return apology(\"Didn't find user's current balance\", 000)\n\n\n current_cash = user_cash[0][\"cash\"]\n current_cash = int(current_cash)\n\n stock_name = lookedup.get(\"name\")\n stock_price = lookedup.get(\"price\")\n stock_symbol = lookedup.get(\"symbol\")\n\n total_cost = shares_to_buy * stock_price\n if current_cash < total_cost:\n return apology(\"You do not have enough money for this purchase\", 000)\n\n new_balance = current_cash - total_cost\n\n db.execute(\"UPDATE users SET cash = :new_balance WHERE id = :id\", new_balance=new_balance, id=current_user)\n\n db.execute(\"INSERT INTO purchases (id,stock_symbol,volume_purchased,price,date_purchased) VALUES(:id,:symbol,:amount,:price,datetime('now'))\", id=current_user, symbol=stock_symbol, amount=shares_to_buy, price=stock_price)\n\n check_holdings = db.execute(\"SELECT volume FROM portfolio WHERE id = :id AND stock_symbol=:stock_symbol\", id=current_user, stock_symbol=stock_symbol)\n\n if not check_holdings:\n db.execute(\"INSERT INTO portfolio (id,stock_symbol,volume) VALUES(:id,:stock_symbol,:volume)\", id=current_user, stock_symbol=stock_symbol, volume=shares_to_buy)\n else:\n old_volume = check_holdings[0][\"volume\"]\n old_volume = int(old_volume)\n new_volume = old_volume+shares_to_buy\n db.execute(\"UPDATE portfolio SET volume = :new_volume\", new_volume=new_volume)\n\n\n\n return render_template(\"bought.html\", stock_name=stock_name,stock_price=stock_price, stock_symbol=stock_symbol, shares_to_buy=shares_to_buy, total_cost= total_cost)\n\n\n\n else:\n return render_template(\"buy.html\")\n\n\n return apology(\"TODO BUY\")" ]
[ "0.75090504", "0.73669976", "0.719782", "0.7076133", "0.702035", "0.70142156", "0.69631433", "0.6943623", "0.6915625", "0.691014", "0.6897359", "0.68731195", "0.6865242", "0.68647367", "0.6850216", "0.6848369", "0.6818854", "0.67891747", "0.6722476", "0.6719868", "0.67188966", "0.67112154", "0.6705635", "0.6700653", "0.66989964", "0.6695444", "0.6687238", "0.6684804", "0.66811556", "0.6677518", "0.6655224", "0.6651959", "0.6649536", "0.6624344", "0.66173476", "0.6613126", "0.6609052", "0.6603438", "0.6595926", "0.6585596", "0.6578174", "0.65766925", "0.6573936", "0.6562317", "0.65583086", "0.65577334", "0.6526294", "0.65238345", "0.6519029", "0.6516538", "0.65035534", "0.64914435", "0.64667463", "0.6459902", "0.6451228", "0.64483476", "0.6447599", "0.64350283", "0.6431609", "0.6421019", "0.6420768", "0.6420553", "0.6419165", "0.6416702", "0.64161277", "0.64114636", "0.6407591", "0.6406082", "0.63956153", "0.6387202", "0.63830394", "0.638049", "0.635205", "0.6341835", "0.6338913", "0.63325727", "0.63291985", "0.6327545", "0.632726", "0.632575", "0.6305297", "0.6302711", "0.6300179", "0.6298004", "0.62972134", "0.62947214", "0.628081", "0.62716454", "0.62636733", "0.62556595", "0.6254726", "0.625456", "0.6253792", "0.62462944", "0.6244217", "0.6216623", "0.62081665", "0.62070346", "0.62038547", "0.6201706" ]
0.64506495
55
Show history of transactions
def history():
    user_history=db.execute("SELECT * FROM history WHERE user_id=:u_i",u_i=session["user_id"])
    return render_template("history.html",s=user_history)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def history():\n transactions = db.execute(\"SELECT Symbol, Shares, Transacted FROM cash WHERE id=:id\", id=session[\"user_id\"])\n return render_template(\"history.html\", transactions=transactions)", "def history():\n\n #Query transactions by user id\n trans = Transactions.query.filter_by(owner=session['user_id']).all()\n\n #Convert Price to US Dollars and format transaction time\n for t in trans:\n t.price = usd(t.price)\n t.transacted = t.transacted.strftime('%Y-%m-%d %H:%M:%S')\n\n #Return history.html\n return render_template('history.html', trans=trans)", "def history():\n transactions_list = db.execute(\"SELECT stock, units, price, time, type FROM transactions WHERE id = :current_id\",\n current_id=session[\"user_id\"])\n\n return render_template(\"history.html\", transactions=transactions_list)", "def history():\n\n # get all transactions for current user\n transactions = db.execute(\"SELECT * FROM transactions WHERE user_id = :user_id\", user_id=session[\"user_id\"])\n\n # render history.html with all user transactions\n return render_template(\"history.html\", transactions=transactions, usd=usd)", "def history():\n\n rows = db.execute(\"SELECT * FROM 'transaction' WHERE u_id = :user_id\", user_id = session[\"user_id\"])\n return render_template(\"history.html\", rows = rows)", "def history():\n\n transactions = db.execute(\"SELECT * FROM transactions WHERE user_id = ? ORDER BY date DESC, time DESC\", session[\"user_id\"])\n\n return render_template(\"history.html\", transactions=transactions)", "def history():\n\n # Access user's id\n user_id = session[\"user_id\"]\n\n # Obtain history information for logged in user\n TRANSACTIONS = db.execute(\"SELECT * FROM history WHERE user_id = ? ORDER BY transacted DESC\", user_id)\n\n return render_template(\"history.html\", transactions=TRANSACTIONS)", "def history():\n\n transactions = db.execute(\"SELECT stock, amount, price, date, time, total_amount FROM transactions WHERE id=:id\", id=session['user_id'])\n\n\n return render_template(\"index.html\", transactions=transactions)", "def history():\n\n rows = db.execute('SELECT operation, symbol, shares, price, date FROM transactions WHERE id = :id',\n id=session['user_id'])\n\n return render_template('history.html', stocks=rows[::-1])", "def history():\n\n user = session[\"user_id\"]\n rows = db.execute(\"SELECT * FROM transactions WHERE user_id = :user\", user=user)\n\n # transactions list\n transactions = []\n for row in rows:\n stock_data = lookup(row['symbol'])\n transactions.append(list((\n stock_data['symbol'],\n stock_data['name'],\n row['amount'],\n row['value'],\n row['date'],\n )))\n\n return render_template(\"history.html\", transactions=transactions)", "def history():\n transactions = db.execute(\"SELECT Symbol, Shares, Price, Date FROM history WHERE UserID = :userid\", userid=session.get(\"user_id\"))\n return render_template(\"history.html\", transactionList = transactions, currentUser=session.get(\"user_id\"))", "def history():\n\n userId = session[\"user_id\"]\n\n shares = db.execute(f\"SELECT symbol, shares, price, trans_time FROM transactions WHERE user_id={userId} ORDER BY trans_id DESC\")\n\n return render_template(\"history.html\", shares=shares)", "def history():\n rows = db.execute(text(\n \"SELECT symbol, shares, price, time FROM transactions \"\n \"WHERE user_id=:id\"),\n id=session[\"user_id\"])\n transactions = []\n for row in rows:\n transaction = dict(row)\n transaction[\"price\"] = usd(transaction[\"price\"])\n transactions.append(transaction)\n return 
render_template(\"history.html\", transactions=transactions)", "def history():\n # query database for history\n transactions = db.execute(\"SELECT symbol, volume, share_price, dtstamp FROM `transaction` WHERE id = :id\", id = session[\"user_id\"])\n\n # initialise dict\n dic = {}\n\n # interate through history array\n\n # pass data to template\n return render_template(\"history.html\", transactions = transactions)", "def history():\n # name variable to show current users name in template\n name = db.execute(\"SELECT username FROM users WHERE id=:id\", id=session[\"user_id\"])\n\n # user's transaction history\n hist = db.execute(\"SELECT transactid, name, price, quantity, date FROM portfolio WHERE userid = :userid\", userid=session[\"user_id\"])\n\n # return the template with the relevant objects for jinja\n return render_template(\"history.html\", name=name, hist=hist)\n\n # if function fails\n return apology(\"Can't display history\", 400)", "def history():\n get_trans_codes = db.execute(\"SELECT transaction_code FROM History WHERE id = :id\", id=session['user_id'])\n get_symbols = db.execute(\"SELECT symbol FROM History WHERE id = :id\", id=session['user_id'])\n get_companies = db.execute(\"SELECT company FROM History WHERE id = :id\", id=session['user_id'])\n get_trans_types = db.execute(\"SELECT transaction_type FROM History WHERE id = :id\", id=session['user_id'])\n get_shares = db.execute(\"SELECT shares FROM History WHERE id = :id\", id=session['user_id'])\n get_prices = db.execute(\"SELECT price FROM History WHERE id = :id\", id=session['user_id'])\n get_timestamps = db.execute(\"SELECT timestamp FROM History WHERE id = :id\", id=session['user_id'])\n\n trans_codes = [code['transaction_code'] for code in get_trans_codes]\n symbols = [symbol['symbol'] for symbol in get_symbols]\n companies = [company['company'] for company in get_companies]\n trans_types = [types['transaction_type'] for types in get_trans_types]\n shares = [share['shares'] for share in get_shares]\n prices = [price['price'] for price in get_prices]\n timestamps = [timestamp['timestamp'] for timestamp in get_timestamps]\n\n return render_template(\"history.html\", values=zip(trans_codes, symbols, companies, trans_types, shares, prices, timestamps))", "def history():\n query = Records.query.filter_by(user_id=session.get(\"user_id\")).all()\n return render_template(\"history.html\", rows=query)", "def history():\n\n entry = db.execute(\"SELECT * FROM users WHERE id=:id\",\n id=session['user_id'])\n user = entry[0]['username']\n owned = db.execute(\"SELECT * FROM transactions WHERE user=:user ORDER BY date\",\n user=user)\n\n return render_template(\"history.html\", stocks = owned)", "def history():\n # Select stock info for every single stock transaction for the respective user\n rows = db.execute(\"SELECT symbol, shares, price, transacted FROM portfolio WHERE userid = :userid\", userid=session[\"user_id\"])\n # Return template with the list that has each stock transaction info\n return render_template(\"history.html\", rows=rows)", "def history():\n transactions = db.execute(\"SELECT * FROM history WHERE user_id = ?\", session[\"user_id\"])\n user_name = db.execute(\"SELECT username, cash FROM users WHERE id = ?\", session[\"user_id\"])\n \n return render_template(\"history.html\", transactions=transactions, user_name=user_name[0][\"username\"])", "def history():\n # extract history of operation for a particular user\n historical_data = db.execute(\"SELECT Symbol, Company, Shares, Price, Total, Timestamp FROM 
portfolio WHERE id = :id\", id=session[\"user_id\"])\n\n return render_template(\"history.html\", historical=historical_data)", "def history():\n userID = session[\"user_id\"]\n transactions = db.execute(\"SELECT * FROM transactions WHERE id=:userID\", userID=userID)\n\n for row in transactions:\n stock = lookup(row[\"symbol\"])\n row[\"name\"] = stock[\"name\"]\n row[\"total\"] = usd(row[\"num_shares\"] * row[\"price_ps\"])\n\n return render_template(\"history.html\", transactions=transactions)", "def history():\n histories = db.execute(\"SELECT * from purchases WHERE user_id=:id\", id=session[\"user_id\"])\n \n return render_template(\"history.html\", histories=histories)", "def history():", "def do_gethistory(self,args):\n #Very rough. pretty print it\n history=bitstamp.get_usertransactions()\n ppdict(history)", "def history():\n userid = session[\"user_id\"]\n transactions = db.execute(\"SELECT * FROM purchase WHERE userid = :userid\", userid = userid)\n for transaction in transactions:\n transaction[\"price\"] = usd(transaction[\"tot\"]/transaction[\"shares\"])\n transaction[\"name\"] = lookup(transaction[\"symbol\"])['name']\n return render_template(\"history.html\", transactions=transactions)", "def history():\n rows=db.execute(\"SELECT * FROM record ORDER BY t1\")\n return render_template(\"history.html\",rows=rows)", "def history():\n username = session.get(\"username\")\n history=db.execute(\"SELECT stock_symbol, unit_price, time, quantity, stock_name, status FROM history WHERE username=:username\",\n username=username)\n return render_template(\"history.html\", history=history)", "def transaction_history(user_id):\n # Run the transaction in the background\n executor.submit(transaction_run)\n user_id = login_session['user_id']\n # Get all transaction made by all the users\n user_tran = Transaction.query.filter_by(done=True).filter_by(user_id=user_id).all()\n target_tran = Transaction.query.filter_by(done=True).filter_by(target_user=user_id).all()\n user_curr = Currency.query.filter_by(user_id=user_id).first()\n\n return render_template('trans_history.html',\n transactions=user_tran + target_tran,\n currency=user_curr)", "def history():\n \n user_id = session[\"user_id\"]\n history_list = hist(user_id, db)\n return render_template('history.html', history=history_list)", "def history():\n user_id = session[\"user_id\"]\n\n history_list = db.execute(\"SELECT symbol, price, amount, timestamp FROM stocks WHERE user_id = :user_id\", user_id = user_id)\n\n rows = len(history_list)\n\n history = []\n\n for row in range(rows-1, -1, -1):\n history.append([history_list[row][\"symbol\"], history_list[row][\"amount\"], history_list[row][\"price\"], history_list[row][\"timestamp\"]])\n\n return render_template(\"history.html\", history = history, rows = rows)", "def history():\n\n symbols = []\n shares = []\n prices = []\n times = []\n\n purchases = db.execute(\"SELECT * FROM purchase WHERE id = :username\", username=session[\"user_id\"])\n length = len(purchases)\n\n for item in purchases:\n symbols.append(item[\"symbol\"])\n shares.append(item[\"shares\"])\n prices.append(item[\"price\"])\n times.append(item[\"created_at\"])\n\n return render_template(\"history.html\", symbols = symbols, shares = shares, prices = prices, times = times, length = length)", "def history():\n rows = db.execute(\"SELECT * FROM histories WHERE id=:id\", id=session[\"user_id\"])\n\n return render_template(\"history.html\", rows=rows)", "def history():\n rows = db.execute(\"SELECT stock_id, stocks.symbol, price, 
shares, date FROM history JOIN stocks ON history.stock_id=stocks.id WHERE user_id=:user_id\", user_id=session[\"user_id\"])\n return render_template(\"history.html\", rows=rows)", "def history():\n history = db.execute(\"SELECT * from history WHERE id=:id\", id=session[\"user_id\"])\n\n return render_template(\"history.html\", history = history)", "def showTransactions(self):\n self.scanTransactions()\n txns = []\n\n # Summarize the stats\n for x in range(len(self._trans)):\n stats = self._trans[x]\n trans_time = 0\n remote_calls = 0\n for name, stat in stats:\n trans_time += stat.total_tt\n remote_calls += 1\n txns.append((x, trans_time, remote_calls))\n\n results = [\"TX#\\tTime\\tCalls\",\n \"=\" * 22]\n\n for item in txns:\n results.append(\"%3d\\t%4f\\t%5d\" % item)\n \n return \"\\n\".join(results)", "def history():\n history = db.execute(\"SELECT * from history WHERE id=:id\", id=session[\"user_id\"])\n \n return render_template(\"history.html\", history = history)", "def history():\n \n value_dicts = db.execute(\"SELECT * FROM history WHERE user_id = :usid\", usid=session[\"user_id\"])\n return render_template(\"history.html\", value_dicts=value_dicts)", "def show_history_log(self):\n self.visual.print_enum(self.visual.history_log)", "def history():\n\n user_id = session.get('user_id')\n table_name = f'stocks_user{user_id}'\n rows = db.execute(\"SELECT * FROM ?\", table_name)\n\n return render_template('history.html', rows=rows)", "def view_order_history(request):\n\n\ttemplate_name = 'order_history.html'\n\tall_orders = Order.objects.filter(user=request.user, payment__isnull=False)\n\tcontext = {'orders': all_orders}\n\treturn render(request, template_name, context)", "def history():\n\n data = db.execute(\"select * from history\")\n return render_template(\"history.html\", data=data)", "def history():\n \n #select user's portfolio\n rows = db.execute(\"SELECT stock, number, trans_price, transaction_stamp FROM portfolio WHERE userid=:id\", id=session[\"user_id\"])\n return render_template(\"history.html\", rows=rows)", "def history():\n \n # selection of name, symbol, shares and cash of user stocks\n hist = db.execute(\"SELECT * FROM history WHERE id=:id\", id = session[\"user_id\"])\n return render_template(\"history.html\", hist=hist)", "def history():\n \"\"\"Show portfolio of stocks\"\"\"\n all_rows = []\n rows = db.execute(\"SELECT * FROM history WHERE id = :id\",\n id=session['user_id'])\n if rows==None or len(rows) < 1:\n return render_template(\"history.html\", all_rows=all_rows)\n else:\n for row in rows:\n share_row = []\n share_row.append(row[\"symbol\"])\n share_row.append(row[\"shares\"])\n share_row.append(usd(row[\"price\"]))\n share_row.append(row[\"transacted\"])\n all_rows.append(share_row)\n return render_template(\"history.html\", all_rows=all_rows)", "def history():\n current_userid = session[\"user_id\"]\n userbalance = get_userbal(db, current_userid)\n userstocks = get_userstock(db, current_userid)\n stockhistory = get_history(db, current_userid)\n stocklist = get_stocklist(db, stocksid=True, prices=True)\n if request.method == \"GET\":\n return render_template(\"history.html\", userbalance=usd(userbalance),\n userstocks=userstocks, buystocks=stocklist,\n stockhistory=stockhistory)\n else:\n return apology(\"TODO\")", "def get_transaction_history(self, txn_id_or_ref):\n response = self.get(f\"{self.gateway_path}/timeline/{txn_id_or_ref}\")\n return response", "def history():\n\n user = session.get(\"user_id\")\n rows = db.execute(\"Select TransDate as Date, 
Stock, Price, case when Num < 0 then 'Sell' else 'Buy' end as Type, Num as Quantity from portfolio where User = :User order by Date asc\", User = session.get(\"user_id\"))\n\n\n return render_template(\"hist.html\", rows = rows)", "def history():\n username = db.execute(\"SELECT username FROM users WHERE id=:userId\", userId=session[\"user_id\"])[0][\"username\"]\n symbols = db.execute(\"SELECT symbol FROM history WHERE username=:username\", username=username)\n buyorsells = []\n for item in db.execute(\"SELECT buyorsell FROM history WHERE username=:username\", username=username):\n if item[\"buyorsell\"]:\n buyorsells.append(\"Bought\")\n else:\n buyorsells.append(\"Sold\")\n numbers = db.execute(\"SELECT number FROM history WHERE username=:username\", username=username)\n prices = db.execute(\"SELECT price FROM history WHERE username=:username\", username=username)\n dates = db.execute(\"SELECT date FROM history WHERE username=:username\", username=username)\n return render_template(\"history.html\", username=username, symbols=symbols, buyorsells=buyorsells, numbers=numbers,\n prices=prices, dates=dates)", "def get_tx_history(account_id, total):\n query = iroha.query(\"GetTransactions\", account_id=account_id, page_size=total)\n ic.sign_query(query, user_private_key)\n response = net.send_query(query)\n data = MessageToDict(response)\n pprint(data, indent=2)", "def add_history(self):\n # add separator, if there already are history entries\n if self.parentApp.History != '':\n self.parentApp.History += (\n '\\n\\n--- --- --- --- --- --- --- --- --- --- --- ---\\n\\n'\n )\n\n # add the transaction to it\n self.parentApp.History += self.parentApp.tmpTransC.to_str()", "def show_history(self, ts=0, count=0, fmt='raw'):\n print \"Querying the station for historical records...\"\n for i, r in enumerate(self.station.genArchiveRecords(ts)):\n if fmt.lower() == 'raw':\n self.print_raw(r['datetime'], r['ptr'], r['raw_data'])\n elif fmt.lower() == 'table':\n self.print_table(r['datetime'], r['data'], i == 0)\n else:\n print r['datetime'], r['data']\n if count and i > count:\n break", "def history():\n # User reached route via GET (as by clicking a link or via redirect)\n if request.method == \"GET\":\n # Select to buy-sell table\n bs = db.execute(\"SELECT * FROM bs WHERE userID=:userID\", userID=session[\"user_id\"])\n\n # len of buy sell table\n bslen = len(bs)\n\n # Falsh massage\n flash('history')\n\n # Rander buy sell and total return value list\n return render_template(\"history.html\", bs=bs, bslen=bslen)", "def history_testnet(btc_address):\n history = []\n response = json.loads(make_request('http://tbtc.blockr.io/api/v1/address/txs/' + btc_address))\n if response.get('status') == 'success':\n data = response.get('data')\n txs = data.get('txs')\n\n for tx in reversed(txs):\n history.append(get_tx_info(tx.get('tx')))\n\n return history", "def history():\n \n # only prints shifts from current user\n usernum = db.execute(\"SELECT * FROM users WHERE id=:id\", id = session[\"user_id\"])[0][\"id\"]\n \n # stores shift data into hours\n hours = db.execute(\"SELECT * FROM history WHERE User=:id\", id = usernum)\n \n # calculates total amount of cash ever paid to user\n cash = db.execute(\"SELECT sum(total) FROM history WHERE User=:id\", id = session[\"user_id\"])[0][\"sum(total)\"]\n \n return render_template(\"history.html\", hours = hours, Total = cash)", "def history():\n #Get users history (no amount = 0)\n #Prepare table\n\n rows = db.execute(\"SELECT * from history WHERE user_id = :userid AND 
amount != 0\", userid = session[\"user_id\"])\n for row in rows:\n row['price'] = usd(row['price'])\n\n return render_template(\"history.html\", history=rows)", "def history(request):\n\treturn render(request,'history.html',None)", "def history(self):\n alembic.command.history(self.alembic_config(), verbose=True)", "def history():\n\n # obtain stock info from portfolio database\n history = db.execute(\"SELECT symbol, shares, price, date FROM history WHERE id = :id ORDER BY date DESC\", id=session[\"user_id\"])\n \n # for every stock in the user's portfolio, assign dict key/values for use in html/jinja\n for transaction in history:\n symbol = transaction[\"symbol\"]\n shares = transaction[\"shares\"]\n price = transaction[\"price\"]\n date = transaction[\"date\"]\n\n return render_template(\"history.html\", history = history)", "def history():\n hist = db.execute(\"SELECT * FROM shares WHERE userid = :uid ORDER BY date DESC\", uid=session[\"user_id\"])\n for h in hist:\n h[\"total\"] = round(h[\"value\"]*h[\"quantity\"],2)\n return render_template(\"history.html\", context=hist)", "def show_history(user_id):\n return History.where('user_id', user_id).get()", "def get_deposit_history(self, currency=None):\n if not currency:\n currency = \"\"\n return self.__call__('balance', \"getdeposithistory\", \n {\"currencyname\": currency})", "def history(self, request, *args, **kwargs):\n account = self.get_object()\n\n try:\n history = HistoricoConta.objects.filter(conta=account).order_by('-created')\n except ObjectDoesNotExist as obj:\n return Response({\"detail\": \"Could not find history for thus account\",\n \"status_code\": status.HTTP_404_NOT_FOUND}, status=status.HTTP_404_NOT_FOUND)\n\n return Response(HistoricoContaSerializer(history, many=True).data)", "def list_history(request):\n history = History.objects\n\n if not is_admin(request.user):\n history = history.filter(submitter=request.user)\n history = history.order_by('-submission_date')\n\n return render('editor/list_history.mako', request, {\n 'history': history,\n })", "def orders_history(self): \n return(self._d_orders['history'])", "def history(request):\r\n assert isinstance(request, HttpRequest)\r\n return render(\r\n request,\r\n 'app/history.html',\r\n context_instance=RequestContext(request,\r\n {\r\n 'title': 'Work History',\r\n 'contact': Contact.objects.get(pk=1),\r\n 'work_histories': WorkHistory.objects.all().order_by('-start_date'),\r\n 'current_application': Application.objects.get(pk=1),\r\n\r\n })\r\n )", "def show_history(self, ts=0, count=0, fmt='raw'):\n records = self.station.get_records(since_ts=ts, num_rec=count)\n for i,r in enumerate(records):\n if fmt.lower() == 'raw':\n raw_dump(r['datetime'], r['ptr'], r['raw_data'])\n elif fmt.lower() == 'table':\n table_dump(r['datetime'], r['data'], i==0)\n else:\n print(r['datetime'], r['data'])", "def get_history(self, taxlot_view):\n history = []\n\n def record_dict(log):\n filename = None if not log.import_filename else path.basename(log.import_filename)\n if filename:\n # Attempt to remove NamedTemporaryFile suffix\n name, ext = path.splitext(filename)\n pattern = re.compile('(.*?)(_[a-zA-Z0-9]{7})$')\n match = pattern.match(name)\n if match:\n filename = match.groups()[0] + ext\n return {\n 'state': TaxLotStateSerializer(log.state).data,\n 'date_edited': convert_to_js_timestamp(log.created),\n 'source': log.get_record_type_display(),\n 'filename': filename,\n # 'changed_fields': json.loads(log.description) if log.record_type == AUDIT_USER_EDIT else None\n }\n\n log = 
TaxLotAuditLog.objects.select_related('state', 'parent1', 'parent2').filter(\n state_id=taxlot_view.state_id\n ).order_by('-id').first()\n master = {\n 'state': TaxLotStateSerializer(log.state).data,\n 'date_edited': convert_to_js_timestamp(log.created),\n }\n\n # Traverse parents and add to history\n if log.name in ['Manual Match', 'System Match', 'Merge current state in migration']:\n done_searching = False\n while not done_searching:\n if (log.parent1_id is None and log.parent2_id is None) or log.name == 'Manual Edit':\n done_searching = True\n elif log.name == 'Merge current state in migration':\n record = record_dict(log.parent1)\n history.append(record)\n if log.parent1.name == 'Import Creation':\n done_searching = True\n else:\n tree = log.parent1\n log = tree\n else:\n tree = None\n if log.parent2:\n if log.parent2.name in ['Import Creation', 'Manual Edit']:\n record = record_dict(log.parent2)\n history.append(record)\n elif log.parent2.name == 'System Match' and log.parent2.parent1.name == 'Import Creation' and \\\n log.parent2.parent2.name == 'Import Creation':\n # Handle case where an import file matches within itself, and proceeds to match with\n # existing records\n record = record_dict(log.parent2.parent2)\n history.append(record)\n record = record_dict(log.parent2.parent1)\n history.append(record)\n else:\n tree = log.parent2\n if log.parent1.name in ['Import Creation', 'Manual Edit']:\n record = record_dict(log.parent1)\n history.append(record)\n else:\n tree = log.parent1\n\n if not tree:\n done_searching = True\n else:\n log = tree\n elif log.name == 'Manual Edit':\n record = record_dict(log.parent1)\n history.append(record)\n elif log.name == 'Import Creation':\n record = record_dict(log)\n history.append(record)\n\n return history, master", "def history():\n db.execute(\"CREATE TABLE IF NOT EXISTS hist(user_id INTEGER NOT NULL, typ TEXT NOT NULL, symbol TEXT NOT NULL, name TEXT NOT NULL, price NUMERIC NOT NULL, nos INTEGER NOT NULL, cost NUMERIC NOT NULL, time DATETIME NOT NULL, FOREIGN KEY(user_id) REFERENCES users(id))\")\n stocks = db.execute(\"SELECT * FROM hist WHERE user_id = ?\", session[\"user_id\"])\n return render_template(\"history.html\", stocks = stocks)", "def history():\n \n u_row = db.execute(\"SELECT * FROM users WHERE id=:id\", id=session['user_id'])\n username = u_row[0]['username']\n \n result = db.execute(\"SELECT * FROM history WHERE username=:username\", username=username)\n \n if result:\n dict = {}\n dict['symbol'] = []\n dict['shares'] = []\n dict['price'] = []\n dict['time'] = []\n \n for row in result:\n symbol = row['symbol']\n shares = row['shares']\n time = row['time']\n \n quote = lookup(symbol)\n name = quote['name']\n price = quote['price']\n total = shares * price\n \n dict['symbol'].append(symbol)\n dict['shares'].append(shares)\n dict['price'].append(usd(price))\n dict['time'].append(time)\n \n length = len(dict['symbol'])\n \n return render_template(\"history.html\",length=length,dict=dict)\n \n else:\n return render_template(\"history.html\",length=0,dict=[])", "def GetHistory(index=0):\n if index == \"clear\":\n state_mgr.entire_history = []\n else:\n print state_mgr.entire_history[int(index):]", "def get_history():\n return response_texts_to_entries(make_post_request(HISTORY_API, data={\"k\": config[\"api_key\"]}))", "def view_transactions(self) -> None:\n user_choice = Menu.prompt_view_transactions()\n if user_choice == 5:\n print(\"Returning to main menu...\")\n return\n\n budget_category = 
BudgetManager.category_mapping[user_choice]\n print(f\"\\nTransactions in the {budget_category.value} \"\n f\"category: \")\n for tx in self.user.tx_manager:\n if tx.budget_category == user_choice:\n print(f\"\\n{tx}\")", "def user_history(self):\n self.query_1 = \"SELECT * FROM orders WHERE user_id=%s\"\n self.input_1 = (self.user_id,) \n self.event = \"user_history\"\n self.message = \"Order history fetched successfully.\"\n self.error = \"Unable to fetch order history.\"", "def history():\n userid = session[\"user_id\"]\n history = db.execute(\"SELECT * FROM history WHERE id=:uid\", uid=userid)\n dic = {}\n data = []\n for row in history:\n # print(row)\n dic[\"symbol\"] = row[\"symbol\"]\n dic[\"shares\"] = row[\"shares\"]\n dic[\"price\"] = usd(row[\"price\"])\n dic[\"time\"] = row[\"time\"]\n data.append(dic.copy())\n # print(data)\n return render_template(\"history.html\", data=data)", "def history():\n\n if request.method == 'POST':\n user_input_uuid = request.form['uuid']\n\n dm = DatabaseManager()\n genes, diseases, uuid, query, genpanel, date =\\\n dm.retreieve_zoekopdracht(user_input_uuid)\n\n make_session(\"uuid\", uuid, 2)\n\n return redirect(url_for('vis_results'))\n\n hislis = []\n\n if session.get('history'):\n hislis = reversed(session['history'])\n\n return render_template(\"history.html\", hislis=hislis)", "def QueryHistory(self):\n return []", "def history(self):\n return self.info['history']", "def history():\n return apology(\"TODO\")", "def history():\n return apology(\"TODO\")", "def history():\n return apology(\"TODO\")", "def get_withdrawal_history(self, currency=None):\n if not currency:\n currency = \"\"\n return self.__call__('balance', \"getwithdrawalhistory\", \n {\"currencyname\": currency})", "def get_asset_tx_history(account_id, total):\n query = iroha.query(\n \"GetAccountAssetTransactions\", account_id=account_id, page_size=total\n )\n ic.sign_query(query, user_private_key)\n response = net.send_query(query)\n data = MessageToDict(response)\n pprint(data, indent=2)", "def get_history(self):\n return self.history", "def account_history(self, account=None, type='all', range=\"all\"):\n \n if not (utils.check(type) and utils.check(range)):\n return {}\n \n # Imply account\n if account == None:\n account = self.params['account']\n \n # Assemble URL\n url = self.endpoints['base'] +\\\n 'accounts/' +\\\n str(account) +\\\n '/history.json'\n # Add parameters\n data = {\n 'range':range,\n 'transactions':type\n }\n \n # Create HTTP Request objects\n session = requests.Session()\n auth = self.create_auth()\n req = requests.Request('GET',url,params=data,auth=auth).prepare()\n \n \n results = {'response':session.send(req).json()}\n results['request'] = utils.pretty_print_POST(req)\n \n return results['response']['response']['transactions']['transaction']", "def history():\n files = os.listdir(app.config['SEGMENTS_FOLDER'])\n if len(files) <= 3:\n flash('There is no history yet', 'warning')\n return redirect(url_for('home'))\n\n range_list, segments_list, full_track_dict_list = generate_track_and_segments_data(app, files)\n\n return render_template(\"history.html\", segments_list=segments_list,\n full_track_dict_list=full_track_dict_list,\n range_list=range_list,\n title=\"history\")", "def view_transactions(request, id):\n account = get_object_or_404(Account, pk=id, user=request.user)\n return render(request, 'ledger/pages/view_transactions.html', {\n 'title': \"View Transactions\",\n 'breadcrumbs': [account],\n 'account': account,\n })", "def history():\n\n #Get 
the current data of the stock.\n\n #SUM all similar stock values from Portfolio.\n ports = db.execute(\"SELECT * FROM history WHERE id = :id\", id=session[\"user_id\"])\n\n #Get the remaining cash of the user from the users table.\n get_cash = db.execute(\"SELECT cash FROM users WHERE id = :id\", id=session['user_id'])\n\n #Convert the get_cash dict to float so it can be displayed to index.html\n remaining_cash = get_cash[0]['cash']\n\n #SUM the stocks' total value plus the remaining cash.\n get_grand_total = db.execute(\"SELECT *, SUM(total) as grand_total FROM portfolio where id = :id\", id=session[\"user_id\"])\n grand_total_fl = get_grand_total[0]['grand_total']\n\n\n\n return render_template(\"history.html\", ports=ports)", "def get_deposit_history(self, asset: Asset, start_time: Optional[int] = None,\n end_time: Optional[int] = None, receive_window: Optional[int] = None):\n api_params = {\n \"asset\": asset.value,\n \"timestamp\": get_current_time_milliseconds()\n }\n\n if start_time is not None:\n api_params['startTime'] = start_time\n\n if end_time is not None:\n api_params['endTime'] = end_time\n\n if receive_window is not None:\n api_params['receiveWindow'] = receive_window\n\n return self.request.get(path='/deposit/history', params=api_params)", "def generate_history(self):\n self.reporter.generate()", "async def history(message, client, extra_args):\n\n if not extra_args or not (user_id := utils.from_mention(extra_args[0])):\n user_id = message.author.id\n\n @database.query\n def get_transactions(conn):\n cursor = conn.cursor()\n cursor.execute(\n \"SELECT * FROM funnypts WHERE awarder = ? OR awardee = ? ORDER BY date DESC\", (user_id, user_id))\n transactions = cursor.fetchall()\n cursor.close()\n conn.close()\n return transactions\n\n if not (transactions := get_transactions()):\n await message.channel.send(\"THIS USER HAS NO HISTORY, THEY SHOULD THOUGH\")\n return\n\n @utils.paginated_embeds\n def populate(embed, entry, entry_number):\n awarder = client.get_user(entry[0]).name.split(\"#\", 1)[0]\n awardee = client.get_user(entry[1]).name.split(\"#\", 1)[0]\n transaction = \"GIVEN TO\" if entry[3] > 0 else \"TAKEN FROM\"\n date = entry[4].split(\" \", 1)[0]\n reason = \"\\\"{0}\\\"\".format(entry[2])\n\n embed.add_field(\n name=\"{0} — {2} — {1} • {3}\".format(awarder, awardee, transaction, date), value=reason, inline=False)\n\n title = f\"{client.get_user(user_id).name}'s FUNNYPOINT HISTORY\"\n embeds = populate(title, transactions, page_length=5)\n await utils.sauce_pages(embeds, message, client)", "def history(ticker):\n head, body = gethistory(ticker)\n html_str = htmltable(head, body)\n return html_str", "def see_all_transfers(request):\n transfers = Transaction.objects.all().order_by('-executed_time')\n return render(request, 'app/allTransfers.html', {'transfers': transfers})", "def get_order_history(self):\n return self.__call__('orders', 'getorderhistory')", "def history():\n backup_history()\n yield\n reset_history()", "def transaction_list(request, model_class=Transaction, template_name='budget/transactions/list.html'):\n transaction_list = model_class.active.order_by('-date', '-created')\n try:\n paginator = Paginator(transaction_list, getattr(settings, 'BUDGET_LIST_PER_PAGE', 50))\n page = paginator.page(request.GET.get('page', 1))\n transactions = page.object_list\n except InvalidPage:\n raise Http404('Invalid page requested.')\n return render_to_response(template_name, {\n 'transactions': transactions,\n 'paginator': paginator,\n 'page': page,\n }, 
context_instance=RequestContext(request))", "def history_list(name):\n service_histories = request_service_history(name)\n table = present(lambda: service_histories,\n renderer='table',\n headers=['History Version', 'Service Name', 'Date Created', 'Manifest'],\n columns=['id', 'name', 'created_at', 'manifest'])\n if table:\n click.echo(table)\n else:\n click.echo('There is no record of your service deployments available.')\n # click.echo('https://docs.fandogh.cloud/docs/services.html\\n')", "def history():\n if request.method == \"GET\":\n \n user_id = int(session.get('user_id'))\n user_data = db.execute('''SELECT * FROM history WHERE user_id = :user_id''', user_id = user_id)\n \n if not user_data:\n return render_template('quote.html')\n \n #create lists of values for sake of returning them to F2E\n portfolio = []\n \n for i in user_data:\n #getting data from table\n date = i.get('date')\n symbol = i.get('symbol')\n name = i.get('stock_name')\n quantity = i.get('quantity')\n price = round(float(i.get('price')), 2)\n action = str(i.get('deal'))\n \n #inserting data into a list\n a_dict = {\n 'date': date, 'symbol': symbol, \n 'name': name, 'price': price, \n 'quantity': quantity, 'action': action\n }\n portfolio.append(a_dict)\n \n return render_template('history.html',\n portfolio=portfolio)\n else:\n return render_template('index.html')", "def account_df_history(self, improve=False):\n return(self.account_df('history', improve))", "def returnTradeHistory(self,\n currency_pair=\"all\",\n start=datetime.now() - timedelta(days=1),\n end=datetime.now()):\n pass" ]
[ "0.82002884", "0.8140903", "0.8114919", "0.8042388", "0.8033679", "0.80168986", "0.79338187", "0.7822898", "0.77456313", "0.77341163", "0.7718728", "0.7716838", "0.7708978", "0.7628142", "0.7624304", "0.7530955", "0.74965966", "0.7481556", "0.74478734", "0.7423578", "0.7382277", "0.7288208", "0.72825384", "0.72540516", "0.7240194", "0.7233303", "0.7193252", "0.7184091", "0.7178525", "0.71768135", "0.7147903", "0.7144208", "0.7143306", "0.71186686", "0.7105528", "0.7099931", "0.7093764", "0.7089086", "0.70393765", "0.7030488", "0.70249367", "0.7018011", "0.701332", "0.7010136", "0.6967728", "0.69604427", "0.69457144", "0.6897571", "0.68591934", "0.681676", "0.67798513", "0.6772446", "0.675553", "0.6723649", "0.6720259", "0.66986763", "0.6693779", "0.6682868", "0.66739595", "0.6656746", "0.6606525", "0.66026926", "0.6577423", "0.65742314", "0.65712106", "0.65543926", "0.65473205", "0.6535811", "0.65318495", "0.65282375", "0.65255904", "0.6510534", "0.64600503", "0.6457786", "0.64558256", "0.64352304", "0.6426731", "0.6421675", "0.6371526", "0.6371526", "0.6371526", "0.6316858", "0.62508285", "0.6249339", "0.62471", "0.62274307", "0.622531", "0.62213767", "0.62128365", "0.619218", "0.61842453", "0.6180837", "0.61752397", "0.6164259", "0.6141393", "0.6132607", "0.6131243", "0.61254805", "0.61166203", "0.61134505" ]
0.68055445
50
Sell shares of stock
def sell():
    if request.method == "POST":
        dict=lookup(request.form.get("symbol"))
        if not request.form.get("symbol") or not request.form.get("shares") or not lookup(request.form.get("symbol")):
            return apology("Must provide valid symbol and positive integer",400)
        else:
            row=db.execute("SELECT *FROM portofolio WHERE symbol=:s AND user_id=:u_i",s=request.form.get("symbol"),u_i=session["user_id"])
            if len(row) == 0 or int(request.form.get("shares")) > row[0]["shares"]:
                return apology("you don't have enough shares of this company",400)
            else:
                db.execute("INSERT INTO history(symbol,shares,price,transacted,user_id,status) VALUES (:s,:sh,:p,:t,:u_i,:status)",s=dict["symbol"],sh=int(request.form.get("shares")),p=dict["price"],t=time.asctime( time.localtime(time.time())),u_i=session["user_id"],status='sold')
                db.execute("UPDATE portofolio SET shares =shares-:sh, price=:p, total=total-:t WHERE symbol=:s AND user_id=:u_i",sh=int(request.form.get("shares")),p=dict["price"],t=dict["price"] * int(request.form.get("shares")),s=dict["symbol"],u_i=session["user_id"])
                db.execute("UPDATE users SET cash=cash+:extra WHERE id=:i",extra=int(request.form.get("shares")) * dict["price"],i=session["user_id"])
                db.execute("DELETE FROM portofolio WHERE shares=0")
                return redirect("/")
    else:
        rows=db.execute("SELECT *FROM portofolio where user_id=:u_i ",u_i=session["user_id"])
        arr=[]
        for row in rows:
            arr.append(row['symbol'])
        return render_template("selling.html",arr=arr)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sell_stock(self, symbol):\n amount_to_sell = self.get_equity(symbol)\n chirp.order_sell_fractional_by_price(symbol, amount_to_sell)\n self.L.add_line('', symbol, 'SOLD', amount_to_sell)", "async def sell(self, ctx, name, shares: int):\n\t\tplural = 's' if shares != 1 else ''\n\t\tif shares < 1:\n\t\t\tawait ctx.send('You cannot sell less than one share.')\n\t\t\treturn\n\t\tname = name.upper()\n\t\ttry:\n\t\t\tstock_data = await self._get_stock_data([name])\n\t\texcept ValueError as e:\n\t\t\treturn await ctx.send(e)\n\t\tif name not in stock_data:\n\t\t\tawait ctx.send(f'I couldn\\'t find any data for the stock {name}. Please try another stock.')\n\t\t\treturn\n\t\tprice = stock_data[name]['price']\n\t\tasync with self.config.user(ctx.author).stocks() as user_stocks:\n\t\t\tif name not in user_stocks:\n\t\t\t\tawait ctx.send(f'You do not have any shares of {name}.')\n\t\t\t\treturn\n\t\t\tif shares > user_stocks[name]['count']:\n\t\t\t\tawait ctx.send(\n\t\t\t\t\tf'You do not have enough shares of {name}. '\n\t\t\t\t\tf'You only have {user_stocks[name]} share{plural}.'\n\t\t\t\t)\n\t\t\t\treturn\n\t\t\tuser_stocks[name]['count'] -= shares\n\t\t\tif user_stocks[name]['count'] == 0:\n\t\t\t\tdel user_stocks[name]\n\t\tbal = await bank.deposit_credits(ctx.author, shares * price)\n\t\tcurrency = await bank.get_currency_name(ctx.guild)\n\t\tawait ctx.send(\n\t\t\tf'You sold {shares} share{plural} of {name} for {price * shares} {currency} '\n\t\t\tf'({price} {currency} each).\\nYou now have {bal} {currency}.'\n\t\t)", "def sell_stock (self, ticker, sell_date):\n \n self.__validate_sell__() \n self.__get_sell_share_price__(ticker, sell_date)\n self.__calc_profit_from_sales__() \n self.__update_sell_delta_amount__()\n self.__save_sell__()\n\n del self.invested[ticker]", "def sell(self, stock, amount):\n self.orders[stock] -= amount", "def sell():\n \n user_id = session[\"user_id\"]\n\n if request.method == \"GET\":\n return render_template(\"sell.html\")\n \n if request.method == \"POST\":\n \n # get required symbol\n symbol = request.form.get(\"symbol\").upper()\n try:\n qty = int(request.form.get(\"qty\"))\n except ValueError:\n return apology(\"QTY is empty!\", 403)\n \n # proceed buy function\n sell_result: Tuple[float, str] = sell_shares(db, user_id, symbol, qty )\n if sell_result[0] == -1:\n return apology(sell_result[1], 403)\n\n return redirect(\"/\")", "def marketSell(self, currency_pair, amount):\n # calcular o rate num 'for'\n bids = rOrderBook(currency_pair=currency_pair, field='bids')\n list_resp = []\n for bid in bids:\n if bid[1] < amount:\n sold = self.limitSell(currency_pair, rate=bid[0], amount=bid[1], ioc=True)\n list_resp.append(sold)\n amount -= bid[0]\n elif bid[1] >= amount:\n sold = self.limitSell(currency_pair, rate=bid[0], amount=amount, ioc=True)\n list_resp.append(sold)\n amount -= amount\n break\n return list_resp", "def sell():\n if request.method == \"POST\":\n # Ensure data is inputted\n if not request.form.get(\"symbol\"):\n return apology(\"Insert symbol\", 403)\n \n if not request.form.get(\"shares\"):\n return apology(\"Insert number of shares to sell\", 403)\n \n # Ensure shares value is valid\n try:\n if not int(request.form.get(\"shares\")) > 0:\n return apology(\"invalid value\", 403)\n except ValueError:\n return apology(\"invalid value\", 403)\n \n # Ensure there's enough shares to sell \n share_count_dict = db.execute(\"SELECT share_count FROM shares WHERE user_id=:usid AND share=:share\", usid=session[\"user_id\"], 
share=request.form.get(\"symbol\").upper())\n share_count = int(share_count_dict[0][\"share_count\"])\n \n if int(request.form.get(\"shares\")) > share_count:\n return apology(\"You don't own enough shares\", 403)\n \n # Create variables\n symbol = request.form.get(\"symbol\").upper()\n quantity = int(request.form.get(\"shares\"))\n \n # Add cash to user data\n new_cash = float(lookup(symbol)[\"price\"]) * quantity\n db.execute(\"UPDATE users SET cash= cash + :cash WHERE id=:usid\", cash=new_cash, usid=session[\"user_id\"]) \n \n # Remove shares of user data\n db.execute(\"UPDATE shares SET share_count = share_count - :shares WHERE user_id=:usid AND share = :share\", shares=quantity,share=symbol, usid=session[\"user_id\"])\n db.execute(\"DELETE FROM shares WHERE user_id=:usid AND share_count = :shares\", usid=session[\"user_id\"], shares=0)\n \n # Record transaction\n db.execute(\"INSERT INTO history (user_id, symbol, shares, time, price) VALUES (:usid, :symbol, :shares, :time, :price)\", usid=session[\"user_id\"], symbol=symbol, shares='-' + str(quantity), time=str(db.execute(\"SELECT CURRENT_TIMESTAMP\")[0][\"CURRENT_TIMESTAMP\"]), price=str(lookup(symbol)[\"price\"]))\n \n return redirect(\"/\")\n \n else:\n # Create list with purchased symbols\n symbol_dicts = db.execute(\"SELECT share FROM shares WHERE user_id=:usid\", usid=session[\"user_id\"])\n symbol_list = [None] * len(symbol_dicts)\n \n # Insert symbols into list\n for i in range(len(symbol_dicts)):\n symbol_list[i] = symbol_dicts[i][\"share\"]\n \n return render_template(\"sell.html\", longitude=len(symbol_dicts), symbols=symbol_list)", "def sell():\n if request.method == \"GET\":\n return render_template('sell.html')\n \n if request.method == \"POST\":\n symbol = request.form['symbol']\n shares = request.form['shares']\n stock = lookup(symbol)\n \n if not stock:\n return apology('Invalid symbol')\n \n user_shares = db.execute(\"SELECT shares FROM profile \\\n WHERE id = :id AND symbol=:symbol\", \\\n id=session[\"user_id\"], symbol=stock[\"symbol\"])\n if not user_shares or int(user_shares[0][\"shares\"]) < int(shares):\n return apology(\"Not enough shares\")\n db.execute(\"INSERT INTO history (company, shares, value, id, date) \\\n VALUES(:symbol, :shares, :price, :id, :date)\", \\\n symbol=stock[\"symbol\"], shares=-int(shares), \\\n price=stock[\"price\"], id=session[\"user_id\"], date = str(date.today())) \n db.execute(\"UPDATE users SET cash = cash + :purchase WHERE id = :id\", \\\n id=session[\"user_id\"], \\\n purchase=stock[\"price\"] * float(shares))\n \n shares_total = user_shares[0][\"shares\"] - int(shares)\n if shares_total == 0:\n db.execute(\"DELETE FROM profile \\\n WHERE id=:id AND symbol=:symbol\", \\\n id=session[\"user_id\"], \\\n symbol=stock[\"symbol\"])\n \n else:\n db.execute(\"UPDATE profile SET shares=:shares \\\n WHERE id=:id AND symbol=:symbol\", \\\n shares=shares_total, id=session[\"user_id\"], \\\n symbol=stock[\"symbol\"])\n \n return redirect(url_for(\"index\"))", "def sell():\n\n if request.method == \"GET\":\n return render_template(\"sell.html\")\n else:\n # ensure proper symbol\n stock = lookup(request.form.get(\"symbol\"))\n if not stock:\n return apology(\"Invalid Symbol\")\n\n # ensure proper number of shares\n try:\n shares = int(request.form.get(\"shares\"))\n if shares < 0:\n return apology(\"Amount of shares must be greater than 0\")\n except:\n return apology(\"Amount of shares must be greater than 0\")\n\n # select the symbol shares of that user\n user_shares = db.execute(\"SELECT 
shares FROM portfolio \\\n WHERE id = :id AND symbol=:symbol\", \\\n id=session[\"user_id\"], symbol=stock[\"symbol\"])\n\n # check if enough shares to sell\n if not user_shares or int(user_shares[0][\"shares\"]) < shares:\n return apology(\"You don't hold enough shares\")\n\n now = datetime.now()\n date_time = now.strftime(\"%Y-%m-%d %H:%M\")\n\n # update history of a sell\n db.execute(\"INSERT INTO history (symbol, shares, price, id, method, times, totaltarns) \\\n VALUES(:symbol, :shares, :price, :id, :method, :times, :totaltrans)\", \\\n symbol=stock[\"symbol\"], shares=-shares, \\\n price=usd(stock[\"price\"]), id=session[\"user_id\"], method= \"sell\", times= date_time, totaltrans = shares * stock[\"price\"])\n\n # update user cash (increase)\n db.execute(\"UPDATE users SET cash = cash + :purchase WHERE id = :id\", \\\n id=session[\"user_id\"], \\\n purchase=stock[\"price\"] * float(shares))\n\n # decrement the shares count\n amountshares = user_shares[0][\"shares\"] - shares\n\n # if after decrement is zero, delete shares from portfolio\n if amountshares == 0:\n db.execute(\"DELETE FROM portfolio \\\n WHERE id=:id AND symbol=:symbol\", \\\n id=session[\"user_id\"], \\\n symbol=stock[\"symbol\"])\n # otherwise, update portfolio shares count\n else:\n db.execute(\"UPDATE portfolio SET shares=:shares \\\n WHERE id=:id AND symbol=:symbol\", \\\n shares=amountshares, id=session[\"user_id\"], \\\n symbol=stock[\"symbol\"])\n\n # return to index\n return redirect(url_for(\"index\"))", "def sell():\n if request.method == \"POST\":\n bef = db.execute(\"SELECT symbol FROM ind WHERE user_id = ?\", session[\"user_id\"])\n if not request.form.get(\"symbol\"):\n return apology(\"Please specify which valid stock to sell\", 403)\n symbol = request.form.get(\"symbol\")\n p = db.execute(\"SELECT COUNT(symbol) FROM ind WHERE user_id = ?\", session[\"user_id\"])\n q = 0\n\n for i in range(int(p[0][\"COUNT(symbol)\"])):\n if symbol == bef[i][\"symbol\"]:\n q = 1\n if q == 0:\n return apology(\"Please specify which valid stock to sell\", 403)\n if not request.form.get(\"shares\"):\n return apology(\"Please specify how many stocks you want to sell\", 403)\n if int(request.form.get(\"shares\")) < 1:\n return apology(\"Please input a positive integer\", 403)\n if request.form.get(\"shares\").isnumeric() != True:\n return apology(\"Please input a positive integer\", 403)\n hav = db.execute(\"SELECT nos FROM ind WHERE symbol = ? AND user_id = ?\", request.form.get(\"symbol\"), session[\"user_id\"])\n if int(hav[0][\"nos\"]) < int(request.form.get(\"shares\")):\n return apology(\"You do not own that many shares\", 403)\n shares = int(request.form.get(\"shares\"))\n db.execute(\"CREATE TABLE IF NOT EXISTS sells (user_id INTEGER NOT NULL, symbol TEXT NOT NULL, name TEXT NOT NULL, price NUMERIC NOT NULL, shares INTEGER NOT NULL, cost NUMERIC NOT NULL, time datetime NOT NULL, FOREIGN KEY(user_id) REFERENCES users(id))\")\n bro = db.execute(\"SELECT cash FROM users WHERE id = ?\", session[\"user_id\"])\n cost = (lookup(symbol)[\"price\"]) * int(request.form.get(\"shares\"))\n money = bro[0][\"cash\"]\n money = money + cost\n db.execute(\"UPDATE users SET cash = ? 
WHERE id = ?\", money, session[\"user_id\"])\n db.execute(\"INSERT INTO sells(user_id, symbol, name, price, shares, cost, time) VALUES (:user_id, :symbol, :name, :price, :shares, :cost, :time)\", user_id = session[\"user_id\"], symbol = lookup(symbol)[\"symbol\"], name = lookup(symbol)[\"name\"], price = lookup(symbol)[\"price\"], shares = shares, cost = cost, time = datetime.datetime.now())\n db.execute(\"INSERT INTO hist(user_id, typ, symbol, name, price, nos, cost, time) VALUES (:user_id, :typ, :symbol, :name, :price, :nos, :cost, :time)\", user_id = session[\"user_id\"], typ = \"SOLD\", symbol = lookup(symbol)[\"symbol\"], name = lookup(symbol)[\"name\"], price = lookup(symbol)[\"price\"], nos = shares, cost = cost, time = datetime.datetime.now())\n\n db.execute(\"UPDATE ind SET nos = ? WHERE symbol = ? AND user_id = ?\", int(hav[0][\"nos\"]) - shares, request.form.get(\"symbol\"), session[\"user_id\"])\n hav = db.execute(\"SELECT nos FROM ind WHERE symbol = ? AND user_id = ?\", request.form.get(\"symbol\"), session[\"user_id\"])\n if int(hav[0][\"nos\"]) == 0:\n db.execute(\"DELETE FROM ind WHERE symbol = ? AND user_id = ?\", request.form.get(\"symbol\"), session[\"user_id\"])\n return redirect(\"/\")\n\n else:\n stocks = db.execute(\"SELECT * FROM ind WHERE user_id = ?\", session[\"user_id\"])\n\n return render_template(\"sell.html\", stocks = stocks)", "def sell(self, price, volume):\r\n self.order(\"ask\", price, volume)", "def sell():\n \n # if user reached route via POST, check all fields are filled\n if request.method == \"POST\":\n if not request.form.get(\"symbol\") or not request.form.get(\"shares\"):\n return apology(\"must provide symbol and number of shares\")\n \n # use lookup function to get stock info\n quote = lookup(request.form.get(\"symbol\"))\n \n # ensure validity of form\n if quote == None:\n return apology(\"invalid symbol\")\n if not request.form.get(\"shares\").isdigit():\n return apology(\"must provide positive integer\")\n \n # initiate variables\n shares = int(request.form.get(\"shares\"))\n stocks = []\n \n # obtain user's stock information from portfolio database\n stocks = db.execute(\"SELECT shares FROM portfolio WHERE id = :id AND symbol = :symbol\", id=session[\"user_id\"], symbol = quote[\"symbol\"])\n \n # check that user actually owns enough stock, or any stock at all\n if stocks == []:\n return apology(\"you don't own any of this stock\")\n if shares > stocks[0][\"shares\"]:\n return apology(\"invalid number of shares\")\n \n # calculate price per share and cost of all shares\n price = round(float(quote[\"price\"]),2)\n cost = round(float(shares * price),2)\n \n # update user's cash balance\n db.execute(\"UPDATE users SET cash = cash + :cost WHERE id = :id\", cost = cost, id=session[\"user_id\"])\n \n # if there are still shares leftover after sale, update row\n if shares < stocks[0][\"shares\"]:\n db.execute(\"UPDATE portfolio SET shares = shares - :shares WHERE id = :id AND symbol = :symbol\", id=session[\"user_id\"], shares = shares, symbol = quote[\"symbol\"])\n \n # otherwise, if not shares leftover, remove row from portfolio entirely\n elif shares == stocks[0][\"shares\"]:\n db.execute(\"DELETE FROM portfolio WHERE id = :id AND symbol = :symbol\", id=session[\"user_id\"], symbol = quote[\"symbol\"])\n \n db.execute(\"INSERT INTO history (id,symbol,shares,price,date) VALUES (:id,:symbol,:shares,:price,datetime('now'))\",id=session[\"user_id\"], symbol=quote[\"symbol\"],shares=-shares,price=price)\n \n flash('Sold!')\n return 
redirect(url_for(\"index\"))\n \n # else if user reached route via GET\n else:\n return render_template(\"sell.html\")", "def Sell(self, X, Y):\n if (self.share[X] - int(Y)) * (1 + self.taxe) < 0:\n raise TradeError(\"Not Enough Share\")\n self.share[X] -= int(Y)\n self.money += int(Y) * self.price[X][0] * (1 + self.taxe)\n print(f\"SELL:{str(int(Y))}:{str(X)}\", flush = True)", "def sell():\n if request.method == \"POST\":\n symbol = request.form.get(\"Symbol\")\n if symbol is None:\n return apology(\"Enter a symbol\", 403)\n shares = request.form.get(\"Shares\")\n if int(shares) < 0:\n return apology(\"Please enter postive shares\", 403)\n\n stock = lookup(symbol)\n rows = db.execute(\"SELECT Symbol, SUM(Shares) as totalShares FROM cash WHERE id=:id GROUP BY Symbol HAVING totalShares > 0\", id=session[\"user_id\"])\n for row in rows:\n if row[\"Symbol\"] == symbol:\n if int(shares) > row[\"totalShares\"]:\n return apology(\"Too many shares\")\n\n rows = db.execute(\"SELECT Cash FROM cash WHERE id=:id\", id=session[\"user_id\"])\n cash = rows[0][\"Cash\"]\n\n current_cash = cash + int(shares)*stock[\"price\"]\n db.execute(\"UPDATE cash SET Cash=:current_cash WHERE id=:id\", current_cash = current_cash, id=session[\"user_id\"])\n db.execute(\"INSERT INTO cash (id, Symbol, Name, Shares) VALUES (:id, :Symbol, :Name, :Shares)\", id=session[\"user_id\"], Symbol=stock[\"symbol\"], Name=stock[\"name\"], Shares=-1*int(shares))\n\n flash(\"Sold!\")\n return redirect(\"/\")\n\n else:\n rows = db.execute(\"SELECT Symbol FROM cash WHERE id=:id GROUP BY Symbol HAVING SUM(Shares) > 0\", id=session[\"user_id\"])\n # Shorthand for obtaining the symbol for every row in rows. So would output AAPL e.g.\n return render_template(\"sell.html\", symbols=[ row[\"Symbol\"] for row in rows ])", "def sell(self,\n currency_pair,\n rate,\n amount):\n pass", "def sell_all(self):\n\n from_symbol = self.symbol\n to_symbol = self.currency\n price = self.data[0].close\n amount = self.portfolio['buy_sell_amount'][self.currency]\n date = self.date\n\n if self.slippage:\n slip_factor = (self.data[-1].high - self.data[-1].close)*self.slippage\n price -= np.abs(slip_factor)\n\n self.trade_manager.sell_all(from_symbol, to_symbol, price, amount, date)", "def sell():\n userid = session[\"user_id\"]\n stocks = db.execute(\"SELECT symbol FROM purchase WHERE userid = :userid GROUP BY symbol\",userid=userid)\n\n if request.method == \"POST\":\n symbol_sell = request.form.get(\"symbol\")\n shares_sell = float(request.form.get(\"shares\"))\n shares_info = db.execute(\"SELECT SUM(shares) AS shares_sum FROM purchase\\\n WHERE userid = :userid GROUP BY symbol HAVING symbol = :symbol\", userid=userid, symbol=symbol_sell)\n if shares_info[0][\"shares_sum\"] < shares_sell:\n return apology(\"You don't have that many shares\", 400)\n else:\n current = lookup(symbol_sell)\n price = current[\"price\"]\n amount = -shares_sell * price\n cash = db.execute(\"SELECT cash FROM users WHERE id =:userid\", userid=userid)\n balance = cash[0][\"cash\"] - amount\n db.execute(\"INSERT INTO purchase (userid, symbol, shares, tot) VALUES(:userid, :symbol, :shares, :tot)\",\n userid=userid, symbol=symbol_sell, shares=-shares_sell, tot=amount)\n db.execute(\"UPDATE users SET cash = :balance WHERE id = :userid\", balance=balance, userid=userid)\n flash(\"SOLD!!\")\n return redirect(\"/\")\n else:\n list_symbol = list()\n for symbol in stocks:\n shares_info = db.execute(\"SELECT SUM(shares) AS shares_sum FROM purchase\\\n WHERE userid = :userid GROUP BY symbol 
HAVING symbol = :symbol\", userid = userid, symbol=symbol[\"symbol\"])\n current_shares = shares_info[0]\n if shares_info[0][\"shares_sum\"]:\n list_symbol.append(symbol[\"symbol\"])\n return render_template(\"sell.html\", list_symbol=list_symbol)", "def sell():\n if request.method == \"POST\":\n symbol = request.form.get('symbol')\n shares = request.form.get('shares')\n\n if not symbol or not shares or symbol == \"Select Stock\":\n return apology(\"Please input a valid symbol and number of shares\")\n elif int(shares) <= 0:\n return apology(\"Please input a positive number for shares\")\n else:\n symbol = symbol.lower()\n shares = int(shares)\n get_cur_shares = db.execute(\n \"SELECT SUM(shares) FROM History WHERE id = :id AND symbol = :symbol GROUP BY symbol\", id=session['user_id'], symbol=symbol)\n try:\n cur_shares = [share['SUM(shares)'] for share in get_cur_shares][0]\n except IndexError:\n return apology(\"Please input a valid number of shares\")\n if shares > cur_shares:\n return apology(\"Sorry, you don't have enough shares to sell\")\n else:\n cur_price = float(lookup(symbol)['price'])\n sell_val = cur_price * float(shares)\n sell_val = float(sell_val)\n get_bal = db.execute(\"SELECT cash FROM users WHERE id = :id\", id=session['user_id'])\n balance = [bal['cash'] for bal in get_bal][0]\n balance = float(balance)\n new_balance = balance + sell_val\n company = lookup(symbol)['name']\n new_database_balance = db.execute(\"UPDATE users SET cash = :cash WHERE id = :id\",\n cash=new_balance, id=session['user_id'])\n new_database_transaction = db.execute(\"INSERT INTO History ('symbol', 'company', 'shares', 'price', 'totalprice', 'id', 'transaction_type') VALUES (:symbol, :company, :shares, :price, :totalprice, :id, :transaction_type)\",\n symbol=symbol, company=company, shares=-shares, price=cur_price,\n totalprice=sell_val, id=session['user_id'], transaction_type=\"SELL\")\n return redirect(\"/\")\n else:\n get_symbols = db.execute(\n \"SELECT symbol FROM History WHERE id = :id GROUP BY symbol HAVING SUM(shares) > 0\", id=session['user_id'])\n if not get_symbols:\n return apology(\"Sorry, could not find valid symbol\")\n else:\n symbols = [symbol['symbol'] for symbol in get_symbols]\n return render_template(\"sell.html\", symbols=symbols)", "def sell():\n \n if request.method == \"POST\":\n if not request.form.get('symbol'):\n return apology('must provide symbol')\n \n if not request.form.get('shares'):\n return apology('must provide shares')\n \n symbol = (request.form.get(\"symbol\")).upper()\n \n row = db.execute(\"SELECT * FROM users WHERE id=:id\", id=session['user_id'])\n username = row[0]['username']\n \n result = db.execute(\"SELECT * FROM portfolio WHERE symbol=:symbol AND username=:username\", symbol=symbol, username=username)\n if not result:\n return apology('no symbol available')\n \n shares = int(request.form.get('shares'))\n \n if shares <= 0:\n return apology('shares not positive')\n \n row = db.execute(\"SELECT * FROM portfolio WHERE symbol=:symbol AND username=:username\", symbol=symbol, username=username)\n old_shares = row[0]['shares']\n \n if shares > old_shares:\n return apology('number exceeds available shares')\n \n new_shares = old_shares - shares\n \n if new_shares == 0:\n db.execute(\"DELETE FROM portfolio WHERE symbol=:symbol AND username=:username\", symbol=symbol, username=username)\n else:\n db.execute(\"UPDATE portfolio SET shares=:shares WHERE symbol=:symbol AND username=:username\", shares=new_shares, symbol=symbol, username=username)\n \n quote = 
lookup(symbol)\n price = quote['price']\n total_p = price * shares\n \n row = db.execute(\"SELECT * FROM users WHERE id=:id\", id=session['user_id'])\n old_cash = row[0]['cash']\n \n new_cash = old_cash + total_p\n \n db.execute(\"UPDATE users SET cash=:cash WHERE id=:id\", cash=new_cash, id=session['user_id'])\n \n #current_time = time.strftime(time.localtime(\"%H:%M:%S %m/%d/%Y\"))\n current_time = time.asctime( time.localtime(time.time()) )\n db.execute(\"INSERT INTO history (username, time, symbol, shares) VALUES (:username, :time, :symbol, :shares)\", username=username,time=current_time,symbol=symbol,shares=0-shares)\n \n # redirect user to home page\n return redirect(url_for(\"index\"))\n \n # else if user reached route via GET (as by clicking a link or via redirect)\n else:\n return render_template(\"sell.html\")", "def sell():\n\n if request.method == \"POST\":\n entry = db.execute(\"SELECT * FROM users WHERE id=:id\",\n id=session['user_id'])\n user = entry[0]['username']\n owned = db.execute(\"SELECT * FROM transactions WHERE user=:user GROUP BY symbol HAVING SUM(shares) > 0\",\n user=user)\n symbol = request.form.get(\"symbol\")\n shares = int(request.form.get(\"shares\"))\n\n if not symbol:\n return apology(\"please select a valid symbol\")\n\n target_stock = db.execute(\"SELECT *, sum(shares) FROM transactions WHERE user=:user AND symbol=:symbol\",\n user=user, symbol=symbol)\n print(target_stock)\n if not shares:\n return apology(\"must provide how many shares to sell\")\n\n elif shares > target_stock[0]['sum(shares)'] or shares < 1:\n return apology(\"shares must be more than 0 and less than \" + str(target_stock[0]['shares']))\n\n query = lookup(symbol)\n price = query['price']\n name = query['name']\n cash = entry[0]['cash']\n\n db.execute(\"INSERT INTO transactions (id, user, symbol, name, price, shares) VALUES(NULL, :user, :symbol, :name, :price, :shares)\",\n user=user, symbol=symbol, name=target_stock[0]['name'], price=price, shares=-int(shares))\n db.execute(\"UPDATE users SET cash=:cash WHERE id = :id\",\n cash=cash+price*shares, id=session['user_id'])\n\n return redirect(url_for(\"index\"))\n\n else:\n entry = db.execute(\"SELECT * FROM users WHERE id=:id\",\n id=session['user_id'])\n user = entry[0]['username']\n owned = db.execute(\"SELECT * FROM transactions WHERE user=:user GROUP BY symbol HAVING SUM(shares) > 0\",\n user=user)\n\n return render_template(\"sell.html\", stocks=owned)", "def sell():\n if request.method == \"GET\":\n rows = db.execute(text(\n \"SELECT symbol, sum(shares) as shares FROM transactions \"\n \"WHERE user_id=:id GROUP BY symbol\"),\n id=session[\"user_id\"])\n symbols = [row[\"symbol\"] for row in rows if row[\"shares\"]]\n return render_template(\"sell.html\", symbols=symbols,\n symbol=request.args.get(\"symbol\"))\n\n if not request.form.get(\"symbol\"):\n return apology(\"missing symbol\", 400)\n elif not request.form.get(\"shares\"):\n return apology(\"missing shares\", 400)\n\n owned_shares = db.execute(text(\n \"SELECT sum(shares) as shares FROM transactions \"\n \"WHERE user_id=:id AND symbol=:symbol\"),\n id=session[\"user_id\"],\n symbol=request.form.get(\"symbol\")).fetchone()[\"shares\"]\n requested_shares = int(request.form.get(\"shares\"))\n if requested_shares > owned_shares:\n return apology(\"too many shares\", 400)\n\n quote = lookup(request.form.get(\"symbol\"))\n db.execute(text(\n \"INSERT INTO transactions (user_id, symbol, shares, price) \"\n \"VALUES (:u, :sy, :sh, :p)\"),\n u=session[\"user_id\"],\n 
sy=request.form.get(\"symbol\"),\n sh=-requested_shares,\n p=quote[\"price\"])\n sell_price = int(request.form.get(\"shares\")) * quote[\"price\"]\n db.execute(text(\"UPDATE users SET cash=cash+:c WHERE id=:id\"),\n c=sell_price,\n id=session[\"user_id\"])\n flash(\"Sold!\")\n return redirect(\"/\")", "def sell(self, amount):\n trades = []\n sell_amount = 0\n precision = pow(10, self.pair.get_base_token().get_decimals() - self.pair.get_quote_token().get_decimals())\n for i in range(len(self.book[Trade.WAY_BUY])):\n offer = self.book[Trade.WAY_BUY][i]\n amount_quote = offer.get_quote_amount()\n amount_base = offer.get_base_amount()\n price = offer.get_price()\n\n if amount_quote >= amount:\n tmp = amount * price * precision\n tmp = int(tmp)\n trade = Trade(self.pair, Trade.WAY_SELL, price, tmp, amount, time.time(), fee_currency=self.pair.get_exchange().get_fee_token())\n sell_amount = sell_amount + trade.get_amount_base()\n trades.append(trade)\n return trades, int(sell_amount)\n\n '''\n Is the offered amount less than needed, you can only buy the offered amount and continue\n '''\n trade = Trade(self.pair, Trade.WAY_SELL, price, amount_base, amount_quote, time.time(), fee_currency=self.pair.get_exchange().get_fee_token())\n amount = amount - amount_quote\n sell_amount = sell_amount + trade.get_amount_base()\n trades = trades + [trade]\n\n '''\n Not enough volume or amount to high\n '''\n raise KeyError(\"Not enough offers in orderbook. Low volume or amount to high.\")", "def sell():\n\n if request.method == \"POST\":\n sellstock = request.form.get(\"symbol\")\n sellq = int(request.form.get(\"shares\"))\n if sellstock == None:\n return apology(\"Please select a stock symbol to sell.\")\n if sellq < 0:\n return apology(\"Please enter a valid quantity of stocks to sell\")\n invq = db.execute(\"SELECT quantity FROM inventory WHERE userid = :uid AND symbol = :sy\",\n {\"uid\":session[\"user_id\"],\"sy\":sellstock})[0][\"quantity\"]\n if sellq > invq:\n return apology(\"You don't have enough shares.\")\n stock = lookup(sellstock)\n cost = round(sellq*stock[\"price\"], 2)\n db.execute(\"INSERT INTO shares (stock,symbol,value,quantity,cost,userid) VALUES(:st,:sy,:va,:qu,:co,:uid)\",\n {\"st\":stock[\"name\"],\"sy\":sellstock,\"va\":stock[\"price\"],\"qu\":sellq,\"co\":cost,\"uid\":session[\"user_id\"]})\n db.execute(\"UPDATE inventory SET quantity = :qu WHERE userid =:uid AND symbol = :sy\",\n {\"qu\":(invq-sellq),\"uid\":session[\"user_id\"],\"sy\":sellstock})\n db.execute(\"UPDATE users SET cash = cash + :cash WHERE id =:uid\", {\"cash\":cost,\"uid\":session[\"user_id\"]})\n flash(\"Shares successfully sold!\")\n return redirect(\"/\")\n inventory = db.execute(\"SELECT symbol FROM inventory WHERE userid = :uid\", uid=session[\"user_id\"])\n return render_template(\"sell.html\", context = inventory)", "def sellshares():\n # Initialise buy and sell share forms\n sellform = SellShareForm()\n # Validate and process form data\n if(sellform.validate_on_submit()):\n # Buys shares\n issuerID = sellform.sellsharecode.data\n quantity = sellform.sellquantity.data\n userID = current_user.userID\n # Call buyshare API\n sellshare = gdb.sellshare(userID, issuerID, quantity)\n if(sellshare):\n # Flash with success message\n flash(\"Share sale successful!\", category=\"success\")\n else:\n # Flash with warning message\n flash(\"Share sale unsuccessful!\", category=\"error\")\n # Redirect to reffering page or dashboard\n return redirect(request.referrer or url_for('main.dashboard'))", "def sell():\n\n if 
request.method == \"POST\":\n symbol = request.form.get(\"symbol\")\n if not symbol:\n return apology(\"Must enter a symbol\", 400)\n num_shares = request.form.get(\"shares\")\n if not num_shares:\n return apology(\"Must enter some number of shares to sell\", 400)\n company_quote = lookup(symbol)\n if company_quote == None:\n return apology(\"Invalid Symbol\", 400)\n num_shares = int(num_shares)\n if num_shares <= 0:\n return apology(\"Must enter a positve number of shares to sell\", 400)\n\n rows = db.execute(\"SELECT id, symbol, numshares FROM totalshares WHERE id = :id AND symbol = :symbol\",\n id=session[\"user_id\"], symbol=company_quote[\"symbol\"])\n if len(rows) != 1:\n return apology(\"You do not have shares of \" + symbol, 400)\n if num_shares > rows[0][\"numshares\"]:\n return apology(\"You cannot sell more shares than you have\", 400)\n\n sale_value = num_shares * company_quote[\"price\"]\n\n balance = db.execute(\"SELECT cash FROM users WHERE id = :id\",\n id=session['user_id'])\n balance = balance[0][\"cash\"]\n new_balance = balance + sale_value\n date_time = datetime.now().strftime(\"%d/%m/%Y %H:%M:%S\")\n # Update history in history table\n return_val = db.execute(\"INSERT INTO 'history' (id, symbol, shares, price, transacted) VALUES (:id, :symbol, :shares, :price, :transacted)\",\n id=session[\"user_id\"], symbol=company_quote[\"symbol\"], shares=num_shares*-1, price=company_quote[\"price\"], transacted = date_time)\n if return_val == None:\n return apology(\"something went wrong\", 403)\n\n #Update total number and value of each shares (symbol) held in totalshares table\n rows = db.execute(\"SELECT id, symbol, numshares, totalvalue FROM totalshares WHERE id = :id AND symbol = :symbol\",\n id=session[\"user_id\"], symbol=company_quote[\"symbol\"])\n new_numshares = rows[0][\"numshares\"] - num_shares\n new_totalvalue = rows[0][\"totalvalue\"] - sale_value\n return_val = db.execute(\"UPDATE totalshares SET numshares = :new_numshares, totalvalue = :new_totalvalue WHERE id = :id AND symbol = :symbol\",\n new_numshares=new_numshares, new_totalvalue=new_totalvalue, id=session[\"user_id\"], symbol=company_quote[\"symbol\"])\n if return_val == None:\n return apology(\"something went wrong\", 403)\n\n #Update balance in users table\n return_val = db.execute(\"UPDATE users SET cash = :cash WHERE id = :id\", cash=new_balance, id=session[\"user_id\"])\n if return_val != None:\n return redirect(\"/\")\n else:\n return apology(\"something went wrong\", 403)\n else:\n rows = db.execute(\"SELECT symbol, numshares FROM totalshares WHERE id = :id\", id=session[\"user_id\"])\n symbol_options = []\n if rows != None and len(rows) > 0:\n for row in rows:\n if row[\"numshares\"] > 0:\n symbol_options.append(row[\"symbol\"])\n return render_template(\"sell.html\", symbol_options=symbol_options)", "async def sell(self, ctx, amount : float, symbol, date=None):\n user = ctx.message.author\n portfolio = GetPortfolio(user.id, util.GetTimestamp(date))\n portfolio.Sell(amount, symbol)\n await self.bot.say('%s\\'s portfolio is now worth $%.2f.' 
% \n (ctx.message.author, portfolio.Value()))\n portfolio.Save()", "def sell():\n return apology(\"TODO\")\n if request.method == \"POST\":\n # Ensure symbol was submitted\n symbol = request.form.get(\"symbol\")\n if not symbol:\n return apology(\"must provide symbol\", 403)\n symbol = symbol.upper()\n\n # Ensure number of shares was submitted\n shares = request.form.get(\"shares\")\n if not shares:\n return apology(\"must provide shares\", 403)\n\n return render_template(\"sell.html\")", "def sell():\n if request.method == \"POST\":\n\n if not request.form.get(\"shares\"):\n return apology(\"gimme share\", 400)\n if not lookup(request.form.get(\"symbol\")):\n return apology(\"not correct stock\", 400)\n if not request.form.get(\"shares\").isdigit():\n return apology(\"sorry bro\", 400)\n\n quote = lookup(request.form.get(\"symbol\"))\n\n money_list = db.execute(\"SELECT cash FROM users WHERE id = :current_id\", current_id=session[\"user_id\"])\n available_money = money_list[0][\"cash\"]\n\n total_price = int(request.form.get(\"shares\")) * float(quote[\"price\"])\n\n units_list = db.execute(\"SELECT SUM(units) FROM transactions WHERE id = :current_id AND stock = :stock_code\",\n current_id=session[\"user_id\"], stock_code=request.form.get(\"symbol\"))\n available_units = units_list[0][\"SUM(units)\"]\n\n if available_units < int(request.form.get(\"shares\")):\n return apology(\"no units bro\", 400)\n\n new_cash = available_money + total_price\n\n updating = db.execute(\"UPDATE users SET cash = :upd_cash WHERE id = :current_id\",\n upd_cash=new_cash, current_id=session[\"user_id\"])\n insertion = db.execute(\"INSERT INTO transactions (id, stock, units, price, time, type) VALUES (:current_id, :stock, :units, :price, :now, :type)\",\n current_id=session[\"user_id\"], stock=request.form.get(\"symbol\"), units=request.form.get(\"shares\"), price=float(quote[\"price\"]), now=datetime.datetime.now(), type=\"S\")\n\n money_upd_list = db.execute(\"SELECT cash FROM users WHERE id = :current_id\", current_id=session[\"user_id\"])\n available_money_upd = money_upd_list[0][\"cash\"]\n\n return render_template(\"sell_result.html\", shares=request.form.get(\"shares\"),\n symbol=request.form.get(\"symbol\"),\n price=usd(total_price),\n cash=usd(new_cash))\n else:\n available_stocks_info = db.execute(\"SELECT stock FROM transactions WHERE id = :current_id\", current_id=session[\"user_id\"])\n available_stocks_list = []\n for element in available_stocks_info:\n if element[\"stock\"] not in available_stocks_list:\n available_stocks_list.append(element[\"stock\"])\n\n return render_template(\"sell.html\", available_stocks=available_stocks_list)", "def sell():\n if request.method == \"POST\":\n if not request.form.get(\"symbol\"):\n return apology(\"must provide symbol\")\n elif not request.form.get(\"sharesnumber\"):\n return apology(\"must provide no of shares to sell\")\n elif '.' 
in request.form.get(\"sharesnumber\"):\n return apology(\"No of shares is positive integer Invalid!!\")\n elif not request.form.get(\"sharesnumber\").isdigit():\n return apology(\"No of shares is positive integer Invalid!!\")\n elif not int(request.form.get(\"sharesnumber\")) > 0:\n return apology(\"No of shares is positive value Invalid!!\")\n \n result_dict = lookup(request.form.get(\"symbol\"))\n \n if result_dict == None:\n return apology(\"Symbol does not exist\")\n \n \n #Check No of Shares\n no_of_shares = db.execute(\"SELECT * FROM netshares WHERE user_id = :id AND symbol = :symbol\",id=session[\"user_id\"],symbol =request.form.get(\"symbol\"))\n no_of_shares = int(no_of_shares[0]['shares'])\n if int(request.form.get(\"sharesnumber\")) > no_of_shares:\n return apology(\"Sorry!! Don't Have Enough shares\")\n \n result_cash = db.execute(\"SELECT * from users where id = :id\",id=session[\"user_id\"])\n net_cash = result_cash[0][\"cash\"]\n net_worth = int(request.form.get(\"sharesnumber\")) * result_dict['price']\n \n \n \n #Update Cash\n net_cash = net_cash + net_worth\n db.execute(\"UPDATE users SET cash = :cash WHERE id = :id\",cash= net_cash,id=session[\"user_id\"])\n \n #Update History Tables\n \n db.execute(\"INSERT INTO history(user_id,symbol,price,shares) VALUES(:id,:symbol,:price,:shares) \",id=session[\"user_id\"],symbol=result_dict['symbol'],price=result_dict['price'],shares=(-1)*int(request.form.get(\"sharesnumber\")))\n \n #Check Whether user has shares for same symbol\n rows = db.execute(\"SELECT * FROM netshares WHERE user_id = :id AND symbol=:symbol\",id=session[\"user_id\"],symbol=result_dict['symbol'])\n #Update NetShares Table\n if len(rows) == 0:\n db.execute(\"INSERT INTO netshares(user_id,symbol,shares) VALUES(:id,:symbol,:shares)\",id=session[\"user_id\"],symbol=result_dict['symbol'],shares=request.form.get(\"sharesnumber\"))\n else:\n db.execute(\"UPDATE netshares SET shares=:shares WHERE user_id = :id AND symbol=:symbol\",shares= -int(request.form.get(\"sharesnumber\"))+int(rows[0]['shares']),id=session[\"user_id\"],symbol=result_dict['symbol'])\n return redirect(url_for(\"index\"))\n \n else:\n return render_template(\"sell.html\")\n #return apology(\"TODO\")", "def sell():\n if request.method == \"POST\":\n if not request.form.get(\"symbol\"):\n return apology(\"must select a stock\", 400)\n elif not request.form.get(\"shares\"):\n return apology(\"must provide number of shares\", 400)\n elif float(request.form.get(\"shares\")) <= 0:\n return apology(\"number of shares must be greater than one\", 400)\n elif float(request.form.get(\"shares\")) > db.execute(\"SELECT number FROM portfolio WHERE username=:username AND symbol=:symbol\",\n username=db.execute(\"SELECT username FROM users WHERE id=:userId\",\n userId=session[\"user_id\"])[0][\"username\"],\n symbol=request.form.get(\"symbol\"))[0][\"number\"]:\n return apology(\"you don't own enough shares\", 400)\n\n numberOfShares = float(request.form.get(\"shares\"))\n\n priceOfEachShare = db.execute(\"SELECT price FROM portfolio WHERE username=:username AND symbol=:symbol\",\n username=db.execute(\"SELECT username FROM users WHERE id=:userId\",\n userId=session[\"user_id\"])[0][\"username\"],\n symbol=request.form.get(\"symbol\"))[0][\"price\"]\n\n totalValue = numberOfShares * priceOfEachShare\n\n db.execute(\"UPDATE users SET cash = cash + {0} WHERE id=:userId\".format(totalValue), userId=session[\"user_id\"])\n\n db.execute(\"UPDATE portfolio SET number = number - {0} WHERE username=:username AND 
symbol=:symbol\".format(request.form.get(\"shares\")),\n username=db.execute(\"SELECT username FROM users WHERE id=:userId\", userId=session[\"user_id\"])[0][\"username\"],\n symbol=request.form.get(\"symbol\"))\n\n if db.execute(\"SELECT number FROM portfolio WHERE username=:username AND symbol=:symbol\",\n username=db.execute(\"SELECT username FROM users WHERE id=:userId\", userId=session[\"user_id\"])[0][\"username\"],\n symbol=request.form.get(\"symbol\"))[0][\"number\"] == 0:\n db.execute(\"DELETE FROM portfolio WHERE username=:username AND symbol=:symbol\",\n username=db.execute(\"SELECT username FROM users WHERE id=:userId\", userId=session[\"user_id\"])[0][\"username\"],\n symbol=request.form.get(\"symbol\"))\n\n db.execute(\"INSERT INTO history (username, symbol, buyorsell, number, price, date) VALUES(:username, :symbol, :buyorsell, :number, :price, :date)\",\n username=db.execute(\"SELECT username FROM users WHERE id=:userId\", userId=session[\"user_id\"])[0][\"username\"],\n symbol=request.form.get(\"symbol\"), buyorsell=0, number=float(request.form.get(\"shares\")),\n price=priceOfEachShare, date=datetime.datetime.utcnow())\n\n return redirect(\"/\")\n\n else:\n symbolsList = db.execute(\"SELECT symbol FROM portfolio WHERE username=:username\",\n username=db.execute(\"SELECT username FROM users WHERE id=:userId\", userId=session[\"user_id\"])[0][\"username\"])\n return render_template(\"sell.html\", stocks=symbolsList)", "def attempt_market_sell(self, decision: Decision, state: Series, is_backtest: bool = False, crypto: bool = False) -> Transaction:\n # Currently, selling will only support closing out our entire position\n # TODO: support partial sells in the future\n share_quantity = decision.quantity\n try: latest_price = self.latest_price(decision.symbol, state, is_backtest, crypto, 'sell')\n except: return Transaction(False, TransactionType.MarketSell, 0, 0, decision, state['date'])\n\n strike_price: float\n succeeded = True\n if is_backtest:\n c_type = 'crypto' if crypto else 'stock'\n spread = .01 if c_type == 'stock' else 0\n sell_fee = state['close'] * self.get_fee_pct(c_type)[1] + self.get_fixed_fee(c_type, state['close'], share_quantity)\n self.total_fees += sell_fee\n self.trade_volume_shares += share_quantity\n print(f'sell fee: {sell_fee} | trade volume: {self.trade_volume} | total fees: {self.total_fees}')\n strike_price = state['close'] - sell_fee - spread\n else:\n # TODO: Communicate with market here\n try:\n if crypto:\n print('attempting crypto market sell @ ', latest_price)\n (strike_price, share_quantity, succeeded) = asyncio.get_event_loop().run_until_complete(wait_for_cb_order_fill(self.cb_client, decision.contract, 'sell', share_quantity, latest_price))\n else:\n print('attempting ib market sell @ ', latest_price)\n # sell_order = MarketOrder('SELL', share_quantity)\n sell_order = LimitOrder('SELL', share_quantity, latest_price)\n (strike_price, share_quantity, succeeded) = asyncio.get_event_loop().run_until_complete(wait_for_ib_order_fill(self.ib_client.ib, sell_order, decision.contract))\n\n\n except Exception as e: # Failed to sell at limit price\n succeeded = False\n strike_price = 0\n share_quantity = 0\n \n self.trade_volume += (strike_price * share_quantity)\n return Transaction(succeeded, TransactionType.MarketSell, strike_price, share_quantity, decision, state['date'])", "def sell():\n\n table = db.execute(\"SELECT symbol FROM portfolio WHERE id=:id\", id=session[\"user_id\"])\n symbols = []\n for i in range(len(table)):\n 
symbols.append(table[i][\"symbol\"])\n\n if request.method == \"POST\":\n\n # Ensure symbol was submitted\n if not request.form.get(\"symbol\"):\n return apology(\"must provide symbol\", 403)\n\n elif not request.form.get(\"shares\"):\n return apology(\"missing shares\", 400)\n\n owned_shares = int(db.execute(\"SELECT shares FROM portfolio where id=:id AND symbol=:symbol\",\n id=session[\"user_id\"], symbol=request.form.get(\"symbol\"))[0][\"shares\"])\n\n if owned_shares < int(request.form.get(\"shares\")):\n return apology(\"Too many shares\", 400)\n\n updated_shares = owned_shares - int(request.form.get(\"shares\"))\n\n # update shares in portfolio\n if updated_shares > 0:\n db.execute(\"UPDATE portfolio SET shares=:shares WHERE id=:id AND symbol=:symbol\",\n shares=updated_shares, id=session[\"user_id\"], symbol=request.form.get(\"symbol\"))\n\n else:\n db.execute(\"DELETE FROM portfolio WHERE id=:id AND symbol=:symbol\",\n id=session[\"user_id\"], symbol=request.form.get(\"symbol\"))\n\n # update cash in database\n quote = lookup(request.form.get(\"symbol\"))\n amount = quote[\"price\"] * float(request.form.get(\"shares\"))\n db.execute(\"UPDATE users SET cash = cash + :amount WHERE id=:id\", amount=amount, id=session[\"user_id\"])\n\n db.execute(\"INSERT INTO histories (symbol, shares, price, id) VALUES(:symbol, :shares, :price, :id)\",\n symbol=quote[\"symbol\"], shares=0-int(request.form.get(\"shares\")), price=usd(quote[\"price\"]), id=session[\"user_id\"])\n\n flash(\"Sold!\")\n return redirect(\"/\")\n\n else:\n return render_template(\"sell.html\", symbols=symbols)", "async def sell(self, ctx, quantity: int, symbol: str):\r\n symbol = symbol.upper()\r\n author = ctx.author\r\n with DB() as db:\r\n company = await self.get_active_company(ctx, db, author)\r\n await self.market_open_check(ctx)\r\n await self.stock_symbol_check(ctx, db, symbol)\r\n \r\n inventory = self.iex.get_held_stock_quantity(db, company.id, symbol)\r\n if inventory < quantity:\r\n await ctx.send(f\"``{company.name}\\n{inventory} {symbol}``\")\r\n raise StonksError()\r\n\r\n price = self.iex.price(symbol)\r\n value = price * quantity\r\n self.iex.sell(db, company.id, symbol, quantity, price)\r\n await ctx.send(f\"``+{value} {company.name} ⯬ {quantity} {symbol} @ {price}``\")", "def sell():\n username = session.get(\"username\")\n if request.method == \"POST\":\n symbol = request.form.get(\"symbol\")\n req_quantity = request.form.get(\"shares\")\n if not req_quantity.isdigit() or int(req_quantity)<=0:\n return apology(\"Quantity must be positive integer\", 400)\n req_quantity = int(req_quantity)\n status = \"sold\"\n\n time = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n\n\n owned_stock = db.execute(\"SELECT SUM(quantity) FROM history WHERE username=:username GROUP BY stock_symbol HAVING stock_symbol=:symbol\",\n username=username, symbol=symbol)\n if owned_stock:\n owned_quantity = owned_stock[0][\"SUM(quantity)\"]\n stock = lookup(symbol)\n price = stock[\"price\"]\n name = stock[\"name\"]\n else:\n owned_quantity = 0\n if owned_quantity>=req_quantity:\n total_value = req_quantity * price\n db.execute(\"INSERT INTO history (username, stock_symbol, unit_price, time, quantity, stock_name, status) VALUES (:username, :symbol, :price, :time, :quantity, :name, :status)\",\n username=username, symbol=symbol, price=price, time=time, quantity=-req_quantity, name=name, status=status)\n db.execute(\"UPDATE users SET cash = cash+:total_value WHERE username=:username\",\n total_value=total_value, 
username=username)\n cash = db.execute(\"SELECT cash FROM users WHERE username=:username\", username=username)[0][\"cash\"]\n message = f\"Recorded sold {req_quantity} share(s) of {name} total {usd(total_value)}, your new cash balance is {usd(cash)}\"\n return render_template(\"sell.html\", message = message)\n else:\n return apology(\"Insufficient shares\", 400)\n # if db.execute()\n else:\n stock_options = db.execute(\"SELECT stock_symbol FROM history WHERE username=:username GROUP BY stock_symbol\", username=username)\n stock_options = [s[\"stock_symbol\"] for s in stock_options]\n\n # print(f\"Stock options: {stock_options}\")\n return render_template(\"sell.html\", options = stock_options)", "def sell():\n userId = session[\"user_id\"]\n\n sharesOwned = db.execute(f\"SELECT symbol, SUM(shares) FROM transactions WHERE user_id={userId} GROUP BY symbol HAVING SUM(shares)>0\")\n\n if request.method == \"GET\":\n\n return render_template(\"sell.html\", sharesOwned=sharesOwned)\n\n elif request.method == \"POST\":\n\n symbolInput = request.form.get(\"symbol\")\n shares = float(request.form.get(\"shares\")) * (-1)\n\n symbolName = lookup(symbolInput)[\"name\"]\n symbolPrice = lookup(symbolInput)[\"price\"]\n symbolTicker = lookup(symbolInput)[\"symbol\"]\n\n shareCount = float(db.execute(f\"SELECT SUM(shares) FROM transactions WHERE user_id={userId} AND symbol='{symbolInput}' GROUP BY symbol HAVING SUM(shares)>0\")[0][\"SUM(shares)\"] * (-1))\n\n if symbolInput != symbolTicker or symbolInput == \"\" or shares == \"\" or shares > 0 or shares < shareCount:\n return apology(\"No sell for you senpai!\")\n\n else:\n totalPrice = shares * symbolPrice\n availableCash = float(db.execute(f\"SELECT cash FROM users WHERE id={userId}\")[0][\"cash\"])\n\n now = datetime.now()\n transTime = now.strftime(\"%d/%m/%Y %H:%M:%S\")\n availableCash -= totalPrice\n\n db.execute(f\"UPDATE users SET cash = '{availableCash}' WHERE id = '{userId}'\")\n\n db.execute(f\"INSERT INTO transactions (trans_time, trans_type, user_id, symbol, price, shares, value, name, current_price) VALUES ('{transTime}','SELL','{userId}','{symbolTicker}','{symbolPrice}','{shares}','{totalPrice}','{symbolName}','{symbolPrice}')\")\n\n return redirect(\"/\")", "def sell():\n if request.method == \"GET\":\n symbols = Records.query.with_entities(Records.symbol).\\\n distinct().filter_by(user_id=session.get(\"user_id\")).all()\n return render_template(\"sell.html\", symbols=symbols)\n\n symbol = request.form.get(\"symbol\")\n shares = int(request.form.get(\"shares\"))\n\n record = db.session.query(db.func.sum(Records.shares).label(\"shares\")).\\\n group_by(Records.user_id).filter_by(symbol=symbol, user_id=session.get('user_id')).one()\n\n if shares > record.shares:\n return apology(f\"You can only sell { record.shares } shares or less than\", 400)\n\n quote = lookup(symbol)\n price = quote['price']\n value = round(shares * price, 2)\n\n user = Users.query.get(session.get('user_id'))\n user.cash += value\n\n record = Records(symbol=quote['symbol'], company_name=quote['name'],\n transact_type=\"sell\", shares=int('-'+str(shares)),\n price=price, user_id=user.id)\n\n db.session.add(record)\n db.session.commit()\n\n flash('Sold')\n return redirect(url_for('index'))", "def sell():\n userid = session[\"user_id\"]\n if request.method == \"GET\":\n symbol = db.execute(\"SELECT symbol FROM purchase WHERE id=:uid\",uid=userid)\n # print(symbol)\n symbols = []\n for s in symbol:\n temp = s[\"symbol\"]\n symbols.append(temp)\n # print(symbols)\n return 
render_template(\"sell.html\", symbols=symbols)\n else:\n symbol_entry = request.form.get(\"symbol\")\n shares_entry = int(request.form.get(\"shares\"))\n if not symbol_entry or not shares_entry:\n return apology(\"Please select both symbol and shares\")\n\n data = db.execute(\"SELECT symbol, shares FROM purchase WHERE id=:uid\",uid=userid)\n share_check = 0\n\n for s in data:\n if(s[\"symbol\"] == symbol_entry):\n share_check = s[\"shares\"]\n # print(share_check)\n if shares_entry > share_check:\n return apology(\"You don't have this many shares of this company\")\n\n current_cash = (db.execute(\"SELECT cash FROM users WHERE id=:uid\", uid=userid))[0].get(\"cash\")\n query = lookup(symbol_entry)\n share_price = query[\"price\"]\n sold_price = share_price * shares_entry\n\n db.execute(\"UPDATE users SET cash=:sold WHERE id=:uid\",sold=sold_price+current_cash, uid=userid)\n if shares_entry == share_check:\n db.execute(\"DELETE FROM purchase WHERE symbol=:symbol AND id=:uid\", symbol=symbol_entry, uid=userid)\n else:\n db.execute(\"UPDATE purchase SET shares=:shares WHERE symbol=:symbol AND id=:uid\",shares=share_check-shares_entry,symbol=symbol_entry, uid=userid)\n\n nshare = -shares_entry\n dt = datetime.now(timezone(timedelta(hours=6)))\n dt = dt.strftime(\"%d-%m-%Y %H:%M:%S\")\n db.execute(\"INSERT INTO history (id, symbol, shares, price, time) VALUES (:userid, :symbol, :shares, :price, :time)\", userid=userid, symbol=symbol_entry,shares=nshare,price=share_price, time=dt)\n return render_template(\"sell.html\", message=\"Sold!\")\n print(data)", "def _sell(self, amount, price):\n params = {\"pair\": self.pair, \"type\" : \"sell\", \"rate\" : price, \"amount\" : amount}\n response = self._send_request(\"Trade\", params)\n if \"error\" in response:\n raise TradeException(response[\"error\"])", "def sell():\n # User reached route via POST (as by submitting a form via POST)\n if request.method == \"POST\":\n # Ensure symbol was submitted\n if not request.form.get(\"symbol\"):\n return apology(\"must provide symbol\", 403)\n\n # Ensure positive number of shares was submitted\n elif not request.form.get(\"shares\") or int(request.form.get(\"shares\")) < 0:\n return apology(\"must provide positive number of shares\", 403)\n\n elif int(request.form.get(\"shares\")) > (db.execute(\"SELECT sum(shares) as shares FROM 'transaction' WHERE u_id = :user_id and symbol = :symbol\", user_id = session[\"user_id\"], symbol = request.form.get(\"symbol\")))[0][\"shares\"]:\n return apology(\"cannot sell more shares than owned\", 403)\n\n else:\n returned_quote = lookup(request.form.get(\"symbol\"))\n row = db.execute(\"SELECT * FROM users WHERE id = :user_id\", user_id = session[\"user_id\"])\n\n db.execute(\"INSERT INTO 'transaction' ('t_id','u_id','symbol','shares','price') VALUES (NULL,:u_id,:symbol,:shares,:price)\",\n u_id = session[\"user_id\"], symbol = returned_quote[\"symbol\"], shares = -1*int(request.form.get(\"shares\")), price = returned_quote[\"price\"])\n db.execute(\"UPDATE users SET cash = cash + :price * :shares WHERE id = :user_id\",\n price = returned_quote[\"price\"], shares = int(request.form.get(\"shares\")), user_id = session[\"user_id\"])\n\n flash(\"Sold\")\n return redirect(\"/\")\n\n # User reached route via GET (as by clicking a link or via redirect)\n else:\n rows = db.execute(\"SELECT symbol, SUM(shares) as shares FROM 'transaction' WHERE u_id = :user_id GROUP BY symbol\", user_id = session[\"user_id\"])\n\n if len(rows) > 0:\n return render_template(\"sell.html\", rows = 
rows)\n else:\n return apology(\"no shares to sell\", 403)", "async def buy(self, ctx, name, shares: int):\n\t\tplural = 's' if shares != 1 else ''\n\t\tcurrency = await bank.get_currency_name(ctx.guild)\n\t\tif shares < 1:\n\t\t\tawait ctx.send('You cannot buy less than one share.')\n\t\t\treturn\n\t\tname = name.upper()\n\t\ttry:\n\t\t\tstock_data = await self._get_stock_data([name])\n\t\texcept ValueError as e:\n\t\t\treturn await ctx.send(e)\n\t\tif name not in stock_data:\n\t\t\tawait ctx.send(f'I couldn\\'t find any data for the stock {name}. Please try another stock.')\n\t\t\treturn\n\t\tprice = stock_data[name]['price']\n\t\ttry:\n\t\t\tbal = await bank.withdraw_credits(ctx.author, shares * price)\n\t\texcept ValueError:\n\t\t\tbal = await bank.get_balance(ctx.author)\n\t\t\tawait ctx.send(\n\t\t\t\tf'You cannot afford {shares} share{plural} of {name}. '\n\t\t\t\tf'It would cost {price * shares} {currency} ({price} {currency} each). '\n\t\t\t\tf'You only have {bal} {currency}.'\n\t\t\t)\n\t\t\treturn\n\t\tasync with self.config.user(ctx.author).stocks() as user_stocks:\n\t\t\tif name in user_stocks:\n\t\t\t\tuser_stocks[name]['count'] += shares\n\t\t\telse:\n\t\t\t\tuser_stocks[name] = {'count': shares, 'total_count': stock_data[name]['total_count']}\n\t\tawait ctx.send(\n\t\t\tf'You purchased {shares} share{plural} of {name} for {price * shares} {currency} '\n\t\t\tf'({price} {currency} each).\\nYou now have {bal} {currency}.'\n\t\t)", "def sell():\n if request.method == \"POST\":\n symbol = request.form.get(\"symbol\")\n sharesToSell = int(request.form.get(\"shares\"))\n if sharesToSell < 0:\n return apology(\"Shares to sell cannot be negative\", 400)\n\n sharesRows = db.execute(\"SELECT * FROM portfolio WHERE UserID = :userid AND Symbol = :enteredSymbol\",\n userid=session.get(\"user_id\"), enteredSymbol = symbol)\n\n numSharesOwned = 0\n for row in sharesRows:\n numSharesOwned += row[\"NumberOfShares\"]\n\n if numSharesOwned < sharesToSell:\n return apology(\"You don't own that many shares!\", 400)\n\n remainingSharesToSell = sharesToSell\n for row in sharesRows:\n numShares = row[\"NumberOfShares\"]\n if remainingSharesToSell >= numShares:\n '''delete row'''\n delete = db.execute(\"DELETE FROM portfolio WHERE id = :rowid\", rowid = row[\"id\"])\n remainingSharesToSell -= numShares\n else:\n '''update row'''\n updatedShares = numShares - remainingSharesToSell\n update = db.execute(\"UPDATE portfolio SET NumberOfShares = :numshares, TotalPrice = :tp WHERE id = :rowid\",\n numshares = updatedShares, tp = updatedShares * row[\"UnitPrice\"], rowid = row[\"id\"])\n remainingSharesToSell = 0\n\n if remainingSharesToSell == 0:\n break;\n\n quote = lookup(symbol)\n cashToReturn = quote[\"price\"] * sharesToSell\n userRows = db.execute(\"SELECT cash FROM users WHERE id = :userid\", userid = session.get(\"user_id\"))\n usersCurrentCash = userRows[0][\"cash\"]\n\n updatedBalance = usersCurrentCash + cashToReturn\n db.execute(\"UPDATE users SET cash = :cash WHERE id = :userid\", cash = updatedBalance, userid = session.get(\"user_id\"))\n '''Update history'''\n dateNow = datetime.datetime.now()\n db.execute(\"INSERT INTO history (Symbol, Shares, Price, Date, UserID) VALUES(:symbl, :shares, :price, :date, :userid)\", symbl = symbol, shares = -1 * sharesToSell, price = -1 * cashToReturn, date = dateNow, userid = session.get(\"user_id\"))\n '''Update history end'''\n return redirect(\"/\")\n\n else:\n symbolRows = db.execute(\"SELECT Symbol FROM portfolio WHERE UserID = :userid GROUP BY 
Symbol\", userid=session.get(\"user_id\"))\n symbls = []\n for row in symbolRows:\n symbls.append(row[\"Symbol\"])\n\n return render_template(\"sell.html\", symbols=symbls)", "def sell():\n if request.method == \"POST\":\n current_user = session[\"user_id\"]\n\n\n if not request.form.get(\"sell_amount\"):\n return apology(\"Must provide a number to sell\", 403)\n\n stock_to_sell= request.form.get(\"stock_to_sell\")\n sell_amount= int(request.form.get(\"sell_amount\"))\n\n current_stocks = db.execute(\"SELECT volume FROM portfolio WHERE id = :id AND stock_symbol=:stock_symbol\", id=current_user, stock_symbol=stock_to_sell)\n # current_stocks=db.execute(\"SELECT volume FROM portfolio WHERE id= :id AND stock_symbol= :stock_symbol\", id=current_user, stock_symbol=stock_to_sell)\n\n\n\n if not current_stocks:\n return apology(\"You do not own any stocks, try refreshing the sell page\")\n\n current_volume = current_stocks[0][\"volume\"]\n current_volume = int(current_volume)\n\n if current_volume < int(request.form.get(\"sell_amount\")):\n return apology(\"Attempting to sell more shares than you own\", 403)\n\n lookedup=[]\n lookedup=lookup(request.form.get(\"stock_to_sell\"))\n if not lookedup:\n return apology(\"Unable to lookup stock info.\")\n\n stock_name = lookedup.get(\"name\")\n stock_price = lookedup.get(\"price\")\n stock_symbol = lookedup.get(\"symbol\")\n\n\n user_cash = db.execute(\"SELECT cash FROM users WHERE id = :id\", id=current_user)\n # see if properly selecting cash amount\n if not user_cash:\n return apology(\"Didn't find user's current balance\", 000)\n # update user total cash\n current_cash = user_cash[0][\"cash\"]\n current_cash = int(current_cash)\n total_revenue = sell_amount * stock_price\n new_balance = current_cash + total_revenue\n db.execute(\"UPDATE users SET cash = :new_balance WHERE id = :id\", new_balance=new_balance, id=current_user)\n\n # update portfolio\n new_volume=0\n new_volume=current_volume-sell_amount\n db.execute(\"UPDATE portfolio SET volume = :new_volume WHERE id = :id AND stock_symbol = :stock_symbol\", new_volume=new_volume, id=current_user, stock_symbol=stock_symbol)\n\n # update sales database\n db.execute(\"INSERT INTO sales (id,stock_symbol,volume_sold,price,date_sold) VALUES(:id,:symbol,:amount,:price,datetime('now'))\", id=current_user, symbol=stock_symbol, amount=sell_amount, price=stock_price)\n\n\n return render_template(\"sold.html\",stock_name=stock_name, stock_price=stock_price, stock_symbol=stock_symbol,shares_to_sell=sell_amount, total_value=total_revenue)\n\n\n else:\n current_user = session[\"user_id\"]\n current_stocks=db.execute(\"SELECT stock_symbol, volume FROM portfolio WHERE id = :id\", id=current_user)\n if not current_stocks:\n return apology(\"You do not own any stocks\")\n return render_template(\"sell.html\",current_stocks=current_stocks)\n # return apology(\"i suck at selling?\")", "def sell():\n\n # Access user's id\n user_id = session[\"user_id\"]\n\n # User reached route via POST (as by submitting a form via POST)\n if request.method == \"POST\":\n\n # Access form data\n symbol = request.form.get(\"symbol\")\n shares = int(request.form.get(\"shares\"))\n\n # Ensure symbol was submitted\n if not symbol:\n return apology(\"must provide symbol\", 400)\n\n # Ensure shares was submitted\n if not shares:\n return apology(\"must provide shares\", 400)\n\n # Obtain quote using lookup function\n QUOTED = lookup(symbol)\n\n # Check if user has enough shares to sell as requested\n shares_count = db.execute(\"SELECT 
shares_count FROM shares WHERE user_id = ? AND symbol = ?\",\n user_id, QUOTED[\"symbol\"])[0][\"shares_count\"]\n if shares > shares_count:\n return apology(\"not enough shares owned\", 400)\n\n # User has enough shares to sell as requested\n else:\n # Calculate new cash amount user has\n cash = db.execute(\"SELECT cash FROM users WHERE id = ?\", user_id)[0][\"cash\"]\n cash_gained = QUOTED[\"price\"] * shares\n new_cash_total = cash + cash_gained\n\n # Update cash in users table for user\n db.execute(\"UPDATE users SET cash = ? WHERE id = ?\", new_cash_total, user_id)\n\n # Insert sell log into history table\n db.execute(\"INSERT INTO history (user_id, symbol, shares, price, transacted) VALUES (?, ?, ?, ?, datetime('now'))\",\n user_id, QUOTED[\"symbol\"], -(shares), QUOTED[\"price\"])\n\n # Keep track of shares in shares table\n current_shares = db.execute(\"SELECT shares_count FROM shares WHERE user_id = ? AND symbol = ?\",\n user_id, QUOTED[\"symbol\"])[0][\"shares_count\"]\n new_shares_total = current_shares - shares\n\n # If 0 shares left of the stock owned\n if new_shares_total == 0:\n db.execute(\"DELETE FROM shares WHERE user_id = ? AND symbol = ?\", user_id, QUOTED[\"symbol\"])\n\n # Redirect user to home page\n flash(\"Sold!\", \"info\")\n return redirect(\"/\")\n\n # User still owns shares of the stock\n else:\n shares_value_total = new_shares_total * QUOTED[\"price\"]\n db.execute(\"UPDATE shares SET shares_count = ?, price = ?, total = ? WHERE user_id = ? AND symbol = ?\",\n new_shares_total, QUOTED[\"price\"], shares_value_total, user_id, QUOTED[\"symbol\"])\n\n # Redirect user to home page\n flash(\"Sold!\", \"info\")\n return redirect(\"/\")\n\n # User reached route via GET (as by clicking a link or via redirect)\n else:\n # Select share symbols from shares table for logged in user\n SHARES = db.execute(\"SELECT symbol FROM shares WHERE user_id = ?\", user_id)\n\n return render_template(\"sell.html\", shares=SHARES)", "def __sell(self, order, portfolio):\n amount = order.price * order.volume\n portfolio.remove_stock(order.symbol, order.volume)\n portfolio.add_cash(amount)\n return True", "def sell():\n\n if request.method == \"POST\":\n\n # define stock variables\n symbol = request.form.get(\"symbol\")\n stock = lookup(request.form.get(\"symbol\"))\n\n # error checking\n if not stock:\n return apology(\"Missing or Incorrect Symbol\", 400)\n\n # check if stock is owned\n try:\n sold_stock = db.execute(\n \"SELECT symbol, SUM(shares) AS shares, price FROM transactions WHERE user_id = :user_id AND symbol = :symbol GROUP BY symbol\", user_id=session[\"user_id\"], symbol=symbol)[0]\n except IndexError:\n return apology(\"Stock not owned\", 400)\n\n # check for shares input\n try:\n shares = int(request.form.get(\"shares\"))\n except ValueError:\n return apology(\"Input at least 1 share\", 400)\n\n if shares < 0:\n return apology(\"Input at least 1 Share\", 400)\n\n if int(sold_stock[\"shares\"]) < shares:\n return apology(\"Not enough shares to sell\", 400)\n\n else:\n # define variables for inserting into transactions table and updating cash\n purchase_date = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n\n # update user cash\n user_cash = db.execute(\"SELECT cash FROM users WHERE id = :user_id\", user_id=session[\"user_id\"])[0][\"cash\"]\n user_cash = user_cash + (stock[\"price\"]*shares)\n db.execute(\"UPDATE users SET cash = :user_cash WHERE id = :user_id\", user_id=session[\"user_id\"], user_cash=user_cash)\n\n # update transactions table with selling transaction\n 
db.execute(\"\"\"\n INSERT INTO transactions(user_id, date, symbol, shares, price)\n VALUES(:user_id, :date, :symbol, :shares, :price)\n \"\"\",\n user_id=session[\"user_id\"],\n date=purchase_date,\n symbol=stock[\"symbol\"],\n shares=-shares,\n price=stock[\"price\"]\n )\n\n flash(\"You paper-handed that one!\")\n return redirect(\"/\")\n\n else:\n # query db for current holdings\n stocks = db.execute(\n \"SELECT symbol, SUM(shares) AS shares, price FROM transactions WHERE user_id = :user_id GROUP BY symbol\", user_id=session[\"user_id\"])\n stocks[:] = [stock for stock in stocks if stock.get('shares') > 0]\n return render_template(\"sell.html\", stocks=stocks)", "def sell_btc(self, qty):\n url = self.base_url + 'sells'\n request_data = {\n \"qty\": qty,\n }\n body = json.dumps(request_data)\n self.session.headers.update(self.sign(url, body=body))\n self.session.headers.update(request_data)\n resp = self.session.post(url=url, data=body)\n return resp.json()", "def sell(self, date_idx: int, cash_balance: float, buy_budget: float) -> float:\n todays_price: float = self.price_history.iat[date_idx, 1]\n sell_value: float = self.shares * todays_price\n new_cash_balance: float = cash_balance + sell_value\n profit_or_loss = sell_value - buy_budget\n if Helpers.is_verbose_on():\n if profit_or_loss >= 0:\n text_color: str = 'green'\n else:\n text_color = 'red'\n cprint(f\"{self.ticker}: sell {self.shares:.2f} shares at {todays_price:.2f} \"\n f\"for ${sell_value:.2f} on date {date_idx}. Cash balance: {new_cash_balance:.2f}\",\n text_color)\n self.shares = 0\n self.last_sell_date_idx = date_idx\n return new_cash_balance", "def sell_to_close(self, symbol, date, price):\n\n # Exit the position\n positions_by_symbol = self.active_positions_by_symbol\n position = positions_by_symbol[symbol]\n position.exit(date, price)\n\n # Receive the cash\n sale_value = position.last_value * (1 - self.percent_slippage)\n self.cash += sale_value\n self.portfolio_history.record_cash(date, self.cash)\n\n # Record in portfolio history\n self.portfolio_history.add_to_history(position)\n del positions_by_symbol[symbol]", "def sell():\n\n rows = db.execute('SELECT symbol, shares FROM transactions WHERE id = :id', id=session['user_id'])\n\n # Generate a list of stock's symbols owned by the current user\n stocks = {stock[\"symbol\"]: stock[\"shares\"] for stock in rows}\n\n # User reached route via POST\n if request.method == 'POST':\n\n if not request.form.get('symbol'):\n return apology('must provide symbol', 403)\n\n elif request.form.get('symbol') not in stocks:\n return apology(\"you don't own any stock of this company\")\n\n try:\n if int(request.form.get('shares')) < 1:\n return apology('must prove a positive number of stocks')\n\n elif int(request.form.get('shares')) > stocks[request.form.get('symbol')]:\n return apology(\"you don't own that shares\")\n except ValueError:\n return apology(\"input isn't an integer\", 403)\n\n stock_price = lookup(request.form.get('symbol'))['price']\n\n db.execute('INSERT INTO transactions (id, operation, symbol, shares, price) VALUES(:id, :operation, :symbol, :shares, :price)',\n id=session['user_id'],\n operation='SELL',\n symbol=request.form.get('symbol'),\n shares=request.form.get('shares'),\n price=stock_price\n )\n\n db.execute('UPDATE users SET cash = cash + :y WHERE id = :id',\n y=stock_price * int(request.form.get('shares')),\n id=session['user_id']\n )\n\n return redirect('/')\n\n # User reached route via GET\n else:\n return render_template('sell.html', stocks=stocks)", 
"def sell():\n if request.method == \"POST\":\n symbol = request.form.get(\"symbol\")\n price = lookup(symbol)['price']\n\n if not request.form.get('ammount').isnumeric() or int(request.form.get('ammount')) % 100 != 0:\n return apology(\"The ammount is not a valid number, should be a multiple of 100\", 501)\n\n ammount = int(request.form.get('ammount'))\n cost = price * ammount\n current_stock = db.execute(\"SELECT * FROM stocks WHERE user_id = ? AND symbol = ?\", session[\"user_id\"], symbol)\n current_cash = db.execute(\"SELECT * FROM users WHERE id = ?\", session[\"user_id\"])\n\n if ammount > current_stock[0][\"ammount\"] or len(current_stock) == 0:\n return apology(\"Your stocks are not that high!\", 501)\n else:\n update_database(session[\"user_id\"], symbol, ammount, price, \"sell\", current_stock[0], current_cash[0])\n \n return redirect(\"/\")\n\n return render_template(\"sell.html\")", "def sell():\n\n if request.method == \"POST\":\n\n # get share symbol from form\n symb = request.form.get(\"symbol\")\n\n # retrieve stock price, symbol and stock name via lookup function (returns dict object)\n quote = lookup(request.form.get(\"symbol\"))\n if not quote:\n return apology(\"Lookup failed\", 400)\n\n # retrieve number of shares to sell as an int and convert it to a negative number\n try:\n quant = int(request.form.get(\"shares\"))\n except ValueError:\n # apologise if not an int\n return apology(\"Invalid quantity\", 400)\n else:\n quant = abs(quant)*-1\n\n # variable to show user's current cash\n cash = db.execute(\"SELECT cash FROM users WHERE id=:id\", id=session[\"user_id\"])\n cash = cash[0][\"cash\"]\n\n # check if user owns that particular stock and that they have the same or more quantity\n shares = db.execute(\"SELECT name, SUM(quantity) FROM portfolio WHERE userid = :userid GROUP BY name\", userid=session[\"user_id\"])\n\n for share in shares:\n # if the share is found in the list (the user owns it)\n if share[\"name\"] == quote[\"name\"]:\n # if the quantity of the shares owned is greater than the quantity the user wants to sell\n if share[\"SUM(quantity)\"] > quant:\n # insert transaction into portfolio table\n db.execute(\"INSERT INTO portfolio (name, userid, price, quantity) VALUES (:name, :userid, :price, :quantity)\",name=quote[\"symbol\"],userid=session[\"user_id\"], price=quote[\"price\"], quantity=quant)\n # update user's cash in the users table\n db.execute(\"UPDATE users SET cash = :cash WHERE id = :id\", cash=cash+(quant*-1)*quote[\"price\"], id=session[\"user_id\"])\n # return user to index summary page after sell\n return redirect('/')\n # if the quantity of the particualr share is less than the quantity user wants to sell, then apologise\n else:\n apology(\"You don't have that many to sell!\", 400)\n else:\n apology(\"You don't own any of that name\", 400)\n\n # User reached route via GET (as by clicking a link or via redirect)\n else:\n # get a list of share names that the user owns for the select HTML element\n select = db.execute(\"SELECT name FROM portfolio WHERE userid=:id GROUP BY name\", id=session[\"user_id\"])\n\n return render_template(\"sell.html\", select=select)\n\n return apology(\"Buy failed\", 400)", "def sell():\n if request.method == \"GET\":\n portf = db.execute(\"SELECT * FROM portfolio WHERE id=:id\", id = session[\"user_id\"])\n return render_template(\"sell.html\",portfolio = portf)\n else:\n\n quote = lookup(request.form.get('stocklist'))\n print(str(quote))\n # Remove the stock frm user's portfolio\n # taking no of shares provided 
by user in form\n shares = int(request.form.get(\"no_of_shares\"))\n\n # Taking the price of that share\n\n price = db.execute(\"SELECT price FROM portfolio WHERE symbol=:symbol AND id=:id\", symbol = quote[\"symbol\"], id = session[\"user_id\"])\n\n # totla_price\n total_remove_price = shares * quote[\"price\"]\n # Now updating\n print(total_remove_price)\n # Taking total no of shares from portfolio\n share = db.execute(\"SELECT shares FROM portfolio WHERE id=:id AND symbol=:symbol\",symbol = quote[\"symbol\"],\n id = session[\"user_id\"])\n total = db.execute(\"SELECT total FROM portfolio WHERE id=:id AND symbol=:symbol\",symbol = quote[\"symbol\"],\n id = session[\"user_id\"])\n\n # if share provided by user in form is less than or equal to total shares owned then only transaction will processed\n print(share[0][\"shares\"])\n print(shares)\n if (shares < share[0][\"shares\"]):\n # Remove stock and price and no of stocks stocks = stocks - n\n real_total = total[0][\"total\"].split(\"$\")\n\n new_total1 = real_total[1][2:]\n new_total2 = real_total[1][:1]\n yup_final = new_total1 + new_total2\n print(yup_final)\n db.execute(\"UPDATE portfolio set total=:total, shares=:shares WHERE id=:id\", total = float(yup_final) - total_remove_price\n , shares = int(share[0][\"shares\"]) - shares , id=session[\"user_id\"])\n # current selling price = price * stocks and add this to user's cash\n elif (shares == share[0][\"shares\"]):\n db.execute(\"DELETE FROM portfolio WHERE id=:id AND symbol=:symbol\", id = session[\"user_id\"], symbol = quote['symbol'])\n else:\n return apology(\"Unable to process request\", 404)\n return redirect(\"/\")", "def test_sell_shares_success(self):\n\t\texpected_response_code = 200\n\t\tsymbol = \"DDD\" \n\t\tquantity = 100\n\t\t\n\t\tdisplayName, email, password = (\"John Doe\", \"johndoe@test.com\", \"12345678\")\n\t\tregistration_response = ApiFacade.register_user(displayName, email, password)\n\t\tauthentication_response = ApiFacade.authenticate_user(email, password)\n\t\ttoken = authentication_response.get_token()\n\n\t\t# get account id\n\t\tviewdetails_response = ApiFacade.view_details(token)\n\t\taccount_id = viewdetails_response.get_main_account_id()\n\n\t\t# buy shares first\n\t\tbuyshare_response = ApiFacade.buy_share(token, account_id, symbol, int(quantity))\n\t\t\n\t\t# sell the shares\n\t\tsellshare_response = ApiFacade.sell_share(token, account_id, symbol, int(quantity / 3))\n\n\t\tdeletion_response = ApiFacade.delete_user(token)\n\n\t\tself.assertEqual(sellshare_response.get_http_status(), expected_response_code, \n\t\t\tmsg = \"Expected HTTP{0}; got HTTP{1}\"\n\t\t\t.format(expected_response_code, sellshare_response.get_http_status()))", "def get_stock_price(stock):\n pass", "def sell():\n\n # User submits information\n if request.method == \"POST\":\n\n # Ensure user entered a stock\n if not request.form.get(\"symbol\"):\n return apology(\"must choose a stock\")\n\n # Get stock selected\n symbol = request.form.get(\"symbol\")\n \n # Ensure is a valid stock symbol\n if not lookup(symbol):\n return apology(\"Invalid stock symbol\")\n\n # Ensure user owns the stock requested\n test = db.execute(\"SELECT * FROM portfolios WHERE user_id = ? AND stocks = ?\", session[\"user_id\"], symbol)\n\n if not test:\n return apology(\"you have 0 shares of this stock\")\n\n owns = db.execute(\"SELECT * FROM portfolios WHERE user_id = ? 
AND stocks = ?\", session[\"user_id\"], symbol)\n\n # Ensure user entered a number in shares\n if not request.form.get(\"shares\") or not isinstance(request.form.get(\"shares\"), int):\n return apology(\"must enter postive whole number of shares\")\n\n shares = request.form.get(\"shares\")\n\n # Ensure number is positive\n if shares <= 0:\n return apology(\"must enter a positive number\")\n\n # Ensure user owns the amount of stock entered to sell\n if shares > owns[0]['shares']:\n return apology(\"you don't own that much of this stock\")\n\n # Get date and time for transaction\n day = datetime.now()\n time = datetime.now().time()\n\n # Get total and stock name for transaction\n price = lookup(symbol)['price']\n total = price * shares\n name = lookup(symbol)['name']\n\n # Sell shares of the stock and add to transactions history\n db.execute(\"INSERT INTO transactions (user_id, date, time, price, shares, total, stock, name, type) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)\",\n session[\"user_id\"], day, time, price, shares * -1, total, symbol, name, \"sell\")\n\n # Update portfolios table\n db.execute(\"UPDATE portfolios SET shares = shares - ? WHERE user_id = ? AND stocks = ?\", shares, session[\"user_id\"], symbol)\n\n # If stock shares is 0, delete from portfolio\n db.execute(\"DELETE FROM portfolios WHERE shares = ? \", 0)\n\n return redirect(\"/\")\n\n # If user reached page via link or redirect\n else:\n\n # Get list of stocks owned\n owns = db.execute(\"SELECT stocks FROM portfolios WHERE user_id = ? ORDER BY stocks\", session[\"user_id\"])\n\n return render_template(\"sell.html\", owns=owns)", "def sell(self, currency_pair, rate, amount):\n return self.api_query('sell', {\"currencyPair\": currency_pair, \"rate\": rate, \"amount\": amount})", "def reverse_sell(self, amount):\n trade_amount = 0\n precision = pow(10, self.pair.get_quote_token().get_decimals() - self.pair.get_base_token().get_decimals())\n for i in range(len(self.book[Trade.WAY_BUY])):\n offer = self.book[Trade.WAY_BUY][i]\n amount_quote = offer.get_quote_amount() # GAS\n amount_base = offer.get_base_amount() # NEO\n price = offer.get_price()\n\n if amount_base >= amount:\n if self.pair.get_exchange().get_fee_token():\n trade_amount = trade_amount + amount/price * precision\n else:\n trade_amount = trade_amount + amount/price * precision / (1 - self.pair.get_exchange().get_fees())\n return int(trade_amount)\n\n '''\n Is the offered amount less than needed, you can only buy the offered amount and continue\n '''\n trade_amount = trade_amount + amount_quote\n amount = amount - amount_base\n\n '''\n Not enough volume or amount to high\n '''\n raise KeyError(\"Not enough offers in orderbook. 
Low volume or amount to high.\")", "def sell(self, ticker, volume):\n if volume <= 0: \n raise errs.VolumeLessThanZeroError\n\n sell_trade = Trade(ticker=ticker, volume=volume, account_id=self.id)\n if trade.get_current_price(ticker) is None:\n raise errs.NoSuchTickerError\n else:\n sell_trade.unit_price = trade.get_current_price(ticker)\n \n decrease_position = Position.from_account_id_and_ticker(account_id=sell_trade.account_id, ticker=sell_trade.ticker)\n if decrease_position.shares < sell_trade.volume:\n raise errs.InsufficientSharesError\n decrease_position.shares -= sell_trade.volume\n decrease_position.save()\n\n sell_trade.volume *= -1 # Differentiates buys/sells with pos/negative volume\n sell_trade.save()", "def sell():\n if request.method == \"GET\":\n return render_template(\"sell.html\")\n else:\n tick = request.form.get(\"ticker\")\n quote = lookup(tick)\n if not quote:\n return apology(\"Ticker does not exist\")\n shares = int(request.form.get(\"shares\"))\n if shares <= 0:\n return apology(\"Please input a valid number of shares\")\n money = db.execute(\"SELECT cash FROM users WHERE id = :id\", id=session[\"user_id\"])\n #if shares < int(money[0][\"shares\"]):\n # return apology(\"You don\"t have those shares >:(\")\n db.execute(\"UPDATE users SET cash = cash + :purchase WHERE id = :id\", id=session[\"user_id\"], purchase=(quote[\"price\"] * float(shares)))\n findshares = db.execute(\"SELECT shares FROM purchases WHERE user_id = :id AND ticker=:ticker\", id=session[\"user_id\"], ticker=quote[\"symbol\"])\n \n \n if not findshares:\n return apology(\"You don\"t have those shares >:(\")\n else:\n if int(findshares[0][\"shares\"]) < int(shares):\n return apology(\"You don\"t have those shares >:(\")\n db.execute(\"UPDATE purchases SET shares=:number, total=:total WHERE user_id=:id AND ticker=:ticker\", id=session[\"user_id\"], ticker=quote[\"symbol\"], total=(float(quote[\"price\"])*float(shares)), number=int(findshares[0][\"shares\"]) - int(shares))\n return redirect(url_for(\"index\"))\n\nif __name__ == \"__main__\":", "def sell():\n\n # if user reached route via GET return them an input form\n if request.method == \"GET\":\n return render_template(\"sell.html\")\n\n # if user reached route via POST (as by submitting a form via POST)\n elif request.method == \"POST\":\n\n # get id as it is used many times\n id = session[\"user_id\"]\n\n # get symbol input\n symbol = request.form.get(\"symbol\")\n\n # get share volume requested\n volume = int(request.form.get(\"volume\"))\n\n # ensure stock symbol was submitted\n if not symbol:\n return apology(\"you must provide a stock symbol\")\n\n # ensure positive volume (integer rule handled elsewhere)\n elif volume <= 0:\n return apology(\"volume must be integer greater than 0\")\n\n # lookup stock on yahoo\n stock_info = lookup(symbol)\n\n # if error looking stock up\n if not stock_info:\n return apology(\"that stock symbol doesn't exist\")\n\n # check if user already owns any stock in this company\n existing = db.execute(\"SELECT num_shares FROM portfolio WHERE id = :id AND symbol = :symbol\", id=id, symbol=symbol)\n\n # if sufficient cash, make purchase, else return apology\n if not existing:\n return apology(\"you don't own this stock\")\n else:\n if existing[0]['num_shares'] < volume:\n return apology('you cannot sell more shares than you own')\n else:\n # query database for\n cash = db.execute(\"SELECT cash FROM users WHERE id = :id\", id=id)\n cash = cash[0]['cash']\n\n minus = db.execute(\"UPDATE portfolio SET num_shares = 
:num_shares WHERE id = :id AND symbol = :symbol\", num_shares=existing[0]['num_shares'] - volume, id=id, symbol=symbol)\n\n # set date string\n dstring = str(datetime.datetime.utcnow())\n\n # update transaction history\n result2 = db.execute(\"INSERT INTO `transaction` (id, symbol, volume, share_price, dtstamp) VALUES(:id, :symbol, :volume, :share_price, :dtstamp)\", id=id, symbol=symbol, volume=-volume, share_price=stock_info['price'], dtstamp=dstring)\n\n # calculate sale price\n sale_price = stock_info['price'] * volume\n\n # increase cash balance\n result = db.execute(\"UPDATE users SET cash = :cash WHERE id = :id\", cash=cash+sale_price, id=id)\n\n # redirect user to home page\n return redirect(url_for(\"index\"))", "def sell_stock(self, stock, amount, date=None):\n if date is None:\n date = self.date\n\n if type(date) is not datetime.datetime and type(date) is not pd.tslib.Timestamp:\n date = datetime.datetime.strptime(date, \"%Y-%m-%d\")\n\n self.order_stock(stock, self.stock_data[stock].position['Position'][date] - amount, date)", "def sell():\n # Moved userID outside of 'if' as could not be accessed in 'else' for html.\n userID = session[\"user_id\"]\n\n if request.method == \"POST\":\n\n user = db.execute(\"SELECT * FROM users WHERE id = :id\", id=userID)\n cash = user[0][\"cash\"]\n\n stock = lookup(request.form.get(\"symbol\"))\n\n numOfShares = float(request.form.get(\"shares\"))\n if not request.form.get(\"symbol\"):\n return apology(\"You haven't typed a symbol\")\n if stock is None:\n return apology(\"This doesn't seem to be a valid symbol, try again\")\n if numOfShares < 0:\n return apology(\"You must state how many shares you want to sell\")\n\n salePrice = stock[\"price\"] * numOfShares\n date_time = datetime.now().strftime('%d-%m-%Y %H:%M:%S')\n\n stockOwned = db.execute(\"SELECT * FROM portfolio WHERE id=:userID AND symbol=:symbol\", userID=userID, symbol=stock[\"symbol\"])\n if not stockOwned:\n return apology(\"You don't own any of this stock\")\n if stockOwned[0][\"numOwned\"] < numOfShares:\n return apology(\"You are trying to sell more shares than you own!\")\n else:\n newNumOwned = float(stockOwned[0][\"numOwned\"]) - numOfShares\n newTotalValue = newNumOwned * stock[\"price\"]\n db.execute(\"UPDATE users SET cash=cash+:salePrice WHERE id=:userID\", salePrice=salePrice, userID=userID)\n db.execute(\"INSERT INTO transactions (id, symbol, num_shares, price_ps, date_time, buy_or_sell) VALUES (:userID, :symbol, :num_shares, :price_ps, :date_time, :buy_or_sell)\",\n userID=userID, symbol=stock[\"symbol\"], num_shares=numOfShares, price_ps=stock[\"price\"], date_time=date_time, buy_or_sell=\"SELL\")\n db.execute(\"UPDATE portfolio SET numOwned=:newNumOwned, totalValue=:newTotalValue WHERE id=:userID AND symbol=:symbol\",\n newNumOwned=newNumOwned, newTotalValue=newTotalValue, userID=userID, symbol=stock[\"symbol\"])\n\n return redirect(\"/\")\n else:\n symbols = db.execute(\"SELECT symbol FROM portfolio WHERE id=:userID\", userID=userID)\n return render_template(\"sell.html\", symbols=symbols)", "def sell():\n # User reached route via POST (as by submitting a form via POST)\n if request.method == \"POST\":\n # Ensures symbol was submitted, otherwise return apology\n if not request.form.get(\"symbol\"):\n return apology(\"must provide username\", 400)\n # Ensures shares was submitted, otherwise return apology\n if not request.form.get(\"shares\"):\n return apology(\"must provide username\", 400)\n # The symbol user selected\n symbolselected = 
request.form.get(\"symbol\")\n # The amount of shares of the stock user inputed\n amtshares = db.execute(\"SELECT SUM(shares), symbol FROM portfolio WHERE userid = :userid GROUP BY :symbol\",\n userid=session[\"user_id\"], symbol=symbolselected)\n # Get the int version of how many shares person currently has\n amtshares = int(amtshares[0][\"SUM(shares)\"])\n\n # Amount of shares user wants to sell (it's negative because it reduces amount of shares user has for the stock)\n sharesinputed = -int((request.form.get(\"shares\")))\n # If user does not have enough stock to sell with inputed amount of shares, return apology\n if (amtshares + sharesinputed) < 0:\n return apology(\"You do not have enough shares\", 400)\n\n # Sets quote to the information about symbol inputed by user\n quote = lookup(request.form.get(\"symbol\"))\n # Ensures symbol is a valid symbol that has a quote\n if not quote:\n return apology(\"Symbol Invalid\", 400)\n # Amount of money stock will sell for\n value = quote[\"price\"]\n # Name of stock\n name = quote[\"name\"]\n # Total amount of money needed to buy the amount and type of stock user has inputed\n total = (value * sharesinputed)\n\n # Inserts sell transaction record into portfolio\n db.execute(\"INSERT INTO portfolio (userid, symbol, price, shares, TOTAL, transacted, name) VALUES(:userid, :symbol, :price, :shares, :TOTAL, :transacted, :name)\",\n userid=session[\"user_id\"], symbol=symbolselected, price=value, shares=sharesinputed, TOTAL = total, transacted=datetime.datetime.now(), name=name)\n\n # Finds the amount of money user has to spend on stocks\n amount = db.execute(\"SELECT cash FROM users WHERE id = :userid\", userid=session[\"user_id\"])\n # The virst value in the array is the amount of money user can spend\n money = amount[0][\"cash\"]\n # Final money count after adding value of stock (subtraction is used since total is negative, and we are adding sales value to cash)\n finalcashamount = money - total\n # Updates cash for user\n db.execute(\"UPDATE users SET cash = :finalcashamount WHERE id=:userid\",\n finalcashamount=finalcashamount, userid=session[\"user_id\"])\n # Redirects user to index page\n return redirect(\"/\")\n # If user is accessing sell page\n else:\n # List of symbols (not repeating)\n symbols = db.execute(\"SELECT symbol FROM portfolio WHERE userid = :userid GROUP BY symbol\", userid=session[\"user_id\"])\n\n # Returns sell.html with different types of symbols\n return render_template(\"sell.html\", symbols=symbols)", "def sell():\n\n # User reached route via POST (as by submitting a form via POST)\n if request.method == \"POST\":\n\n # Obtain the symbol and shares that the user inputted\n stock = request.form.get(\"symbol\")\n sold = request.form.get(\"shares\")\n\n # Compute the number of shares in the account\n shares = db.execute(\"SELECT shares FROM summary WHERE id = :username AND symbol= :symbol\", username=session[\"user_id\"], symbol=stock)[0][\"shares\"]\n update = int(shares) - int(sold)\n\n # Ensure stock validity\n if stock == \"\":\n return apology(\"must select a stock\", 403)\n elif int(shares) == 0:\n return apology(\"stock not owned\", 403)\n\n # Ensure an appropriate amount of shares is requested\n if int(sold) < 0:\n return apology(\"invalid stock shares\", 403)\n elif int(shares) < int(sold):\n return apology(\"not enough shares owned\", 403)\n\n # Insert updated information into database\n db.execute(\"INSERT INTO purchase (id, symbol, shares, price, created_at) VALUES(:id,:symbol,:shares,:value, datetime('now'))\", 
id=session[\"user_id\"], symbol=stock, shares=\"-\"+sold, value=lookup(stock)[\"price\"])\n db.execute(\"UPDATE summary SET shares= :value WHERE (id = :username AND symbol= :symbol)\", value=str(update), username = session[\"user_id\"], symbol=stock)\n\n # Update the amount of cash in account\n cash = db.execute(\"SELECT cash FROM users WHERE id=:id\", id=session[\"user_id\"])[0][\"cash\"]\n db.execute(\"UPDATE users SET cash = :new\", new = cash + (int(sold) * lookup(stock)[\"price\"]) )\n\n # Redirect users to login page\n return redirect(\"/\")\n\n # User reached route via GET (as by clicking a link or via redirect)\n else:\n symbols = []\n\n stocks = db.execute(\"SELECT * FROM summary WHERE id = :username\", username = session[\"user_id\"])\n\n # Create a list of stocks that the user owns and can sell\n for item in stocks:\n symbol = item[\"symbol\"]\n symbols.append(symbol)\n\n return render_template(\"sell.html\", symbols = symbols)", "def sell():\n\n if request.method == \"GET\":\n symbols = []\n table_name = f\"stocks_user{session.get('user_id')}\"\n rows = db.execute('SELECT DISTINCT stock_symbol FROM ? WHERE NOT stock_symbol=\"DINHEIRO\" GROUP BY stock_symbol HAVING SUM(shares) >= 1', table_name)\n for row in rows:\n symbols.append(row[\"stock_symbol\"])\n\n return render_template('sell.html', symbols=symbols)\n\n elif request.method == \"POST\":\n symbols = []\n table_name = f\"stocks_user{session.get('user_id')}\"\n rows = db.execute('SELECT DISTINCT stock_symbol FROM ? WHERE NOT stock_symbol=\"DINHEIRO\" GROUP BY stock_symbol HAVING SUM(shares) >= 1', table_name)\n for row in rows:\n symbols.append(row[\"stock_symbol\"])\n\n if request.form.get(\"symbol\") not in symbols:\n return apology(\"Código de ação inválido\")\n\n shares = db.execute(\"SELECT SUM(shares) FROM ? WHERE stock_symbol = ?\", table_name, request.form.get(\"symbol\"))[0][\"SUM(shares)\"]\n\n if not request.form.get(\"shares\"):\n return apology(\"Digite a quantidade de ações\")\n\n elif int(request.form.get(\"shares\")) > shares:\n return apology(\"Você não tem tantas ações\")\n\n elif int(request.form.get(\"shares\")) <= 0:\n return apology(\"Quantidade de ações não positiva\")\n\n else:\n current_price = lookup(request.form.get(\"symbol\"))['price']\n money_received = current_price * int(request.form.get(\"shares\"))\n db.execute(\"INSERT INTO ? (stock_symbol, shares, price, time) VALUES(?, ?, ?, ?)\", table_name, request.form.get(\"symbol\"), -(int(request.form.get(\"shares\"))), current_price, time_date())\n db.execute(\"UPDATE users SET dinheiro = dinheiro + ? WHERE id = ?\", money_received, session.get(\"user_id\"))\n\n return redirect('/')", "def sell(self, bar, volume):\n self.place(Order(symbol=bar.symbol,\n volume=volume,\n price=bar.close,\n transaction=TransactionType.SELL,\n timestamp=bar.timestamp))", "async def price(self, ctx, name):\n\t\tname = name.upper()\n\t\ttry:\n\t\t\tstock_data = await self._get_stock_data([name])\n\t\texcept ValueError as e:\n\t\t\treturn await ctx.send(e)\n\t\tif name not in stock_data:\n\t\t\tawait ctx.send(f'I couldn\\'t find any data for the stock {name}. Please try another stock.')\n\t\t\treturn\n\t\tprice = stock_data[name]['price']\n\t\treal = str(price)\n\t\treal = ('0' * (3 - max(len(real), 0))) + real\n\t\treal = '$' + real[:-2] + '.' 
+ real[-2:]\n\t\tcurrency = await bank.get_currency_name(ctx.guild)\n\t\tawait ctx.send(f'**{name}:** {price} {currency} per share ({real}).')", "def sell(self):\n #TODO\n #hint: use the raise method to create an exception.\n if self.quantity < 1:\n raise SoldOutOfStockError(self.name)\n else:\n return 1\n # item getters", "def sell():\n if request.method=='POST':\n #parameter is not filled\n if not request.form.get(\"shares\"):\n return apology(\"Please enter how much u want to sell\",400)\n #check if shares(amount) that are going to be sell less than owner's share.\n sell=request.form.get(\"symbol\")\n shares=request.form.get(\"shares\")\n amount=db.execute(\"SELECT SUM(transactions) as amount FROM record WHERE userID=? AND symbol=? GROUP BY symbol HAVING transactions\",session[\"user_id\"],sell)\n if amount[0][\"amount\"]<int(shares):\n return apology(\"You dont own that much shares\",400)\n\n #record sell and add cash amount\n quote=lookup(sell)\n price=quote[\"price\"]\n total=int(price)*int(shares)\n\n db.execute(\"INSERT INTO record(userID,transactions,symbol,price,t1) VALUES(?,?,?,?,strftime('%s','now'))\",session[\"user_id\"],(int(shares)*-1),quote[\"symbol\"],price)\n db.execute(\"UPDATE users SET cash=cash+ (?) WHERE id=?\",total,session[\"user_id\"])\n\n return redirect(\"/\")\n\n else:\n rows=db.execute(\"SELECT symbol, SUM(transactions) as amount FROM record WHERE userID=? GROUP BY symbol HAVING transactions\",session[\"user_id\"])\n\n return render_template(\"sell.html\",rows=rows)", "def sell(self, symbol: str=None, quantity: int=0, in_force: str='gtc', extended: bool=False):\n return self.trader.sell(symbol, quantity, in_force, extended)", "def sell():\n rows = db.execute(\"SELECT stock_id, shares, stocks.symbol FROM portfolio JOIN stocks ON portfolio.stock_id = stocks.id WHERE user_id==:user_id\", user_id=session[\"user_id\"])\n if request.method==\"GET\":\n return render_template(\"sell.html\", rows=rows)\n else:\n symbol = request.form.get(\"symbol\")\n if symbol==\"None\":\n return apology(\"You must select a symbol\")\n # shares sold will be stored in history table with negative value\n shares = int(request.form.get(\"shares\"))*(-1)\n if abs(shares) > rows[0][\"shares\"]:\n return apology(\"You don't own enough shares\")\n # run lookup function\n dict_4 = lookup(symbol)\n price = dict_4[\"price\"]\n # Insert new transaction in 'history' table\n db.execute(\"INSERT INTO history(user_id, stock_id, price, shares, buy) VALUES(:user_id, :stock_id, :price, :shares, :buy)\", user_id=session[\"user_id\"], stock_id=rows[0][\"stock_id\"], price=price, shares=shares, buy=0)\n # UPDATE shares in 'portfolio' table\n new_shares = (rows[0][\"shares\"])+shares\n db.execute(\"UPDATE portfolio SET shares==:shares WHERE user_id==:user_id and stock_id==:stock_id\", user_id=session[\"user_id\"], stock_id=rows[0][\"stock_id\"], shares=new_shares)\n # Update cash in 'users' table\n row_cash = db.execute(\"SELECT cash FROM users WHERE id==:user_id\", user_id=session[\"user_id\"])\n new_cash = row_cash[0][\"cash\"]-(price*shares)\n db.execute(\"UPDATE users SET cash==:cash WHERE id==:user_id\", user_id=session[\"user_id\"], cash=new_cash)\n # message to be retrieved in portfolio.html when user sells stock\n flash('Sold!')\n return redirect(\"/\")", "def sellStock(self, barcode):\n # TODO\n # hint: Look through the list of items\n # and call the 'sell' method of the relevant item\n # return an error if the product isn't found\n # No. 
7\n invalid_barcode = 0\n for item in self.stocklist:\n if barcode == StockItem.getBarcode(item):\n invalid_barcode = 1\n if StockItem.sell(item) == 1:\n # StockItem.setQuantity(StockItem, 0) find away of reducing the stock quantity\n newQty = StockItem.getQuantity(item) - 1 # We reduce stock by one per item solid\n StockItem.setQuantity(item, newQty)\n print(\"Sold: Successfully: Qty remaining: \", StockItem.getQuantity(item))\n else:\n raise SoldOutOfStockError()\n if invalid_barcode == 0:\n raise ItemNotFoundError(barcode)", "def sell_bike(self, i):\n if i < len(self.inventory):\n self.sold.append(self.inventory[i])\n else:\n print \"That bike is not in stock\"", "def sell():\n\n user = session[\"user_id\"]\n\n # If GET just view\n if request.method == \"GET\":\n # view transactions\n rows = db.execute(\"SELECT symbol, amount FROM stocks WHERE user_id = :user\", user=user)\n\n # Create dictionary for stocks data owned\n stocks = {}\n for row in rows:\n stocks[row['symbol']] = row['amount']\n\n return render_template(\"sell.html\", stocks=stocks)\n\n # I case of POST\n amount=int(request.form.get(\"amount\"))\n symbol=request.form.get(\"symbol\")\n price=lookup(symbol)[\"price\"]\n value=round(price * float(amount))\n\n # Update stocks table\n stocks_before = db.execute(\"SELECT amount FROM stocks WHERE user_id = :user AND symbol = :symbol\", symbol=symbol, user=user)[0]['amount']\n stocks_after = stocks_before - amount\n\n # not enough\n if stocks_after < 0:\n return render_template(\"sell.html\", error=True, message=\"You can't sell more than you have\")\n\n # delete stock\n elif stocks_after == 0:\n db.execute(\"DELETE FROM stocks WHERE user_id = :user AND symbol = :symbol\", symbol=symbol, user=user)\n\n # or update it\n else:\n db.execute(\"UPDATE stocks SET amount = :amount WHERE user_id = :user AND symbol = :symbol\", symbol=symbol, user=user, amount=stocks_after)\n\n # update cash and history\n cash = db.execute(\"SELECT cash FROM users WHERE id = :user\", user=user)[0]['cash']\n cash_after = cash + price * float(amount)\n db.execute(\"UPDATE users SET cash = :cash WHERE id = :user\", cash=cash_after, user=user)\n db.execute(\"INSERT INTO transactions(user_id, symbol, amount, value) VALUES (:user, :symbol, :amount, :value)\",\n user=user, symbol=symbol, amount=-amount, value=value)\n\n # If success redirect\n return redirect(\"/\")", "def ramp_down(self) -> None:\n for stock in self.stocks:\n if stock.are_any_shares_owned():\n self.cash_balance = stock.sell(-1, self.cash_balance, self.buy_budget)", "def sellAtMarketOpen(self):\n\n dt = datetime.now(tz=pytz.UTC).replace(microsecond=0)\n\n dt_central = dt.astimezone(pytz.timezone('US/Central'))\n\n day = dt_central.strftime(\"%a\")\n\n tm = dt_central.strftime(\"%H:%M:%S\")\n\n weekdays = [\"Sat\", \"Sun\"]\n\n # CHECK IF MARKET OPEN AND NOT WEEKEND\n if tm == \"08:30\" and day not in weekdays:\n\n queue_orders = self.mongo.queue.find(\n {\"Trader\": self.user[\"Name\"], \"Account_ID\": self.account_id, \"Order_Type\" : \"SELL\"})\n\n for order in queue_orders:\n\n # CANCEL ORDER\n resp = self.tdameritrade.cancelOrder(order[\"Order_ID\"])\n\n if resp.status_code == 200 or resp.status_code == 201:\n\n trade_data = {\n \"Symbol\": order[\"Symbol\"],\n \"Side\": \"SELL\",\n \"Aggregation\": order[\"Aggregation\"],\n \"Strategy\": order[\"Strategy\"],\n \"Asset_Type\": order[\"Asset_Type\"],\n \"Account_ID\": self.account_id\n }\n\n # SELL MARKET ORDER\n self.placeOrder(trade_data, order, orderType=\"MARKET\")", "def market_sell(self, 
order_id, quantity):\n Library.functions.market_sell(self._book, order_id, quantity)", "def sell():\n\n # id user session\n user_id = session[\"user_id\"]\n\n # User reached route via GET\n if request.method == \"GET\":\n\n # Shares owned by the user\n current_stocks = db.execute(\n \"SELECT symbol FROM purchases WHERE user_id = :id GROUP BY symbol HAVING SUM(shares) > 0\", id=user_id)\n return render_template(\"sell.html\", current_stocks=current_stocks)\n\n # User reached route via POST\n else:\n\n # Assign inputs to variables\n symbol = request.form.get(\"symbol\")\n shares = request.form.get(\"shares\")\n\n # Choose symbol\n if not symbol:\n return apology(\"choose symbol\")\n\n # Ensure user entered a positive integer for number of shares\n if int(shares) <= 0:\n return apology(\"number of shares must be a positive integer\")\n\n # Query database for user's purchases\n stock = db.execute(\"SELECT SUM(shares) as shares FROM purchases WHERE user_id = :id AND symbol = :symbol\",\n id=user_id, symbol=symbol)\n\n # Ensure user has enough shares for selected symbol\n if stock[0][\"shares\"] < int(shares):\n return apology(\"not enough shares\")\n\n # Query database to insert transaction\n db.execute(\"INSERT INTO purchases (user_id, symbol, name, shares, price, data) VALUES (:id, :symbol, :name, :shares, :price, :data)\",\n id=user_id,\n symbol=symbol,\n name=lookup(symbol)[\"name\"],\n shares=int(shares) * (-1),\n price=lookup(symbol)[\"price\"],\n data=datetime.now())\n\n # Calculate total price based on number of shares and stock's current price\n total_price = lookup(symbol)[\"price\"] * int(shares)\n\n # How much cash the user currently\n cash = db.execute(\"SELECT cash FROM users WHERE id = :id\", id=user_id)[0][\"cash\"]\n\n # Query database to update user's cash balance\n db.execute(\"UPDATE users SET cash = :cash WHERE id = :id\",\n id=user_id,\n cash=cash + total_price)\n\n # Redirect user to homepage\n return redirect(\"/\")", "def do_sell():\n order_size = calculate_sell_order_size()\n if order_size is None:\n return None\n i = 1\n while i <= CONF.trade_trials:\n sell_price = calculate_sell_price(get_current_price())\n order = create_sell_order(sell_price, order_size)\n if order is None:\n LOG.error(\"Could not create sell order over %s\", order_size)\n return None\n write_action('-SELL')\n order_status = poll_order_status(order.id, 10)\n if order_status == 'open':\n cancel_order(order)\n i += 1\n daily_report()\n else:\n return order\n write_action('-SELL')\n return create_market_sell_order(order_size)", "def sell():\n\n # User reached route via GET (as by submitting a form via GET)\n if request.method == \"GET\":\n\n # Select user symbol from total\n symbol_sel = db.execute(\"SELECT symbol FROM total WHERE userID = :userID\", userID=session[\"user_id\"])\n return render_template(\"sell.html\", symbol_sel=symbol_sel, sslen=len(symbol_sel) )\n else:\n # Get symbol and number through input form\n symbol = request.form.get(\"symbol\")\n number = request.form.get(\"shares\")\n\n # Ensure sell symbol was submitted\n if not symbol:\n return apology(\"must provide symbol\", 400)\n\n # Ensure sell number was submitted\n if not number:\n return apology(\"must provide number\", 400)\n\n # Check if request.form.get(\"symbol\") in lookup() table\n symbol = lookup(symbol)\n if not symbol:\n return apology(\"must provide right symbol\", 400)\n else:\n\n # Get name, price, symbol from lookup function\n name = symbol.get(\"name\")\n price = symbol.get(\"price\")\n symbol = 
symbol.get(\"symbol\")\n\n # SELECT symbol in TABLE total\n symbolIn = db.execute(\"SELECT symbol FROM total WHERE userID = :userID and symbol = :symbol\",\n userID=session[\"user_id\"], symbol=symbol)\n\n # Ensure user have this symbol\n if not symbolIn:\n return apology(\"you don't have this symbol\", 400)\n\n # Ensure sell number is a number\n nlen = len(number)\n for i in range(nlen) :\n if number[i].isdigit() != True :\n return apology(\"sell number need to be a number\", 400)\n\n number = int(number)\n\n # Check positive number\n if number > 0:\n\n # SELECT sharesTotal in TABLE total\n symbolNum = db.execute(\"SELECT sharesTotal FROM total WHERE userID = :userID and symbol = :symbol\",\n userID=session[\"user_id\"], symbol=symbol)\n\n # Ensure user have sharesTotal\n if symbolNum[0][\"sharesTotal\"] < number:\n return apology(\"you don't have this number\", 400)\n\n # Selsct cash from user TABLE\n cash = db.execute(\"SELECT cash FROM users WHERE id = :userID\",\n userID=session[\"user_id\"])\n\n # Count total\n totalGet = price*number\n cash = cash[0][\"cash\"] + totalGet\n\n # Update csah in user\n db.execute(\"UPDATE users SET cash = :cash WHERE id = :userID\", cash=cash, userID=session[\"user_id\"])\n\n # Check sell time\n now = datetime.now(timezone('Asia/Shanghai'))\n\n # INSERT sell TABLE date, shares, price, name, symbol, totalGet\n db.execute(\"INSERT INTO sell (date, symbol, name, price, shares, totalGet, userID) VALUES (:date, :symbol, :name, :price, :shares, :totalGet, :userID)\",date=now, symbol=symbol, name=name, price=price, shares=number, totalGet=totalGet, userID=session[\"user_id\"])\n\n # Add to buy-sell table\n db.execute(\"INSERT INTO bs (symbol, price, shares, date, userID) VALUES (:symbol, :price, :shares, :date, :userID)\", symbol=symbol, price=usd(price), shares=-number, date=now, userID=session[\"user_id\"])\n\n # SELECT costmoneyTotal FROM total\n costTot = db.execute(\"SELECT costmoneyTotal FROM total WHERE userID = :userID and name = :name\",\n userID=session[\"user_id\"], name = name)\n\n # Change costmoneyTotal FROM total\n costTotEnd = costTot[0][\"costmoneyTotal\"]-totalGet\n\n # Update sharesTotal, costmoneyTotal total did by order\n db.execute(\"UPDATE total SET sharesTotal = :sharesTotal, costmoneyTotal = :costmoneyTotal WHERE userID = :userID and name = :name\", sharesTotal=symbolNum[0][\"sharesTotal\"]-number, costmoneyTotal=costTotEnd, userID=session[\"user_id\"], name=name)\n\n # Falsh massage\n flash('sell')\n\n # render selled template\n return render_template(\"selled.html\",symbol=symbol, name=name, price=price, number=symbolNum[0][\"sharesTotal\"]-number, totalGet=usd(totalGet), costTotEnd=usd(cash))\n else:\n return apology(\"positive number\", 400)", "def sell():\n if request.method == \"POST\":\n\n #test for selection of stocks\n if request.form.get(\"symbol\") == \"\" or request.form.get(\"shares\") == \"\":\n return apology(\"Please fill in all fields\")\n\n #test for positive integer\n if str.isdigit(request.form.get(\"shares\")) == False:\n return apology(\"Please select a positive number of shares\")\n\n # does the user have enough shares of that stock\n user_stock = request.form.get(\"symbol\")\n user_number = int(request.form.get(\"shares\"))\n owned = db.execute(\"SELECT SUM(number) FROM portfolio WHERE userid=:id AND stock=:stock\", stock = user_stock, id=session[\"user_id\"])\n owned = int(owned[0]['SUM(number)'])\n if user_number > owned:\n return apology(\"You don't have enough shares\")\n\n #in the portfolio table, add a 
negative to the number field of the purchased stock\n #in the cash table, lookup the current price and add the cash to the user's cash balanace\n else:\n pay = lookup(request.form.get(\"symbol\"))\n user_number = int(request.form.get(\"shares\"))\n db.execute(\"UPDATE users SET cash=cash+:total WHERE id=:userid\", total=(pay['price'] * user_number), userid=session[\"user_id\"])\n\n user_number = int(request.form.get(\"shares\")) * -1\n db.execute(\"INSERT INTO portfolio (stock, number, price, trans_price, userid) VALUES (:stock, :number, :price, :trans_price, :userid)\", stock=user_stock, number=user_number, price=(pay['price'] * user_number), trans_price=usd(pay['price']), userid=session[\"user_id\"])\n\n user_id=session[\"user_id\"]\n return redirect(url_for('index'))\n\n if request.method == \"GET\":\n #get stocks from portfolio and return to html form\n stocks = db.execute(\"SELECT stock FROM portfolio WHERE userid=:id GROUP BY stock\", id=session[\"user_id\"])\n return render_template(\"sell.html\", stocks=stocks)", "def buy(self, stock, amount):\n self.orders[stock] += amount", "def transact_shares(self, action, quantity, price, commission, bid=None, ask=None):\n if bid is None: \n bid = price\n if ask is None:\n ask = price\n\n if action is None:\n return\n\n self.total_commission += commission\n\n # Adjust total bought and sold\n if action == \"BOT\":\n self.avg_bot = (self.avg_bot * self.buys + price * quantity) / (self.buys + quantity)\n\n if self.net < 0:\n self.realised_pnl += min(quantity, abs(self.net)) * (self.avg_price - price) - commission # Adjust realised PNL\n commission = 0 # assume commission is all in realised_pnl\n # Increasing long position\n self.avg_price = (self.avg_price * self.net + price * quantity + commission) / (self.net + quantity)\n self.buys += quantity\n self.total_bot = self.buys * self.avg_bot\n\n # action == \"SLD\"\n else:\n self.avg_sld = (self.avg_sld * self.sells + price * quantity) / (self.sells + quantity)\n\n if self.net > 0:\n self.realised_pnl += min(quantity, abs(self.net)) * (price - self.avg_price) - commission # Adjust realised PNL\n commission = 0 # assume commission is all in realised_pnl\n\n self.avg_price = (self.avg_price * self.net - price * quantity - commission) / (self.net - quantity)\n self.sells += quantity\n self.total_sld = self.sells * self.avg_sld\n\n # Adjust net values, including commissions\n self.net = self.buys - self.sells\n self.net_total = self.total_sld - self.total_bot\n self.net_incl_comm = self.net_total - self.total_commission\n self.cost_basis = self.net * self.avg_price\n\n self.update_market_value(bid, ask)", "def buy(self):\n\n from_symbol = self.symbol\n to_symbol = self.currency\n price = self.data[0].close\n amount = self.portfolio['buy_sell_amount'][self.currency]\n date = self.date\n\n if self.slippage:\n slip_factor = (self.data[-1].high - self.data[-1].close)*self.slippage\n price += np.abs(slip_factor)\n\n self.trade_manager.buy(from_symbol, to_symbol, price, amount, date)", "def sell():\n \n #Create list of stocks in users portfolio\n stocks = [s for s in portfolio() if 'symbol' in s.keys()]\n\n #User arrived via GET\n if request.method == 'GET':\n #Return sell.html\n return render_template('sell.html', stocks=stocks)\n\n #User arrived via POST\n else:\n if request.method == 'POST':\n\n #Set variable for selected stock\n stock = [s for s in stocks if s['symbol'] == request.form.get('symbol')][0]\n\n #Make sure user has enough stock to sell\n if int(request.form.get('shares')) > stock['shares']:\n 
return apology('too many shares', 400)\n\n else:\n #See what stock is currently selling for and store in variable\n price = lookup(stock['symbol'])['price']\n\n #Add transaction to history\n trans = Transactions(symbol=stock['symbol'].upper(), shares=(int(request.form.get('shares')) * -1), \n price=price, transacted=datetime.now(), owner=session['user_id'])\n db.session.add(trans)\n db.session.commit()\n\n #update user's cash\n Users.query.filter_by(id=session['user_id']).first().cash += (price * int(request.form.get('shares')))\n db.session.commit()\n\n return redirect('/')", "async def _submit_trade_sell(self, trade: Dict[str, Any]) -> str:\n\n pair = trade['pair']\n filled_quantity = trade['quantity'] - trade['remaining']\n base_mult = await self.market.get_pair_base_mult(config['trade_base'], pair)\n\n if filled_quantity > 0.0:\n min_size = self.market.min_trade_size / base_mult\n if min_size < self.market.min_trade_sizes[pair]:\n min_size = self.market.min_trade_sizes[pair]\n\n min_value = min_size / filled_quantity\n order_id = await self.api.sell_limit(pair, filled_quantity, min_value)\n\n if order_id is None:\n quote = pair.split('-')[1]\n reserved = config['remit_reserved'][quote] if quote in config['remit_reserved'] else 0.0\n balance = await self.api.get_balance(quote)\n\n if balance is None:\n self.log.error(\"Could not get available balance for {}!\", quote)\n return None\n\n balance -= reserved\n\n if balance >= min_size:\n min_value = min_size / balance\n self.log.warning(\"{} re-trying sell with available balance {}.\", pair, balance)\n order_id = await self.api.sell_limit(pair, balance, min_value)\n\n if order_id is None:\n self.log.error(\"{} could not submit market sell for trade {}!\", pair, trade['order_id'])\n\n else:\n self.log.info(\"{} submitted market sell for trade {}.\", pair, trade['order_id'])\n\n return order_id\n\n self.log.warning(\"{} has no filled volume on trade {} for sell.\", pair, trade['order_id'])\n return None", "def sell():\n\n # User reached route via GET\n if request.method == \"GET\":\n\n # Display quote\n return render_template(\"sell.html\")\n\n # User reached route via POST\n else:\n\n sell_symbol = request.form.get(\"sell_symbol\").upper()\n sell_amount = float(request.form.get(\"sell_amount\"))\n\n temp_symbol = []\n\n if sell_amount < 1:\n return apology(\"You can only sell a positive amount. 
To buy, please go to buy\", 403)\n\n user_id = session[\"user_id\"]\n\n # Check if client owns stock\n symbol = db.execute(\"SELECT DISTINCT symbol FROM stocks WHERE user_id = :user_id\", user_id = user_id)\n amount = db.execute(\"SELECT SUM(amount) FROM stocks WHERE symbol = :symbol\", symbol = sell_symbol)\n\n for i in range(len(symbol)):\n temp_symbol.append(symbol[i][\"symbol\"])\n\n if sell_symbol not in temp_symbol:\n return apology(\"Sorry, you don't own this stock\", 403)\n\n if amount[0][\"SUM(amount)\"] < sell_amount:\n return apology(\"Sorry, you don't have enough stocks\", 403)\n\n stock_price = float(lookup(sell_symbol)[\"price\"])\n\n # Returns a list\n cash_list = db.execute(\"SELECT cash FROM users WHERE id= :user_id\", user_id = user_id)\n cash = float(cash_list[0]['cash'])\n\n current_cash = cash + (sell_amount * stock_price)\n\n db.execute(\"INSERT INTO stocks (symbol, price, amount, user_id) VALUES (:symbol, :price, :amount, :user_id)\", {\"symbol\": sell_symbol, \"price\": stock_price, \"amount\": (-1 * sell_amount), \"user_id\": user_id})\n\n db.execute(\"UPDATE users SET cash = :current_cash WHERE id = :user_id\", {\"current_cash\": current_cash, \"user_id\": user_id})\n\n # Redirect user to home page\n return redirect(\"/\")", "def reverse_buy(self, amount):\n trade_amount = 0\n precision = pow(10, self.pair.get_base_token().get_decimals() - self.pair.get_quote_token().get_decimals())\n for i in range(len(self.book[Trade.WAY_SELL])):\n offer = self.book[Trade.WAY_SELL][i]\n amount_quote = offer.get_quote_amount() # GAS\n amount_base = offer.get_base_amount() # NEO\n price = offer.get_price()\n\n if amount_quote >= amount:\n if self.pair.get_exchange().get_fee_token():\n trade_amount = trade_amount + amount*price * precision\n else:\n trade_amount = trade_amount + amount*price * precision / (1 - self.pair.get_exchange().get_fees())\n return int(trade_amount)\n\n '''\n Is the offered amount less than needed, you can only buy the offered amount and continue\n '''\n trade_amount = trade_amount + amount_base\n amount = amount - amount_quote\n\n '''\n Not enough volume or amount to high\n '''\n raise KeyError(\"Not enough offers in orderbook. 
Low volume or amount to high.\")", "def buy_stock (self, ticker, buy_date, sell_date, amount):\n\n if self.__buy_stock_init__(ticker, buy_date, sell_date, amount) == False:\n return\n\n if self.__get_hist__() == False:\n return\n\n self.__calc_no_shares_to_buy__()\n self.__update_buy_amount__() \n self.__save_buy__()", "def buy():\n # User reached route via POST (as by submitting a form via POST)\n if request.method == \"POST\":\n\n # Ensure symbol was submitted\n if not request.form.get(\"symbol\"):\n return apology(\"must provide symbol\", 403)\n \n # Creates dict\n symbol_info = lookup(request.form.get(\"symbol\"))\n \n # Checks that symbol exists\n if symbol_info == None:\n return apology(\"Invalid Symbol\", 403)\n \n # Ensure number of shares was submitted\n if not request.form.get(\"shares\"):\n return apology(\"must provide number of shares\", 403)\n \n # Ensure shares is valid\n try:\n if not int(request.form.get(\"shares\")) > 0:\n return apology(\"invalid value\", 403)\n except ValueError:\n return apology(\"invalid value\", 403)\n \n # Ensure there's enough money to buy share\n user_money = db.execute(\"SELECT cash FROM users WHERE id=:userid\", userid=session[\"user_id\"])\n cash = float(user_money[0][\"cash\"])\n if cash < float(symbol_info[\"price\"]) * float(request.form.get(\"shares\")):\n return apology(\"Not enough money\", 403)\n \n # Update user\n updated_money = cash - (float(symbol_info[\"price\"]) * float(request.form.get(\"shares\")))\n db.execute(\"UPDATE users SET cash = :updated WHERE id=:usid\", updated=updated_money, usid=session[\"user_id\"])\n \n # Update shares table\n symbol_dicts = db.execute(\"SELECT share FROM shares WHERE user_id = :usid\", usid=session[\"user_id\"])\n exist = 0\n for i in range(len(symbol_dicts)):\n if symbol_dicts[i][\"share\"].upper() == request.form.get(\"symbol\").upper():\n exist = 1\n break\n \n if exist == 0:\n db.execute(\"INSERT INTO shares (user_id, share, share_count) VALUES (:usid, :symbol, :count)\", usid=session[\"user_id\"], symbol=request.form.get(\"symbol\").upper(), count=int(request.form.get(\"shares\")))\n else:\n db.execute(\"UPDATE shares SET share_count = share_count + :count WHERE share = :symbol AND user_id = :usid\", count=int(request.form.get(\"shares\")), symbol=request.form.get(\"symbol\").upper(), usid=session[\"user_id\"])\n \n # Record transaction\n db.execute(\"INSERT INTO history (user_id, symbol, shares, time, price) VALUES (:usid, :symbol, :shares, :time, :price)\", usid=session[\"user_id\"], symbol=symbol_info[\"symbol\"], shares=request.form.get(\"shares\"), time=str(db.execute(\"SELECT CURRENT_TIMESTAMP\")[0][\"CURRENT_TIMESTAMP\"]), price=str(symbol_info[\"price\"]))\n \n return redirect(\"/\")\n\n # User reached route via GET (as by clicking a link or via redirect)\n else:\n return render_template(\"buy.html\")", "async def trade(self, ctx, sell_amount : float, sell_symbol, \n buy_amount : float, buy_symbol, date=None):\n user = ctx.message.author\n portfolio = GetPortfolio(user.id, util.GetTimestamp(date))\n portfolio.Sell(sell_amount, sell_symbol)\n portfolio.Buy(buy_amount, buy_symbol)\n await self.bot.say('%s\\'s portfolio is now worth $%.2f.' 
% \n (user, portfolio.Value()))\n portfolio.Save()", "def sell_limit(self, market, quantity, rate):\n return self.api_query('Trade', {'type':'sell', 'pair': market, 'amount': quantity, 'rate':'%.8f'%rate})", "def volume_sell(self, price=None):\n if price is None:\n return Library.functions.volume_sell(self._book)\n return Library.functions.volume_sell_price(self._book, price)", "def sell(self):\n self.status = \"sold\"\n return self", "def sell():\n if request.method == \"POST\":\n symbol = request.form.get(\"symbol\")\n shares = request.form.get(\"shares\")\n \n # calculate total price for the sell request\n company_name = lookup(request.form.get(\"symbol\"))[\"name\"]\n curr_price = lookup(request.form.get(\"symbol\"))[\"price\"]\n total_price = curr_price * -int(request.form.get(\"shares\"))\n\n # db.execute returns list of dicts (one dict, actually), where key == \"cash\" and value - cash left in user's account\n cash_left = db.execute(\"SELECT cash FROM users WHERE id = :id\", id = session[\"user_id\"])[0][\"cash\"]\n\n # calculate if user has enough shares for operation to be made\n shares = db.execute(\"SELECT SUM(Shares) FROM portfolio WHERE id = :id AND Company = :company GROUP BY Company\", id = session[\"user_id\"], company=company_name)\n\n if shares[0][\"SUM(Shares)\"] < int(request.form.get(\"shares\")):\n return apology(\"you do not have enough shares for this operation to be completed\")\n\n # add operation to users portfolio\n exe = db.execute(\"INSERT INTO portfolio (id, Symbol, Company, Shares, Price, Total) VALUES(:id, :Symbol, :Company, :Shares, :Price, :Total)\",\n id=session[\"user_id\"], Symbol=request.form.get(\"symbol\").upper(), Company=lookup(request.form.get(\"symbol\"))[\"name\"],\n Shares=-int(request.form.get(\"shares\")), Price=curr_price, Total=total_price)\n\n # update cash\n db.execute('UPDATE users SET cash = :cash WHERE id = :id', cash=cash_left - total_price, id=session[\"user_id\"])\n\n return redirect(\"/\")\n\n else:\n # extract list of companies user has in portfolio\n companies = db.execute(\"SELECT Symbol FROM portfolio WHERE id = :id GROUP BY Symbol\", id = session[\"user_id\"])\n\n return render_template(\"sell.html\", companies = companies)", "def sell_etf(self, etf_name, sell_date, sell_price, commissions, n_shares='all'): \n assert etf_name in self.etfs.keys(), 'ETF not in portfolio'\n assert isinstance(sell_date, date), 'Sell_date parameter needs to be a datetime.date instance'\n assert isinstance(sell_price, float), 'Sell_price must be float'\n assert isinstance(commissions, float), 'Commissions must be float'\n assert n_shares == 'all' or isinstance(n_shares, int), 'N_shares must be int'\n if n_shares == 'all':\n self.etfs[etf_name].sell(sell_date, sell_price, commissions)\n new_file = pd.read_csv(self.infoFile, index_col='Name')\n new_file.loc[etf_name, 'sell_date'] = sell_date\n new_file.loc[etf_name, 'sell_price'] = sell_price\n new_file.loc[etf_name, 'sell_commissions'] = commissions\n new_file.to_csv(self.infoFile)\n else:\n new_file = pd.read_csv(self.infoFile, index_col='Name')\n assert 0 < n_shares <= new_file.loc[etf_name, 'n_shares'], f'Number of shares must be between 0 and {new_file.loc[etf_name, \"n_shares\"]}'\n new_file.loc[etf_name, 'n_shares'] -= n_shares # Take out the sold shares\n prevEtf = self.get_etf_by_name(etf_name)\n newName = self.find_next_name(etf_name)\n newEtf = ETF(newName, prevEtf.buy_date, n_shares, prevEtf.buy_price, prevEtf.commissions[0], sell_date, sell_price, prevEtf.info, commissions)\n 
new_file.to_csv(self.infoFile)\n self.add_etf(newEtf)\n self.refresh()", "def sellOutAllStock(self):\n # GET ALL POSITIONS FOR ACCOUNT\n open_positions = self.open_positions.find({\"Trader\": self.user[\"Name\"], \"Asset_Type\" : self.asset_type, \"Account_ID\" : self.account_id})\n\n for position in open_positions:\n\n trade_data = {\n \"Symbol\": position[\"Symbol\"],\n \"Side\": \"SELL\",\n \"Aggregation\": position[\"Aggregation\"],\n \"Strategy\": position[\"Strategy\"],\n \"Asset_Type\": position[\"Asset_Type\"],\n \"Account_ID\": self.account_id\n }\n\n queued = self.queue.find_one(\n {\"Trader\": self.user[\"Name\"], \"Symbol\": position[\"Symbol\"], \"Strategy\": position[\"Strategy\"], \"Asset_Type\": position[\"Asset_Type\"], \"Account_ID\" : self.account_id})\n\n if not queued:\n\n self.placeOrder(trade_data, position, orderType=\"MARKET\")", "def trading_alg(self,table_name = None, buy_now = False, strategy_name = \"sma9\"):\n \n self.bs.buyed_stocks = 0\n self.bs.money = self.bs.startCredit\n spy_stocks = self.load_data(table_name = table_name, symbols = [\"SPY\"])\n spy_stocks = FinI.add_indicators(spy_stocks)\n \n if self.symbols:\n symbols = self.symbols\n else:\n symbols = self.db.get_symbols()\n\n # symbols = [\"INTC\",\"BYND\",\"ZM\",\"NKE\",\"HIMX\",\"JKS\",\"ENPH\",\"DUK\",\"GE\",\"DIS\",\"LEVI\",\"NVAX\",\"SLCA\",\"GPS\"]\n \n for symbol in symbols:\n print(\"symbol: \" + str(symbol))\n \n sub_data = self.load_data(table_name = table_name, symbols = symbol)\n if len(sub_data) < 1:\n break\n\n self.bt_stocks = FinI.add_indicators(sub_data)\n self.bt_stocks = FinI.add_fib(self.bt_stocks)\n # print(self.bt_stocks)\n print(self.bt_stocks[\"sma30\"])\n print(\"calculating percent change:\" + str(symbol))\n # sub_data = self.stocks.loc[self.stocks.sym ==symbol[0]].sort_values(by='index')\n \n self.symbols = symbol[0]\n \n # self.prev_stock = sub_data.iloc[0]\n # self.bt_stocks.iloc[0] = sub_data.iloc[0]\n\n # self.sell_marks = self.sell_marks.iloc[0:0]\n # self.buy_marks = self.buy_marks.iloc[0:0]\n self.bs.transactions = 0\n self.bs.profit_perc = 0\n \n # trend_indicator = \n # TODO mechanism for select strategies\n # self.ts_boll(buy_now = buy_now, at_settings = None, symbol = symbol, spy_stocks = spy_stocks)\n self.ts_eval(buy_now = buy_now, at_settings = None, symbol = symbol, spy_stocks = spy_stocks, strategy_logic = strategy_name)\n\n # call the method with passed and assembled name\n # method = getattr(self, 'ts_' + strategy_name)\n # method(buy_now = buy_now, at_settings = None, symbol = symbol, spy_stocks = spy_stocks, strategy_name = strategy_name)", "def sell():\n\n if request.method == \"GET\":\n\n #Query for all the stocks in posession.\n ports = db.execute(\"SELECT *, SUM(quantity) as sharetotal FROM portfolio WHERE id = :id GROUP BY name\", id=session[\"user_id\"])\n\n return render_template(\"sell.html\", ports=ports)\n if request.method == \"POST\":\n #Access the form data\n symbol = request.form.get(\"symbol\")\n\n #Check if the shares was an integer\n try:\n shares = int(request.form.get(\"shares\"))\n except:\n return apology (\"Please enter a whole number\", 400)\n\n #Query for the total quantity of that stock in posession\n get_quantity = db.execute(\"SELECT quantity FROM portfolio WHERE id = :id AND symbol = :symbol\", id=session['user_id'], symbol=symbol)\n #Convert the quantity dict to int\n get_quantity_int = int(get_quantity[0]['quantity'])\n\n #Check if the user input a positive number.\n if shares < 0:\n return apology (\"Please enter a 
positive value\", 403)\n\n #Get the current date and time\n now = datetime.now()\n\n date_time = now.strftime(\"%d/%m/%Y %H:%M:%S\")\n\n if shares < 0:\n return apology (\"Please enter a positive value\", 403)\n #Lookup the stock symbol data (price, symbol, company name)\n if shares > get_quantity_int:\n return apology (\"Selling more than you own?\", 400)\n stock = lookup(symbol)\n\n stock_price = stock['price']\n\n #Created a new table using CREATE TABLE 'portfolio' ('user' text, 'quantity' integer, 'price' numeric(15, 2), 'symbol' text)\n\n #Get the total cash value of the user from the database\n get_cash = db.execute(\"SELECT cash FROM users WHERE id = :id\", id=session['user_id'])\n\n #Convert the get_cash dict to float\n check_cash = float(get_cash[0]['cash'])\n\n if not stock:\n return apology (\"Please enter a valid stock\", 403)\n\n #Compute the total amount of the shares sold (One company stock only)\n total = stock_price * float(shares)\n\n #Update the total amount of cash in hand by adding the sold stocks.\n db.execute(\"UPDATE users SET cash = cash + :total WHERE id = :id\", id=session[\"user_id\"], total=total)\n\n #Check if the total quantity of shares is equal to the quantity the user is trying to sell.\n #Add the stock in the history table\n history = db.execute(\"INSERT INTO history (symbol, quantity, price, transacted, id) VALUES (?, ?, ?, ?, ?)\", symbol, int(shares) * -1, float(stock_price), date_time, session[\"user_id\"] )\n\n #If it's equal then delete the stock in the portfolio. #Else, Update the quantity of that stock in the portfolio.\n if shares == get_quantity_int:\n db.execute(\"DELETE FROM portfolio WHERE id = :id AND symbol = :symbol\", id=session['user_id'], symbol=symbol)\n flash('You successfully sold the stock!')\n else:\n db.execute(\"UPDATE portfolio SET quantity = quantity - :shares, total = total -:total WHERE id = :id AND symbol = :symbol\", id=session[\"user_id\"], symbol=symbol, shares=shares, total=total)\n flash('You successfully sold the stock!')\n return redirect (url_for('index'))", "def sell():\n\n\n if request.method == \"POST\":\n symbol = request.form.get(\"symbol\")\n rows = db.execute(\"Select Stock, sum(Num) as Number from portfolio where User = :User and Stock = :symbol group by Stock\", User = session.get(\"user_id\"), symbol = symbol)\n num = rows[0][\"Number\"]\n num1 = int(request.form.get(\"number\"))\n # render apology if the user fails to select a stock\n if not request.form.get(\"symbol\"):\n return apology(\"must provide symbol\", 403)\n\n # Ensure number of shares\n elif not request.form.get(\"number\"):\n return apology(\"must provide number\", 403)\n\n # Ensure if users owns the number of stocks\n elif num1 > num:\n return apology(\"not enough stock\", 403)\n\n #log sale as a negative quant of shares at the current slide\n\n\n stock = symbol\n\n price = float(lookup(stock)['price'])\n\n\n num = -num1\n result = db.execute(\"INSERT INTO portfolio (User, Stock, Price, Num) VALUES(:User, :Stock, :Price, :Num)\", User = session.get(\"user_id\"), Stock = stock, Price = price, Num = num)\n\n\n #update the user cash\n amount = round(num*price,2)\n result = db.execute(\"UPDATE users set cash = cash - :amount where id = :User \", User = session.get(\"user_id\"), amount = amount)\n\n\n# if not result:\n# return apology(\"username already exists\", 403)\n\n # Log user in\n # Query database for username\n# rows = db.execute(\"SELECT * FROM users WHERE username = :username\", username=request.form.get(\"username\"))\n\n\n # 
session[\"user_id\"] = rows[0][\"id\"]\n\n\n # Redirect user to home page\n return redirect(\"/\")\n\n # User reached route via GET (as by clicking a link or via redirect)\n else:\n\n rows = db.execute(\"Select Stock, sum(Num) as Number from portfolio where User = :User group by Stock\", User = session.get(\"user_id\"))\n stockss = rows\n stocksss = []\n for stock in stockss:\n symbol = str(stock[\"Stock\"])\n stocksss.append(symbol)\n\n return render_template(\"sell.html\", x = stocksss)\n\n\n\n\n # get current price for each group (ie AAPL) with help from lookup function (which remember, returns a dict)" ]
[ "0.76451164", "0.748746", "0.7260813", "0.71164954", "0.7057435", "0.70490545", "0.70268077", "0.69872165", "0.6972749", "0.6966809", "0.6915586", "0.68977857", "0.68829334", "0.6849716", "0.68191767", "0.68124473", "0.68022114", "0.6801836", "0.67799073", "0.67698354", "0.67686903", "0.67685384", "0.6763028", "0.6761169", "0.6750394", "0.6737747", "0.67146623", "0.66963017", "0.6695012", "0.66774476", "0.66640884", "0.66626096", "0.6654251", "0.66449285", "0.66422653", "0.66262025", "0.6608471", "0.65532064", "0.6548866", "0.65250057", "0.65241355", "0.6516954", "0.65059006", "0.6501579", "0.6487432", "0.6485335", "0.64743656", "0.64702666", "0.6451987", "0.6428272", "0.6395518", "0.63914704", "0.63885033", "0.63833195", "0.6382582", "0.63755643", "0.63674843", "0.6364422", "0.6359301", "0.63532573", "0.63522965", "0.6348651", "0.634843", "0.6285282", "0.6277801", "0.62740654", "0.6274011", "0.62734246", "0.62478936", "0.62463963", "0.62325263", "0.6216905", "0.61967844", "0.6180049", "0.61729735", "0.6158238", "0.61084294", "0.6108257", "0.6099662", "0.6077487", "0.6073024", "0.6070951", "0.60680014", "0.6059142", "0.6051766", "0.6027577", "0.60202867", "0.60192585", "0.60135007", "0.6010428", "0.59895015", "0.59863704", "0.5983354", "0.59814173", "0.59808", "0.5960544", "0.5959601", "0.5958356", "0.5951698", "0.5944001" ]
0.6538862
39
auth_state enabled and available
async def test_auth_state(app, auth_state_enabled):
    name = 'kiwi'
    user = add_user(app.db, app, name=name)
    assert user.encrypted_auth_state is None
    cookies = await app.login_user(name)
    auth_state = await user.get_auth_state()
    assert auth_state == app.authenticator.auth_state
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_auth_state(self):\n raise NotImplementedError()", "def check_auth():", "def requires_auth(self):\n return True", "def get_authorization():\n return True", "def is_authenticated(self):\n return True #self.authenticated", "def is_authenticated(self):\n return True", "def auth_enabled(self):\n\n return self._api_manager.auth_enabled()", "def is_authenticated(self):\n return True", "def is_authenticated(self):\n return True", "def is_authenticated(self):\n return True", "def is_authenticated(self):\n return True", "def is_authenticated(self):\n return True", "def is_authenticated(self):\n return True", "def is_authenticated(self):\n return True", "def is_authenticated(self):\n result = self.lpass(\"lpass status\")\n\n if \"Logged in as\" in result.output:\n return True\n\n return False", "def set_auth_state(self, data):\n raise NotImplementedError()", "def auth():\n pass", "def auth():\n pass", "def auth_token_enabled(self) -> pulumi.Output[bool]:\n return pulumi.get(self, \"auth_token_enabled\")", "def get_authenticated_granted(self):", "def is_authenticated(self):\n return False", "def authorization():\n pass", "def is_authenticated(self):\n return self.ping() is not None", "def auth_active(hass):\n hass.loop.run_until_complete(\n register_auth_provider(hass, {\"type\": \"homeassistant\"})\n )", "def auth(self):\n ok = False\n if self.private_token:\n ok = self.token_auth()\n if not ok:\n self.credentials_auth()", "def ready(self):\n if self._wait_auth:\n return False\n return True", "def _can_login(self):\n return all([self.user.is_active, self.status, self.status_detail == \"active\"])", "def is_authenticated(self, request, **kwargs):\r\n return True", "def is_authenticated(self):\r\n return self.authenticated", "def authentication_hook(self):\n pass", "def authorized(self):\n pass", "def is_authenticated(self):\n return bool(get_auth_token())", "def authn_and_authz():\n authentication()\n authorization()", "def _base_test_extauth_auto_activate_user_with_flag(self, log_user_string=\"inactive@stanford.edu\"):\r\n inactive_user = UserFactory.create(email='inactive@stanford.edu')\r\n inactive_user.is_active = False\r\n inactive_user.save()\r\n request = self.request_factory.get('/shib-login')\r\n request.session = import_module(settings.SESSION_ENGINE).SessionStore() # empty session\r\n request.META.update({\r\n 'Shib-Identity-Provider': 'https://idp.stanford.edu/',\r\n 'REMOTE_USER': 'inactive@stanford.edu',\r\n 'mail': 'inactive@stanford.edu'\r\n })\r\n\r\n request.user = AnonymousUser()\r\n with patch('external_auth.views.AUDIT_LOG') as mock_audit_log:\r\n response = shib_login(request)\r\n audit_log_calls = mock_audit_log.method_calls\r\n # reload user from db, since the view function works via db side-effects\r\n inactive_user = User.objects.get(id=inactive_user.id)\r\n self.assertIsNotNone(ExternalAuthMap.objects.get(user=inactive_user))\r\n self.assertTrue(inactive_user.is_active)\r\n self.assertIsInstance(response, HttpResponseRedirect)\r\n self.assertEqual(request.user, inactive_user)\r\n self.assertEqual(response['Location'], '/')\r\n # verify logging:\r\n self.assertEquals(len(audit_log_calls), 3)\r\n self._assert_shib_login_is_logged(audit_log_calls[0], log_user_string)\r\n method_name, args, _kwargs = audit_log_calls[2]\r\n self.assertEquals(method_name, 'info')\r\n self.assertEquals(len(args), 1)\r\n self.assertIn(u'Login success', args[0])\r\n self.assertIn(log_user_string, args[0])", "def is_enabled(self):", "def auth_token_enabled(self) -> 
Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"auth_token_enabled\")", "def enable_authentication(self) -> bool:\n return pulumi.get(self, \"enable_authentication\")", "def auth(self):\n try:\n print(\"You are going to log in as Полигон\")\n os.system('clear')\n self.session = vk_api.VkApi(token=self.token)\n self.session._auth_token()\n print(\"authred\")\n vk = self.session.get_api()\n global authed\n self.authed = True\n print('gAut Online')\n self.longpollserver = bot_longpoll.VkBotLongPoll(self.session, 172301854)\n self.gLPS = threading.Thread(target=self.lps, args=(self.session, ), daemon=True)\n return True\n except Exception as e:\n print(e)\n pass", "def auth_status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"auth_status\")", "def authenticated(self):\n # We don't support authentication yet\n return False", "def is_frozensand_auth_available(self):\n cvar = self.getCvar('auth')\n if cvar:\n auth = cvar.getInt()\n return auth != 0\n else:\n return False", "def test_get_authenticated(self):\n self.verify_get_response(self.client.get(STATUS_PATH))", "def check_auth(self):\n if self.type_of_auth == BboxConstant.AUTHENTICATION_TYPE_LOCAL:\n access_level_required = self.get_auth_access_needed_for_local()\n else:\n access_level_required = self.get_auth_access_needed_for_remote()\n\n if access_level_required == BboxConstant.AUTHENTICATION_LEVEL_NONE:\n return False\n elif access_level_required == BboxConstant.AUTHENTICATION_LEVEL_PRIVATE:\n return self.is_authentified()\n elif access_level_required == BboxConstant.AUTHENTICATION_LEVEL_PUBLIC:\n return True", "def _check_auth(self, group_id):\n return", "def _check_auth(self):\n if self.authToken:\n return True\n else:\n msg = \"you need to login\"\n self.raise_error(msg)", "def the_user_should_be_able_to_get_the_state_of_the_connected_device():\n assert web_app.get_state()", "def is_authenticated(self):\n return self.user is not None and self.state == AuthenticationOptions.authenticated", "def test_auth_private(self):\n self.do_visible(True, None, False, tenant='froggy')", "def credentials(self):\n return True", "def auth(self, username, password):\n return False", "def get_auth(self):\n return {'method': yeti_config.core.auth}", "def __enter__(self):\r\n\r\n # if the user account is not activated then no go\r\n if not self.user_acct.activated:\r\n raise HTTPForbidden('Deactivated Account')\r\n\r\n if AuthHelper.check_login(self.request, username=self.username):\r\n return True\r\n\r\n if AuthHelper.check_api(self.api_key, self.user_acct.api_key):\r\n return True\r\n\r\n raise HTTPForbidden('Invalid Authorization')", "def check_auth(cls, Configuration):\n if not Configuration.auth_token:\n cls.authorize(Configuration)", "def authorized(self) -> bool:\n\n return (\n self.activated\n or self.on_screen\n or self.on_file\n or (\n bool(PyFunceble.storage.CONFIGURATION)\n and bool(PyFunceble.storage.CONFIGURATION.debug.active)\n )\n )", "def test_auth_public(self):\n self.do_visible(True, None, True, tenant='froggy')", "def _has_auth_details(self) -> bool:\n\n return all([self.secret is not None, self.api_key is not None])", "def check_user_and_login(self) -> Response:\n pass", "def device_only_auth_enabled(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"device_only_auth_enabled\")", "def _auth_plugin_available(ext):\n return ext.obj.available", "def is_active(self) -> bool:", "def auth_isok(self):\n # pylint: disable=W0603\n global KEY\n return_value = False\n if KEY is None:\n return_value = True\n 
elif self.headers.get('Authorization') == 'Basic ' + KEY:\n return_value = True\n return return_value", "def authenticate(self, request=None):\r\n try:\r\n token = request.META.get('HTTP_AUTHORIZATION') or request.REQUEST['key']\r\n accesskey = AccessKey.objects.select_related('user').get(key=token)\r\n request.user = accesskey.user\r\n return request.user and request.user.is_active\r\n\r\n except(KeyError, AccessKey.DoesNotExist):\r\n return False", "def check_auth(username, password):\n # return username == app.config['USER'] and password == app.config['PASS']\n\n return username == app.config['USER'] and password == app.config['PASS']", "def is_authorized(self) -> bool:\n\t\tif \"access_token\" in session:\n\t\t\tif session.get(\"access_token\") is not None:\n\t\t\t\tif \"user\" in session:\n\t\t\t\t\treturn True\n\t\treturn False", "def is_authenticated(self):\n return self.authenticated", "def is_authenticated(self):\n return self.authenticated", "def is_authenticated(self):\n return self.authenticated", "def is_authenticated(self):\n return self.authenticated", "def is_authenticated(self):\n return self.authenticated", "def is_authenticated(self):\n return self.authenticated", "def is_authenticated(self):\n return self.authenticated", "def is_authenticated(self):\n return self.authenticated", "def is_authenticated(self):\n return self.authenticated", "def is_authenticated(self):\n return self.authenticated", "def is_authenticated(self):\n return self.authenticated", "def is_authenticated(self):\n return self.authenticated", "def is_authenticated(self):\n return self.authenticated", "def default_login_works(self):\n return True if self.default_login_auth_header else False", "def test_public_status_page_add_authorization_to_public_status_page(self):\n pass", "def check_auth(self):\n if self.enterprise_url is not None:\n return True\n try:\n if self.api is not None:\n # Throws AuthenticationFailed if invalid credentials but\n # does not deduct from the rate limit.\n self.api.ratelimit_remaining\n return True\n else:\n self.print_auth_error()\n except AuthenticationFailed:\n self.print_auth_error()\n return False", "def authenticated(self):\n return self.token is not None", "def isActive(state):\n return state in [State.enabled, State.softDisabling]", "def enable_auth(self):\n\n self._api_manager.enable_auth()", "def is_authorized(self, request, obj=None):\r\n return True", "def test_func(self):\n return self.request.user.is_active # any active user", "def auth_status():\n return jsonify(message='Success')", "def device_only_auth_enabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"device_only_auth_enabled\")", "def device_only_auth_enabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"device_only_auth_enabled\")", "def sr_auth_state(self, **kwargs):\n from pykern import pkunit\n from pykern import pkcollections\n\n m = re.search(\n r\"(\\{.*\\})\",\n pkcompat.from_bytes(self.sr_get(\"authState\").data),\n )\n s = pkcollections.json_load_any(m.group(1))\n for k, v in kwargs.items():\n pkunit.pkeq(\n v,\n s[k],\n \"key={} expected={} != actual={}: auth_state={}\",\n k,\n v,\n s[k],\n s,\n )\n return s", "def test_token(self):\n api_response = requests.get(self.api_config.get_api_url() + \"greetings/isloggedin\",\n headers={\"Authorization\": \"Bearer \" + self.API_TOKEN})\n\n if api_response.status_code == 401 or 403:\n return False\n else:\n return True", "def is_authenticated(self):\n if not self.token:\n return False\n\n try:\n 
self.lookup_token()\n return True\n except Forbidden:\n return False\n except InvalidPath:\n return False\n except InvalidRequest:\n return False", "def loginState(self, user_data):\n\t\tif self.db.request(\"getOne\", user_data):\n\t\t\treturn True;\n\t\telse:\n\t\t\treturn False;", "def is_authenticated(self) -> bool:\n return self.requester.uuid is not None", "def isEnabled(state):\n return (isActive(state) or state == State.preEnabled)", "def is_logged_in(self):\n return self.router.token is not None", "def test_activate_unauthenticated(client):\n response = client.post(\"/auth/activate\")\n assert b\"<h1>You are not logged in</h1>\" in response.data\n assert response.status_code == HTTPStatus.OK", "def auth(self):\n if self.get_saved_token():\n return\n self.oauth2()\n self.save_token()", "def test_activate_login(self):\r\n pass", "def is_active(self):\n return self.status == ACTIVE_USER", "def is_active():\n return True" ]
[ "0.74882525", "0.71273947", "0.6910631", "0.6909217", "0.670079", "0.6680911", "0.6628001", "0.65065384", "0.65065384", "0.65065384", "0.65065384", "0.65065384", "0.65065384", "0.65065384", "0.6505308", "0.6504012", "0.64769554", "0.64769554", "0.64572227", "0.64142615", "0.6381516", "0.63628924", "0.6335662", "0.6330429", "0.63162214", "0.6268234", "0.6267411", "0.6244223", "0.6192191", "0.618395", "0.6181388", "0.6169435", "0.61482126", "0.61420214", "0.6122972", "0.60895383", "0.607752", "0.60767335", "0.604296", "0.60370445", "0.60290974", "0.6024908", "0.6020251", "0.60199165", "0.6017317", "0.6015367", "0.59927887", "0.598669", "0.5986333", "0.59675884", "0.59671545", "0.5943072", "0.5927627", "0.5921017", "0.5919868", "0.59133273", "0.5896471", "0.58911747", "0.58871144", "0.58777213", "0.5872557", "0.5871341", "0.587043", "0.5866646", "0.5855878", "0.5855878", "0.5855878", "0.5855878", "0.5855878", "0.5855878", "0.5855878", "0.5855878", "0.5855878", "0.5855878", "0.5855878", "0.5855878", "0.5855878", "0.58553416", "0.5841574", "0.5836217", "0.58348215", "0.58295155", "0.5819418", "0.5809199", "0.5803787", "0.57908726", "0.57908124", "0.57908124", "0.578724", "0.5784481", "0.5784009", "0.5780865", "0.57733804", "0.5764313", "0.57626903", "0.576194", "0.5754186", "0.57523435", "0.5731533", "0.5731394" ]
0.7185515
1
admin should be passed through for nonadmin users
async def test_auth_admin_non_admin(app):
    name = 'kiwi'
    user = add_user(app.db, app, name=name, admin=False)
    assert user.admin is False
    cookies = await app.login_user(name)
    assert user.admin is False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_admin():\n\tif not current_user.is_admin:\n\t\tabort(403)", "def check_admin():\r\n if not current_user.is_admin:\r\n abort(403)", "def check_admin():\n if not current_user.is_admin:\n abort(403)", "def check_admin():\n if not current_user.is_admin:\n abort(403)", "def GET_adminon(self):\r\n #check like this because c.user_is_admin is still false\r\n if not c.user.name in g.admins:\r\n return self.abort404()\r\n self.login(c.user, admin = True)\r\n\r\n dest = request.referer or '/'\r\n return self.redirect(dest)", "def is_admin(self):\n return False", "def admin_required(handler):\n def admin_login(self, *args, **kwargs):\n auth = self.auth\n if not auth.get_user_by_session():\n self.redirect('/auth/login', abort=True)\n \n user = auth.get_user_by_session()\n queried_entity = User.get_by_id(user['user_id'])\n \n if queried_entity and queried_entity.phb_user_admin_status == 'admin-1':\n return handler(self, *args, **kwargs)\n else:\n self.redirect('/', abort = True)\n \n return admin_login", "def is_admin(self):\r\n return self.admin", "def test_admin(self):\r\n \r\n self.assertEqual(False, self.user.isAdmin)", "def is_user_admin(request):\n return request.user.is_superuser", "def check_admin(self, *args, **kwargs):\n if not users.is_current_user_admin():\n self.response.write(\n '<div style=\"padding-top: 200px; height:178px; width: 500px; color: white; margin: 0 auto; font-size: 52px; text-align: center; background: url(\\'http://3.bp.blogspot.com/_d_q1e2dFExM/TNWbWrJJ7xI/AAAAAAAAAjU/JnjBiTSA1xg/s1600/Bank+Vault.jpg\\')\">Forbidden Access <a style=\\'color: white;\\' href=\\'%s\\'>Login</a></div>' %\n users.create_login_url(self.request.path_url + self.request.query_string))\n return\n else:\n return handler(self, *args, **kwargs)", "def is_admin(ctx) -> bool:\n return db.user(ctx.author).is_admin", "def is_administrator(self):\n return False", "def GET_adminoff(self):\r\n if not c.user.name in g.admins:\r\n return self.abort404()\r\n self.login(c.user, admin = False)\r\n\r\n dest = request.referer or '/'\r\n return self.redirect(dest)", "def admin(self, **kwargs):\n with self.user(**kwargs):\n g.admin = True\n yield", "def admin(request):\n if not request.user.is_staff:\n return render(request, 'manager/denied.html')\n return render(request, 'manager/index.html')", "def check_is_admin(context):\n init()\n\n #the target is user-self\n credentials = context.to_dict()\n target = credentials\n\n return policy.check('context_is_admin', target, credentials)", "def is_admin(self, user):\n return (acl.action_allowed(self.request, 'OperatorDashboard', '*') or\n acl.action_allowed(self.request, 'Feed', 'Curate'))", "def admin(ctx):\n return ctx.message.author.permissions_in(ctx.channel).administrator", "def is_admin(self,user):\n if user.is_superuser:\n return True\n\n if user.groups.filter(name=self.admin_group_name).count() > 0:\n return True\n else:\n return False", "def is_admin(self):\n return self.admin", "def is_admin(self, user):\n return user.name in self.admins", "def _check_admin_only(self, request):\r\n api_key = request.params.get(self.api_field, None)\r\n\r\n if request.user is None:\r\n user = self.user_fetcher(api_key=api_key)\r\n else:\r\n user = request.user\r\n\r\n if user is not None and user.is_admin:\r\n request.user = user\r\n return True", "def check_is_admin(current_user):\n return current_user['isAdmin'] == True", "def test_admin_accessible(self) -> None:\n response = self.client.get(\"/admin/\")\n self.assertEqual(200, response.status_code)", "def 
is_admin(self):\n if not self.current_user:\n return False\n else:\n return self.current_user in [\"1\"]", "def admin(request):\n if not request.user.is_staff:\n return render_to_response('error.htm', {\n 'error': \"Sorry, you are not staff... (user permissions 'is_staff')\",\n })\n return render_to_response('admin.htm', {\n 'username': request.user,\n })", "def test_user_can_change_admin(self):\n self.assertTrue(self.story.user_can_change(self.admin_user))", "def user_is_admin(user):\n return user in admins", "def admin():\n pass # pragma: no cover", "def admin_user_only(view):\r\n @google_login_required\r\n def wrapped(request, *args, **kwargs):\r\n if users.is_current_user_admin():\r\n return view(request, *args, **kwargs)\r\n context = RequestContext(request);\r\n return rtr( 'access_limited.html', context,None )\r\n return wraps(view)(wrapped)", "def test_func(self, user):\n return self.get_object().admin == user", "def is_main_admin(self):\n if self.user is None:\n return False\n return self.user.has_permission(\"admin\")", "def admin(self):\n if self.is_admin:\n return True\n return False", "def is_admin(user):\n return user.is_authenticated and user.id == app.config.get('ADMIN')", "def is_not_admin(user):\n return not user.is_superuser", "def validate_admin(self, request):\n\n self.validate_login(request)\n\n if request.session['id'] not in self.admins:\n handler.logHelper.log_it_visit(request, __name__ + '.validate_admin', authorized=False)\n raise PermissionDenied('You need to be an admin to access this page.')", "def test_user_isnt_admin():\n app = create_ctfd()\n with app.app_context():\n register_user(app)\n client = login_as_user(app)\n for page in ['pages', 'teams', 'scoreboard', 'chals', 'statistics', 'config']:\n r = client.get('/admin/{}'.format(page))\n assert r.location.startswith(\"http://localhost/login?next=\")\n assert r.status_code == 302\n destroy_ctfd(app)", "def test_func(self):\n return self.request.user.is_superuser", "def test_10_admin_user_not_listed(self):\r\n self.register()\r\n res = self.app.get('/admin/users', follow_redirects=True)\r\n assert \"Manage Admin Users\" in res.data, res.data\r\n assert \"Current Users with Admin privileges\" not in res.data, res.data\r\n assert \"John\" not in res.data, res.data", "def is_admin(self):\n return self._is_admin", "def is_admin(self):\n return self._is_admin", "def check_is_admin(context):\n\n init()\n # the target is user-self\n target = default_target(context)\n return _ENFORCER.authorize('context_is_admin', target, context)", "def admin_required(func):\n @wraps(func)\n def wrapper(request):\n if not request.user:\n return web.json_response({'status': 'error', 'message': 'auth required'}, status=401)\n if request.user != config['server']['admin_username']:\n return web.json_response({'status': 'error', 'message': 'admin rights required'}, status=403)\n return func(request)\n return wrapper", "def get_is_admin(self, obj):\n try:\n user = self.context.get('request').user\n except Exception:\n # raise serializers.ValidationError('Could not access request.user')\n return False\n if user == obj.admin:\n return True\n else:\n return False", "def admin_required(func):\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n if current_user.is_admin:\n return func(*args, **kwargs)\n else:\n return login_manager.unauthorized()\n\n return wrapper", "def admin_required(f): # pragma: no cover\r\n @wraps(f)\r\n def decorated_function(*args, **kwargs):\r\n if current_user.admin:\r\n return f(*args, **kwargs)\r\n else:\r\n return 
abort(403)\r\n return decorated_function", "def check_is_admin(context):\n init()\n credentials = context.to_policy_values()\n target = credentials\n return _ENFORCER.authorize('admin_required', target, credentials)", "def requires_admin(method):\r\n def wrapper(self, *args, **kwargs):\r\n user = users.get_current_user()\r\n if not user:\r\n if web.ctx.method == \"GET\":\r\n raise web.seeother(users.create_login_url(web.ctx.fullpath))\r\n raise web.forbidden()\r\n elif not (users.is_current_user_admin()):\r\n raise web.forbidden()\r\n else:\r\n return method(self, *args, **kwargs)\r\n return wrapper", "def isAdmin(self, user):\r\n if user.id in self.admins:\r\n return True\r\n return False", "def is_staff(self):\r\n return self.is_admin", "def validateUser(self,admin):\n \n res=admin.helper.getOneUser(self.name)\n if res == False:\n return True\n else:\n return False", "def is_admin(context):\n request = context[\"request\"]\n url = resolve(request.path)\n context['is_admin'] = False\n return url.app_name == 'admin'", "def on_model_change(self, form, model, is_created):\n if not current_user.is_active or not current_user.is_authenticated:\n abort(403)\n elif is_created:\n if not user_has_permission(current_user, 'can_create','admins'):\n abort(403)\n else:\n if not user_has_permission(current_user, 'can_edit','admins'):\n abort(403)", "def admin(func) :\n def redirectlogin(session, self) :\n setSessionMessage(session, \"Admin Login Required.\", True)\n return self.redirect('/login')\n\n def checkauth(*args, **kwargs) : \n self = args[0]\n\n \n session = getSessionByRequest(self)\n user = getSessionUser(session)\n\n if not user :\n return redirectlogin(session, self)\n\n if user.userType == 'ADMIN' : \n return func(*args, **kwargs)\n\n return redirectlogin(session, self)\n return checkauth", "def isAdmin(user):\n return isUserType(user, Admin)", "def is_admin():\n # TODO(felipemonteiro): Make this more robust via a context is admin\n # lookup.\n return CONF.patrole.rbac_test_role == CONF.identity.admin_role", "def check_admin() -> bool:\n return ctypes.windll.shell32.IsUserAnAdmin() == 1", "def admin_flag(user_id):\n user = User.query.filter_by(id=user_id).first()\n if user.is_admin:\n return True\n return False", "def test_admin(self):\n assert(admin)", "def user_is_admin(userobj):\n from .node import Node\n from .subject import Subject\n from .period import Period\n from .assignment import Assignment\n return user_is_basenodeadmin(userobj, Node, Subject, Period, Assignment)", "def admin_only():\n return 'Super-seekrit admin page.'", "def admin_required(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if session.get(\"user_id\") not in getAdminIDs():\n return redirect(\"/\")\n return f(*args, **kwargs)\n return decorated_function", "def admin_required(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n isAdmin = get_current_user()[\"isAdmin\"]\n if isAdmin == False:\n return jsonify({\"messsage\": \"Only admin can access this route\"}), 401\n return func(*args, **kwargs)\n return wrapper", "def require_admin(func):\n\n @wraps(func)\n def decorator(*args, **kwargs):\n if not g.user:\n # flash('此操作需要登录账户')\n return redirect(url_for('admin.login'))\n if g.user.name != 'admin':\n abort(403)\n return func(*args, **kwargs)\n\n return decorator", "def admin():\n aaa.require(role='admin', fail_redirect='/sorry_page')\n return dict(\n current_user=aaa.current_user,\n users=aaa.list_users(),\n roles=aaa.list_roles()\n )", "def admin_required(f):\n @wraps(f)\n def 
decorated_function(*args, **kwargs):\n if session['user']['user_type'] != \"admin\":\n return abort(403)\n return f(*args, **kwargs)\n return decorated_function", "def is_admin_user(self):\n if \"is_admin\" in self._properties and self.is_admin == 'YES':\n return True\n return False", "def test_09_admin_users_as_admin(self):\r\n self.register()\r\n res = self.app.get('/admin/users', follow_redirects=True)\r\n assert \"Manage Admin Users\" in res.data, res.data", "def is_user_admin(self, user):\n return user == self.created_by", "def isAdmin():\n\tif 'username' in session and session['username'] == 'admin':\n\t\treturn True\n\telse:\n\t\treturn False", "def be_admin(username):\n user_data = my_users.get(username)\n if not user_data or 'admin' not in user_data.get('roles', []):\n return \"User does not have admin role\"", "def test_admin_get(self, *args, **kwargs):\n self.request.user.is_active = True\n self.request.user.is_registered = True\n self.request.user.is_superuser = False\n self.request.user.is_staff = True\n res = self.view.get(self.request, *args, **kwargs)\n nt.assert_equal(res.status_code, 200)", "def test_admin_get(self, *args, **kwargs):\n self.request.user.is_active = True\n self.request.user.is_registered = True\n self.request.user.is_superuser = False\n self.request.user.is_staff = True\n res = self.view.get(self.request, *args, **kwargs)\n nt.assert_equal(res.status_code, 200)", "def is_admin(user):\n return user.groups.filter(name='Profesores').exists()", "def is_admin(self) -> bool:\n return self._is_admin", "def is_admin(self):\n if self.type == 1:\n return True\n else:\n return False", "def is_admin(self):\n if self.user is None:\n return False\n if unicode(self.user._id) in self.barcamp.admins:\n return True\n if self.user.is_admin:\n return True\n return False", "def is_admin(self):\n return Role.query.get(2) in self.roles", "def test_non_admin_login(self):\n self.request.user.is_active = True\n self.request.user.is_registered = True\n self.request.user.is_superuser = False\n self.request.user.is_staff = False\n nt.assert_equal(self.view.test_func(), False)", "def test_non_admin_login(self):\n self.request.user.is_active = True\n self.request.user.is_registered = True\n self.request.user.is_superuser = False\n self.request.user.is_staff = False\n nt.assert_equal(self.view.test_func(), False)", "def test_non_admin_login(self):\n self.request.user.is_active = True\n self.request.user.is_registered = True\n self.request.user.is_superuser = False\n self.request.user.is_staff = False\n nt.assert_equal(self.view.test_func(), False)", "def test_non_admin_login(self):\n self.request.user.is_active = True\n self.request.user.is_registered = True\n self.request.user.is_superuser = False\n self.request.user.is_staff = False\n nt.assert_equal(self.view.test_func(), False)", "def admin_required(f):\n def decorator(*args, **kwargs):\n if \"user\" not in g:\n abort(401)\n if not g.user.admin:\n abort(403)\n return f(*args, **kwargs)\n return decorator", "def test_is_admin_user(self):\n admin = User.objects.get(email='testadminuser@test.com')\n self.assertEqual(admin.is_staff, True)", "def someturbotadmin():\n cond = lambda member: member != ADMIN and ADMIN_ROLE in member.roles\n return random.choice(list(filter(cond, CHANNEL_MEMBERS)))", "def admin_con():\n user = users.get_current_user()\n if user:\n if users.is_current_user_admin() or is_local_admin():\n admins_query = Admins.query(ancestor = admin_base).order(-Admins.date)\n admins = admins_query.fetch()\n output = 
template('admin', name=g_name, log_in_out = users.create_logout_url('/'), opt = 'Выход', user = user.nickname(), admins = admins)\n return output\n else:\n redirect('/')\n else:\n redirect('/')", "def is_accessible(self):\n return current_user.is_authenticated and current_user.role == 'admin'", "def is_accessible(self):\n return current_user.is_authenticated and current_user.role == 'admin'", "def user_is_admin(func):\n def decorator(request, *args, **kwargs):\n competition = get_object_or_404(Competition,\n slug=kwargs['competition_slug'])\n\n if competition.user_is_admin(request.user):\n return func(request, *args, **kwargs)\n\n raise PermissionDenied()\n\n return decorator", "def non_admin_required(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n isAdmin = get_current_user()[\"isAdmin\"]\n if isAdmin == True:\n return jsonify({\"messsage\": \"Only Non admin can access this route\"}), 401\n return func(*args, **kwargs)\n return wrapper", "def admin_required(func):\n\t@wraps(func)\n\tdef decorated_view(*args, **kwargs):\n\t\tif users.get_current_user():\n\t\t\tif not users.is_current_user_admin():\n\t\t\t\tabort(401) # Unauthorized\n\t\t\treturn func(*args, **kwargs)\n\t\treturn redirect(users.create_login_url(request.url))\n\treturn decorated_view", "def authorize_admin(self, instance):\n\n # Authorize user admin.\n instance.client.post(\n reverse(\"login\"),\n {\"username\": \"admin\", \"password\": \"admin\"},\n )\n return instance.client.get(reverse(\"edit\"))", "def somenonturbotadmin():\n cond = lambda member: member != ADMIN and ADMIN_ROLE not in member.roles\n return random.choice(list(filter(cond, CHANNEL_MEMBERS)))", "def is_admin(user):\n return get_organisations_as_admin(user).count() > 0", "def allow_egap_admins(queryset, request):\n if hasattr(request, 'user') and not waffle.flag_is_active(request, EGAP_ADMINS):\n return queryset.exclude(name='EGAP Registration')\n return queryset", "def test_admin_login(self):\n self.request.user.is_active = True\n self.request.user.is_registered = True\n self.request.user.is_superuser = False\n self.request.user.is_staff = True\n nt.assert_true(self.view.test_func())", "def test_admin_login(self):\n self.request.user.is_active = True\n self.request.user.is_registered = True\n self.request.user.is_superuser = False\n self.request.user.is_staff = True\n nt.assert_true(self.view.test_func())", "def test_admin_login(self):\n self.request.user.is_active = True\n self.request.user.is_registered = True\n self.request.user.is_superuser = False\n self.request.user.is_staff = True\n nt.assert_true(self.view.test_func())", "def test_admin_login(self):\n self.request.user.is_active = True\n self.request.user.is_registered = True\n self.request.user.is_superuser = False\n self.request.user.is_staff = True\n nt.assert_true(self.view.test_func())", "async def assert_requester_is_admin(auth: Auth, request: SynapseRequest) -> None:\n requester = await auth.get_user_by_req(request)\n await assert_user_is_admin(auth, requester)" ]
[ "0.76604193", "0.76227117", "0.7580729", "0.7580729", "0.74723893", "0.7399326", "0.73662555", "0.72489077", "0.7212591", "0.71992993", "0.7144615", "0.7074819", "0.7029829", "0.7020532", "0.70080376", "0.7003052", "0.7002116", "0.6998605", "0.69832844", "0.6980152", "0.6980147", "0.69679815", "0.69642824", "0.6953532", "0.6952214", "0.6947435", "0.69445187", "0.6930399", "0.6926649", "0.6921218", "0.69188064", "0.6912777", "0.6903085", "0.6882547", "0.68721014", "0.6839807", "0.6831945", "0.68116385", "0.68091416", "0.6782987", "0.678068", "0.678068", "0.67806333", "0.67784506", "0.67725354", "0.6752088", "0.67434067", "0.6739361", "0.6733094", "0.6730463", "0.67297995", "0.67230093", "0.67207843", "0.67182887", "0.67104113", "0.6696352", "0.66941357", "0.6693837", "0.6689845", "0.6689451", "0.668904", "0.6683387", "0.6676739", "0.66767305", "0.6670724", "0.66682875", "0.6667248", "0.6664187", "0.6662405", "0.6647732", "0.6640845", "0.6631953", "0.66307807", "0.66307807", "0.66213435", "0.66065913", "0.65958816", "0.65938705", "0.6593587", "0.6592865", "0.6592865", "0.6592865", "0.6592865", "0.6578975", "0.65766007", "0.6576075", "0.65758336", "0.6571127", "0.6571127", "0.65693235", "0.6550398", "0.6548569", "0.65440387", "0.65438056", "0.65423214", "0.6535551", "0.65325576", "0.65325576", "0.65325576", "0.65325576", "0.65268856" ]
0.0
-1
admin should be passed through for admin users
async def test_auth_admin_is_admin(app):
    # Admin user defined in MockPAMAuthenticator.
    name = 'admin'
    user = add_user(app.db, app, name=name, admin=False)
    assert user.admin is False
    cookies = await app.login_user(name)
    assert user.admin is True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def GET_adminon(self):\r\n #check like this because c.user_is_admin is still false\r\n if not c.user.name in g.admins:\r\n return self.abort404()\r\n self.login(c.user, admin = True)\r\n\r\n dest = request.referer or '/'\r\n return self.redirect(dest)", "def admin_required(handler):\n def admin_login(self, *args, **kwargs):\n auth = self.auth\n if not auth.get_user_by_session():\n self.redirect('/auth/login', abort=True)\n \n user = auth.get_user_by_session()\n queried_entity = User.get_by_id(user['user_id'])\n \n if queried_entity and queried_entity.phb_user_admin_status == 'admin-1':\n return handler(self, *args, **kwargs)\n else:\n self.redirect('/', abort = True)\n \n return admin_login", "def check_admin():\n\tif not current_user.is_admin:\n\t\tabort(403)", "def check_admin():\r\n if not current_user.is_admin:\r\n abort(403)", "def admin(self, **kwargs):\n with self.user(**kwargs):\n g.admin = True\n yield", "def check_admin():\n if not current_user.is_admin:\n abort(403)", "def check_admin():\n if not current_user.is_admin:\n abort(403)", "def check_admin(self, *args, **kwargs):\n if not users.is_current_user_admin():\n self.response.write(\n '<div style=\"padding-top: 200px; height:178px; width: 500px; color: white; margin: 0 auto; font-size: 52px; text-align: center; background: url(\\'http://3.bp.blogspot.com/_d_q1e2dFExM/TNWbWrJJ7xI/AAAAAAAAAjU/JnjBiTSA1xg/s1600/Bank+Vault.jpg\\')\">Forbidden Access <a style=\\'color: white;\\' href=\\'%s\\'>Login</a></div>' %\n users.create_login_url(self.request.path_url + self.request.query_string))\n return\n else:\n return handler(self, *args, **kwargs)", "def is_admin(self):\r\n return self.admin", "def admin():\n pass # pragma: no cover", "def is_user_admin(request):\n return request.user.is_superuser", "def admin(ctx):\n return ctx.message.author.permissions_in(ctx.channel).administrator", "def admin(request):\n if not request.user.is_staff:\n return render(request, 'manager/denied.html')\n return render(request, 'manager/index.html')", "def is_admin(self):\n return False", "def admin(request):\n if not request.user.is_staff:\n return render_to_response('error.htm', {\n 'error': \"Sorry, you are not staff... 
(user permissions 'is_staff')\",\n })\n return render_to_response('admin.htm', {\n 'username': request.user,\n })", "def admin(func) :\n def redirectlogin(session, self) :\n setSessionMessage(session, \"Admin Login Required.\", True)\n return self.redirect('/login')\n\n def checkauth(*args, **kwargs) : \n self = args[0]\n\n \n session = getSessionByRequest(self)\n user = getSessionUser(session)\n\n if not user :\n return redirectlogin(session, self)\n\n if user.userType == 'ADMIN' : \n return func(*args, **kwargs)\n\n return redirectlogin(session, self)\n return checkauth", "def is_admin(ctx) -> bool:\n return db.user(ctx.author).is_admin", "def GET_adminoff(self):\r\n if not c.user.name in g.admins:\r\n return self.abort404()\r\n self.login(c.user, admin = False)\r\n\r\n dest = request.referer or '/'\r\n return self.redirect(dest)", "def is_admin(self, user):\n return user.name in self.admins", "def authorize_admin(self, instance):\n\n # Authorize user admin.\n instance.client.post(\n reverse(\"login\"),\n {\"username\": \"admin\", \"password\": \"admin\"},\n )\n return instance.client.get(reverse(\"edit\"))", "def user_is_admin(user):\n return user in admins", "def _check_admin_only(self, request):\r\n api_key = request.params.get(self.api_field, None)\r\n\r\n if request.user is None:\r\n user = self.user_fetcher(api_key=api_key)\r\n else:\r\n user = request.user\r\n\r\n if user is not None and user.is_admin:\r\n request.user = user\r\n return True", "def getAdmin():", "def validate_admin(self, request):\n\n self.validate_login(request)\n\n if request.session['id'] not in self.admins:\n handler.logHelper.log_it_visit(request, __name__ + '.validate_admin', authorized=False)\n raise PermissionDenied('You need to be an admin to access this page.')", "def test_func(self, user):\n return self.get_object().admin == user", "def is_admin(self):\n return self.admin", "def test_user_can_change_admin(self):\n self.assertTrue(self.story.user_can_change(self.admin_user))", "def is_admin(self, user):\n return (acl.action_allowed(self.request, 'OperatorDashboard', '*') or\n acl.action_allowed(self.request, 'Feed', 'Curate'))", "def check_is_admin(context):\n init()\n\n #the target is user-self\n credentials = context.to_dict()\n target = credentials\n\n return policy.check('context_is_admin', target, credentials)", "def check_is_admin(current_user):\n return current_user['isAdmin'] == True", "def admin():\n aaa.require(role='admin', fail_redirect='/sorry_page')\n return dict(\n current_user=aaa.current_user,\n users=aaa.list_users(),\n roles=aaa.list_roles()\n )", "def get_is_admin(self, obj):\n try:\n user = self.context.get('request').user\n except Exception:\n # raise serializers.ValidationError('Could not access request.user')\n return False\n if user == obj.admin:\n return True\n else:\n return False", "def is_admin(user):\n return user.is_authenticated and user.id == app.config.get('ADMIN')", "def test_admin(self):\r\n \r\n self.assertEqual(False, self.user.isAdmin)", "def admin():\n return redirect(url_for(\"user\", name=\"Admin!\"))", "def admin_edit_admins():\n return user_management_handler(\"show_admin_edit_admins\", \"new_admins\", True)", "def is_admin(self,user):\n if user.is_superuser:\n return True\n\n if user.groups.filter(name=self.admin_group_name).count() > 0:\n return True\n else:\n return False", "def admin(self):\n if self.is_admin:\n return True\n return False", "def test_admin_accessible(self) -> None:\n response = self.client.get(\"/admin/\")\n self.assertEqual(200, 
response.status_code)", "def is_main_admin(self):\n if self.user is None:\n return False\n return self.user.has_permission(\"admin\")", "def admin_con():\n user = users.get_current_user()\n if user:\n if users.is_current_user_admin() or is_local_admin():\n admins_query = Admins.query(ancestor = admin_base).order(-Admins.date)\n admins = admins_query.fetch()\n output = template('admin', name=g_name, log_in_out = users.create_logout_url('/'), opt = 'Выход', user = user.nickname(), admins = admins)\n return output\n else:\n redirect('/')\n else:\n redirect('/')", "def test_09_admin_users_as_admin(self):\r\n self.register()\r\n res = self.app.get('/admin/users', follow_redirects=True)\r\n assert \"Manage Admin Users\" in res.data, res.data", "def test_admin(self):\n assert(admin)", "def check_is_admin(context):\n init()\n credentials = context.to_policy_values()\n target = credentials\n return _ENFORCER.authorize('admin_required', target, credentials)", "def is_admin(context):\n request = context[\"request\"]\n url = resolve(request.path)\n context['is_admin'] = False\n return url.app_name == 'admin'", "def isAdmin(self, user):\r\n if user.id in self.admins:\r\n return True\r\n return False", "def admin_required(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if session.get(\"user_id\") not in getAdminIDs():\n return redirect(\"/\")\n return f(*args, **kwargs)\n return decorated_function", "def admin_user_only(view):\r\n @google_login_required\r\n def wrapped(request, *args, **kwargs):\r\n if users.is_current_user_admin():\r\n return view(request, *args, **kwargs)\r\n context = RequestContext(request);\r\n return rtr( 'access_limited.html', context,None )\r\n return wraps(view)(wrapped)", "def check_is_admin(context):\n\n init()\n # the target is user-self\n target = default_target(context)\n return _ENFORCER.authorize('context_is_admin', target, context)", "def web_admin_required(handler):\n\n def check_admin(self, *args, **kwargs):\n \"\"\"\n If handler has no login_url specified invoke a 403 error\n \"\"\"\n if not users.is_current_user_admin():\n self.response.write(\n '<div style=\"padding-top: 200px; height:178px; width: 500px; color: white; margin: 0 auto; font-size: 52px; text-align: center; background: url(\\'http://3.bp.blogspot.com/_d_q1e2dFExM/TNWbWrJJ7xI/AAAAAAAAAjU/JnjBiTSA1xg/s1600/Bank+Vault.jpg\\')\">Forbidden Access <a style=\\'color: white;\\' href=\\'%s\\'>Login</a></div>' %\n users.create_login_url(self.request.path_url + self.request.query_string))\n return\n else:\n return handler(self, *args, **kwargs)\n\n return check_admin", "def admin(self, view):\n view.admin = True\n return view", "def on_model_change(self, form, model, is_created):\n if not current_user.is_active or not current_user.is_authenticated:\n abort(403)\n elif is_created:\n if not user_has_permission(current_user, 'can_create','admins'):\n abort(403)\n else:\n if not user_has_permission(current_user, 'can_edit','admins'):\n abort(403)", "def is_admin(self):\n if not self.current_user:\n return False\n else:\n return self.current_user in [\"1\"]", "def admin_required(func):\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n if current_user.is_admin:\n return func(*args, **kwargs)\n else:\n return login_manager.unauthorized()\n\n return wrapper", "def is_staff(self):\r\n return self.is_admin", "def return_admin_list(request):\n del request\n return return_user_list(Administrador)", "def isAdmin(user):\n return isUserType(user, Admin)", "def test_admin_get(self, *args, **kwargs):\n 
self.request.user.is_active = True\n self.request.user.is_registered = True\n self.request.user.is_superuser = False\n self.request.user.is_staff = True\n res = self.view.get(self.request, *args, **kwargs)\n nt.assert_equal(res.status_code, 200)", "def test_admin_get(self, *args, **kwargs):\n self.request.user.is_active = True\n self.request.user.is_registered = True\n self.request.user.is_superuser = False\n self.request.user.is_staff = True\n res = self.view.get(self.request, *args, **kwargs)\n nt.assert_equal(res.status_code, 200)", "def is_administrator(self):\n return False", "def require_admin(func):\n\n @wraps(func)\n def decorator(*args, **kwargs):\n if not g.user:\n # flash('此操作需要登录账户')\n return redirect(url_for('admin.login'))\n if g.user.name != 'admin':\n abort(403)\n return func(*args, **kwargs)\n\n return decorator", "def is_admin(self):\n return self._is_admin", "def is_admin(self):\n return self._is_admin", "def admin_flag(user_id):\n user = User.query.filter_by(id=user_id).first()\n if user.is_admin:\n return True\n return False", "def admin_required(func):\n @wraps(func)\n def wrapper(request):\n if not request.user:\n return web.json_response({'status': 'error', 'message': 'auth required'}, status=401)\n if request.user != config['server']['admin_username']:\n return web.json_response({'status': 'error', 'message': 'admin rights required'}, status=403)\n return func(request)\n return wrapper", "def admin_required(f): # pragma: no cover\r\n @wraps(f)\r\n def decorated_function(*args, **kwargs):\r\n if current_user.admin:\r\n return f(*args, **kwargs)\r\n else:\r\n return abort(403)\r\n return decorated_function", "def is_admin(self):\n if self.user is None:\n return False\n if unicode(self.user._id) in self.barcamp.admins:\n return True\n if self.user.is_admin:\n return True\n return False", "def validateUser(self,admin):\n \n res=admin.helper.getOneUser(self.name)\n if res == False:\n return True\n else:\n return False", "def admin_required(func):\n\t@wraps(func)\n\tdef decorated_view(*args, **kwargs):\n\t\tif users.get_current_user():\n\t\t\tif not users.is_current_user_admin():\n\t\t\t\tabort(401) # Unauthorized\n\t\t\treturn func(*args, **kwargs)\n\t\treturn redirect(users.create_login_url(request.url))\n\treturn decorated_view", "def admin_required(f):\n @functools.wraps(f)\n def wrapper(*a, **kw):\n if db_session.query(Admin).filter(Admin.id == current_user.id).first() is None:\n return redirect(url_for('index'))\n return f(*a, **kw)\n return wrapper", "def admin_required(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n isAdmin = get_current_user()[\"isAdmin\"]\n if isAdmin == False:\n return jsonify({\"messsage\": \"Only admin can access this route\"}), 401\n return func(*args, **kwargs)\n return wrapper", "def admin_only():\n return 'Super-seekrit admin page.'", "def is_admin(self):\n if self.is_main_admin:\n return True\n if self.user is not None and self.barcamp is not None:\n if unicode(self.user._id) in self.barcamp.admins:\n return True\n return False", "def is_admin(self, is_admin):\n\n self._is_admin = is_admin", "def isAdmin():\n\tif 'username' in session and session['username'] == 'admin':\n\t\treturn True\n\telse:\n\t\treturn False", "def admin_required(f):\n\n @wraps(f)\n def wrapper(*args, **kwargs):\n if \"is_admin\":\n return f(*args, **kwargs)\n else:\n flash(\"Sorry, this page is for admin only.\")\n user_id = session.get('user_id')\n return redirect(\"/users/{}\".format(user_id))\n\n return wrapper", "def allow_egap_admins(queryset, 
request):\n if hasattr(request, 'user') and not waffle.flag_is_active(request, EGAP_ADMINS):\n return queryset.exclude(name='EGAP Registration')\n return queryset", "def test_func(self):\n return self.request.user.is_superuser", "async def assert_requester_is_admin(auth: Auth, request: SynapseRequest) -> None:\n requester = await auth.get_user_by_req(request)\n await assert_user_is_admin(auth, requester)", "def check_if_admin(bot, update, *args, **kwargs):\n user_id = update._effective_user\n # print(\"cerco user con id \" + str(user_id) + \", nel database\")\n user = DB.execute(TABELLE[\"id_users\"][\"select\"][\"from_id\"], (user_id['id'],))\n # print(\"ho trovato : \" + str(user))\n if not user:\n self.request_access(bot, user_id)\n return\n elif user[\"banned\"]:\n update.message.reply_text(\"Spiacente sei stato bannato dal bot\")\n return\n elif user[\"admin\"]:\n sig = signature(func)\n if len(sig.parameters) > 1:\n return func(bot, update, *args, **kwargs)\n else:\n return func(*args, **kwargs)\n else:\n update.message.reply_text(\"Non sei abilitato ad usare questo comando\")\n return", "def admin_required(fn):\n @wraps(fn)\n def wrapper(*args, **kwargs):\n verify_jwt_in_request()\n user_id = get_jwt_identity()\n target_user = User.query.filter_by(id=user_id).first()\n\n if target_user is None:\n return redirect(\"/admin/login\", code=403)\n\n if target_user.role != RoleType.ADMINISTRATOR:\n return redirect(\"/admin/login\", code=403)\n return fn(*args, **kwargs)\n return wrapper", "def is_user_admin(self, user):\n return user == self.created_by", "def test_admin_can_login_to_web_portal(admin):", "def make_admin(self):\n user_datastore = SQLAlchemyUserDatastore(db, User, Role)\n user_datastore.add_role_to_user(self, 'admin')\n db.session.commit()", "async def admin(self, ctx):\n if ctx.message.author.top_role.name.lower() == 'officer':\n await ctx.message.channel.send(\n 'List of useable commands for the parent command: **admin**\\n\\n **eboard admin auto** - updates the '\n 'new seats given current election data.\\n\\n**eboard admin set <position> <User#0000>** - assigns a '\n 'position to target user.\\n\\n**eboard admin remove <position> <User#0000>** - remove a target user '\n 'from their position.\\n\\n**eboard admin list** - lists the positions in the SQLite table.')", "def test_add_admin_to_org(self):\n pass", "def requires_admin(method):\r\n def wrapper(self, *args, **kwargs):\r\n user = users.get_current_user()\r\n if not user:\r\n if web.ctx.method == \"GET\":\r\n raise web.seeother(users.create_login_url(web.ctx.fullpath))\r\n raise web.forbidden()\r\n elif not (users.is_current_user_admin()):\r\n raise web.forbidden()\r\n else:\r\n return method(self, *args, **kwargs)\r\n return wrapper", "async def is_admin(ctx):\n member = ctx.message.author\n aRole = discord.utils.get(member.guild.roles, name=ROLE_AD)\n if aRole in member.roles or member.id == 715048392408956950: return True", "def is_admin(self) -> bool:\n return self._is_admin", "def admin_required(f):\n def decorator(*args, **kwargs):\n if \"user\" not in g:\n abort(401)\n if not g.user.admin:\n abort(403)\n return f(*args, **kwargs)\n return decorator", "def test_admin_login(self):\n self.request.user.is_active = True\n self.request.user.is_registered = True\n self.request.user.is_superuser = False\n self.request.user.is_staff = True\n nt.assert_true(self.view.test_func())", "def test_admin_login(self):\n self.request.user.is_active = True\n self.request.user.is_registered = True\n self.request.user.is_superuser = 
False\n self.request.user.is_staff = True\n nt.assert_true(self.view.test_func())", "def test_admin_login(self):\n self.request.user.is_active = True\n self.request.user.is_registered = True\n self.request.user.is_superuser = False\n self.request.user.is_staff = True\n nt.assert_true(self.view.test_func())", "def test_admin_login(self):\n self.request.user.is_active = True\n self.request.user.is_registered = True\n self.request.user.is_superuser = False\n self.request.user.is_staff = True\n nt.assert_true(self.view.test_func())", "def admin_required(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if session['user']['user_type'] != \"admin\":\n return abort(403)\n return f(*args, **kwargs)\n return decorated_function", "def is_admin_user(self):\n if \"is_admin\" in self._properties and self.is_admin == 'YES':\n return True\n return False", "def is_admin(self):\n return Role.query.get(2) in self.roles", "def user_is_admin(userobj):\n from .node import Node\n from .subject import Subject\n from .period import Period\n from .assignment import Assignment\n return user_is_basenodeadmin(userobj, Node, Subject, Period, Assignment)", "def test_is_admin_user(self):\n admin = User.objects.get(email='testadminuser@test.com')\n self.assertEqual(admin.is_staff, True)", "def show_admin_edit_admins():\n return render_admin_page(\"admin-ea.html\")", "def is_admin():\n # TODO(felipemonteiro): Make this more robust via a context is admin\n # lookup.\n return CONF.patrole.rbac_test_role == CONF.identity.admin_role" ]
[ "0.76515234", "0.7589409", "0.72544324", "0.7240122", "0.72308195", "0.72102034", "0.72102034", "0.71380514", "0.71223193", "0.71042573", "0.7030925", "0.70082307", "0.6999681", "0.6966063", "0.6965032", "0.6940669", "0.69357306", "0.6932168", "0.6897163", "0.6887465", "0.6870391", "0.6860596", "0.6855732", "0.6852728", "0.68465143", "0.6844571", "0.68059653", "0.6800195", "0.6788935", "0.6771883", "0.6763007", "0.67594767", "0.6748982", "0.67488676", "0.67451006", "0.67441505", "0.6741649", "0.6736674", "0.6731021", "0.6730613", "0.67255616", "0.6725538", "0.67162496", "0.66916394", "0.6680957", "0.6676992", "0.6673622", "0.66694134", "0.66621435", "0.6656507", "0.66384345", "0.662332", "0.6605288", "0.6581872", "0.6580343", "0.6578257", "0.6574084", "0.65687746", "0.65687746", "0.6563752", "0.65622044", "0.654781", "0.654781", "0.6538974", "0.6537789", "0.65020233", "0.6499497", "0.6499116", "0.64936894", "0.648992", "0.64808035", "0.6474666", "0.6470513", "0.64607066", "0.64587164", "0.6456045", "0.6453423", "0.6449472", "0.6432922", "0.6432094", "0.6430127", "0.6429237", "0.64230454", "0.6422841", "0.642238", "0.6420149", "0.6417762", "0.6416356", "0.64156955", "0.64128655", "0.64127016", "0.64127016", "0.64127016", "0.64127016", "0.6410351", "0.6396381", "0.6390034", "0.63775796", "0.6373303", "0.6369757", "0.63618207" ]
0.0
-1
admin should be unchanged if authenticator doesn't return admin value
async def test_auth_admin_retained_if_unset(app):
    name = 'kiwi'
    # Add user as admin.
    user = add_user(app.db, app, name=name, admin=True)
    assert user.admin is True
    # User should remain unchanged.
    cookies = await app.login_user(name)
    assert user.admin is True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def admin_required(handler):\n def admin_login(self, *args, **kwargs):\n auth = self.auth\n if not auth.get_user_by_session():\n self.redirect('/auth/login', abort=True)\n \n user = auth.get_user_by_session()\n queried_entity = User.get_by_id(user['user_id'])\n \n if queried_entity and queried_entity.phb_user_admin_status == 'admin-1':\n return handler(self, *args, **kwargs)\n else:\n self.redirect('/', abort = True)\n \n return admin_login", "def authenticator():", "def GET_adminon(self):\r\n #check like this because c.user_is_admin is still false\r\n if not c.user.name in g.admins:\r\n return self.abort404()\r\n self.login(c.user, admin = True)\r\n\r\n dest = request.referer or '/'\r\n return self.redirect(dest)", "def is_admin(self):\r\n return self.admin", "def admin_authenticated(decoratee):\n\n @wraps(decoratee)\n def wrapper(*args, **kwargs):\n if \"username\" in session.keys():\n if is_admin(session[\"username\"]) and is_enabled(session[\"username\"]):\n return decoratee(*args, **kwargs)\n else:\n session[\"last_error\"] = (\n \"You need to be an administrator to view this page.\"\n )\n return redirect(url_for(\"error\"))\n else:\n return redirect(url_for(\"login\") + \"?sso\")\n\n return wrapper", "async def test_auth_admin_is_admin(app):\n # Admin user defined in MockPAMAuthenticator.\n name = 'admin'\n user = add_user(app.db, app, name=name, admin=False)\n assert user.admin is False\n cookies = await app.login_user(name)\n assert user.admin is True", "def admin(self, **kwargs):\n with self.user(**kwargs):\n g.admin = True\n yield", "def admin(func) :\n def redirectlogin(session, self) :\n setSessionMessage(session, \"Admin Login Required.\", True)\n return self.redirect('/login')\n\n def checkauth(*args, **kwargs) : \n self = args[0]\n\n \n session = getSessionByRequest(self)\n user = getSessionUser(session)\n\n if not user :\n return redirectlogin(session, self)\n\n if user.userType == 'ADMIN' : \n return func(*args, **kwargs)\n\n return redirectlogin(session, self)\n return checkauth", "def authorize_admin(self, instance):\n\n # Authorize user admin.\n instance.client.post(\n reverse(\"login\"),\n {\"username\": \"admin\", \"password\": \"admin\"},\n )\n return instance.client.get(reverse(\"edit\"))", "def _get_admin_status(self):\n return self.__admin_status", "def is_admin(self):\n return self.admin", "def Build(self,admin): \n\n rv=admin.helper.setUser(self.name,self.__encryptPwd.decode())\n if rv is None:\n return False\n else:\n rv=admin.helper.setAccount(self.name,'ARS')\n if rv is None:\n return False\n else:\n return True", "def __call__(self, target, creds, enforcer):\n\n return creds['is_admin'] == self.expected", "def __call__(self, target, creds, enforcer):\n\n return creds['is_admin'] == self.expected", "def authAdmin(self, email='admin@mail.com'):\n admin = self._createUser(email=email, role=UserType.ADMIN)\n return admin, self._authenticate(admin)", "def admin(self):\n if self.is_admin:\n return True\n return False", "def admin_required(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n isAdmin = get_current_user()[\"isAdmin\"]\n if isAdmin == False:\n return jsonify({\"messsage\": \"Only admin can access this route\"}), 401\n return func(*args, **kwargs)\n return wrapper", "def admin_password(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"admin_password\")", "def GET_adminoff(self):\r\n if not c.user.name in g.admins:\r\n return self.abort404()\r\n self.login(c.user, admin = False)\r\n\r\n dest = request.referer or '/'\r\n return 
self.redirect(dest)", "def test_admin(self):\n assert(admin)", "def set_admin():\n print(\"Insert admin email:\")\n return input()", "def admin_required(func):\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n if current_user.is_admin:\n return func(*args, **kwargs)\n else:\n return login_manager.unauthorized()\n\n return wrapper", "def set_admin_password(self, instance, new_pass):\n pass", "def administrator(method):\n @functools.wraps(method)\n def wrapper(self, *args, **kwargs):\n if not self.current_user:\n if self.request.method == \"GET\":\n self.redirect(self.get_login_url())\n return\n raise web.HTTPError(403)\n elif not self.current_user.administrator:\n if self.request.method == \"GET\":\n self.redirect(\"/\")\n return\n raise web.HTTPError(403)\n else:\n return method(self, *args, **kwargs)\n return wrapper", "def validateUser(self,admin):\n \n res=admin.helper.getOneUser(self.name)\n if res == False:\n return True\n else:\n return False", "def is_admin(self):\n return self._is_admin", "def is_admin(self):\n return self._is_admin", "def getAdmin():", "def administrator(method):\n\t@functools.wraps(method)\n\tdef wrapper(self, *args, **kwargs):\n\t\tif not self.current_user:\n\t\t\tif self.request.method == \"GET\":\n\t\t\t\tself.redirect(self.get_login_url())\n\t\t\t\treturn\n\t\t\traise tornado.web.HTTPError(403)\n\t\telif not self.current_user.administrator:\n\t\t\tif self.request.method == \"GET\":\n\t\t\t\tself.redirect(\"/\")\n\t\t\t\treturn\n\t\t\traise tornado.web.HTTPError(403)\n\t\telse:\n\t\t\treturn method(self, *args, **kwargs)\n\treturn wrapper", "def get_is_admin(self, obj):\n try:\n user = self.context.get('request').user\n except Exception:\n # raise serializers.ValidationError('Could not access request.user')\n return False\n if user == obj.admin:\n return True\n else:\n return False", "async def assert_requester_is_admin(auth: Auth, request: SynapseRequest) -> None:\n requester = await auth.get_user_by_req(request)\n await assert_user_is_admin(auth, requester)", "def get_admin_token(self):\n\t\tself.client.post('/api/v1/auth/signup', data=json.dumps(self.admin_user), content_type='application/json')\n\t\tresponse = self.client.post('/api/v1/auth/login', data=json.dumps(self.admin_user), content_type='application/json')\n\t\tresp = json.loads(response.data.decode())\n\t\treturn 'Bearer ' + resp['access_token']", "def admin():\n pass # pragma: no cover", "def test_func(self, user):\n return self.get_object().admin == user", "def authenticate_admin():\n\n print request.json['session_id']\n username = _authenticate_admin_from_session(request)\n\n if username:\n if username == 'local_user':\n return Response(\"local_user\", 200)\n else:\n return Response(status=200)\n\n else:\n return Response('Bad or missing session id.', status=401)", "def admin_required(f):\n\n @wraps(f)\n def decorated(*args, **kwargs):\n \"\"\"validate token provided and ensures the user is an admin\"\"\"\n\n token = None\n\n if 'x-access-token' in request.headers:\n token = request.headers['x-access-token']\n\n if token is None:\n return make_response(jsonify({\"message\": \"Please sign-up and login\"}), 401)\n\n try:\n data = jwt.decode(token, Config.SECRET)\n admin = data['is_admin']\n except:\n return make_response(jsonify({\"message\": \"kindly provide a valid token in the header\"}), 401)\n\n if not admin:\n return make_response(\n jsonify({\"message\": \"you are not authorized to perform this function as a non-admin user\"}), 401)\n\n return f(*args, **kwargs)\n\n return decorated", 
"def admin_required(fn):\n @wraps(fn)\n def wrapper(*args, **kwargs):\n verify_jwt_in_request()\n user_id = get_jwt_identity()\n target_user = User.query.filter_by(id=user_id).first()\n\n if target_user is None:\n return redirect(\"/admin/login\", code=403)\n\n if target_user.role != RoleType.ADMINISTRATOR:\n return redirect(\"/admin/login\", code=403)\n return fn(*args, **kwargs)\n return wrapper", "def __call__(self, target, creds):\n\n return creds['is_admin'] == self.expected", "def is_admin():\n # TODO(felipemonteiro): Make this more robust via a context is admin\n # lookup.\n return CONF.patrole.rbac_test_role == CONF.identity.admin_role", "def is_admin(self):\n return False", "def admin_required(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if session.get(\"user_id\") not in getAdminIDs():\n return redirect(\"/\")\n return f(*args, **kwargs)\n return decorated_function", "def require_admin(handler_method):\n def Decorate(self):\n if not users.is_current_user_admin():\n self.error(401)\n html = '<html><body><a href=\"%s\">Sign in</a></body></html>'\n self.response.out.write(html % (users.create_login_url(self.request.url)))\n return\n return handler_method(self)\n return Decorate", "def check_admin(self, *args, **kwargs):\n if not users.is_current_user_admin():\n self.response.write(\n '<div style=\"padding-top: 200px; height:178px; width: 500px; color: white; margin: 0 auto; font-size: 52px; text-align: center; background: url(\\'http://3.bp.blogspot.com/_d_q1e2dFExM/TNWbWrJJ7xI/AAAAAAAAAjU/JnjBiTSA1xg/s1600/Bank+Vault.jpg\\')\">Forbidden Access <a style=\\'color: white;\\' href=\\'%s\\'>Login</a></div>' %\n users.create_login_url(self.request.path_url + self.request.query_string))\n return\n else:\n return handler(self, *args, **kwargs)", "def is_admin(self, is_admin):\n\n self._is_admin = is_admin", "def test_admin(self):\r\n \r\n self.assertEqual(False, self.user.isAdmin)", "async def test_auth_admin_non_admin(app):\n name = 'kiwi'\n user = add_user(app.db, app, name=name, admin=False)\n assert user.admin is False\n cookies = await app.login_user(name)\n assert user.admin is False", "def get_info_admin(self):\n return self.get_info(\"HS_ADMIN\")", "def process_admin_login():\n\n entered_email = request.form.get(\"email\")\n entered_password = request.form.get(\"password\")\n admin = c.get_admin(entered_email, entered_password)\n\n if admin is False:\n flash('Invalid credentials. 
Please click on sign up to create an account!')\n return redirect('/')\n session['current_admin'] = entered_email\n ad_id = admin.admin_id\n flash('Logged in as %s' % entered_email)\n if admin.rescue_id is None:\n return redirect('/admin' + '/' + str(ad_id) + '/rescue-info')\n else:\n return redirect('/admin' + '/' + str(ad_id))", "def admin_action(self):\n SCREEN_MANAGER.current = 'passCode'", "def test_admin_accessible(self) -> None:\n response = self.client.get(\"/admin/\")\n self.assertEqual(200, response.status_code)", "def admin(self, view):\n view.admin = True\n return view", "def _check_admin_only(self, request):\r\n api_key = request.params.get(self.api_field, None)\r\n\r\n if request.user is None:\r\n user = self.user_fetcher(api_key=api_key)\r\n else:\r\n user = request.user\r\n\r\n if user is not None and user.is_admin:\r\n request.user = user\r\n return True", "def requires_admin(method):\r\n def wrapper(self, *args, **kwargs):\r\n user = users.get_current_user()\r\n if not user:\r\n if web.ctx.method == \"GET\":\r\n raise web.seeother(users.create_login_url(web.ctx.fullpath))\r\n raise web.forbidden()\r\n elif not (users.is_current_user_admin()):\r\n raise web.forbidden()\r\n else:\r\n return method(self, *args, **kwargs)\r\n return wrapper", "def admin_edit_admins():\n return user_management_handler(\"show_admin_edit_admins\", \"new_admins\", True)", "def admin_required(func):\n @wraps(func)\n def wrapper(request):\n if not request.user:\n return web.json_response({'status': 'error', 'message': 'auth required'}, status=401)\n if request.user != config['server']['admin_username']:\n return web.json_response({'status': 'error', 'message': 'admin rights required'}, status=403)\n return func(request)\n return wrapper", "def authorized(self):\n pass", "def auth(self, user):", "def _set_authenticator(self):\n pass", "def test_02_second_user_is_not_admin(self):\r\n self.register()\r\n self.signout()\r\n self.register(name=\"tester2\", email=\"tester2@tester.com\",\r\n password=\"tester\")\r\n self.signout()\r\n user = db.session.query(User).get(2)\r\n assert user.admin == 0, \"User ID: 2 should not be admin, but it is\"", "def admin():\n aaa.require(role='admin', fail_redirect='/sorry_page')\n return dict(\n current_user=aaa.current_user,\n users=aaa.list_users(),\n roles=aaa.list_roles()\n )", "def __init__(self, username, password, email, authenticator):\n super().__init__(username, password, email)\n if password != \"superpassword\":\n raise NotAdminError\n self.authenticator = authenticator\n self.permissions = {}", "def admin_required(f):\n @functools.wraps(f)\n def wrapper(*a, **kw):\n if db_session.query(Admin).filter(Admin.id == current_user.id).first() is None:\n return redirect(url_for('index'))\n return f(*a, **kw)\n return wrapper", "def admin_required(f):\n @wraps(f)\n def admin_decorator(*args, **kwargs):\n if session.get('logged_in') and session.get('type') == 'Admin':\n return f(*args, **kwargs)\n else:\n abort(401)\n return admin_decorator", "def get_admin_username(self) -> str:\n # read the original value passed by the command\n admin_username = self.raw_param.get(\"admin_username\")\n # try to read the property value corresponding to the parameter from the `mc` object\n if (\n self.mc and\n self.mc.linux_profile and\n self.mc.linux_profile.admin_username is not None\n ):\n admin_username = self.mc.linux_profile.admin_username\n\n # this parameter does not need dynamic completion\n # this parameter does not need validation\n return admin_username", "async def 
assert_user_is_admin(auth: Auth, requester: Requester) -> None:\n is_admin = await auth.is_server_admin(requester)\n if not is_admin:\n raise AuthError(HTTPStatus.FORBIDDEN, \"You are not a server admin\")", "def admin_login():\n account = request.json['account']\n password = request.json['password']\n u = user.User.query.filter(user.User.account == account).first()\n if not u:\n abort(404)\n if u.password == password and u.role == 'admin':\n if u.token is None:\n u.generate_token()\n db.session.merge(u)\n db.session.commit()\n return jsonify(u.to_dict())\n else:\n abort(500)", "def test_09_admin_users_as_admin(self):\r\n self.register()\r\n res = self.app.get('/admin/users', follow_redirects=True)\r\n assert \"Manage Admin Users\" in res.data, res.data", "def admin_user(self) -> pulumi.Input['LabVirtualMachineAdminUserArgs']:\n return pulumi.get(self, \"admin_user\")", "def admin_required(f):\n\n @wraps(f)\n def wrapper(*args, **kwargs):\n if \"is_admin\":\n return f(*args, **kwargs)\n else:\n flash(\"Sorry, this page is for admin only.\")\n user_id = session.get('user_id')\n return redirect(\"/users/{}\".format(user_id))\n\n return wrapper", "def authorization():\n pass", "def admin_con():\n user = users.get_current_user()\n if user:\n if users.is_current_user_admin() or is_local_admin():\n admins_query = Admins.query(ancestor = admin_base).order(-Admins.date)\n admins = admins_query.fetch()\n output = template('admin', name=g_name, log_in_out = users.create_logout_url('/'), opt = 'Выход', user = user.nickname(), admins = admins)\n return output\n else:\n redirect('/')\n else:\n redirect('/')", "def check_is_admin(context):\n init()\n\n #the target is user-self\n credentials = context.to_dict()\n target = credentials\n\n return policy.check('context_is_admin', target, credentials)", "def get_authenticated_denied(self):", "def test_01_admin_index_authenticated(self):\r\n self.register()\r\n self.signout()\r\n self.register(name=\"tester2\", email=\"tester2@tester.com\",\r\n password=\"tester\")\r\n res = self.app.get(\"/admin\", follow_redirects=True)\r\n err_msg = (\"The user should not be able to access this page\"\r\n \" but the returned status is %s\" % res.status)\r\n assert \"403 FORBIDDEN\" in res.status, err_msg", "def octp_require_admin(f):\n\n @functools.wraps(f)\n def authed_only_wrapper(*args, **kwargs):\n if is_admin():\n return f(*args, **kwargs)\n else:\n if request.content_type == 'application/json':\n abort(403)\n else:\n # return redirect(url_for('auth.login', next=request.full_path))\n return render_template('page.html', content=\"You need to be admin to access this page!\")\n\n return authed_only_wrapper", "def __add_admin(self):\n log.debug(\"Displaying __add_admin\")\n # Let the admin select an administrator to promote\n user = self.__user_select()\n # Allow the cancellation of the operation\n if isinstance(user, CancelSignal):\n return\n # Check if the user is already an administrator\n admin = self.session.query(db.Admin).filter_by(user_id=user.user_id).one_or_none()\n if admin is None:\n # Create the keyboard to be sent\n keyboard = telegram.ReplyKeyboardMarkup([[self.loc.get(\"emoji_yes\"), self.loc.get(\"emoji_no\")]],\n one_time_keyboard=True)\n # Ask for confirmation\n self.bot.send_message(self.chat.id, self.loc.get(\"conversation_confirm_admin_promotion\"),\n reply_markup=keyboard)\n # Wait for an answer\n selection = self.__wait_for_specific_message([self.loc.get(\"emoji_yes\"), self.loc.get(\"emoji_no\")])\n # Proceed only if the answer is yes\n if 
selection == self.loc.get(\"emoji_no\"):\n return\n # Create a new admin\n admin = db.Admin(user=user,\n edit_products=False,\n receive_orders=False,\n create_transactions=False,\n is_owner=False,\n display_on_help=False)\n self.session.add(admin)\n # Send the empty admin message and record the id\n message = self.bot.send_message(self.chat.id, self.loc.get(\"admin_properties\", name=str(admin.user)))\n # Start accepting edits\n while True:\n # Create the inline keyboard with the admin status\n inline_keyboard = telegram.InlineKeyboardMarkup([\n [telegram.InlineKeyboardButton(\n f\"{self.loc.boolmoji(admin.edit_products)} {self.loc.get('prop_edit_products')}\",\n callback_data=\"toggle_edit_products\"\n )],\n [telegram.InlineKeyboardButton(\n f\"{self.loc.boolmoji(admin.receive_orders)} {self.loc.get('prop_receive_orders')}\",\n callback_data=\"toggle_receive_orders\"\n )],\n [telegram.InlineKeyboardButton(\n f\"{self.loc.boolmoji(admin.create_transactions)} {self.loc.get('prop_create_transactions')}\",\n callback_data=\"toggle_create_transactions\"\n )],\n [telegram.InlineKeyboardButton(\n f\"{self.loc.boolmoji(admin.display_on_help)} {self.loc.get('prop_display_on_help')}\",\n callback_data=\"toggle_display_on_help\"\n )],\n [telegram.InlineKeyboardButton(\n self.loc.get('menu_done'),\n callback_data=\"cmd_done\"\n )]\n ])\n # Update the inline keyboard\n self.bot.edit_message_reply_markup(message_id=message.message_id,\n chat_id=self.chat.id,\n reply_markup=inline_keyboard)\n # Wait for an user answer\n callback = self.__wait_for_inlinekeyboard_callback()\n # Toggle the correct property\n if callback.data == \"toggle_edit_products\":\n admin.edit_products = not admin.edit_products\n elif callback.data == \"toggle_receive_orders\":\n admin.receive_orders = not admin.receive_orders\n elif callback.data == \"toggle_create_transactions\":\n admin.create_transactions = not admin.create_transactions\n elif callback.data == \"toggle_display_on_help\":\n admin.display_on_help = not admin.display_on_help\n elif callback.data == \"cmd_done\":\n break\n self.session.commit()", "def is_admin(user):\n return user.is_authenticated and user.id == app.config.get('ADMIN')", "def admin():\n return redirect(url_for(\"user\", name=\"Admin!\"))", "def check_is_admin(context):\n init()\n credentials = context.to_policy_values()\n target = credentials\n return _ENFORCER.authorize('admin_required', target, credentials)", "def home_edituser():\n\tpass", "def _ensure_initial_admin(config):\n if get_api_version() > 2:\n manager = get_manager()\n default_domain_id = create_or_show_domain(DEFAULT_DOMAIN)\n leader_set({'default_domain_id': default_domain_id})\n admin_domain_id = create_or_show_domain(ADMIN_DOMAIN)\n leader_set({'admin_domain_id': admin_domain_id})\n create_or_show_domain(SERVICE_DOMAIN)\n create_tenant(\"admin\", ADMIN_DOMAIN)\n create_tenant(config(\"service-tenant\"), SERVICE_DOMAIN)\n leader_set({'service_tenant_id': manager.resolve_tenant_id(\n config(\"service-tenant\"),\n domain=SERVICE_DOMAIN)})\n create_role('service')\n create_tenant(\"admin\", DEFAULT_DOMAIN)\n create_tenant(config(\"service-tenant\"), DEFAULT_DOMAIN)\n # User is managed by ldap backend when using ldap identity\n if not (config('identity-backend') ==\n 'ldap' and config('ldap-readonly')):\n\n admin_username = config('admin-user')\n if get_api_version() > 2:\n passwd = create_user_credentials(admin_username,\n get_admin_passwd,\n set_admin_passwd,\n domain=ADMIN_DOMAIN)\n if passwd:\n create_role('Member')\n # Grant 
'Member' role to user ADMIN_DOMAIN/admin-user in\n # project ADMIN_DOMAIN/'admin'\n # ADMIN_DOMAIN\n grant_role(admin_username, 'Member', tenant='admin',\n user_domain=ADMIN_DOMAIN,\n project_domain=ADMIN_DOMAIN)\n create_role(config('admin-role'))\n # Grant admin-role to user ADMIN_DOMAIN/admin-user in\n # project ADMIN_DOMAIN/admin\n grant_role(admin_username, config('admin-role'),\n tenant='admin', user_domain=ADMIN_DOMAIN,\n project_domain=ADMIN_DOMAIN)\n # Grant domain level admin-role to ADMIN_DOMAIN/admin-user\n grant_role(admin_username, config('admin-role'),\n domain=ADMIN_DOMAIN, user_domain=ADMIN_DOMAIN)\n else:\n create_user_credentials(admin_username, get_admin_passwd,\n set_admin_passwd, tenant='admin',\n new_roles=[config('admin-role')])\n\n create_service_entry(\"keystone\", \"identity\",\n \"Keystone Identity Service\")\n\n for region in config('region').split():\n create_keystone_endpoint(public_ip=resolve_address(PUBLIC),\n service_port=config(\"service-port\"),\n internal_ip=resolve_address(INTERNAL),\n admin_ip=resolve_address(ADMIN),\n auth_port=config(\"admin-port\"),\n region=region)", "def is_administrator(self):\n return False", "def admin_required(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if session['user']['user_type'] != \"admin\":\n return abort(403)\n return f(*args, **kwargs)\n return decorated_function", "def is_admin(self):\n if self.type == 1:\n return True\n else:\n return False", "def get_authorization():\n return True", "def admin_credentials(self) -> pulumi.Input['AdministrativeCredentialsArgs']:\n return pulumi.get(self, \"admin_credentials\")", "def require_admin_login(handler_method):\n\n def wrapper(self, *args, **kwargs):\n \"\"\" Verifies that the calling user is an administrator of the application before calling the\n decorated handler\n\n Parameters:\n :param args: the arguments for the decorated function\n :param kwargs: the keyword arguments for the decorated function\n\n Returns:\n :return: the decorated function result if the access token was valid; otherwise it\n send an error response and returns None\n \"\"\"\n user = users.get_current_user()\n if not user:\n self.write_error(401)\n elif not users.is_current_user_admin():\n self.write_error(403)\n else:\n handler_method(self, *args, **kwargs)\n\n return wrapper", "def isAdmin(user):\n return isUserType(user, Admin)", "def save_admin():\n user = users.get_current_user()\n if user:\n if users.is_current_user_admin() or is_local_admin():\n added_admin_mail = request.forms.get('mail')\n added_admin = users.User(added_admin_mail)\n if added_admin:\n #return added_admin.user_id()\n #output = added_admin.nickname()+ ' ' + added_admin.user_id()\n #return output\n new_admin = Admins(parent=admin_base)\n new_admin.ref_nick = user.nickname()\n new_admin.admin_nick = added_admin.nickname()\n new_admin.admin_id = 'no id'#added_admin.user_id()\n new_admin.put()\n redirect('/admin')\n \n else:\n #return \"Не получилось\"\n output = template('admin', name=g_name, log_in_out = users.create_logout_url('/'), opt = 'Выход', user = user.nickname(), admins = admins, error=\"Неверный email\")\n return output\n else:\n redirect('/')\n else:\n redirect('/')", "def get_admin(request):\n\n jsonResp = {}\n jsonResp['admin'] = ''\n if User.objects.filter(profile = 'Admin').exists():\n mode = NameSpace.objects.get(ns_id='Human')\n name = User.objects.get(profile = 'Admin',ns_id=mode)\n admin = name.username\n jsonResp['admin'] = admin\n\n return JsonResponse(jsonResp)", "def admin_required(f):\n 
def decorator(*args, **kwargs):\n if \"user\" not in g:\n abort(401)\n if not g.user.admin:\n abort(403)\n return f(*args, **kwargs)\n return decorator", "def is_admin(context):\n request = context[\"request\"]\n url = resolve(request.path)\n context['is_admin'] = False\n return url.app_name == 'admin'", "def administrator():\n\n administrator = Administrator.objects.create(name='Michał', surname='Paluch',\n login='Udfsr43', password='Password_3',\n password_repeat='Password_3')\n return administrator", "def test_00_first_user_is_admin(self):\r\n self.register()\r\n user = db.session.query(User).get(1)\r\n assert user.admin == 1, \"User ID:1 should be admin, but it is not\"", "def set_admin_password(self, instance, new_pass):\n raise NotImplementedError()", "def check_admin():\r\n if not current_user.is_admin:\r\n abort(403)", "def admin_username(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"admin_username\")", "def admin_username(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"admin_username\")", "def admin_username(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"admin_username\")", "def has_admin(cipher):\n counter = Crypto.gen_aes_stream_counter_mt19973(3453243);\n text = Crypto.decrypt_aes(cipher, key, AES.MODE_CTR, counter=counter)\n return text.find(';admin=true;') != -1" ]
[ "0.6545929", "0.6443032", "0.6328436", "0.6289202", "0.62141764", "0.62122965", "0.6134274", "0.6130718", "0.6127599", "0.61198676", "0.6079004", "0.60773593", "0.6006329", "0.6006329", "0.59977925", "0.5994712", "0.59755987", "0.5956938", "0.59537655", "0.59465075", "0.59193176", "0.5917442", "0.5916327", "0.5910608", "0.5904137", "0.58757097", "0.58757097", "0.5864789", "0.5863521", "0.585017", "0.5838864", "0.58311325", "0.5829398", "0.5828973", "0.582743", "0.58154446", "0.5784877", "0.578317", "0.57785755", "0.57784563", "0.5775345", "0.57720876", "0.57583344", "0.575685", "0.5748126", "0.574803", "0.574761", "0.57426566", "0.57414746", "0.5731969", "0.57292825", "0.57286733", "0.5723149", "0.57201314", "0.57058614", "0.570381", "0.5701025", "0.57006645", "0.5693411", "0.569233", "0.56921184", "0.56918925", "0.5678996", "0.56752", "0.567515", "0.5669998", "0.5653384", "0.56524456", "0.5649157", "0.56359476", "0.5632836", "0.56286615", "0.56227505", "0.5606914", "0.56019926", "0.56007534", "0.56002456", "0.5589533", "0.5584318", "0.55837476", "0.5578232", "0.5566279", "0.5565549", "0.5565102", "0.55523276", "0.5551454", "0.55496776", "0.55419147", "0.55387676", "0.5535551", "0.55298746", "0.5528674", "0.5526509", "0.55228674", "0.5515095", "0.5514443", "0.551", "0.551", "0.551", "0.5502033" ]
0.62106174
6
auth_state enabled at the Authenticator level, but unavailable due to no crypto keys.
def auth_state_unavailable(auth_state_enabled):
    crypto.CryptKeeper.instance().keys = []
    yield
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_auth_state(self):\n raise NotImplementedError()", "async def test_auth_state(app, auth_state_enabled):\n name = 'kiwi'\n user = add_user(app.db, app, name=name)\n assert user.encrypted_auth_state is None\n cookies = await app.login_user(name)\n auth_state = await user.get_auth_state()\n assert auth_state == app.authenticator.auth_state", "def check_auth():", "def set_auth_state(self, data):\n raise NotImplementedError()", "def auth_active(hass):\n hass.loop.run_until_complete(\n register_auth_provider(hass, {\"type\": \"homeassistant\"})\n )", "def auth_token_enabled(self) -> pulumi.Output[bool]:\n return pulumi.get(self, \"auth_token_enabled\")", "def auth_enabled(self):\n\n return self._api_manager.auth_enabled()", "def ready(self):\n if self._wait_auth:\n return False\n return True", "def auth(self):\n ok = False\n if self.private_token:\n ok = self.token_auth()\n if not ok:\n self.credentials_auth()", "def requires_auth(self):\n return True", "def sr_auth_state(self, **kwargs):\n from pykern import pkunit\n from pykern import pkcollections\n\n m = re.search(\n r\"(\\{.*\\})\",\n pkcompat.from_bytes(self.sr_get(\"authState\").data),\n )\n s = pkcollections.json_load_any(m.group(1))\n for k, v in kwargs.items():\n pkunit.pkeq(\n v,\n s[k],\n \"key={} expected={} != actual={}: auth_state={}\",\n k,\n v,\n s[k],\n s,\n )\n return s", "def _set_authenticator(self):\n pass", "def auth_token_enabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"auth_token_enabled\")", "def is_stateless():\n return AceQLHttpApi.is_stateless()", "def authenticator():", "def enable_authentication(self) -> bool:\n return pulumi.get(self, \"enable_authentication\")", "def _auth_plugin_available(ext):\n return ext.obj.available", "def is_enabled(self):", "def isEnabled(state):\n return (isActive(state) or state == State.preEnabled)", "def check_auth(self):\n if self.type_of_auth == BboxConstant.AUTHENTICATION_TYPE_LOCAL:\n access_level_required = self.get_auth_access_needed_for_local()\n else:\n access_level_required = self.get_auth_access_needed_for_remote()\n\n if access_level_required == BboxConstant.AUTHENTICATION_LEVEL_NONE:\n return False\n elif access_level_required == BboxConstant.AUTHENTICATION_LEVEL_PRIVATE:\n return self.is_authentified()\n elif access_level_required == BboxConstant.AUTHENTICATION_LEVEL_PUBLIC:\n return True", "def is_frozensand_auth_available(self):\n cvar = self.getCvar('auth')\n if cvar:\n auth = cvar.getInt()\n return auth != 0\n else:\n return False", "def check_auth(self):\n if self.enterprise_url is not None:\n return True\n try:\n if self.api is not None:\n # Throws AuthenticationFailed if invalid credentials but\n # does not deduct from the rate limit.\n self.api.ratelimit_remaining\n return True\n else:\n self.print_auth_error()\n except AuthenticationFailed:\n self.print_auth_error()\n return False", "def get_authorization():\n return True", "def authorized(self):\n\n # Here we explicitly start because the usage of alembic may be out\n # of our running context.\n return PyFunceble.cli.facility.CredentialLoader.is_already_loaded()", "def enable_auth_gssapi(self):\n UseGSSAPI = False\n GSSAPICleanupCredentials = False\n return UseGSSAPI", "def _check_auth(self):\n if self.authToken:\n return True\n else:\n msg = \"you need to login\"\n self.raise_error(msg)", "def auth_isok(self):\n # pylint: disable=W0603\n global KEY\n return_value = False\n if KEY is None:\n return_value = True\n elif self.headers.get('Authorization') == 'Basic ' + KEY:\n 
return_value = True\n return return_value", "def check_state(self):\n pass", "def is_authenticated(self):\n return self.ping() is not None", "def _check_authentication(self) -> NoReturn:\n if not self.heartbeat():\n self.authenticate()", "def enablement_state(self):\n return self.__enablement_state", "def enable_authentication(self) -> Optional[bool]:\n return pulumi.get(self, \"enable_authentication\")", "def _check_auth(self, group_id):\n return", "def __load_encrypted_states(self) -> None:\n for section in config.sections():\n value = config.getstr('encryption', section=section)\n if value and value == self.encryption_short_name:\n self._enabled_tabs[section] = self.encrypt", "def __resolve_locked_state(self):\n\t\tif self.__userPassword not in (None, '') \\\n\t\t\tand self.__userPassword[0] == '!':\n\t\t\t\treturn True\n\n\t\treturn False", "def _should_be_encrypted(self):\n if self._should_be_encrypted_state is not None:\n return self._should_be_encrypted_state\n\n self._should_be_encrypted_state = self.contains_pii\n return self._should_be_encrypted_state", "def get_auth(self):\n return {'method': yeti_config.core.auth}", "def is_authenticated(self):\n result = self.lpass(\"lpass status\")\n\n if \"Logged in as\" in result.output:\n return True\n\n return False", "def isActive(state):\n return state in [State.enabled, State.softDisabling]", "def getSyncState(self, authenticationToken):\r\n pass", "def test_auth_xml(self):\n\n config = get_config()\n\n if config.getboolean('auth_test', 'enabled'):\n\n # Run only if enabled\n\n try:\n\n timestamp = config.getint('auth_test', 'timestamp')\n\n except ValueError:\n\n # If timestamp is set to a none-integer, we'll just assume\n # that it's unset\n\n timestamp = None\n\n response = authenticate(\n config.get('auth_test', 'url'),\n config.get('auth_test', 'account'),\n config.get('auth_test', 'preauthkey'),\n config.get('auth_test', 'account_by'),\n config.getint('auth_test', 'expires'),\n timestamp\n )\n\n self.assertNotEqual(\n response,\n None,\n \"Authentication with the configured settings \"\n \"was not successful\"\n )", "def tob_connection_active():\n if not tob_connection_synced():\n return False\n return (0 < len(list(credential_requests.keys())))", "def start(self):\n self.delay(50)\n DEBUG = GLOBAL_DEBUG and True\n if DEBUG: print \"start()\"\n\n # Get enckey_idx\n enckey_idx = struct.unpack(\"<L\", self.magic_page[OFF_ENCKEY_IDX:OFF_ENCKEY_IDX+4])[0]\n enckey_idx_actual = ((enckey_idx % SZ_PAGE) & ~0xF) & 0xFFFFFFFF;\n if DEBUG: print \"enckey_idx = 0x%08x; enckey_idx_actual = 0x%08x\" % (enckey_idx, enckey_idx_actual)\n\n # Get the enckey: a 4-lengthed array of uint32_ts\n self.state[\"enckey\"] = self.magic_page[enckey_idx_actual:enckey_idx_actual+16]\n # NOTE: this doesn't take LE into account\n if DEBUG: \n print \"enckey_idx_actual = 0x%02x, enckey = %s\" % (enckey_idx_actual, self.state[\"enckey\"])\n msg = \"0x\"\n for byte in self.state[\"enckey\"]:\n msg += \"%02x\" % struct.unpack(\"B\", byte)\n print \"enckey (hex) = %s\" % msg\n\n # Get auth_token_idx\n auth_token_idx = struct.unpack(\"<L\", self.magic_page[OFF_AUTH_TOKEN_IDX:OFF_AUTH_TOKEN_IDX+4])[0]\n auth_token_idx_actual = ((auth_token_idx % SZ_PAGE) & ~0xF) & 0xFFFFFFFF;\n if DEBUG: print \"auth_token_idx = 0x%08x; auth_token_idx_actual = 0x%08x\" % (auth_token_idx, auth_token_idx_actual)\n\n # Get the auth_token: a single uin32_t\n self.state[\"auth_token\"] = self.magic_page[auth_token_idx_actual:auth_token_idx_actual+4]\n # NOTE: this doesn't take LE into 
account\n if DEBUG: \n print \"auth_token_idx_actual = 0x%02x, auth_token = %s\" % (auth_token_idx_actual, self.state[\"auth_token\"])\n msg = \"0x\"\n for byte in self.state[\"auth_token\"]:\n msg += \"%02x\" % struct.unpack(\"B\", byte)\n print \"auth_token (hex) = %s\" % msg\n\n # Initialize PRNG buf (static)\n self.state[\"prng_buf\"] = struct.pack(\"<BBBBBBBB\", \n 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77 );\n if DEBUG: \n print 'self.state[\"prng_buf\"] = %s' % self.state[\"prng_buf\"] \n msg = \"0x\"\n for byte in self.state[\"prng_buf\"]:\n msg += \"%02x\" % struct.unpack(\"B\", byte)\n print 'self.state[\"prng_buf\"] = %s' % msg\n \n # Get PRNG key (based on flag page)\n self.state[\"prng_key\"] = \"\".join([ \n self.magic_page[ 2], self.magic_page[ 3], \n self.magic_page[ 5], self.magic_page[ 7],\n self.magic_page[11], self.magic_page[13],\n self.magic_page[17], self.magic_page[19],\n self.magic_page[23], self.magic_page[29],\n self.magic_page[31], self.magic_page[37],\n self.magic_page[41], self.magic_page[43],\n self.magic_page[53], self.magic_page[59] ] )\n if DEBUG: \n print 'self.state[\"prng_key\"] = %s' % self.state[\"prng_key\"] \n msg = \"0x\"\n for byte in self.state[\"prng_key\"]:\n msg += \"%02x\" % struct.unpack(\"B\", byte)\n print 'self.state[\"prng_key\"] = %s' % msg\n\n # We start with an empty PRNG cache.\n self.state[\"prng_bytes_remaining\"] = 0\n\n # Toggle for expected destination of messages.\n self.state[\"expected_dst\"] = DST_CB1", "def authentication_hook(self):\n pass", "def is_authenticated(self):\n return True #self.authenticated", "def _initialize_authentication(self):\n self._broker_connection.request(SaslHandshakeRequest.get_versions()[self.handshake_version](self.mechanism))\n response = SaslHandshakeResponse.get_versions()[self.handshake_version](self._broker_connection.response())\n if response.error_code != 0:\n if response.error_code == UnsupportedSaslMechanism.ERROR_CODE:\n msg = \"Broker only supports sasl mechanisms {}, requested was {}\"\n raise UnsupportedSaslMechanism(msg.format(\",\".join(response.mechanisms), self.mechanism))\n raise ERROR_CODES[response.error_code](\"Authentication Handshake failed\")", "def test_not_authenticated(self):\n pass # lint-amnesty, pylint: disable=unnecessary-pass", "def test_not_authenticated(self):\n pass # lint-amnesty, pylint: disable=unnecessary-pass", "def device_only_auth_enabled(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"device_only_auth_enabled\")", "def auth_status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"auth_status\")", "def state(self) -> Optional[pulumi.Input['DatabaseEncryptionState']]:\n return pulumi.get(self, \"state\")", "def credentials_work(self):\n good = True\n try:\n self.session.authenticate_client()\n except cloudpassage.CloudPassageAuthentication:\n good = False\n return good", "def enable_auth(self):\n\n self._api_manager.enable_auth()", "def account_enabled(self):\n if \"accountEnabled\" in self._prop_dict:\n return self._prop_dict[\"accountEnabled\"]\n else:\n return None", "def account_enabled(self):\n if \"accountEnabled\" in self._prop_dict:\n return self._prop_dict[\"accountEnabled\"]\n else:\n return None", "def controlled(self):\n if self.crypt_private is not None and self.sign_private is not None:\n return True\n else:\n return False", "def is_gcloud_auth_set():\n try:\n # This returns an email address of currently active account or empty string\n # if no account is active.\n output = subprocess.check_output([\n 
find_gcloud(), 'auth', 'list',\n '--filter=status:ACTIVE', '--format=value(account)',\n ])\n return bool(output.strip())\n except subprocess.CalledProcessError as exc:\n logging.error('Failed to check active gcloud account: %s', exc)\n return False", "def credentials(self):\n return True", "def authenticated(self):\n # We don't support authentication yet\n return False", "def is_authenticated(self):\n return False", "def authorized(self):\n\n return PyFunceble.cli.facility.CredentialLoader.is_already_loaded()", "def keyguard_disabled(self):\n return self._keyguard_disabled", "def credentials_work(self):\n\n good = True\n try:\n self.authenticate_client()\n except cloudpassage.CloudPassageAuthentication:\n good = False\n return good", "def device_only_auth_enabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"device_only_auth_enabled\")", "def device_only_auth_enabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"device_only_auth_enabled\")", "def alpha(self, state, pctx):\n raise NotImplementedError", "def encrypted(self):\n return self.encryption_type is not None", "def _on_account_config_changed(self, _, state: bool):\n if state:\n self.add_button.grab_default()\n self.add_button.set_sensitive(state)", "def _authenticate(self):\n auth = self.settings.get(\"auth\")\n if auth:\n if auth == Auth.PLAIN:\n self._authenticate_plain()\n elif auth == Auth.SHA256_MEMORY:\n self._authenticate_sha256_memory()\n elif auth == Auth.MYSQL41:\n self._authenticate_mysql41()\n elif self.stream.is_secure():\n # Use PLAIN if no auth provided and connection is secure\n self._authenticate_plain()\n else:\n # Use MYSQL41 if connection is not secure\n try:\n self._authenticate_mysql41()\n except InterfaceError:\n pass\n else:\n return\n # Try SHA256_MEMORY if MYSQL41 fails\n try:\n self._authenticate_sha256_memory()\n except InterfaceError as err:\n raise InterfaceError(\n \"Authentication failed using MYSQL41 and \"\n \"SHA256_MEMORY, check username and \"\n f\"password or try a secure connection err:{err}\"\n ) from err", "def is_connected(self):\n # FIXME: timeout automatically based on ADT default expiry?\n #self._authenticated_timestamp\n return self._authenticated", "def auth():\n pass", "def auth():\n pass", "def is_authenticated(self):\n return True", "def check_auth_interactive(self, username, submethods):\n return AUTH_FAILED", "def _has_auth_details(self) -> bool:\n\n return all([self.secret is not None, self.api_key is not None])", "def authenticated(self):\n self.account = Account(\n credentials=self._credentials,\n auth_flow_type=self._auth_type,\n token_backend=WorkdayTokenBackend(),\n )\n return self.account.is_authenticated", "def get_authenticated_granted(self):", "def is_authenticated(self):\n return True", "def is_authenticated(self):\n return True", "def is_authenticated(self):\n return True", "def is_authenticated(self):\n return True", "def is_authenticated(self):\n return True", "def is_authenticated(self):\n return True", "def is_authenticated(self):\n return True", "def enabled(cls):\r\n cls._check_configured()\r\n return sorted(cls._ENABLED.values(), key=lambda provider: provider.NAME)", "def check_auth_gssapi_keyex(self, username,\n gss_authenticated=AUTH_FAILED,\n cc_file=None):\n if gss_authenticated == AUTH_SUCCESSFUL:\n return AUTH_SUCCESSFUL\n return AUTH_FAILED", "def set_protection_enabled(self, c, state):\n self.enable_protection = state", "def has_credentials(self):\n return self.username and self.password and self.url and 
self.xml_rpc", "def determine_authenticator(all_auths, config):\n # Available Authenticator objects\n avail_auths = {}\n # Error messages for misconfigured authenticators\n errs = {}\n\n for auth_name, auth in all_auths.iteritems():\n try:\n auth.prepare()\n except errors.LetsEncryptMisconfigurationError as err:\n errs[auth] = err\n except errors.LetsEncryptNoInstallationError:\n continue\n avail_auths[auth_name] = auth\n\n # If an authenticator was specified on the command line, try to use it\n if config.authenticator:\n try:\n auth = avail_auths[config.authenticator]\n except KeyError:\n logging.info(list_available_authenticators(avail_auths))\n raise errors.LetsEncryptClientError(\n \"The specified authenticator '%s' could not be found\" %\n config.authenticator)\n elif len(avail_auths) > 1:\n auth = display_ops.choose_authenticator(avail_auths.values(), errs)\n elif len(avail_auths.keys()) == 1:\n auth = avail_auths[avail_auths.keys()[0]]\n else:\n raise errors.LetsEncryptClientError(\"No Authenticators available.\")\n\n if auth is not None and auth in errs:\n logging.error(\"Please fix the configuration for the Authenticator. \"\n \"The following error message was received: \"\n \"%s\", errs[auth])\n return\n\n return auth", "def enable_encryption(self, output_key: bytes, input_key: bytes) -> None:\n self.chacha = chacha20.Chacha20Cipher(output_key, input_key)\n self.state.has_authenticated = True", "def check_auth_interactive_response(self, responses):\n return AUTH_FAILED", "def ceph_enabled(self):", "def transit_encryption_enabled(self) -> pulumi.Output[bool]:\n return pulumi.get(self, \"transit_encryption_enabled\")", "def auth_required(self, cls):\n assert cls.authentication_classes == [JWTKeyAuthentication]", "def enable(self) -> Awaitable[Dict]:\n return self.client.send(\"Security.enable\", {})", "def auth_config(self) -> 'outputs.AuthConfigResponse':\n return pulumi.get(self, \"auth_config\")", "def authenticate(self, request=None):\r\n try:\r\n token = request.META.get('HTTP_AUTHORIZATION') or request.REQUEST['key']\r\n accesskey = AccessKey.objects.select_related('user').get(key=token)\r\n request.user = accesskey.user\r\n return request.user and request.user.is_active\r\n\r\n except(KeyError, AccessKey.DoesNotExist):\r\n return False", "def get_authenticated_agent(self):\n raise IllegalState()", "def _get_enable_peer_as_check(self):\n return self.__enable_peer_as_check", "def transit_encryption_enabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"transit_encryption_enabled\")" ]
[ "0.70946044", "0.6600317", "0.5862487", "0.58282715", "0.577362", "0.5701615", "0.5661563", "0.5567852", "0.55177164", "0.55092674", "0.55005485", "0.54370195", "0.5430303", "0.53735715", "0.5352972", "0.53514665", "0.5348289", "0.5340672", "0.5339704", "0.5333827", "0.53211564", "0.5317445", "0.53099155", "0.5294105", "0.5293451", "0.5264012", "0.5261133", "0.5248625", "0.5242969", "0.52391094", "0.5233627", "0.52286106", "0.522143", "0.5203147", "0.5194423", "0.51880246", "0.51822716", "0.51769567", "0.51581967", "0.5156866", "0.5149827", "0.5143519", "0.5136433", "0.51222056", "0.51155114", "0.5112765", "0.5104248", "0.5104248", "0.5101371", "0.5101354", "0.50894487", "0.5082038", "0.50773406", "0.5077323", "0.5077323", "0.5076965", "0.5076714", "0.5072409", "0.5058869", "0.50522095", "0.50493103", "0.50241995", "0.5021298", "0.50183743", "0.50183743", "0.5013944", "0.501225", "0.5003597", "0.49993837", "0.499856", "0.49983194", "0.49983194", "0.49886188", "0.49884373", "0.49838656", "0.4982649", "0.4982262", "0.49796984", "0.49796984", "0.49796984", "0.49796984", "0.49796984", "0.49796984", "0.49796984", "0.49778065", "0.4976036", "0.49738646", "0.49710742", "0.49689633", "0.49641293", "0.49634334", "0.49618718", "0.49612302", "0.49601573", "0.49480727", "0.49475056", "0.49465415", "0.4943853", "0.4939066", "0.49384552" ]
0.6792563
1
Tests whether ``SoundboardSound.__repr__`` works as intended.
def test__SoundboardSound__repr():
    available = False
    emoji = BUILTIN_EMOJIS['heart']
    name = 'rember'
    user_id = 202305240032
    volume = 0.69
    
    sound_id = 202305240033
    guild_id = 202305240034
    
    sound = SoundboardSound.precreate(
        sound_id,
        guild_id = guild_id,
        available = available,
        emoji = emoji,
        name = name,
        user_id = user_id,
        volume = volume,
    )
    
    vampytest.assert_instance(repr(sound), str)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_repr(self):\n self.assertEqual(repr(self.deck), \"Deck of 52 cards.\")", "def test_repr(self):\n self.assertEqual(repr(self.card), \"A of Spades\")", "def test_repr(self):\n dummy = DummyCryptographicObject()\n repr(dummy)", "def test_repr_show(self):\n self.assertEquals(\n repr(self.t['CNNNN']),\n \"<Show Chaser Non-Stop News Network (CNNNN) (containing 2 seasons)>\"\n )", "def test_repr(self, r, rep):\n assert repr(r) == rep", "def test_repr():\n c = Circle(4) \n assert c.__repr__() == 'Circle(4)'", "def allow_repr(cls) -> bool:\n raise NotImplementedError", "def test_notification_repr(self) -> None:\n self.assertEqual(repr(self.notification1), \"<Notification 1>\")\n\n # pylint: disable=unnecessary-dunder-call\n self.assertEqual(self.notification1.__repr__(), \"<Notification 1>\")", "def test_repr(self):\n self.assertTrue(repr(self.obj1))\n self.assertTrue(repr(self.obj2))\n self.assertTrue(repr(self.obj3))\n self.assertTrue(repr(self.obj4))\n self.assertTrue(repr(self.obj5))", "def test_repr(self):\n\n char = Character.query.get(1111)\n expected = \"<Character Instance | ID: 1111 | Name: Mario | Game: Super Mario 64>\"\n\n self.assertEqual(expected, str(char))", "def test_repr() -> None:\n attrs = {\"this_attr\": True}\n fixed_time = datetime(2016, 7, 9, 11, 0, 0, tzinfo=dt_util.UTC, microsecond=432432)\n state = ha.State(\n \"sensor.temperature\",\n \"18\",\n attrs,\n last_changed=fixed_time,\n last_updated=fixed_time,\n )\n event = ha.Event(\n EVENT_STATE_CHANGED,\n {\"entity_id\": \"sensor.temperature\", \"old_state\": None, \"new_state\": state},\n context=state.context,\n time_fired=fixed_time,\n )\n assert \"2016-07-09 11:00:00+00:00\" in repr(States.from_event(event))\n assert \"2016-07-09 11:00:00+00:00\" in repr(Events.from_event(event))", "def test_repr(self):\n \n # Create a Resource object\n book = Book(\"Penguin Group\", \"New York\", \"fiction\", 1, \"White Noise\", \n Name(\"Don\", \"\", \"DeLillo\"), \n \"Delillo's White Noise follows narrator Jack \"\\\n \"Gladney, a professor at a small Liberal Arts \"\\\n \"college and describes an academic year. Jack \"\\\n \"teaches at a school called the \"\\\n \"College-on-the-Hill, where he serves as the \"\\\n \"department chair of Hitler studies. He lives in \"\\\n \"Blacksmith, a quiet college town, with his wife, \"\\\n \"Babette, and four of their children from earlier \"\\\n \"marriages: Heinrich, Steffie, Denise, and \"\\\n \"Wilder. Throughout the novel, various \"\\\n \"half-siblings and ex-spouses drift in and out \"\\\n \"of the family’s home.\",\n \"sci-fi\", \"English\", 1985, \"US\", 326, \"book\",\n [\"culture\", \"survival\", \"life\", \"society\"])\n \n \n # Assert expected result of the repr function\n self.assertEqual(repr(book), (\"Book(1, 'White Noise', \"\\\n \"Name('Don', '', 'DeLillo'), \"\\\n \"'Delillo's White Noise follows narrator Jack \"\\\n \"Gladney, a professor at a small Liberal Arts \"\\\n \"college and describes an academic year. Jack \"\\\n \"teaches at a school called the \"\\\n \"College-on-the-Hill, where he serves as the \"\\\n \"department chair of Hitler studies. He lives in \"\\\n \"Blacksmith, a quiet college town, with his wife, \"\\\n \"Babette, and four of their children from earlier \"\\\n \"marriages: Heinrich, Steffie, Denise, and \"\\\n \"Wilder. 
Throughout the novel, various \"\\\n \"half-siblings and ex-spouses drift in and out \"\\\n \"of the family’s home.', 'sci-fi', 'English', \"\\\n \"1985, 'US', 326, 'book', \"\\\n \"'['culture', 'survival', 'life', 'society']', \"\\\n \"'Penguin Group', 'New York', 'fiction')\"))", "def __repr__(self) -> str:\n ...", "def __repr__(self) -> str:\n ...", "def __repr__(self) -> str:\n ...", "def __repr__(self) -> str:\n ...", "def __repr__(self) -> str:\n ...", "def test_repr(self):\n obj = objects.OpaqueObject(self.bytes_a, enums.OpaqueDataType.NONE)\n args = \"value={0}, opaque_type={1}\".format(\n binascii.hexlify(self.bytes_a), enums.OpaqueDataType.NONE)\n expected = \"OpaqueObject({0})\".format(args)\n observed = repr(obj)\n self.assertEqual(expected, observed)", "def test_repr_episode(self):\n self.assertEquals(\n repr(self.t['CNNNN'][1][1]),\n \"<Episode 01x01 - September 19, 2002 (20:30 - 21:00)>\"\n )", "def test_repr(self):\n for duration, repr_, _ in self.test_cases:\n self.assertEqual(repr(Rest(duration)), repr_)", "def test_repr(self):\n \n # Create a Resource object\n resource = Resource(1, \"White Noise\", Name(\"Don\", \"\", \"DeLillo\"), \n \"Delillo's White Noise follows narrator Jack \"\\\n \"Gladney, a professor at a small Liberal Arts \"\\\n \"college and describes an academic year. Jack \"\\\n \"teaches at a school called the \"\\\n \"College-on-the-Hill, where he serves as the \"\\\n \"department chair of Hitler studies. He lives in \"\\\n \"Blacksmith, a quiet college town, with his wife, \"\\\n \"Babette, and four of their children from earlier \"\\\n \"marriages: Heinrich, Steffie, Denise, and \"\\\n \"Wilder. Throughout the novel, various \"\\\n \"half-siblings and ex-spouses drift in and out \"\\\n \"of the family’s home.\",\n \"sci-fi\", \"English\", 1985, \"US\", 326, \"book\", \n [\"culture\", \"survival\", \"life\", \"society\"])\n \n \n # Assert expected result of the repr function\n self.assertEqual(repr(resource), (\"Resource(1, 'White Noise', \"\\\n \"Name('Don', '', 'DeLillo'), \"\\\n \"'Delillo's White Noise follows narrator Jack \"\\\n \"Gladney, a professor at a small Liberal Arts \"\\\n \"college and describes an academic year. Jack \"\\\n \"teaches at a school called the \"\\\n \"College-on-the-Hill, where he serves as the \"\\\n \"department chair of Hitler studies. He lives in \"\\\n \"Blacksmith, a quiet college town, with his wife, \"\\\n \"Babette, and four of their children from earlier \"\\\n \"marriages: Heinrich, Steffie, Denise, and \"\\\n \"Wilder. 
Throughout the novel, various \"\\\n \"half-siblings and ex-spouses drift in and out \"\\\n \"of the family’s home.', 'sci-fi', 'English', \"\\\n \"1985, 'US', 326, 'book', \"\\\n \"'['culture', 'survival', 'life', 'society']')\"))", "def _repr_(self):\n return repr(self.element())", "def __repr__(self):", "def __repr__(self):", "def __repr__(self):", "def test_reprMethod(self):\n self.assertEqual(\n repr(task.LoopingCall(TestableLoopingCall.__init__)),\n \"LoopingCall<None>(TestableLoopingCall.__init__, *(), **{})\")", "def test_reprSanity(self):\n repr(MessageSet(1, 2))", "def __repr__(self):\n\n return self._repr__base(rich_output=False)", "def __repr__(self):\n\n return self._repr__base(rich_output=False)", "def test_glass_repr__returns_expected_value():\n glass = moet.create_glass(\"A\")\n assert \"moet.glass.Glass(uid=A, pos=None)\" in repr(glass)", "def test_repr(self):\n fz = FourierZernikeBasis(L=6, M=3, N=0)\n s = str(fz)\n assert \"FourierZernikeBasis\" in s\n assert \"ansi\" in s\n assert \"L=6\" in s\n assert \"M=3\" in s\n assert \"N=0\" in s", "def test_repr(self, cosmo_cls, cosmo):\n r = repr(cosmo)\n\n # class in string rep\n assert cosmo_cls.__qualname__ in r\n assert r.index(cosmo_cls.__qualname__) == 0 # it's the first thing\n r = r[len(cosmo_cls.__qualname__) + 1:] # remove\n\n # name in string rep\n if cosmo.name is not None:\n assert f\"name=\\\"{cosmo.name}\\\"\" in r\n assert r.index(\"name=\") == 0\n r = r[6 + len(cosmo.name) + 3:] # remove\n\n # parameters in string rep\n ps = {k: getattr(cosmo, k) for k in cosmo.__parameters__}\n cps = {k: getattr(cosmo_cls, k) for k in cosmo.__parameters__}\n for k, v in ps.items():\n sv = format(v, cps[k].format_spec if v is not None else '')\n assert (k + '=' + sv) in r\n assert r.index(k) == 0\n r = r[len((k + '=' + sv)) + 2:] # remove", "def test_repr(self, cosmo_cls, cosmo):\n FLRWSubclassTest.test_repr(self, cosmo_cls, cosmo)\n\n # test eliminated Ode0 from parameters\n assert \"Ode0\" not in repr(cosmo)", "def test_repr(self):\n \n from pystarlab.starlab import Scale\n scale = Scale(c=True, m=1, r=1)\n self.assertEquals(\"scale -c -e 0 -m 1 -r 1 \", repr(scale))", "def __repr__(self):\n return self.display()", "def test_repr(self):\n tab = widgets.StaticTab(\n id=u'id',\n title=u'Title',\n content=u'A content.')\n self.assertEquals(\n repr(tab),\n \"<StaticTab id=u'id' title=u'Title' selected=False group=None>\")", "def test_log_repr(self) -> None:\n self.assertEqual(repr(self.log1), \"<Log 1>\")\n\n # pylint: disable=unnecessary-dunder-call\n self.assertEqual(self.log1.__repr__(), \"<Log 1>\")", "def test_repr(post_factory):\n post = post_factory()\n expected = (\n f\"Post(author_id={repr(post.author.id)}, slug={repr(post.slug)})\"\n )\n\n assert repr(post) == expected", "def test_repr(self, cls):\n inst = cls()\n # Exact values aren't a concern so long as neither direction\n # raises an exception.\n pkl = cloudpickle.dumps(inst)\n cloudpickle.loads(pkl)", "def _repr_(self):\n return self._description", "def __repr__(self):\n str(self)", "def _repr_(self):\n return repr(self.label())", "def test_repr(self):\n self.assertEquals(\n repr(self.tabView),\n \"<TabView topLevel=False tabs=%r>\" % self.tabs)", "def __repr__(self):\n return super().__repr__()", "def test_class_repr_method(self, test_instances):\n a, b, c = test_instances\n repr_a = repr(a)\n repr_b = repr(b)\n repr_c = repr(c)\n\n assert \"<class 'pymonzo.utils.CommonMixin'>\" in repr_a\n assert \"<class 'pymonzo.utils.CommonMixin'>\" in repr_b\n assert \"<class 
'tests.test_utils.ExampleClass'>\" in repr_c\n\n if six.PY2:\n # We don't know the `__dict__` order so let's do it in parts\n parts = [\"u'_hidden': 1\", \"u'foo': u'foo'\", \"u'bar': True\"]\n\n for part in parts:\n assert part in repr_a\n assert part in repr_b\n assert part in repr_c\n else:\n # We don't know the `__dict__` order so let's do it in parts\n parts = [\n \"'_hidden': 1\", \"'foo': 'foo'\", \"'bar': True\",\n ]\n\n for part in parts:\n assert part in repr_a\n assert part in repr_b\n assert part in repr_c", "def test_repr_magic_method():\n LINES = (\n \"One morn before me were three figures seen,\",\n \"And once more came they by:-alas! wherefore?\",\n )\n for line in LINES:\n assert(repr(LineBuilder(line))\n == \"LineBuilder('\" + line + \"')\")", "def test_reprs(self):\n\n #test User repr\n charlie = User.query.get(1)\n self.assertEqual(\n charlie.__repr__(),\n \"<User Charlie Dog id: 1>\")\n\n #test Chart repr\n chart = Chart.query.get(1)\n self.assertEqual(\n chart.__repr__(),\n \"<Chart id: 1 users: [u'Charlie Dog', u'Maisey Puppy']>\")\n\n #test Star repr\n star = Star.query.get(1)\n self.assertEqual(\n star.__repr__(),\n \"<Star id: 1 from: Charlie Dog to: Maisey Puppy on 2016-12-31>\")", "def __repr__(self):\n # type: () -> str\n return self.to_str()", "def __repr__(self):\n # type: () -> str\n return self.to_str()", "def __repr__(self):\n # type: () -> str\n return self.to_str()", "def test_repr_format(self):\n t = Enumerate([2, \"asfa\", \"ipsi\"])\n assert t.repr_format(\"asfa\") == \"Enumerate(asfa)\"", "def repr(self):\n raise InterpreterError(\"Unimplemented repr()\")", "def repr(x) -> String:\n pass", "def test_repr_season(self):\n self.assertEquals(\n repr(self.t['CNNNN'][1]),\n \"<Season instance (containing 9 episodes)>\"\n )", "def repr_(object_):\n return repr(object_)", "def srepr(obj):\n return repr(str(obj))", "def test_display_method2(self):\n capturedOutput = io.StringIO()\n sys.stdout = capturedOutput\n r2 = Rectangle(2, 2)\n r2.display()\n sys.stdout = sys.__stdout__\n desired = '##\\n##\\n'\n self.assertEqual(capturedOutput.getvalue(), desired)", "def test_repr():\n assert_equal(repr(Vector(1, 2)), 'Vector(1, 2)')", "def __repr__(self):\n return NotImplemented", "def __repr__(self):\n return self._format() if self.always_visible or not self.is_pointless() else ''", "def __repr__(self):\n # this is a good default __repr__\n # Q: using the docs, can you figure out what this is doing?\n return f\"<{type(self).__name__} {self.__dict__}>\"", "def __repr__(self):\r\n return self.__str__()", "def __repr__(self):\n pass", "def test_node__repr():\n a_left = Node(7, data='pl left')\n a_right = Node(42, data='pl right')\n a = Node(13, data='pl a', left=a_left, right=a_right)\n repr_a = repr(a)\n expect_repr = '<Node | Val: 13 | Data: pl a | Left: 7 | Right: 42>'\n assert expect_repr == repr_a", "def test_repr_ef(self):\n self.assertEqual(self.ns, eval(f\"{self.ns!r}\"))", "def __repr__(self):\n raise NotImplementedError", "def __repr__(self):\n raise NotImplementedError", "def __repr__(self):\n raise NotImplementedError", "def __repr__(self):\n raise NotImplementedError(\"Not implemented\")", "def __repr__(self):\n return self.to_str()", "def test_reprFunction(self):\n self.assertEqual(repr(task.LoopingCall(installReactor, 1, key=2)),\n \"LoopingCall<None>(installReactor, *(1,), **{'key': 2})\")", "def test__ActivityMetadataBase__repr():\n activity_metadata = ActivityMetadataBase()\n \n vampytest.assert_instance(repr(activity_metadata), str)", "def 
test_display__method(self):\n Rectangle.reset_objects()\n s1 = Square(5)\n f = io.StringIO()\n with contextlib.redirect_stdout(f):\n s1.display()\n self.assertEqual(f.getvalue(), \"#####\\n#####\\n#####\\n#####\\n#####\\n\")", "def test_repr_method(self):\n\n u = User(\n email=\"test@test.com\",\n username=\"testuser\",\n password=\"HASHED_PASSWORD\"\n )\n\n u.id = 9999\n\n db.session.add(u)\n db.session.commit()\n\n # Method should return: User <User #{self.id}: {self.username}, {self.email}>\n msg = self.u1.__repr__()\n self.assertEqual(1, 1)\n\n ### Following tests ###", "def __repr__(self) -> str:\n\t\t\n\t\trepr = \"\"\n\t\tfor row in self.board:\n\t\t\tfor element in row:\n\t\t\t\tif element:\n\t\t\t\t\trepr = repr + \"o \"\n\t\t\t\telse:\n\t\t\t\t\trepr = repr + \"@ \"\n\t\t\trepr = repr + \"\\n\"\n\t\treturn repr", "def test_repr(self):\n certificate = Certificate(\n certificate_type=self.certificate_type_b,\n certificate_value=self.certificate_value_b)\n\n certificate_type = \"certificate_type={0}\".format(\n str(self.certificate_type_b))\n certificate_value = \"certificate_value=b'{0}'\".format(\n str(self.certificate_value_b))\n\n expected = \"Certificate({0}, {1})\".format(\n certificate_type, certificate_value)\n observed = repr(certificate)\n\n msg = \"\\nexpected:\\n{0}\\nobserved:\\n{1}\".format(expected, observed)\n self.assertEqual(expected, observed, msg)\n\n # NOTE (peter-hamilton) Testing with eval won't work due to null bytes.", "def test_display_method1(self):\n capturedOutput = io.StringIO()\n sys.stdout = capturedOutput\n r1 = Rectangle(4, 6)\n r1.display()\n sys.stdout = sys.__stdout__\n desired = '####\\n####\\n####\\n####\\n####\\n####\\n'\n self.assertEqual(capturedOutput.getvalue(), desired)", "def test_repr(self):\n zone = Zone('test.example.com')\n record = Record(zone, \"test-record\", {'type': 'A', 'ttl': 300})\n self.assertEqual(f'{record}', 'Record<A, test-record>')", "def _printable(self):\n pass", "def test_updated_display3(self):\n capturedOutput = io.StringIO()\n sys.stdout = capturedOutput\n r3 = Rectangle(3, 2, 0, 1)\n r3.display()\n sys.stdout = sys.__stdout__\n desired = '\\n###\\n###\\n'\n self.assertEqual(capturedOutput.getvalue(), desired)", "def __repr__(self):\n repr = \"<BBPlayer %s at %s>\" % (self.name, hex(id(self)))\n return repr", "def __repr__(self):\t\n\t\treturn arabicRepr.repr(self.__dict__);", "def __repr__(self):\r\n return self.to_str()", "def __repr__(self):\r\n return self.to_str()", "def test_repr_format(self):\n t = Identity()\n assert t.repr_format(\"asfa\") == \"asfa\"", "def test_repr_method(self):\n _name = 'test-name'\n _el = MarkerId(_name)\n self.assertEqual(_el.__repr__(), \"<MarkerId: %s>\" % _name)", "def __repr__(self) -> str:\n return \"{}({!r}, {!r}, {!r})\".format(\n self.__class__.__name__,\n \"\".join(self.tape),\n self.blank_symbol,\n self.current_position,\n )", "def test_updated_display1(self):\n capturedOutput = io.StringIO()\n sys.stdout = capturedOutput\n r1 = Rectangle(2, 3, 2, 2)\n r1.display()\n sys.stdout = sys.__stdout__\n desired = '\\n\\n ##\\n ##\\n ##\\n'\n self.assertEqual(capturedOutput.getvalue(), desired)", "def __repr__(self) -> str:\n return self.__str__()", "def __repr__(self) -> str:\n return self.__str__()", "def __repr__(self) -> str:\n return self.__str__()", "def __repr__(self) -> str:\n return self.__str__()", "def __repr__(self):\n return repr(self.__dict__['_obj'])", "def __repr__(self):\n return \"<instance of {}>\".format(self.__class__.__name__)", "def test_repr(self):\n qg = 
ConcentricGrid(2, 3, 4)\n s = str(qg)\n assert \"ConcentricGrid\" in s\n assert \"jacobi\" in s\n assert \"L=2\" in s\n assert \"M=3\" in s\n assert \"N=4\" in s", "def test_updated_display4(self):\n capturedOutput = io.StringIO()\n sys.stdout = capturedOutput\n r4 = Rectangle(3, 2, 0, 0)\n r4.display()\n sys.stdout = sys.__stdout__\n desired = '###\\n###\\n'\n self.assertEqual(capturedOutput.getvalue(), desired)", "def __str__(self):\r\n return repr(self)", "def __repr__(self):\n return self.__str__()", "def __repr__(self):\n return self.__str__()", "def __repr__(self):\n return self.__str__()" ]
[ "0.6914809", "0.6808723", "0.66963106", "0.6534174", "0.6505421", "0.64757746", "0.6391462", "0.6254702", "0.6244902", "0.6209125", "0.6178", "0.6166957", "0.6156544", "0.6156544", "0.6156544", "0.6156544", "0.6156544", "0.6116914", "0.6116562", "0.6096828", "0.60628986", "0.60588056", "0.6048957", "0.6048957", "0.6048957", "0.60472554", "0.6041102", "0.5974292", "0.5974292", "0.5968777", "0.59686023", "0.5950879", "0.5946966", "0.59256124", "0.5897471", "0.5893889", "0.5868422", "0.5867867", "0.5865553", "0.5864804", "0.58464235", "0.5828465", "0.5825539", "0.5816437", "0.5809053", "0.58061904", "0.57952696", "0.57818246", "0.57818246", "0.57818246", "0.57790697", "0.57716566", "0.57632035", "0.57581294", "0.57574475", "0.575503", "0.5753189", "0.5730392", "0.5730188", "0.5728156", "0.57248425", "0.5724584", "0.57008874", "0.5700458", "0.5698105", "0.5693948", "0.5693948", "0.5693948", "0.56917137", "0.56831956", "0.56737494", "0.5664351", "0.5652273", "0.5644786", "0.56433284", "0.56391525", "0.562401", "0.5607336", "0.5590028", "0.5577699", "0.5571292", "0.5565837", "0.556388", "0.556388", "0.5559018", "0.5558682", "0.5554054", "0.5549401", "0.5547373", "0.5547373", "0.5547373", "0.5547373", "0.5546302", "0.5542348", "0.5540679", "0.55385774", "0.553687", "0.55346465", "0.55346465", "0.55346465" ]
0.77653193
0
Tests whether ``SoundboardSound.__hash__`` works as intended.
def test__SoundboardSound__hash():
    available = False
    emoji = BUILTIN_EMOJIS['heart']
    name = 'rember'
    user_id = 202305240035
    volume = 0.69
    
    sound_id = 202305240036
    guild_id = 202305240037
    
    keyword_parameters = {
        'available': available,
        'emoji': emoji,
        'name': name,
        'user_id': user_id,
        'volume': volume,
    }
    
    sound = SoundboardSound.precreate(
        sound_id,
        guild_id = guild_id,
        **keyword_parameters,
    )
    
    vampytest.assert_instance(repr(sound), str)
    
    sound = SoundboardSound(**keyword_parameters)
    
    vampytest.assert_instance(repr(sound), str)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test__SoundboardSound__eq():\n available = False\n emoji = BUILTIN_EMOJIS['heart']\n name = 'rember'\n user_id = 202305240038\n volume = 0.69\n \n sound_id = 202305240039\n guild_id = 202305240040\n \n keyword_parameters = {\n 'available': available,\n 'emoji': emoji,\n 'name': name,\n 'user_id': user_id,\n 'volume': volume,\n }\n \n sound = SoundboardSound.precreate(\n sound_id,\n guild_id = guild_id,\n **keyword_parameters,\n )\n \n vampytest.assert_eq(sound, sound)\n vampytest.assert_ne(sound, object())\n \n test_sound = SoundboardSound(**keyword_parameters,)\n \n vampytest.assert_eq(sound, test_sound)\n \n for field_name, field_value in (\n ('available', True),\n ('emoji', BUILTIN_EMOJIS['x']),\n ('name', 'happy day'),\n ('user_id', 202305240041),\n ('volume', 0.70),\n ):\n test_sound = SoundboardSound(**{**keyword_parameters, field_name: field_value})\n vampytest.assert_ne(test_sound, sound)", "def test_hash(self):\n self.assertEqual(hash(self.compound), hash((\"t1\", \"test compound\")))", "def __hash__(self) -> int:", "def __hash__(self):\n return 0", "def check_hashable(self, setup):\n try:\n hash(setup)\n except TypeError as e:\n raise AssertionError(f\"setup object is not hashable:\\n{setup}\") from e", "def __hash__(self):\n\n return hash(str(self.board))", "def test_channel_hash(self):\n acq_channel_1 = AcquireChannel(123)\n acq_channel_2 = AcquireChannel(123)\n\n hash_1 = hash(acq_channel_1)\n hash_2 = hash(acq_channel_2)\n\n self.assertEqual(hash_1, hash_2)", "def __hash__(self) -> int:\n ...", "def __hash__(self) -> int:\n ...", "def __hash__(self):\n\t\treturn 1", "def __hash__(self):\n return hash(self.hash)", "def __hash__(self):\n return super().__hash__()", "def __hash__(self):\n return hash((self.SYMBOL, self._.hash_parameters))", "def __hash__(self):\n return hash((self._im_func, self._im_self_ref, self._im_class))", "def test__ActivityMetadataBase__hash():\n activity_metadata = ActivityMetadataBase()\n \n vampytest.assert_instance(hash(activity_metadata), int)", "def __hash__(self):\r\n return hash(type(self)) ^ hash(self.broadcastable)", "def __hash__(self):\n return hash(self.returnBoard())", "def __hash__(self):\n return self.word.__hash__()", "def test_random_matgame_hash_eq(strats):\n payoffs = rand.random(tuple(strats) + (len(strats),))\n matg = matgame.matgame(payoffs)\n\n copy = matgame.matgame_copy(matg)\n assert hash(copy) == hash(matg)\n assert copy == matg\n\n game = paygame.game_copy(matg)\n copy = matgame.matgame_copy(game)\n assert hash(copy) == hash(matg)\n assert copy == matg", "def __hash__(self):\n raise NotImplementedError", "def __hash__(self):\n return hash(tuple([self.get_rank(), self.get_suit()]))", "def __hash__(self):\n return hash(self.get_canonical_identifier())", "def fixed(o):\n try:\n hash(o)\n except TypeError:\n return False\n return True", "def __hash__(self):\r\n return hash(self.__key())", "def __hash__(self):\n return hash(self.__uuid)", "def __hash__(self):\n return hash(self.__uuid)", "def __hash__(self):\n return hash(tuple(self.sig))", "def __hash__(self):\n return hash(str(self))", "def __hash__(self):\n return hash(str(self))", "def __hash__(self):\n return hash(str(self))", "def __hash__(self):\n return hash(str(self.__id__))", "def __hash__(self):\n return hash(str(self)) # use the __str__ method to obtain the hashcode", "def __hash__(self):\n return hash(str(self)) # use the __str__ method to obtain the hashcode", "def __hash__(self):\n return hash(str(self)) # use the __str__ method to obtain the hashcode", "def 
__hash__(self):\n return hash(str(self)) # use the __str__ method to obtain the hashcode", "def __hash__(self):\n return self.code_data.__hash__()", "def __hash__(self):\n return hash(repr(self))", "def __hash__(self):\n return hash(repr(self))", "def __hash__(self):\n return hash(repr(self))", "def __hash__(self):\n return hash((self.benchmark, self.name))", "def checkHash(song):\n\tsql = \"Select path, filename, hash from songs where hash = '\" + song.hash + \"';\"\n\tc, conn = connect()\n\tc.execute(sql)\n\tnotexists = True\n\tfor (path, filename, hash) in c:\n\t\tif hash == song.hash:\n\t\t\tnotexists = False\n\t\telse:\n\t\t\tnotexists = True\n\treturn notexists", "def __hash__(self):\n return hash(tuple(self._sub_effects))", "def __hash__( self ):\n return hash( self.data )", "def hash(self):\n raise NotImplementedError() # To be subclassed", "def __hash__(self):\n return self.to_hash()", "def test_hash(self):\n self.assertEqual(hash(self._version1), hash(self._version1))\n self.assertNotEqual(hash(self._version2), hash(self._version1))\n self.assertEqual(hash(\"0.1\"), hash(self._version1))", "def __hash__(self):\n return hash(self.name)", "def __hash__(self):\n return hash(self.name)", "def __hash__(self):\n\t\treturn hash(repr(self))", "def __hash__(self):\n return hash(id(self))", "def __hash__(self):\n return hash(id(self))", "def __hash__(self):\n return hash(self.sequence)", "def __hash__(self):\n return hash((self._nele, self._m_s))", "def __hash__(self) -> int:\n return hash(self.__key__())", "def __hash__(self): # hash unico para cada movimento\n return hash(self.move) ^ hash(self.score) ^ hash(self.color)", "def __hash__(self):\n return hash((self.type, self.data))", "def __Hash(self):\n return self._Hash()", "def __hash__(self):\n\n return int(self._hash_value_)", "def _validate_hash(data, shasum):\n from hashlib import sha1\n digest = sha1(data).hexdigest()\n if digest == shasum:\n return True\n else:\n print('Invalid shasum, got: {} , expected: {}'.format(digest, shasum))\n return False", "def __hash__(self):\n return hash(self.seq)", "def __hash__(self) -> int:\n return hash(self.identifier)", "def __hash__(self) -> int:\n return hash(repr(self))", "def __hash__(self) -> int:\n return hash(tuple(self.name,))", "def __hash__(self):\n return self.value.__hash__()", "def __hash__(self):\r\n return hashtype(self) ^ hash(self.dtype) ^ hash(self.broadcastable)", "def hash_comparison(self):\n for result in self.cards:\n if result.hash_status:\n return True\n return False", "def _is_hash_valid(self):\n downloaded_hash = sha1(self._downloaded_bytes).digest()\n return downloaded_hash == self.hash", "def valid(self):\n return self.hash.to_int('little') < self.target", "def __hash__(self):\n hash_value = 0\n \n # approximate_online_count\n hash_value ^= self.approximate_online_count\n \n # approximate_user_count\n hash_value ^= self.approximate_user_count << 12\n \n # description\n description = self.description\n if (description is not None):\n hash_value ^= hash(description)\n \n # discovery_splash\n hash_value ^= hash(self.discovery_splash)\n \n # emojis\n emojis = self.emojis\n hash_value ^= len(emojis) << 1\n for emoji in emojis.values():\n hash_value ^= hash(emoji)\n \n # features\n features = self.features\n hash_value ^= len(features) << 5\n for feature in features:\n hash_value ^= hash(feature)\n \n # icon\n hash_value ^= hash(self.icon)\n \n # id\n hash_value ^= self.id\n \n # invite_splash\n hash_value ^= hash(self.invite_splash)\n \n # stickers\n stickers = 
self.stickers\n hash_value ^= len(stickers) << 9\n for sticker in stickers.values():\n hash_value ^= hash(sticker)\n \n # name\n name = self.name\n if (description is None) or (description != name):\n hash_value ^= hash(name)\n \n return hash_value", "def __hash__(self):\n return hash((self.benchclass, self.name))", "def __hash__(self):\n return hash(self._tp__get_typed_properties())", "def __hash__(self):\n return hash(self.floatvalue)", "def __hash__(self):\n return hash(self._callback_ref)", "def __hash__(self) -> int:\n return hash(self._id)", "def __hash__(self):\n if self.quality is None:\n return hash(md5(self.id.encode('UTF-8') + b'\\0' +\n self.sequence.encode('UTF-8')).digest())\n else:\n return hash(md5(self.id.encode('UTF-8') + b'\\0' +\n self.sequence.encode('UTF-8') + b'\\0' +\n self.quality.encode('UTF-8')).digest())", "def hash(self):\n return self.__hash__()", "def __hash__(self):\n\n return hash(self._key)", "def __hash__(self):\n return hash((super().__hash__(), self.permeability))", "def __hash__(self):\n return hash(self.label())", "def __hash__(self):\n obj = (self.make, self.model, self.year, self.mpg)\n return hash(obj)", "def __hash__(self):\n return hash(self.id)", "def __hash__(self):\n return hash(self.id)", "def __hash__(self):\n return hash(self.id)", "def __hash__(self):\n return hash(self.id)", "def __hash__(self):\n return hash((self.name, self.state))", "def __eq__(self, other):\n return hash(self) == hash(other)", "def __eq__(self, other):\n return hash(self) == hash(other)", "def __eq__(self, other):\n return hash(self) == hash(other)", "def __hash__(self):\n return self._id", "def __hash__(self) -> int:\n return hash(self._hashable_content())", "def __hash__(self) -> int:\r\n return hash((hash(self.data), self.version, self.compatibilityLimit,\r\n self.script, self.seriesSignature, self.pha,\r\n self.identityInfo, self.message))", "def __hash__(self):\n return hash(str(self.key))", "def __hash__(self):\n return hash(str(self.key))", "def test_find_hash(twitter, message, expected):\n assert twitter.find_hash(message) == expected", "def __hash__(self):\n base = 1\n h = 0\n for l in self.data:\n for i in l:\n if i:\n h += base\n base *= 2\n return hash(h)", "def __hash__(self):\n # These entities are not cached, so we wont use their `id` if applicable.\n hash_value = 0\n \n # bot\n hash_value ^= hash(self.bot)\n \n # description\n description = self.description\n if (description is not None):\n hash_value ^= hash(description)\n \n # icon\n hash_value ^= hash(self.icon)\n \n # id\n hash_value ^= self.id\n \n # name\n name = self.name\n if (description is None) or (description != name):\n hash_value ^= hash(name)\n \n return hash_value", "def test__ChannelMetadataGuildMainBase__hash():\n parent_id = 202209180092\n name = 'Armelyrics'\n permission_overwrites = [\n PermissionOverwrite(202209180093, target_type = PermissionOverwriteTargetType.user)\n ]\n position = 7\n \n channel_metadata = ChannelMetadataGuildMainBase(\n parent_id = parent_id,\n name = name,\n permission_overwrites = permission_overwrites,\n position = position,\n )\n \n vampytest.assert_instance(hash(channel_metadata), int)", "def __eq__(self, other):\n if not isinstance(other, HandwrittenSignature):\n return False\n\n return self.__dict__ == other.__dict__", "def __hash__(self):\n x = xxhash.xxh64()\n x.update(self.puzzle)\n return x.intdigest()", "def __hash__(self):\n return hash(self.idl)" ]
[ "0.64707255", "0.63552177", "0.6180331", "0.61213714", "0.6096645", "0.60493755", "0.60439104", "0.603657", "0.603657", "0.60337245", "0.6027019", "0.6023845", "0.60236806", "0.59823734", "0.5976516", "0.5973415", "0.59712046", "0.59241724", "0.58853847", "0.5853793", "0.5851253", "0.58267725", "0.5819978", "0.5814218", "0.5810652", "0.5810652", "0.5806252", "0.5798702", "0.5798702", "0.5798702", "0.57956636", "0.5790008", "0.5790008", "0.5790008", "0.5790008", "0.57859457", "0.5766911", "0.5766911", "0.5766911", "0.5765899", "0.57596153", "0.5745226", "0.57442755", "0.57424736", "0.5741634", "0.57380724", "0.57368743", "0.57368743", "0.5734792", "0.57327396", "0.57327396", "0.5730465", "0.57136416", "0.57126", "0.57045656", "0.56969315", "0.569068", "0.5677607", "0.5675853", "0.5669072", "0.56683916", "0.5668335", "0.56637615", "0.56636775", "0.5655523", "0.5649773", "0.5639078", "0.5636748", "0.5629348", "0.56138074", "0.5612067", "0.55968153", "0.5589918", "0.5586067", "0.558461", "0.55839664", "0.557934", "0.5575915", "0.556682", "0.5560925", "0.5559574", "0.5559574", "0.5559574", "0.5559574", "0.55530673", "0.5551287", "0.5551287", "0.5551287", "0.5549955", "0.5540264", "0.5539916", "0.5539666", "0.5539666", "0.5537961", "0.5535621", "0.5521757", "0.55205804", "0.5507553", "0.55065566", "0.55020237" ]
0.78902173
0
Tests whether ``SoundboardSound.__eq__`` works as intended.
def test__SoundboardSound__eq(): available = False emoji = BUILTIN_EMOJIS['heart'] name = 'rember' user_id = 202305240038 volume = 0.69 sound_id = 202305240039 guild_id = 202305240040 keyword_parameters = { 'available': available, 'emoji': emoji, 'name': name, 'user_id': user_id, 'volume': volume, } sound = SoundboardSound.precreate( sound_id, guild_id = guild_id, **keyword_parameters, ) vampytest.assert_eq(sound, sound) vampytest.assert_ne(sound, object()) test_sound = SoundboardSound(**keyword_parameters,) vampytest.assert_eq(sound, test_sound) for field_name, field_value in ( ('available', True), ('emoji', BUILTIN_EMOJIS['x']), ('name', 'happy day'), ('user_id', 202305240041), ('volume', 0.70), ): test_sound = SoundboardSound(**{**keyword_parameters, field_name: field_value}) vampytest.assert_ne(test_sound, sound)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __eq__(self, other):\n if not isinstance(other, NhlOddsScoringPlay):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n if not isinstance(other, DiarizeAudio):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n if not isinstance(other, AudioFrame):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return isinstance(other, type(self)) and set(self.channels) == set(other.channels)", "def __eq__(self, other) -> bool:\r\n if isinstance(other, Square):\r\n if (self.board, self.file, self.rank) == (\r\n other.board, other.file, other.rank):\r\n return True\r\n \r\n return False", "def __eq__(self, other : dumbEmoji) -> bool:\n return type(other) == dumbEmoji and self.sendable == other.sendable", "def __eq__(self,*args):\r\n pass", "def __eq__(self,*args):\r\n pass", "def __eq__(self,*args):\r\n pass", "def __eq__(self,*args):\r\n pass", "def __eq__(self,*args):\r\n pass", "def __eq__(self,*args):\r\n pass", "def __eq__(self,*args):\r\n pass", "def __eq__(self,*args):\r\n pass", "def __eq__(self,*args):\r\n pass", "def __eq__(self,*args):\r\n pass", "def __eq__(self,*args):\r\n pass", "def __eq__(self,*args):\r\n pass", "def __eq__(self,*args):\r\n pass", "def __eq__(self,*args):\r\n pass", "def __eq__(self,*args):\r\n pass", "def __eq__(self,*args):\r\n pass", "def __eq__(self,*args):\r\n pass", "def __eq__(self,*args):\r\n pass", "def __eq__(self,*args):\r\n pass", "def __eq__(self,*args):\r\n pass", "def __eq__(self, other):\n if type(other) is not type(self):\n return False\n if self._sample_rate != other._sample_rate:\n return False\n if self._samples.shape != other._samples.shape:\n return False\n if np.any(self.samples != other._samples):\n return False\n return True", "def __eq__(self, *args):\r\n pass", "def __eq__(self, *args):\r\n pass", "def __eq__(self, *args):\r\n pass", "def __eq__(self, *args):\r\n pass", "def __eq__(self, *args):\r\n pass", "def __eq__(self, *args):\r\n pass", "def __eq__(self, *args):\r\n pass", "def __eq__(self, *args):\r\n pass", "def __eq__(self, *args):\r\n pass", "def __eq__(self, *args):\r\n pass", "def __eq__(self, *args):\r\n pass", "def __eq__(self, other):\n return self is other", "def __eq__(self, other):\n return self is other", "def __eq__(self,other):\n return self is other", "def __eq__(self, other):\n if not isinstance(other, CbbOddsGameOdd):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\r\n return self.__name == other.__name", "def __eq__(self, other):\r\n return self.__name == other.__name", "def equals(self, other): # -> bool:\n ...", "def __eq__(self,*args):\n pass", "def __eq__(self,*args):\n pass", "def __eq__(self,*args):\n pass", "def __eq__(self,*args):\n pass", "def __eq__(self,*args):\n pass", "def __eq__(self,*args):\n pass", "def __eq__(self,*args):\n pass", "def __eq__(self,*args):\n pass", "def __eq__(self,*args):\n pass", "def __eq__(self,*args):\n pass", "def __eq__(self,*args):\n pass", "def __eq__(self, other):\n if not isinstance(other, BasicMediaInfo):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return(\n self.name == other.name and\n self.hand == other.hand and\n self.score == other.score\n )", "def __eq__(self, other: object) -> bool:\n\n if not isinstance(other, self.__class__):\n return False\n\n if not self.simctl_type == other.simctl_type:\n return False\n\n return self.raw_info == other.raw_info", "def __eq__(self, 
other):\n\n return self.board == other.board", "def __eq__(self: _TT, other: object) -> bool:\n return self.eq(other) # type: ignore", "def __eq__(self, other):\r\n return (type(self) == type(other) and\r\n other.broadcastable == self.broadcastable)", "def __eq__(self, other):\n return (self.name == other.name) and (self.wavelength_control == other.wavelength_control) \\\n and (self.gonio_angles == other.gonio_angles) and (self.wl_angles == other.wl_angles) \\\n and (self.wavelength_minimum == other.wavelength_minimum) \\\n and (self.wavelength_maximum == other.wavelength_maximum) \\\n and (self.wavelength_bandwidth == other.wavelength_bandwidth)", "def __eq__(self, other):\n if isinstance(other, Card):\n return self.color == other.color and self.value == other.value\n return False", "def __eq__(self, other: 'Monitor') -> bool:\n if not isinstance(other, self.__class__):\n return False\n return self.__dict__ == other.__dict__", "def __eq__(self, *args):\n return _osgAnimation.SwigPyIterator___eq__(self, *args)", "def __eq__(self, other) -> bool:\n if not isinstance(other, Square):\n raise TypeError(\"Cannot compare Square object to object of different type\")\n\n return self.x_y == other.x_y", "def __eq__(self, other):\n return bool(_make._alpha_equal(self, other))", "def __eq__(self,other):\n if isinstance(other, self.__class__):\n return self.mac == other.mac\n else: return False", "def __eq__(self, other):\n pass", "def __eq__(self, other):\n pass", "def __eq__(self, other):\n return self.abs2phy.__eq__(other)", "def __eq__(self, other):\n return self.abs2phy.__eq__(other)", "def __eq__(self, other):\n if isinstance(other, _MethodConnection):\n return (self._im_func == other._im_func and\n self._im_self_ref == other._im_self_ref and\n self._im_class == other._im_class)\n return False", "def test_eq(self):\n dummy = DummyCryptographicObject()\n self.assertTrue(dummy == dummy)", "def equals(self, obj: object) -> bool:\n ...", "def __eq__(self, other):\n if not isinstance(other, VoiceTestDefinition):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\r\n if self.over in other and self.under in other:\r\n return True\r\n else:\r\n return False", "def __eq__(self, other):\n return self.is_(other)", "def __eq__(self, other):\n return self.times == other.times", "def __eq__(self, other):\n if not isinstance(other, Alert):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, name):\n return self.name == name", "def __eq__(self, frac):\n return self.equal == frac.equal", "def __eq__(self, other: Any) -> bool:\n\n if isinstance(other, Signal):\n return all(\n [\n np.array_equal(self._domain, other.domain),\n np.array_equal(self._range, other.range),\n self._interpolator is other.interpolator,\n self._interpolator_kwargs == other.interpolator_kwargs,\n self._extrapolator is other.extrapolator,\n self._extrapolator_kwargs == other.extrapolator_kwargs,\n ]\n )\n else:\n return False", "def __eq__(self, other):\n if not isinstance(other, EventSubscription):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return equal(self, other)", "def __eq__(self, *args):\n return _ida_hexrays.cswitch_t___eq__(self, *args)", "def __eq__(self, *args):\n return _ida_frame.stkpnt_t___eq__(self, *args)", "def __eq__(self, other) -> None:\n\t\tfor k, v in enumerate(self.board):\n\t\t\tif v != other.board[k]:\n\t\t\t\treturn False\n\t\treturn True", "def __eq__(self, other):\n\n if type(other) != type(self):\n return 
False\n if other.description != self.description:\n return False\n if other.func != self.func:\n return False\n return True", "def __eq__ (self, other):\n if type(self) == type(other):\n return self._m == other._m\n else:\n return False", "def __eq__(self, other: Event) -> bool:\n return self.timestamp == other.timestamp", "def __eq__(self, other):\n if isinstance(other, Quaternion):\n return self.__real == other.__real and (self.__img == other.__img).all()\n return False", "def __eq__(self, other):\n return np.array_equal(\n self.np_floats(),\n other.np_floats()) and np.array_equal(\n self.np_ints(),\n other.np_ints()) and np.array_equal(\n self.freqs,\n other.freqs)", "def __eq__(self, other: t.Any) -> bool:\n return self._op_bool('__eq__', other)", "def __eq__(self, other):\n return type(other) is type(self) and other.color == self.color", "def __eq__(self, oth):\n return int(self) != oth", "def __eq__(self, other):\n return isinstance(other, self.__class__)", "def __eq__(self, p_object):\n if any([self[i] != p_object[i] for i in range(9)]):\n return False\n return True", "def __eq__(self, other):\n return self.is_red() == other.is_red()" ]
[ "0.7053136", "0.69082266", "0.68528724", "0.6663054", "0.6607504", "0.65843195", "0.65642565", "0.65642565", "0.65642565", "0.65642565", "0.65642565", "0.65642565", "0.65642565", "0.65642565", "0.65642565", "0.65642565", "0.65642565", "0.65642565", "0.65642565", "0.65642565", "0.65642565", "0.65642565", "0.65642565", "0.65642565", "0.65642565", "0.65642565", "0.6562754", "0.6549555", "0.6549555", "0.6549555", "0.6549555", "0.6549555", "0.6549555", "0.6549555", "0.6549555", "0.6549555", "0.6549555", "0.6549555", "0.64797044", "0.64797044", "0.64464974", "0.6430881", "0.64232206", "0.64232206", "0.6398154", "0.6378347", "0.6378347", "0.6378347", "0.6378347", "0.6378347", "0.6378347", "0.6378347", "0.6378347", "0.6378347", "0.6378347", "0.6378347", "0.6376513", "0.63345045", "0.6332525", "0.6331471", "0.6330687", "0.63168937", "0.63105917", "0.6305952", "0.62763965", "0.62661934", "0.62649566", "0.6260623", "0.6252648", "0.62512195", "0.62512195", "0.6233617", "0.6233617", "0.62290305", "0.6209636", "0.62045026", "0.61957705", "0.6193682", "0.61931336", "0.61909986", "0.61871845", "0.61862636", "0.6178862", "0.61780435", "0.6177934", "0.617655", "0.615803", "0.6156647", "0.6156217", "0.61526144", "0.6148363", "0.61290765", "0.6121717", "0.61161286", "0.61157393", "0.61116946", "0.6107948", "0.6101771", "0.6095533", "0.60935014" ]
0.7741757
0
Getter method for usr_ping_count, mapped from YANG variable /mpls_state/statistics_oam/usr_ping_count (uint32)
def _get_usr_ping_count(self): return self.__usr_ping_count
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _set_usr_ping_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"usr-ping-count\", rest_name=\"usr-ping-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"usr_ping_count must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"usr-ping-count\", rest_name=\"usr-ping-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__usr_ping_count = t\n if hasattr(self, '_set'):\n self._set()", "def get_online_user_count(khoros_object):\n liql_query = \"select count(*) from users where online_status = 'online'\"\n api_response = liql.perform_query(khoros_object, liql_query=liql_query, verify_success=True)\n return int(api_response['data']['count'])", "def getUserCount(self):\n logger.debug('Getting the number of users discovered...')\n return get_text(get_element_by_css(\"span[data-nsmodule='usersdiscovered']\"))", "def getViewPortUserCount(self):\n logger.debug('Getting map view port user count...')\n elements = get_elements_by_css(\".leaflet-marker-icon.srcCluster\")\n users = 0\n for element in elements:\n users += int(get_text(element))\n return users", "def get_registered_users_count(khoros_object):\n response = api.make_v1_request(khoros_object, '/users/registered/count')\n return response['response']['value']['$']", "def _set_usr_traceroute_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"usr-traceroute-count\", rest_name=\"usr-traceroute-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"usr_traceroute_count must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"usr-traceroute-count\", rest_name=\"usr-traceroute-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__usr_traceroute_count = t\n if hasattr(self, '_set'):\n self._set()", "def get_all_users_count(khoros_object):\n liql_query = 'SELECT count(*) FROM users'\n api_response = liql.perform_query(khoros_object, liql_query=liql_query, verify_success=True)\n 
return int(api_response['data']['count'])", "def count_users(self):\n return self.get_session.query(func.count(self.user_model.id)).scalar()", "def count_user():\r\n session = tables.get_session()\r\n if session is None:\r\n return 0\r\n count = 0\r\n try:\r\n user_account = UserAccount()\r\n uid = user_account.get_max_uid(session)\r\n if uid is None:\r\n return 0\r\n return uid + 1\r\n except SQLAlchemyError as err:\r\n LOGGER.error('Count user number failed: %s', err)\r\n return count\r\n finally:\r\n session.close()\r\n return count", "def get_online_users_count(khoros_object, anonymous=None, registered=None):\n if anonymous and not registered:\n response = api.make_v1_request(khoros_object, '/users/online/anonymous/count')\n elif registered and not anonymous:\n response = api.make_v1_request(khoros_object, '/users/online/registered/count')\n else:\n response = api.make_v1_request(khoros_object, '/users/online/count')\n return response['response']['value']['$']", "def _get_usr_traceroute_count(self):\n return self.__usr_traceroute_count", "def get_num_psus(self):\n return len(self._psu_list)", "def _get_count(_khoros_object, _user_id, _object_type):\n _api_response = query_users_table_by_id(_khoros_object, f'{_object_type}.count(*)', _user_id)\n return int(_api_response['data']['items'][0][_object_type]['count'])", "def n_users(self):\n if self._n_users is None:\n self._n_users = len(self.user_unique_vals)\n return self._n_users", "def total_users(user):\n user_count = User.objects.filter(is_active=True).count()\n\n return NumberResponse(user_count, 'Total number of users')", "async def get_user_hw_action_list_count(\n request: Request,\n user_id: object = None,\n name=None) -> int:\n\n ret_val = 0\n query_str = get_user_hw_action_list_count_query\n try:\n\n async with request.app.pg.acquire() as connection:\n row = await connection.fetchval(query_str, user_id, name)\n if row is not None:\n ret_val = row\n except Exception as gclcerr:\n logger.error('get_user_hw_action_list_count service erred with: {}'.format(gclcerr))\n\n return ret_val", "def unseen_count_for(self, user):\r\n return self.filter(user=user, unseen=True).count()", "def count():\r\n return User.query.count()", "def get_user_count(self):\n done = self.cur.execute(\"SELECT username FROM users\")\n return done", "def count_users(self, session) -> int:\n\n users_quantity = session.query(User).count()\n return users_quantity", "def get_users_count(khoros_object, registered=False, online=False):\n if all((registered, online)):\n raise errors.exceptions.InvalidParameterError('You can only select registered or online users but not both.')\n if registered:\n user_count = get_registered_users_count(khoros_object)\n elif online:\n user_count = get_online_user_count(khoros_object)\n else:\n user_count = get_all_users_count(khoros_object)\n return user_count", "def get_number_of_pins_for_user(self, user):\n\t\treturn self.active_pins().filter(board__user=user).count()", "def headcount(self):\n self.cleanup()\n return len([True for u in self.users if not u.name.startswith('System/')])", "def get_users_count():\n # return User.objects.all().count()\n return User.objects.filter(is_active=True,\n last_login__isnull=False).count()", "def get_connected_users_count(room: PublicChatRoom) -> int:\n return room.users.count()", "def number_users_active(self) -> int:\r\n unique_users = {\r\n row['user']\r\n for row in self.rows\r\n }\r\n\r\n return len(unique_users)", "def get_session_count(self):\n\t\treturn 
call_sdk_function('PrlUsrInfo_GetSessionCount', self.handle)", "def get_number_of_likes_for_user(self, user):\n\t\tfrom pins.models import Pin\n\t\tpin_ctype = ContentType.objects.get_for_model(Pin)\n\t\tpin_list = Pin.objects.active_pins().filter(board__user=user).values_list('pk', flat=True)\n\t\treturn self.filter(content_type=pin_ctype, object_id__in=pin_list).count()", "def GetCount(self, label: 'unsigned char') -> \"unsigned long long\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIUS3IUC3_GetCount(self, label)", "def count_user_push(username):\n public_activities = json.loads(query_user_activities(username))\n push_count = 0\n for activity in public_activities:\n if activity['type'] == 'PushEvent':\n push_count += 1\n return 'Total push count: ' + str(push_count)", "def get_uplink_cnt(self) -> int:\n try:\n self._serial.transmit(b'\\x53\\x00')\n response = self._get_reply(0x53, 4, 0.25)\n finally:\n self._gpio.sleep()\n\n return int.from_bytes(response[2:6], 'little', signed=False)", "def count(self):\n\n return self._get(\"count\", rtype=UInt)", "def GetCount(self, label: 'unsigned short') -> \"unsigned long long\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIUC3IUS3_GetCount(self, label)", "async def connected_users_count(self, event):\n print(\"PublicChatConsumer\", \"connected_users_count\",\n event[\"connected_users_count\"])\n await self.send_json({\n \"msg_type\": MSG_TYPE_CONNECTED_USERS_COUNT,\n \"connected_users_count\": event[\"connected_users_count\"]\n })", "def getConnectedUsersCount(self):\n\n\t\treturn len(self.connectedUsers)", "def msgStats():\n r = {}\n r[\"users\"] = User.count()\n return jsonify(r)", "def GetCount(self, label: 'unsigned short') -> \"unsigned long long\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIUS3IUS3_GetCount(self, label)", "def com_adobe_aem_screens_player_pingfrequency(self) -> ConfigNodePropertyInteger:\n return self._com_adobe_aem_screens_player_pingfrequency", "def get_total_users(request):\n number_of_users = User.objects.count()\n res = {\n 'total_users': number_of_users,\n }\n return Response(res, status=status.HTTP_200_OK)", "def activeusercount(self):\n sql = '''select to_char(count(*)-1, 'FM99999999999999990') retvalue \n from v$session where username is not null \n and status='ACTIVE' '''\n self.cur.execute(sql)\n res = self.cur.fetchall()\n for i in res:\n print(i[0])", "def GetCount(self, label: 'unsigned char') -> \"unsigned long long\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIUS2IUC2_GetCount(self, label)", "def getViewPortAppCount(self):\n logger.debug('Getting map view port app count...')\n elements = get_elements_by_css(\".leaflet-marker-icon.dstCluster\")\n users = 0\n for element in elements:\n users += int(get_text(element))\n return users", "def inbox_count_for(user):\n return Message.inbox.for_user(user).unread().count()", "def user_stats(request):\r\n user_count = UserMgr.count()\r\n pending_activations = ActivationMgr.count()\r\n users_with_bookmarks = BmarkMgr.count(distinct_users=True)\r\n return _api_response(request, {\r\n 'count': user_count,\r\n 'activations': pending_activations,\r\n 'with_bookmarks': users_with_bookmarks\r\n })", "def get_historic_users_count():\n return User.objects.all().count()", "def get_user_view_count(self, username):\n self.cur.execute(\"SELECT CAST(SUM(view_count) AS DECIMAL(10, 0)) FROM videos WHERE uploader = \\\"{}\\\"\".format(username))\n return 
self.cur.fetchone()[0]", "def GetCount(self, label: 'unsigned short') -> \"unsigned long long\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIF3IUS3_GetCount(self, label)", "def GetCount(self, label: 'unsigned char') -> \"unsigned long long\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIUC3IUC3_GetCount(self, label)", "def get_usage_count(request, user_id):\n board_count = Member.objects.filter(user_id=user_id, is_creator=True).count()\n return Response({\"board_count\": board_count})", "def get_cpu_count(self):\n\t\treturn call_sdk_function('PrlSrvCfg_GetCpuCount', self.handle)", "def GetCount(self, label: 'unsigned short') -> \"unsigned long long\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIUC2IUS2_GetCount(self, label)", "def count_total_each_user():\r\n trans = transaction.begin()\r\n user_list = UserMgr.get_list(active=True)\r\n for user in user_list:\r\n StatBookmarkMgr.count_user_bookmarks(user.username)\r\n trans.commit()", "def GetCount(self, label: 'unsigned short') -> \"unsigned long long\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIUS2IUS2_GetCount(self, label)", "def get_online_count():\n return dict(online_user=get_online_users())", "def get_session_count(self, network_tuple: NetworkTuple) -> int:\n return len(self.network_tuple_stream_id[network_tuple])", "def user_counts(user_id):\n return _fetch_records(f\"SELECT rating_type, count FROM counts_by_rating_type WHERE user_id = {user_id} AND count > 0\")", "def GetCount(self, label: 'short') -> \"unsigned long long\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIUS3ISS3_GetCount(self, label)", "def get_user_ownership_count(\n self, user: get_user_model(), prefetched: bool = False\n ) -> int:\n if prefetched:\n return sum(\n ownership.count\n for localisation in self.localisations.all()\n for ownership in localisation.ownerships.all()\n if ownership.owner_id == user.id\n )\n\n return self.localisations.aggregate(\n card_count=Sum(\n Case(\n When(ownerships__owner=user, then=\"ownerships__count\"),\n output_field=IntegerField(),\n default=0,\n )\n )\n )[\"card_count\"]", "def getSessionCount(self):\n logger.debug('Getting the number of sessions discovered...')\n return get_text(get_element_by_css(\"span[data-nsmodule='sessionsdiscovered']\"))", "def countPlayers():\n db, cursor = connect()\n cursor.execute( \" SELECT count(*) as num FROM players \")\n count = cursor.fetchone()[0]\n return int(count)", "def GetCount(self, label: 'unsigned char') -> \"unsigned long long\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIF3IUC3_GetCount(self, label)", "def wordcount(self):\n return int(self._fetch_element('user_wordcount'))", "def GetCount(self, label: 'unsigned short') -> \"unsigned long long\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterISS3IUS3_GetCount(self, label)", "def countPlayers():\n\n conn, c = main.connect()\n c.execute(\"SELECT count(*) FROM player\")\n\n return c.fetchone()[0]", "async def connected_user_count(self, event):\n\t\t# Send a message down to the client\n\t\tprint(\"DocumentChatConsumer: connected_user_count: count: \" + str(event[\"connected_user_count\"]))\n\t\tawait self.send_json(\n\t\t\t{\n\t\t\t\t\"msg_type\": MSG_TYPE_CONNECTED_USER_COUNT,\n\t\t\t\t\"connected_user_count\": event[\"connected_user_count\"]\n\t\t\t},\n\t\t)", "def get_user_ownership_count(\n self, user: 
get_user_model(), prefetched: bool = False\n ) -> int:\n if prefetched:\n return sum(\n ownership.count\n for card_printing in self.printings.all()\n for localisation in card_printing.localisations.all()\n for ownership in localisation.ownerships.all()\n if ownership.owner_id == user.id\n )\n\n return self.printings.aggregate(\n card_count=Sum(\n Case(\n When(\n localisations__ownerships__owner=user,\n then=\"localisations__ownerships__count\",\n ),\n output_field=IntegerField(),\n default=0,\n )\n )\n )[\"card_count\"]", "def countPlayers():\n DB = dbc()\n c = DB.cursor()\n c.execute('SELECT COUNT(*) from players WHERE active = 1')\n total = c.fetchone()\n DB.close()\n return int(total[0])", "def GetCount(self, label: 'unsigned char') -> \"unsigned long long\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterISS3IUC3_GetCount(self, label)", "def get_os_uptime(self):\n\t\treturn call_sdk_function('PrlStat_GetOsUptime', self.handle)", "def countPlayers():\n conn, c = connect()\n c.execute(\"SELECT COUNT(*) FROM players;\")\n return c.fetchone()[0]", "def countPlayers():\n conn = connect()\n cursor = conn.cursor()\n cursor.execute(\"SELECT count(*) FROM players\")\n playerCount = cursor.fetchone()[0]\n conn.close()\n return playerCount", "def get_followers_count(khoros_object, user_settings=None, user_id=None, login=None, email=None):\n user_settings = _process_settings_and_user_id(khoros_object, user_settings, user_id, login, email)\n return _get_count(khoros_object, user_settings['id'], 'followers')", "def SessionCount(self):\n if self.force_auto_sync:\n self.get('SessionCount')\n return self._SessionCount", "def get_total_session_count(self) -> int:\n return self.streams_count", "def get_cpu_count(self):\n\t\treturn call_sdk_function('PrlVmCfg_GetCpuCount', self.handle)", "def number_of_friends(user):\n user_id = user[\"id\"]\n friend_ids = friendships[user_id]\n return len(friend_ids)", "def load_user_from_json(path: str) -> Tuple[str, int, int]:\n with lzma.open(path + '.json.xz') as f:\n js = json.load(f)\n return (js['node']['owner']['username'], js['node']['owner']['edge_followed_by']['count'],\n js['node']['edge_media_preview_like']['count'])", "def get_total_ppresults(self):\n\n self._logger.debug(\"Getting ping pong results count\")\n\n try:\n self.check_if_db_connected()\n cursor = self._db_conn.cursor()\n cursor.execute(\"SELECT COUNT(result_id) FROM pp_result\")\n count = cursor.fetchone()[0]\n\n except MySQLdb.OperationalError:\n self._logger.error(\"MySQL operational error occured\")\n traceback.print_exc()\n raise exceptions.DBConnectionError(\"Cannot connect to MySQL server\")\n\n except MySQLdb.ProgrammingError:\n self._logger.error(\"MySQL programming error\")\n traceback.print_exc()\n raise exceptions.DBSyntaxError(\"MySQL syntax error\")\n\n else:\n return count", "def GetCount(self, label: 'unsigned char') -> \"unsigned long long\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIUC2IUC2_GetCount(self, label)", "async def get_player_total(user_id):\n return ex.first_result(await ex.conn.fetchrow(\"SELECT total FROM blackjack.currentstatus WHERE userid = $1\", user_id))", "def node_count(self) -> int:\n return int(self.graph_tuple_stats.node_count or 0)", "def get_amount_users() -> User:\n return User.objects.all().count()", "def GetCount(self, label: 'short') -> \"unsigned long long\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIUC3ISS3_GetCount(self, label)", "def countPlayers():\n 
conn = connect()\n cur = conn.cursor()\n cur.execute(\"SELECT COUNT(*) FROM players\")\n players = int(cur.fetchone()[0])\n conn.close()\n return players", "def count_players(definition):\n _, player_definition = parse_player_definition(definition)\n return (int(player_definition['left_players']) +\n int(player_definition['right_players']))", "def countPlayers():\n conn, cur = connect()\n query = \"SELECT count(*) AS player_count FROM players;\"\n try:\n cur.execute(query)\n except:\n print(\"Error encountered when selecting player count from players table\")\n num_players = cur.fetchone()\n conn.close()\n return num_players['player_count']", "def count(cls, client) :\n try :\n obj = nshttpprofile()\n option_ = options()\n option_.count = True\n response = obj.get_resources(client, option_)\n if response :\n return response[0].__dict__['___count']\n return 0\n except Exception as e :\n raise e", "def do_get_user_comment_count(parser, token):\r\n error_message = \"%r tag must be of format {%% %r for OBJECT as CONTEXT_VARIABLE %%}\" % (token.contents.split()[0], token.contents.split()[0])\r\n try:\r\n split = token.split_contents()\r\n except ValueError:\r\n raise template.TemplateSyntaxError, error_message\r\n if len(split) != 5:\r\n raise template.TemplateSyntaxError, error_message\r\n return UserCommentCountNode(split[2], split[4])", "def bytes_used(self):\n return int(self.status[\"pgmap\"][\"bytes_used\"])", "def get_kudos_given_count(khoros_object, user_settings=None, user_id=None, login=None, email=None):\n user_settings = _process_settings_and_user_id(khoros_object, user_settings, user_id, login, email)\n return _get_sum_weight(khoros_object, user_settings['id'], 'kudos_given')", "def num_gpus():\n count = ctypes.c_int()\n check_call(_LIB.MXGetGPUCount(ctypes.byref(count)))\n return count.value", "def get_count(username):\n return get_contributor(username)[\"count\"]", "def get_total_number_of_buildings_for_user(request):\n buildings_count = get_buildings_for_user_count(request.user)\n\n return {'status': 'success', 'buildings_count': buildings_count}", "def number_of_players(self) -> int:\n return self.param.number_of_players", "async def osu(self, ctx, u, mode=0):\n if self.api:\n mode = self.osu_mode_converter(mode=mode)\n if mode is None:\n raise utils.errors.ServiceError(\"Unknown mode\")\n user = await self.api.get_user(u, mode=mode)\n try:\n user = user[0]\n pp = user.pp_raw # CHAR SAVING.\n ss = user.count_rank_ss\n s = user.count_rank_s\n a = user.count_rank_a\n except IndexError:\n return await ctx.send(\"User does not exist, \" \"maybe try one that does\")\n else:\n raise utils.errors.ServiceError(\"osu! api key not configured\")\n osu_embed = discord.Embed(title=f\"osu! 
stats\", colour=0x690E8)\n osu_embed.set_author(\n name=f\"{u} ({user.country}\"\n f\" #{user.pp_country_rank}, global\"\n f\" #{user.pp_rank})\",\n icon_url=\"https://osu.ppy.sh/images/flags/\" f\"{user.country}.png\",\n )\n osu_embed.set_thumbnail(url=f\"https://a.ppy.sh/{user.user_id}\")\n osu_embed.add_field(name=\"Play count\", value=user.playcount)\n osu_embed.add_field(name=\"Ranked score\", value=user.ranked_score)\n osu_embed.add_field(name=\"Total score\", value=user.total_score)\n osu_embed.add_field(name=\"Level\", value=int(user.level))\n osu_embed.add_field(name=\"Total PP\", value=f\"{round(pp, 2)} PP\")\n osu_embed.add_field(name=\"Accuracy\", value=f\"{user.accuracy:.1f}%\")\n osu_embed.add_field(name=\"Plays (SS/S/A)\", value=f\"{ss}/{s}/{a}\")\n await ctx.send(embed=osu_embed)", "def GetCount(self, label: 'unsigned short') -> \"unsigned long long\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIF2IUS2_GetCount(self, label)", "def rtt_get_num_up_buffers(self):\n cmd = enums.JLinkRTTCommand.GETNUMBUF\n dir = ctypes.c_int(enums.JLinkRTTDirection.UP)\n return self.rtt_control(cmd, dir)", "def get_count():\n _check_init()\n return _pypm.CountDevices()", "def get_usercounts(self):\n word_counts = defaultdict(int) # {}\n with open(self.filename) as f:\n for line in f:\n if line:\n username, words = self.get_username_words(line) # username1, cat dog\n num_words = len(words.split()) # 1\n word_counts[username] += num_words # {u1: 3, u2: 4, }\n return word_counts", "def cpu_count(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"cpu_count\")" ]
[ "0.7919465", "0.62490904", "0.5974101", "0.5872466", "0.5848729", "0.5829324", "0.58061343", "0.57633376", "0.5698802", "0.5670618", "0.55348474", "0.54909444", "0.54774773", "0.54751647", "0.54595673", "0.54140943", "0.5395396", "0.5368198", "0.5319455", "0.531552", "0.52972794", "0.52741754", "0.5245219", "0.5238117", "0.522497", "0.52011055", "0.51978695", "0.5192744", "0.5141922", "0.51149833", "0.5098746", "0.5086633", "0.5075276", "0.50567085", "0.50394785", "0.50337815", "0.5025277", "0.50150234", "0.49781194", "0.4972393", "0.49462026", "0.4938548", "0.4930883", "0.49250722", "0.49108106", "0.48975608", "0.4847605", "0.48400116", "0.4833803", "0.48175684", "0.48172632", "0.48017853", "0.47911245", "0.47789246", "0.47707024", "0.4768262", "0.4753605", "0.4746862", "0.47440583", "0.4743677", "0.4729163", "0.47113046", "0.47089064", "0.47044793", "0.469868", "0.46930602", "0.46889597", "0.4682547", "0.4678865", "0.46775553", "0.46774152", "0.4677279", "0.46730906", "0.4673086", "0.4672219", "0.46647516", "0.46640155", "0.46468365", "0.46457478", "0.46431237", "0.46405342", "0.46398163", "0.46296614", "0.46246573", "0.46221566", "0.46193063", "0.4617219", "0.46120757", "0.46102643", "0.46101764", "0.46008822", "0.4599935", "0.45958275", "0.45858085", "0.45779532", "0.45585328", "0.45583865", "0.45574793", "0.45561743", "0.45546776" ]
0.6955739
1
Setter method for usr_ping_count, mapped from YANG variable /mpls_state/statistics_oam/usr_ping_count (uint32)
def _set_usr_ping_count(self, v, load=False): if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="usr-ping-count", rest_name="usr-ping-count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False) except (TypeError, ValueError): raise ValueError({ 'error-string': """usr_ping_count must be of a type compatible with uint32""", 'defined-type': "uint32", 'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="usr-ping-count", rest_name="usr-ping-count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)""", }) self.__usr_ping_count = t if hasattr(self, '_set'): self._set()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_usr_ping_count(self):\n return self.__usr_ping_count", "def _set_usr_traceroute_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"usr-traceroute-count\", rest_name=\"usr-traceroute-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"usr_traceroute_count must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"usr-traceroute-count\", rest_name=\"usr-traceroute-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__usr_traceroute_count = t\n if hasattr(self, '_set'):\n self._set()", "def get_online_user_count(khoros_object):\n liql_query = \"select count(*) from users where online_status = 'online'\"\n api_response = liql.perform_query(khoros_object, liql_query=liql_query, verify_success=True)\n return int(api_response['data']['count'])", "def count_user():\r\n session = tables.get_session()\r\n if session is None:\r\n return 0\r\n count = 0\r\n try:\r\n user_account = UserAccount()\r\n uid = user_account.get_max_uid(session)\r\n if uid is None:\r\n return 0\r\n return uid + 1\r\n except SQLAlchemyError as err:\r\n LOGGER.error('Count user number failed: %s', err)\r\n return count\r\n finally:\r\n session.close()\r\n return count", "def getViewPortUserCount(self):\n logger.debug('Getting map view port user count...')\n elements = get_elements_by_css(\".leaflet-marker-icon.srcCluster\")\n users = 0\n for element in elements:\n users += int(get_text(element))\n return users", "def n_users(self):\n if self._n_users is None:\n self._n_users = len(self.user_unique_vals)\n return self._n_users", "def getUserCount(self):\n logger.debug('Getting the number of users discovered...')\n return get_text(get_element_by_css(\"span[data-nsmodule='usersdiscovered']\"))", "def count_users(self):\n return self.get_session.query(func.count(self.user_model.id)).scalar()", "def user_count(self, user_count):\n\n self._user_count = user_count", "def user_count(self, user_count):\n\n self._user_count = user_count", "def get_registered_users_count(khoros_object):\n response = api.make_v1_request(khoros_object, '/users/registered/count')\n return response['response']['value']['$']", "def get_all_users_count(khoros_object):\n liql_query = 'SELECT count(*) FROM users'\n api_response = liql.perform_query(khoros_object, liql_query=liql_query, verify_success=True)\n return int(api_response['data']['count'])", "def get_num_psus(self):\n return len(self._psu_list)", "def count_users(self, session) -> int:\n\n users_quantity = session.query(User).count()\n return users_quantity", "def total_users(user):\n user_count = User.objects.filter(is_active=True).count()\n\n return NumberResponse(user_count, 'Total number of users')", "def unseen_count_for(self, user):\r\n return 
self.filter(user=user, unseen=True).count()", "def get_online_users_count(khoros_object, anonymous=None, registered=None):\n if anonymous and not registered:\n response = api.make_v1_request(khoros_object, '/users/online/anonymous/count')\n elif registered and not anonymous:\n response = api.make_v1_request(khoros_object, '/users/online/registered/count')\n else:\n response = api.make_v1_request(khoros_object, '/users/online/count')\n return response['response']['value']['$']", "def number_users_active(self) -> int:\r\n unique_users = {\r\n row['user']\r\n for row in self.rows\r\n }\r\n\r\n return len(unique_users)", "def count():\r\n return User.query.count()", "async def connected_users_count(self, event):\n print(\"PublicChatConsumer\", \"connected_users_count\",\n event[\"connected_users_count\"])\n await self.send_json({\n \"msg_type\": MSG_TYPE_CONNECTED_USERS_COUNT,\n \"connected_users_count\": event[\"connected_users_count\"]\n })", "def headcount(self):\n self.cleanup()\n return len([True for u in self.users if not u.name.startswith('System/')])", "def add_user(self, u: \"Node\") -> None:\n\n if u not in self.users_:\n self.users_[u] = 0\n self.users_[u] += 1", "def _get_count(_khoros_object, _user_id, _object_type):\n _api_response = query_users_table_by_id(_khoros_object, f'{_object_type}.count(*)', _user_id)\n return int(_api_response['data']['items'][0][_object_type]['count'])", "def _get_usr_traceroute_count(self):\n return self.__usr_traceroute_count", "def get_user_count(self):\n done = self.cur.execute(\"SELECT username FROM users\")\n return done", "def get_number_of_pins_for_user(self, user):\n\t\treturn self.active_pins().filter(board__user=user).count()", "def get_number_of_likes_for_user(self, user):\n\t\tfrom pins.models import Pin\n\t\tpin_ctype = ContentType.objects.get_for_model(Pin)\n\t\tpin_list = Pin.objects.active_pins().filter(board__user=user).values_list('pk', flat=True)\n\t\treturn self.filter(content_type=pin_ctype, object_id__in=pin_list).count()", "def get_users_count(khoros_object, registered=False, online=False):\n if all((registered, online)):\n raise errors.exceptions.InvalidParameterError('You can only select registered or online users but not both.')\n if registered:\n user_count = get_registered_users_count(khoros_object)\n elif online:\n user_count = get_online_user_count(khoros_object)\n else:\n user_count = get_all_users_count(khoros_object)\n return user_count", "def count_total_each_user():\r\n trans = transaction.begin()\r\n user_list = UserMgr.get_list(active=True)\r\n for user in user_list:\r\n StatBookmarkMgr.count_user_bookmarks(user.username)\r\n trans.commit()", "def get_users_count():\n # return User.objects.all().count()\n return User.objects.filter(is_active=True,\n last_login__isnull=False).count()", "def getConnectedUsersCount(self):\n\n\t\treturn len(self.connectedUsers)", "def get_connected_users_count(room: PublicChatRoom) -> int:\n return room.users.count()", "async def get_user_hw_action_list_count(\n request: Request,\n user_id: object = None,\n name=None) -> int:\n\n ret_val = 0\n query_str = get_user_hw_action_list_count_query\n try:\n\n async with request.app.pg.acquire() as connection:\n row = await connection.fetchval(query_str, user_id, name)\n if row is not None:\n ret_val = row\n except Exception as gclcerr:\n logger.error('get_user_hw_action_list_count service erred with: {}'.format(gclcerr))\n\n return ret_val", "def count_user_push(username):\n public_activities = json.loads(query_user_activities(username))\n 
push_count = 0\n for activity in public_activities:\n if activity['type'] == 'PushEvent':\n push_count += 1\n return 'Total push count: ' + str(push_count)", "def com_adobe_aem_screens_player_pingfrequency(self) -> ConfigNodePropertyInteger:\n return self._com_adobe_aem_screens_player_pingfrequency", "async def connected_user_count(self, event):\n\t\t# Send a message down to the client\n\t\tprint(\"DocumentChatConsumer: connected_user_count: count: \" + str(event[\"connected_user_count\"]))\n\t\tawait self.send_json(\n\t\t\t{\n\t\t\t\t\"msg_type\": MSG_TYPE_CONNECTED_USER_COUNT,\n\t\t\t\t\"connected_user_count\": event[\"connected_user_count\"]\n\t\t\t},\n\t\t)", "def get_uplink_cnt(self) -> int:\n try:\n self._serial.transmit(b'\\x53\\x00')\n response = self._get_reply(0x53, 4, 0.25)\n finally:\n self._gpio.sleep()\n\n return int.from_bytes(response[2:6], 'little', signed=False)", "def count(self, value):\n \n self._count = int(value)", "def get_session_count(self):\n\t\treturn call_sdk_function('PrlUsrInfo_GetSessionCount', self.handle)", "def count_user_bookmarks(username):\r\n total = BmarkMgr.count(username)\r\n stat = StatBookmark(\r\n attrib=USER_CT.format(username),\r\n data=total\r\n )\r\n DBSession.add(stat)", "def count(self):\n\n return self._get(\"count\", rtype=UInt)", "def activeusercount(self):\n sql = '''select to_char(count(*)-1, 'FM99999999999999990') retvalue \n from v$session where username is not null \n and status='ACTIVE' '''\n self.cur.execute(sql)\n res = self.cur.fetchall()\n for i in res:\n print(i[0])", "def AddUser(self, usercount, user):\n for i in range(usercount):\n login = string.replace(user[i]['Login'], ' ', '')\n home = self.__homeprefix + login[0] + '/' + login\n action = 'userman -A ' + login + ' -p ' + user[i]['Passwd'] + ' -u ' + str(user[i]['UID']) + \\\n ' -g ' + str(user[i]['GID']) + ' -H ' + home + ' -s ' + user[i]['Shell'] \n output = commands.getstatusoutput(action)\n print output\n updatecount, update = self.__sqlData[\"UPDATE AccUser SET ToDo = 0 WHERE Login = '%s'\" % (login)]", "def get_user_view_count(self, username):\n self.cur.execute(\"SELECT CAST(SUM(view_count) AS DECIMAL(10, 0)) FROM videos WHERE uploader = \\\"{}\\\"\".format(username))\n return self.cur.fetchone()[0]", "def GetCount(self, label: 'unsigned char') -> \"unsigned long long\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIUS3IUC3_GetCount(self, label)", "def number_of_players(self) -> int:\n return self.param.number_of_players", "def msgStats():\n r = {}\n r[\"users\"] = User.count()\n return jsonify(r)", "def guests_counter(window, n_guests):\r\n window.write_event_value('-COUNT-', n_guests)", "def user_stats(request):\r\n user_count = UserMgr.count()\r\n pending_activations = ActivationMgr.count()\r\n users_with_bookmarks = BmarkMgr.count(distinct_users=True)\r\n return _api_response(request, {\r\n 'count': user_count,\r\n 'activations': pending_activations,\r\n 'with_bookmarks': users_with_bookmarks\r\n })", "def get_total_users(request):\n number_of_users = User.objects.count()\n res = {\n 'total_users': number_of_users,\n }\n return Response(res, status=status.HTTP_200_OK)", "def GetCount(self, label: 'unsigned short') -> \"unsigned long long\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIUC3IUS3_GetCount(self, label)", "def get_cpu_count(self):\n\t\treturn call_sdk_function('PrlSrvCfg_GetCpuCount', self.handle)", "def get_followers_count(khoros_object, user_settings=None, user_id=None, login=None, 
email=None):\n user_settings = _process_settings_and_user_id(khoros_object, user_settings, user_id, login, email)\n return _get_count(khoros_object, user_settings['id'], 'followers')", "def getViewPortAppCount(self):\n logger.debug('Getting map view port app count...')\n elements = get_elements_by_css(\".leaflet-marker-icon.dstCluster\")\n users = 0\n for element in elements:\n users += int(get_text(element))\n return users", "def GetCount(self, label: 'unsigned char') -> \"unsigned long long\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIUS2IUC2_GetCount(self, label)", "def get_historic_users_count():\n return User.objects.all().count()", "def GetCount(self, label: 'unsigned short') -> \"unsigned long long\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIUS3IUS3_GetCount(self, label)", "def people_count(self):\n return len(self.__users)", "def cmd_account_comment_count(client, args):\n account_comment_count = client.get_account_comment_count(args.username)\n generate_output({'account_comment_count': account_comment_count})", "def wordcount(self):\n return int(self._fetch_element('user_wordcount'))", "async def osu(self, ctx, u, mode=0):\n if self.api:\n mode = self.osu_mode_converter(mode=mode)\n if mode is None:\n raise utils.errors.ServiceError(\"Unknown mode\")\n user = await self.api.get_user(u, mode=mode)\n try:\n user = user[0]\n pp = user.pp_raw # CHAR SAVING.\n ss = user.count_rank_ss\n s = user.count_rank_s\n a = user.count_rank_a\n except IndexError:\n return await ctx.send(\"User does not exist, \" \"maybe try one that does\")\n else:\n raise utils.errors.ServiceError(\"osu! api key not configured\")\n osu_embed = discord.Embed(title=f\"osu! stats\", colour=0x690E8)\n osu_embed.set_author(\n name=f\"{u} ({user.country}\"\n f\" #{user.pp_country_rank}, global\"\n f\" #{user.pp_rank})\",\n icon_url=\"https://osu.ppy.sh/images/flags/\" f\"{user.country}.png\",\n )\n osu_embed.set_thumbnail(url=f\"https://a.ppy.sh/{user.user_id}\")\n osu_embed.add_field(name=\"Play count\", value=user.playcount)\n osu_embed.add_field(name=\"Ranked score\", value=user.ranked_score)\n osu_embed.add_field(name=\"Total score\", value=user.total_score)\n osu_embed.add_field(name=\"Level\", value=int(user.level))\n osu_embed.add_field(name=\"Total PP\", value=f\"{round(pp, 2)} PP\")\n osu_embed.add_field(name=\"Accuracy\", value=f\"{user.accuracy:.1f}%\")\n osu_embed.add_field(name=\"Plays (SS/S/A)\", value=f\"{ss}/{s}/{a}\")\n await ctx.send(embed=osu_embed)", "def get_cpus_stats_count(self):\n\t\treturn call_sdk_function('PrlStat_GetCpusStatsCount', self.handle)", "def set_count(self, count, asset=None):\n self._set_property('pc:count', count, asset)", "def _set_echo_req_timeout_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-req-timeout-count\", rest_name=\"echo-req-timeout-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"echo_req_timeout_count must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': 
\"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-req-timeout-count\", rest_name=\"echo-req-timeout-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__echo_req_timeout_count = t\n if hasattr(self, '_set'):\n self._set()", "def inbox_count_for(user):\n return Message.inbox.for_user(user).unread().count()", "def sendUpStatCountTagCounts(node, tag):\n def pushUp(node):\n t = 0\n ta = 0\n for child in node.children:\n tc, tac = pushUp(child)\n ta += tac\n t += tc\n node.tagTranscriptAnnotations += ta\n node.tagTranscripts += t\n return node.tagTranscripts, node.tagTranscriptAnnotations\n if ':' in tag:\n tag = tag.split(':')[-1]\n pushUp(node)", "def getTopUsers(self):\n\n\t\tquery = \"\"\"select M.user_id, count( distinct M.venue_id) as cnt\n\t\t\t\t\tfrom\n\t\t\t\t\t(\n\t\t\t\t\tselect J.user_id, J.venue_id, J.latitude, J.longitude, J.Homelat, J.Homelong,\n\t\t\t\t\tCASE\n\t\t\t\t\t\tWHEN J.latitude = J.Homelat and J.longitude = J.Homelong THEN 1\n\t\t\t\t\t\tELSE 0\n\t\t\t\t\t\tEND bool\n\t\t\t\t\tfrom \n\t\t\t\t\t(\n\t\t\t\t\tselect C.*, U.latitude as Homelat, U.longitude as Homelong \n\t\t\t\t\tfrom checkins C\n\t\t\t\t\tleft join users U\n\t\t\t\t\ton U.id = C.user_id\n\t\t\t\t\t) J\n\t\t\t\t\twhere bool=0\n\t\t\t\t\t) M\n\t\t\t\t\tgroup by user_id\n\t\t\t\t\torder by cnt desc, user_id\n\t\t\t\t\tlimit {}\"\"\".format(self.top)\n\n\t\tdf = pd.read_sql_query(query, self.conn)\n\n\t\tself.topusers = df[\"user_id\"].tolist()", "def user_counts(user_id):\n return _fetch_records(f\"SELECT rating_type, count FROM counts_by_rating_type WHERE user_id = {user_id} AND count > 0\")", "def get_cpu_count(self):\n\t\treturn call_sdk_function('PrlVmCfg_GetCpuCount', self.handle)", "def SessionCount(self):\n if self.force_auto_sync:\n self.get('SessionCount')\n return self._SessionCount", "async def _total_players(self, ctx: Context, number: int):\n\n await self.config.guild(ctx.guild).total_players.set(number)\n\n await ctx.send(_(\"Set total players to `{}`.\").format(number))", "def count(cls, client) :\n try :\n obj = nshttpprofile()\n option_ = options()\n option_.count = True\n response = obj.get_resources(client, option_)\n if response :\n return response[0].__dict__['___count']\n return 0\n except Exception as e :\n raise e", "def get_user_likes(self) -> int:\n return -1", "def count_upvotes(self):\n return self.filter(value=1).count()", "def countPlayers():\n db, cursor = connect()\n cursor.execute( \" SELECT count(*) as num FROM players \")\n count = cursor.fetchone()[0]\n return int(count)", "def GetCount(self, label: 'unsigned short') -> \"unsigned long long\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIUC2IUS2_GetCount(self, label)", "def node_count(self) -> int:\n return int(self.graph_tuple_stats.node_count or 0)", "def get_usercounts(self):\n word_counts = defaultdict(int) # {}\n with open(self.filename) as f:\n for line in f:\n if line:\n username, words = self.get_username_words(line) # username1, cat dog\n num_words = len(words.split()) # 1\n word_counts[username] += num_words # {u1: 3, u2: 4, }\n return word_counts", "def _set_echo_req_sent_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = 
YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-req-sent-count\", rest_name=\"echo-req-sent-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"echo_req_sent_count must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-req-sent-count\", rest_name=\"echo-req-sent-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__echo_req_sent_count = t\n if hasattr(self, '_set'):\n self._set()", "def get_usage_count(request, user_id):\n board_count = Member.objects.filter(user_id=user_id, is_creator=True).count()\n return Response({\"board_count\": board_count})", "def GetCount(self, label: 'unsigned short') -> \"unsigned long long\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIUS2IUS2_GetCount(self, label)", "def get_total_session_count(self) -> int:\n return self.streams_count", "def get_online_count():\n return dict(online_user=get_online_users())", "def count_players(definition):\n _, player_definition = parse_player_definition(definition)\n return (int(player_definition['left_players']) +\n int(player_definition['right_players']))", "def __count_player(self):\n # Count up the player\n self.current_player += 1\n\n # Check is the self.current_player is a ghost player\n while self.current_player in self.ghost_players:\n self.current_player += 1\n\n # If the count is over 3 then reset to player 0 and count up the round\n if self.current_player > 3:\n self.current_player = 0\n self.round += 1", "def number_of_friends(user):\n user_id = user[\"id\"]\n friend_ids = friendships[user_id]\n return len(friend_ids)", "def users_like_changed(sender, instance, **kwargs):\n instance.total_likes = instance.users_like.count()\n instance.save()", "def num_gpus():\n count = ctypes.c_int()\n check_call(_LIB.MXGetGPUCount(ctypes.byref(count)))\n return count.value", "def user_counts(**kwargs):\n attributes = [\"ping_type\", \"os\", \"app_version\", \"app_build_id\", \"channel\"]\n fixed_attributes = [\"app_version\", \"channel\"]\n cubed_attributes = [x for x in attributes if x not in fixed_attributes]\n\n return dict(\n attributes=\",\".join(attributes),\n cubed_attributes=cubed_attributes,\n attribute_combinations=compute_datacube_groupings(cubed_attributes),\n **kwargs,\n )", "def remove_user(self, u: \"Node\") -> None:\n\n if u in self.users_:\n self.users_[u] -= 1\n if self.users_[u] == 0:\n del self.users_[u]", "def get_total_number_of_buildings_for_user(request):\n buildings_count = get_buildings_for_user_count(request.user)\n\n return {'status': 'success', 'buildings_count': buildings_count}", "def GetCount(self, label: 'unsigned short') -> \"unsigned long long\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIF3IUS3_GetCount(self, label)", "def GetCount(self, label: 'unsigned char') -> 
\"unsigned long long\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIUC3IUC3_GetCount(self, label)", "def oneup_count(self):\n return self.oneups.filter(Oneup.state >= 0).count()", "def number_of_users(frame, frequency = 'M'):\n\n times = frame.groupby('user').apply(lambda x: x.inserted.values[0])\n times = times.reset_index()\n times = times.set_index(DatetimeIndex(times[0]))\n return times.resample(frequency,how=len).user", "def count(cls, client) :\n\t\ttry :\n\t\t\tobj = lsntransportprofile()\n\t\t\toption_ = options()\n\t\t\toption_.count = True\n\t\t\tresponse = obj.get_resources(client, option_)\n\t\t\tif response :\n\t\t\t\treturn response[0].__dict__['___count']\n\t\t\treturn 0\n\t\texcept Exception as e :\n\t\t\traise e", "def count(self) -> int:\n return pulumi.get(self, \"count\")", "def test_user_stats(self):\r\n res = self.testapp.get(u'/api/v1/stats/users',\r\n status=200)\r\n data = json.loads(res.body)\r\n self.assertTrue(\r\n 'count' in data,\r\n \"Should have user count: \" + str(data))\r\n self.assertTrue(\r\n 'activations' in data,\r\n \"Should have pending user activations: \" + str(data))\r\n self.assertTrue(\r\n 'with_bookmarks' in data,\r\n \"Should have count of users with bookmarks: \" + str(data))", "def number_users_solving_bites(self) -> int:\r\n users_solving_bites = {\r\n row['user']\r\n for row in self.rows\r\n if row['completed'] == 'True'\r\n }\r\n\r\n return len(users_solving_bites)", "def count_update_pool_size(self, count_update_pool_size: ConfigNodePropertyInteger):\n\n self._count_update_pool_size = count_update_pool_size" ]
[ "0.66030264", "0.6556253", "0.5894535", "0.568632", "0.56651974", "0.56644946", "0.5605921", "0.5584418", "0.55039364", "0.55039364", "0.5503759", "0.5493639", "0.54372156", "0.53541523", "0.5247895", "0.5217682", "0.5213357", "0.5187056", "0.516838", "0.5158767", "0.51152784", "0.5085967", "0.5063331", "0.50588334", "0.50533867", "0.5000247", "0.49808678", "0.49688262", "0.49194285", "0.49020582", "0.48850292", "0.48736855", "0.48502967", "0.48324072", "0.48300096", "0.47872385", "0.47837412", "0.4771179", "0.47657537", "0.47236422", "0.47105342", "0.4707528", "0.46989533", "0.46831542", "0.46796042", "0.46515226", "0.46505764", "0.4648545", "0.46232376", "0.4615512", "0.4614666", "0.4589962", "0.4579943", "0.45735046", "0.45690727", "0.45684993", "0.45567352", "0.4551709", "0.45486525", "0.4547299", "0.45457608", "0.45025587", "0.44991243", "0.44924477", "0.4491204", "0.4486254", "0.4478741", "0.4470307", "0.44694382", "0.44691214", "0.4469051", "0.44655424", "0.44619018", "0.44562435", "0.44508612", "0.4450725", "0.44419956", "0.4438126", "0.443426", "0.4431695", "0.44205287", "0.44178554", "0.4415901", "0.44124705", "0.43915653", "0.43897274", "0.4389673", "0.43878388", "0.43783623", "0.43764758", "0.4373142", "0.4371261", "0.43703124", "0.43668333", "0.43571183", "0.43534315", "0.4341676", "0.43378398", "0.43361363", "0.43356323" ]
0.8726256
0
Getter method for usr_traceroute_count, mapped from YANG variable /mpls_state/statistics_oam/usr_traceroute_count (uint32)
def _get_usr_traceroute_count(self):
  return self.__usr_traceroute_count
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _set_usr_traceroute_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"usr-traceroute-count\", rest_name=\"usr-traceroute-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"usr_traceroute_count must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"usr-traceroute-count\", rest_name=\"usr-traceroute-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__usr_traceroute_count = t\n if hasattr(self, '_set'):\n self._set()", "def _set_usr_ping_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"usr-ping-count\", rest_name=\"usr-ping-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"usr_ping_count must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"usr-ping-count\", rest_name=\"usr-ping-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__usr_ping_count = t\n if hasattr(self, '_set'):\n self._set()", "def getViewPortUserCount(self):\n logger.debug('Getting map view port user count...')\n elements = get_elements_by_css(\".leaflet-marker-icon.srcCluster\")\n users = 0\n for element in elements:\n users += int(get_text(element))\n return users", "def getUserCount(self):\n logger.debug('Getting the number of users discovered...')\n return get_text(get_element_by_css(\"span[data-nsmodule='usersdiscovered']\"))", "def _get_usr_ping_count(self):\n return self.__usr_ping_count", "def _get_count(_khoros_object, _user_id, _object_type):\n _api_response = query_users_table_by_id(_khoros_object, f'{_object_type}.count(*)', _user_id)\n return int(_api_response['data']['items'][0][_object_type]['count'])", "def get_registered_users_count(khoros_object):\n response = api.make_v1_request(khoros_object, '/users/registered/count')\n return response['response']['value']['$']", "def get_number_of_pins_for_user(self, user):\n\t\treturn self.active_pins().filter(board__user=user).count()", "def get_session_count(self):\n\t\treturn 
call_sdk_function('PrlUsrInfo_GetSessionCount', self.handle)", "def count_users(self):\n return self.get_session.query(func.count(self.user_model.id)).scalar()", "def unseen_count_for(self, user):\r\n return self.filter(user=user, unseen=True).count()", "def NoOfSRTunnels(self):\r\n\t\treturn self._get_attribute('noOfSRTunnels')", "def count_users(self, session) -> int:\n\n users_quantity = session.query(User).count()\n return users_quantity", "def voterContactCount(self, user):\n return self.votercontact_set.filter(user=user).count()", "def get_all_users_count(khoros_object):\n liql_query = 'SELECT count(*) FROM users'\n api_response = liql.perform_query(khoros_object, liql_query=liql_query, verify_success=True)\n return int(api_response['data']['count'])", "def count_user():\r\n session = tables.get_session()\r\n if session is None:\r\n return 0\r\n count = 0\r\n try:\r\n user_account = UserAccount()\r\n uid = user_account.get_max_uid(session)\r\n if uid is None:\r\n return 0\r\n return uid + 1\r\n except SQLAlchemyError as err:\r\n LOGGER.error('Count user number failed: %s', err)\r\n return count\r\n finally:\r\n session.close()\r\n return count", "def headcount(self):\n self.cleanup()\n return len([True for u in self.users if not u.name.startswith('System/')])", "def n_users(self):\n if self._n_users is None:\n self._n_users = len(self.user_unique_vals)\n return self._n_users", "def get_online_user_count(khoros_object):\n liql_query = \"select count(*) from users where online_status = 'online'\"\n api_response = liql.perform_query(khoros_object, liql_query=liql_query, verify_success=True)\n return int(api_response['data']['count'])", "def count():\r\n return User.query.count()", "async def get_user_hw_action_list_count(\n request: Request,\n user_id: object = None,\n name=None) -> int:\n\n ret_val = 0\n query_str = get_user_hw_action_list_count_query\n try:\n\n async with request.app.pg.acquire() as connection:\n row = await connection.fetchval(query_str, user_id, name)\n if row is not None:\n ret_val = row\n except Exception as gclcerr:\n logger.error('get_user_hw_action_list_count service erred with: {}'.format(gclcerr))\n\n return ret_val", "def get_users_count(self):\n try:\n roles = self.db_handler.get_roles_list()\n reply = ''\n\n for role_id, role_name in roles:\n reply += f'{role_name}ів - {self.db_handler.get_staff_count_by_role(role_id)[0]}\\n'\n\n return reply\n except Exception as err:\n method_name = sys._getframe().f_code.co_name\n\n self.logger.write_to_log('exception', 'model')\n self.logger.write_to_err_log(f'exception in method {method_name} - {err}', 'model')", "def get_roles_count(khoros_object, user_settings=None, user_id=None, login=None, email=None):\n user_settings = _process_settings_and_user_id(khoros_object, user_settings, user_id, login, email)\n return _get_count(khoros_object, user_settings['id'], 'roles')", "def total_users(user):\n user_count = User.objects.filter(is_active=True).count()\n\n return NumberResponse(user_count, 'Total number of users')", "def getViewPortAppCount(self):\n logger.debug('Getting map view port app count...')\n elements = get_elements_by_css(\".leaflet-marker-icon.dstCluster\")\n users = 0\n for element in elements:\n users += int(get_text(element))\n return users", "def trace_region_count(self):\n cmd = enums.JLinkTraceCommand.GET_NUM_REGIONS\n data = ctypes.c_uint32(0)\n res = self._dll.JLINKARM_TRACE_Control(cmd, ctypes.byref(data))\n if (res == 1):\n raise errors.JLinkException('Failed to get trace region count.')\n return 
data.value", "def get_total_session_count(self) -> int:\n return self.streams_count", "def getSessionCount(self):\n logger.debug('Getting the number of sessions discovered...')\n return get_text(get_element_by_css(\"span[data-nsmodule='sessionsdiscovered']\"))", "def get_connected_users_count(room: PublicChatRoom) -> int:\n return room.users.count()", "def get_otu_counts(fpath):\r\n\r\n try:\r\n otu_table = parse_biom_table(open(fpath, 'U'))\r\n except (TypeError, IOError):\r\n raise MissingFileError('OTU table file required for this analysis')\r\n\r\n if (otu_table.ObservationMetadata is None or\r\n otu_table.ObservationMetadata[0]['taxonomy'] is None):\r\n raise ValueError(\r\n '\\n\\nThe lineages are missing from the OTU table. Make sure you included the lineages for the OTUs in your OTU table. \\n')\r\n\r\n return otu_table", "def get_online_users_count(khoros_object, anonymous=None, registered=None):\n if anonymous and not registered:\n response = api.make_v1_request(khoros_object, '/users/online/anonymous/count')\n elif registered and not anonymous:\n response = api.make_v1_request(khoros_object, '/users/online/registered/count')\n else:\n response = api.make_v1_request(khoros_object, '/users/online/count')\n return response['response']['value']['$']", "def GetCount(self, label: 'unsigned char') -> \"unsigned long long\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIUS3IUC3_GetCount(self, label)", "def guests_counter(window, n_guests):\r\n window.write_event_value('-COUNT-', n_guests)", "def get_total_users(request):\n number_of_users = User.objects.count()\n res = {\n 'total_users': number_of_users,\n }\n return Response(res, status=status.HTTP_200_OK)", "def get_num_psus(self):\n return len(self._psu_list)", "def get_number_of_likes_for_user(self, user):\n\t\tfrom pins.models import Pin\n\t\tpin_ctype = ContentType.objects.get_for_model(Pin)\n\t\tpin_list = Pin.objects.active_pins().filter(board__user=user).values_list('pk', flat=True)\n\t\treturn self.filter(content_type=pin_ctype, object_id__in=pin_list).count()", "def get_session_count(self, network_tuple: NetworkTuple) -> int:\n return len(self.network_tuple_stream_id[network_tuple])", "def get_user_view_count(self, username):\n self.cur.execute(\"SELECT CAST(SUM(view_count) AS DECIMAL(10, 0)) FROM videos WHERE uploader = \\\"{}\\\"\".format(username))\n return self.cur.fetchone()[0]", "def get_total_number_of_buildings_for_user(request):\n buildings_count = get_buildings_for_user_count(request.user)\n\n return {'status': 'success', 'buildings_count': buildings_count}", "def GetCount(self, label: 'unsigned short') -> \"unsigned long long\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIUS3IUS3_GetCount(self, label)", "def get_usage_count(request, user_id):\n board_count = Member.objects.filter(user_id=user_id, is_creator=True).count()\n return Response({\"board_count\": board_count})", "def getNumberOfTraces(self) -> int:\n\n if not self.debug:\n self.myFieldFox.write(\"CALC:PAR:COUN?\")\n ret = self.myFieldFox.read()\n else:\n ret = 4\n return ret", "def inbox_count_for(user):\n return Message.inbox.for_user(user).unread().count()", "def get_users_count():\n # return User.objects.all().count()\n return User.objects.filter(is_active=True,\n last_login__isnull=False).count()", "def count(self):\n\n return self._get(\"count\", rtype=UInt)", "def GetCount(self, label: 'unsigned short') -> \"unsigned long long\":\n return 
_itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIUC3IUS3_GetCount(self, label)", "def SessionCount(self):\n if self.force_auto_sync:\n self.get('SessionCount')\n return self._SessionCount", "def GetCount(self, label: 'unsigned char') -> \"unsigned long long\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIUS2IUC2_GetCount(self, label)", "def utilization(user, ressource):\n if ressource == 'accounts':\n return Account.objects.filter(vhost__in=list(get_vhosts(user))).count()\n return None", "def count_user_trips(session_user_id):\n\n all_trips = Trip.query.filter(Trip.user_id == session_user_id).order_by(Trip.start_date).all()\n\n future_trips = 0\n past_trips = 0\n future_trips_list = []\n past_trips_list = []\n todays_date = datetime.now()\n\n for trip in all_trips:\n if trip.start_date <= todays_date:\n past_trips += 1\n past_trips_list.append(trip)\n else:\n future_trips += 1\n future_trips_list.append(trip)\n return [(future_trips, past_trips), (future_trips_list, past_trips_list)]", "def getConnectedUsersCount(self):\n\n\t\treturn len(self.connectedUsers)", "def run_and_return_counts(self, user, threshold, shots=10000):\n circuit = self.create_circuit(user, threshold, measurements=True, logical_barriers=False)\n\n job = execute(circuit, BasicAer.get_backend(\"qasm_simulator\"), shots=shots)\n\n results = job.result()\n\n return results.get_counts()", "def TunnelCount(self):\n if self.force_auto_sync:\n self.get('TunnelCount')\n return self._TunnelCount", "def number_users_active(self) -> int:\r\n unique_users = {\r\n row['user']\r\n for row in self.rows\r\n }\r\n\r\n return len(unique_users)", "def LnsCount(self):\n if self.force_auto_sync:\n self.get('LnsCount')\n return self._LnsCount", "def GetCount(self, label: 'unsigned short') -> \"unsigned long long\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIUS2IUS2_GetCount(self, label)", "def getThreadCountForSLRUMNode(hostname, errStream):\n qhcmd = [\"sinfo\",\"-n\", hostname, \"-o\", '\"%15N %10c\"']\n process=subprocess.Popen(qhcmd,stdout=subprocess.PIPE)\n sinfoCPUsRE = re.compile(r'^\\S+\\s+(\\d+)')\n qhout=\"\"\n for line in process.stdout:\n qhout+=line\n m=sinfoCPUsRE.search(line)\n if m:\n slots = int(m.group(1))\n logging.debug(\"Node %s has %d slots\" % (hostname, slots))\n break\n else:\n slots=8\n logging.warning(\"Could not parse sinfo output:\\n%s\" % (qhout))\n return slots", "def activeusercount(self):\n sql = '''select to_char(count(*)-1, 'FM99999999999999990') retvalue \n from v$session where username is not null \n and status='ACTIVE' '''\n self.cur.execute(sql)\n res = self.cur.fetchall()\n for i in res:\n print(i[0])", "def get_historic_users_count():\n return User.objects.all().count()", "def node_count(self) -> int:\n return pulumi.get(self, \"node_count\")", "def get_uplink_cnt(self) -> int:\n try:\n self._serial.transmit(b'\\x53\\x00')\n response = self._get_reply(0x53, 4, 0.25)\n finally:\n self._gpio.sleep()\n\n return int.from_bytes(response[2:6], 'little', signed=False)", "def GetPerVendorLocationCount(self):\n regionVectorData = self.VectorData[self.SelectedRegion]\n return regionVectorData['PerVendorLocations']", "def node_count(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"node_count\")", "def node_count(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"node_count\")", "def get_users_count(khoros_object, registered=False, online=False):\n if all((registered, online)):\n raise 
errors.exceptions.InvalidParameterError('You can only select registered or online users but not both.')\n if registered:\n user_count = get_registered_users_count(khoros_object)\n elif online:\n user_count = get_online_user_count(khoros_object)\n else:\n user_count = get_all_users_count(khoros_object)\n return user_count", "def count(cls, client) :\n\t\ttry :\n\t\t\tobj = lsntransportprofile()\n\t\t\toption_ = options()\n\t\t\toption_.count = True\n\t\t\tresponse = obj.get_resources(client, option_)\n\t\t\tif response :\n\t\t\t\treturn response[0].__dict__['___count']\n\t\t\treturn 0\n\t\texcept Exception as e :\n\t\t\traise e", "def snmpqosqos_sch_sessions_regulated_count(self) :\n\t\ttry :\n\t\t\treturn self._snmpqosqos_sch_sessions_regulated_count\n\t\texcept Exception as e:\n\t\t\traise e", "def GetCount(self, label: 'unsigned short') -> \"unsigned long long\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIUC2IUS2_GetCount(self, label)", "def GetNumberOfObjects(self) -> \"unsigned long long\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIUS3IUC3_GetNumberOfObjects(self)", "def get_user_count(self):\n done = self.cur.execute(\"SELECT username FROM users\")\n return done", "def number_of_running_metrics(self):\n try:\n return len(self.get_classads(\"OSGRSV==\\\"metrics\\\"\"))\n except TypeError:\n self.rsv.log(\"ERROR\", \"Classad parsing failed, unable to count running metrics\")", "def ir_count(self) -> int:\n return int(self.graph_tuple_stats.ir_count or 0)", "def get_buildings_for_user_count(user):\n return BuildingSnapshot.objects.filter(\n super_organization__in=user.orgs.all(),\n canonicalbuilding__active=True,\n ).count()", "def nb_rhinoceros(self):\n return self.__nb_rhinoceros", "def uracil_count(RNAsequence):\n uracil = 0\n for nucleotide in RNAsequence:\n if nucleotide == 'U':\n uracil += 1\n return uracil", "def get_total_issues_per_user(issues):\n return get_total_contributions_per_user(issues, 'user')", "def number_of_carnivores_island(self):\n return np.sum(self.carnivores_on_island)", "def get_kudos_received_count(khoros_object, user_settings=None, user_id=None, login=None, email=None):\n user_settings = _process_settings_and_user_id(khoros_object, user_settings, user_id, login, email)\n return _get_sum_weight(khoros_object, user_settings['id'], 'kudos_received')", "def get_ingredient_counts_by_user(cls, userid):\n\n QUERY = \"\"\"\n SELECT item, COUNT(item)\n FROM ingredients\n WHERE recipe_id IN (SELECT recipe_id FROM recipes WHERE user_id= :userid)\n GROUP BY item\n ORDER BY COUNT(item) DESC\n \"\"\"\n\n cursor = db.session.execute(QUERY, {'userid': userid})\n ingredients_count = cursor.fetchall()\n\n return ingredients_count", "def GetNumberOfObjects(self) -> \"unsigned long long\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIUS2IUC2_GetNumberOfObjects(self)", "def count_statements_by(self, actor_label):\n query = read_query('trust/count_statements_by') % actor_label\n response = self._submit_query(query)\n return response[0]['num_stat']['value']", "def vscr_ratchet_group_session_get_participants_count(self, ctx):\n vscr_ratchet_group_session_get_participants_count = self._lib.vscr_ratchet_group_session_get_participants_count\n vscr_ratchet_group_session_get_participants_count.argtypes = [POINTER(vscr_ratchet_group_session_t)]\n vscr_ratchet_group_session_get_participants_count.restype = c_uint\n return vscr_ratchet_group_session_get_participants_count(ctx)", "async def 
connected_users_count(self, event):\n print(\"PublicChatConsumer\", \"connected_users_count\",\n event[\"connected_users_count\"])\n await self.send_json({\n \"msg_type\": MSG_TYPE_CONNECTED_USERS_COUNT,\n \"connected_users_count\": event[\"connected_users_count\"]\n })", "def get_top_users_total_distance_on_foot(self, number: int, offset: int = 0):\n return self._get_top_users_total_distance([ActivityType.Run, ActivityType.Walk], number, offset)", "def getTotalNumRRI(self):\n return self.analyze.tg_ecg_get_total_rri_count()", "def node_count(self) -> int:\n return int(self.graph_tuple_stats.node_count or 0)", "def num_servos(self) -> int:\n return self._num_servos", "def target_lun_in_use_count(self):\n return self._target_lun_in_use_count", "def GetNumberOfObjects(self) -> \"unsigned long long\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIUC3IUS3_GetNumberOfObjects(self)", "def wordcount(self):\n return int(self._fetch_element('user_wordcount'))", "def GetCount(self, label: 'short') -> \"unsigned long long\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIUS3ISS3_GetCount(self, label)", "def totalTrips(analyzer):\n return model.totalTrips(analyzer)", "def get_Iu(uid):\n try:\n return len(trainset.ur[trainset.to_inner_uid(uid)])\n except ValueError: # user was not part of the trainset\n return 0", "def spine_switch_count(self):\n spines = GetSwitchDetails().get_spine_switch_ip()\n totalSpines = len(spines)\n return totalSpines", "def GetCount(self, label: 'unsigned short') -> \"unsigned long long\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIF3IUS3_GetCount(self, label)", "def get_Iu(uid):\n try:\n return len(trainSet.ur[trainSet.to_inner_uid(uid)])\n except ValueError: # user was not part of the trainset\n return 0", "async def connected_user_count(self, event):\n\t\t# Send a message down to the client\n\t\tprint(\"DocumentChatConsumer: connected_user_count: count: \" + str(event[\"connected_user_count\"]))\n\t\tawait self.send_json(\n\t\t\t{\n\t\t\t\t\"msg_type\": MSG_TYPE_CONNECTED_USER_COUNT,\n\t\t\t\t\"connected_user_count\": event[\"connected_user_count\"]\n\t\t\t},\n\t\t)", "def get_non_traffic_charges_grid_row_count(self):\n non_traffic_charges_grid_row_count_span_element = self.wait().until(EC.visibility_of_element_located(self.non_traffic_charges_grid_row_count_span_locator), 'non traffic charges grid row count span locator not found before specified time out')\n count_span_text = non_traffic_charges_grid_row_count_span_element.text.split()\n self.non_traffic_charges_grid_row_count = count_span_text[2]\n return self.non_traffic_charges_grid_row_count", "def get_triangle_count(remote, objectid):\n cmd1 = mmapi.StoredCommands()\n key1 = cmd1.AppendSceneCommand_GetTriangleCount(objectid)\n remote.runCommand(cmd1)\n result_val = mmapi.any_result()\n cmd1.GetSceneCommandResult_GetTriangleCount(key1, result_val)\n return result_val.i", "def get_cpu_count(self):\n\t\treturn call_sdk_function('PrlSrvCfg_GetCpuCount', self.handle)" ]
[ "0.8096988", "0.58348656", "0.5540107", "0.5337788", "0.5088764", "0.50769204", "0.5072222", "0.50714403", "0.5059296", "0.50581366", "0.50281113", "0.49717057", "0.4894099", "0.48607355", "0.48384598", "0.48260984", "0.48260832", "0.48071045", "0.47291836", "0.47040775", "0.46823198", "0.46797007", "0.46164805", "0.45977813", "0.45746386", "0.4571215", "0.45613363", "0.45551383", "0.4542036", "0.4527165", "0.45144346", "0.4483073", "0.44816223", "0.4477557", "0.44676432", "0.44602787", "0.44515356", "0.4444586", "0.44434837", "0.4430963", "0.44057292", "0.44031417", "0.43946868", "0.43711254", "0.43457276", "0.4344982", "0.43299514", "0.43287647", "0.43123642", "0.43104672", "0.42673272", "0.4262696", "0.42571753", "0.42537987", "0.42469952", "0.424379", "0.42351067", "0.4232531", "0.42270294", "0.42250872", "0.42102695", "0.42075145", "0.42058676", "0.42058676", "0.41969746", "0.41936773", "0.41876414", "0.41863808", "0.41841206", "0.41684297", "0.41592917", "0.41565034", "0.41542023", "0.41535464", "0.41502088", "0.41471338", "0.41306257", "0.41259864", "0.41253683", "0.41252798", "0.412484", "0.4122458", "0.41215575", "0.41188347", "0.4106405", "0.41046005", "0.40953127", "0.409496", "0.40938908", "0.40914562", "0.40803277", "0.40783927", "0.40773618", "0.40617582", "0.40607232", "0.40572533", "0.405701", "0.4048233", "0.4047052", "0.40425944" ]
0.7333012
1
Setter method for usr_traceroute_count, mapped from YANG variable /mpls_state/statistics_oam/usr_traceroute_count (uint32)
def _set_usr_traceroute_count(self, v, load=False):
  if hasattr(v, "_utype"):
    v = v._utype(v)
  try:
    t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="usr-traceroute-count", rest_name="usr-traceroute-count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)
  except (TypeError, ValueError):
    raise ValueError({
        'error-string': """usr_traceroute_count must be of a type compatible with uint32""",
        'defined-type': "uint32",
        'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="usr-traceroute-count", rest_name="usr-traceroute-count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)""",
      })

  self.__usr_traceroute_count = t
  if hasattr(self, '_set'):
    self._set()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_usr_traceroute_count(self):\n return self.__usr_traceroute_count", "def _set_usr_ping_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"usr-ping-count\", rest_name=\"usr-ping-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"usr_ping_count must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"usr-ping-count\", rest_name=\"usr-ping-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__usr_ping_count = t\n if hasattr(self, '_set'):\n self._set()", "def getViewPortUserCount(self):\n logger.debug('Getting map view port user count...')\n elements = get_elements_by_css(\".leaflet-marker-icon.srcCluster\")\n users = 0\n for element in elements:\n users += int(get_text(element))\n return users", "def n_users(self):\n if self._n_users is None:\n self._n_users = len(self.user_unique_vals)\n return self._n_users", "def getUserCount(self):\n logger.debug('Getting the number of users discovered...')\n return get_text(get_element_by_css(\"span[data-nsmodule='usersdiscovered']\"))", "def count_users(self, session) -> int:\n\n users_quantity = session.query(User).count()\n return users_quantity", "def count_users(self):\n return self.get_session.query(func.count(self.user_model.id)).scalar()", "def user_count(self, user_count):\n\n self._user_count = user_count", "def user_count(self, user_count):\n\n self._user_count = user_count", "def count_user():\r\n session = tables.get_session()\r\n if session is None:\r\n return 0\r\n count = 0\r\n try:\r\n user_account = UserAccount()\r\n uid = user_account.get_max_uid(session)\r\n if uid is None:\r\n return 0\r\n return uid + 1\r\n except SQLAlchemyError as err:\r\n LOGGER.error('Count user number failed: %s', err)\r\n return count\r\n finally:\r\n session.close()\r\n return count", "def unseen_count_for(self, user):\r\n return self.filter(user=user, unseen=True).count()", "def _get_usr_ping_count(self):\n return self.__usr_ping_count", "def get_number_of_pins_for_user(self, user):\n\t\treturn self.active_pins().filter(board__user=user).count()", "def guests_counter(window, n_guests):\r\n window.write_event_value('-COUNT-', n_guests)", "def voterContactCount(self, user):\n return self.votercontact_set.filter(user=user).count()", "def get_registered_users_count(khoros_object):\n response = api.make_v1_request(khoros_object, '/users/registered/count')\n return response['response']['value']['$']", "def headcount(self):\n self.cleanup()\n return len([True for u in self.users if not u.name.startswith('System/')])", "def _get_count(_khoros_object, _user_id, _object_type):\n _api_response = query_users_table_by_id(_khoros_object, f'{_object_type}.count(*)', _user_id)\n return 
int(_api_response['data']['items'][0][_object_type]['count'])", "def get_session_count(self):\n\t\treturn call_sdk_function('PrlUsrInfo_GetSessionCount', self.handle)", "def NoOfSRTunnels(self):\r\n\t\treturn self._get_attribute('noOfSRTunnels')", "def count():\r\n return User.query.count()", "def get_all_users_count(khoros_object):\n liql_query = 'SELECT count(*) FROM users'\n api_response = liql.perform_query(khoros_object, liql_query=liql_query, verify_success=True)\n return int(api_response['data']['count'])", "def get_num_psus(self):\n return len(self._psu_list)", "def add_user(self, u: \"Node\") -> None:\n\n if u not in self.users_:\n self.users_[u] = 0\n self.users_[u] += 1", "def get_online_user_count(khoros_object):\n liql_query = \"select count(*) from users where online_status = 'online'\"\n api_response = liql.perform_query(khoros_object, liql_query=liql_query, verify_success=True)\n return int(api_response['data']['count'])", "def total_users(user):\n user_count = User.objects.filter(is_active=True).count()\n\n return NumberResponse(user_count, 'Total number of users')", "def get_total_session_count(self) -> int:\n return self.streams_count", "def number_users_active(self) -> int:\r\n unique_users = {\r\n row['user']\r\n for row in self.rows\r\n }\r\n\r\n return len(unique_users)", "def _set_echo_req_timeout_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-req-timeout-count\", rest_name=\"echo-req-timeout-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"echo_req_timeout_count must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-req-timeout-count\", rest_name=\"echo-req-timeout-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__echo_req_timeout_count = t\n if hasattr(self, '_set'):\n self._set()", "def target_lun_in_use_count(self):\n return self._target_lun_in_use_count", "def count(self, value):\n \n self._count = int(value)", "def get_roles_count(khoros_object, user_settings=None, user_id=None, login=None, email=None):\n user_settings = _process_settings_and_user_id(khoros_object, user_settings, user_id, login, email)\n return _get_count(khoros_object, user_settings['id'], 'roles')", "def get_users_count(self):\n try:\n roles = self.db_handler.get_roles_list()\n reply = ''\n\n for role_id, role_name in roles:\n reply += f'{role_name}ів - {self.db_handler.get_staff_count_by_role(role_id)[0]}\\n'\n\n return reply\n except Exception as err:\n method_name = sys._getframe().f_code.co_name\n\n self.logger.write_to_log('exception', 'model')\n self.logger.write_to_err_log(f'exception in method {method_name} - {err}', 'model')", "def getViewPortAppCount(self):\n logger.debug('Getting map view port app count...')\n elements = 
get_elements_by_css(\".leaflet-marker-icon.dstCluster\")\n users = 0\n for element in elements:\n users += int(get_text(element))\n return users", "async def connected_users_count(self, event):\n print(\"PublicChatConsumer\", \"connected_users_count\",\n event[\"connected_users_count\"])\n await self.send_json({\n \"msg_type\": MSG_TYPE_CONNECTED_USERS_COUNT,\n \"connected_users_count\": event[\"connected_users_count\"]\n })", "def get_number_of_likes_for_user(self, user):\n\t\tfrom pins.models import Pin\n\t\tpin_ctype = ContentType.objects.get_for_model(Pin)\n\t\tpin_list = Pin.objects.active_pins().filter(board__user=user).values_list('pk', flat=True)\n\t\treturn self.filter(content_type=pin_ctype, object_id__in=pin_list).count()", "def setNumberOfTraces(self,numberOfTraces: int) -> None:\n\n if not self.debug:\n self.myFieldFox.write(\"CALC:PAR:COUN \" + str(numberOfTraces))\n\n return", "def get_total_number_of_buildings_for_user(request):\n buildings_count = get_buildings_for_user_count(request.user)\n\n return {'status': 'success', 'buildings_count': buildings_count}", "def get_user_view_count(self, username):\n self.cur.execute(\"SELECT CAST(SUM(view_count) AS DECIMAL(10, 0)) FROM videos WHERE uploader = \\\"{}\\\"\".format(username))\n return self.cur.fetchone()[0]", "def get_connected_users_count(room: PublicChatRoom) -> int:\n return room.users.count()", "def node_count(self) -> int:\n return pulumi.get(self, \"node_count\")", "def registered_guests_counter(window, counter_id_for_registered_attendees):\r\n window.write_event_value('-COUNT3-', counter_id_for_registered_attendees)", "def tr_count_for_run(self, run_idx, file_name, raw_rows, events):\n if self.is_gravy:\n return 180\n else:\n return 230", "def SessionCount(self):\n if self.force_auto_sync:\n self.get('SessionCount')\n return self._SessionCount", "def getConnectedUsersCount(self):\n\n\t\treturn len(self.connectedUsers)", "async def get_user_hw_action_list_count(\n request: Request,\n user_id: object = None,\n name=None) -> int:\n\n ret_val = 0\n query_str = get_user_hw_action_list_count_query\n try:\n\n async with request.app.pg.acquire() as connection:\n row = await connection.fetchval(query_str, user_id, name)\n if row is not None:\n ret_val = row\n except Exception as gclcerr:\n logger.error('get_user_hw_action_list_count service erred with: {}'.format(gclcerr))\n\n return ret_val", "def LnsCount(self):\n if self.force_auto_sync:\n self.get('LnsCount')\n return self._LnsCount", "def getNumberOfTraces(self) -> int:\n\n if not self.debug:\n self.myFieldFox.write(\"CALC:PAR:COUN?\")\n ret = self.myFieldFox.read()\n else:\n ret = 4\n return ret", "async def connected_user_count(self, event):\n\t\t# Send a message down to the client\n\t\tprint(\"DocumentChatConsumer: connected_user_count: count: \" + str(event[\"connected_user_count\"]))\n\t\tawait self.send_json(\n\t\t\t{\n\t\t\t\t\"msg_type\": MSG_TYPE_CONNECTED_USER_COUNT,\n\t\t\t\t\"connected_user_count\": event[\"connected_user_count\"]\n\t\t\t},\n\t\t)", "def get_total_users(request):\n number_of_users = User.objects.count()\n res = {\n 'total_users': number_of_users,\n }\n return Response(res, status=status.HTTP_200_OK)", "def GetCount(self, label: 'unsigned char') -> \"unsigned long long\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIUS3IUC3_GetCount(self, label)", "def trace_region_count(self):\n cmd = enums.JLinkTraceCommand.GET_NUM_REGIONS\n data = ctypes.c_uint32(0)\n res = self._dll.JLINKARM_TRACE_Control(cmd, 
ctypes.byref(data))\n if (res == 1):\n raise errors.JLinkException('Failed to get trace region count.')\n return data.value", "def get_users_count():\n # return User.objects.all().count()\n return User.objects.filter(is_active=True,\n last_login__isnull=False).count()", "def TunnelCount(self):\n if self.force_auto_sync:\n self.get('TunnelCount')\n return self._TunnelCount", "def count_statements_by(self, actor_label):\n query = read_query('trust/count_statements_by') % actor_label\n response = self._submit_query(query)\n return response[0]['num_stat']['value']", "def count(cls, client) :\n\t\ttry :\n\t\t\tobj = lsntransportprofile()\n\t\t\toption_ = options()\n\t\t\toption_.count = True\n\t\t\tresponse = obj.get_resources(client, option_)\n\t\t\tif response :\n\t\t\t\treturn response[0].__dict__['___count']\n\t\t\treturn 0\n\t\texcept Exception as e :\n\t\t\traise e", "def GetCount(self, label: 'unsigned short') -> \"unsigned long long\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIUS3IUS3_GetCount(self, label)", "def get_session_count(self, network_tuple: NetworkTuple) -> int:\n return len(self.network_tuple_stream_id[network_tuple])", "def get_online_users_count(khoros_object, anonymous=None, registered=None):\n if anonymous and not registered:\n response = api.make_v1_request(khoros_object, '/users/online/anonymous/count')\n elif registered and not anonymous:\n response = api.make_v1_request(khoros_object, '/users/online/registered/count')\n else:\n response = api.make_v1_request(khoros_object, '/users/online/count')\n return response['response']['value']['$']", "def count(self):\n\n return self._get(\"count\", rtype=UInt)", "def non_registered_guests_counter(window, counter_id_for_non_registered_attendees):\r\n window.write_event_value('-COUNT2-', counter_id_for_non_registered_attendees)", "def GetCount(self, label: 'unsigned char') -> \"unsigned long long\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIUS2IUC2_GetCount(self, label)", "def increaseTimes(self, userId):\n for user in self.requestLog:\n if str(userId) == user[0]:\n user[1] += 1\n break", "def get_usage_count(request, user_id):\n board_count = Member.objects.filter(user_id=user_id, is_creator=True).count()\n return Response({\"board_count\": board_count})", "def activeusercount(self):\n sql = '''select to_char(count(*)-1, 'FM99999999999999990') retvalue \n from v$session where username is not null \n and status='ACTIVE' '''\n self.cur.execute(sql)\n res = self.cur.fetchall()\n for i in res:\n print(i[0])", "def AddUser(self, usercount, user):\n for i in range(usercount):\n login = string.replace(user[i]['Login'], ' ', '')\n home = self.__homeprefix + login[0] + '/' + login\n action = 'userman -A ' + login + ' -p ' + user[i]['Passwd'] + ' -u ' + str(user[i]['UID']) + \\\n ' -g ' + str(user[i]['GID']) + ' -H ' + home + ' -s ' + user[i]['Shell'] \n output = commands.getstatusoutput(action)\n print output\n updatecount, update = self.__sqlData[\"UPDATE AccUser SET ToDo = 0 WHERE Login = '%s'\" % (login)]", "def number_of_atoms(self, value):\n self._number_of_atoms = value", "def node_count(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"node_count\")", "def node_count(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"node_count\")", "def GetPerVendorLocationCount(self):\n regionVectorData = self.VectorData[self.SelectedRegion]\n return regionVectorData['PerVendorLocations']", "def num_servos(self) -> int:\n return self._num_servos", "def 
getSessionCount(self):\n logger.debug('Getting the number of sessions discovered...')\n return get_text(get_element_by_css(\"span[data-nsmodule='sessionsdiscovered']\"))", "def utilization(user, ressource):\n if ressource == 'accounts':\n return Account.objects.filter(vhost__in=list(get_vhosts(user))).count()\n return None", "def count_total_each_user():\r\n trans = transaction.begin()\r\n user_list = UserMgr.get_list(active=True)\r\n for user in user_list:\r\n StatBookmarkMgr.count_user_bookmarks(user.username)\r\n trans.commit()", "def people_count(self):\n return len(self.__users)", "def snmpqosqos_sch_sessions_regulated_count(self) :\n\t\ttry :\n\t\t\treturn self._snmpqosqos_sch_sessions_regulated_count\n\t\texcept Exception as e:\n\t\t\traise e", "def GetCount(self, label: 'unsigned short') -> \"unsigned long long\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIUC3IUS3_GetCount(self, label)", "def get_uplink_cnt(self) -> int:\n try:\n self._serial.transmit(b'\\x53\\x00')\n response = self._get_reply(0x53, 4, 0.25)\n finally:\n self._gpio.sleep()\n\n return int.from_bytes(response[2:6], 'little', signed=False)", "def get_user_count(self):\n done = self.cur.execute(\"SELECT username FROM users\")\n return done", "def _set_echo_req_received_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-req-received-count\", rest_name=\"echo-req-received-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"echo_req_received_count must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-req-received-count\", rest_name=\"echo-req-received-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__echo_req_received_count = t\n if hasattr(self, '_set'):\n self._set()", "def wordcount(self):\n return int(self._fetch_element('user_wordcount'))", "def GetCount(self, label: 'unsigned short') -> \"unsigned long long\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIUS2IUS2_GetCount(self, label)", "def node_count(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"node_count\")", "def node_count(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"node_count\")", "def node_count(self) -> int:\n return int(self.graph_tuple_stats.node_count or 0)", "def get_otu_counts(fpath):\r\n\r\n try:\r\n otu_table = parse_biom_table(open(fpath, 'U'))\r\n except (TypeError, IOError):\r\n raise MissingFileError('OTU table file required for this analysis')\r\n\r\n if (otu_table.ObservationMetadata is None or\r\n otu_table.ObservationMetadata[0]['taxonomy'] is None):\r\n raise ValueError(\r\n '\\n\\nThe lineages are missing from the OTU table. Make sure you included the lineages for the OTUs in your OTU table. 
\\n')\r\n\r\n return otu_table", "def run_and_return_counts(self, user, threshold, shots=10000):\n circuit = self.create_circuit(user, threshold, measurements=True, logical_barriers=False)\n\n job = execute(circuit, BasicAer.get_backend(\"qasm_simulator\"), shots=shots)\n\n results = job.result()\n\n return results.get_counts()", "def get_top_users_total_distance_on_foot(self, number: int, offset: int = 0):\n return self._get_top_users_total_distance([ActivityType.Run, ActivityType.Walk], number, offset)", "def get_users_count(khoros_object, registered=False, online=False):\n if all((registered, online)):\n raise errors.exceptions.InvalidParameterError('You can only select registered or online users but not both.')\n if registered:\n user_count = get_registered_users_count(khoros_object)\n elif online:\n user_count = get_online_user_count(khoros_object)\n else:\n user_count = get_all_users_count(khoros_object)\n return user_count", "def vsvrcount(self) :\n\t\ttry :\n\t\t\treturn self._vsvrcount\n\t\texcept Exception as e:\n\t\t\traise e", "def uracil_count(RNAsequence):\n uracil = 0\n for nucleotide in RNAsequence:\n if nucleotide == 'U':\n uracil += 1\n return uracil", "def get_historic_users_count():\n return User.objects.all().count()", "def getThreadCountForSLRUMNode(hostname, errStream):\n qhcmd = [\"sinfo\",\"-n\", hostname, \"-o\", '\"%15N %10c\"']\n process=subprocess.Popen(qhcmd,stdout=subprocess.PIPE)\n sinfoCPUsRE = re.compile(r'^\\S+\\s+(\\d+)')\n qhout=\"\"\n for line in process.stdout:\n qhout+=line\n m=sinfoCPUsRE.search(line)\n if m:\n slots = int(m.group(1))\n logging.debug(\"Node %s has %d slots\" % (hostname, slots))\n break\n else:\n slots=8\n logging.warning(\"Could not parse sinfo output:\\n%s\" % (qhout))\n return slots", "def ir_count(self) -> int:\n return int(self.graph_tuple_stats.ir_count or 0)", "def _set_echo_req_sent_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-req-sent-count\", rest_name=\"echo-req-sent-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"echo_req_sent_count must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-req-sent-count\", rest_name=\"echo-req-sent-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__echo_req_sent_count = t\n if hasattr(self, '_set'):\n self._set()", "def count_user_trips(session_user_id):\n\n all_trips = Trip.query.filter(Trip.user_id == session_user_id).order_by(Trip.start_date).all()\n\n future_trips = 0\n past_trips = 0\n future_trips_list = []\n past_trips_list = []\n todays_date = datetime.now()\n\n for trip in all_trips:\n if trip.start_date <= todays_date:\n past_trips += 1\n past_trips_list.append(trip)\n else:\n future_trips += 1\n 
future_trips_list.append(trip)\n return [(future_trips, past_trips), (future_trips_list, past_trips_list)]", "def GetCount(self, label: 'unsigned short') -> \"unsigned long long\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIUC2IUS2_GetCount(self, label)", "def action_session_user_stats(args, config, db, wdb):\n\n wdb.execute('''CREATE OR REPLACE VIEW analysis_session_users AS\n (SELECT DISTINCT\n analysis_session_requests.session_id as session_id,\n analysis_requestlog_combined.user_sid as user_sid\n FROM analysis_requestlog_combined, analysis_session_requests\n WHERE analysis_requestlog_combined.id = analysis_session_requests.request_id\n )\n ''')\n wdb.commit()\n\n # How many sessions did each user have?\n wdb.execute('''CREATE OR REPLACE VIEW analysis_session_count_per_user AS (\n SELECT\n analysis_session_users.user_sid,\n count(analysis_session_users.session_id) as session_count\n FROM analysis_session_users, user\n WHERE analysis_session_users.user_sid = user.user_name\n GROUP BY analysis_session_users.user_sid\n );''')\n wdb.commit()\n\n user_ids = db.simple_query('SELECT user_sid FROM analysis_session_users')\n sessions_per_user = collections.Counter(user_ids)\n sessions_per_user['anonymous'] = sessions_per_user[None]\n del sessions_per_user[None]\n\n write_data('user_session_counts', {\n 'data': dict(sessions_per_user.most_common()),\n })\n reverse_counts = collections.Counter(\n sessions_per_user.values()).most_common()\n write_data('user_session_counts_reverse', {\n 'data': list(reverse_counts),\n })", "def spine_switch_count(self):\n spines = GetSwitchDetails().get_spine_switch_ip()\n totalSpines = len(spines)\n return totalSpines", "def count(cls, client) :\n try :\n obj = nshttpprofile()\n option_ = options()\n option_.count = True\n response = obj.get_resources(client, option_)\n if response :\n return response[0].__dict__['___count']\n return 0\n except Exception as e :\n raise e" ]
[ "0.69384295", "0.67749554", "0.55161387", "0.5156745", "0.5149116", "0.51029897", "0.506374", "0.5044057", "0.5044057", "0.4962718", "0.49115133", "0.49114674", "0.48995262", "0.48913693", "0.48689324", "0.4850962", "0.48443764", "0.4837305", "0.48038968", "0.47544253", "0.4691356", "0.4687675", "0.46174464", "0.45912525", "0.45411113", "0.44826928", "0.44712198", "0.44477135", "0.4445319", "0.44297427", "0.44274655", "0.44230044", "0.44151264", "0.44101557", "0.43753955", "0.43603256", "0.43583655", "0.435159", "0.43329164", "0.43299034", "0.4311151", "0.43086678", "0.4303956", "0.42971116", "0.42734495", "0.42686528", "0.42610708", "0.42592016", "0.42567524", "0.42534953", "0.42493144", "0.42450374", "0.42198968", "0.42125177", "0.4210847", "0.4194536", "0.4191575", "0.41877377", "0.41806024", "0.41796684", "0.41713867", "0.416404", "0.4162123", "0.41620755", "0.41546264", "0.41503036", "0.4148259", "0.41447106", "0.41447106", "0.41328487", "0.4132502", "0.41299745", "0.41275775", "0.41248414", "0.41232964", "0.41230366", "0.4120342", "0.41124374", "0.4111938", "0.40971234", "0.40952682", "0.4081633", "0.40680808", "0.40680808", "0.4061064", "0.4060536", "0.405609", "0.40521696", "0.40520197", "0.40482455", "0.40437365", "0.40390494", "0.4038573", "0.4038293", "0.40376285", "0.40344575", "0.40331894", "0.40327895", "0.40267086", "0.40258455" ]
0.88829786
0
Getter method for echo_req_sent_count, mapped from YANG variable /mpls_state/statistics_oam/echo_req_sent_count (uint32)
def _get_echo_req_sent_count(self): return self.__echo_req_sent_count
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _set_echo_req_sent_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-req-sent-count\", rest_name=\"echo-req-sent-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"echo_req_sent_count must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-req-sent-count\", rest_name=\"echo-req-sent-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__echo_req_sent_count = t\n if hasattr(self, '_set'):\n self._set()", "def _set_echo_resp_sent_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-resp-sent-count\", rest_name=\"echo-resp-sent-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"echo_resp_sent_count must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-resp-sent-count\", rest_name=\"echo-resp-sent-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__echo_resp_sent_count = t\n if hasattr(self, '_set'):\n self._set()", "def _set_echo_req_received_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-req-received-count\", rest_name=\"echo-req-received-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"echo_req_received_count must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-req-received-count\", rest_name=\"echo-req-received-count\", parent=self, 
path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__echo_req_received_count = t\n if hasattr(self, '_set'):\n self._set()", "def _get_echo_resp_sent_count(self):\n return self.__echo_resp_sent_count", "def _get_echo_req_received_count(self):\n return self.__echo_req_received_count", "def _set_echo_req_timeout_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-req-timeout-count\", rest_name=\"echo-req-timeout-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"echo_req_timeout_count must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-req-timeout-count\", rest_name=\"echo-req-timeout-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__echo_req_timeout_count = t\n if hasattr(self, '_set'):\n self._set()", "def sent_count(comment):\n return comment.__len__()", "def getNumOfMsgSend(self):\n return self.MsgSendCount", "def _set_echo_resp_received_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-resp-received-count\", rest_name=\"echo-resp-received-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"echo_resp_received_count must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-resp-received-count\", rest_name=\"echo-resp-received-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__echo_resp_received_count = t\n if hasattr(self, '_set'):\n self._set()", "def _get_echo_resp_received_count(self):\n return self.__echo_resp_received_count", "def sent_count(self):\n count = []\n for i in tqdm(self.text):\n count.append(len(sent_tokenize(i)))\n return count", "def _get_echo_req_timeout_count(self):\n return self.__echo_req_timeout_count", "def TriggeredVendorMessageLength(self):\n\t\treturn 
self._get_attribute('triggeredVendorMessageLength')", "def get_message_count(self):\n return self.buffer.count()", "def message_count(self):\n return self._message_count", "def get_order_count(self):\n resp = self.app.get('/orders')\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n data = json.loads(resp.data)\n return len(data)", "def VendorMessageLength(self):\n\t\treturn self._get_attribute('vendorMessageLength')", "def count_chat_with(self, actor_label):\n query = read_query('trust/count_chat_with') % actor_label\n response = self._submit_query(query)\n\n return response[0]['num_chats']['value'].split('/')[-1] if response != [] else ''", "def n_sents(doc: Doc) -> int:\n if not doc.has_annotation(\"SENT_START\"):\n LOGGER.warning(\n \"`doc` has not been segmented into sentences; applying spaCy's rule-based, \"\n \"`Sentencizer` pipeline component to `doc` before counting...\"\n )\n doc = _SENTENCIZER(doc)\n return itertoolz.count(doc.sents)", "def sentence_count(self, doc):\n\n return len(sent_tokenize(doc))", "def message_count(self):\n return len(self.messages)", "def count_likes(self):\n likes = self.event_likes\n num_likes = len(likes)\n return num_likes", "def message_count(self):\n pass", "def agent_count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"agent_count\")", "def count_simsimi_msg(db):\n try:\n count = db.get('simsimi_info')['qty_answed_message']\n except:\n count = 1\n return count", "def num_requests_sent(self):\n return dict(self._requests_count)", "def response_count(self):\n return self.responses.count()", "def CountFlowLogEntries(self, client_id, flow_id):\n return len(self.ReadFlowLogEntries(client_id, flow_id, 0, sys.maxsize))", "def CountFlowResults(self, client_id, flow_id, with_tag=None, with_type=None):\n return len(\n self.ReadFlowResults(\n client_id,\n flow_id,\n 0,\n sys.maxsize,\n with_tag=with_tag,\n with_type=with_type))", "def orders_count(self):\n return Order.objects.filter(email=self.email).count()", "def op_count(cls, crawler, stage=None):\n if stage:\n total_ops = cls.conn.get(make_key(crawler, stage))\n else:\n total_ops = cls.conn.get(make_key(crawler, \"total_ops\"))\n return unpack_int(total_ops)", "def count(self):\n return len(self._request_sessions)", "def count_words(sent):\n words = word_tokenize(sent)\n return len(words)", "def word_count(excerpt):\n # Validate that we are actually give something to work with\n assert excerpt, \"excerpt cannot be blank\"\n return Counter(excerpt.split())", "def count(self, page_size=10, vtimeout=10):\r\n a = self.get_attributes('ApproximateNumberOfMessages')\r\n return int(a['ApproximateNumberOfMessages'])", "def getNumOfMessagesToSend(self):\n return len(self.SendMessageBuffer)", "def getNumod_byerSentences(self): # !!! 
Need to rewrite this to match graph\n return len(self.__document)", "def data_flow_positive_node_count_avg(self) -> Optional[int]:\n if self.has_data_flow:\n return int(self.graph_tuple_stats.data_flow_positive_node_count_avg or 0)", "def _get_num_sentences(doc: Doc, min_sen_length=5):\n return len([sent for sent in list(doc.sents) if len(sent.text.strip())>min_sen_length])", "def __sent_len(self, title, text):\n total = 0\n text_sent = nltk.sent_tokenize(text)\n for sent in text_sent:\n total += len(nltk.word_tokenize(sent))\n return (len(nltk.word_tokenize(title)), total / len(text_sent))", "def count(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"count\")", "def count(self):\n\n return self._get(\"count\", rtype=UInt)", "def sent_id(self):\n return self._sent_id", "def __len__(self):\n return len(self.sent)", "def get_session_count(self, network_tuple: NetworkTuple) -> int:\n return len(self.network_tuple_stream_id[network_tuple])", "def count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"count\")", "def count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"count\")", "def count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"count\")", "def count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"count\")", "def count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"count\")", "def count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"count\")", "def count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"count\")", "def count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"count\")", "def get_total_session_count(self) -> int:\n return self.streams_count", "def sentence_count(self):\n count = 0\n for line in self.lines:\n if '.' 
in line:\n count += 1\n if count == 0:\n count = 1\n return count\n #return line.count('.')\n #else:\n #return 1", "def __len__(self):\n response = self._rpc(self._declare(True))\n return response.message_count", "def accelerator_count(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"accelerator_count\")", "def client_count(request):\n return request.param", "def get_total_n_events(self):\n\n return self._total_n_processed_events", "def net_stat_sent(x, interface=None):\n if not interface:\n interface = get_netiface()\n\n if interface:\n return psutil.net_io_counters(pernic=True)[interface].bytes_sent\n else:\n return 0", "def sent_len(self) -> int:\n raise NotImplementedError(\"must be implemented by subclasses\")", "def vertex_count(self):\n return len(self._outgoing)", "def test_nmap_icmp_echo_request(self):\n assert_equal(self.test_nmap.ICMP_ECHO_REQUEST, 8)", "def count(self):\n return len(self.order_items)", "def get_count(self):\n return unpack(os.read(self.fd, 8))", "def coauthor_count(self):\n return self._json.get('coauthor-count', '0')", "def get_count(self, asset=None):\n if asset is None or 'pc:count' not in asset.properties:\n return self.item.properties.get('pc:count')\n else:\n return asset.properties.get('pc:count')", "def count_messages(self, statuses=DEFAULT_MESSAGE_STATUSES):\n return self.request(\"count:Message\", [{\"status\": statuses}])", "def count(self):\n return len(self.order_lst)", "def get_shown_responses_text(self):\r\n return self._get_element_text(\".response-display-count\")", "def get_unaccepted_event_requests_count(self, staff_id):\n return self.get_unaccepted_event_requests(staff_id).__len__()", "def get_sents_length(tokenized_sents):\n return [len(sent) for sent in tokenized_sents]", "def response_count(self) -> int:\n return pulumi.get(self, \"response_count\")", "def get_customer_orders_count(customerId):\n data = user_obj.get_customer_orders(customerId,\"1\")\n return data", "def getAppCount(self):\n logger.debug('Getting the number of apps discovered...')\n return get_text(get_element_by_css(\"span[data-nsmodule='totalAppCount']\"))", "async def sqs_count(self, _) -> Response:\n message = {\n \"message_count\": self.sqs.count()\n }\n return Response(text=json.dumps(message))", "def n_envelopes(self):\n return RPR.CountTakeEnvelopes(self.id)", "def send_natoms(self) -> int:\n natom = len(self.molecule.geometry)\n MDI_Send(natom, 1, MDI_INT, self.comm)\n return natom", "def number_of_on_calls(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"number_of_on_calls\")", "def event_count(self):\n\n with self.lock:\n return self.num_events", "def count(self):\n \n return self._count", "def count(self):\n return len(self._commands)", "def edge_count(self):\n total = sum(len(self._outgoing[v]) for v in self._outgoing)\n # for undirected graphs, make sure not to double-count edges\n return total if self.is_directed() else total // 2", "def compute_mean_response_length(self):\n mean_response_length = 0\n for row in self.responses:\n mean_response_length += len(row.response)\n return round(mean_response_length / len(self.responses), 2)", "def getNumOfMsgSend_interval(self):\n return self.MsgSendCount_interval", "def count(self):\n return len(self.read_ints())", "def test_requests_num(self):\n\n requests_num = len(self.response.context['requests'])\n self.assertLessEqual(requests_num, 10)", "def message_len(self):\n # expect F, use zero\n return len(self.message) if self.message else 0", "def _minimal_pixel_count(conf_dict, 
message):\n # meaningful_channels_amount = list(conf_dict.values()).count(True)\n frequency = conf_dict['frequency']\n return len(message) + ((frequency - 1) * (len(message) - 1))", "def __get_count(self):\n if self.__count is None:\n if self.optional_count_query_set is None:\n self.optional_count_query_set = self.query_set.order_by(None)\n count_query = self.optional_count_query_set.statement.with_only_columns([func.count()])\n self.__count = self.optional_count_query_set.session.execute(count_query).scalar()\n return self.__count", "def get_ev_count(self, stmt):\n return self.get_ev_count_by_hash(stmt.get_hash(shallow=True))", "def document_count(self):\n return self._json['coredata'].get('document-count', '0')", "def count(self):\n return self.ming_cursor.count()", "def get_message_length(self):\n return len(self._payload)", "def get_total_sentences(self):\n\t\t\n\t\t# loop through batches and add up all their individual sentence counts\n\t\ttotal_sentences = 0\n\t\tfor batch in self.batch_stats:\n\t\t\ttotal_sentences += self.batch_stats[batch].total_sentences\n\t\treturn total_sentences", "def channel_count(self):\n index = self._ordered_input_names.index('channel_count')\n return self._inputs[index]", "def GetCount(self):\n return self._server.get_count()", "def get_count(self):\n return self._count", "def get_sent_messages(self):\n return self.sent_messages", "def count(ev):\n profData = getProfilingData(ev)\n if profData is not None:\n return profData.Count()\n return \"\"" ]
[ "0.7922508", "0.6860566", "0.67645717", "0.65473276", "0.64664423", "0.5968414", "0.5861087", "0.5787943", "0.5689521", "0.56807286", "0.5430021", "0.54220355", "0.5201689", "0.5130782", "0.49798772", "0.4948916", "0.48980132", "0.48834348", "0.4872083", "0.48253825", "0.48244604", "0.47584635", "0.47438982", "0.47157636", "0.47141638", "0.47054735", "0.46999666", "0.4631265", "0.4605633", "0.45964065", "0.45767012", "0.4576095", "0.45721778", "0.45126566", "0.45060614", "0.45025125", "0.44958854", "0.4495488", "0.4477669", "0.4475004", "0.44735527", "0.44722134", "0.44712415", "0.44669974", "0.44601408", "0.445253", "0.445253", "0.445253", "0.445253", "0.445253", "0.445253", "0.445253", "0.445253", "0.44521877", "0.4450153", "0.44464755", "0.44395322", "0.44248003", "0.44190526", "0.44078746", "0.43981123", "0.4392253", "0.43825153", "0.43726408", "0.43700013", "0.4360839", "0.43561998", "0.4354514", "0.43411282", "0.43403795", "0.43342623", "0.43321356", "0.43272263", "0.4321179", "0.43187147", "0.4317717", "0.43141267", "0.43125904", "0.43118113", "0.43074125", "0.4299926", "0.4295157", "0.4292532", "0.42811108", "0.4278375", "0.42774647", "0.42755902", "0.42732438", "0.42707655", "0.4269349", "0.426852", "0.42677054", "0.42673326", "0.42644486", "0.42621097", "0.42519093", "0.42446578", "0.4241157", "0.4236034", "0.42312887" ]
0.7223332
1
Setter method for echo_req_sent_count, mapped from YANG variable /mpls_state/statistics_oam/echo_req_sent_count (uint32)
def _set_echo_req_sent_count(self, v, load=False): if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="echo-req-sent-count", rest_name="echo-req-sent-count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False) except (TypeError, ValueError): raise ValueError({ 'error-string': """echo_req_sent_count must be of a type compatible with uint32""", 'defined-type': "uint32", 'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="echo-req-sent-count", rest_name="echo-req-sent-count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)""", }) self.__echo_req_sent_count = t if hasattr(self, '_set'): self._set()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _set_echo_resp_sent_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-resp-sent-count\", rest_name=\"echo-resp-sent-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"echo_resp_sent_count must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-resp-sent-count\", rest_name=\"echo-resp-sent-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__echo_resp_sent_count = t\n if hasattr(self, '_set'):\n self._set()", "def _set_echo_req_received_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-req-received-count\", rest_name=\"echo-req-received-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"echo_req_received_count must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-req-received-count\", rest_name=\"echo-req-received-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__echo_req_received_count = t\n if hasattr(self, '_set'):\n self._set()", "def _get_echo_req_sent_count(self):\n return self.__echo_req_sent_count", "def _set_echo_req_timeout_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-req-timeout-count\", rest_name=\"echo-req-timeout-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"echo_req_timeout_count must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, 
yang_name=\"echo-req-timeout-count\", rest_name=\"echo-req-timeout-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__echo_req_timeout_count = t\n if hasattr(self, '_set'):\n self._set()", "def _set_echo_resp_received_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-resp-received-count\", rest_name=\"echo-resp-received-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"echo_resp_received_count must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-resp-received-count\", rest_name=\"echo-resp-received-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__echo_resp_received_count = t\n if hasattr(self, '_set'):\n self._set()", "def _get_echo_resp_sent_count(self):\n return self.__echo_resp_sent_count", "def _get_echo_req_received_count(self):\n return self.__echo_req_received_count", "def sent_count(comment):\n return comment.__len__()", "def getNumOfMsgSend(self):\n return self.MsgSendCount", "def sent_count(self):\n count = []\n for i in tqdm(self.text):\n count.append(len(sent_tokenize(i)))\n return count", "def _get_echo_resp_received_count(self):\n return self.__echo_resp_received_count", "def _get_echo_req_timeout_count(self):\n return self.__echo_req_timeout_count", "def n_sents(doc: Doc) -> int:\n if not doc.has_annotation(\"SENT_START\"):\n LOGGER.warning(\n \"`doc` has not been segmented into sentences; applying spaCy's rule-based, \"\n \"`Sentencizer` pipeline component to `doc` before counting...\"\n )\n doc = _SENTENCIZER(doc)\n return itertoolz.count(doc.sents)", "def set_number_of_sentences(self):\n self.number_of_sentences = int(self.num_sentences.get())", "def TriggeredVendorMessageLength(self):\n\t\treturn self._get_attribute('triggeredVendorMessageLength')", "def sentence_count(self, doc):\n\n return len(sent_tokenize(doc))", "def count_chat_with(self, actor_label):\n query = read_query('trust/count_chat_with') % actor_label\n response = self._submit_query(query)\n\n return response[0]['num_chats']['value'].split('/')[-1] if response != [] else ''", "def get_message_count(self):\n return self.buffer.count()", "def message_count(self):\n return self._message_count", "def message_count(self):\n pass", "def count_likes(self):\n likes = self.event_likes\n num_likes = len(likes)\n return num_likes", "def VendorMessageLength(self):\n\t\treturn self._get_attribute('vendorMessageLength')", "def num_requests_sent(self):\n return dict(self._requests_count)", "def _get_num_sentences(doc: Doc, min_sen_length=5):\n return 
len([sent for sent in list(doc.sents) if len(sent.text.strip())>min_sen_length])", "def message_count(self):\n return len(self.messages)", "def response_count(self):\n return self.responses.count()", "def count(self, page_size=10, vtimeout=10):\r\n a = self.get_attributes('ApproximateNumberOfMessages')\r\n return int(a['ApproximateNumberOfMessages'])", "def count(self, value):\n \n self._count = int(value)", "def agent_count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"agent_count\")", "def set_Count(self, value):\n super(MoneyReceivedInputSet, self)._set_input('Count', value)", "def word_count(excerpt):\n # Validate that we are actually give something to work with\n assert excerpt, \"excerpt cannot be blank\"\n return Counter(excerpt.split())", "def count_words(sent):\n words = word_tokenize(sent)\n return len(words)", "def getNumod_byerSentences(self): # !!! Need to rewrite this to match graph\n return len(self.__document)", "def getNumOfMessagesToSend(self):\n return len(self.SendMessageBuffer)", "def __sent_len(self, title, text):\n total = 0\n text_sent = nltk.sent_tokenize(text)\n for sent in text_sent:\n total += len(nltk.word_tokenize(sent))\n return (len(nltk.word_tokenize(title)), total / len(text_sent))", "def CountFlowResults(self, client_id, flow_id, with_tag=None, with_type=None):\n return len(\n self.ReadFlowResults(\n client_id,\n flow_id,\n 0,\n sys.maxsize,\n with_tag=with_tag,\n with_type=with_type))", "def sent_len(self) -> int:\n raise NotImplementedError(\"must be implemented by subclasses\")", "def count(self):\n return len(self._request_sessions)", "def get_order_count(self):\n resp = self.app.get('/orders')\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n data = json.loads(resp.data)\n return len(data)", "def CountFlowLogEntries(self, client_id, flow_id):\n return len(self.ReadFlowLogEntries(client_id, flow_id, 0, sys.maxsize))", "def count(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"count\")", "def request_sent(self):\n self._sent += 1", "def orders_count(self):\n return Order.objects.filter(email=self.email).count()", "def get_total_session_count(self) -> int:\n return self.streams_count", "def count_simsimi_msg(db):\n try:\n count = db.get('simsimi_info')['qty_answed_message']\n except:\n count = 1\n return count", "def guests_counter(window, n_guests):\r\n window.write_event_value('-COUNT-', n_guests)", "def sentence_count(self):\n count = 0\n for line in self.lines:\n if '.' 
in line:\n count += 1\n if count == 0:\n count = 1\n return count\n #return line.count('.')\n #else:\n #return 1", "def count(self, ngram, options):\n return len(self.find_docs(ngram, options))", "async def on_count(ctx):\n count = get_count()\n await ctx.send(f'current count {count}')", "def op_count(cls, crawler, stage=None):\n if stage:\n total_ops = cls.conn.get(make_key(crawler, stage))\n else:\n total_ops = cls.conn.get(make_key(crawler, \"total_ops\"))\n return unpack_int(total_ops)", "def count_messages(self, statuses=DEFAULT_MESSAGE_STATUSES):\n return self.request(\"count:Message\", [{\"status\": statuses}])", "def vertex_count(self):\n return len(self._outgoing)", "def sent_id(self):\n return self._sent_id", "def message_count(self, message_count):\r\n\r\n self._message_count = message_count", "def count(self):\n\n return self._get(\"count\", rtype=UInt)", "def test_requests_num(self):\n\n requests_num = len(self.response.context['requests'])\n self.assertLessEqual(requests_num, 10)", "def response_count(self) -> int:\n return pulumi.get(self, \"response_count\")", "def __len__(self):\n return len(self.sent)", "async def connected_user_count(self, event):\n\t\t# Send a message down to the client\n\t\tprint(\"DocumentChatConsumer: connected_user_count: count: \" + str(event[\"connected_user_count\"]))\n\t\tawait self.send_json(\n\t\t\t{\n\t\t\t\t\"msg_type\": MSG_TYPE_CONNECTED_USER_COUNT,\n\t\t\t\t\"connected_user_count\": event[\"connected_user_count\"]\n\t\t\t},\n\t\t)", "def coauthor_count(self):\n return self._json.get('coauthor-count', '0')", "async def sqs_count(self, _) -> Response:\n message = {\n \"message_count\": self.sqs.count()\n }\n return Response(text=json.dumps(message))", "def count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"count\")", "def count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"count\")", "def count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"count\")", "def count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"count\")", "def count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"count\")", "def count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"count\")", "def count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"count\")", "def count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"count\")", "def send_req(self):\n self.n_send_req += 1", "def count(self):\n with self._block:\n counter = re.search(r'count=(\\d+) ', repr(self))\n return int(counter.group(1))", "def test_nmap_icmp_echo_request(self):\n assert_equal(self.test_nmap.ICMP_ECHO_REQUEST, 8)", "def count(self) -> int:\n return self._count", "def count(self) -> int:\n return self._count", "def count(self) -> int:\n return self._count", "def client_count(request):\n return request.param", "def get_total_n_events(self):\n\n return self._total_n_processed_events", "def get_unaccepted_event_requests_count(self, staff_id):\n return self.get_unaccepted_event_requests(staff_id).__len__()", "def get_session_count(self, network_tuple: NetworkTuple) -> int:\n return len(self.network_tuple_stream_id[network_tuple])", "async def connected_users_count(self, event):\n print(\"PublicChatConsumer\", \"connected_users_count\",\n event[\"connected_users_count\"])\n await self.send_json({\n \"msg_type\": MSG_TYPE_CONNECTED_USERS_COUNT,\n \"connected_users_count\": event[\"connected_users_count\"]\n })", "def n_envelopes(self):\n return 
RPR.CountTakeEnvelopes(self.id)", "def count(self):\n return len(self.order_lst)", "def count(self):\n return len(self.order_items)", "def set_count(self, count, asset=None):\n self._set_property('pc:count', count, asset)", "def message_count_limit(self) -> ConfigNodePropertyInteger:\n return self._message_count_limit", "def count(self):\n \n return self._count", "def number_of_on_calls(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"number_of_on_calls\")", "def compute_vocab_count(sents):\n counter = collections.Counter()\n for sentence in sents:\n counter.update(untag(sentence))\n return counter", "async def count(self, **kw):\n\n pass", "def contact_count(self, *args, **kwargs) -> Any:\n pass", "async def _count(\n self, ctx: Context, user: discord.Member, channel: discord.TextChannel = None\n ):\n\n if not channel:\n channel = ctx.channel\n\n count = 0\n async with ctx.typing():\n async for message in channel.history(limit=None):\n if message.author.id == user.id:\n count += 1\n\n await ctx.send(_(\n \"{} has sent **{}** messages in {} channel.\"\n ).format(user.name, count, channel.mention))", "def _minimal_pixel_count(conf_dict, message):\n # meaningful_channels_amount = list(conf_dict.values()).count(True)\n frequency = conf_dict['frequency']\n return len(message) + ((frequency - 1) * (len(message) - 1))", "def SessionCount(self):\n if self.force_auto_sync:\n self.get('SessionCount')\n return self._SessionCount", "def getNumOfMsgSend_interval(self):\n return self.MsgSendCount_interval", "def accelerator_count(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"accelerator_count\")", "def count(self):\n return len(self._commands)", "def size(self):\n return len(self.sentence)", "def channel_count(self):\n index = self._ordered_input_names.index('channel_count')\n return self._inputs[index]", "def num_servos(self) -> int:\n return self._num_servos", "def snmpqosqos_sch_sessions_byte_count(self) :\n\t\ttry :\n\t\t\treturn self._snmpqosqos_sch_sessions_byte_count\n\t\texcept Exception as e:\n\t\t\traise e" ]
[ "0.7546443", "0.74266785", "0.69616437", "0.66970843", "0.6288053", "0.6279342", "0.6064423", "0.57884806", "0.5556439", "0.5305215", "0.52710307", "0.5181049", "0.50654066", "0.50303006", "0.50086975", "0.49919927", "0.48444322", "0.4777001", "0.4770685", "0.4731099", "0.46893677", "0.46591547", "0.4655248", "0.46507818", "0.46184546", "0.4587416", "0.45654553", "0.4543196", "0.4541121", "0.4517093", "0.45088357", "0.45074433", "0.44689098", "0.44656107", "0.44369212", "0.44352213", "0.44265547", "0.44210032", "0.44175372", "0.44023955", "0.44005385", "0.4393656", "0.43924198", "0.43792027", "0.4350886", "0.43446365", "0.43424782", "0.4341587", "0.4339208", "0.43348798", "0.43042028", "0.42842874", "0.42824385", "0.4275536", "0.42742005", "0.42668137", "0.42665312", "0.42650408", "0.424851", "0.42413622", "0.42408073", "0.42356768", "0.42356768", "0.42356768", "0.42356768", "0.42356768", "0.42356768", "0.42356768", "0.42356768", "0.42350787", "0.42275453", "0.42242357", "0.42204395", "0.42204395", "0.42204395", "0.42197573", "0.42125568", "0.42087218", "0.4205307", "0.41920185", "0.41897553", "0.41885054", "0.4187528", "0.41844806", "0.41824955", "0.4181152", "0.41809347", "0.4179537", "0.41710603", "0.41701728", "0.41699716", "0.41683313", "0.41647932", "0.41637945", "0.41625756", "0.4159994", "0.415758", "0.4155706", "0.41553175", "0.41461593" ]
0.86451477
0
Getter method for echo_req_received_count, mapped from YANG variable /mpls_state/statistics_oam/echo_req_received_count (uint32)
def _get_echo_req_received_count(self): return self.__echo_req_received_count
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _set_echo_req_received_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-req-received-count\", rest_name=\"echo-req-received-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"echo_req_received_count must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-req-received-count\", rest_name=\"echo-req-received-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__echo_req_received_count = t\n if hasattr(self, '_set'):\n self._set()", "def _set_echo_resp_received_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-resp-received-count\", rest_name=\"echo-resp-received-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"echo_resp_received_count must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-resp-received-count\", rest_name=\"echo-resp-received-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__echo_resp_received_count = t\n if hasattr(self, '_set'):\n self._set()", "def _get_echo_resp_received_count(self):\n return self.__echo_resp_received_count", "def _set_echo_req_sent_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-req-sent-count\", rest_name=\"echo-req-sent-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"echo_req_sent_count must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, 
int_size=32), is_leaf=True, yang_name=\"echo-req-sent-count\", rest_name=\"echo-req-sent-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__echo_req_sent_count = t\n if hasattr(self, '_set'):\n self._set()", "def _set_echo_req_timeout_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-req-timeout-count\", rest_name=\"echo-req-timeout-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"echo_req_timeout_count must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-req-timeout-count\", rest_name=\"echo-req-timeout-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__echo_req_timeout_count = t\n if hasattr(self, '_set'):\n self._set()", "def _set_echo_resp_sent_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-resp-sent-count\", rest_name=\"echo-resp-sent-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"echo_resp_sent_count must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-resp-sent-count\", rest_name=\"echo-resp-sent-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__echo_resp_sent_count = t\n if hasattr(self, '_set'):\n self._set()", "def _get_echo_req_sent_count(self):\n return self.__echo_req_sent_count", "def _get_echo_resp_sent_count(self):\n return self.__echo_resp_sent_count", "def _get_echo_req_timeout_count(self):\n return self.__echo_req_timeout_count", "def get_message_count(self):\n return self.buffer.count()", "def getNumOfMsgRec(self):\n return self.MsgReceiveCount", "def get_kudos_received_count(khoros_object, user_settings=None, user_id=None, login=None, email=None):\n user_settings = _process_settings_and_user_id(khoros_object, user_settings, user_id, login, 
email)\n return _get_sum_weight(khoros_object, user_settings['id'], 'kudos_received')", "def message_count(self):\n return self._message_count", "def get_message_length(self):\n return len(self._payload)", "def message_count(self):\n return len(self.messages)", "def get_count(self):\n return unpack(os.read(self.fd, 8))", "def message_count(self):\n pass", "def TriggeredVendorMessageLength(self):\n\t\treturn self._get_attribute('triggeredVendorMessageLength')", "def get_order_count(self):\n resp = self.app.get('/orders')\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n data = json.loads(resp.data)\n return len(data)", "def getNumOfMsgSend(self):\n return self.MsgSendCount", "def count_chat_with(self, actor_label):\n query = read_query('trust/count_chat_with') % actor_label\n response = self._submit_query(query)\n\n return response[0]['num_chats']['value'].split('/')[-1] if response != [] else ''", "def VendorMessageLength(self):\n\t\treturn self._get_attribute('vendorMessageLength')", "def __len__(self):\n response = self._rpc(self._declare(True))\n return response.message_count", "def count(self):\n\n return self._get(\"count\", rtype=UInt)", "def message_len(self):\n # expect F, use zero\n return len(self.message) if self.message else 0", "def sent_count(comment):\n return comment.__len__()", "def message_length(self):\n return self._message_length", "def getLength(msg):\n return len(msg)", "def echo_reply_handler(self, ev):\n now_timestamp = time.time()\n try:\n latency = now_timestamp - eval(ev.msg.data)\n self.echo_latency[ev.msg.datapath.id] = latency\n except:\n return", "def read_count(self):\n return self._read_count", "def queue_size(self):\n # pylint: disable=protected-access\n if self._handler._received_messages:\n return self._handler._received_messages.qsize()\n return 0", "def get_number_of_reactions(self):\n return self._performed_actions[REACT]", "def message_count(self) -> int:\n return len(self._leased_messages)", "def _get_count(self, msg, subtype=\"all\"):\n try:\n counts = self.get_local(msg, \"counts\")\n return counts.get(subtype, 0)\n except KeyError:\n return 0", "def count_simsimi_msg(db):\n try:\n count = db.get('simsimi_info')['qty_answed_message']\n except:\n count = 1\n return count", "def count(self):\n return len(self.read_ints())", "def count_messages(self, statuses=DEFAULT_MESSAGE_STATUSES):\n return self.request(\"count:Message\", [{\"status\": statuses}])", "def _get_total_read_size(self):\n if self.read_size:\n read_size = EVENT_SIZE * self.read_size\n else:\n read_size = EVENT_SIZE\n return read_size", "def _read_length(self):\n msg_length = struct.unpack('!I', self.received_data[0])[0]\n self.l.debug('msg_length = %d', msg_length)\n self.set_terminator(msg_length)\n self.process_data = self._read_message\n self.received_data = []", "def agent_count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"agent_count\")", "def amount_of_receivers(self) -> int:\n return sum([1 for _ in self.receivers])", "def count(self, page_size=10, vtimeout=10):\r\n a = self.get_attributes('ApproximateNumberOfMessages')\r\n return int(a['ApproximateNumberOfMessages'])", "def read_num_lines(data_socket):\r\n size_bytes = b''\r\n for i in range(0, 4):\r\n size_bytes += next_byte(data_socket)\r\n return int.from_bytes(size_bytes, 'big')", "def getMessageCount(self):\n return 9", "def _read_len(self):\n\n read = self.socket.recv(4)\n if len(read) == 0:\n # if we read 0 bytes and self.message is empty, it means client\n # closed the connection\n if 
len(self.message) != 0:\n logging.error(\"can't read frame size from socket\")\n self.close()\n return\n self.message += read\n if len(self.message) == 4:\n self.len, = struct.unpack(b'!i', self.message)\n if self.len < 0:\n logging.error(\"negative frame size, it seems client\"\\\n \" doesn't use FramedTransport\")\n self.close()\n elif self.len == 0:\n logging.error(\"empty frame, it's really strange\")\n self.close()\n else:\n self.len += 4 # Include message length\n self._set_status(WAIT_MESSAGE)", "def github_num_reactions(comment_object: Dict[str, Any]) -> int:\n return comment_object.get('reactions', {}).get('total_count', 0)", "def get_number_of_messages(queue_name):\n queue = sqs.get_queue_by_name(QueueName=queue_name)\n return queue.attributes.get('ApproximateNumberOfMessages')", "def getNumOfMsgRec_interval(self):\n return self.MsgReceiveCount_interval", "def count_likes(self):\n likes = self.event_likes\n num_likes = len(likes)\n return num_likes", "def getNumOfMessagesToSend(self):\n return len(self.SendMessageBuffer)", "def get_error_number(message, received):\n stripped_message = message.replace(\" \", \"\")\n stripped_message = stripped_message.strip()\n errors = 0\n for idx in range(len(received)):\n if stripped_message[idx] != received[idx]:\n errors += 1\n return errors", "def response_count(self):\n return self.responses.count()", "def handle_echo(self, event):\n print('Echo received')\n return 0x0000", "def get_replies_count(khoros_object, user_settings=None, user_id=None, login=None, email=None):\n user_settings = _process_settings_and_user_id(khoros_object, user_settings, user_id, login, email)\n select_fields = ('messages.count(*)', 'topics.count(*)')\n api_response = query_users_table_by_id(khoros_object, select_fields, user_settings['id'])\n items_list = api.get_items_list(api_response)\n return int(items_list['messages']['count']) - int(items_list['topics']['count'])", "def count(self):\n self._read_keypad()\n return len(self._current_events)", "def getNumReactions(self):\n return _libsbml.Model_getNumReactions(self)", "def CountFlowLogEntries(self, client_id, flow_id):\n return len(self.ReadFlowLogEntries(client_id, flow_id, 0, sys.maxsize))", "def _get_count(awsclient, function_name, alias_name='ACTIVE', version=None):\n client_lambda = awsclient.get_client('lambda')\n payload = '{\"ramuda_action\": \"count\"}'\n\n if version:\n response = client_lambda.invoke(\n FunctionName=function_name,\n InvocationType='RequestResponse',\n Payload=payload,\n Qualifier=version\n )\n else:\n response = client_lambda.invoke(\n FunctionName=function_name,\n InvocationType='RequestResponse',\n Payload=payload,\n Qualifier=alias_name\n )\n\n # print type(response['Payload'])\n results = response['Payload'].read() # payload is a 'StreamingBody'\n return results", "def averaging_frame_count(self):\n fc = ct.c_uint()\n self.lib.Filter_GetAveragingFrameCount(ct.pointer(fc))\n return fc.value", "def message_count_limit(self) -> ConfigNodePropertyInteger:\n return self._message_count_limit", "def get_unaccepted_event_requests_count(self, staff_id):\n return self.get_unaccepted_event_requests(staff_id).__len__()", "def response_count(self) -> int:\n return pulumi.get(self, \"response_count\")", "def count(request):\r\n n = request.user.profile.unread_message_count()\r\n data = {\r\n 'count': n,\r\n }\r\n return HttpResponse(json.dumps(data), mimetype='application/json')", "def count(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"count\")", "def get_response_pdu_size(self):\n 
return 1 + 1 + 2 * self.count", "def GetCount(self):\n return self._server.get_count()", "def api_get_queue_len():\n try:\n ret = AppStatus.check_manager_status(brief=False)\n if ret is not None:\n return jsonify({\"total_queue_len\": ret.get(\"total_queue_len\", 0)})\n except Exception as e:\n logger.error(\"Traceback:\\n%s\", traceback.format_exc())\n abort(500, \"failed to send message or invalid manager response\")", "def get_msg_size(self):\n return self.MsgSize - self.header_size", "def GetTotalQueueCount(handler, query):\n # pylint: disable=unused-argument\n\n json_config = {}\n json_config['count'] = 0\n\n with active_tivos_lock:\n for tivoIP in active_tivos:\n with active_tivos[tivoIP]['lock']:\n json_config['count'] += len(active_tivos[tivoIP]['queue'])\n\n handler.send_json(json.dumps(json_config))", "def edge_count(self):\n total = sum(len(self._outgoing[v]) for v in self._outgoing)\n # for undirected graphs, make sure not to double-count edges\n return total if self.is_directed() else total // 2", "def count_response_codes():\n code = request.args.get('code', 200)\n log_lines = request.args.get('log_lines')\n\n if log_lines:\n lines_list = json.loads(log_lines)\n count = count_by_code(lines_list, code)\n else:\n count = 0\n\n response = str(count)\n return response", "def count_messages(queryset):\n messages = messaging.models.Message.objects.filter(thread__ad=OuterRef('pk')).only('pk')\n return queryset.annotate(message_count=core.utils.SubqueryCount(messages))", "def count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"count\")", "def count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"count\")", "def count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"count\")", "def count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"count\")", "def count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"count\")", "def count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"count\")", "def count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"count\")", "def count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"count\")", "def consumer_count(self, obj):\n return obj.get_or_set_consumer_count()", "def _read_len(self):\r\n read = self.socket.recv(4 - len(self.message))\r\n if len(read) == 0:\r\n # if we read 0 bytes and self.message is empty, it means client close \r\n # connection\r\n if len(self.message) != 0:\r\n logging.error(\"can't read frame size from socket\")\r\n self.close()\r\n return\r\n self.message += read\r\n if len(self.message) == 4:\r\n self.len, = struct.unpack('!i', self.message)\r\n if self.len < 0:\r\n logging.error(\"negative frame size, it seems client\"\\\r\n \" doesn't use FramedTransport\")\r\n self.close()\r\n elif self.len == 0:\r\n logging.error(\"empty frame, it's really strange\")\r\n self.close()\r\n else:\r\n self.message = ''\r\n self.status = WAIT_MESSAGE", "def queue_message_count(self, queue_name):\n queue_list = self.__session.getObjects(_class=\"queue\", _name=queue_name)\n if len(queue_list):\n return queue_list[0].msgDepth", "def op_count(cls, crawler, stage=None):\n if stage:\n total_ops = cls.conn.get(make_key(crawler, stage))\n else:\n total_ops = cls.conn.get(make_key(crawler, \"total_ops\"))\n return unpack_int(total_ops)", "def length(self):\n return struct.unpack('<B', self.pkt.payload[1:2])[0]", "def get_messages_count(khoros_object, user_settings=None, user_id=None, login=None, email=None):\n 
user_settings = _process_settings_and_user_id(khoros_object, user_settings, user_id, login, email)\n return _get_count(khoros_object, user_settings['id'], 'messages')", "def get_total_n_events(self):\n\n return self._total_n_processed_events", "def get_order_detail_count(orderid): \n data = order_obj.get_order_detail(orderid,\"1\")\n return data", "def length(self):\n return struct.unpack('<B', self.pkt.payload[2:3])[0]", "def length(self):\n return struct.unpack('<B', self.pkt.payload[2:3])[0]", "def observation_count(self):\n if not self.can_update():\n self._handle_error(910, [self.type])\n return self.tc_requests.observation_count(\n self.api_type, self.api_branch, self.unique_id, owner=self.owner\n )", "def get_unread_count(imap):\n status, messages = imap.select('Inbox')\n status, response = imap.uid('search', None, 'UNSEEN')\n unread_msg_nums = response[0].split()\n return len(unread_msg_nums)", "def _get_event_history_count(device_event_file_path, event_label, timeout=10.0):\n result = 0\n timedout = False\n\n file_exists, remaining_timeout = _wait_for_event_file(device_event_file_path,\n timeout)\n if not file_exists:\n timedout = True\n return result, timedout\n\n timeout_str = \"{:f}\".format(remaining_timeout)\n\n grep_cmd = [\n \"timeout\", timeout_str, \"grep\", \"-c\", \"-w\", event_label,\n device_event_file_path\n ]\n grep_proc = subprocess.Popen(grep_cmd, stdout=subprocess.PIPE)\n out, _ = grep_proc.communicate()\n if grep_proc.returncode == 124:\n timedout = True\n\n if out:\n result = int(out.strip())\n\n return result, timedout", "def stored_cookie_messages_count(storage, response):\n # Get a list of cookies, excluding ones with a max-age of 0 (because\n # they have been marked for deletion).\n cookie = response.cookies.get(storage.cookie_name)\n if not cookie or cookie['max-age'] == 0:\n return 0\n data = storage._decode(cookie.value)\n if not data:\n return 0\n return len(data)", "def test_nmap_icmp_echo_request(self):\n assert_equal(self.test_nmap.ICMP_ECHO_REQUEST, 8)", "def read_message_history(_) -> int:\n return 1 << 16", "def read_message_history(_) -> int:\n return 1 << 16", "def n_midi_events(self):\n return RPR.MIDI_CountEvts(self.id, 1, 1, 1)[0]", "def get_customer_orders_count(customerId):\n data = user_obj.get_customer_orders(customerId,\"1\")\n return data", "def get_session_count(self, network_tuple: NetworkTuple) -> int:\n return len(self.network_tuple_stream_id[network_tuple])" ]
[ "0.77231425", "0.7023055", "0.6542562", "0.62707675", "0.5976226", "0.5851045", "0.58174163", "0.54856044", "0.54173255", "0.5317395", "0.50623125", "0.5022893", "0.5022014", "0.4883163", "0.4860375", "0.48226205", "0.48192063", "0.4816786", "0.4814243", "0.4797339", "0.46779853", "0.46332717", "0.4602284", "0.4594219", "0.45920545", "0.45881212", "0.4575078", "0.4533078", "0.45307335", "0.44945496", "0.44883478", "0.44761387", "0.4463707", "0.4461295", "0.44573218", "0.44506973", "0.44308043", "0.43901435", "0.43717265", "0.43672636", "0.43658903", "0.43504304", "0.4339724", "0.43326312", "0.4314553", "0.431197", "0.43085703", "0.4277862", "0.42764753", "0.42548168", "0.42521605", "0.4249205", "0.42437327", "0.42348477", "0.4231219", "0.42273754", "0.42113462", "0.42053336", "0.41973296", "0.4196879", "0.41930696", "0.4192769", "0.41921505", "0.4192001", "0.419101", "0.41815206", "0.41784325", "0.41784176", "0.41615495", "0.41615182", "0.41563672", "0.41543087", "0.41335252", "0.41335252", "0.41335252", "0.41335252", "0.41335252", "0.41335252", "0.41335252", "0.41335252", "0.41324922", "0.41284418", "0.41283876", "0.412541", "0.41248912", "0.41240844", "0.4115192", "0.41150507", "0.41112965", "0.41112965", "0.4110608", "0.4103886", "0.41018355", "0.4094292", "0.4092172", "0.4082351", "0.4082351", "0.40809268", "0.40771127", "0.4071685" ]
0.70718306
1
Setter method for echo_req_received_count, mapped from YANG variable /mpls_state/statistics_oam/echo_req_received_count (uint32)
def _set_echo_req_received_count(self, v, load=False):
  if hasattr(v, "_utype"):
    v = v._utype(v)
  try:
    t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="echo-req-received-count", rest_name="echo-req-received-count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)
  except (TypeError, ValueError):
    raise ValueError({
      'error-string': """echo_req_received_count must be of a type compatible with uint32""",
      'defined-type': "uint32",
      'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="echo-req-received-count", rest_name="echo-req-received-count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)""",
    })

  self.__echo_req_received_count = t
  if hasattr(self, '_set'):
    self._set()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _set_echo_resp_received_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-resp-received-count\", rest_name=\"echo-resp-received-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"echo_resp_received_count must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-resp-received-count\", rest_name=\"echo-resp-received-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__echo_resp_received_count = t\n if hasattr(self, '_set'):\n self._set()", "def _set_echo_req_sent_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-req-sent-count\", rest_name=\"echo-req-sent-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"echo_req_sent_count must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-req-sent-count\", rest_name=\"echo-req-sent-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__echo_req_sent_count = t\n if hasattr(self, '_set'):\n self._set()", "def _set_echo_req_timeout_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-req-timeout-count\", rest_name=\"echo-req-timeout-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"echo_req_timeout_count must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-req-timeout-count\", rest_name=\"echo-req-timeout-count\", 
parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__echo_req_timeout_count = t\n if hasattr(self, '_set'):\n self._set()", "def _get_echo_req_received_count(self):\n return self.__echo_req_received_count", "def _set_echo_resp_sent_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-resp-sent-count\", rest_name=\"echo-resp-sent-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"echo_resp_sent_count must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-resp-sent-count\", rest_name=\"echo-resp-sent-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__echo_resp_sent_count = t\n if hasattr(self, '_set'):\n self._set()", "def _get_echo_resp_received_count(self):\n return self.__echo_resp_received_count", "def _get_echo_req_sent_count(self):\n return self.__echo_req_sent_count", "def _get_echo_resp_sent_count(self):\n return self.__echo_resp_sent_count", "def _get_echo_req_timeout_count(self):\n return self.__echo_req_timeout_count", "def get_message_count(self):\n return self.buffer.count()", "def message_count(self):\n return self._message_count", "def getNumOfMsgRec(self):\n return self.MsgReceiveCount", "def message_count(self):\n pass", "def get_kudos_received_count(khoros_object, user_settings=None, user_id=None, login=None, email=None):\n user_settings = _process_settings_and_user_id(khoros_object, user_settings, user_id, login, email)\n return _get_sum_weight(khoros_object, user_settings['id'], 'kudos_received')", "def count_chat_with(self, actor_label):\n query = read_query('trust/count_chat_with') % actor_label\n response = self._submit_query(query)\n\n return response[0]['num_chats']['value'].split('/')[-1] if response != [] else ''", "def message_count(self):\n return len(self.messages)", "def TriggeredVendorMessageLength(self):\n\t\treturn self._get_attribute('triggeredVendorMessageLength')", "def getNumOfMsgSend(self):\n return self.MsgSendCount", "def sent_count(comment):\n return comment.__len__()", "def amount_of_receivers(self) -> int:\n return sum([1 for _ in self.receivers])", "def echo_reply_handler(self, ev):\n now_timestamp = time.time()\n try:\n latency = now_timestamp - eval(ev.msg.data)\n self.echo_latency[ev.msg.datapath.id] = latency\n except:\n return", "def get_message_length(self):\n return len(self._payload)", "def message_count_limit(self) -> ConfigNodePropertyInteger:\n return self._message_count_limit", "def set_Count(self, value):\n super(MoneyReceivedInputSet, self)._set_input('Count', value)", "def 
count(self):\n\n return self._get(\"count\", rtype=UInt)", "def count(self, page_size=10, vtimeout=10):\r\n a = self.get_attributes('ApproximateNumberOfMessages')\r\n return int(a['ApproximateNumberOfMessages'])", "def count_messages(self, statuses=DEFAULT_MESSAGE_STATUSES):\n return self.request(\"count:Message\", [{\"status\": statuses}])", "def message_count(self) -> int:\n return len(self._leased_messages)", "def message_count(self, message_count):\r\n\r\n self._message_count = message_count", "def VendorMessageLength(self):\n\t\treturn self._get_attribute('vendorMessageLength')", "def get_count(self):\n return unpack(os.read(self.fd, 8))", "def message_count_limit(self, message_count_limit: ConfigNodePropertyInteger):\n\n self._message_count_limit = message_count_limit", "def read_count(self, read_count):\n\n self._read_count = read_count", "def message_length(self):\n return self._message_length", "def getMessageCount(self):\n return 9", "def count(self):\n return len(self.read_ints())", "def get_order_count(self):\n resp = self.app.get('/orders')\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n data = json.loads(resp.data)\n return len(data)", "def read_count(self):\n return self._read_count", "def getNumOfMessagesToSend(self):\n return len(self.SendMessageBuffer)", "def count_likes(self):\n likes = self.event_likes\n num_likes = len(likes)\n return num_likes", "def get_number_of_reactions(self):\n return self._performed_actions[REACT]", "def __len__(self):\n response = self._rpc(self._declare(True))\n return response.message_count", "def read_num_lines(data_socket):\r\n size_bytes = b''\r\n for i in range(0, 4):\r\n size_bytes += next_byte(data_socket)\r\n return int.from_bytes(size_bytes, 'big')", "def agent_count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"agent_count\")", "def response_count(self) -> int:\n return pulumi.get(self, \"response_count\")", "def message_len(self):\n # expect F, use zero\n return len(self.message) if self.message else 0", "def response_count(self):\n return self.responses.count()", "def count(self, value):\n \n self._count = int(value)", "def queue_size(self):\n # pylint: disable=protected-access\n if self._handler._received_messages:\n return self._handler._received_messages.qsize()\n return 0", "def count(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"count\")", "def getNumOfMsgRec_interval(self):\n return self.MsgReceiveCount_interval", "def get_replies_count(khoros_object, user_settings=None, user_id=None, login=None, email=None):\n user_settings = _process_settings_and_user_id(khoros_object, user_settings, user_id, login, email)\n select_fields = ('messages.count(*)', 'topics.count(*)')\n api_response = query_users_table_by_id(khoros_object, select_fields, user_settings['id'])\n items_list = api.get_items_list(api_response)\n return int(items_list['messages']['count']) - int(items_list['topics']['count'])", "def handle_echo(self, event):\n print('Echo received')\n return 0x0000", "def github_num_reactions(comment_object: Dict[str, Any]) -> int:\n return comment_object.get('reactions', {}).get('total_count', 0)", "def _get_count(self, msg, subtype=\"all\"):\n try:\n counts = self.get_local(msg, \"counts\")\n return counts.get(subtype, 0)\n except KeyError:\n return 0", "def count_simsimi_msg(db):\n try:\n count = db.get('simsimi_info')['qty_answed_message']\n except:\n count = 1\n return count", "def _get_total_read_size(self):\n if self.read_size:\n read_size = EVENT_SIZE * self.read_size\n else:\n 
read_size = EVENT_SIZE\n return read_size", "def count(self):\n with self._block:\n counter = re.search(r'count=(\\d+) ', repr(self))\n return int(counter.group(1))", "def consumer_count(self, obj):\n return obj.get_or_set_consumer_count()", "def getNumReactions(self):\n return _libsbml.Model_getNumReactions(self)", "def get_unaccepted_event_requests_count(self, staff_id):\n return self.get_unaccepted_event_requests(staff_id).__len__()", "def _read_length(self):\n msg_length = struct.unpack('!I', self.received_data[0])[0]\n self.l.debug('msg_length = %d', msg_length)\n self.set_terminator(msg_length)\n self.process_data = self._read_message\n self.received_data = []", "def GetCount(self):\n return self._server.get_count()", "def get_response_pdu_size(self):\n return 1 + 1 + 2 * self.count", "def count_response_codes():\n code = request.args.get('code', 200)\n log_lines = request.args.get('log_lines')\n\n if log_lines:\n lines_list = json.loads(log_lines)\n count = count_by_code(lines_list, code)\n else:\n count = 0\n\n response = str(count)\n return response", "async def count(self, **kw):\n\n pass", "def count(request):\r\n n = request.user.profile.unread_message_count()\r\n data = {\r\n 'count': n,\r\n }\r\n return HttpResponse(json.dumps(data), mimetype='application/json')", "def alerts_count(self) -> int:\n return pulumi.get(self, \"alerts_count\")", "async def on_count(ctx):\n count = get_count()\n await ctx.send(f'current count {count}')", "def test_status_reply_count(self):\n with self.app.app_context():\n u = user(save=True)\n s = status(user=u, project=None, save=True)\n for i in range(5):\n status(user=u, project=None, reply_to=s, save=True)\n\n eq_(s.reply_count, 5)", "def nclients(self, r):\r\n return len(self.clients(r))", "def _read_len(self):\n\n read = self.socket.recv(4)\n if len(read) == 0:\n # if we read 0 bytes and self.message is empty, it means client\n # closed the connection\n if len(self.message) != 0:\n logging.error(\"can't read frame size from socket\")\n self.close()\n return\n self.message += read\n if len(self.message) == 4:\n self.len, = struct.unpack(b'!i', self.message)\n if self.len < 0:\n logging.error(\"negative frame size, it seems client\"\\\n \" doesn't use FramedTransport\")\n self.close()\n elif self.len == 0:\n logging.error(\"empty frame, it's really strange\")\n self.close()\n else:\n self.len += 4 # Include message length\n self._set_status(WAIT_MESSAGE)", "def count_number_of_reads(filename: Path) -> int:\n\tif filename.suffix == '.gz':\n\t\tcommand = f\"zcat {filename}\"\n\telse:\n\t\tcommand = f\"cat {filename}\"\n\tprocess = subprocess.Popen(command.split(), stdout = subprocess.PIPE)\n\toutput = subprocess.check_output([\"wc\", \"-l\"], stdin = process.stdout)\n\n\treads = int(output.strip()) / 4\n\treturn int(reads)", "def averaging_frame_count(self):\n fc = ct.c_uint()\n self.lib.Filter_GetAveragingFrameCount(ct.pointer(fc))\n return fc.value", "def messages_count(self, **kwargs):\n if \"order\" in kwargs and kwargs[\"order\"]:\n sign = kwargs[\"order\"][:1]\n criterion = kwargs[\"order\"][1:].upper()\n if sign == '-':\n criterion = \"REVERSE %s\" % criterion\n else:\n criterion = \"REVERSE DATE\"\n folder = kwargs[\"folder\"] if \"folder\" in kwargs else None\n\n # FIXME: pourquoi suis je obligé de faire un SELECT ici? 
un\n # EXAMINE plante mais je pense que c'est du à une mauvaise\n # lecture des réponses de ma part...\n self.select_mailbox(folder, readonly=False)\n cmdname = \"SORT\" if six.PY3 else b\"SORT\"\n data = self._cmd(\n cmdname,\n bytearray(\"(%s)\" % criterion, \"utf-8\"),\n b\"UTF-8\", b\"(NOT DELETED)\", *self.criterions)\n self.messages = data[0].decode().split()\n self.getquota(folder)\n return len(self.messages)", "def response_received(self, ignored):\n self._received += 1", "def edge_count(self):\n total = sum(len(self._outgoing[v]) for v in self._outgoing)\n # for undirected graphs, make sure not to double-count edges\n return total if self.is_directed() else total // 2", "async def sqs_count(self, _) -> Response:\n message = {\n \"message_count\": self.sqs.count()\n }\n return Response(text=json.dumps(message))", "def CountFlowLogEntries(self, client_id, flow_id):\n return len(self.ReadFlowLogEntries(client_id, flow_id, 0, sys.maxsize))", "def count_messages(queryset):\n messages = messaging.models.Message.objects.filter(thread__ad=OuterRef('pk')).only('pk')\n return queryset.annotate(message_count=core.utils.SubqueryCount(messages))", "def get_number_of_messages(queue_name):\n queue = sqs.get_queue_by_name(QueueName=queue_name)\n return queue.attributes.get('ApproximateNumberOfMessages')", "def count(self):\n self._read_keypad()\n return len(self._current_events)", "def average_num_reply():\n post_count = Post.select().count()\n reply_count = Comment.select().count()\n if post_count == 0:\n return 0\n else:\n average = round(reply_count / post_count, 2)\n return average", "def get_total_session_count(self) -> int:\n return self.streams_count", "def getLength(msg):\n return len(msg)", "def get_total_n_events(self):\n\n return self._total_n_processed_events", "def count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"count\")", "def count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"count\")", "def count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"count\")", "def count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"count\")", "def count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"count\")", "def count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"count\")", "def count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"count\")", "def count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"count\")", "async def messagecount(self, ctx, name=None):\r\n if await bMsg(ctx,ctx.message.author.name,client):\r\n return\r\n async with ctx.channel.typing():\r\n username = name\r\n if username is None:\r\n username = ctx.message.author.name\r\n resp = await self.req('https://api.scratch.mit.edu/users/' + username + '/messages/count')\r\n if resp is None and name is None:\r\n username = getattr(ctx.message.author, 'nick', '_')\r\n resp = await self.req('https://api.scratch.mit.edu/users/' + username + '/messages/count')\r\n logger.info('Scratch.messagecount: ' + username, extra={'invoker': ctx.message.author.name})\r\n if resp is None:\r\n await ctx.send(\"Couldn't get message count for \" + username)\r\n else:\r\n await ctx.send('{} has {} messages'.format(\r\n username,\r\n json.loads(resp)['count']\r\n ))", "def op_count(cls, crawler, stage=None):\n if stage:\n total_ops = cls.conn.get(make_key(crawler, stage))\n else:\n total_ops = cls.conn.get(make_key(crawler, \"total_ops\"))\n return unpack_int(total_ops)", "def recv_req(self):\n 
self.n_recv_req += 1", "def count(self) -> int:\n return pulumi.get(self, \"count\")", "def get_subscriber_count(self, response):\n return response.css('.yt-subscriber-count')\\\n .extract_first(default='')", "def edge_count(self) -> int:\n return self._n_edges" ]
[ "0.7757586", "0.7065394", "0.68073034", "0.67364514", "0.66612166", "0.62458676", "0.56309354", "0.5338627", "0.52505404", "0.50096846", "0.48576525", "0.48448968", "0.48185655", "0.47561345", "0.4739262", "0.46860245", "0.46509308", "0.46177593", "0.45707104", "0.45225346", "0.45101976", "0.4484876", "0.44768032", "0.44697633", "0.44539776", "0.4451774", "0.4446846", "0.4445863", "0.44211182", "0.4417924", "0.43984917", "0.43765986", "0.43258238", "0.4311429", "0.43095216", "0.43074924", "0.4290968", "0.42882082", "0.4277134", "0.42640764", "0.4261568", "0.4256773", "0.42513886", "0.42477405", "0.4235379", "0.4234901", "0.42104682", "0.41975585", "0.41849163", "0.41770592", "0.41752642", "0.41733086", "0.41518643", "0.4151421", "0.4144384", "0.4134263", "0.41311273", "0.4108862", "0.4102344", "0.40928268", "0.40897757", "0.4086842", "0.4082944", "0.40733528", "0.40711653", "0.40691686", "0.40685725", "0.4064514", "0.40582007", "0.4042194", "0.4040617", "0.403933", "0.40392026", "0.4034048", "0.40326816", "0.40243417", "0.4020885", "0.40189916", "0.40164927", "0.40105307", "0.40073946", "0.399836", "0.39854172", "0.39700332", "0.39579323", "0.3953417", "0.3951755", "0.3951755", "0.3951755", "0.3951755", "0.3951755", "0.3951755", "0.3951755", "0.3951755", "0.39506614", "0.39500204", "0.39474025", "0.39364675", "0.39313817", "0.3930972" ]
0.84776396
0
Getter method for echo_req_timeout_count, mapped from YANG variable /mpls_state/statistics_oam/echo_req_timeout_count (uint32)
def _get_echo_req_timeout_count(self): return self.__echo_req_timeout_count
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _set_echo_req_timeout_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-req-timeout-count\", rest_name=\"echo-req-timeout-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"echo_req_timeout_count must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-req-timeout-count\", rest_name=\"echo-req-timeout-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__echo_req_timeout_count = t\n if hasattr(self, '_set'):\n self._set()", "def _set_echo_req_received_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-req-received-count\", rest_name=\"echo-req-received-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"echo_req_received_count must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-req-received-count\", rest_name=\"echo-req-received-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__echo_req_received_count = t\n if hasattr(self, '_set'):\n self._set()", "def _get_echo_req_received_count(self):\n return self.__echo_req_received_count", "def _set_echo_req_sent_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-req-sent-count\", rest_name=\"echo-req-sent-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"echo_req_sent_count must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), 
is_leaf=True, yang_name=\"echo-req-sent-count\", rest_name=\"echo-req-sent-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__echo_req_sent_count = t\n if hasattr(self, '_set'):\n self._set()", "def _get_echo_resp_received_count(self):\n return self.__echo_resp_received_count", "def _set_echo_resp_received_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-resp-received-count\", rest_name=\"echo-resp-received-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"echo_resp_received_count must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-resp-received-count\", rest_name=\"echo-resp-received-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__echo_resp_received_count = t\n if hasattr(self, '_set'):\n self._set()", "def _get_echo_req_sent_count(self):\n return self.__echo_req_sent_count", "def _set_echo_resp_sent_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-resp-sent-count\", rest_name=\"echo-resp-sent-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"echo_resp_sent_count must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-resp-sent-count\", rest_name=\"echo-resp-sent-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__echo_resp_sent_count = t\n if hasattr(self, '_set'):\n self._set()", "def _get_echo_resp_sent_count(self):\n return self.__echo_resp_sent_count", "def echo_reply_handler(self, ev):\n now_timestamp = time.time()\n try:\n latency = now_timestamp - eval(ev.msg.data)\n self.echo_latency[ev.msg.datapath.id] = latency\n except:\n return", "def count(self, page_size=10, vtimeout=10):\r\n a = self.get_attributes('ApproximateNumberOfMessages')\r\n return 
int(a['ApproximateNumberOfMessages'])", "def get_event_history_count(self, event_label, timeout=10.0):\n\n self.verify_event_labels(\n [event_label],\n error_message=\"%s get_event_history_count failed.\" % self._device_name)\n\n try:\n count, timedout = _get_event_history_count(\n self.event_file_path, event_label, timeout=timeout)\n return ParserResult(timedout=timedout, results_list=[], count=count)\n except Exception as err:\n raise errors.ParserError(\n \"Retrieving event {} history from {} failed. Error {!r}\".format(\n event_label, self.event_file_path, err))", "def tcp_timeout_seconds(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"tcp_timeout_seconds\")", "def _get_event_history_count(device_event_file_path, event_label, timeout=10.0):\n result = 0\n timedout = False\n\n file_exists, remaining_timeout = _wait_for_event_file(device_event_file_path,\n timeout)\n if not file_exists:\n timedout = True\n return result, timedout\n\n timeout_str = \"{:f}\".format(remaining_timeout)\n\n grep_cmd = [\n \"timeout\", timeout_str, \"grep\", \"-c\", \"-w\", event_label,\n device_event_file_path\n ]\n grep_proc = subprocess.Popen(grep_cmd, stdout=subprocess.PIPE)\n out, _ = grep_proc.communicate()\n if grep_proc.returncode == 124:\n timedout = True\n\n if out:\n result = int(out.strip())\n\n return result, timedout", "def message_count_limit(self) -> ConfigNodePropertyInteger:\n return self._message_count_limit", "def get_message_count(self):\n return self.buffer.count()", "def response_count(self):\n return self.responses.count()", "def count(self):\n\n return self._get(\"count\", rtype=UInt)", "def response_count(self) -> int:\n return pulumi.get(self, \"response_count\")", "def message_count(self):\n return self._message_count", "def count_chat_with(self, actor_label):\n query = read_query('trust/count_chat_with') % actor_label\n response = self._submit_query(query)\n\n return response[0]['num_chats']['value'].split('/')[-1] if response != [] else ''", "def message_count(self):\n return len(self.messages)", "def timeout_seconds(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"timeout_seconds\")", "def org_apache_felix_http_session_timeout(self) -> ConfigNodePropertyInteger:\n return self._org_apache_felix_http_session_timeout", "def timeout_seconds(self):\n return self._timeout_seconds", "def test_nmap_icmp_echo_request(self):\n assert_equal(self.test_nmap.ICMP_ECHO_REQUEST, 8)", "def alerts_count(self) -> int:\n return pulumi.get(self, \"alerts_count\")", "def message_count(self):\n pass", "def GetTotalQueueCount(handler, query):\n # pylint: disable=unused-argument\n\n json_config = {}\n json_config['count'] = 0\n\n with active_tivos_lock:\n for tivoIP in active_tivos:\n with active_tivos[tivoIP]['lock']:\n json_config['count'] += len(active_tivos[tivoIP]['queue'])\n\n handler.send_json(json.dumps(json_config))", "def count(self):\n return len(self._request_sessions)", "def get_transaction_count(address, endpoint=_default_endpoint, timeout=_default_timeout) -> int:\n return get_account_nonce(address, true_nonce=True, endpoint=endpoint, timeout=timeout)", "def agent_count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"agent_count\")", "def udp_timeout_seconds(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"udp_timeout_seconds\")", "def count(self):\n self._read_keypad()\n return len(self._current_events)", "def tcp_fin_timeout_seconds(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, 
\"tcp_fin_timeout_seconds\")", "def accelerator_count(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"accelerator_count\")", "def count(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"count\")", "def timeout_seconds(self) -> Optional[int]:\n return pulumi.get(self, \"timeout_seconds\")", "def timeout_seconds(self) -> Optional[int]:\n return pulumi.get(self, \"timeout_seconds\")", "def timeout_seconds(self) -> Optional[int]:\n return pulumi.get(self, \"timeout_seconds\")", "def timeout_seconds(self) -> Optional[int]:\n return pulumi.get(self, \"timeout_seconds\")", "def timeout_seconds(self) -> Optional[int]:\n return pulumi.get(self, \"timeout_seconds\")", "def timeout_seconds(self) -> Optional[int]:\n return pulumi.get(self, \"timeout_seconds\")", "def event_count(self):\n\n with self.lock:\n return self.num_events", "def TriggeredVendorMessageLength(self):\n\t\treturn self._get_attribute('triggeredVendorMessageLength')", "def send_timeout_in_seconds(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"send_timeout_in_seconds\")", "def read_timeout_in_seconds(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"read_timeout_in_seconds\")", "def read_timeout_in_seconds(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"read_timeout_in_seconds\")", "def get_order_count(self):\n resp = self.app.get('/orders')\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n data = json.loads(resp.data)\n return len(data)", "def GetCount(self):\n return self._server.get_count()", "def kafka_consumer_stats_timeout(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"kafka_consumer_stats_timeout\")", "def org_apache_felix_http_timeout(self) -> ConfigNodePropertyInteger:\n return self._org_apache_felix_http_timeout", "def count_likes(self):\n likes = self.event_likes\n num_likes = len(likes)\n return num_likes", "def get_response_pdu_size(self):\n return 1 + 1 + 2 * self.count", "def getNumOfMsgSend(self):\n return self.MsgSendCount", "def _get_count(awsclient, function_name, alias_name='ACTIVE', version=None):\n client_lambda = awsclient.get_client('lambda')\n payload = '{\"ramuda_action\": \"count\"}'\n\n if version:\n response = client_lambda.invoke(\n FunctionName=function_name,\n InvocationType='RequestResponse',\n Payload=payload,\n Qualifier=version\n )\n else:\n response = client_lambda.invoke(\n FunctionName=function_name,\n InvocationType='RequestResponse',\n Payload=payload,\n Qualifier=alias_name\n )\n\n # print type(response['Payload'])\n results = response['Payload'].read() # payload is a 'StreamingBody'\n return results", "def get_message_length(self):\n return len(self._payload)", "def __len__(self):\n response = self._rpc(self._declare(True))\n return response.message_count", "def count_update_pool_size(self) -> ConfigNodePropertyInteger:\n return self._count_update_pool_size", "def message_length(self):\n return self._message_length", "def timeout(self, context):\n\n timeout = 0\n\n for task in flatten(self.tasks, context):\n task_timeout = DEFAULT_TASK_TIMEOUT\n task_details = getattr(task, '__garcon__', None)\n\n if task_details:\n task_timeout = task_details.get(\n 'timeout', DEFAULT_TASK_TIMEOUT)\n\n timeout = timeout + task_timeout\n\n return timeout", "def count(self):\n return len(self.read_ints())", "def message_count(self) -> int:\n return len(self._leased_messages)", "def count(self) -> int:\n return pulumi.get(self, \"count\")", "def ping_timeout(self) -> timedelta:\n return self._ping_timeout", 
"def CountFlowLogEntries(self, client_id, flow_id):\n return len(self.ReadFlowLogEntries(client_id, flow_id, 0, sys.maxsize))", "def count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"count\")", "def count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"count\")", "def count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"count\")", "def count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"count\")", "def count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"count\")", "def count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"count\")", "def count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"count\")", "def count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"count\")", "async def count(self, **kw):\n\n pass", "def get_control_count(cmd):\n return len(cmd.control_qubits)", "def VendorMessageLength(self):\n\t\treturn self._get_attribute('vendorMessageLength')", "def getMessageCount(self):\n return 9", "def gettimeout(self):\r\n return self.sock.gettimeout()", "def gettimeout(self):\r\n return self.sock.gettimeout()", "def option_thread_count(self):\n return six.next(six.itervalues(self.zap._request(self.zap.base + 'spider/view/optionThreadCount/')))", "def accelerator_count(self) -> Optional[int]:\n return pulumi.get(self, \"accelerator_count\")", "def stored_cookie_messages_count(storage, response):\n # Get a list of cookies, excluding ones with a max-age of 0 (because\n # they have been marked for deletion).\n cookie = response.cookies.get(storage.cookie_name)\n if not cookie or cookie['max-age'] == 0:\n return 0\n data = storage._decode(cookie.value)\n if not data:\n return 0\n return len(data)", "def get_total_session_count(self) -> int:\n return self.streams_count", "def get_count(self):\n return unpack(os.read(self.fd, 8))", "def socket_timeout(self):\n return self.__socket_timeout", "def count(self):\n return self.connection._llen(self.key)", "def queue_size(self):\n # pylint: disable=protected-access\n if self._handler._received_messages:\n return self._handler._received_messages.qsize()\n return 0", "def count(time):\n \n return len(events(time))", "def count(self) -> Optional[int]:\n return pulumi.get(self, \"count\")", "def count(self) -> Optional[int]:\n return pulumi.get(self, \"count\")", "def ctrlqueue_num_actions(self) -> int:\n try:\n return self.dss_obj.CtrlQueueI(ctypes.c_int32(2), ctypes.c_int32(0))\n except Exception as e:\n Base.warn_msg(\"An error occur when tried to get *Num Actions of CrlQueue* check if *Queue* is NOT empty\", e)", "def count(self):\n return len(self._commands)", "def GetCount(self, label: 'unsigned short') -> \"unsigned long long\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIUS3IUS3_GetCount(self, label)", "def count(cls, client) :\n\t\ttry :\n\t\t\tobj = bfdsession()\n\t\t\toption_ = options()\n\t\t\toption_.count = True\n\t\t\tresponse = obj.get_resources(client, option_)\n\t\t\tif response :\n\t\t\t\treturn response[0].__dict__['___count']\n\t\t\treturn 0\n\t\texcept Exception as e :\n\t\t\traise e", "def op_count(cls, crawler, stage=None):\n if stage:\n total_ops = cls.conn.get(make_key(crawler, stage))\n else:\n total_ops = cls.conn.get(make_key(crawler, \"total_ops\"))\n return unpack_int(total_ops)", "def CountFlowResults(self, client_id, flow_id, with_tag=None, with_type=None):\n return len(\n self.ReadFlowResults(\n client_id,\n flow_id,\n 0,\n 
sys.maxsize,\n with_tag=with_tag,\n with_type=with_type))", "def count(self):\n return self.properties.get('count')", "def get_response_pdu_size(self):\n count = self.count // 8\n if self.count % 8:\n count += 1\n\n return 1 + 1 + count", "def count(self):\n return self._lift(\"count\")" ]
[ "0.80206865", "0.6734563", "0.6540391", "0.6376731", "0.61047983", "0.60804176", "0.60245043", "0.5819295", "0.55777395", "0.49859354", "0.49369183", "0.4863983", "0.48448384", "0.48247913", "0.4788822", "0.4729328", "0.47008264", "0.4682831", "0.46340755", "0.46337748", "0.46146536", "0.45855942", "0.45836985", "0.4581994", "0.45731935", "0.4570734", "0.45420828", "0.45405638", "0.45283872", "0.45199665", "0.4512633", "0.44969916", "0.4490706", "0.4488389", "0.4472777", "0.44723174", "0.44675013", "0.44563574", "0.44563574", "0.44563574", "0.44563574", "0.44563574", "0.44563574", "0.44509017", "0.44379348", "0.44301626", "0.44301078", "0.44301078", "0.44197252", "0.44176424", "0.44091094", "0.4404623", "0.43919522", "0.43908644", "0.439039", "0.4390386", "0.43875787", "0.4354714", "0.4354168", "0.43410236", "0.4338639", "0.4328579", "0.43285164", "0.43277568", "0.43215665", "0.4312994", "0.4312931", "0.4312931", "0.4312931", "0.4312931", "0.4312931", "0.4312931", "0.4312931", "0.4312931", "0.43092513", "0.43068537", "0.43051258", "0.430358", "0.42814693", "0.42814693", "0.42698795", "0.42357358", "0.42339516", "0.42320833", "0.42288926", "0.42206782", "0.42189816", "0.4218943", "0.4217144", "0.42155942", "0.42155942", "0.42070267", "0.42017928", "0.41843447", "0.41811338", "0.4177657", "0.41757748", "0.41734526", "0.41711414", "0.4166271" ]
0.7347358
1
Setter method for echo_req_timeout_count, mapped from YANG variable /mpls_state/statistics_oam/echo_req_timeout_count (uint32)
def _set_echo_req_timeout_count(self, v, load=False):
  if hasattr(v, "_utype"):
    v = v._utype(v)
  try:
    t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="echo-req-timeout-count", rest_name="echo-req-timeout-count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)
  except (TypeError, ValueError):
    raise ValueError({
      'error-string': """echo_req_timeout_count must be of a type compatible with uint32""",
      'defined-type': "uint32",
      'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="echo-req-timeout-count", rest_name="echo-req-timeout-count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)""",
    })

  self.__echo_req_timeout_count = t
  if hasattr(self, '_set'):
    self._set()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _set_echo_req_received_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-req-received-count\", rest_name=\"echo-req-received-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"echo_req_received_count must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-req-received-count\", rest_name=\"echo-req-received-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__echo_req_received_count = t\n if hasattr(self, '_set'):\n self._set()", "def _set_echo_req_sent_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-req-sent-count\", rest_name=\"echo-req-sent-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"echo_req_sent_count must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-req-sent-count\", rest_name=\"echo-req-sent-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__echo_req_sent_count = t\n if hasattr(self, '_set'):\n self._set()", "def _get_echo_req_timeout_count(self):\n return self.__echo_req_timeout_count", "def _set_echo_resp_received_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-resp-received-count\", rest_name=\"echo-resp-received-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"echo_resp_received_count must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), 
is_leaf=True, yang_name=\"echo-resp-received-count\", rest_name=\"echo-resp-received-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__echo_resp_received_count = t\n if hasattr(self, '_set'):\n self._set()", "def _set_echo_resp_sent_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-resp-sent-count\", rest_name=\"echo-resp-sent-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"echo_resp_sent_count must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-resp-sent-count\", rest_name=\"echo-resp-sent-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__echo_resp_sent_count = t\n if hasattr(self, '_set'):\n self._set()", "def _get_echo_req_received_count(self):\n return self.__echo_req_received_count", "def _get_echo_req_sent_count(self):\n return self.__echo_req_sent_count", "def _get_echo_resp_received_count(self):\n return self.__echo_resp_received_count", "def _get_echo_resp_sent_count(self):\n return self.__echo_resp_sent_count", "def message_count_limit(self) -> ConfigNodePropertyInteger:\n return self._message_count_limit", "def message_count_limit(self, message_count_limit: ConfigNodePropertyInteger):\n\n self._message_count_limit = message_count_limit", "def count(self, page_size=10, vtimeout=10):\r\n a = self.get_attributes('ApproximateNumberOfMessages')\r\n return int(a['ApproximateNumberOfMessages'])", "def echo_reply_handler(self, ev):\n now_timestamp = time.time()\n try:\n latency = now_timestamp - eval(ev.msg.data)\n self.echo_latency[ev.msg.datapath.id] = latency\n except:\n return", "def tcp_timeout_seconds(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"tcp_timeout_seconds\")", "def count_update_pool_size(self, count_update_pool_size: ConfigNodePropertyInteger):\n\n self._count_update_pool_size = count_update_pool_size", "def org_apache_felix_http_session_timeout(self) -> ConfigNodePropertyInteger:\n return self._org_apache_felix_http_session_timeout", "def response_count(self):\n return self.responses.count()", "def count_update_pool_size(self) -> ConfigNodePropertyInteger:\n return self._count_update_pool_size", "def count_chat_with(self, actor_label):\n query = read_query('trust/count_chat_with') % actor_label\n response = self._submit_query(query)\n\n return response[0]['num_chats']['value'].split('/')[-1] if response != [] else ''", "def response_count(self) -> int:\n return pulumi.get(self, \"response_count\")", "def message_count(self):\n pass", "async def count(self, **kw):\n\n pass", "def 
alerts_count(self) -> int:\n return pulumi.get(self, \"alerts_count\")", "def count(self):\n\n return self._get(\"count\", rtype=UInt)", "def get_event_history_count(self, event_label, timeout=10.0):\n\n self.verify_event_labels(\n [event_label],\n error_message=\"%s get_event_history_count failed.\" % self._device_name)\n\n try:\n count, timedout = _get_event_history_count(\n self.event_file_path, event_label, timeout=timeout)\n return ParserResult(timedout=timedout, results_list=[], count=count)\n except Exception as err:\n raise errors.ParserError(\n \"Retrieving event {} history from {} failed. Error {!r}\".format(\n event_label, self.event_file_path, err))", "def test_nmap_icmp_echo_request(self):\n assert_equal(self.test_nmap.ICMP_ECHO_REQUEST, 8)", "def count(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"count\")", "def count(self, value):\n \n self._count = int(value)", "def org_apache_felix_http_timeout(self) -> ConfigNodePropertyInteger:\n return self._org_apache_felix_http_timeout", "def timeout_seconds(self):\n return self._timeout_seconds", "def message_count(self):\n return self._message_count", "def limit_num_clients(self):\n return self._limit_num_clients", "def _set_usr_traceroute_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"usr-traceroute-count\", rest_name=\"usr-traceroute-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"usr_traceroute_count must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"usr-traceroute-count\", rest_name=\"usr-traceroute-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__usr_traceroute_count = t\n if hasattr(self, '_set'):\n self._set()", "def timeout_seconds(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"timeout_seconds\")", "def tcp_fin_timeout_seconds(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"tcp_fin_timeout_seconds\")", "def agent_count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"agent_count\")", "def get_message_count(self):\n return self.buffer.count()", "def count_likes(self):\n likes = self.event_likes\n num_likes = len(likes)\n return num_likes", "def _set_usr_ping_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"usr-ping-count\", rest_name=\"usr-ping-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise 
ValueError({\n 'error-string': \"\"\"usr_ping_count must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"usr-ping-count\", rest_name=\"usr-ping-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__usr_ping_count = t\n if hasattr(self, '_set'):\n self._set()", "def message_count(self):\n return len(self.messages)", "def count(self):\n return len(self._request_sessions)", "def send_timeout_in_seconds(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"send_timeout_in_seconds\")", "async def on_count(ctx):\n count = get_count()\n await ctx.send(f'current count {count}')", "def count(self) -> int:\n return pulumi.get(self, \"count\")", "def timeout_seconds(self) -> Optional[int]:\n return pulumi.get(self, \"timeout_seconds\")", "def timeout_seconds(self) -> Optional[int]:\n return pulumi.get(self, \"timeout_seconds\")", "def timeout_seconds(self) -> Optional[int]:\n return pulumi.get(self, \"timeout_seconds\")", "def timeout_seconds(self) -> Optional[int]:\n return pulumi.get(self, \"timeout_seconds\")", "def timeout_seconds(self) -> Optional[int]:\n return pulumi.get(self, \"timeout_seconds\")", "def timeout_seconds(self) -> Optional[int]:\n return pulumi.get(self, \"timeout_seconds\")", "def getMessageCount(self):\n return 9", "def udp_timeout_seconds(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"udp_timeout_seconds\")", "def get_max_cleverbot_requests(self):\n return int(self.bot_data_file[\"maxCleverbotRequests\"])", "def option_thread_count(self):\n return six.next(six.itervalues(self.zap._request(self.zap.base + 'spider/view/optionThreadCount/')))", "def message_count(self) -> int:\n return len(self._leased_messages)", "def max_count(self):\n return self.config.get('max_count', 500)", "def event_count(self):\n\n with self.lock:\n return self.num_events", "def GetCount(self):\n return self._server.get_count()", "def get_response_pdu_size(self):\n return 1 + 1 + 2 * self.count", "def accelerator_count(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"accelerator_count\")", "def io_throttle_count(self, io_throttle_count):\n\n self._io_throttle_count = io_throttle_count", "def test_status_reply_count(self):\n with self.app.app_context():\n u = user(save=True)\n s = status(user=u, project=None, save=True)\n for i in range(5):\n status(user=u, project=None, reply_to=s, save=True)\n\n eq_(s.reply_count, 5)", "def TriggeredVendorMessageLength(self):\n\t\treturn self._get_attribute('triggeredVendorMessageLength')", "def getNumOfMsgSend(self):\n return self.MsgSendCount", "def guests_counter(window, n_guests):\r\n window.write_event_value('-COUNT-', n_guests)", "def set_Count(self, value):\n super(MoneyReceivedInputSet, self)._set_input('Count', value)", "def count(self, count: int) -> None:\n self._count = count", "def set_count(self, count, asset=None):\n self._set_property('pc:count', count, asset)", "def count(self) -> int:\n return self._count", "def count(self) -> int:\n return self._count", "def count(self) -> int:\n return self._count", "def set_max_order_count(self, max_count, on_error='fail'):\n control = MaxOrderCount(on_error, max_count)\n 
self.register_trading_control(control)", "def count(self):\n self._read_keypad()\n return len(self._current_events)", "def _get_event_history_count(device_event_file_path, event_label, timeout=10.0):\n result = 0\n timedout = False\n\n file_exists, remaining_timeout = _wait_for_event_file(device_event_file_path,\n timeout)\n if not file_exists:\n timedout = True\n return result, timedout\n\n timeout_str = \"{:f}\".format(remaining_timeout)\n\n grep_cmd = [\n \"timeout\", timeout_str, \"grep\", \"-c\", \"-w\", event_label,\n device_event_file_path\n ]\n grep_proc = subprocess.Popen(grep_cmd, stdout=subprocess.PIPE)\n out, _ = grep_proc.communicate()\n if grep_proc.returncode == 124:\n timedout = True\n\n if out:\n result = int(out.strip())\n\n return result, timedout", "def get_total_session_count(self) -> int:\n return self.streams_count", "def ping_timeout(self) -> timedelta:\n return self._ping_timeout", "def count(self):\n return len(self.read_ints())", "def count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"count\")", "def count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"count\")", "def count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"count\")", "def count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"count\")", "def count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"count\")", "def count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"count\")", "def count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"count\")", "def count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"count\")", "def get_transaction_count(address, endpoint=_default_endpoint, timeout=_default_timeout) -> int:\n return get_account_nonce(address, true_nonce=True, endpoint=endpoint, timeout=timeout)", "def message_count(self, message_count):\r\n\r\n self._message_count = message_count", "def count(cls, client) :\n\t\ttry :\n\t\t\tobj = bfdsession()\n\t\t\toption_ = options()\n\t\t\toption_.count = True\n\t\t\tresponse = obj.get_resources(client, option_)\n\t\t\tif response :\n\t\t\t\treturn response[0].__dict__['___count']\n\t\t\treturn 0\n\t\texcept Exception as e :\n\t\t\traise e", "def GetTotalQueueCount(handler, query):\n # pylint: disable=unused-argument\n\n json_config = {}\n json_config['count'] = 0\n\n with active_tivos_lock:\n for tivoIP in active_tivos:\n with active_tivos[tivoIP]['lock']:\n json_config['count'] += len(active_tivos[tivoIP]['queue'])\n\n handler.send_json(json.dumps(json_config))", "def read_timeout_in_seconds(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"read_timeout_in_seconds\")", "def read_timeout_in_seconds(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"read_timeout_in_seconds\")", "def count(self):\n with self._block:\n counter = re.search(r'count=(\\d+) ', repr(self))\n return int(counter.group(1))", "async def quote_count(self):\n await self.bot.type()\n result = self.count()\n await self.bot.say(result)", "def accelerator_count(self) -> Optional[int]:\n return pulumi.get(self, \"accelerator_count\")", "def count(self) -> pulumi.Input[float]:\n return pulumi.get(self, \"count\")", "def count(self) -> pulumi.Input[float]:\n return pulumi.get(self, \"count\")", "def CountFlowLogEntries(self, client_id, flow_id):\n return len(self.ReadFlowLogEntries(client_id, flow_id, 0, sys.maxsize))", "def SessionCount(self):\n if self.force_auto_sync:\n self.get('SessionCount')\n return 
self._SessionCount", "def Count(self, limit=None):\n if limit is None:\n count = 0\n for i in self.Run():\n count += 1\n return count\n else:\n return len(self.Get(limit))", "def count(self, count: int):\n\n self._count = count" ]
[ "0.7362202", "0.7095978", "0.6999127", "0.6642217", "0.64998084", "0.60558325", "0.57168615", "0.5617509", "0.52749455", "0.50046265", "0.4905859", "0.49030608", "0.48003668", "0.4683387", "0.4644038", "0.45746693", "0.45659736", "0.4561842", "0.45596886", "0.4557894", "0.44879726", "0.44478822", "0.44476786", "0.4437888", "0.44238985", "0.4407464", "0.4398701", "0.4393611", "0.4392381", "0.43914813", "0.43866044", "0.43447432", "0.434387", "0.43239576", "0.43231818", "0.4318845", "0.43162122", "0.4314671", "0.43146122", "0.4310824", "0.4303332", "0.4263409", "0.42613992", "0.42495462", "0.4238756", "0.4238756", "0.4238756", "0.4238756", "0.4238756", "0.4238756", "0.4227029", "0.42167625", "0.42149064", "0.42104283", "0.42012882", "0.42002743", "0.41933244", "0.41912472", "0.4190506", "0.4188383", "0.41807815", "0.41673875", "0.41534582", "0.41262132", "0.41252318", "0.41242564", "0.41234022", "0.41215196", "0.4116588", "0.4116588", "0.4116588", "0.41141936", "0.41137406", "0.41102085", "0.4099996", "0.40939078", "0.40842095", "0.40796968", "0.40796968", "0.40796968", "0.40796968", "0.40796968", "0.40796968", "0.40796968", "0.40796968", "0.40654063", "0.40613577", "0.40501368", "0.4049322", "0.4049069", "0.4049069", "0.40468374", "0.403756", "0.403039", "0.40237296", "0.40237296", "0.4023035", "0.40185058", "0.40179846", "0.4009486" ]
0.87411803
0
Getter method for echo_resp_sent_count, mapped from YANG variable /mpls_state/statistics_oam/echo_resp_sent_count (uint32)
def _get_echo_resp_sent_count(self): return self.__echo_resp_sent_count
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _set_echo_resp_sent_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-resp-sent-count\", rest_name=\"echo-resp-sent-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"echo_resp_sent_count must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-resp-sent-count\", rest_name=\"echo-resp-sent-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__echo_resp_sent_count = t\n if hasattr(self, '_set'):\n self._set()", "def _set_echo_resp_received_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-resp-received-count\", rest_name=\"echo-resp-received-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"echo_resp_received_count must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-resp-received-count\", rest_name=\"echo-resp-received-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__echo_resp_received_count = t\n if hasattr(self, '_set'):\n self._set()", "def _set_echo_req_sent_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-req-sent-count\", rest_name=\"echo-req-sent-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"echo_req_sent_count must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-req-sent-count\", rest_name=\"echo-req-sent-count\", parent=self, 
path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__echo_req_sent_count = t\n if hasattr(self, '_set'):\n self._set()", "def _get_echo_resp_received_count(self):\n return self.__echo_resp_received_count", "def _set_echo_req_received_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-req-received-count\", rest_name=\"echo-req-received-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"echo_req_received_count must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-req-received-count\", rest_name=\"echo-req-received-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__echo_req_received_count = t\n if hasattr(self, '_set'):\n self._set()", "def _get_echo_req_sent_count(self):\n return self.__echo_req_sent_count", "def _get_echo_req_received_count(self):\n return self.__echo_req_received_count", "def _set_echo_req_timeout_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-req-timeout-count\", rest_name=\"echo-req-timeout-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"echo_req_timeout_count must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-req-timeout-count\", rest_name=\"echo-req-timeout-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__echo_req_timeout_count = t\n if hasattr(self, '_set'):\n self._set()", "def response_count(self):\n return self.responses.count()", "def response_count(self) -> int:\n return pulumi.get(self, \"response_count\")", "def get_shown_responses_text(self):\r\n return self._get_element_text(\".response-display-count\")", "def sent_count(comment):\n return comment.__len__()", "def _get_echo_req_timeout_count(self):\n return self.__echo_req_timeout_count", "def 
sent_count(self):\n count = []\n for i in tqdm(self.text):\n count.append(len(sent_tokenize(i)))\n return count", "def get_num_displayed_responses(self):\r\n return len(self._find_within(\".discussion-response\"))", "def getNumOfMsgSend(self):\n return self.MsgSendCount", "def get_response_pdu_size(self):\n return 1 + 1 + 2 * self.count", "def get_response_total_text(self):\r\n return self._get_element_text(\".response-count\")", "def get_message_count(self):\n return self.buffer.count()", "def count_simsimi_msg(db):\n try:\n count = db.get('simsimi_info')['qty_answed_message']\n except:\n count = 1\n return count", "def count_response_codes():\n code = request.args.get('code', 200)\n log_lines = request.args.get('log_lines')\n\n if log_lines:\n lines_list = json.loads(log_lines)\n count = count_by_code(lines_list, code)\n else:\n count = 0\n\n response = str(count)\n return response", "def compute_mean_response_length(self):\n mean_response_length = 0\n for row in self.responses:\n mean_response_length += len(row.response)\n return round(mean_response_length / len(self.responses), 2)", "def message_count(self):\n return self._message_count", "def __len__(self):\n response = self._rpc(self._declare(True))\n return response.message_count", "def get_response_pdu_size(self):\n count = self.count // 8\n if self.count % 8:\n count += 1\n\n return 1 + 1 + count", "def count(self):\n\n return self._get(\"count\", rtype=UInt)", "def get_order_count(self):\n resp = self.app.get('/orders')\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n data = json.loads(resp.data)\n return len(data)", "async def sqs_count(self, _) -> Response:\n message = {\n \"message_count\": self.sqs.count()\n }\n return Response(text=json.dumps(message))", "def TriggeredVendorMessageLength(self):\n\t\treturn self._get_attribute('triggeredVendorMessageLength')", "def message_count(self):\n return len(self.messages)", "def get_tweet_count(self):\n return self.tweet_count.text", "def VendorMessageLength(self):\n\t\treturn self._get_attribute('vendorMessageLength')", "def count(self, answer):\n self._validate(answer)\n\n return len(self.filter(answer.group_id, answer.block_id, answer.answer_id, answer.group_instance, answer.answer_instance))", "def message_count(self):\n pass", "def sentence_count(self, doc):\n\n return len(sent_tokenize(doc))", "def count_messages(self, statuses=DEFAULT_MESSAGE_STATUSES):\n return self.request(\"count:Message\", [{\"status\": statuses}])", "def recv_resp(self):\n self.n_recv_resp += 1", "def echo_reply_handler(self, ev):\n now_timestamp = time.time()\n try:\n latency = now_timestamp - eval(ev.msg.data)\n self.echo_latency[ev.msg.datapath.id] = latency\n except:\n return", "def vsce_uokms_server_decrypt_response_len(self, ctx):\n vsce_uokms_server_decrypt_response_len = self._lib.vsce_uokms_server_decrypt_response_len\n vsce_uokms_server_decrypt_response_len.argtypes = [POINTER(vsce_uokms_server_t)]\n vsce_uokms_server_decrypt_response_len.restype = c_size_t\n return vsce_uokms_server_decrypt_response_len(ctx)", "def count(self, page_size=10, vtimeout=10):\r\n a = self.get_attributes('ApproximateNumberOfMessages')\r\n return int(a['ApproximateNumberOfMessages'])", "def _get_total_count(response_dict):\n try:\n return response_dict['total_count']\n except KeyError: # bug\n return '0'", "def coauthor_count(self):\n return self._json.get('coauthor-count', '0')", "def __len__(self):\n return len(self.sentrep)", "def __len__(self):\n return len(self.sent)", "def get_ev_count(self, stmt):\n 
return self.get_ev_count_by_hash(stmt.get_hash(shallow=True))", "def count_likes(self):\n likes = self.event_likes\n num_likes = len(likes)\n return num_likes", "def n_sents(doc: Doc) -> int:\n if not doc.has_annotation(\"SENT_START\"):\n LOGGER.warning(\n \"`doc` has not been segmented into sentences; applying spaCy's rule-based, \"\n \"`Sentencizer` pipeline component to `doc` before counting...\"\n )\n doc = _SENTENCIZER(doc)\n return itertoolz.count(doc.sents)", "def get_count(self):\n return unpack(os.read(self.fd, 8))", "def get_response_record_count(self):\n if self.record_count is None:\n raise QueryNotExecuted(\"No query has been executed. Use the Execute Query keyword to retrieve records.\")\n else:\n return self.record_count", "def send_resp(self):\n self.n_send_resp += 1", "def CountFlowResults(self, client_id, flow_id, with_tag=None, with_type=None):\n return len(\n self.ReadFlowResults(\n client_id,\n flow_id,\n 0,\n sys.maxsize,\n with_tag=with_tag,\n with_type=with_type))", "def get_status_code_count(har_json):\n entries = har_json['log']['entries']\n\n har_status_codes = Counter()\n\n for entry in entries:\n code = entry['response']['status']\n har_status_codes[code] += 1\n\n return har_status_codes", "def count(self):\n return self._lift(\"count\")", "def count(self):\n return self.ming_cursor.count()", "def count(self):\n return len(self._commands)", "def alerts_count(self) -> int:\n return pulumi.get(self, \"alerts_count\")", "def sentence_count(self):\n count = 0\n for line in self.lines:\n if '.' in line:\n count += 1\n if count == 0:\n count = 1\n return count\n #return line.count('.')\n #else:\n #return 1", "def get_opinion_count(self, words):\n sentiment_counts = [0, 0]\n if self.allow_negation:\n negated_words = self.get_negated_words(words)\n else:\n negated_words = [(word.lower(), False) for word in words]\n for word, negated in negated_words:\n if word in self.mapping.keys():\n sentiment = self.mapping[word]\n if sentiment == 'positive' or (sentiment == 'negative' and negated):\n sentiment_counts[1] += 1\n elif sentiment == 'negative' or (sentiment == 'positive' and negated):\n sentiment_counts[0] += 1\n return sentiment_counts", "def count_chat_with(self, actor_label):\n query = read_query('trust/count_chat_with') % actor_label\n response = self._submit_query(query)\n\n return response[0]['num_chats']['value'].split('/')[-1] if response != [] else ''", "def count_response_codes(self, code=404):\n count = 0\n\n for i, line in enumerate(self.input):\n substrings = line.split()\n try:\n response_code = substrings[6]\n if response_code == str(code):\n count += 1\n except IndexError:\n print('Line {line_number} appears to be invalid.'.format(\n line_number=i + 1), file=sys.stderr)\n\n return count", "def response_code(self):\r\n return self._response_code", "def count(self) -> Optional[int]:\n return pulumi.get(self, \"count\")", "def count(self) -> Optional[int]:\n return pulumi.get(self, \"count\")", "def __extractUserClearedCountFromResponse(xml_string):\r\n #description\r\n #\r\n try:\r\n response_element = XML(xml_string)\r\n cleared_count = response_element.attrib.get(\"count\")\r\n return cleared_count\r\n except (ExpatError,):\r\n raise ESymplecticParseFileError(\"Could not extract the number of Users cleared from the XML file returned by Symplectic API\")", "def get_track_count(self):\n self.app.curs.execute('select count(*) c from track')\n if self.app.curs.rowcount == 1:\n row = self.app.curs.fetchone()\n return row['c']\n else: # pragma: no cover\n 
return 0", "async def quote_count(self):\n await self.bot.type()\n result = self.count()\n await self.bot.say(result)", "def get_ResponseStatusCode(self):\n return self._output.get('ResponseStatusCode', None)", "def response_code(self):\n return self._response_code", "def response_code(self):\n return self._response_code", "def count(self):\n \n return self._count", "def count(self):\n return self.properties.get('count')", "def Count(self):\r\n\t\treturn self._get_attribute('count')", "def Count(self):\r\n\t\treturn self._get_attribute('count')", "def get_sents_length(tokenized_sents):\n return [len(sent) for sent in tokenized_sents]", "def Count(self):\n return self._get_attribute('count')", "def word_count(self):\n return Counter(self._normalize(self._raw_phrase_str))", "def word_count(excerpt):\n # Validate that we are actually give something to work with\n assert excerpt, \"excerpt cannot be blank\"\n return Counter(excerpt.split())", "def GetCount(self):\n return self._server.get_count()", "def hives_count(self) -> int:\n return self.hives.count()", "def count(self):\n return len(self.read_ints())", "def average_num_reply():\n post_count = Post.select().count()\n reply_count = Comment.select().count()\n if post_count == 0:\n return 0\n else:\n average = round(reply_count / post_count, 2)\n return average", "def count(ev):\n profData = getProfilingData(ev)\n if profData is not None:\n return profData.Count()\n return \"\"", "def get_count(self):\n return self._count", "def getNumod_byerSentences(self): # !!! Need to rewrite this to match graph\n return len(self.__document)", "def count(self):\n return self._count", "def count(self):\n return self._count", "def count(self):\n return self._count", "def count(self):\n return self._count", "def count(self):\n return self._count", "def count(self):\n return self._count", "def count(self):\n return self._count", "def count(self):\n return self._count", "def count(self):\n return self._count", "def count(self):\n return self._count", "def get_track_count(self) -> Optional[int]:\n return self.track_count", "def count(self) -> int:\n return pulumi.get(self, \"count\")", "def GetCount(self):\n return(self.count)", "def get_count(self, asset=None):\n if asset is None or 'pc:count' not in asset.properties:\n return self.item.properties.get('pc:count')\n else:\n return asset.properties.get('pc:count')", "def agent_count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"agent_count\")", "def message_len(self):\n # expect F, use zero\n return len(self.message) if self.message else 0" ]
[ "0.7885906", "0.71892625", "0.7101065", "0.69950885", "0.66274446", "0.6617462", "0.635762", "0.5800941", "0.577533", "0.55431056", "0.5397536", "0.5257677", "0.5200311", "0.5128361", "0.5111158", "0.51002544", "0.50733143", "0.50320685", "0.50222456", "0.4920297", "0.4917206", "0.4911336", "0.48959243", "0.48711067", "0.48487678", "0.4768916", "0.47582203", "0.47199044", "0.47185415", "0.4690841", "0.46712413", "0.46595487", "0.4656887", "0.46562976", "0.4633357", "0.45997787", "0.45713925", "0.4570275", "0.45686164", "0.45601937", "0.45528248", "0.45437592", "0.45359662", "0.4528709", "0.45282397", "0.452688", "0.44951123", "0.44574478", "0.4446361", "0.44457996", "0.44421756", "0.44300136", "0.4424113", "0.44111162", "0.43864748", "0.43811098", "0.43777874", "0.43577647", "0.4352954", "0.43512544", "0.434111", "0.43410575", "0.43410575", "0.4338669", "0.43381804", "0.43312246", "0.43271774", "0.4320114", "0.4320114", "0.43172023", "0.43141454", "0.43103385", "0.43103385", "0.430963", "0.43043014", "0.43029535", "0.43013015", "0.43004826", "0.43003944", "0.42739147", "0.42532536", "0.4245109", "0.4242686", "0.4234182", "0.42329273", "0.42329273", "0.42329273", "0.42329273", "0.42329273", "0.42329273", "0.42329273", "0.42329273", "0.42329273", "0.42329273", "0.42296302", "0.42271698", "0.422081", "0.42156014", "0.42146942", "0.42133337" ]
0.7451523
1
Setter method for echo_resp_sent_count, mapped from YANG variable /mpls_state/statistics_oam/echo_resp_sent_count (uint32)
def _set_echo_resp_sent_count(self, v, load=False): if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="echo-resp-sent-count", rest_name="echo-resp-sent-count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False) except (TypeError, ValueError): raise ValueError({ 'error-string': """echo_resp_sent_count must be of a type compatible with uint32""", 'defined-type': "uint32", 'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="echo-resp-sent-count", rest_name="echo-resp-sent-count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)""", }) self.__echo_resp_sent_count = t if hasattr(self, '_set'): self._set()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _set_echo_resp_received_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-resp-received-count\", rest_name=\"echo-resp-received-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"echo_resp_received_count must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-resp-received-count\", rest_name=\"echo-resp-received-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__echo_resp_received_count = t\n if hasattr(self, '_set'):\n self._set()", "def _set_echo_req_sent_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-req-sent-count\", rest_name=\"echo-req-sent-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"echo_req_sent_count must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-req-sent-count\", rest_name=\"echo-req-sent-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__echo_req_sent_count = t\n if hasattr(self, '_set'):\n self._set()", "def _get_echo_resp_sent_count(self):\n return self.__echo_resp_sent_count", "def _set_echo_req_received_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-req-received-count\", rest_name=\"echo-req-received-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"echo_req_received_count must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), 
is_leaf=True, yang_name=\"echo-req-received-count\", rest_name=\"echo-req-received-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__echo_req_received_count = t\n if hasattr(self, '_set'):\n self._set()", "def _get_echo_resp_received_count(self):\n return self.__echo_resp_received_count", "def _set_echo_req_timeout_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-req-timeout-count\", rest_name=\"echo-req-timeout-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"echo_req_timeout_count must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-req-timeout-count\", rest_name=\"echo-req-timeout-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__echo_req_timeout_count = t\n if hasattr(self, '_set'):\n self._set()", "def _get_echo_req_sent_count(self):\n return self.__echo_req_sent_count", "def _get_echo_req_received_count(self):\n return self.__echo_req_received_count", "def response_count(self):\n return self.responses.count()", "def response_count(self) -> int:\n return pulumi.get(self, \"response_count\")", "def get_shown_responses_text(self):\r\n return self._get_element_text(\".response-display-count\")", "def sent_count(comment):\n return comment.__len__()", "def get_num_displayed_responses(self):\r\n return len(self._find_within(\".discussion-response\"))", "def _get_echo_req_timeout_count(self):\n return self.__echo_req_timeout_count", "def sent_count(self):\n count = []\n for i in tqdm(self.text):\n count.append(len(sent_tokenize(i)))\n return count", "def get_response_pdu_size(self):\n return 1 + 1 + 2 * self.count", "def send_resp(self):\n self.n_send_resp += 1", "def get_response_total_text(self):\r\n return self._get_element_text(\".response-count\")", "def recv_resp(self):\n self.n_recv_resp += 1", "def getNumOfMsgSend(self):\n return self.MsgSendCount", "def count_response_codes():\n code = request.args.get('code', 200)\n log_lines = request.args.get('log_lines')\n\n if log_lines:\n lines_list = json.loads(log_lines)\n count = count_by_code(lines_list, code)\n else:\n count = 0\n\n response = str(count)\n return response", "def sentence_count(self, doc):\n\n return len(sent_tokenize(doc))", "def compute_mean_response_length(self):\n mean_response_length = 0\n for row in self.responses:\n mean_response_length += len(row.response)\n return round(mean_response_length / len(self.responses), 2)", "def message_count(self):\n return self._message_count", "def get_response_pdu_size(self):\n count = self.count // 8\n if 
self.count % 8:\n count += 1\n\n return 1 + 1 + count", "def get_message_count(self):\n return self.buffer.count()", "def message_count(self):\n pass", "async def sqs_count(self, _) -> Response:\n message = {\n \"message_count\": self.sqs.count()\n }\n return Response(text=json.dumps(message))", "def vsce_uokms_server_decrypt_response_len(self, ctx):\n vsce_uokms_server_decrypt_response_len = self._lib.vsce_uokms_server_decrypt_response_len\n vsce_uokms_server_decrypt_response_len.argtypes = [POINTER(vsce_uokms_server_t)]\n vsce_uokms_server_decrypt_response_len.restype = c_size_t\n return vsce_uokms_server_decrypt_response_len(ctx)", "def count(self):\n\n return self._get(\"count\", rtype=UInt)", "def count_simsimi_msg(db):\n try:\n count = db.get('simsimi_info')['qty_answed_message']\n except:\n count = 1\n return count", "def n_sents(doc: Doc) -> int:\n if not doc.has_annotation(\"SENT_START\"):\n LOGGER.warning(\n \"`doc` has not been segmented into sentences; applying spaCy's rule-based, \"\n \"`Sentencizer` pipeline component to `doc` before counting...\"\n )\n doc = _SENTENCIZER(doc)\n return itertoolz.count(doc.sents)", "def count(self, page_size=10, vtimeout=10):\r\n a = self.get_attributes('ApproximateNumberOfMessages')\r\n return int(a['ApproximateNumberOfMessages'])", "def __len__(self):\n response = self._rpc(self._declare(True))\n return response.message_count", "def echo_reply_handler(self, ev):\n now_timestamp = time.time()\n try:\n latency = now_timestamp - eval(ev.msg.data)\n self.echo_latency[ev.msg.datapath.id] = latency\n except:\n return", "def count_messages(self, statuses=DEFAULT_MESSAGE_STATUSES):\n return self.request(\"count:Message\", [{\"status\": statuses}])", "async def quote_count(self):\n await self.bot.type()\n result = self.count()\n await self.bot.say(result)", "def __len__(self):\n return len(self.sentrep)", "def TriggeredVendorMessageLength(self):\n\t\treturn self._get_attribute('triggeredVendorMessageLength')", "def get_tweet_count(self):\n return self.tweet_count.text", "def message_count(self):\n return len(self.messages)", "def coauthor_count(self):\n return self._json.get('coauthor-count', '0')", "def count_likes(self):\n likes = self.event_likes\n num_likes = len(likes)\n return num_likes", "def count(self, answer):\n self._validate(answer)\n\n return len(self.filter(answer.group_id, answer.block_id, answer.answer_id, answer.group_instance, answer.answer_instance))", "def VendorMessageLength(self):\n\t\treturn self._get_attribute('vendorMessageLength')", "def test_status_reply_count(self):\n with self.app.app_context():\n u = user(save=True)\n s = status(user=u, project=None, save=True)\n for i in range(5):\n status(user=u, project=None, reply_to=s, save=True)\n\n eq_(s.reply_count, 5)", "def __len__(self):\n return len(self.sent)", "def set_number_of_sentences(self):\n self.number_of_sentences = int(self.num_sentences.get())", "def alerts_count(self) -> int:\n return pulumi.get(self, \"alerts_count\")", "def get_ResponseStatusCode(self):\n return self._output.get('ResponseStatusCode', None)", "def count(self, value):\n \n self._count = int(value)", "def response_code(self):\n return self._response_code", "def response_code(self):\n return self._response_code", "def count(self):\n return self._lift(\"count\")", "def response_code(self):\r\n return self._response_code", "def message_count(self, message_count):\r\n\r\n self._message_count = message_count", "def get_ev_count(self, stmt):\n return 
self.get_ev_count_by_hash(stmt.get_hash(shallow=True))", "def _get_total_count(response_dict):\n try:\n return response_dict['total_count']\n except KeyError: # bug\n return '0'", "def CountFlowResults(self, client_id, flow_id, with_tag=None, with_type=None):\n return len(\n self.ReadFlowResults(\n client_id,\n flow_id,\n 0,\n sys.maxsize,\n with_tag=with_tag,\n with_type=with_type))", "def count_response_codes(self, code=404):\n count = 0\n\n for i, line in enumerate(self.input):\n substrings = line.split()\n try:\n response_code = substrings[6]\n if response_code == str(code):\n count += 1\n except IndexError:\n print('Line {line_number} appears to be invalid.'.format(\n line_number=i + 1), file=sys.stderr)\n\n return count", "def get_order_count(self):\n resp = self.app.get('/orders')\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n data = json.loads(resp.data)\n return len(data)", "def set_count(self, count, asset=None):\n self._set_property('pc:count', count, asset)", "def count_chat_with(self, actor_label):\n query = read_query('trust/count_chat_with') % actor_label\n response = self._submit_query(query)\n\n return response[0]['num_chats']['value'].split('/')[-1] if response != [] else ''", "def count(self):\n return len(self._commands)", "def word_count(excerpt):\n # Validate that we are actually give something to work with\n assert excerpt, \"excerpt cannot be blank\"\n return Counter(excerpt.split())", "def count(self, ngram, options):\n return len(self.find_docs(ngram, options))", "def addLikeCount(self,count):\n self.interactionCount += count\n return None", "def get_response_record_count(self):\n if self.record_count is None:\n raise QueryNotExecuted(\"No query has been executed. Use the Execute Query keyword to retrieve records.\")\n else:\n return self.record_count", "def count(self) -> Optional[int]:\n return pulumi.get(self, \"count\")", "def count(self) -> Optional[int]:\n return pulumi.get(self, \"count\")", "def count(self):\n with self._block:\n counter = re.search(r'count=(\\d+) ', repr(self))\n return int(counter.group(1))", "def hives_count(self) -> int:\n return self.hives.count()", "def count(self):\n return self.ming_cursor.count()", "def sentence_count(self):\n count = 0\n for line in self.lines:\n if '.' in line:\n count += 1\n if count == 0:\n count = 1\n return count\n #return line.count('.')\n #else:\n #return 1", "def count(self) -> int:\n return self._count", "def count(self) -> int:\n return self._count", "def count(self) -> int:\n return self._count", "async def on_count(ctx):\n count = get_count()\n await ctx.send(f'current count {count}')", "def count(self):\n \n return self._count", "def get_counts(self):\n value = self.text_ctrl.GetValue()\n chars = len(value)\n words = len(re.findall('\\w+', value))\n pub.sendMessage('update_counts', chars=chars, words=words)", "def sent_len(self) -> int:\n raise NotImplementedError(\"must be implemented by subclasses\")", "def word_count(self):\n return Counter(self._normalize(self._raw_phrase_str))", "def count(self) -> int:\n return pulumi.get(self, \"count\")", "def count(self):\n return self.properties.get('count')", "def getMessageCount(self):\n return 9", "def getNumod_byerSentences(self): # !!! 
Need to rewrite this to match graph\n return len(self.__document)", "def GetCount(self):\n return(self.count)", "def GetCount(self):\n return self._server.get_count()", "async def count(self, ctx):\r\n if ctx.invoked_subcommand is None:\r\n await self.bot.send_cmd_help(ctx)", "def get_status_code_count(har_json):\n entries = har_json['log']['entries']\n\n har_status_codes = Counter()\n\n for entry in entries:\n code = entry['response']['status']\n har_status_codes[code] += 1\n\n return har_status_codes", "def average_num_reply():\n post_count = Post.select().count()\n reply_count = Comment.select().count()\n if post_count == 0:\n return 0\n else:\n average = round(reply_count / post_count, 2)\n return average", "async def count(self, **kw):\n\n pass", "def count(self):\n return self._count", "def count(self):\n return self._count", "def count(self):\n return self._count", "def count(self):\n return self._count", "def count(self):\n return self._count", "def count(self):\n return self._count", "def count(self):\n return self._count", "def count(self):\n return self._count" ]
[ "0.7872074", "0.7738624", "0.7278106", "0.7268996", "0.6715367", "0.65274954", "0.6379835", "0.60264504", "0.5693287", "0.55500114", "0.5237852", "0.515169", "0.50731426", "0.5025889", "0.49551892", "0.4954932", "0.4935209", "0.49100864", "0.4884356", "0.48487535", "0.48274243", "0.47079432", "0.46981698", "0.46899626", "0.46785292", "0.46620303", "0.46530867", "0.46495768", "0.4625437", "0.46002287", "0.45978555", "0.45971596", "0.4588342", "0.45698088", "0.45507216", "0.45440674", "0.45081034", "0.44982907", "0.4484465", "0.4477941", "0.44692248", "0.44450676", "0.4415779", "0.44028562", "0.44015333", "0.4392595", "0.43638298", "0.4359831", "0.43105707", "0.43075117", "0.43051168", "0.4304141", "0.4304141", "0.4299018", "0.42960465", "0.42886654", "0.42776546", "0.42697367", "0.4267334", "0.4264295", "0.42578346", "0.42565998", "0.42553884", "0.42393562", "0.42368624", "0.4229623", "0.42284828", "0.42220232", "0.42216045", "0.42216045", "0.4217279", "0.421695", "0.42102578", "0.42101106", "0.42049733", "0.42049733", "0.42049733", "0.420318", "0.42011917", "0.4199999", "0.41957164", "0.41956493", "0.41903818", "0.41783422", "0.417307", "0.41644195", "0.41350594", "0.4129986", "0.41263467", "0.41218325", "0.41190213", "0.4116661", "0.4108597", "0.4108597", "0.4108597", "0.4108597", "0.4108597", "0.4108597", "0.4108597", "0.4108597" ]
0.8611856
0
Getter method for echo_resp_received_count, mapped from YANG variable /mpls_state/statistics_oam/echo_resp_received_count (uint32)
def _get_echo_resp_received_count(self): return self.__echo_resp_received_count
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _set_echo_resp_received_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-resp-received-count\", rest_name=\"echo-resp-received-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"echo_resp_received_count must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-resp-received-count\", rest_name=\"echo-resp-received-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__echo_resp_received_count = t\n if hasattr(self, '_set'):\n self._set()", "def _set_echo_req_received_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-req-received-count\", rest_name=\"echo-req-received-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"echo_req_received_count must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-req-received-count\", rest_name=\"echo-req-received-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__echo_req_received_count = t\n if hasattr(self, '_set'):\n self._set()", "def _set_echo_resp_sent_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-resp-sent-count\", rest_name=\"echo-resp-sent-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"echo_resp_sent_count must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-resp-sent-count\", 
rest_name=\"echo-resp-sent-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__echo_resp_sent_count = t\n if hasattr(self, '_set'):\n self._set()", "def _get_echo_req_received_count(self):\n return self.__echo_req_received_count", "def _get_echo_resp_sent_count(self):\n return self.__echo_resp_sent_count", "def _set_echo_req_timeout_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-req-timeout-count\", rest_name=\"echo-req-timeout-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"echo_req_timeout_count must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-req-timeout-count\", rest_name=\"echo-req-timeout-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__echo_req_timeout_count = t\n if hasattr(self, '_set'):\n self._set()", "def _set_echo_req_sent_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-req-sent-count\", rest_name=\"echo-req-sent-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"echo_req_sent_count must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-req-sent-count\", rest_name=\"echo-req-sent-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__echo_req_sent_count = t\n if hasattr(self, '_set'):\n self._set()", "def response_count(self) -> int:\n return pulumi.get(self, \"response_count\")", "def _get_echo_req_sent_count(self):\n return self.__echo_req_sent_count", "def response_count(self):\n return self.responses.count()", "def _get_echo_req_timeout_count(self):\n return self.__echo_req_timeout_count", "def get_message_count(self):\n return self.buffer.count()", "def recv_resp(self):\n self.n_recv_resp += 1", "def count_response_codes():\n code = 
request.args.get('code', 200)\n log_lines = request.args.get('log_lines')\n\n if log_lines:\n lines_list = json.loads(log_lines)\n count = count_by_code(lines_list, code)\n else:\n count = 0\n\n response = str(count)\n return response", "def echo_reply_handler(self, ev):\n now_timestamp = time.time()\n try:\n latency = now_timestamp - eval(ev.msg.data)\n self.echo_latency[ev.msg.datapath.id] = latency\n except:\n return", "def get_num_displayed_responses(self):\r\n return len(self._find_within(\".discussion-response\"))", "def get_response_pdu_size(self):\n return 1 + 1 + 2 * self.count", "def __len__(self):\n response = self._rpc(self._declare(True))\n return response.message_count", "def vsce_uokms_server_decrypt_response_len(self, ctx):\n vsce_uokms_server_decrypt_response_len = self._lib.vsce_uokms_server_decrypt_response_len\n vsce_uokms_server_decrypt_response_len.argtypes = [POINTER(vsce_uokms_server_t)]\n vsce_uokms_server_decrypt_response_len.restype = c_size_t\n return vsce_uokms_server_decrypt_response_len(ctx)", "def message_count(self):\n return self._message_count", "def count(self):\n\n return self._get(\"count\", rtype=UInt)", "def get_count(self):\n return unpack(os.read(self.fd, 8))", "def get_response_pdu_size(self):\n count = self.count // 8\n if self.count % 8:\n count += 1\n\n return 1 + 1 + count", "def get_order_count(self):\n resp = self.app.get('/orders')\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n data = json.loads(resp.data)\n return len(data)", "def receive_response(self):\n return self.socket.receive()", "def message_count(self):\n return len(self.messages)", "def message_count(self):\n pass", "def get_message_length(self):\n return len(self._payload)", "def get_response_record_count(self):\n if self.record_count is None:\n raise QueryNotExecuted(\"No query has been executed. 
Use the Execute Query keyword to retrieve records.\")\n else:\n return self.record_count", "def compute_mean_response_length(self):\n mean_response_length = 0\n for row in self.responses:\n mean_response_length += len(row.response)\n return round(mean_response_length / len(self.responses), 2)", "def getNumOfMsgRec(self):\n return self.MsgReceiveCount", "def get_response(self):\n\n response = self.socket.recv(1024)\n code = response.split(\" \")[0]\n message = response[4:]\n\n return int(code), message", "def get_shown_responses_text(self):\r\n return self._get_element_text(\".response-display-count\")", "def count_simsimi_msg(db):\n try:\n count = db.get('simsimi_info')['qty_answed_message']\n except:\n count = 1\n return count", "def _get_total_count(response_dict):\n try:\n return response_dict['total_count']\n except KeyError: # bug\n return '0'", "def get_response_total_text(self):\r\n return self._get_element_text(\".response-count\")", "def count_messages(self, statuses=DEFAULT_MESSAGE_STATUSES):\n return self.request(\"count:Message\", [{\"status\": statuses}])", "def get_kudos_received_count(khoros_object, user_settings=None, user_id=None, login=None, email=None):\n user_settings = _process_settings_and_user_id(khoros_object, user_settings, user_id, login, email)\n return _get_sum_weight(khoros_object, user_settings['id'], 'kudos_received')", "def _read_len(self):\n\n read = self.socket.recv(4)\n if len(read) == 0:\n # if we read 0 bytes and self.message is empty, it means client\n # closed the connection\n if len(self.message) != 0:\n logging.error(\"can't read frame size from socket\")\n self.close()\n return\n self.message += read\n if len(self.message) == 4:\n self.len, = struct.unpack(b'!i', self.message)\n if self.len < 0:\n logging.error(\"negative frame size, it seems client\"\\\n \" doesn't use FramedTransport\")\n self.close()\n elif self.len == 0:\n logging.error(\"empty frame, it's really strange\")\n self.close()\n else:\n self.len += 4 # Include message length\n self._set_status(WAIT_MESSAGE)", "def _parse_release_count(self, resp: Dict[str, Any]) -> str:\n return f\"{len(resp.get('releases', []))}\"", "def getLength(msg):\n return len(msg)", "def count(self, answer):\n self._validate(answer)\n\n return len(self.filter(answer.group_id, answer.block_id, answer.answer_id, answer.group_instance, answer.answer_instance))", "def count(self):\n return len(self.read_ints())", "def stored_cookie_messages_count(storage, response):\n # Get a list of cookies, excluding ones with a max-age of 0 (because\n # they have been marked for deletion).\n cookie = response.cookies.get(storage.cookie_name)\n if not cookie or cookie['max-age'] == 0:\n return 0\n data = storage._decode(cookie.value)\n if not data:\n return 0\n return len(data)", "def _get_count(self, msg, subtype=\"all\"):\n try:\n counts = self.get_local(msg, \"counts\")\n return counts.get(subtype, 0)\n except KeyError:\n return 0", "def response_received(self, ignored):\n self._received += 1", "def _read_len(self):\r\n read = self.socket.recv(4 - len(self.message))\r\n if len(read) == 0:\r\n # if we read 0 bytes and self.message is empty, it means client close \r\n # connection\r\n if len(self.message) != 0:\r\n logging.error(\"can't read frame size from socket\")\r\n self.close()\r\n return\r\n self.message += read\r\n if len(self.message) == 4:\r\n self.len, = struct.unpack('!i', self.message)\r\n if self.len < 0:\r\n logging.error(\"negative frame size, it seems client\"\\\r\n \" doesn't use FramedTransport\")\r\n 
self.close()\r\n elif self.len == 0:\r\n logging.error(\"empty frame, it's really strange\")\r\n self.close()\r\n else:\r\n self.message = ''\r\n self.status = WAIT_MESSAGE", "def get_number_of_reactions(self):\n return self._performed_actions[REACT]", "def recv_resp_simple(self, bytes=1024):\n mess = self.sock.recv(bytes)\n # print('Got message of length: %i byte(s)' % len(mess))\n return mess", "def _read_length(self):\n msg_length = struct.unpack('!I', self.received_data[0])[0]\n self.l.debug('msg_length = %d', msg_length)\n self.set_terminator(msg_length)\n self.process_data = self._read_message\n self.received_data = []", "def count(self, page_size=10, vtimeout=10):\r\n a = self.get_attributes('ApproximateNumberOfMessages')\r\n return int(a['ApproximateNumberOfMessages'])", "def message_len(self):\n # expect F, use zero\n return len(self.message) if self.message else 0", "def handle_echo(self, event):\n print('Echo received')\n return 0x0000", "async def sqs_count(self, _) -> Response:\n message = {\n \"message_count\": self.sqs.count()\n }\n return Response(text=json.dumps(message))", "def read_count(self):\n return self._read_count", "def __extractUserClearedCountFromResponse(xml_string):\r\n #description\r\n #\r\n try:\r\n response_element = XML(xml_string)\r\n cleared_count = response_element.attrib.get(\"count\")\r\n return cleared_count\r\n except (ExpatError,):\r\n raise ESymplecticParseFileError(\"Could not extract the number of Users cleared from the XML file returned by Symplectic API\")", "def _predictResponseSize(mode, functioncode, payloadToSubordinate):\n MIN_PAYLOAD_LENGTH = 4 # For implemented functioncodes here\n BYTERANGE_FOR_GIVEN_SIZE = slice(2, 4) # Within the payload\n\n NUMBER_OF_PAYLOAD_BYTES_IN_WRITE_CONFIRMATION = 4\n NUMBER_OF_PAYLOAD_BYTES_FOR_BYTECOUNTFIELD = 1\n\n RTU_TO_ASCII_PAYLOAD_FACTOR = 2\n\n NUMBER_OF_RTU_RESPONSE_STARTBYTES = 2\n NUMBER_OF_RTU_RESPONSE_ENDBYTES = 2\n NUMBER_OF_ASCII_RESPONSE_STARTBYTES = 5\n NUMBER_OF_ASCII_RESPONSE_ENDBYTES = 4\n\n # Argument validity testing\n _checkMode(mode)\n _checkFunctioncode(functioncode, None)\n _checkString(payloadToSubordinate, description='payload', minlength=MIN_PAYLOAD_LENGTH)\n\n # Calculate payload size\n if functioncode in [5, 6, 15, 16]:\n response_payload_size = NUMBER_OF_PAYLOAD_BYTES_IN_WRITE_CONFIRMATION\n\n elif functioncode in [1, 2, 3, 4]:\n given_size = _twoByteStringToNum(payloadToSubordinate[BYTERANGE_FOR_GIVEN_SIZE])\n if functioncode == 1 or functioncode == 2:\n # Algorithm from MODBUS APPLICATION PROTOCOL SPECIFICATION V1.1b\n number_of_inputs = given_size\n response_payload_size = NUMBER_OF_PAYLOAD_BYTES_FOR_BYTECOUNTFIELD + \\\n number_of_inputs // 8 + (1 if number_of_inputs % 8 else 0)\n\n elif functioncode == 3 or functioncode == 4:\n number_of_registers = given_size\n response_payload_size = NUMBER_OF_PAYLOAD_BYTES_FOR_BYTECOUNTFIELD + \\\n number_of_registers * _NUMBER_OF_BYTES_PER_REGISTER\n\n else:\n raise ValueError('Wrong functioncode: {}. 
The payload is: {!r}'.format( \\\n functioncode, payloadToSubordinate))\n\n # Calculate number of bytes to read\n if mode == MODE_ASCII:\n return NUMBER_OF_ASCII_RESPONSE_STARTBYTES + \\\n response_payload_size * RTU_TO_ASCII_PAYLOAD_FACTOR + \\\n NUMBER_OF_ASCII_RESPONSE_ENDBYTES\n else:\n return NUMBER_OF_RTU_RESPONSE_STARTBYTES + \\\n response_payload_size + \\\n NUMBER_OF_RTU_RESPONSE_ENDBYTES", "def get_status_code_count(har_json):\n entries = har_json['log']['entries']\n\n har_status_codes = Counter()\n\n for entry in entries:\n code = entry['response']['status']\n har_status_codes[code] += 1\n\n return har_status_codes", "def TriggeredVendorMessageLength(self):\n\t\treturn self._get_attribute('triggeredVendorMessageLength')", "def get_order_detail_count(orderid): \n data = order_obj.get_order_detail(orderid,\"1\")\n return data", "def receive_message(self):\n num_bytes = ReadMessage(self.socket.recv(4)).read_uint32()\n\n if num_bytes > self.MAX_MESSAGE_LENGTH:\n raise ValueError('Message from agent is too long ({} bytes)'.format(num_bytes))\n\n return self.socket.recv(num_bytes)", "def count_response_codes(self, code=404):\n count = 0\n\n for i, line in enumerate(self.input):\n substrings = line.split()\n try:\n response_code = substrings[6]\n if response_code == str(code):\n count += 1\n except IndexError:\n print('Line {line_number} appears to be invalid.'.format(\n line_number=i + 1), file=sys.stderr)\n\n return count", "def get_tweet_count(self):\n return self.tweet_count.text", "def VendorMessageLength(self):\n\t\treturn self._get_attribute('vendorMessageLength')", "def getEchoReturnLoss(self, channel, unitCode=0):\n resp = self.XAPCommand('ERL', channel, unitCode=unitCode)\n return int(resp)", "def alerts_count(self) -> int:\n return pulumi.get(self, \"alerts_count\")", "def message_length(self):\n return self._message_length", "def GetCount(self):\n return self._server.get_count()", "def github_num_reactions(comment_object: Dict[str, Any]) -> int:\n return comment_object.get('reactions', {}).get('total_count', 0)", "def _get_event_history_count(device_event_file_path, event_label, timeout=10.0):\n result = 0\n timedout = False\n\n file_exists, remaining_timeout = _wait_for_event_file(device_event_file_path,\n timeout)\n if not file_exists:\n timedout = True\n return result, timedout\n\n timeout_str = \"{:f}\".format(remaining_timeout)\n\n grep_cmd = [\n \"timeout\", timeout_str, \"grep\", \"-c\", \"-w\", event_label,\n device_event_file_path\n ]\n grep_proc = subprocess.Popen(grep_cmd, stdout=subprocess.PIPE)\n out, _ = grep_proc.communicate()\n if grep_proc.returncode == 124:\n timedout = True\n\n if out:\n result = int(out.strip())\n\n return result, timedout", "def winhttp_WinHttpReceiveResponse(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"hRequest\", \"lpReserved\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)", "def getMessageCount(self):\n return 9", "def get_error_number(message, received):\n stripped_message = message.replace(\" \", \"\")\n stripped_message = stripped_message.strip()\n errors = 0\n for idx in range(len(received)):\n if stripped_message[idx] != received[idx]:\n errors += 1\n return errors", "def total_oros(self):\n return len(self._cards[\"oro\"])", "def handle_read(self):\n packet = self.recv(8192)\n if packet == \"\":\n #print \"[WARNING] Socket closed by remote host %s:%s\" % (\n # self.address,self.port)\n self.close()\n return\n packet_list = messages.separate_messages(packet)\n #received_types 
= \" + \".join(\n # messages.get_message_type(messages.parse(packet))\n # for packet in packet_list)\n #print \"From %s:%s received: \" % (self.address, self.port), received_types\n # Process a single message at a time\n for packet in packet_list:\n message = messages.parse(packet)\n if messages.get_message_type(message) == \"OFPT_ECHO_REQUEST\":\n self.buffer.append(messages.of_echo_reply)\n else:\n self.handle_message(message)", "def get_ResponseStatusCode(self):\n return self._output.get('ResponseStatusCode', None)", "def vscr_ratchet_group_session_decrypt_len(self, ctx, message):\n vscr_ratchet_group_session_decrypt_len = self._lib.vscr_ratchet_group_session_decrypt_len\n vscr_ratchet_group_session_decrypt_len.argtypes = [POINTER(vscr_ratchet_group_session_t), POINTER(vscr_ratchet_group_message_t)]\n vscr_ratchet_group_session_decrypt_len.restype = c_size_t\n return vscr_ratchet_group_session_decrypt_len(ctx, message)", "def response_code(self):\r\n return self._response_code", "def observation_count(self):\n if not self.can_update():\n self._handle_error(910, [self.type])\n return self.tc_requests.observation_count(\n self.api_type, self.api_branch, self.unique_id, owner=self.owner\n )", "def print_ofpt_echo_reply(msg):\n if len(msg.data.value) > 0:\n hexdump(msg.data.value)", "def getNumOfMsgSend(self):\n return self.MsgSendCount", "def GetCount(self, label: 'unsigned char') -> \"unsigned long long\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIUC2IUC2_GetCount(self, label)", "def count(request):\r\n n = request.user.profile.unread_message_count()\r\n data = {\r\n 'count': n,\r\n }\r\n return HttpResponse(json.dumps(data), mimetype='application/json')", "def length(self):\n return struct.unpack('<B', self.pkt.payload[1:2])[0]", "def GetCount(self, label: 'unsigned char') -> \"unsigned long long\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIF2IUC2_GetCount(self, label)", "def coauthor_count(self):\n return self._json.get('coauthor-count', '0')", "def _get_count(awsclient, function_name, alias_name='ACTIVE', version=None):\n client_lambda = awsclient.get_client('lambda')\n payload = '{\"ramuda_action\": \"count\"}'\n\n if version:\n response = client_lambda.invoke(\n FunctionName=function_name,\n InvocationType='RequestResponse',\n Payload=payload,\n Qualifier=version\n )\n else:\n response = client_lambda.invoke(\n FunctionName=function_name,\n InvocationType='RequestResponse',\n Payload=payload,\n Qualifier=alias_name\n )\n\n # print type(response['Payload'])\n results = response['Payload'].read() # payload is a 'StreamingBody'\n return results", "def get_length(self):\r\n return len(self.tweets)", "def count_likes(self):\n likes = self.event_likes\n num_likes = len(likes)\n return num_likes", "def determine_length_of_json_msg(self, message_bytes):\n # All messages must be written in utf-8\n message = message_bytes.decode('utf-8')\n # Check that the message we have been given looks like a valid length header\n if \",\" not in message:\n raise InvalidLengthHeader(message)\n length_portion = message.split(\",\")[0]\n left_bracket = length_portion[0] == \"[\"\n number_before_comma = length_portion[-1] in \"1234567890\"\n if left_bracket and number_before_comma:\n for character in enumerate(length_portion):\n if character[1] not in \"[ \\n\\t\\r1234567890,\":\n raise InvalidLengthHeader(length_portion)\n elif character[1] in \"1234567890\":\n length_start = character[0]\n return int(length_portion[length_start:])\n elif 
left_bracket:\n raise InvalidLengthHeader(length_portion)\n else:\n raise MissingLengthHeader(length_portion)\n return False", "def response_code(self):\n return self._response_code", "def response_code(self):\n return self._response_code", "def receive_response(self, private_key, responder_id, msg_tag, response):\n return self._handle_response(private_key, responder_id, msg_tag, response)", "def recv_open_response(self, recv_payload):\n\n\tunpacked_payload = struct.unpack(\"!?Q2I\", recv_payload)\n # Read status field. If set to False, ignore remaining fields and \n\t# generate error msg (file not found) before exiting. \n\t# Each unpacked value is a tuple, so [0] accesses the value that we want\n\tstatus = unpacked_payload[0:1][0]\n\tif status == False:\n\t print \"Error: File not found.\"\n\t sys.exit()\n\t\n\t#If set to True, read remaining fields.\n\telif status == True:\n\t print(\"File found.\")\n\t self.file_length = unpacked_payload[1:2][0]\n\t self.epoch_no = unpacked_payload[2:3][0]\n\t self.handle_no = unpacked_payload[3:][0]\t \t \n\treturn", "def length(self):\n return struct.unpack('<B', self.pkt.payload[2:3])[0]", "def length(self):\n return struct.unpack('<B', self.pkt.payload[2:3])[0]", "def action_count(raw_val):\n\n if isinstance(raw_val, list):\n return len(raw_val)\n else:\n return None", "def count_chat_with(self, actor_label):\n query = read_query('trust/count_chat_with') % actor_label\n response = self._submit_query(query)\n\n return response[0]['num_chats']['value'].split('/')[-1] if response != [] else ''", "def average_num_reply():\n post_count = Post.select().count()\n reply_count = Comment.select().count()\n if post_count == 0:\n return 0\n else:\n average = round(reply_count / post_count, 2)\n return average", "def count_by_code(_input, code):\n log_parser = ApacheLogParser(_input)\n count = log_parser.count_response_codes(code)\n return count" ]
[ "0.785285", "0.70278424", "0.6552268", "0.64812785", "0.6124184", "0.55265635", "0.5396158", "0.5115494", "0.5112692", "0.50826174", "0.49409315", "0.4853131", "0.4822724", "0.47961304", "0.4737998", "0.47299495", "0.47204226", "0.46952853", "0.46684587", "0.46589735", "0.46527314", "0.46346018", "0.4572006", "0.45142123", "0.4505391", "0.44645974", "0.44538108", "0.4426214", "0.4396354", "0.43819296", "0.43710145", "0.4369689", "0.4363353", "0.43603498", "0.43567297", "0.4352795", "0.4345718", "0.4332374", "0.43296617", "0.43250796", "0.43199265", "0.42848757", "0.42584804", "0.4258398", "0.4233473", "0.4224732", "0.4213862", "0.42077348", "0.42065427", "0.42022648", "0.42018571", "0.41968924", "0.41920358", "0.41896603", "0.4183842", "0.41759735", "0.41683987", "0.41644406", "0.4162215", "0.41573018", "0.41559672", "0.41541892", "0.414914", "0.4145542", "0.4145246", "0.41449335", "0.41375315", "0.4102662", "0.409401", "0.40930474", "0.40733162", "0.40710917", "0.4068345", "0.40608406", "0.40517738", "0.40473324", "0.40471143", "0.4036198", "0.40345815", "0.40322578", "0.40238556", "0.40217012", "0.40215647", "0.40215608", "0.40123233", "0.4011308", "0.4009319", "0.400842", "0.4006448", "0.40023297", "0.3999971", "0.3999971", "0.399549", "0.3990309", "0.39879942", "0.39879942", "0.398744", "0.39862323", "0.3984972", "0.39797848" ]
0.731385
1
Setter method for echo_resp_received_count, mapped from YANG variable /mpls_state/statistics_oam/echo_resp_received_count (uint32)
def _set_echo_resp_received_count(self, v, load=False): if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="echo-resp-received-count", rest_name="echo-resp-received-count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False) except (TypeError, ValueError): raise ValueError({ 'error-string': """echo_resp_received_count must be of a type compatible with uint32""", 'defined-type': "uint32", 'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="echo-resp-received-count", rest_name="echo-resp-received-count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)""", }) self.__echo_resp_received_count = t if hasattr(self, '_set'): self._set()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _set_echo_req_received_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-req-received-count\", rest_name=\"echo-req-received-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"echo_req_received_count must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-req-received-count\", rest_name=\"echo-req-received-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__echo_req_received_count = t\n if hasattr(self, '_set'):\n self._set()", "def _set_echo_resp_sent_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-resp-sent-count\", rest_name=\"echo-resp-sent-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"echo_resp_sent_count must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-resp-sent-count\", rest_name=\"echo-resp-sent-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__echo_resp_sent_count = t\n if hasattr(self, '_set'):\n self._set()", "def _get_echo_resp_received_count(self):\n return self.__echo_resp_received_count", "def _set_echo_req_timeout_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-req-timeout-count\", rest_name=\"echo-req-timeout-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"echo_req_timeout_count must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), 
is_leaf=True, yang_name=\"echo-req-timeout-count\", rest_name=\"echo-req-timeout-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__echo_req_timeout_count = t\n if hasattr(self, '_set'):\n self._set()", "def _get_echo_req_received_count(self):\n return self.__echo_req_received_count", "def _set_echo_req_sent_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-req-sent-count\", rest_name=\"echo-req-sent-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"echo_req_sent_count must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-req-sent-count\", rest_name=\"echo-req-sent-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__echo_req_sent_count = t\n if hasattr(self, '_set'):\n self._set()", "def _get_echo_resp_sent_count(self):\n return self.__echo_resp_sent_count", "def response_count(self) -> int:\n return pulumi.get(self, \"response_count\")", "def recv_resp(self):\n self.n_recv_resp += 1", "def response_count(self):\n return self.responses.count()", "def _get_echo_req_sent_count(self):\n return self.__echo_req_sent_count", "def _get_echo_req_timeout_count(self):\n return self.__echo_req_timeout_count", "def count_response_codes():\n code = request.args.get('code', 200)\n log_lines = request.args.get('log_lines')\n\n if log_lines:\n lines_list = json.loads(log_lines)\n count = count_by_code(lines_list, code)\n else:\n count = 0\n\n response = str(count)\n return response", "def vsce_uokms_server_decrypt_response_len(self, ctx):\n vsce_uokms_server_decrypt_response_len = self._lib.vsce_uokms_server_decrypt_response_len\n vsce_uokms_server_decrypt_response_len.argtypes = [POINTER(vsce_uokms_server_t)]\n vsce_uokms_server_decrypt_response_len.restype = c_size_t\n return vsce_uokms_server_decrypt_response_len(ctx)", "def echo_reply_handler(self, ev):\n now_timestamp = time.time()\n try:\n latency = now_timestamp - eval(ev.msg.data)\n self.echo_latency[ev.msg.datapath.id] = latency\n except:\n return", "def get_num_displayed_responses(self):\r\n return len(self._find_within(\".discussion-response\"))", "def get_response_pdu_size(self):\n return 1 + 1 + 2 * self.count", "def get_message_count(self):\n return self.buffer.count()", "def count(self):\n\n return self._get(\"count\", rtype=UInt)", "def message_count(self):\n return self._message_count", "def message_count(self):\n pass", "def get_response_pdu_size(self):\n count = self.count // 8\n if self.count % 8:\n count += 1\n\n return 1 + 1 + count", "def response_received(self, 
ignored):\n self._received += 1", "def __len__(self):\n response = self._rpc(self._declare(True))\n return response.message_count", "def _parse_release_count(self, resp: Dict[str, Any]) -> str:\n return f\"{len(resp.get('releases', []))}\"", "def count_messages(self, statuses=DEFAULT_MESSAGE_STATUSES):\n return self.request(\"count:Message\", [{\"status\": statuses}])", "def get_response_total_text(self):\r\n return self._get_element_text(\".response-count\")", "def message_count(self):\n return len(self.messages)", "def get_shown_responses_text(self):\r\n return self._get_element_text(\".response-display-count\")", "def count(self, page_size=10, vtimeout=10):\r\n a = self.get_attributes('ApproximateNumberOfMessages')\r\n return int(a['ApproximateNumberOfMessages'])", "def get_count(self):\n return unpack(os.read(self.fd, 8))", "def compute_mean_response_length(self):\n mean_response_length = 0\n for row in self.responses:\n mean_response_length += len(row.response)\n return round(mean_response_length / len(self.responses), 2)", "def get_response_record_count(self):\n if self.record_count is None:\n raise QueryNotExecuted(\"No query has been executed. Use the Execute Query keyword to retrieve records.\")\n else:\n return self.record_count", "def test_status_reply_count(self):\n with self.app.app_context():\n u = user(save=True)\n s = status(user=u, project=None, save=True)\n for i in range(5):\n status(user=u, project=None, reply_to=s, save=True)\n\n eq_(s.reply_count, 5)", "def message_count(self, message_count):\r\n\r\n self._message_count = message_count", "def getNumOfMsgRec(self):\n return self.MsgReceiveCount", "def getEchoReturnLoss(self, channel, unitCode=0):\n resp = self.XAPCommand('ERL', channel, unitCode=unitCode)\n return int(resp)", "async def sqs_count(self, _) -> Response:\n message = {\n \"message_count\": self.sqs.count()\n }\n return Response(text=json.dumps(message))", "def _get_total_count(response_dict):\n try:\n return response_dict['total_count']\n except KeyError: # bug\n return '0'", "def receive_response(self):\n return self.socket.receive()", "def on_response(self, ch, method, props, body):\n if self.corr_id == props.correlation_id:\n self.response = body", "def alerts_count(self) -> int:\n return pulumi.get(self, \"alerts_count\")", "def count_response_codes(self, code=404):\n count = 0\n\n for i, line in enumerate(self.input):\n substrings = line.split()\n try:\n response_code = substrings[6]\n if response_code == str(code):\n count += 1\n except IndexError:\n print('Line {line_number} appears to be invalid.'.format(\n line_number=i + 1), file=sys.stderr)\n\n return count", "def _read_len(self):\n\n read = self.socket.recv(4)\n if len(read) == 0:\n # if we read 0 bytes and self.message is empty, it means client\n # closed the connection\n if len(self.message) != 0:\n logging.error(\"can't read frame size from socket\")\n self.close()\n return\n self.message += read\n if len(self.message) == 4:\n self.len, = struct.unpack(b'!i', self.message)\n if self.len < 0:\n logging.error(\"negative frame size, it seems client\"\\\n \" doesn't use FramedTransport\")\n self.close()\n elif self.len == 0:\n logging.error(\"empty frame, it's really strange\")\n self.close()\n else:\n self.len += 4 # Include message length\n self._set_status(WAIT_MESSAGE)", "def count(self):\n return len(self.read_ints())", "def handle_echo(self, event):\n print('Echo received')\n return 0x0000", "def stored_cookie_messages_count(storage, response):\n # Get a list of cookies, excluding ones 
with a max-age of 0 (because\n # they have been marked for deletion).\n cookie = response.cookies.get(storage.cookie_name)\n if not cookie or cookie['max-age'] == 0:\n return 0\n data = storage._decode(cookie.value)\n if not data:\n return 0\n return len(data)", "def count_simsimi_msg(db):\n try:\n count = db.get('simsimi_info')['qty_answed_message']\n except:\n count = 1\n return count", "def getMessageCount(self):\n return 9", "def read_count(self, read_count):\n\n self._read_count = read_count", "def get_ResponseStatusCode(self):\n return self._output.get('ResponseStatusCode', None)", "def get_message_length(self):\n return len(self._payload)", "def get_kudos_received_count(khoros_object, user_settings=None, user_id=None, login=None, email=None):\n user_settings = _process_settings_and_user_id(khoros_object, user_settings, user_id, login, email)\n return _get_sum_weight(khoros_object, user_settings['id'], 'kudos_received')", "def winhttp_WinHttpReceiveResponse(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"hRequest\", \"lpReserved\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)", "def count(self, answer):\n self._validate(answer)\n\n return len(self.filter(answer.group_id, answer.block_id, answer.answer_id, answer.group_instance, answer.answer_instance))", "def _predictResponseSize(mode, functioncode, payloadToSubordinate):\n MIN_PAYLOAD_LENGTH = 4 # For implemented functioncodes here\n BYTERANGE_FOR_GIVEN_SIZE = slice(2, 4) # Within the payload\n\n NUMBER_OF_PAYLOAD_BYTES_IN_WRITE_CONFIRMATION = 4\n NUMBER_OF_PAYLOAD_BYTES_FOR_BYTECOUNTFIELD = 1\n\n RTU_TO_ASCII_PAYLOAD_FACTOR = 2\n\n NUMBER_OF_RTU_RESPONSE_STARTBYTES = 2\n NUMBER_OF_RTU_RESPONSE_ENDBYTES = 2\n NUMBER_OF_ASCII_RESPONSE_STARTBYTES = 5\n NUMBER_OF_ASCII_RESPONSE_ENDBYTES = 4\n\n # Argument validity testing\n _checkMode(mode)\n _checkFunctioncode(functioncode, None)\n _checkString(payloadToSubordinate, description='payload', minlength=MIN_PAYLOAD_LENGTH)\n\n # Calculate payload size\n if functioncode in [5, 6, 15, 16]:\n response_payload_size = NUMBER_OF_PAYLOAD_BYTES_IN_WRITE_CONFIRMATION\n\n elif functioncode in [1, 2, 3, 4]:\n given_size = _twoByteStringToNum(payloadToSubordinate[BYTERANGE_FOR_GIVEN_SIZE])\n if functioncode == 1 or functioncode == 2:\n # Algorithm from MODBUS APPLICATION PROTOCOL SPECIFICATION V1.1b\n number_of_inputs = given_size\n response_payload_size = NUMBER_OF_PAYLOAD_BYTES_FOR_BYTECOUNTFIELD + \\\n number_of_inputs // 8 + (1 if number_of_inputs % 8 else 0)\n\n elif functioncode == 3 or functioncode == 4:\n number_of_registers = given_size\n response_payload_size = NUMBER_OF_PAYLOAD_BYTES_FOR_BYTECOUNTFIELD + \\\n number_of_registers * _NUMBER_OF_BYTES_PER_REGISTER\n\n else:\n raise ValueError('Wrong functioncode: {}. 
The payload is: {!r}'.format( \\\n functioncode, payloadToSubordinate))\n\n # Calculate number of bytes to read\n if mode == MODE_ASCII:\n return NUMBER_OF_ASCII_RESPONSE_STARTBYTES + \\\n response_payload_size * RTU_TO_ASCII_PAYLOAD_FACTOR + \\\n NUMBER_OF_ASCII_RESPONSE_ENDBYTES\n else:\n return NUMBER_OF_RTU_RESPONSE_STARTBYTES + \\\n response_payload_size + \\\n NUMBER_OF_RTU_RESPONSE_ENDBYTES", "def get_tweet_count(self):\n return self.tweet_count.text", "def get_order_count(self):\n resp = self.app.get('/orders')\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n data = json.loads(resp.data)\n return len(data)", "def __extractUserClearedCountFromResponse(xml_string):\r\n #description\r\n #\r\n try:\r\n response_element = XML(xml_string)\r\n cleared_count = response_element.attrib.get(\"count\")\r\n return cleared_count\r\n except (ExpatError,):\r\n raise ESymplecticParseFileError(\"Could not extract the number of Users cleared from the XML file returned by Symplectic API\")", "def response_code(self):\r\n return self._response_code", "def vscr_ratchet_group_session_decrypt_len(self, ctx, message):\n vscr_ratchet_group_session_decrypt_len = self._lib.vscr_ratchet_group_session_decrypt_len\n vscr_ratchet_group_session_decrypt_len.argtypes = [POINTER(vscr_ratchet_group_session_t), POINTER(vscr_ratchet_group_message_t)]\n vscr_ratchet_group_session_decrypt_len.restype = c_size_t\n return vscr_ratchet_group_session_decrypt_len(ctx, message)", "def response_code(self):\n return self._response_code", "def response_code(self):\n return self._response_code", "def receive_response(self, private_key, responder_id, msg_tag, response):\n return self._handle_response(private_key, responder_id, msg_tag, response)", "def read_count(self):\n return self._read_count", "def coauthor_count(self):\n return self._json.get('coauthor-count', '0')", "def _read_len(self):\r\n read = self.socket.recv(4 - len(self.message))\r\n if len(read) == 0:\r\n # if we read 0 bytes and self.message is empty, it means client close \r\n # connection\r\n if len(self.message) != 0:\r\n logging.error(\"can't read frame size from socket\")\r\n self.close()\r\n return\r\n self.message += read\r\n if len(self.message) == 4:\r\n self.len, = struct.unpack('!i', self.message)\r\n if self.len < 0:\r\n logging.error(\"negative frame size, it seems client\"\\\r\n \" doesn't use FramedTransport\")\r\n self.close()\r\n elif self.len == 0:\r\n logging.error(\"empty frame, it's really strange\")\r\n self.close()\r\n else:\r\n self.message = ''\r\n self.status = WAIT_MESSAGE", "def get_response(self):\n\n response = self.socket.recv(1024)\n code = response.split(\" \")[0]\n message = response[4:]\n\n return int(code), message", "def TriggeredVendorMessageLength(self):\n\t\treturn self._get_attribute('triggeredVendorMessageLength')", "def get_subscriber_count(self, response):\n return response.css('.yt-subscriber-count')\\\n .extract_first(default='')", "def count_chat_with(self, actor_label):\n query = read_query('trust/count_chat_with') % actor_label\n response = self._submit_query(query)\n\n return response[0]['num_chats']['value'].split('/')[-1] if response != [] else ''", "def GetCount(self):\n return self._server.get_count()", "def get_status_code_count(har_json):\n entries = har_json['log']['entries']\n\n har_status_codes = Counter()\n\n for entry in entries:\n code = entry['response']['status']\n har_status_codes[code] += 1\n\n return har_status_codes", "def get_number_of_reactions(self):\n return 
self._performed_actions[REACT]", "def amount_of_receivers(self) -> int:\n return sum([1 for _ in self.receivers])", "def _read_length(self):\n msg_length = struct.unpack('!I', self.received_data[0])[0]\n self.l.debug('msg_length = %d', msg_length)\n self.set_terminator(msg_length)\n self.process_data = self._read_message\n self.received_data = []", "def github_num_reactions(comment_object: Dict[str, Any]) -> int:\n return comment_object.get('reactions', {}).get('total_count', 0)", "def send_resp(self):\n self.n_send_resp += 1", "def count_likes(self):\n likes = self.event_likes\n num_likes = len(likes)\n return num_likes", "def count(self):\n with self._block:\n counter = re.search(r'count=(\\d+) ', repr(self))\n return int(counter.group(1))", "def count(self, value):\n \n self._count = int(value)", "async def quote_count(self):\n await self.bot.type()\n result = self.count()\n await self.bot.say(result)", "def VendorMessageLength(self):\n\t\treturn self._get_attribute('vendorMessageLength')", "def recv_resp_simple(self, bytes=1024):\n mess = self.sock.recv(bytes)\n # print('Got message of length: %i byte(s)' % len(mess))\n return mess", "def _get_count(self, msg, subtype=\"all\"):\n try:\n counts = self.get_local(msg, \"counts\")\n return counts.get(subtype, 0)\n except KeyError:\n return 0", "def count(self) -> int:\n return pulumi.get(self, \"count\")", "def assertResponse(self, respData, count = 4, errCode = testLib.RestTestCase.SUCCESS):\n expected = { 'errCode' : errCode }\n if count is not None:\n expected['count'] = count\n self.assertDictEqual(expected, respData)", "def sent_count(comment):\n return comment.__len__()", "def hives_count(self) -> int:\n return self.hives.count()", "def assertResponse(self, respData, count = 2, errCode = testLib.RestTestCase.SUCCESS):\n expected = { 'errCode' : errCode }\n if count is not None:\n expected['count'] = count\n self.assertDictEqual(expected, respData)", "def average_num_reply():\n post_count = Post.select().count()\n reply_count = Comment.select().count()\n if post_count == 0:\n return 0\n else:\n average = round(reply_count / post_count, 2)\n return average", "def message_length(self):\n return self._message_length", "def count(request):\r\n n = request.user.profile.unread_message_count()\r\n data = {\r\n 'count': n,\r\n }\r\n return HttpResponse(json.dumps(data), mimetype='application/json')", "def message_count(self) -> int:\n return len(self._leased_messages)", "def testTerminateResponseWithServerClose(self):\n self.client_connect()\n self.client_send('set chopped 0 0 1\\r\\n')\n self.client_send('1\\r\\n')\n self.mock_recv(\"set chopped 0 0 1\\r\\n1\\r\\n\")\n self.mock_close()\n self.client_recv('.*ERROR .*\\r\\n')", "def message_len(self):\n # expect F, use zero\n return len(self.message) if self.message else 0", "def get_replies_count(khoros_object, user_settings=None, user_id=None, login=None, email=None):\n user_settings = _process_settings_and_user_id(khoros_object, user_settings, user_id, login, email)\n select_fields = ('messages.count(*)', 'topics.count(*)')\n api_response = query_users_table_by_id(khoros_object, select_fields, user_settings['id'])\n items_list = api.get_items_list(api_response)\n return int(items_list['messages']['count']) - int(items_list['topics']['count'])", "def message_count_limit(self) -> ConfigNodePropertyInteger:\n return self._message_count_limit", "def count(self) -> Optional[int]:\n return pulumi.get(self, \"count\")", "def count(self) -> Optional[int]:\n return pulumi.get(self, \"count\")" ]
[ "0.76704717", "0.73230594", "0.70944583", "0.6276271", "0.618727", "0.6063231", "0.60443354", "0.51781255", "0.5142111", "0.50479865", "0.49492952", "0.48183542", "0.47569573", "0.47515997", "0.47491166", "0.4727505", "0.46277964", "0.45390615", "0.45219365", "0.44867164", "0.44473416", "0.44427106", "0.44093305", "0.4398009", "0.43944886", "0.43344253", "0.4282784", "0.42712533", "0.42634273", "0.42538837", "0.4242837", "0.42322007", "0.42056054", "0.42050803", "0.41779009", "0.41732848", "0.41665667", "0.41634387", "0.41631567", "0.41572818", "0.41323346", "0.41308454", "0.41258696", "0.41208386", "0.4110022", "0.40890378", "0.40887335", "0.40849885", "0.40786585", "0.40729177", "0.40727168", "0.4070343", "0.40697238", "0.40620404", "0.40603933", "0.40596434", "0.40342557", "0.40300953", "0.4022698", "0.40196094", "0.40164983", "0.4012292", "0.4012292", "0.40047702", "0.40046024", "0.39968973", "0.39840108", "0.39790455", "0.3978926", "0.39768472", "0.39754358", "0.39735314", "0.3972048", "0.39717114", "0.39579454", "0.3957752", "0.3946818", "0.3946661", "0.39455384", "0.39445093", "0.3936046", "0.39314294", "0.39285257", "0.39229125", "0.39176235", "0.39069358", "0.39030313", "0.39016584", "0.38987744", "0.3897354", "0.38962752", "0.38898087", "0.38869506", "0.38866678", "0.38851678", "0.38841453", "0.38716", "0.38624963", "0.38576618", "0.38576618" ]
0.8556093
0
Getter method for return_codes, mapped from YANG variable /mpls_state/statistics_oam/return_codes (list)
def _get_return_codes(self): return self.__return_codes
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _set_return_codes(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=YANGListType(\"number\",return_codes.return_codes, yang_name=\"return-codes\", rest_name=\"return-codes\", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='number', extensions={u'tailf-common': {u'callpoint': u'mpls-statistics-oam-retcode', u'cli-suppress-show-path': None}}), is_container='list', yang_name=\"return-codes\", rest_name=\"return-codes\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-statistics-oam-retcode', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='list', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"return_codes must be of a type compatible with list\"\"\",\n 'defined-type': \"list\",\n 'generated-type': \"\"\"YANGDynClass(base=YANGListType(\"number\",return_codes.return_codes, yang_name=\"return-codes\", rest_name=\"return-codes\", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='number', extensions={u'tailf-common': {u'callpoint': u'mpls-statistics-oam-retcode', u'cli-suppress-show-path': None}}), is_container='list', yang_name=\"return-codes\", rest_name=\"return-codes\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-statistics-oam-retcode', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='list', is_config=False)\"\"\",\n })\n\n self.__return_codes = t\n if hasattr(self, '_set'):\n self._set()", "def traffic_statuscodes_cachecodes(self, **kwargs):\n url_path = 'traffic/statuscodes/cachecodes'\n self.logger.debug(f\"Get list of cache codes\")\n body = self._make_body(kwargs)\n return self._common_get(request_path=url_path, parameters=body)", "def codes(self, name):\n return self._get_valuemap(name, non_mapped='codes')", "def returncode(self: \"ShellOutput\") -> Artefact[int]:\n self.__check_len()\n return self.returncodes[0]", "def _build_return_code_enum():\n prefix = 'XTT_RETURN_'\n codes = {k[len(prefix):]:v for (k, v) in vars(_lib).items() if k.startswith(prefix)}\n return IntEnum('ReturnCode', codes)", "def returncodes(self):\n for p in self.processes:\n p.wait()\n codes = [p.poll() for p in self.processes]\n if set(codes) == set([0]):\n return []\n return codes", "def get_observatory_codes_async(self, get_raw_response=False, cache=True):\n\n self.query_type = 'observatory_code'\n response = self._request('GET', self.OBSERVATORY_CODES_URL,\n timeout=self.TIMEOUT, cache=cache)\n\n return response", "def codes(self):\n return [card.code for card in self.cards]", "def get_pcode_list(self) -> List[str]:\n return self.pcodes", "def license_codes(self) -> Sequence[str]:\n return pulumi.get(self, \"license_codes\")", "def is_return_code_mode(self):\n return self.bisect_config.get('test_type') == 'return_code'", "def get_lock_codes(device: Device) -> Sequence[str]:\n try:\n codes_str = cast(str, device.attributes[ATTR_LOCK_CODES].value)\n codes = loads(codes_str)\n return [codes[id][\"name\"] for id in codes]\n except Exception as e:\n _LOGGER.warn(\"Error getting lock codes for %s: %s\", device, e)\n return []", 
"def _get_module_return_code(self, status, module):\n\n # initialize return code array\n arr = []\n check_failed = False\n\n if module not in status.data:\n # assume running\n arr = [1]\n else:\n for job_name in status.data[module].keys():\n if job_name != 'pipeline_index':\n\n # update the job status and get the status string\n status._update_job_status(module, job_name)\n js = status.data[module][job_name]['job_status']\n\n if js == 'successful':\n arr.append(0)\n elif js == 'failed':\n arr.append(2)\n check_failed = True\n elif js is None:\n arr.append(3)\n else:\n arr.append(1)\n\n status._dump()\n\n return_code = self._parse_code_array(arr)\n\n status = self.RETURN_CODES[return_code]\n fail_str = ''\n if check_failed and status != 'failed':\n fail_str = ', but some jobs have failed'\n logger.info('Module \"{}\" for job \"{}\" is {}{}.'\n .format(module, self._config.name, status, fail_str))\n\n return return_code", "def get_code(self, obj):\n return [], []", "def code_types(self):\n return self.codes.keys()", "def http_return_code(res_data) -> (int, str):\n\n start = re.search(\"[0-9]{3}\", res_data).start()\n end_of_line = res_data.find(\"\\r\\n\")\n code = int(res_data[start:start+3])\n if end_of_line == -1:\n end_of_line = len(res_data)\n meaning = res_data[start+4:end_of_line]\n return code, meaning", "def get_registry_codes( ):\n return _theRegistry.get_codes( )", "def return_code(self) -> int:\n raise NotImplementedError(\"Base method not implemented\")", "def pin_code(self) -> List[PinCodeSummary]:\n return self._pin_code", "def geneSymbols(self, returnType=\"list\"):\n\t\treturn self._dataframe['GeneSymbol'].to_dict() if returnType==\"dict\" else self._dataframe['GeneSymbol'].tolist()", "def set_retcodes(self,rc1,rc2) :\n\t\tif self.save_trace : \n\t\t\tself.rc1.append(rc1)\n\t\t\tself.rc2.append(rc2)\n\t\telse : \n\t\t\t# Save the return codes from the last iter\n\t\t\tself.rc1 = [rc1]\n\t\t\tself.rc2 = [rc2]", "def ReturnCodeObject(self,code):\n\n if code in self.setOfUnusableCodes:\n return self.UnusableICDCodes[self.unusableCodeToIndexMap[code.replace('.','')]]\n else:\n return self.UsableICDCodes[self.usableCodeToIndexMap[code.replace('.','')]]", "def health_check_codes(self) -> Sequence[str]:\n return pulumi.get(self, \"health_check_codes\")", "def get_opcodes(self, script, verify_minimal_data=False, pc=0):\n while pc < len(script):\n opcode, data, new_pc, is_ok = self.scriptStreamer.get_opcode(\n script, pc, verify_minimal_data=verify_minimal_data)\n yield opcode, data, pc, new_pc\n pc = new_pc", "def get_code_mapping( id ):\n returnVal = []\n theCodes = _theRegistry.get_code( id )\n codes = theCodes.get_codes()\n descs = theCodes.get_descriptions()\n for (code, desc) in map(None, codes, descs):\n returnVal.append( { 'code' : code, 'description' : desc } )\n return returnVal", "def get_code():\n return jsonify({\"status\": \"0\", \"code\": code_status})", "def discount_codes(self):\n return [DiscountCode(x) for x in self._dict.get('discount_codes', [])]", "def code(self) -> pulumi.Input['CanaryCodeArgs']:\n return pulumi.get(self, \"code\")", "def view_promo_codes():\n\n results = []\n promo_codes = Promo_code.query.filter_by().all()\n for codes in promo_codes:\n result = {\n 'id': codes.id,\n 'code': codes.code,\n 'event': codes.event,\n 'status': codes.status,\n 'price': codes.price\n }\n results.append(result)\n if datetime.utcnow() > codes.expiry_date:\n codes.status = 'expired'\n if len(results) > 0:\n return jsonify({'promo_codes': results,\n 'count': 
str(len(results)),\n 'status': 'pass',\n 'message': 'promo codes found'\n }), 200\n return jsonify({'count': '0','status': 'fail',\n 'message': 'no promo codes found'\n }), 404", "def codelists():\n return CodelistSet()", "def lcode(self):\n###############################################################################\n lcode = []\n for M in list(self.estimates.values()):\n if (M.code not in lcode):lcode.append(M.code)\n return(lcode)", "def _get_indexes(self):\n\n code_indexes = []\n for match in self.parser.ansi_regex.finditer(self._raw_string):\n code_indexes.extend(list(range(match.start(), match.end())))\n if not code_indexes:\n # Plain string, no ANSI codes.\n return code_indexes, list(range(0, len(self._raw_string)))\n # all indexes not occupied by ansi codes are normal characters\n char_indexes = [i for i in range(len(self._raw_string)) if i not in code_indexes]\n return code_indexes, char_indexes", "def return_to_caller(self):\n value = bytearray()\n value.append(0xc3) # ret instruction\n return value", "def traffic_statuscodes(self, **kwargs):\n self.logger.debug(f\"Get status codes report data,)\")\n url_path = 'traffic/statuscodes'\n body = self._make_body(kwargs)\n return self._common_post(request_path=url_path, body=body)", "def _check_return(\n ret_type: None | IntType | ReferenceType, returned: Sequence[BitString]\n) -> None:\n if len(returned) > 1:\n raise ValueError(\"code block returns multiple values\")\n\n if ret_type is None:\n if len(returned) != 0:\n raise ValueError(\n 'function has no return type, but its code block defines \"ret\"'\n )\n elif isinstance(ret_type, ReferenceType):\n if len(returned) == 0:\n raise ValueError(\n f\"function has return type {ret_type}, but its code block \"\n f'does not define \"ret\"'\n )\n (ret_bits,) = returned\n if ret_type.type.width != ret_bits.width:\n raise ValueError(\n f\"function has return type {ret_type}, but its code block \"\n f'defines \"ret\" with width {ret_bits.width}'\n )\n else: # returns a value\n if len(returned) == 0:\n raise ValueError(\n f\"function has return type {ret_type}, but its code block \"\n f'does not assign to \"ret\"'\n )\n (ret_bits,) = returned\n if ret_type.width != ret_bits.width:\n raise ValueError(\n f\"function has return type {ret_type}, but its code block \"\n f'defines \"ret\" with width {ret_bits.width}'\n )", "def get_ret_code(self):\n\t\treturn call_sdk_function('PrlJob_GetRetCode', self.handle)", "def return_code(self):\n return self._failures", "def list():\n\n return cache.codeTableList()", "def parse_files_to_codes_mapping( # noqa: C901\n value_: Sequence[str] | str,\n) -> list[tuple[str, list[str]]]:\n if not isinstance(value_, str):\n value = \"\\n\".join(value_)\n else:\n value = value_\n\n ret: list[tuple[str, list[str]]] = []\n if not value.strip():\n return ret\n\n class State:\n seen_sep = True\n seen_colon = False\n filenames: list[str] = []\n codes: list[str] = []\n\n def _reset() -> None:\n if State.codes:\n for filename in State.filenames:\n ret.append((filename, State.codes))\n State.seen_sep = True\n State.seen_colon = False\n State.filenames = []\n State.codes = []\n\n def _unexpected_token() -> exceptions.ExecutionError:\n return exceptions.ExecutionError(\n f\"Expected `per-file-ignores` to be a mapping from file exclude \"\n f\"patterns to ignore codes.\\n\\n\"\n f\"Configured `per-file-ignores` setting:\\n\\n\"\n f\"{textwrap.indent(value.strip(), ' ')}\"\n )\n\n for token in _tokenize_files_to_codes_mapping(value):\n # legal in any state: separator sets the sep 
bit\n if token.tp in {_COMMA, _WS}:\n State.seen_sep = True\n # looking for filenames\n elif not State.seen_colon:\n if token.tp == _COLON:\n State.seen_colon = True\n State.seen_sep = True\n elif State.seen_sep and token.tp == _FILE:\n State.filenames.append(token.src)\n State.seen_sep = False\n else:\n raise _unexpected_token()\n # looking for codes\n else:\n if token.tp == _EOF:\n _reset()\n elif State.seen_sep and token.tp == _CODE:\n State.codes.append(token.src)\n State.seen_sep = False\n elif State.seen_sep and token.tp == _FILE:\n _reset()\n State.filenames.append(token.src)\n State.seen_sep = False\n else:\n raise _unexpected_token()\n\n return ret", "def get_kcca_devices_codes():\n headers = {'x-api-key': CLARITY_API_KEY, 'Accept-Encoding': 'gzip'}\n api_url = CLARITY_API_BASE_URL + \"devices\"\n results = requests.get(api_url, headers=headers)\n\n device_data = pd.DataFrame(results.json())\n\n device_codes = []\n\n for index, row in device_data.iterrows():\n device_codes.append(row['code'])\n\n return device_codes", "def get_kcca_devices_codes():\n headers = {'x-api-key': CLARITY_API_KEY, 'Accept-Encoding': 'gzip'}\n api_url = CLARITY_API_BASE_URL + \"devices\"\n results = requests.get(api_url, headers=headers)\n\n device_data = pd.DataFrame(results.json())\n\n device_codes = []\n\n for index, row in device_data.iterrows():\n device_codes.append(row['code'])\n\n return device_codes", "def result_code(self):\n return self._result_code", "def get_fire_centre_station_codes() -> List[int]:\n station_codes = []\n with get_read_session_scope() as session:\n station_query = get_all_stations(session)\n for station in station_query:\n if isinstance(station, dict):\n station_codes.append(int(station['station_code']))\n else:\n station_codes.append(int(station._mapping['station_code']))\n\n return station_codes", "def from_trace_codes_file(path: str) -> Mapping[int, str]:\n with open(path, 'r') as fd:\n return from_trace_codes_text(fd.read())", "def _parseOpreturnToChunks(\n script: bytes, *, allow_op_0: bool, allow_op_number: bool\n ) -> List[bytes]:\n try:\n ops = address.Script.get_ops(script)\n except address.ScriptError as e:\n raise OpreturnError(\"Script error\") from e\n\n if ops[0][0] != address.OpCodes.OP_RETURN:\n raise OpreturnError(\"No OP_RETURN\")\n\n chunks = []\n for opitem in ops[1:]:\n op, data = opitem if isinstance(opitem, tuple) else (opitem, None)\n if op > address.OpCodes.OP_16:\n raise OpreturnError(\"Non-push opcode\")\n if op > address.OpCodes.OP_PUSHDATA4:\n if op == 80:\n raise OpreturnError(\"Non-push opcode\")\n if not allow_op_number:\n raise OpreturnError(\"OP_1NEGATE to OP_16 not allowed\")\n if op == address.OpCodes.OP_1NEGATE:\n data = [0x81]\n else: # OP_1 - OP_16\n data = [op - 80]\n if op == address.OpCodes.OP_0 and not allow_op_0:\n raise OpreturnError(\"OP_0 not allowed\")\n chunks.append(b\"\" if data is None else bytes(data))\n return chunks", "def retcode(self):\n if not self.result:\n return None\n return self.result.retcode", "def barcodes(file_path=None):\n try:\n barcodes = decode(Image.open(file_path))\n barcodes_list = []\n if barcodes:\n # reformat into a list\n for barcode in barcodes:\n symbology_type = str(barcode.type)\n data = barcode.data.decode('UTF-8')\n barcodes_list.append({'type': symbology_type, 'data': data})\n else:\n UTILITIES_LOGGER.info('No barcodes found in file: ' + file_path)\n return None\n return barcodes_list\n except OSError as e:\n print('ERROR: unable to read file. 
errno: ' + str(e.errno) + ' filename: ' + str(e.filename) + ' strerror: ' + str(e.strerror))\n UTILITIES_LOGGER.exception('OSError')\n return None", "def get_retcode(self):\n return self._retcode", "def opcode_list(self, script):\n opcodes = []\n new_pc = 0\n try:\n for opcode, data, pc, new_pc in self.get_opcodes(script):\n opcodes.append(self.disassemble_for_opcode_data(opcode, data))\n except ScriptError:\n opcodes.append(binascii.hexlify(script[new_pc:]).decode(\"utf8\"))\n\n return opcodes", "def getCommandcode(self):\n commandcode = \"RETURN ({})\".format(self.value) # commandcode\n return commandcode", "def _list_output_regressor_code(self, model_code):\n regressor_code = [\n code for code in model_code.ravel() if (code != 0) and (str(code)[0] == \"1\")\n ]\n\n return np.asarray(regressor_code)", "def code(self):\n return self._code", "def code(self):\n return self._code", "def code(self):\n return self._code", "def code(self):\n return self._code", "def code(self):\n return self._data", "def fallback_status_codes(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"fallback_status_codes\")", "def code(self) -> pulumi.Output['outputs.CanaryCode']:\n return pulumi.get(self, \"code\")", "def default_trace_codes() -> Mapping[int, str]:\n with open(Path(__file__).resolve().parent.joinpath('trace.codes'), 'r') as fd:\n return from_trace_codes_text(fd.read())", "def read_asm_file_to_code(file_path):\r\n with open(file_path, 'r') as file:\r\n asm_code = []\r\n for line in file:\r\n asm_code.append(line)\r\n\r\n return asm_code", "def return_ids(self, return_ids):\n\n self._return_ids = return_ids", "def get_values(self, code_block):\r\n pos_mode, imm_mode = 0, 1\r\n x, values = 1, []\r\n modes = self.get_modes(code_block)\r\n for mode in modes:\r\n if mode == pos_mode:\r\n values.append(int(self.codes[code_block[x]]))\r\n elif mode == imm_mode:\r\n values.append(int(code_block[x]))\r\n else: print('Error: Not a valid mode.')\r\n x += 1\r\n print('Get values: ')\r\n print(values)\r\n return values", "def _add_status_code(runner, return_value):\n if isinstance(return_value, Mapping):\n status_code = return_value.get('statusCode')\n if status_code:\n runner.resource['metadata']['status_code'] = status_code", "def exit_code(self) -> int:\n _args: list[Arg] = []\n _ctx = self._select(\"exitCode\", _args)\n return _ctx.execute_sync(int)", "def get_code(self):\n return self.code", "def get_code(self):\n return self.code", "def get_code(self):\n return self.code", "def get_code(self):\n return self.code", "def fips_codes(self):\n return self._fips_codes", "def fips_codes(self):\n return self._fips_codes", "def ReturnCode(rc):\r\n return _hiew.ReturnCode(rc)", "def custom_block_response_status_code(self) -> pulumi.Output[Optional[int]]:\n return pulumi.get(self, \"custom_block_response_status_code\")", "def returnedbanks(self):\n returned = []\n for bank in self.__banks:\n if bank.returned():\n returned.append(bank)\n return returned", "def returnedbanks(self):\n returned = []\n for bank in self.__banks:\n if bank.returned():\n returned.append(bank)\n return returned", "def returnedbanks(self):\n returned = []\n for bank in self.__banks:\n if bank.returned():\n returned.append(bank)\n return returned", "def returnedbanks(self):\n returned = []\n for bank in self.__banks:\n if bank.returned():\n returned.append(bank)\n return returned", "def returnedbanks(self):\n returned = []\n for bank in self.__banks:\n if bank.returned():\n returned.append(bank)\n return returned", "def 
returnedbanks(self):\n returned = []\n for bank in self.__banks:\n if bank.returned():\n returned.append(bank)\n return returned", "def returnedbanks(self):\n returned = []\n for bank in self.__banks:\n if bank.returned():\n returned.append(bank)\n return returned", "def servo_codes(self):\r\n return self._arm.servo_codes", "def coding_receptors(self):\n return self.parameters['coding_receptors']", "def get_code(self) -> List[str]:\n if self.__prefix__ == \"\":\n out = []\n else:\n out = [self.__prefix__]\n\n if self.__spacing__ == \"\":\n return out + self.__code_block__\n\n for line in self.__code_block__:\n out.append(self.__spacing__ + line)\n return out", "def count_response_codes():\n code = request.args.get('code', 200)\n log_lines = request.args.get('log_lines')\n\n if log_lines:\n lines_list = json.loads(log_lines)\n count = count_by_code(lines_list, code)\n else:\n count = 0\n\n response = str(count)\n return response", "def code(action_sequence):\r\n # refuse any invalid action :\r\n if set(action_sequence) - set(CODE_MAP): # some action was not in the known ones.\r\n return '0' # per spec (test_unknown_action)\r\n\r\n mapdict = dict(zip(CODE_MAP, (1, 2, 4, 8)))\r\n da_code = [mapdict[action] for action in action_sequence]\r\n if sorted(da_code) != da_code: # list is not sorted : assume reversed\r\n da_code.append(16)\r\n return format(sum(da_code), 'b') # sum to integer, binstring, and return", "def test_returns_list(self):\n metrics = ('input', 'output')\n\n @callback_return(*metrics)\n def returns_list():\n return [2, 1, 3]\n\n r = returns_list()\n self.assertEqual(len(metrics), len(r.keys()), 'Extra return values should be dropped.')\n self.assertEqual(2, r['input'])\n self.assertEqual(1, r['output'])\n self.assertNotIn('extra', r)", "def code(self):\n return self._getCode()", "def returncode(self):\n return self._proc.returncode", "def encodings(self, code):\n return (self.encoding(code), )", "def getLastCode(self):\r\n codePtr = (c_ubyte * IR_MAX_CODE_DATA_LENGTH)(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0)\r\n dataLength = c_int(IR_MAX_CODE_DATA_LENGTH)\r\n bitCount = c_int()\r\n \r\n try:\r\n result = PhidgetLibrary.getDll().CPhidgetIR_getLastCode(self.handle, codePtr, byref(dataLength), byref(bitCount))\r\n except RuntimeError:\r\n raise\r\n \r\n if result > 0:\r\n raise PhidgetException(result)\r\n else:\r\n code = IRCode(codePtr, bitCount.value)\r\n return code", "def ancestor_codes(self):\n codes = [self.code]\n for parent_node in self.parents:\n codes += parent_node.ancestor_codes()\n return codes", "def postal_codes(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"postal_codes\")", "def _seq(codes, seq_file):\n \n seq_temp = 'oma_temporary_sequences.fasta'\n if os.path.isfile(seq_temp):\n info('Indexing pre-existed temporary protein sequences ('\n 'oma_temporary_sequences.fasta) ... ')\n seqs = SeqIO.index(seq_temp, 'fasta')\n else:\n info('Parsing OMA protein sequences (oma-seqs.fa.gz) ... 
')\n handle = gzip.open(seq_file, 'rt') if _gz(seq_file) else open(seq_file)\n records = SeqIO.parse(handle, 'fasta')\n seqs = {record.id: record for record in records if\n record.id[:5] in codes}\n SeqIO.write(seqs.values(), seq_temp, 'fasta')\n handle.close()\n return seqs", "def setExecutionStatus(self, return_code):\n if return_code == 0:\n self.execution_status = 'executed'\n else:\n self.execution_status = 'failed'", "def get_codes(path):\n hospital_codes = {}\n with open(path, encoding='utf8') as f:\n for line in f:\n val, key = line.split(\",\")\n hospital_codes[int(key)] = val\n return hospital_codes", "def calculate_vol_adjusted_returns(self, returns_df, br, returns = True):\n\n tsc = TimeSeriesCalcs()\n\n if not returns: returns_df = tsc.calculate_returns(returns_df)\n\n if not(hasattr(br, 'portfolio_vol_resample_type')):\n br.portfolio_vol_resample_type = 'mean'\n\n if not(hasattr(br, 'portfolio_vol_resample_freq')):\n br.portfolio_vol_resample_freq = None\n\n leverage_df = self.calculate_leverage_factor(returns_df,\n br.portfolio_vol_target, br.portfolio_vol_max_leverage,\n br.portfolio_vol_periods, br.portfolio_vol_obs_in_year,\n br.portfolio_vol_rebalance_freq, br.portfolio_vol_resample_freq,\n br.portfolio_vol_resample_type)\n\n vol_returns_df = tsc.calculate_signal_returns_with_tc_matrix(leverage_df, returns_df, tc = br.spot_tc_bp)\n vol_returns_df.columns = returns_df.columns\n\n return vol_returns_df, leverage_df", "def _code_indices(self) -> Tuple[int, ...]:\n return tuple(idx for idx, seg in enumerate(self.segments) if seg.is_code)", "def forward_code_map(self):\n\n return { c.key:c.value for c in self.codes}", "def code_to_sequences( self, ucode ):\n\t\t\n\t\tassert isinstance( ucode, unicode ), 'ucode must be unicode string!' \n\t\t\n\t\tfor uchar in ucode:\n\t\t\tif not( uchar in self._char39 ):\n\t\t\t\traise Barcode39Error( '%s char is not listed in Barcode39 characters [0..9,A..Z,space,9,-,.,$,/,+,%]' )\n\n\t\tresult = []\n\t\tfor uchar in ucode:\n\t\t\tresult = result + self.char_to_seq(uchar) \n\t\t\t\n\t\treturn result", "def function_returns(self):\n shards = self.line.split()\n if len(shards) > 3:\n if self.line.endswith('returns') is False:\n if shards[-1] in 'return':\n return True", "def calculate_returns(data):\n pass" ]
[ "0.74746984", "0.6001439", "0.5503843", "0.52020866", "0.51311105", "0.5087448", "0.5053257", "0.50515985", "0.49911606", "0.48414174", "0.48189038", "0.46903574", "0.4685551", "0.4676945", "0.46544784", "0.46471697", "0.45722067", "0.45684677", "0.45653287", "0.44813254", "0.44799843", "0.44608784", "0.44549376", "0.4413035", "0.4407787", "0.4407223", "0.43919137", "0.43874228", "0.4381796", "0.4377668", "0.43767595", "0.43644267", "0.4357872", "0.4356118", "0.433288", "0.4329189", "0.43281737", "0.43280676", "0.43152428", "0.43149927", "0.43149927", "0.43043432", "0.4298322", "0.42942408", "0.4270004", "0.4267082", "0.42647782", "0.4263658", "0.42523116", "0.4249948", "0.42479232", "0.42383763", "0.42383763", "0.42383763", "0.42383763", "0.42352012", "0.42334577", "0.4229931", "0.42293677", "0.422616", "0.42231396", "0.422198", "0.42157137", "0.42152014", "0.42116147", "0.42116147", "0.42116147", "0.42116147", "0.42081773", "0.42081773", "0.42080143", "0.41903943", "0.41745806", "0.41745806", "0.41745806", "0.41745806", "0.41745806", "0.41745806", "0.41745806", "0.41701615", "0.4169983", "0.41688433", "0.4167964", "0.41232666", "0.41137055", "0.4097176", "0.40849957", "0.40797207", "0.40789074", "0.40723717", "0.40549156", "0.40497032", "0.40413746", "0.40388274", "0.4023087", "0.40225747", "0.4017412", "0.4015337", "0.4009368", "0.3987027" ]
0.6220025
1
Setter method for return_codes, mapped from YANG variable /mpls_state/statistics_oam/return_codes (list)
def _set_return_codes(self, v, load=False):
  if hasattr(v, "_utype"):
    v = v._utype(v)
  try:
    t = YANGDynClass(v,base=YANGListType("number",return_codes.return_codes, yang_name="return-codes", rest_name="return-codes", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='number', extensions={u'tailf-common': {u'callpoint': u'mpls-statistics-oam-retcode', u'cli-suppress-show-path': None}}), is_container='list', yang_name="return-codes", rest_name="return-codes", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-statistics-oam-retcode', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='list', is_config=False)
  except (TypeError, ValueError):
    raise ValueError({
        'error-string': """return_codes must be of a type compatible with list""",
        'defined-type': "list",
        'generated-type': """YANGDynClass(base=YANGListType("number",return_codes.return_codes, yang_name="return-codes", rest_name="return-codes", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='number', extensions={u'tailf-common': {u'callpoint': u'mpls-statistics-oam-retcode', u'cli-suppress-show-path': None}}), is_container='list', yang_name="return-codes", rest_name="return-codes", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-statistics-oam-retcode', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='list', is_config=False)""",
      })
  self.__return_codes = t
  if hasattr(self, '_set'):
    self._set()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_return_codes(self):\n return self.__return_codes", "def traffic_statuscodes_cachecodes(self, **kwargs):\n url_path = 'traffic/statuscodes/cachecodes'\n self.logger.debug(f\"Get list of cache codes\")\n body = self._make_body(kwargs)\n return self._common_get(request_path=url_path, parameters=body)", "def codes(self, name):\n return self._get_valuemap(name, non_mapped='codes')", "def set_retcodes(self,rc1,rc2) :\n\t\tif self.save_trace : \n\t\t\tself.rc1.append(rc1)\n\t\t\tself.rc2.append(rc2)\n\t\telse : \n\t\t\t# Save the return codes from the last iter\n\t\t\tself.rc1 = [rc1]\n\t\t\tself.rc2 = [rc2]", "def _build_return_code_enum():\n prefix = 'XTT_RETURN_'\n codes = {k[len(prefix):]:v for (k, v) in vars(_lib).items() if k.startswith(prefix)}\n return IntEnum('ReturnCode', codes)", "def return_ids(self, return_ids):\n\n self._return_ids = return_ids", "def codes(self):\n return [card.code for card in self.cards]", "def returncode(self: \"ShellOutput\") -> Artefact[int]:\n self.__check_len()\n return self.returncodes[0]", "def returncodes(self):\n for p in self.processes:\n p.wait()\n codes = [p.poll() for p in self.processes]\n if set(codes) == set([0]):\n return []\n return codes", "def license_codes(self) -> Sequence[str]:\n return pulumi.get(self, \"license_codes\")", "def get_pcode_list(self) -> List[str]:\n return self.pcodes", "def is_return_code_mode(self):\n return self.bisect_config.get('test_type') == 'return_code'", "def return_code(self) -> int:\n raise NotImplementedError(\"Base method not implemented\")", "def get_observatory_codes_async(self, get_raw_response=False, cache=True):\n\n self.query_type = 'observatory_code'\n response = self._request('GET', self.OBSERVATORY_CODES_URL,\n timeout=self.TIMEOUT, cache=cache)\n\n return response", "def code_types(self):\n return self.codes.keys()", "def codelists():\n return CodelistSet()", "def set_ret(self, ret):\n \n self.ret = [i for i in ret]", "def setExecutionStatus(self, return_code):\n if return_code == 0:\n self.execution_status = 'executed'\n else:\n self.execution_status = 'failed'", "def geneSymbols(self, returnType=\"list\"):\n\t\treturn self._dataframe['GeneSymbol'].to_dict() if returnType==\"dict\" else self._dataframe['GeneSymbol'].tolist()", "def traffic_statuscodes(self, **kwargs):\n self.logger.debug(f\"Get status codes report data,)\")\n url_path = 'traffic/statuscodes'\n body = self._make_body(kwargs)\n return self._common_post(request_path=url_path, body=body)", "def get_registry_codes( ):\n return _theRegistry.get_codes( )", "def _add_status_code(runner, return_value):\n if isinstance(return_value, Mapping):\n status_code = return_value.get('statusCode')\n if status_code:\n runner.resource['metadata']['status_code'] = status_code", "def discount_codes(self):\n return [DiscountCode(x) for x in self._dict.get('discount_codes', [])]", "def parse_files_to_codes_mapping( # noqa: C901\n value_: Sequence[str] | str,\n) -> list[tuple[str, list[str]]]:\n if not isinstance(value_, str):\n value = \"\\n\".join(value_)\n else:\n value = value_\n\n ret: list[tuple[str, list[str]]] = []\n if not value.strip():\n return ret\n\n class State:\n seen_sep = True\n seen_colon = False\n filenames: list[str] = []\n codes: list[str] = []\n\n def _reset() -> None:\n if State.codes:\n for filename in State.filenames:\n ret.append((filename, State.codes))\n State.seen_sep = True\n State.seen_colon = False\n State.filenames = []\n State.codes = []\n\n def _unexpected_token() -> exceptions.ExecutionError:\n return 
exceptions.ExecutionError(\n f\"Expected `per-file-ignores` to be a mapping from file exclude \"\n f\"patterns to ignore codes.\\n\\n\"\n f\"Configured `per-file-ignores` setting:\\n\\n\"\n f\"{textwrap.indent(value.strip(), ' ')}\"\n )\n\n for token in _tokenize_files_to_codes_mapping(value):\n # legal in any state: separator sets the sep bit\n if token.tp in {_COMMA, _WS}:\n State.seen_sep = True\n # looking for filenames\n elif not State.seen_colon:\n if token.tp == _COLON:\n State.seen_colon = True\n State.seen_sep = True\n elif State.seen_sep and token.tp == _FILE:\n State.filenames.append(token.src)\n State.seen_sep = False\n else:\n raise _unexpected_token()\n # looking for codes\n else:\n if token.tp == _EOF:\n _reset()\n elif State.seen_sep and token.tp == _CODE:\n State.codes.append(token.src)\n State.seen_sep = False\n elif State.seen_sep and token.tp == _FILE:\n _reset()\n State.filenames.append(token.src)\n State.seen_sep = False\n else:\n raise _unexpected_token()\n\n return ret", "def health_check_codes(self) -> Sequence[str]:\n return pulumi.get(self, \"health_check_codes\")", "def get_code(self, obj):\n return [], []", "def pin_code(self) -> List[PinCodeSummary]:\n return self._pin_code", "def _set_success_codes(self, fname, success_codes):\n func = getattr(self._dll, fname)\n argtypes, func.argtuple_t, restype = self._fundecls[fname]\n argtypes = [argtype\n if not (isinstance(argtype, type(ctypes.POINTER(ctypes.c_int))) and\n argtype._type_.__module__ != \"ctypes\") # remove struct (nested) pointers\n else ctypes.c_voidp for argtype in argtypes]\n func.argtypes = argtypes\n try:\n success_code_type, = set(type(code) for code in success_codes)\n except ValueError:\n raise AssertionError(\"Success code of different types\")\n if success_code_type == restype:\n func.success_codes = success_codes\n func.errcheck = errcheck\n else:\n func.restype = restype\n setattr(self, fname, func)", "def ReturnCodeObject(self,code):\n\n if code in self.setOfUnusableCodes:\n return self.UnusableICDCodes[self.unusableCodeToIndexMap[code.replace('.','')]]\n else:\n return self.UsableICDCodes[self.usableCodeToIndexMap[code.replace('.','')]]", "def fallback_status_codes(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"fallback_status_codes\")", "def _set_codes(self) -> None:\n with open(\"codes\", \"r\") as file:\n x = file.readline()\n self._codes = x.split()", "def http_return_code(res_data) -> (int, str):\n\n start = re.search(\"[0-9]{3}\", res_data).start()\n end_of_line = res_data.find(\"\\r\\n\")\n code = int(res_data[start:start+3])\n if end_of_line == -1:\n end_of_line = len(res_data)\n meaning = res_data[start+4:end_of_line]\n return code, meaning", "def return_to_caller(self):\n value = bytearray()\n value.append(0xc3) # ret instruction\n return value", "def _get_module_return_code(self, status, module):\n\n # initialize return code array\n arr = []\n check_failed = False\n\n if module not in status.data:\n # assume running\n arr = [1]\n else:\n for job_name in status.data[module].keys():\n if job_name != 'pipeline_index':\n\n # update the job status and get the status string\n status._update_job_status(module, job_name)\n js = status.data[module][job_name]['job_status']\n\n if js == 'successful':\n arr.append(0)\n elif js == 'failed':\n arr.append(2)\n check_failed = True\n elif js is None:\n arr.append(3)\n else:\n arr.append(1)\n\n status._dump()\n\n return_code = self._parse_code_array(arr)\n\n status = self.RETURN_CODES[return_code]\n fail_str = ''\n 
if check_failed and status != 'failed':\n fail_str = ', but some jobs have failed'\n logger.info('Module \"{}\" for job \"{}\" is {}{}.'\n .format(module, self._config.name, status, fail_str))\n\n return return_code", "def _check_return(\n ret_type: None | IntType | ReferenceType, returned: Sequence[BitString]\n) -> None:\n if len(returned) > 1:\n raise ValueError(\"code block returns multiple values\")\n\n if ret_type is None:\n if len(returned) != 0:\n raise ValueError(\n 'function has no return type, but its code block defines \"ret\"'\n )\n elif isinstance(ret_type, ReferenceType):\n if len(returned) == 0:\n raise ValueError(\n f\"function has return type {ret_type}, but its code block \"\n f'does not define \"ret\"'\n )\n (ret_bits,) = returned\n if ret_type.type.width != ret_bits.width:\n raise ValueError(\n f\"function has return type {ret_type}, but its code block \"\n f'defines \"ret\" with width {ret_bits.width}'\n )\n else: # returns a value\n if len(returned) == 0:\n raise ValueError(\n f\"function has return type {ret_type}, but its code block \"\n f'does not assign to \"ret\"'\n )\n (ret_bits,) = returned\n if ret_type.width != ret_bits.width:\n raise ValueError(\n f\"function has return type {ret_type}, but its code block \"\n f'defines \"ret\" with width {ret_bits.width}'\n )", "def _update_codes(self, codes: str) -> None:\n with open(\"codes\", 'w') as file:\n file.writelines(codes)\n self._codes = codes.split()", "def _set_returncode(self, code):\n if code >= self._return_code:\n self._return_code = code", "def get_lock_codes(device: Device) -> Sequence[str]:\n try:\n codes_str = cast(str, device.attributes[ATTR_LOCK_CODES].value)\n codes = loads(codes_str)\n return [codes[id][\"name\"] for id in codes]\n except Exception as e:\n _LOGGER.warn(\"Error getting lock codes for %s: %s\", device, e)\n return []", "def return_code(self):\n return self._failures", "def fips_codes(self):\n return self._fips_codes", "def fips_codes(self):\n return self._fips_codes", "def forward_code_map(self):\n\n return { c.key:c.value for c in self.codes}", "def get_code_mapping( id ):\n returnVal = []\n theCodes = _theRegistry.get_code( id )\n codes = theCodes.get_codes()\n descs = theCodes.get_descriptions()\n for (code, desc) in map(None, codes, descs):\n returnVal.append( { 'code' : code, 'description' : desc } )\n return returnVal", "def result_code(self, result_code):\n\n self._result_code = result_code", "def ReturnCode(rc):\r\n return _hiew.ReturnCode(rc)", "def code(self) -> pulumi.Input['CanaryCodeArgs']:\n return pulumi.get(self, \"code\")", "def decode_results(self, outputs):\n ...", "def exit_code(self) -> int:\n _args: list[Arg] = []\n _ctx = self._select(\"exitCode\", _args)\n return _ctx.execute_sync(int)", "def view_promo_codes():\n\n results = []\n promo_codes = Promo_code.query.filter_by().all()\n for codes in promo_codes:\n result = {\n 'id': codes.id,\n 'code': codes.code,\n 'event': codes.event,\n 'status': codes.status,\n 'price': codes.price\n }\n results.append(result)\n if datetime.utcnow() > codes.expiry_date:\n codes.status = 'expired'\n if len(results) > 0:\n return jsonify({'promo_codes': results,\n 'count': str(len(results)),\n 'status': 'pass',\n 'message': 'promo codes found'\n }), 200\n return jsonify({'count': '0','status': 'fail',\n 'message': 'no promo codes found'\n }), 404", "def coding_receptors(self):\n return self.parameters['coding_receptors']", "def get_kcca_devices_codes():\n headers = {'x-api-key': CLARITY_API_KEY, 'Accept-Encoding': 'gzip'}\n 
api_url = CLARITY_API_BASE_URL + \"devices\"\n results = requests.get(api_url, headers=headers)\n\n device_data = pd.DataFrame(results.json())\n\n device_codes = []\n\n for index, row in device_data.iterrows():\n device_codes.append(row['code'])\n\n return device_codes", "def get_kcca_devices_codes():\n headers = {'x-api-key': CLARITY_API_KEY, 'Accept-Encoding': 'gzip'}\n api_url = CLARITY_API_BASE_URL + \"devices\"\n results = requests.get(api_url, headers=headers)\n\n device_data = pd.DataFrame(results.json())\n\n device_codes = []\n\n for index, row in device_data.iterrows():\n device_codes.append(row['code'])\n\n return device_codes", "def default_trace_codes() -> Mapping[int, str]:\n with open(Path(__file__).resolve().parent.joinpath('trace.codes'), 'r') as fd:\n return from_trace_codes_text(fd.read())", "def postal_codes(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"postal_codes\")", "def get_code():\n return jsonify({\"status\": \"0\", \"code\": code_status})", "def calculate_vol_adjusted_returns(self, returns_df, br, returns = True):\n\n tsc = TimeSeriesCalcs()\n\n if not returns: returns_df = tsc.calculate_returns(returns_df)\n\n if not(hasattr(br, 'portfolio_vol_resample_type')):\n br.portfolio_vol_resample_type = 'mean'\n\n if not(hasattr(br, 'portfolio_vol_resample_freq')):\n br.portfolio_vol_resample_freq = None\n\n leverage_df = self.calculate_leverage_factor(returns_df,\n br.portfolio_vol_target, br.portfolio_vol_max_leverage,\n br.portfolio_vol_periods, br.portfolio_vol_obs_in_year,\n br.portfolio_vol_rebalance_freq, br.portfolio_vol_resample_freq,\n br.portfolio_vol_resample_type)\n\n vol_returns_df = tsc.calculate_signal_returns_with_tc_matrix(leverage_df, returns_df, tc = br.spot_tc_bp)\n vol_returns_df.columns = returns_df.columns\n\n return vol_returns_df, leverage_df", "def encodings(self, code):\n return (self.encoding(code), )", "def _get_indexes(self):\n\n code_indexes = []\n for match in self.parser.ansi_regex.finditer(self._raw_string):\n code_indexes.extend(list(range(match.start(), match.end())))\n if not code_indexes:\n # Plain string, no ANSI codes.\n return code_indexes, list(range(0, len(self._raw_string)))\n # all indexes not occupied by ansi codes are normal characters\n char_indexes = [i for i in range(len(self._raw_string)) if i not in code_indexes]\n return code_indexes, char_indexes", "def count_response_codes():\n code = request.args.get('code', 200)\n log_lines = request.args.get('log_lines')\n\n if log_lines:\n lines_list = json.loads(log_lines)\n count = count_by_code(lines_list, code)\n else:\n count = 0\n\n response = str(count)\n return response", "def lcode(self):\n###############################################################################\n lcode = []\n for M in list(self.estimates.values()):\n if (M.code not in lcode):lcode.append(M.code)\n return(lcode)", "def load_codes(self, codes_file):\n with open(codes_file, \"r\") as cfile:\n codes = set([ line.strip().lower() for line in cfile ])\n if \"\" in codes: codes.remove(\"\")\n return(codes)", "def ancestor_codes(self):\n codes = [self.code]\n for parent_node in self.parents:\n codes += parent_node.ancestor_codes()\n return codes", "def from_trace_codes_file(path: str) -> Mapping[int, str]:\n with open(path, 'r') as fd:\n return from_trace_codes_text(fd.read())", "def get_retcode(self):\n return self._retcode", "def test_returns_list(self):\n metrics = ('input', 'output')\n\n @callback_return(*metrics)\n def returns_list():\n return [2, 1, 3]\n\n r = 
returns_list()\n self.assertEqual(len(metrics), len(r.keys()), 'Extra return values should be dropped.')\n self.assertEqual(2, r['input'])\n self.assertEqual(1, r['output'])\n self.assertNotIn('extra', r)", "def get_ret_code(self):\n\t\treturn call_sdk_function('PrlJob_GetRetCode', self.handle)", "def setResponseCode(code, message=None):", "def servo_codes(self):\r\n return self._arm.servo_codes", "def get_fire_centre_station_codes() -> List[int]:\n station_codes = []\n with get_read_session_scope() as session:\n station_query = get_all_stations(session)\n for station in station_query:\n if isinstance(station, dict):\n station_codes.append(int(station['station_code']))\n else:\n station_codes.append(int(station._mapping['station_code']))\n\n return station_codes", "def ircode(self, code):\n if code.lower() in self.codes:\n self._sendCommand('IRCODE ' + self.codes[code.lower()])\n else:\n print 'No such code: %s' % code", "def custom_block_response_status_code(self) -> pulumi.Output[Optional[int]]:\n return pulumi.get(self, \"custom_block_response_status_code\")", "def list():\n\n return cache.codeTableList()", "def result_code(self):\n return self._result_code", "def getCommandcode(self):\n commandcode = \"RETURN ({})\".format(self.value) # commandcode\n return commandcode", "def get_opcodes(self, script, verify_minimal_data=False, pc=0):\n while pc < len(script):\n opcode, data, new_pc, is_ok = self.scriptStreamer.get_opcode(\n script, pc, verify_minimal_data=verify_minimal_data)\n yield opcode, data, pc, new_pc\n pc = new_pc", "def set_exitcode(self, exitcode):\n self.exitcode = exitcode\n self.connected = False", "def returns(self, return_value):\n self._context.set_last_return(self, return_value)\n return self", "def exec_code(self,code,inputs=None,returns=None):\n #use the first worker to package up the cmd.\n package = self.workers[0].exec_code_pack(code,inputs,returns)\n return self._send_recv(package)", "def returnedbanks(self):\n returned = []\n for bank in self.__banks:\n if bank.returned():\n returned.append(bank)\n return returned", "def returnedbanks(self):\n returned = []\n for bank in self.__banks:\n if bank.returned():\n returned.append(bank)\n return returned", "def returnedbanks(self):\n returned = []\n for bank in self.__banks:\n if bank.returned():\n returned.append(bank)\n return returned", "def returnedbanks(self):\n returned = []\n for bank in self.__banks:\n if bank.returned():\n returned.append(bank)\n return returned", "def returnedbanks(self):\n returned = []\n for bank in self.__banks:\n if bank.returned():\n returned.append(bank)\n return returned", "def returnedbanks(self):\n returned = []\n for bank in self.__banks:\n if bank.returned():\n returned.append(bank)\n return returned", "def returnedbanks(self):\n returned = []\n for bank in self.__banks:\n if bank.returned():\n returned.append(bank)\n return returned", "def exit_program(cls, return_code):\n print(\"Exiting due to: %s\" % (ErrorMessages.verbose(return_code)))\n sys.exit(return_code)", "def code_to_sequences( self, ucode ):\n\t\t\n\t\tassert isinstance( ucode, unicode ), 'ucode must be unicode string!' 
\n\t\t\n\t\tfor uchar in ucode:\n\t\t\tif not( uchar in self._char39 ):\n\t\t\t\traise Barcode39Error( '%s char is not listed in Barcode39 characters [0..9,A..Z,space,9,-,.,$,/,+,%]' )\n\n\t\tresult = []\n\t\tfor uchar in ucode:\n\t\t\tresult = result + self.char_to_seq(uchar) \n\t\t\t\n\t\treturn result", "def results_csi_rs_indexes(self, results_csi_rs_indexes):\n if results_csi_rs_indexes is None:\n raise ValueError(\"Invalid value for `results_csi_rs_indexes`, must not be `None`\") # noqa: E501\n\n self._results_csi_rs_indexes = results_csi_rs_indexes", "def retcode(self):\n if not self.result:\n return None\n return self.result.retcode", "def barcodes(file_path=None):\n try:\n barcodes = decode(Image.open(file_path))\n barcodes_list = []\n if barcodes:\n # reformat into a list\n for barcode in barcodes:\n symbology_type = str(barcode.type)\n data = barcode.data.decode('UTF-8')\n barcodes_list.append({'type': symbology_type, 'data': data})\n else:\n UTILITIES_LOGGER.info('No barcodes found in file: ' + file_path)\n return None\n return barcodes_list\n except OSError as e:\n print('ERROR: unable to read file. errno: ' + str(e.errno) + ' filename: ' + str(e.filename) + ' strerror: ' + str(e.strerror))\n UTILITIES_LOGGER.exception('OSError')\n return None", "def code(self):\n return self._code", "def code(self):\n return self._code", "def code(self):\n return self._code", "def code(self):\n return self._code", "def returns(*types):\n\n def check_returns(f):\n \"\"\"Check the types.\"\"\"\n if not types:\n raise TypeError(\"A return type annotation must contain at least one type\")\n\n @functools.wraps(f)\n def new_f(*args, **kwds):\n \"\"\"A helper function.\"\"\"\n return_value = f(*args, **kwds)\n\n if len(types) == 1:\n # The function has a single return value.\n allowed_type = _replace_forward_references(types[0], f.__globals__)\n if not isinstance(return_value, allowed_type):\n raise Error(\"%r of type %r is not an instance of the allowed type %s \"\n \"for %r\"\n % (return_value, type(return_value),\n _type_repr(allowed_type), f))\n\n else:\n if len(return_value) != len(types):\n raise Error(\n \"Function %r has %d return values but only %d types were \"\n \"provided in the annotation.\" %\n (f, len(return_value), len(types)))\n\n for (r, t) in zip(return_value, types):\n allowed_type = _replace_forward_references(t, f.__globals__)\n if not isinstance(r, allowed_type):\n raise Error(\"%r of type %r is not an instance of allowed type %s \"\n \"for %r\" % (r, type(r), _type_repr(allowed_type), f))\n\n return return_value\n\n return new_f\n\n return check_returns", "def _list_output_regressor_code(self, model_code):\n regressor_code = [\n code for code in model_code.ravel() if (code != 0) and (str(code)[0] == \"1\")\n ]\n\n return np.asarray(regressor_code)", "def status_return_level(self, value):\n self._write(MX_STATUS_RETURN_LEVEL, value)", "def end_of_year_returns(model_roi, return_type, start, end):\n\n # converts index of datetimes to a list of strings\n dates = model_roi.index.astype('str').tolist()\n\n # list comprehension of a string of dates between the\n # start and end dates\n years = [str(x) for x in range(start, end + 1)]\n\n # generates an empty list of lists for each year\n end_year_dates = [[] for _ in range(len(years))]\n\n # iterates over every date in the roi series\n for date in dates:\n\n # iterates over every year in the years list\n for year in years:\n\n # iterates over every year in each date\n if year in date:\n\n # converts each date string to a datime 
object\n datetime_object = datetime.strptime(date, '%Y-%m-%d')\n\n # appends each date to its corresponding year in the years list\n (end_year_dates[years.index(year)]\n .append(datetime.strftime(datetime_object, '%Y-%m-%d')))\n\n # finds the last date in each year\n end_year_dates = [max(x) for x in end_year_dates]\n\n # gets the rounded end of year returns\n returns = [round(model_roi[date], 1) for date in end_year_dates]\n\n # shifts the returns list by 1 and appends 0 to the beginning of the list\n return_rates = [0] + returns[:len(returns)-1]\n \"\"\"Example: [a, b, c] -> [0, a, b]\"\"\"\n\n # converts returns list to an array\n returns_arr = np.array(returns)\n\n # converts the return_rates list to an array\n return_rates_arr = np.array(return_rates)\n\n # calculates the rounded rate of returns\n return_rates = [round(x, 1) for x in list(returns_arr - return_rates_arr)]\n \"\"\"Example: [a, b, c] - [0, a, b] = [a, b-a, c-b]\"\"\"\n\n # dictionary with the years as keys and returns as values\n returns = dict(zip(years, returns))\n\n # dictionary with the years as keys and return rates as values\n return_rates = dict(zip(years, return_rates))\n\n if return_type == 'returns':\n return returns\n\n if return_type == 'return_rates':\n return return_rates", "def get_code(self):\n return self.code", "def get_code(self):\n return self.code" ]
[ "0.6291366", "0.614847", "0.55107534", "0.54416496", "0.5277496", "0.5117377", "0.50821906", "0.50539535", "0.49968654", "0.4987946", "0.49695534", "0.47850552", "0.47627786", "0.4757852", "0.4730072", "0.4728713", "0.47217613", "0.46919942", "0.46385816", "0.4602432", "0.4597837", "0.4594224", "0.45785755", "0.45754677", "0.45607373", "0.45357856", "0.4532626", "0.45313764", "0.45036027", "0.44664925", "0.4453955", "0.4452978", "0.44485694", "0.44483638", "0.44387802", "0.44180992", "0.43781874", "0.43640792", "0.43579713", "0.43488616", "0.43488616", "0.4346978", "0.43327627", "0.43204117", "0.42955613", "0.42837703", "0.42759925", "0.42724973", "0.42690352", "0.42687607", "0.4265844", "0.4265844", "0.42639497", "0.4262137", "0.42572936", "0.42572543", "0.42515755", "0.42501417", "0.42372155", "0.42330274", "0.42330223", "0.42202684", "0.42069414", "0.4204624", "0.4196445", "0.4194637", "0.4192928", "0.41878933", "0.4165125", "0.41592884", "0.41590506", "0.4156686", "0.415489", "0.41512772", "0.41466063", "0.41341433", "0.41194564", "0.41183534", "0.41139126", "0.41139126", "0.41139126", "0.41139126", "0.41139126", "0.41139126", "0.41139126", "0.41129357", "0.41125897", "0.41122413", "0.40920806", "0.4090337", "0.4074577", "0.4074577", "0.4074577", "0.4074577", "0.40650597", "0.40585887", "0.40423164", "0.40273935", "0.40257785", "0.40257785" ]
0.8261662
0
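Editorial aside (not a field of the dataset record above): the positive document for this query is pyangbind-style generated code, and its essential behaviour is "coerce the value, validate it as a list, then assign it to a name-mangled attribute". A hand-written sketch of that same idiom, with every class and attribute name assumed purely for illustration, might look like:

class StatisticsOamState(object):
    # Minimal stand-in for the generated container class (assumed name, not from the record).
    def __init__(self):
        self.__return_codes = []

    def _set_return_codes(self, v):
        # Validate that v is list-like with integer entries before assigning,
        # mirroring the try/except-and-raise-ValueError shape of the generated setter.
        try:
            codes = [int(item) for item in v]
        except (TypeError, ValueError):
            raise ValueError("return_codes must be of a type compatible with list")
        self.__return_codes = codes

    def _get_return_codes(self):
        return list(self.__return_codes)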
Assumes binary array of 1 and 0 as input. Calculate longest ranges of 1's.
def count_ranges(a):
    ranges = []
    count = 0
    for i, v in enumerate(a):
        if v == 1: # same as previous value
            count += 1
        else:
            if count > 1:
                ranges.append([i, count]) # [end, length]
            count = 0
    return ranges
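For orientation only — this note and the snippet below are editorial additions, not a field of the dataset record. As reconstructed above, count_ranges records a run of 1's only when it reaches the 0 that terminates it, and only when the run is longer than one element, so single 1's and a run that extends to the end of the array are not reported; each recorded entry is [index of the terminating 0, run length]. A small usage sketch (input values are made up for illustration):

bits = [0, 1, 1, 1, 0, 1, 1, 0]
runs = count_ranges(bits)                      # [[4, 3], [7, 2]] -> [end, length]
longest = max(runs, key=lambda r: r[1]) if runs else None
print(longest)                                 # [4, 3]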
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def solution(N):\n # write your code in Python 3.6\n bin_number = str(bin(N))[2:]\n new_bin_gap = False\n longest_bin_gap = 0\n bin_gap_counter = 0\n for char in bin_number:\n if char == '1':\n if bin_gap_counter > longest_bin_gap:\n longest_bin_gap = bin_gap_counter\n new_bin_gap = True\n bin_gap_counter = 0\n elif new_bin_gap:\n bin_gap_counter += 1\n return longest_bin_gap", "def max_ones_seq(self, array, m):\n n = len(array)\n i, j = 0, 0 # start, end of current consecutive 1s sequence\n x, y = 0, 0 # start, end of longest consecutive 1s sequence\n while j < n:\n if array[j]: # current element is 1\n if j - i > y - x: # update start, end of longest 1s sequence\n x, y = i, j\n j += 1 # move the right pointer\n elif not array[j] and m > 0: # current element is 0, we can flip it\n if j - i > y - x: # update start, end of longest 1s sequence\n x, y = i, j\n m -= 1 # deacrese number of allowed flips\n j += 1 # move the right pointer\n else: # current element is zero and we are out of flips\n if not array[i]: # start of current 1s sequence is 0\n m += 1 # increase available flips\n i += 1 # move the left pointer\n return list(range(x, y + 1))", "def find_all_maxima(arr):\n\n checks = np.r_[True, arr[1:] > arr[:-1]] & np.r_[arr[:-1] > arr[1:], True]\n maxima = np.where(checks)[0]\n return maxima", "def find_max_continous_sequence(array, start):\n pos = start\n while pos + 1 < len(array):\n if not array[pos] + 1 == array[pos + 1]:\n break\n pos += 1\n if pos + 1 == len(array):\n return array[start:]\n return array[start:pos + 1]", "def longestCommomSubsequence(self, arrays: List[List[int]]) -> List[int]:\n counts = Counter(val for arr in arrays for val in arr)\n res = []\n for val, count in counts.items():\n if count == len(arrays): res.append(val)\n return res", "def max_ones_length(self, array, m):\n n = len(array)\n i, j = 0, 0 # sliding window indices\n curr_ones = 0\n max_ones = 0\n while j < n:\n if array[j]: # current element is 1, increase 1s count\n curr_ones += 1\n j += 1\n max_ones = max(max_ones, curr_ones) # update max 1s count\n elif not array[j] and m > 0: # current element is 0, we can flip it\n curr_ones += 1\n m -= 1\n j += 1\n max_ones = max(max_ones, curr_ones) # update max 1s count\n else: # current element is zero and we are out of flips\n if not array[i]: # start of current 1s sequence is 0\n m += 1 # increase available flips\n i += 1 # move the left pointer\n curr_ones -= 1 # decrease current 1s count\n return max_ones", "def get_max_width(binary_mask):\n start_px = 0\n end_px = 0\n\n for i, row in enumerate(binary_mask):\n max = np.argmax(row)\n if max > 0:\n start_px = i\n break\n\n for i, row in enumerate(binary_mask[::-1]):\n max = np.argmax(row)\n if max > 0:\n end_px = i\n break\n\n return binary_mask.shape[0] - start_px - end_px", "def longest_seq_of_1s(n, index_to_ignore):\n max_ = 0\n counter = 0\n for i in range(SEQ_LENGTH):\n if i == index_to_ignore or get_bit(n, i):\n counter += 1\n max_ = max(counter, max_)\n else:\n counter = 0\n return max_", "def bu(lengths: List[int], L: int) -> int:\n N = len(lengths)\n dp = [0] + [-1]*L\n for l in lengths:\n for j in range(l, L+1):\n dp[j] = max(dp[j], dp[j-l]+1 if dp[j-l] != -1 else -1)\n return dp[-1]", "def solution(n: int) -> int:\n binary_gap = 0\n count = 0\n # skip the lowest zeros\n while n and (n & 1) == 0:\n n = n >> 1\n while n:\n while n & 1:\n n = n >> 1\n while n and (n & 1) == 0:\n count += 1\n n = n >> 1\n if n & 1 and binary_gap < count:\n binary_gap = count\n count = 0\n return binary_gap", "def 
get_lengths_from_binary_sequence_mask(\n mask: torch.BoolTensor,\n) -> torch.LongTensor:\n return mask.sum(-1)", "def longest_sequence(start=1, end=1000000):\n\n max_length = 0\n max_start_value = 0\n\n # generate sequence for each value\n for i in range(start, end):\n current = generate_sequence(i)\n\n # if the current sequence is the longest, update values\n if len(current) > max_length:\n max_length = len(current)\n max_start_value = i\n\n return max_length, max_start_value", "def run_length_coding(arr: np.ndarray, max_len=0xF) -> List[RunLength]:\n\n def _break_up_rle(code, max_len):\n l = code[\"zeros\"]\n div = l // max_len\n full = {\n \"zeros\": max_len - 1, # minus 1 because we get another for free from the value\n \"value\": 0\n }\n return ([full] * div) + [{\n \"zeros\": l - (div * max_len),\n \"value\": code[\"value\"]\n }]\n\n def reduction(agg, next):\n if \"value\" in agg[-1]:\n agg.append({\"zeros\": 0})\n\n if next == 0:\n agg[-1][\"zeros\"] += 1\n return agg\n\n if \"value\" not in agg[-1]:\n agg[-1][\"value\"] = next\n\n return agg\n utils.debug_msg(\"Going to determine RLE for %d size array\" % len(arr))\n rl = functools.reduce(reduction, arr, [{\"zeros\": 0}])\n utils.debug_msg(\"%d long RLE created\" % len(rl))\n # If the last element has no value then it was 0! That is a special tuple, (0,0)\n if \"value\" not in rl[-1]:\n rl[-1] = {\"zeros\": 0, \"value\": 0}\n\n # the goal of RLE in the case of compression is to contain the first symbol (length, size) within a byte\n # so if the length is too long, then we need to break it up\n if max_len is not None:\n utils.debug_msg(\"Breaking up RLE lengths that are larger than %d\" % max_len)\n rl = [_break_up_rle(code, max_len) for code in rl]\n rl = utils.flatten(rl)\n\n utils.debug_msg(\"Make RLE objects\")\n return [RunLength.from_dict(r) for r in rl]", "def get_lengths_from_binary_sequence_mask(mask: torch.Tensor):\n return mask.long().sum(-1)", "def get_seq_lenght(seq_arry, end_symbol):\n scale_arry = np.argmax(seq_arry, axis=2) + np.sum(seq_arry, axis=2)\n end_symbol_scale = np.argmax(end_symbol) + np.sum(end_symbol)\n cond = (scale_arry != end_symbol_scale).astype(np.int)\n lens = cond.sum(axis=1)\n return lens", "def build_bridge(blocks):\n bridges = []\n for start in [ b for b in blocks if 0 in b ]:\n tmp = blocks[:]\n tmp.remove(start)\n bridges.append(build(tmp, start[1], [start], sum(start)))\n return find_max()", "def get_length_of_longest_sub_array(l):\n if len(l) < 1:\n return 0\n\n longest_seen_sequence = 0\n\n this_sequence_length = 1\n\n previous = l[0]\n\n for _, current in enumerate(l):\n\n if current > previous:\n this_sequence_length = this_sequence_length + 1\n\n if this_sequence_length > longest_seen_sequence:\n longest_seen_sequence = this_sequence_length\n\n else:\n this_sequence_length = 1\n\n if this_sequence_length > longest_seen_sequence:\n longest_seen_sequence = this_sequence_length\n\n previous = current\n\n return longest_seen_sequence", "def array_maximal_adjacent_difference( arr ):\n length = len(arr) - 1\n diffs = [ abs( arr[i] - arr[i+1] ) for i in range( length ) ]\n return max(diffs)", "def intervals(b, min_length=1, forgivingJump=True, removeSmallRel=True, removeSmallFact=0.1, mergeCloseRel=False, mergeCloseFact=0.2):\r\n b = np.asarray(b)\r\n total = np.sum(b)\r\n\r\n min_length=max(min_length,1)\r\n if forgivingJump:\r\n min_jump=min_length\r\n else:\r\n min_jump=1\r\n\r\n if total==0:\r\n IStart = np.array([])\r\n IEnd = np.array([])\r\n Lengths= np.array([])\r\n return IStart, 
IEnd, Lengths\r\n elif total==1:\r\n i = np.where(b)[0][0]\r\n IStart = np.array([i])\r\n IEnd = np.array([i])\r\n Lengths= np.array([1])\r\n else:\r\n n = len(b)\r\n Idx = np.arange(n)[b]\r\n delta_Idx=np.diff(Idx)\r\n jumps =np.where(delta_Idx>min_jump)[0]\r\n if len(jumps)==0:\r\n IStart = np.array([Idx[0]])\r\n IEnd = np.array([Idx[-1]])\r\n else:\r\n istart=Idx[0]\r\n jumps=np.concatenate(([-1],jumps,[len(Idx)-1]))\r\n IStart = Idx[jumps[:-1]+1] # intervals start right after a jump\r\n IEnd = Idx[jumps[1:]] # intervals stops at jump\r\n Lengths = IEnd-IStart+1\r\n\r\n # Removing intervals smaller than min_length\r\n bKeep = Lengths>=min_length\r\n IStart = IStart[bKeep]\r\n IEnd = IEnd[bKeep]\r\n Lengths = Lengths[bKeep]\r\n # Removing intervals smaller than less than a fraction of the max interval\r\n if removeSmallRel:\r\n bKeep = Lengths>=removeSmallFact*np.max(Lengths)\r\n IStart = IStart[bKeep]\r\n IEnd = IEnd[bKeep]\r\n Lengths = Lengths[bKeep]\r\n\r\n # Distances between intervals\r\n if mergeCloseRel:\r\n if len(IStart)<=2:\r\n pass\r\n else:\r\n D = IStart[1:]-IEnd[0:-1]\r\n #print('D',D,np.max(D),int(np.max(D) * mergeCloseFact))\r\n min_length = max(int(np.max(D) * mergeCloseFact), min_length)\r\n if min_length<=1:\r\n pass \r\n else:\r\n #print('Readjusting min_length to {} to accomodate for max interval spacing of {:.0f}'.format(min_length, np.mean(D)))\r\n return intervals(b, min_length=min_length, forgivingJump=True, removeSmallRel=removeSmallRel, removeSmallFact=removeSmallFact, mergeCloseRel=False)\r\n return IStart, IEnd, Lengths", "def get_intervals(l):\n intervals = len(l) * [0]\n # Initalize with 1\n intervals[0] = 1\n for k in range(1, len(l)):\n intervals[k] = (len(l[k]) + 1) * intervals[k - 1]\n\n return intervals", "def longest_run(L):\n\tlongest_length = 1\n\tincreasing_length = 1\n\tdecreasing_length = 1\n\tfor i in range(len(L) - 1):\n\t\tif L[i] >= L[i+1]:\n\t\t\tdecreasing_length += 1\n\t\telse:\n\t\t\tdecreasing_length = 1\n\t\tif L[i] <= L[i+1]:\n\t\t\tincreasing_length += 1\n\t\telse:\n\t\t\tincreasing_length = 1\n\t\tif increasing_length > longest_length:\n\t\t\tlongest_length = increasing_length\n\t\t\trun_end = i + 1\n\t\telif decreasing_length > longest_length:\n\t\t\tlongest_length = decreasing_length\n\t\t\trun_end = i + 1\n\n\treturn sum(L[run_end - longest_length + 1 : run_end+1])", "def highest_bin_freq(ary):\n num_true = 0\n num_false = 0\n\n for val in ary:\n num_true += 1 if val == '1' else 0\n num_false += 1 if val == '0' else 0\n\n return '1' if num_true > num_false else '0'", "def solution(N):\n\n # get binary representation of number\n binary_repr = f\"{N:b}\"\n\n # initialise counters\n current_gap, max_gap = 0, 0\n\n for b in binary_repr:\n # end of gap, update max\n if b == '1':\n max_gap = max(current_gap, max_gap)\n current_gap = 0\n # increase gap counter\n else:\n current_gap += 1\n\n return max_gap", "def longestIncreasingSubsequence(nums):\n if not nums:\n return 0\n \n dp = [None] * len(nums)\n dp[0] = 1\n maxans = 1\n \n for i in range(1, len(dp)):\n maxval = 0\n for j in range(0, i):\n if nums[i] > nums[j]:\n maxval = max(maxval, dp[j])\n \n dp[i] = maxval + 1\n maxans = max(maxans, dp[i])\n \n return maxans", "def int_to_max_bit(num, length):\n if num >= 2**length:\n return [None]\n if num == 1:\n return [str(num)]\n a = 2**(length-1)\n if num > a:\n return sorted([str(a)] + int_to_max_bit(num - a, length-1))\n elif num == a:\n return [str(a)]\n else:\n return int_to_max_bit(num, length-1)", "def get_lims(data):\n return 
data[:, 0].min() - 1, data[:, 0].max() + 1, data[:, 1].min() - 1, data[:, 1].max() + 1", "def largest_cc(mask):\n # We use asarray to be able to work with masked arrays.\n mask = np.asarray(mask)\n labels, label_nb = ndimage.label(mask)\n if not label_nb:\n raise ValueError('No non-zero values: no connected components')\n if label_nb == 1:\n return mask.astype(np.bool_)\n label_count = np.bincount(labels.ravel().astype(np.int_))\n # discard 0 the 0 label\n label_count[0] = 0\n return labels == label_count.argmax()", "def max_subarray(sequence=[-5, 20, -10, 30, 15]):\n\n sums = {}\n indices = []\n\n for i in range(len(sequence)):\n for j in range(i+1, len(sequence)):\n sub_seq = sequence[i:j+1]\n sub_seq_sum = sum(sub_seq)\n #print(sub_seq,'=>',sub_seq_sum)\n sums[sum(sub_seq)]=[i,j+1]\n\n i_indice = sums[max(sums)][0]\n j_indice = sums[max(sums)][1]\n return (max(sums), sequence[i_indice:j_indice])", "def recursive_index_decode(int_array, max=32767, min=-32768):\n out_arr = []\n decoded_val = 0\n for item in int_array.tolist():\n if item==max or item==min:\n decoded_val += item\n else:\n decoded_val += item\n out_arr.append(decoded_val)\n decoded_val = 0\n return numpy.asarray(out_arr,dtype=numpy.int32)", "def longincseq(v):\n n=len(v)\n if n==0: return -1\n l = 0\n u = n-1\n max2here=1\n maxsofar=1\n for i in xrange(l+1, u+1):\n if v[i]>v[i-1]: \n max2here+=1\n else:\n max2here=1\n maxsofar = max(maxsofar, max2here)\n return maxsofar", "def find_longest(input):\r\n for thing in input:\r\n print thing\r\n dist_array = [[0 for x in range(rows)] for x in range(cols)] # rows and cols are static variables in main method\r\n for x in xrange(0, len(input), 1):\r\n for y in xrange(0, len(input[x]), 1):\r\n dist_array[x][y] = calculate_longest(dist_array, input, x, y)\r\n for item in dist_array:\r\n print item\r\n return max(max(dist_array))", "def get_mask_from_sequence_lengths(\n sequence_lengths: torch.Tensor, max_length: int\n) -> torch.BoolTensor:\n # (batch_size, max_length)\n ones = sequence_lengths.new_ones(sequence_lengths.size(0), max_length)\n range_tensor = ones.cumsum(dim=1)\n return sequence_lengths.unsqueeze(1) >= range_tensor", "def findMaxConsecutiveOnes(nums: List[int]) -> int:\n count = maxCount = 0\n for num in nums:\n if num == 1:\n count += 1\n else:\n maxCount = max([count, maxCount])\n count = 0\n return max([count, maxCount])", "def get_seq_length(embeddings: np.ndarray) -> np.ndarray:\n nonzero_mask = np.any(np.logical_not(np.isclose(embeddings, 0.0)), axis=-1).astype(int) # [B, T], 0 when vector is all zeros, 1 otherwise\n seq_idx = np.expand_dims(np.arange(embeddings.shape[1]), axis=0) # [1, T]\n\n masked_idx = nonzero_mask * seq_idx # [B, T]\n return np.max(masked_idx, axis=-1) + 1 # [B]", "def c(ixs):\n return sum(range(1, sum((i > 0 for i in ixs)) + 1))", "def lps(mask):\n if not mask: return 0\n if not mask & (mask-1): return 1\n lo = int(log2(mask & ~(mask-1))) # least significant set bi\n hi = int(log2(mask)) # most significant set bit \n if s[lo] == s[hi]: return 2 + lps(mask^(1<<lo)^(1<<hi))\n return max(lps(mask^(1<<lo)), lps(mask^(1<<hi)))", "def lengthOfLIS(self, nums):\n n = len(nums)\n if n <= 1:\n return n\n\n max_len = 0\n\n dp = [0] * n\n for i, num in enumerate(nums):\n if i == 0:\n dp[0] = 1\n max_len = 1\n else:\n prev_max = 0\n for j in xrange(i):\n if nums[j] < num:\n prev_max = max(prev_max, dp[j])\n dp[i] = prev_max + 1\n max_len = max(max_len, dp[i])\n\n return max_len", "def max_level(data: np.ndarray) -> int:\n shape = data.shape[1:] # exclude 
channel dimension\n return min(shape).bit_length() - 1", "def max_noncontiguous_subarray(array):\n if len(array) > 0:\n all_negative = True\n max_negative = None\n sum_items = 0\n for item in array:\n if item >= 0:\n all_negative = False\n sum_items += item\n else:\n if max_negative is None or item > max_negative:\n max_negative = item\n if all_negative:\n return max_negative\n else:\n return sum_items\n else:\n return 0", "def get_bin_lims(n, max_value):\n return np.linspace(max_value // n, max_value, n, dtype=int)", "def max_contiguous_subarray(array):\n if len(array) > 0:\n global_max = array[0]\n current_max = array[0]\n for item in array[1:]:\n current_max = max(current_max + item, item)\n global_max = max(global_max, current_max)\n return global_max\n else:\n return 0", "def seq2bbox(sequence: np.ndarray) -> np.ndarray:\n sequence = np.asarray(sequence, dtype=np.bool)\n selected_indices, = np.where(sequence == 1)\n\n bboxes_lr = []\n for k, g in groupby(enumerate(selected_indices), lambda x: x[0] - x[1]):\n segment = list(map(itemgetter(1), g))\n start_frame, end_frame = segment[0], segment[-1] + 1\n bboxes_lr.append([start_frame, end_frame])\n\n bboxes_lr = np.asarray(bboxes_lr, dtype=np.int32)\n return bboxes_lr", "def get_continous_time_periods(binary_array):\n binary_array = np.copy(binary_array).astype(\"int8\")\n n_times = len(binary_array)\n d_times = np.diff(binary_array)\n # show the +1 and -1 edges\n pos = np.where(d_times == 1)[0] + 1\n neg = np.where(d_times == -1)[0] + 1\n\n if (pos.size == 0) and (neg.size == 0):\n if len(np.nonzero(binary_array)[0]) > 0:\n return [(0, n_times-1)]\n else:\n return []\n elif pos.size == 0:\n # i.e., starts on an spike, then stops\n return [(0, neg[0])]\n elif neg.size == 0:\n # starts, then ends on a spike.\n return [(pos[0], n_times-1)]\n else:\n if pos[0] > neg[0]:\n # we start with a spike\n pos = np.insert(pos, 0, 0)\n if neg[-1] < pos[-1]:\n # we end with aspike\n neg = np.append(neg, n_times - 1)\n # NOTE: by this time, length(pos)==length(neg), necessarily\n h = np.matrix([pos, neg])\n # print(f\"len(h[1][0]) {len(h[1][0])} h[1][0] {h[1][0]} h.size {h.size}\")\n if np.any(h):\n result = []\n for i in np.arange(h.shape[1]):\n if h[1, i] == n_times-1:\n result.append((h[0, i], h[1, i]))\n else:\n result.append((h[0, i], h[1, i]-1))\n return result\n return []", "def solution(n, array):\n\n counters = [0] * n\n\n # Current greatest value calculated so far\n max_count = 0\n\n for i in range(len(array)):\n if array[i] == n + 1:\n # max_count = max(counters)\n counters = [max_count] * n\n else:\n counters[array[i] - 1] += 1\n\n # To avoid calculating max(), we update the max value at each step\n if counters[array[i] - 1] > max_count:\n max_count = counters[array[i] - 1]\n\n return counters", "def maxTurbulenceSize(self, arr: List[int]) -> int:\n if len(arr) == 1:\n return 1\n ret = 1\n tmp_ret = 0\n last_flag = None\n for i in range(1, len(arr)):\n if arr[i] == arr[i - 1]:\n current_flag = None\n else:\n current_flag = arr[i] > arr[i - 1]\n\n if current_flag is None:\n ret = max(ret, tmp_ret)\n tmp_ret = 1\n elif last_flag is None or last_flag == current_flag:\n ret = max(ret, tmp_ret)\n tmp_ret = 2\n else:\n tmp_ret += 1\n\n last_flag = current_flag\n return max(ret, tmp_ret)", "def max_arg_one_hot(arr: np.ndarray) -> np.ndarray:\n # using argmin because inverted indices in binary_to_one_hot\n return binary_to_one_hot(np.argmin(arr, axis=1))", "def to_bins(arr):\n result = np.zeros(len(arr)+1)\n result[1:-1] = 0.5 * (arr[1:] + 
arr[:-1])\n result[0] = arr[0] - 0.5*(arr[1] - arr[0])\n result[-1] = arr[-1] + 0.5*(arr[-1] - arr[-2])\n return result", "def longest_increasing_sub_seq(A):\n\n # boundary cases\n\n # The lenght the of the given list\n arr_len = len(A)\n\n if arr_len <= 1:\n return arr_len\n\n # Create an auxiliary array that will hold the \"end elements\"\n # of the intermeditae LIS' that we will be creating\n\n aux_array = [0 for _ in range(arr_len + 1)]\n\n # Initialize aux_array[0] = A[0]\n aux_array[0] = A[0]\n\n # l acts as our pointer, always points to an empty slot\n l = 1\n\n # Now iterate through the array\n for i in range(1, arr_len):\n if A[i] < aux_array[0]:\n # This is the new smallest value\n # Replace aux_array[0] = A[i]\n\n # i.e we are starting over again, creating a new active list of lenght 1\n # Case 1\n aux_array[0] = A[i]\n\n elif A[i] > aux_array[l - 1]:\n # Case 2: A[i] is largets among all active lists\n aux_array[l] = A[i]\n l += 1\n\n else:\n # Case 3\n # A[i] is in between\n # A[i] wants to be current end candidate of an existing subsequence\n index = get_ceil_index(-1, l - 1, A[i], aux_array)\n aux_array[index] = A[i]\n\n\n return l", "def reg_start_len(reg_classification, only_open=False):\n vv = np.convolve(reg_classification, [1, -1])\n #vv = np.append(-1, vv) # start always with closed\n boundaries = np.append(0, np.where(vv)) # add boundary at beginning\n lengths = np.diff(np.append(boundaries, len(vv) + 1)) # add the last\n if only_open:\n open_pos_sel = np.append(False, vv[vv != 0] == 1)\n lengths = lengths[open_pos_sel]\n return np.array([boundaries[open_pos_sel], lengths]).T\n else:\n return np.array([boundaries, lengths]).T", "def degreeOfArray(nums):\n\n # Generate frequency map\n freq = defaultdict(int)\n for num in nums:\n freq[num] += 1\n\n degree = max(freq.values())\n freq2 = defaultdict(int)\n minLen = math.inf\n\n start = end = 0\n maxCount = 0\n\n # sliding window\n while end < len(nums):\n freq2[nums[end]] += 1\n if freq2[nums[end]] == degree:\n maxCount += 1\n\n while maxCount > 0:\n if freq2[nums[start]] == degree:\n maxCount -= 1\n\n freq2[nums[start]] -= 1\n minLen = min(minLen, end - start + 1)\n start += 1\n\n end += 1\n\n return minLen", "def N_states_for_learner(self):\n idx_max = []\n limits = 50, 2*_math.pi, 50, 50, 50, 50, 50, 50, 50\n for idx, limit in enumerate(limits):\n test = [0 for i in xrange(len(limits))]\n check = _arange(-limit,limit,limit/1000.)\n maxi = 0\n for v in check:\n test[idx]=v\n ret = self._state_index(*test)\n maxi = max((maxi, ret[idx]))\n idx_max.append(maxi)\n\n return tuple([idx+1 for idx in idx_max])", "def find_maxima(x):\n\n idx = []\n for i in range(len(x)):\n # `i` is a local maximum if the signal decreases before and after it\n if x[i-1] < x[i] and x[i+1] < x[i]:\n idx.append(i)\n return idx", "def find_lis(seq):\n\n # https://rosettacode.org/wiki/Longest_increasing_subsequence#Python:_O.28nlogn.29_Method_from_Wikipedia.27s_LIS_Article.5B1.5D\n\n l = len(seq)\n previous = [0] * l\n minimum = [0] * (l + 1)\n length = 0\n for i in range(l):\n low = 1\n high = length\n while low <= high:\n mid = (low + high) // 2\n if seq[minimum[mid]] < seq[i]:\n low = mid + 1\n else:\n high = mid - 1\n\n new = low\n previous[i] = minimum[new - 1]\n minimum[new] = i\n\n if new > length:\n length = new\n\n s = []\n k = minimum[length]\n for i in range(length - 1, -1, -1):\n s.append(seq[k])\n k = previous[k]\n return s[::-1]", "def get_longest_alternating_signs(lst2: list[int]) -> list[int]:\r\n lst1 = []\r\n x = 0\r\n count = 0\r\n 
maxi = 0\r\n for i in range(0, len(lst2) - 1):\r\n if alternating_signs_of_2(lst2[i], lst2[i + 1]):\r\n count += 1\r\n if count > maxi:\r\n maxi = count\r\n x = i + 1\r\n else:\r\n count = 0\r\n\r\n for i in range(x - maxi, x + 1):\r\n lst1.append(lst2[i])\r\n return lst1", "def find_extrema(s):\n max_env = np.logical_and(\n np.r_[True, s[1:] > s[:-1]],\n np.r_[s[:-1] > s[1:], True])\n min_env = np.logical_and(\n np.r_[True, s[1:] < s[:-1]],\n np.r_[s[:-1] < s[1:], True])\n max_env[0] = max_env[-1] = False\n\n #exclude endpoints\n mini = [m for m in min_env.nonzero()[0] if m != 0 and m != len(s)-1]\n maxi = [m for m in max_env.nonzero()[0] if m != 0 and m != len(s)-1]\n\n return mini,maxi", "def generate_binned_values( lower_lim, upper_lim, chr_length, snps_per_chr, indels_per_chr, resolution ):\n\t\n\tsnp_data = []\n\tindel_data = []\n\twhile True:\n\t\tif upper_lim >= chr_length:\n\t\t\tbreak\n\t\telse:\n\t\t\tsnp_tmp = []\n\t\t\tindel_tmp = []\n\t\t\tfor SNP in snps_per_chr:\n\t\t\t\tif SNP <= upper_lim and SNP > lower_lim:\n\t\t\t\t\tsnp_tmp.append( 'X' )\n\t\t\tfor indel in indels_per_chr:\n\t\t\t\tif indel <= upper_lim and indel > lower_lim:\n\t\t\t\t\tindel_tmp.append( 'X' )\n\t\t\tsnp_data.append( len( snp_tmp ) )\n\t\t\tindel_data.append( len( indel_tmp ) )\n\t\tupper_lim += resolution\n\t\tlower_lim += resolution\n\treturn max( snp_data ), max( indel_data ), snp_data, indel_data", "def maxpeaks(sig):\n diff_sig = np.diff(sig)\n\n return np.sum([1 for nd in range(len(diff_sig[:-1])) if (diff_sig[nd+1]<0 and diff_sig[nd]>0)])", "def largest_cc(segmentation):\n labels = skimage.measure.label(segmentation)\n unique, counts = np.unique(labels, return_counts=True)\n list_seg = list(zip(unique, counts))[1:] # the 0 label is by default background so take the rest\n try:\n largest = max(list_seg, key=lambda x: x[1])[0]\n except:\n pass\n labels_max = (labels == largest).astype(int)\n return labels_max", "def majority_logical(*bit_arrays):\n\n if (len(bit_arrays) == 0):\n raise TypeError(\"len(bit_arrays) must be > 0.\")\n\n MINIMUM_MAJORITY = (len(bit_arrays) // 2) + 1\n\n answer = itertools.combinations(bit_arrays, MINIMUM_MAJORITY)\n answer = map(all, answer)\n answer = any(answer)\n return answer", "def clean_labels(labels):\n\n llabels, slabels = list(labels), set(labels)\n \n for l in slabels:\n if llabels.count(l) <2 and l != max(slabels):\n llabels[llabels.index(l)] = l+1\n return clean_labels(llabels)\n elif llabels.count(l) <2 and l == max(slabels):\n llabels[llabels.index(l)] = l-1\n return clean_labels(llabels)\n else:\n return np.array(llabels)", "def arange_sequence(ranges: Tensor) -> Tensor:\n maxcnt = torch.max(ranges).item()\n numuni = ranges.shape[0]\n complete_ranges = torch.arange(maxcnt, device=ranges.device).unsqueeze(0).expand(numuni, -1)\n\n return complete_ranges[complete_ranges < ranges.unsqueeze(-1)]", "def allSubsets(self):\n n = self.graph.n\n subsets = np.zeros((2**n,n))\n for i in range(2**n):\n binary = np.array(list(bin(i)[2:])).astype(float)\n if binary.shape[0] < n:\n padding = np.zeros(n-binary.shape[0])\n subsets[i,:] = np.append(padding, binary)\n else:\n subsets[i,:] = binary\n return np.where(subsets > 0, 1, -1)", "def check_argmax(array):\n # Check which movements are the best, return it as a list where 1 = max of the list.\n res = [1 if i == max(array) else 0 for i in array]\n return list(compress([\"V\", \"H\", \"D\", \"X\"], res))", "def longest_seq(n):\n max_seq = 0\n for i in range(SEQ_LENGTH):\n max_seq = max(max_seq, longest_seq_of_1s(n, 
i))\n\n return max_seq", "def dyadic_length(x):\n n = x.shape[0]\n return jnp.ceil(jnp.log2(n)).astype(int)", "def arraybin(array, bins):\n bin_it = lambda value: (i for i in range(len(array)) if array[i] >= value)\n splits = [next(bin_it(value), len(array)) for value in bins]\n return [list(range(start_idx, stop_idx)) for (start_idx, stop_idx)\n in zip([0] + splits, splits + [len(array)])]", "def longest_run(L):\r\n # save the current longest length for increasing run\r\n length_inc = []\r\n # save the current longest length for decreasing run\r\n length_dec = []\r\n # set the initial length to 1\r\n length_inc.append(1)\r\n length_dec.append(1)\r\n # save the result\r\n result_sum = 0\r\n # save the longest length\r\n longest_length = 0\r\n\r\n for i in range(1, len(L)):\r\n # assume the current longest length to 1\r\n length_inc.append(1)\r\n length_dec.append(1)\r\n # for increasing\r\n if L[i] >= L[i - 1]:\r\n length_inc[i] = length_inc[i - 1] + 1\r\n if length_inc[i] > longest_length:\r\n # update result\r\n longest_length = length_inc[i]\r\n result_sum = sum(L[i - longest_length + 1: i + 1])\r\n # for decreasing\r\n if L[i] <= L[i - 1]:\r\n length_dec[i] = length_dec[i - 1] + 1\r\n if length_dec[i] > longest_length:\r\n # update result\r\n longest_length = length_dec[i]\r\n result_sum = sum(L[i - longest_length + 1: i + 1])\r\n return result_sum", "def lengthOfLongestSubstring(s):\n arr = [1] * len(s)\n i = 0\n j = 1\n while j < len(s):\n if s[j] not in s[i:j]:\n arr[i] += 1\n j = j + 1\n else:\n i = i + 1\n j = i + 1\n return max(arr)", "def dynamic_programming(D):\n # Runtime: O(n^2)\n n = len(D)\n if n == 0:\n return 0\n longest = []\n for i in range(0, n):\n max_append = []\n for j in range(0, i):\n if D[i] >= D[j] and len(longest[j]) > len(max_append):\n max_append = longest[j]\n longest.append(max_append + [D[i]])\n\n return max(map(lambda s: len(s), longest))", "def label_to_range(label):\r\n C = int(label.max())\r\n arange = np.zeros((C+1,), dtype=np.int)\r\n cumsum = 0\r\n for i in xrange(C):\r\n cumsum += np.where(label == (i+1))[0].size\r\n arange[i+1] = cumsum\r\n return arange", "def bit_smarter(limit):\n c_lengths = {}\n\n for s in range(1, limit+1):\n c_lengths[s] = s_collatz_length(s, c_lengths)\n\n return max(c_lengths, key=lambda x: c_lengths[x])", "def get_lengths_from_binary_sequence_mask(self, mask: torch.Tensor):\n return mask.long().sum(-1)", "def bitonic_sort(l_arr):\n #assert is_power_of_2(len(l_arr))\n arr = l_arr[:] \n n = 0\n while 2**n < len(arr):\n temp_limit = n\n while(temp_limit >= 0):\n i = 0\n switch_compare = True\n switch_tracker = 0\n jump_tracker = 0\n gap = 2 ** temp_limit\n while i < len(arr) - gap:\n if (switch_compare):\n arr[i], arr[i+gap] = min(arr[i], arr[i+gap]), max(arr[i], arr[i+gap])\n else:\n arr[i+gap], arr[i] = min(arr[i], arr[i+gap]), max(arr[i], arr[i+gap])\n jump_tracker += 1\n switch_tracker += 1\n if(jump_tracker == gap):\n jump_tracker = 0\n i += gap\n if (switch_tracker == 2**n):\n switch_compare = not switch_compare\n i += 1\n temp_limit -= 1\n n += 1\n\n return arr", "def find_longest_ranges(range, howmany):\n range.sort(key=lambda x: x[1]) # sort by length\n if howmany > 1:\n range = range[-howmany:] # get last few\n range.sort(key=lambda x: x[0]) # sorted by starttime\n return range\n else:\n return range[-1]", "def solution(n: int) -> int:\n sizearr = n + 1\n\n # create zero-filled multi_arr\n multi_arr = [[0 for x in range(sizearr)] for n in range(sizearr)]\n\n # base value is always skipped after being padded\n 
multi_arr[0][0] = 1\n for last in range(1, sizearr):\n for next in range(0, sizearr):\n multi_arr[last][next] = multi_arr[last - 1][next]\n if next >= last:\n multi_arr[last][next] += multi_arr[last - 1][next - last]\n\n return multi_arr[n][n] - 1", "def get_max_indices(self, input):\n \n min_element = torch.min(torch.abs(input.contiguous().view(-1)))\n input_temp = input + min_element + self.epsilon\n masked_input_temp = input_temp * self.mask\n values, indices = torch.sort(masked_input_temp, dim=1, descending=True)\n\n return indices[:, :self.num_active_nodes,:]", "def lengthOfLIS(self, nums: List[int]) -> int:\n# time complexity: O(n^2), space complexity: O(n)\n# this is inspired by the solution provided by the question.\n# dp\n# the idea is to use a list longest to record say i-th element in nums, if as the last of the longest possible subsquence, how long the subsquence would be.\n \n\n# time complexity: O(nlogn), space complexity: O(n)\n# dp with binary search\n# the key idea is to use a list to store the longest possible sequence, but the element in the list is not necessarily correct. Every element say record_long[i] in the list means the end of longest subsequence of length i+1\n# this is inspired by @bolinq in the discussion area.\n import bisect\n record_long = []\n for num in nums:\n index = bisect.bisect_left(record_long, num)\n if index == len(record_long):\n record_long.append(num)\n else:\n record_long[index] = num\n \n return len(record_long)", "def max_change(arr):\n return np.max(arr) - np.min(arr)", "def __calculate_input_ranges(diagonal_num: int, input_seq_len: int, output_seq_len: int):\n min_len = min(input_seq_len, output_seq_len)\n max_len = max(input_seq_len, output_seq_len)\n assert 0 <= diagonal_num < min_len + max_len\n\n if diagonal_num < min_len:\n max_range = (0, diagonal_num + 1)\n min_range = max_range\n elif diagonal_num < max_len:\n max_range = (diagonal_num - (min_len - 1), diagonal_num + 1)\n min_range = (0, min_len)\n else:\n max_range = (diagonal_num - (min_len - 1), max_len)\n min_range = (diagonal_num - (max_len - 1), min_len)\n\n assert (max_range[1] - max_range[0]) == (min_range[1] - min_range[0])\n assert max_len >= max_range[1] > max_range[0] >= 0\n assert min_len >= min_range[1] > min_range[0] >= 0\n\n # determine which one is for the input and which one for the output\n if min_len == input_seq_len: # the input (vertical) is shorter or of equal length to the output\n input_range = min_range\n output_range = max_range\n else: # the output (horizontal) is shorter or of equal length to the input\n input_range = max_range\n output_range = min_range\n\n return input_range, output_range", "def dyadic_length_int(x):\n n = x.shape[0]\n return math.ceil(math.log2(n))", "def _get_minimal_lanes(self):\n return np.argwhere(self.end_of_lanes == np.min(self.end_of_lanes)).flatten()", "def _bucket_boundaries(max_length, min_length=8, length_bucket_step=1.1):\n assert length_bucket_step > 1.0\n x = min_length\n boundaries = []\n while x < max_length:\n boundaries.append(x)\n x = max(x + 1, int(x * length_bucket_step))\n return boundaries", "def binarize_labels(labels):\n labels = np.where(labels == 0, labels, 1)\n\n return labels", "def task4(x: List[List[int]]) -> int:\n ###\n ###\n row = len(x)\n max_circle = 0\n i = 0\n while i < row:\n for j in range(i, row):\n if x[i][j] != 1:\n if i == j - 1:\n max_circle += 1\n j += 1\n i = j - 1\n break\n if j == row - 1 and x[i-1][j] == 0:\n max_circle += 1\n if j == row - 1:\n i += 1\n\n return max_circle\n #a b c 
d", "def last_high(values):\n length = len(values)\n arr = np.zeros(length, dtype=np.int32)\n max_val = values[0]\n counter = 0\n for i in np.arange(1, length):\n if values[i] > max_val:\n max_val = values[i]\n counter = i\n arr[i] = counter\n return arr", "def _lcs(pred_tokens: Sequence[str], target_tokens: Sequence[str], return_full_table: bool=False) ->Union[int, Sequence[Sequence[int]]]:\n lcs = [([0] * (len(pred_tokens) + 1)) for _ in range(len(target_tokens) + 1)]\n for i in range(1, len(target_tokens) + 1):\n for j in range(1, len(pred_tokens) + 1):\n if target_tokens[i - 1] == pred_tokens[j - 1]:\n lcs[i][j] = lcs[i - 1][j - 1] + 1\n else:\n lcs[i][j] = max(lcs[i - 1][j], lcs[i][j - 1])\n if return_full_table:\n return lcs\n return lcs[-1][-1]", "def find_longest_plateau(seq):\n\n start_longest_so_far = 0\n length_longest_so_far = 0\n i = 0\n\n # INVARIANT\n # The longest plateau in seq[0:i] starts at position\n # start_longest_so_far and has a length of\n # length_longest_so_far\n # VARIANT: len(seq) - i\n #\n while len(seq) - i > length_longest_so_far:\n\n length_current_plateau = length_plateau_at(seq, i)\n\n if length_current_plateau > length_longest_so_far:\n start_longest_so_far = i\n length_longest_so_far = length_current_plateau\n\n i += length_current_plateau\n\n return start_longest_so_far", "def get_mode(numlist):\n count = np.bincount(numlist)\n return np.argmax(count)", "def find_max_val_unimodal_arr(unimodal_arr):\n arr = unimodal_arr\n maxfound = False\n if (len(arr) == 0):\n print('empty list')\n return -1\n\n center = math.floor(len(arr)/2)\n left = (math.floor(len(arr)/2)-1) if (math.floor(len(arr)/2)-1) >= 0 else 0\n right = (math.floor(len(arr)/2)+1) if (math.floor(len(arr)/2)+1) <= (len(arr)-1) else (len(arr)-1)\n\n if (len(arr) == 1):\n print('maximum value = ' + str(arr[center]))\n return arr[center]\n\n if (len(arr) == 2):\n print('maximum value = ' + str(arr[left] if arr[left] > arr[right] else arr[right]))\n return arr[left] if arr[left] > arr[right] else arr[right]\n\n while (not maxfound):\n if (arr[left] > arr[center]):\n arr = arr[:center]\n center = math.floor(len(arr)/2)\n left = (math.floor(len(arr)/2)-1) if (math.floor(len(arr)/2)-1) >= 0 else 0\n right = (math.floor(len(arr)/2)+1) if (math.floor(len(arr)/2)+1) <= (len(arr)-1) else (len(arr)-1)\n if (arr[right] > arr[center]):\n arr = arr[center:]\n center = math.floor(len(arr)/2)\n left = (math.floor(len(arr)/2)-1) if (math.floor(len(arr)/2)-1) >= 0 else 0\n right = (math.floor(len(arr)/2)+1) if (math.floor(len(arr)/2)+1) <= (len(arr)-1) else (len(arr)-1)\n if ((arr[right] <= arr[center]) and (arr[left] <= arr[center])):\n maxfound = True\n\n print('maximum value = ' + str(arr[center]))\n return arr[center]", "def unique_binning(t):\n diff= np.unique(t)\n diff= diff[1:] - diff[:-1]\n diff = np.min(diff)/2\n return np.digitize(t, np.hstack([np.unique(t) + diff]))", "def binarize_branches(bs):\n if len(bs) > 2:\n first, rest = bs[0], bs[1:]\n return [right_binarize(first), right_binarize(rest)]\n else:\n return [right_binarize(b) for b in bs]", "def brute_force(L):\n\n max_diff = -float(\"inf\")\n length = len(L)\n for i in range(length - 1):\n start = L[i]\n for j in range(i + 1, length):\n end = L[j]\n diff = end - start\n max_diff = max(max_diff, diff)\n return max_diff", "def recursive(a: tuple, i: int, j: int):\n if i == j: # if right bound == left bound then sequence is one element\n return 1, (a[i], )\n\n _pre_count, _pre_seq = recursive(a, i-1, j) # check if a[i] is continuation of previous 
max sequence\n if a[i] >= _pre_seq[-1]:\n return _pre_count + 1, _pre_seq + (a[i], )\n else:\n max_count = 1\n max_seq = (a[i],)\n for k in range(j, i): # if it's false - check all sequences between i and j\n tmp_count, tmp_seq = recursive(a, i-1, k) # from k to i-1\n if tmp_count+1 > max_count and a[i] >= tmp_seq[-1]: # find maximum\n max_count = tmp_count + 1\n max_seq = tmp_seq + (a[i], )\n\n for k in range(i):\n tmp_count, tmp_seq = recursive(a, k, 0) # and between 0 and i\n if tmp_count+1 > max_count and a[i] >= tmp_seq[-1]: # from 0 to k\n max_count = tmp_count + 1\n max_seq = tmp_seq + (a[i], )\n\n return (max_count, max_seq) if max_count > _pre_count else (_pre_count, _pre_seq)", "def longestPalindromeSubseq(self, s: str) -> int:\n n = len(s)\n dp = [[1] * n for _ in range(n)]\n for length in range(1, n + 1):\n for i in range(n - length + 1):\n j = i + length - 1\n print(i, j)\n if length == 1:\n dp[i][j] = 1\n elif s[i] == s[j]:\n dp[i][j] = dp[i + 1][j - 1] + 2\n else:\n dp[i][j] = max(dp[i][j - 1], dp[i + 1][j])\n return dp[0][n - 1]", "def lenLongestFibSubseq(self, arr: list[int]) -> int:\n dp = {}\n memo = set(arr)\n N = len(arr)\n for j in range(N):\n for i in range(j):\n a, b = arr[i], arr[j]\n if b - a < a and b - a in memo:\n dp[(a, b)] = dp.get((b - a, a), 2) + 1\n\n return max(dp.values() or [0])", "def ranges(int_list):\n begin = 0\n end = 0\n\n ranges = []\n\n for i in int_list:\n # At the start of iteration set the value of\n # `begin` and `end` to equal the first element\n if begin == 0:\n begin = i\n end = i\n # Set the current element as the value of `end`\n # as long as the array is in sequence\n elif i-1 == end:\n end = i\n # Reset flags to current element when iterating through\n # multiple integers that are of broken sequence\n elif begin == end:\n begin = i\n end = i\n else:\n # Sequence of array has been broken, append current range\n # to `ranges` and set the value of `begin and `end` flags to\n # equal the current element\n ranges.append(\"{0}->{1}\".format(begin, end))\n begin = i\n end = i\n # Grab the last range from the array\n if begin != end:\n ranges.append(\"{0}->{1}\".format(begin, end))\n\n return ranges", "def lengthOfLIS(self, nums: List[int]) -> int:\n n = len(nums)\n F = [0] * n\n \n F[0] = 1\n for i in range(1, n):\n sub_lengths = [0]\n for j in range(0, i):\n if nums[j] < nums[i]:\n sub_lengths.append(F[j])\n F[i] = max(sub_lengths) + 1\n \n return max(F)", "def min_max_range(s):\n # note np.argmax, np.argmin returns the position of first occurence of global max, min\n sign = np.sign(np.argmax(s) - np.argmin(s))\n if sign == 0:\n return 0.0\n else:\n return sign*(np.max(s) - np.min(s))", "def find_longest_line(lines, img_shape):\n _img = np.zeros(img_shape)\n _img = draw_lines(_img, lines)\n vertical_kernel = np.ones((100, 5))\n _img = dilation(_img, vertical_kernel)\n labeled, num_segments = label(_img, return_num=True)\n\n lengths = []\n for i in range(1, num_segments + 1):\n y, x = np.where(labeled == i)\n lengths.append((abs(max(y) - min(y)), max(y), min(y), int(np.mean(x))))\n\n lengths = sorted(lengths, reverse=True, key=lambda x: x[0])\n return lengths[0] if len(lengths) else None", "def high_count(values):\n length = len(values)\n arr = np.zeros(length, dtype=np.int16)\n count = 0\n max_val = values[0]\n for i in np.arange(1, length):\n if values[i] > max_val:\n max_val = values[i]\n count += 1\n arr[i] = count\n return arr" ]
[ "0.6214653", "0.61803985", "0.61673665", "0.6140894", "0.60812706", "0.60509235", "0.60332197", "0.6032968", "0.59139353", "0.5893834", "0.5799357", "0.5777712", "0.57581043", "0.57554233", "0.57196367", "0.56999797", "0.5694365", "0.5683879", "0.56704044", "0.56537473", "0.5638462", "0.5632882", "0.5608549", "0.559905", "0.55871063", "0.55709624", "0.55658615", "0.55631924", "0.5548492", "0.55396307", "0.5499407", "0.5493024", "0.54881114", "0.54788935", "0.5477424", "0.5470147", "0.5462698", "0.54605174", "0.5454699", "0.54432553", "0.5435182", "0.54199725", "0.5395857", "0.53947276", "0.5393842", "0.5383086", "0.53791994", "0.5377439", "0.53572327", "0.5349417", "0.5346848", "0.5337621", "0.53350604", "0.5334493", "0.53341603", "0.53305584", "0.5327524", "0.531544", "0.53073853", "0.53044724", "0.5283841", "0.5278611", "0.52768594", "0.52726763", "0.52725136", "0.5271499", "0.527136", "0.52688664", "0.5268458", "0.52547354", "0.5250829", "0.5250672", "0.52281386", "0.52241933", "0.5221245", "0.5211465", "0.52080256", "0.5191676", "0.5181015", "0.51775163", "0.51720923", "0.5170334", "0.5169583", "0.5169108", "0.5165933", "0.51504827", "0.5133831", "0.51282203", "0.5127793", "0.51156586", "0.51142895", "0.51105046", "0.51098096", "0.51093", "0.5099028", "0.50959843", "0.5094785", "0.50939727", "0.5091514", "0.50908464" ]
0.6188714
1
from range of count_ranges, return the 'howmany' longest ranges
def find_longest_ranges(range, howmany):
    range.sort(key=lambda x: x[1]) # sort by length
    if howmany > 1:
        range = range[-howmany:] # get last few
        range.sort(key=lambda x: x[0]) # sorted by starttime
        return range
    else:
        return range[-1]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def overlap_len(range1, range2):\n return min(range1[1], range2[1]) - max(range1[0], range2[0])", "def count_ranges(a):\n ranges = []\n count = 0\n for i, v in enumerate(a):\n if v == 1: # same as previous value\n count += 1\n else:\n if count > 1:\n ranges.append([i, count]) # [end, length]\n count = 0\n return ranges", "def find_best_point(self, start_i, end_i, ranges):\n maxLenIdx = 0\n maxLen = 0\n for i in range(ranges):\n if ranges[i] > maxLen:\n maxLen = ranges[i]\n maxLenIdx = i\n return maxLenIdx", "def find_max_gap(self, free_space_ranges):\n # mask the bubble\n masked = np.ma.masked_where(free_space_ranges==0, free_space_ranges)\n # get a slice for each contigous sequence of non-bubble data\n slices = np.ma.notmasked_contiguous(masked)\n max_len = slices[0].stop - slices[0].start\n chosen_slice = slices[0]\n # I think we will only ever have a maximum of 2 slices but will handle an\n # indefinitely sized list for portablility\n for sl in slices[1:]:\n sl_len = sl.stop - sl.start\n if sl_len > max_len:\n max_len = sl_len\n chosen_slice = sl\n return chosen_slice.start, chosen_slice.stop", "def longest_sequence(start=1, end=1000000):\n\n max_length = 0\n max_start_value = 0\n\n # generate sequence for each value\n for i in range(start, end):\n current = generate_sequence(i)\n\n # if the current sequence is the longest, update values\n if len(current) > max_length:\n max_length = len(current)\n max_start_value = i\n\n return max_length, max_start_value", "def get_lengths(auswahl):\n # list() for python3 compat.\n return list(map(max, list(zip(*[map(len, one) for one in auswahl]))))", "def find_max_gap(self, free_space_ranges):\n start = end = 200\n curr_start = 200\n #print(free_space_ranges)\n for i in range(201, 880):\n if free_space_ranges[i] != 0:\n if free_space_ranges[i-1] == 0:\n curr_start = i\n else:\n if (i-curr_start) > end-start:\n start = curr_start\n end = i\n return start, end", "def find_long_runs(num_sequence, l):\n chunked = [(k, list(g)) for k, g in itertools.groupby(num_sequence)]\n retval = [(i, len(g)) for i, (k, g) in enumerate(chunked) if k and len(g) > l]\n return retval", "def findMaxLength(self, nums):\n dict1 = dict()\n count = 0\n maxlen = 0\n for i in range(len(nums)):\n if nums[i] == 1:\n count = count + 1\n else:\n count = count - 1\n\n if count == 0:\n maxlen = max(maxlen, i + 1)\n if count not in dict1:\n dict1[count] = i\n else:\n maxlen = max(maxlen, i - (dict1.get(count)))\n return maxlen", "def count_to_len(X:np.array, Max:int=10, Min:int=2):\n return np.interp(X, (X.min(), X.max()), (Max, Min))", "def get_sequence_lengths( widths ): \n seq_len = (widths - 2) / 8\n return seq_len", "def _getLongestLength(self, listOfLists):\n\t\tmax = -1\n\t\tfor list in listOfLists:\n\t\t\tif len(list) > max:\n\t\t\t\tmax = len(list)\n\t\treturn max", "def stab_the_num(intervals):\r\n n = len(intervals)\r\n points = []\r\n\r\n left_points = []\r\n right_points = []\r\n for i, j in intervals:\r\n left_points.append(i)\r\n right_points.append(j)\r\n\r\n count = 0\r\n points.append(right_points[0])\r\n for i in range(1, n):\r\n if left_points[i] > points[count]:\r\n count += 1\r\n points.append(right_points[i])\r\n\r\n return points", "def total_range_size(self) -> int:\n if not len(self):\n return 0\n regions = merge(self.data, bp=1)\n return regions.end.sum() - regions.start.sum()", "def part_2(ranges: 'RangeSet', total_ips_count: int = 1 << 32) -> int:\n\n allowed_count = total_ips_count - len(ranges)\n print(f\"part 2: there are total {allowed_count} 
allowed IPs\")\n return allowed_count", "def _number_of_intervals(self):\n return self._number_of_levels - 1", "def lenRange(start, stop, step=1):\n return (stop - start + step - 1 + 2 * (step < 0)) // step", "def bu(lengths: List[int], L: int) -> int:\n N = len(lengths)\n dp = [0] + [-1]*L\n for l in lengths:\n for j in range(l, L+1):\n dp[j] = max(dp[j], dp[j-l]+1 if dp[j-l] != -1 else -1)\n return dp[-1]", "def max_total_length(murals):\n if not murals:\n return 0\n\n no_overlap = []\n for mural in murals:\n if mural[1] <= murals[0][0] or mural[0] >= murals[0][1]:\n no_overlap.append(mural)\n\n value = murals[0][1] - murals[0][0]\n del murals[0]\n return max(value + max_total_length(no_overlap), max_total_length(murals))", "def time_interval(intervals: List[Tuple[int, int]]) -> int:\n\n start = []\n finish = []\n for elems in intervals:\n start.append(elems[0])\n finish.append(elems[1])\n\n # Sorting the start and end times separately\n start.sort()\n finish.sort()\n\n index1, index2 = 0, 0\n current_rooms = 0\n max_rooms = 0\n # The logic below is, we add each room when current finish time is greater than current start time\n # This informs us whether how many times are collapsing with each other at any time\n while (index1 < len(start)) and (index2 < len(finish)):\n if start[index1] < finish[index2]:\n current_rooms += 1\n index1 += 1\n else:\n index2 += 1\n max_rooms = max(max_rooms, current_rooms)\n current_rooms -= 1\n max_rooms = max(max_rooms, current_rooms)\n return max_rooms", "def brute(limit):\n c_lengths = {s: collatz_length(s) for s in range(1, limit+1)}\n return max(c_lengths, key=lambda x: c_lengths[x])", "def longest_run(L):\n\tlongest_length = 1\n\tincreasing_length = 1\n\tdecreasing_length = 1\n\tfor i in range(len(L) - 1):\n\t\tif L[i] >= L[i+1]:\n\t\t\tdecreasing_length += 1\n\t\telse:\n\t\t\tdecreasing_length = 1\n\t\tif L[i] <= L[i+1]:\n\t\t\tincreasing_length += 1\n\t\telse:\n\t\t\tincreasing_length = 1\n\t\tif increasing_length > longest_length:\n\t\t\tlongest_length = increasing_length\n\t\t\trun_end = i + 1\n\t\telif decreasing_length > longest_length:\n\t\t\tlongest_length = decreasing_length\n\t\t\trun_end = i + 1\n\n\treturn sum(L[run_end - longest_length + 1 : run_end+1])", "def howmany_within_range(row, minimum, maximum):\n count = 0\n for n in row:\n if minimum <= n <= maximum:\n count = count + 1\n return count", "def N_states_for_learner(self):\n idx_max = []\n limits = 50, 2*_math.pi, 50, 50, 50, 50, 50, 50, 50\n for idx, limit in enumerate(limits):\n test = [0 for i in xrange(len(limits))]\n check = _arange(-limit,limit,limit/1000.)\n maxi = 0\n for v in check:\n test[idx]=v\n ret = self._state_index(*test)\n maxi = max((maxi, ret[idx]))\n idx_max.append(maxi)\n\n return tuple([idx+1 for idx in idx_max])", "def get_seq_lenght(seq_arry, end_symbol):\n scale_arry = np.argmax(seq_arry, axis=2) + np.sum(seq_arry, axis=2)\n end_symbol_scale = np.argmax(end_symbol) + np.sum(end_symbol)\n cond = (scale_arry != end_symbol_scale).astype(np.int)\n lens = cond.sum(axis=1)\n return lens", "def get_long_len(nums):\n return len(str(max(nums + [sum(nums)])))", "def range_overlap(ranges):\n max_left = 0.0\n min_right = 1.0\n for (left, right) in ranges:\n max_left = max(max_left, left)\n min_right = min(min_right, right)\n return (max_left, min_right)", "def length_range_for_entropy(entropy):\n min_length = 3\n max_length = min_length + int(entropy / 2)\n return min_length, max_length", "def longest(self):\n cps = collections.Counter()\n for crd in self:\n cps += 
collections.Counter( {crd.suit} )\n return sorted(cps.items(), reverse=True, key=lambda x:x[1])", "def maximumGap(self, nums: List[int]) -> int:\r\n n = len(nums)\r\n if n < 2: return 0 \r\n l, r = min(nums), max(nums)\r\n if r - l == 0: return 0 \r\n gap_instance = max(1, (r - l) // n)\r\n gapcnts = math.ceil((r - l + 1) / gap_instance)\r\n buckets = [[-1, -1] for _ in range(gapcnts)] \r\n calpos = lambda num: (num - l) // gap_instance\r\n\r\n for num in nums:\r\n pos = calpos(num)\r\n if num < buckets[pos][0] or buckets[pos][0] == -1:\r\n buckets[pos][0] = num \r\n if num > buckets[pos][1] or buckets[pos][1] == -1:\r\n buckets[pos][1] = num \r\n\r\n ans, pre = 0, l\r\n for small, large in buckets:\r\n if small == -1:\r\n continue \r\n else:\r\n ans = max(small - pre, ans)\r\n pre = large\r\n return ans", "def count_values_in_range(data: np.ndarray, ranges: np.ndarray) -> np.ndarray:\n\n results = np.full((data.shape[0], ranges.shape[0]), 0)\n for i in prange(data.shape[0]):\n for j in prange(ranges.shape[0]):\n lower_bound, upper_bound = ranges[j][0], ranges[j][1]\n results[i][j] = data[i][\n np.logical_and(data[i] >= lower_bound, data[i] <= upper_bound)\n ].shape[0]\n return results", "def lengthOfLIS(self, nums):\n n = len(nums)\n if n <= 1:\n return n\n\n max_len = 0\n\n dp = [0] * n\n for i, num in enumerate(nums):\n if i == 0:\n dp[0] = 1\n max_len = 1\n else:\n prev_max = 0\n for j in xrange(i):\n if nums[j] < num:\n prev_max = max(prev_max, dp[j])\n dp[i] = prev_max + 1\n max_len = max(max_len, dp[i])\n\n return max_len", "def find_optimal_segmentation_length(self,blength,seg_range):\n\t\tseg_range = self.generate_seg_range(seg_range)\n\t\teasiestfit = seg_range[0]\n\t\tremainder = np.max(seg_range)\n\t\tfor segr in seg_range:\n\t\t\tremains = blength - math.floor(blength/segr)*segr\n\t\t\tif remains<remainder:\n\t\t\t\tremainder=remains\n\t\t\t\teasiestfit=segr\n\t\t\n\t\tseg_lengths = [easiestfit for i in range(math.floor(blength/easiestfit))]\n\t\treturn([segment+remainder/len(seg_lengths) for segment in seg_lengths])", "def longest_break(tracker_data):\n max_gap = 0\n gap_start = None\n gap_end = None\n previous = None\n for entry in tracker_data:\n entry_day = str_to_date(entry[1])\n if previous:\n gap = (entry_day - previous).days\n if gap > max_gap:\n max_gap = gap\n gap_start = previous\n gap_end = entry_day\n previous = entry_day\n return max_gap, gap_start, gap_end", "def calcOverlap(intervals):\n bp = 0 \n for i in intervals:\n bp += sum([overlapCases(i, j) for j in intervals])\n return(bp)", "def count_lorentz(fit_range, lorentz_array_2d):\n counter = 0\n for i in range(0, lorentz_array_2d.shape[0]):\n f0 = lorentz_array_2d[i][1]\n if f0 > fit_range[1] and f0 < fit_range[2]:\n counter += 1\n return counter", "def longest(self, n):\n return big_tags", "def longest_word_length(words):", "def longest_sequence_seed(ubound):\n max_seq_seed = 1\n max_seq_len = 1\n for seed in range(1, ubound):\n seq_len = sum(1 for t in collatz_sequence(seed))\n if seq_len > max_seq_len:\n max_seq_len = seq_len\n max_seq_seed = seed\n return max_seq_seed, max_seq_len", "def chromosome_lengths(self):\n chr_lens = {}\n for r in self.regions(lazy=True):\n if chr_lens.get(r.chromosome) is None:\n chr_lens[r.chromosome] = r.end\n continue\n if r.end > chr_lens[r.chromosome]:\n chr_lens[r.chromosome] = r.end\n return chr_lens", "def summarize_ranges(self, ranges):\n if len(ranges) == 0: return []\n min_ = 'min'\n max_ = 'max'\n for r in ranges:\n if r[0][0] == \"min\":\n r[0][0] = min_\n else:\n 
min_ = r[0][0]\n if r[-1][1] == \"max\":\n r[-1][1] = max_\n else:\n max_ = r[-1][1]\n return ranges[-1]", "def maxWidthRamp(self, nums: list[int]) -> int:\n maxWidth = 0\n descStack = []\n\n # Generate decreasing stack.\n for i, num in enumerate(nums):\n if not descStack or nums[descStack[-1]] > num:\n descStack.append(i)\n\n # Check elements from right to left.\n for j in reversed(range(len(nums))):\n while descStack and nums[descStack[-1]] <= nums[j]:\n maxWidth = max(maxWidth, j - descStack.pop())\n\n return maxWidth", "def merge_ranges():", "def longestCommomSubsequence(self, arrays: List[List[int]]) -> List[int]:\n counts = Counter(val for arr in arrays for val in arr)\n res = []\n for val, count in counts.items():\n if count == len(arrays): res.append(val)\n return res", "def get_max_combination(total_cuts):\n max_pieces = 0\n for i in range(total_cuts):\n result = i * (total_cuts - i)\n if result > max_pieces:\n max_pieces = result\n print(max_pieces)", "def find_range(reduced_dist_word_dim, range_limits):\n n_limits = len(range_limits)\n for limit in range(n_limits - 1):\n if (reduced_dist_word_dim > range_limits[limit]) and (reduced_dist_word_dim < range_limits[limit + 1]):\n return limit\n raise ValueError", "def interval_cardinality(self):\n return len(list(self.lower_contained_intervals()))", "def interval_MAX_SMT(intervals):\n lower_indices = np.argsort(intervals[:, 0])\n lower_sorted = intervals[lower_indices, 0]\n\n upper_indices = np.argsort(intervals[:, 1])\n upper_sorted = intervals[upper_indices, 1]\n\n best_lower, best_upper = 0, 0\n upper_i = 0\n best_met = -1\n n_met = 0\n for lower_i, lower in enumerate(lower_sorted):\n # First, we update upper -- everything in this loop is an interval\n # we were meeting before but not anymore.\n while upper_sorted[upper_i] < lower:\n n_met -= 1\n upper_i += 1\n # We now meet the interval that this lower is from.\n n_met += 1\n if n_met > best_met:\n best_lower, best_upper = lower, upper_sorted[upper_i]\n best_met = n_met\n elif (len(lower_sorted) - lower_i) < (best_met - n_met):\n # Each iteration adds *at most* 1 to n_met. 
For us to even have\n # a chance of updating best_met, then, we will have to do at\n # least (best_met - n_met) more iterations.\n break\n return best_lower, best_upper, best_met", "def calculate_number_of_guesses(self, range):\r\n # Python 2.7.3: math.ceil() is a float\r\n # CodeSculptor: math.ceil() is an integer\r\n return int(math.ceil(math.log(range,2)))", "def number_of_distances(number_of_sequences):\n return math.factorial(number_of_sequences)/(math.factorial(2)*math.factorial(number_of_sequences-2))", "def convertRanges2ExpandedRanges( self, ranges, max_length ):\n \n new_ranges = []\n \n for r in ranges:\n new_ranges.append( ( max(int(r[0] * self.resolution), 0),\n min(int(r[1] * self.resolution), max_length)) )\n return new_ranges", "def functorRangeSize(functor):\n return len(functorRange(functor))", "def get_desired_count(value, lower, upper):\n if lower != -1 and value < lower:\n return lower\n if upper != -1 and value > upper:\n return upper\n return value", "def twoMaxs(lnp):\n\tindex1 = 0\n\tindex2 = 0\n\tcnt = 0\n\tmaxArea = 0\n\tmaxArea2 = 0\n\tfor (ex, ey, ew, eh) in lnp:\n\t\tif(ew * eh >= maxArea):\n\t\t\tindex1 = cnt\n\t\t\tmaxArea = ew * eh\n\t\tcnt += 1\n\t\n\n\tcnt = 0\n\tfor (ex, ey, ew, eh) in lnp:\n\t\tif(index1 == cnt):\n\t\t\tcnt += 1\n\t\t\tcontinue\n\t\tif(ew * eh >= maxArea2):\n\t\t\tindex2 = cnt\n\t\t\tmaxArea2 = ew * eh\n\t\tcnt +=1\n\t\n\treturn (index1, index2)", "def get_rangelist(start, end, count):\n if start is not None and end is not None:\n if count != 0 and not (start == 0 and count < end):\n start = int(start)\n end = int(end)\n cnt = end - start\n rangelist = []\n div = int(start) / count + 1\n multiple = round(div, 0)\n start_range = int(count * multiple)\n n = 1\n for itr in range(0, start_range + count, (end - start)):\n if itr < count:\n rangelist.append([itr, itr + cnt, n])\n n += 1\n return rangelist\n return []", "def get_nine_ranges(six_indexes, length_int_list):\n result = []\n for i, six_index in enumerate(six_indexes): # [(0,3), (1,7)]\n tup = ()\n if (i+1) < len(six_indexes): # we have not reached the end of six_indexes yet\n tup = (six_index, (six_index+1, six_indexes[i+1]-1)) # (3, (4,6))\n else: # if we reached the end of six_indexes list\n tup = (six_index, (six_index + 1, length_int_list-1)) # (7,(8,11))\n result.append(tup)\n return result # [(3,(4,6)), (7,(8,11))]", "def count_segments(markers) -> int:\n cnt = Counter()\n for row in markers:\n cnt.update(row)\n n_cnt = dict(takewhile(lambda x: x[1] >= 10, cnt.most_common()))\n del n_cnt[1]\n del n_cnt[-1]\n return len(n_cnt.keys())", "def longest_streak(tracker_data):\n max_streak = 0\n streak = 0\n streak_start = None\n streak_end = None\n current_start = None\n last_day = None\n for entry in tracker_data:\n entry_day = str_to_date(entry[1])\n next_day = (entry_day - timedelta(days=1))\n if last_day == next_day:\n streak += 1\n else:\n streak = 1\n current_start = entry_day\n if streak > max_streak:\n max_streak = streak\n streak_start = current_start\n streak_end = entry_day\n last_day = entry_day\n return max_streak, streak_start, streak_end", "def find_max_interval_praat(sound, interval_list):\n\n max_intensity = None\n max_intensity_index = None\n\n max_length = None\n max_length_index = None\n\n # Finding interval with highest intensity and the longest interval.\n\n for index, (begin_sec, end_sec, _) in enumerate(interval_list):\n\n intensity = sound.get_interval_intensity(begin_sec, end_sec)\n length = end_sec - begin_sec\n\n if max_intensity == None or intensity > 
max_intensity:\n max_intensity = intensity\n max_intensity_index = index\n\n if max_length == None or length > max_length:\n max_length = length\n max_length_index = index\n\n return (max_intensity_index, max_intensity, max_length_index, max_length)", "def lengthOfLIS(self, nums: List[int]) -> int:\n n = len(nums)\n F = [0] * n\n \n F[0] = 1\n for i in range(1, n):\n sub_lengths = [0]\n for j in range(0, i):\n if nums[j] < nums[i]:\n sub_lengths.append(F[j])\n F[i] = max(sub_lengths) + 1\n \n return max(F)", "def f_get_range_length(self):\n if not self.f_has_range():\n raise TypeError(\"Not applicable, parameter does not have a range\")\n elif hasattr(self, \"__len__\"):\n return len(self)\n else:\n raise NotImplementedError(\"Should have implemented this.\")", "def find_readcount_on_islands(island_start_list, island_end_list, tag):\n\t\n\tindex = bisect.bisect_right(island_start_list, tag);\n\tif index - bisect.bisect_left(island_end_list, tag) == 1:\n\t\treturn index-1;\n\telse:\n\t\treturn -1;", "def _lcs_len(a, b):\n dp = _lcs_dp(a, b)\n return dp[-1][-1]", "def intervalLen(self):\n return self.end-self.start+1", "def get_length_of_longest_sub_array(l):\n if len(l) < 1:\n return 0\n\n longest_seen_sequence = 0\n\n this_sequence_length = 1\n\n previous = l[0]\n\n for _, current in enumerate(l):\n\n if current > previous:\n this_sequence_length = this_sequence_length + 1\n\n if this_sequence_length > longest_seen_sequence:\n longest_seen_sequence = this_sequence_length\n\n else:\n this_sequence_length = 1\n\n if this_sequence_length > longest_seen_sequence:\n longest_seen_sequence = this_sequence_length\n\n previous = current\n\n return longest_seen_sequence", "def find_histogram_range(histogram):\n size = len(histogram)\n min_i = 0\n while min_i < size:\n if histogram[min_i] > 0:\n break\n min_i += 1\n\n max_i = size - 1\n while max_i >= 0:\n if histogram[max_i] > 0:\n break\n max_i -= 1\n return min_i, max_i", "def get_longest_all_primes(lst):\n return get_longest_subsequence_with_property(lst, is_list_of_primes)", "def most_words_and_longest(self, n):\n return big_tags", "def find_longest(input):\r\n for thing in input:\r\n print thing\r\n dist_array = [[0 for x in range(rows)] for x in range(cols)] # rows and cols are static variables in main method\r\n for x in xrange(0, len(input), 1):\r\n for y in xrange(0, len(input[x]), 1):\r\n dist_array[x][y] = calculate_longest(dist_array, input, x, y)\r\n for item in dist_array:\r\n print item\r\n return max(max(dist_array))", "def _bucket_boundaries(max_length, min_length=8, length_bucket_step=1.1):\n assert length_bucket_step > 1.0\n x = min_length\n boundaries = []\n while x < max_length:\n boundaries.append(x)\n x = max(x + 1, int(x * length_bucket_step))\n return boundaries", "def get_uniq_len_count(lengths, all_lengths):\n all_lengths = np.sort(all_lengths) # sort array of all possible lengths\n bins = np.append(all_lengths, all_lengths[-1] + 1)\n return np.histogram(lengths, bins)[0]", "def lenLongestFibSubseq(self, arr: list[int]) -> int:\n dp = {}\n memo = set(arr)\n N = len(arr)\n for j in range(N):\n for i in range(j):\n a, b = arr[i], arr[j]\n if b - a < a and b - a in memo:\n dp[(a, b)] = dp.get((b - a, a), 2) + 1\n\n return max(dp.values() or [0])", "def howmany_sequences(listOfTuples):\r\n #initialize number of pairs as 0\r\n pairs = 0\r\n #count pairs\r\n for n in listOfTuples:\r\n pairs += 1\r\n k = 1\r\n #find number of initial sequences \r\n while k*(k-1) != pairs*2:\r\n k += 1\r\n return(k)", "def 
dynamic_programming(D):\n # Runtime: O(n^2)\n n = len(D)\n if n == 0:\n return 0\n longest = []\n for i in range(0, n):\n max_append = []\n for j in range(0, i):\n if D[i] >= D[j] and len(longest[j]) > len(max_append):\n max_append = longest[j]\n longest.append(max_append + [D[i]])\n\n return max(map(lambda s: len(s), longest))", "def find_max_with_count(A):\n\n def frmax(lo, hi):\n \"\"\"Use recursion to find maximum value in A[lo:hi+1] incl. count\"\"\"\n if lo == hi: return (0, A[lo])\n\n mid = (lo+hi)//2\n ctleft,left = frmax(lo, mid)\n ctright,right = frmax(mid+1, hi)\n return (1+ctleft+ctright, max(left, right))\n\n return frmax(0, len(A)-1)", "def count_suboptimal_atom_positions(self, lowerBound, upperBound):\n counter = 0\n for i in range(self.conformer.GetNumAtoms()):\n center = self.conformer.GetAtomPosition(i)\n point = [center.x, center.y, center.z]\n surroundingLow = self.kd_tree.query_ball_point(point, lowerBound)\n surroundingHigh = self.kd_tree.query_ball_point(point, upperBound)\n\n if len(surroundingHigh) - len(surroundingLow) > 0:\n counter += 1\n\n return counter / 2", "def count_passwords_part2(interval: tuple,\n n_digits: int = 6\n ) -> int:\n lower = interval[0]\n upper = interval[1]\n\n # Check limits are 6 digit numbers\n if len(str(lower)) > n_digits or len(str(upper)) < n_digits:\n print('No {} digits numbers in that interval!'.format(n_digits))\n return 0\n\n if len(str(lower)) < n_digits:\n lower = int('1'*n_digits)\n print('Lower limit less than {n} digits! Using {l}'.format(\n n=n_digits, l=lower))\n\n if len(str(upper)) > 6:\n upper = int('9'*n_digits)\n print('Upper limit bigger than {n} digits! Using {u}'.format(\n n=n_digits, u=upper))\n\n # Check all numbers in the interval\n n = 0\n for num in range(lower, upper+1):\n\n # Split digits into a list\n num_str = str(num)\n num_list = [int(num_str[i]) for i in range(len(num_str))]\n\n # Get difference of consecutive numbers\n num_diff = [j-i for i, j in zip(num_list[:-1], num_list[1:])]\n\n # No decreasing numbers\n if all([i >= 0 for i in num_diff]):\n\n # Mark jumps as 1's\n jumps = [(1 if i > 0 else 0) for i in num_diff]\n\n if (jumps[0] == 0 and jumps[1] == 1):\n # 2 consecutive digits at the beginning\n n = n+1\n elif (jumps[-2] == 1 and jumps[-1] == 0):\n # 2 consecutive digits at the end\n n = n+1\n elif '101' in ''.join(map(str, jumps)):\n # 2 consecutive digits in the middle\n n = n+1\n\n return n", "def __get_max_indexes(num_list, number):\n result = []\n\n num_list = np.array(num_list)\n result = num_list.argsort()[-number:][::-1]\n\n return result", "def get_max_divisible_subset_length(self, nums):\n if not nums:\n return 0\n \n max_lengths = [1]\n max_length = 1\n\n for i in range(1, len(nums)):\n max_length_here = 1\n for j in range(i - 1, -1, -1):\n if nums[i] % nums[j] == 0:\n max_length_here = max(max_length_here, 1 + max_lengths[j])\n max_lengths.append(max_length_here)\n max_length = max(max_length, max_length_here)\n \n return max_length", "def NumberOfMappingIPV4Ranges(self):\r\n\t\treturn self._get_attribute('numberOfMappingIPV4Ranges')", "def maxs(self):\n return self.intervals[:, 1]", "def largest_island(grid: list[list[int]]) -> int:\n rows = len(grid)\n cols = len(grid[0])\n\n visited = [[False for _ in range(cols)] for _ in range(rows)]\n max_island_size = 0\n for i in range(rows):\n for j in range(cols):\n if grid[i][j] == 1 and not visited[i][j]:\n island_size = flood_island(grid, i, j, visited)\n max_island_size = max(max_island_size, island_size)\n\n return max_island_size", 
"def get_longest_all_primes(lst: list[int]):\n subsecventa_max1 = []\n for i in range(len(lst)):\n for j in range(len(lst)):\n if toate_elementele_prime(lst[i:j + 1]) and len(lst[i:j + 1]) > len(subsecventa_max1):\n subsecventa_max1 = lst[i:j + 1]\n return subsecventa_max1", "def expand_ranges(ranges):\n for low, high in low_high_pairs:\n for j in range(low, high+1):\n yield j", "def generate_binned_values( lower_lim, upper_lim, chr_length, snps_per_chr, indels_per_chr, resolution ):\n\t\n\tsnp_data = []\n\tindel_data = []\n\twhile True:\n\t\tif upper_lim >= chr_length:\n\t\t\tbreak\n\t\telse:\n\t\t\tsnp_tmp = []\n\t\t\tindel_tmp = []\n\t\t\tfor SNP in snps_per_chr:\n\t\t\t\tif SNP <= upper_lim and SNP > lower_lim:\n\t\t\t\t\tsnp_tmp.append( 'X' )\n\t\t\tfor indel in indels_per_chr:\n\t\t\t\tif indel <= upper_lim and indel > lower_lim:\n\t\t\t\t\tindel_tmp.append( 'X' )\n\t\t\tsnp_data.append( len( snp_tmp ) )\n\t\t\tindel_data.append( len( indel_tmp ) )\n\t\tupper_lim += resolution\n\t\tlower_lim += resolution\n\treturn max( snp_data ), max( indel_data ), snp_data, indel_data", "def length_aln_on_sequence(start, end):\n return end - start + 1", "def largest_cc_size(ugraph):\r\n\ttotal_list = cc_visited(ugraph)\r\n\tmax_length_list = []\r\n\tfor each_list in total_list:\r\n\t\tif len(max_length_list) < len(each_list):\r\n\t\t\tmax_length_list = each_list\r\n\treturn len(max_length_list)", "def get_max_lb(self):\n max_lb = 0\n for arc in self.arc_info.keys():\n if self.arc_info[arc][\"lower_bound\"] > max_lb:\n max_lb = self.arc_info[arc][\"lower_bound\"]\n return max_lb", "def find_max_min(number):\n if max(number) == min(number):\n return [len(number)]\n return [min(number), max(number)]", "def arange_sequence(ranges: Tensor) -> Tensor:\n maxcnt = torch.max(ranges).item()\n numuni = ranges.shape[0]\n complete_ranges = torch.arange(maxcnt, device=ranges.device).unsqueeze(0).expand(numuni, -1)\n\n return complete_ranges[complete_ranges < ranges.unsqueeze(-1)]", "def longest_ORF_noncoding(dna, num_trials):\n longest=[]\n for i in range(0,num_trials):\n \tshuffled_str=shuffle_string(dna)\n \tlongest.append(longest_ORF(shuffled_str))\n long_ORF=max(longest,key=len)\n return len(long_ORF)", "def counting_sort(numbers):\n # TODO: Find range of given numbers (minimum and maximum integer values)\n min_num = min(numbers)\n max_num = max(numbers) + 1\n\n # TODO: Create list of counts with a slot for each number in input range\n # create list with a length of the value of max_num\n count_list = [0] * max_num\n\n # TODO: Loop over given numbers and increment each number's count\n # make the counted list by incrementing each instance of a number count\n for i in numbers:\n count_list[i] += 1\n\n # TODO: Loop over counts and append that many numbers into output list\n comp = 0\n for i in range(0, max_num):\n # make a temporary copy of the count_list index value\n temp = count_list[i]\n # set current loop instance index value to the comp value\n count_list[i] = comp\n # set comp value to temp value\n comp += temp\n\n # create result list with len of original list with no value\n result = [0] * len(numbers)\n for i in numbers:\n # for each number in list, set the corrisponding\n # result list index to the value of the instance\n result[count_list[i]] = i\n # move to the next index in the count_list for next instance comparison\n count_list[i] += 1\n # print(result)\n return result\n # FIXME: Improve this to mutate input instead of creating new output list", "def n_doubled(intervals):\n i = 0\n for 
interval in intervals.intervals:\n if not Interval.is_degenerated(interval):\n i += 1\n return i", "def get_max_width(binary_mask):\n start_px = 0\n end_px = 0\n\n for i, row in enumerate(binary_mask):\n max = np.argmax(row)\n if max > 0:\n start_px = i\n break\n\n for i, row in enumerate(binary_mask[::-1]):\n max = np.argmax(row)\n if max > 0:\n end_px = i\n break\n\n return binary_mask.shape[0] - start_px - end_px", "def lengthOfLIS(self, nums):\n def _binsearch(lst, target):\n lo, hi = 0, len(lst)\n while lo < hi:\n mid = (lo+hi) // 2\n \n if lst[mid] < target:\n lo = mid+1\n else:\n hi = mid\n return lo\n\n tails = []\n\n for num in nums:\n if not tails or num > tails[-1]:\n tails.append(num)\n else:\n idx = _binsearch(tails, num)\n tails[idx] = num\n return len(tails)", "def find_longest_plateau(seq):\n\n start_longest_so_far = 0\n length_longest_so_far = 0\n i = 0\n\n # INVARIANT\n # The longest plateau in seq[0:i] starts at position\n # start_longest_so_far and has a length of\n # length_longest_so_far\n # VARIANT: len(seq) - i\n #\n while len(seq) - i > length_longest_so_far:\n\n length_current_plateau = length_plateau_at(seq, i)\n\n if length_current_plateau > length_longest_so_far:\n start_longest_so_far = i\n length_longest_so_far = length_current_plateau\n\n i += length_current_plateau\n\n return start_longest_so_far", "def of_a_kind_size(dice_list):\n return max([dice_list.count(value) for value in range(1,7)])", "def summarize_ranges(addrlist):\n ranges = []\n start = None\n prev_range_class = None\n for addr in addrlist:\n if start is None:\n start = addr.ip\n end = addr.ip\n prev_range_class = addr.range_class\n continue\n if addr.range_class == prev_range_class:\n if int(addr.ip) == int(end) + 1:\n end = addr.ip\n prev_range_class = addr.range_class\n continue\n if start == end:\n ranges.append(\"{} ({})\".format(start, prev_range_class))\n else:\n ranges.append(\"{}-{} ({})\".format(start, end, prev_range_class))\n start = end = addr.ip\n prev_range_class = addr.range_class\n if start is not None:\n if start == end:\n ranges.append(\"{} ({})\".format(start, prev_range_class))\n else:\n ranges.append(\"{}-{} ({})\".format(start, end, prev_range_class))\n\n return ranges", "def gen_tuples_for_loops(range_len: int, limit: int) -> list:\n ranges = [(n * range_len, (n + 1) * range_len) for n in range(limit // range_len)]\n if limit % range_len > 0:\n ranges.append((range_len * (limit // range_len), limit))\n return ranges", "def findRanges(data_grouped):\n ranges = []\n for i in data_grouped.columns:\n theRange = (data_grouped[i].min(), data_grouped[i].max())\n ranges.append(theRange)\n return ranges" ]
[ "0.6553023", "0.63709086", "0.6276035", "0.618169", "0.61786216", "0.60901237", "0.60474753", "0.6026283", "0.6022194", "0.6012892", "0.5978638", "0.58781105", "0.58559895", "0.58521396", "0.5841774", "0.58397764", "0.58287156", "0.58229816", "0.5816759", "0.5807792", "0.57680637", "0.5757407", "0.5746607", "0.5719498", "0.5714347", "0.5701421", "0.5691887", "0.56878656", "0.5683026", "0.5661917", "0.5654659", "0.5651884", "0.56471884", "0.5640906", "0.56283605", "0.562385", "0.5611057", "0.5601001", "0.5575311", "0.5568789", "0.55677915", "0.5545258", "0.553681", "0.55290157", "0.55283105", "0.5525609", "0.55206", "0.5516088", "0.55065596", "0.5474359", "0.5473615", "0.54626256", "0.5460674", "0.5449247", "0.54247767", "0.54152626", "0.5414203", "0.54088914", "0.5404929", "0.5398913", "0.538891", "0.53849286", "0.53770304", "0.5376748", "0.537472", "0.5365818", "0.53614897", "0.53585887", "0.5346862", "0.5346562", "0.53419214", "0.53415334", "0.53363633", "0.53363466", "0.53331697", "0.5330806", "0.5330422", "0.5326875", "0.5324203", "0.53237647", "0.5321334", "0.53193635", "0.5316307", "0.53157187", "0.5308331", "0.5300641", "0.53003347", "0.5299587", "0.529899", "0.52913135", "0.52903414", "0.52895045", "0.52887386", "0.5287822", "0.52846783", "0.5282174", "0.5281058", "0.5280341", "0.524618", "0.5245027" ]
0.74052256
0
Calc all kinds of properties for a line. The line should be an list of arrays.
def calc_output(line, react_cap=None, gen_res_high=225, gen_res_low=50):
    # unpack
    t, v, i = line
    t_diff = t[1] - t[0]
    # assert t_diff == 1e-9 # time scale should be 1ns.

    # values based on current measurment. Assuming voltage waveform is aligned.
    # validation on the real maxima/minima of current
    assert i.argmax() < i.argmin(), 'Current valley before peak, signal is inverted!'

    v_min = min(v)
    v_max = max(v)
    v_max_time = np.where(v == v_max)[0][0] # first value where voltage has maximum
    # v_min_time = np.where(v == v_min)[0][-1] # last value where voltage has minimum
    # assert v_max_time < v_min_time, 'Voltage valley before peak, signal inverted!'
    c_peak_time = i[0:v_max_time].argmax() # current peak is before voltage maximum
    c_max = i[c_peak_time]
    c_valley_time = i.argmin()
    c_min = min(i)
    assert i[c_valley_time] == c_min # some validation
    assert c_peak_time < c_valley_time, 'Current valley before peak, signal is inverted!'
    assert MAX_VOLTAGE_MIN <= v_max < MAX_VOLTAGE_MAX, 'Max voltage error (%r)' % v_max
    assert MAX_CURRENT_MIN <= c_max < MAX_CURRENT_MAX, 'Max current error (%r)' % c_max

    # Find the settling time of the current. Than use the time where the current is stable
    # to calculate the final pulse voltage. This pulse final voltage is then used to calculate
    # the settling time and risetime of the voltage.

    # all parts of current inside 10% of maximum, till end of pulse
    i_time_settling_options = [abs(x) < 0.1 * c_max for x in i[0:c_valley_time]]
    ranges = count_ranges(i_time_settling_options)
    range_before, range_pulse = find_longest_ranges(ranges, 2) # [end, length]
    end_pulse = range_pulse[0]
    i_time_settling = range_pulse[0] - range_pulse[1]
    # average of voltage during pulse when current is < 5% of max current
    v_pulse = np.mean(v[i_time_settling:end_pulse])

    # all parts of current inside 10% of maximum, till end of pulse
    v_time_settling_options = [abs(x - v_pulse) < (0.1 * v_pulse) for x in v]
    ranges = count_ranges(v_time_settling_options)
    if ranges == []:
        # if too much oscillations, a range cannot be found. Increase the bounds:
        # all parts of current inside 10% of maximum, till end of pulse
        v_time_settling_options = [abs(x - v_pulse) < (0.3 * v_pulse) for x in v]
        ranges = count_ranges(v_time_settling_options)
        print('Warning, voltage settling options increased from 10% to 30%!')
    assert ranges != [], "Error! Line is too unstable."
    pulse = find_longest_ranges(ranges, 1) # pulse=[end,length] of voltage pulse stable
    settling_end = pulse[0] - pulse[1] # voltage pulse stable start
    # recalculate pulse voltage
    v_pulse_new = np.mean(v[settling_end:pulse[0]])
    if v_pulse > 13e3:
        # pulses for highest voltages have to be stable. Lower voltages are always less stable.
        assert abs(v_pulse-v_pulse_new)/v_pulse_new < 0.01, 'Pulse voltage unstable.'
    t_settling_end = t[settling_end] # voltage pulse stable start time
    v05 = 0.05 * v_pulse
    settling_start = np.where(v > v05)[0][0]
    t_settling_start = t[settling_start] # when v first rises above 0.05 of final
    t_settling = t_settling_end - t_settling_start

    v10 = 0.1 * v_pulse
    v90 = 0.9 * v_pulse
    t_rise_start = t[np.where(v > v10)[0][0]]
    t_rise_end = t[np.where(v > v90)[0][0]]
    t_rise = t_rise_end - t_rise_start
    rise_rate = (v90 - v10) / (t_rise)
    v_overshoot = v_max / v_pulse
    pulse_stable = int((settling_end + end_pulse) / 2) # point where the pulse is very stable

    # energy
    p = (v * i) # for this to be correct, make sure lines are aligned in b_correct_lines using offset 'v_div'
    e = integrate.cumtrapz(p, t, initial=0)
    p_rise = p[settling_start:pulse_stable]
    e_rise = e[settling_start:pulse_stable][-1]
    p_res = np.append(i[0:pulse_stable] ** 2 * gen_res_high, i[pulse_stable:] ** 2 * gen_res_low)
    # 1/2*C*V^2 is energy stored in capacitor, which is lost after discharging pulse.
    # e_cap = 1 / 2 * react_cap * v_pulse ** 2
    e_res = integrate.cumtrapz(p_res, t, initial=0)
    e_res_total = e_res[-1]
    e_plasma = e[-1] # energy to plasma is energy in positive pulse except charge on capacitor.

    # Correct the time axis to have 0 at the start of the pulse
    start = t[settling_start]
    t = t - start

    # all these values are added to the pickle and xlsx with 'output_' prepend in calc_run.py
    data = {
        't': t,
        'v': v,
        'c': i,
        'c_min': c_min,
        'c_max': c_max,
        'v_min': v_min,
        'v_max': v_max,
        'v_pulse': v_pulse,
        't_settling': t_settling,
        't_rise': t_rise,
        'rise_rate': rise_rate,
        'v_overshoot': v_overshoot,
        'p': p,
        'e': e,
        'p_rise': p_rise,
        'e_rise': e_rise,
        'p_res': p_res,
        'e_res': e_res,
        'e_res_total': e_res_total,
        # 'e_cap': e_cap,
        'e_plasma': e_plasma,
        'start': start,
        'end': t[end_pulse],
        # 'start_index': settling_start,
        # 'end_index': end_pulse,
        # 'test': i_time_settling
    }
    return data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_properties_sp(lines):\n\n # TODO Better logging for crashed xtb\n if not read_status(lines):\n return None\n\n keywords = [\n \"final structure:\",\n \":: SUMMARY ::\",\n \"Property Printout \",\n \"ITERATIONS\",\n ]\n\n stoppattern = \"CYCLE \"\n idxs = linesio.get_rev_indices_patterns(lines, keywords, stoppattern=stoppattern)\n idxs[0]\n idx_summary = idxs[1]\n idx_end_summary = idxs[2]\n idxs[3]\n\n if idx_summary is None:\n # TODO Better fix\n assert False, \"uncaught xtb exception\"\n\n # Get atom count\n keyword = \"number of atoms\"\n idx = linesio.get_index(lines, keyword)\n line = lines[idx]\n n_atoms = line.split()[-1]\n n_atoms = int(n_atoms)\n\n # Get energies\n idx_summary = idxs[1] + 1\n\n # :: total energy +1\n # :: total w/o Gsasa/hb +2\n # :: gradient norm +3\n # :: HOMO-LUMO gap +4\n # ::.....................+4\n # :: SCC energy +5\n # :: -> isotropic ES +6\n # :: -> anisotropic ES +7\n # :: -> anisotropic XC +8\n # :: -> dispersion +9\n # :: -> Gsolv +10\n # :: -> Gborn +11\n # :: -> Gsasa +12\n # :: -> Ghb +13\n # :: -> Gshift +14\n # :: repulsion energy +15\n # :: add. restraining +16\n\n prop_lines = lines[idx_summary : idx_end_summary - 2]\n prop_dict = parse_sum_table(prop_lines)\n\n # total_energy = prop_dict.get(\"total_energy\", float(\"nan\"))\n # gsolv = prop_dict.get(\"gsolv\", float(\"nan\"))\n # electronic_energy = prop_dict.get(\"scc_energy\", float(\"nan\"))\n\n properties = prop_dict\n\n # Get dipole\n dipole_str = \"molecular dipole:\"\n idx = linesio.get_rev_index(lines, dipole_str)\n if idx is None:\n dipole_tot = None\n else:\n idx += 3\n line = lines[idx]\n line = line.split()\n dipole_tot = line[-1]\n dipole_tot = float(dipole_tot)\n\n properties = {\n COLUMN_DIPOLE: dipole_tot,\n **properties,\n }\n\n # Get covalent properties\n properties_covalent = read_covalent_coordination(lines)\n\n # Get orbitals\n properties_orbitals = read_properties_orbitals(lines)\n properties = {**properties, **properties_orbitals, **properties_covalent}\n\n return properties", "def read_properties_opt(lines, convert_coords=False, debug=False):\n\n # TODO Better logging for crashed xtb\n if not read_status(lines):\n return None\n\n keywords = [\n \"final structure:\",\n \":: SUMMARY ::\",\n \"Property Printout \",\n \"ITERATIONS\",\n ]\n\n stoppattern = \"CYCLE \"\n idxs = linesio.get_rev_indices_patterns(lines, keywords, stoppattern=stoppattern)\n idx_coord = idxs[0]\n idx_summary = idxs[1]\n idx_end_summary = idxs[2]\n idx_optimization = idxs[3]\n\n if idx_summary is None:\n assert False, \"Uncaught xtb exception. 
Please submit issue with calculation\"\n\n # Get atom count\n keyword = \"number of atoms\"\n idx = linesio.get_index(lines, keyword)\n line = lines[idx]\n n_atoms = line.split()[-1]\n n_atoms = int(n_atoms)\n\n # Get coordinates\n if idx_coord is None:\n coords = None\n atoms = None\n\n else:\n\n def parse_coordline(line):\n line = line.split()\n atom = line[0]\n coord = [float(x) for x in line[1:]]\n return atom, coord\n\n atoms = []\n coords = []\n for i in range(idx_coord + 4, idx_coord + 4 + n_atoms):\n line = lines[i]\n atom, coord = parse_coordline(line)\n atoms.append(atom)\n coords.append(coord)\n\n atoms = np.array(atoms)\n coords = np.array(coords)\n\n if convert_coords:\n coords *= units.bohr_to_aangstroem\n\n # Get energies\n idx_summary = idxs[1] + 1\n\n # :: total energy +1\n # :: total w/o Gsasa/hb +2\n # :: gradient norm +3\n # :: HOMO-LUMO gap +4\n # ::.....................+4\n # :: SCC energy +5\n # :: -> isotropic ES +6\n # :: -> anisotropic ES +7\n # :: -> anisotropic XC +8\n # :: -> dispersion +9\n # :: -> Gsolv +10\n # :: -> Gborn +11\n # :: -> Gsasa +12\n # :: -> Ghb +13\n # :: -> Gshift +14\n # :: repulsion energy +15\n # :: add. restraining +16\n\n prop_lines = lines[idx_summary : idx_end_summary - 2]\n prop_dict = parse_sum_table(prop_lines)\n\n # total_energy = prop_dict.get(\"total_energy\", float(\"nan\"))\n # gsolv = prop_dict.get(\"gsolv\", float(\"nan\"))\n # electronic_energy = prop_dict.get(\"scc_energy\", float(\"nan\"))\n\n properties = prop_dict\n\n # Get dipole\n dipole_str = \"molecular dipole:\"\n idx = linesio.get_rev_index(lines, dipole_str)\n if idx is None:\n dipole_tot = None\n else:\n idx += 3\n line = lines[idx]\n line = line.split()\n dipole_tot = line[-1]\n dipole_tot = float(dipole_tot)\n\n if idx_optimization is None:\n is_converged = None\n n_cycles = None\n\n else:\n\n line = lines[idx_optimization]\n if \"FAILED\" in line:\n is_converged = False\n else:\n is_converged = True\n\n line = line.split()\n n_cycles = line[-3]\n n_cycles = int(n_cycles)\n\n # Get covCN and alpha\n properties_covalent = read_covalent_coordination(lines)\n\n properties = {\n COLUMN_ATOMS: atoms,\n COLUMN_COORD: coords,\n COLUMN_DIPOLE: dipole_tot,\n COLUMN_CONVERGED: is_converged,\n COLUMN_STEPS: n_cycles,\n **properties_covalent,\n **properties,\n }\n\n return properties", "def get_inpProp(prop,iterable):\n for line in cleanStrings(iterable,CC='!'):\n llist = line.split()\n try:\n propIndex = index(prop,llist) + 1\n except ValueError:\n pass\n else:\n try:\n return float(llist[propIndex])\n except:\n raise ValueError", "def read_properties_fukui(lines):\n\n keywords = [\"Fukui index Calculation\", \"f(+)\", \"Property Printout\"]\n\n indices = linesio.get_rev_indices_patterns(lines, keywords)\n\n if indices[0] is None:\n return None\n\n start_index = indices[1]\n end_index = indices[2]\n\n f_plus_list = []\n f_minus_list = []\n f_zero_list = []\n\n for i in range(start_index + 1, end_index - 1):\n line = lines[i]\n line = line.split()\n\n f_plus = float(line[1])\n f_minus = float(line[2])\n f_zero = float(line[3])\n\n f_plus_list.append(f_plus)\n f_minus_list.append(f_minus)\n f_zero_list.append(f_zero)\n\n f_plus_list = np.array(f_plus_list)\n f_minus_list = np.array(f_minus_list)\n f_zero_list = np.array(f_zero_list)\n\n properties = {\n \"f_plus\": f_plus_list,\n \"f_minus\": f_minus_list,\n \"f_zero\": f_zero_list,\n }\n\n return properties", "def read_prop(self, fname, prop, add=True, mult=1):\r\n print('Reading ' + prop + ' input')\r\n typeVal = None\r\n 
val = 0\r\n with open(fname, \"r\") as fp:\r\n for line in fp:\r\n item = line.split()\r\n if len(item) > 0:\r\n if item[0] == prop:\r\n if len(item) >= 2:\r\n if item[1] == \"*CON\":\r\n val = float(item[2])\r\n typeVal = '*CON'\r\n elif item[1] == '*EQUALSI' or item[1] == 'EQUALSI':\r\n attr_I = prop[:-1] + 'I'\r\n # Change 'PERMJ' to be the keyword that identifies the end of attribute section\r\n data = self.read_prop(fname, attr_I, add=False, mult=mult)\r\n if len(item) == 4:\r\n op = item[2]\r\n if op == '*':\r\n data *= float(item[3])\r\n elif op == '/':\r\n data /= float(item[3])\r\n elif op == '+':\r\n data += float(item[3])\r\n elif op == '-':\r\n data -= float(item[3])\r\n elif item[1] == 'ALL':\r\n typeVal = 'ALL'\r\n break\r\n\r\n if typeVal == 'ALL':\r\n data = []\r\n count = 0\r\n for line in fp:\r\n item = line.split()\r\n for attr in item:\r\n if \"*\" in attr:\r\n item = attr.split(\"*\")\r\n for i in range(0, int(item[0])):\r\n data.append(float(item[1]))\r\n count += 1\r\n else:\r\n data.append(float(attr))\r\n count += 1\r\n # If true, all values have been read\r\n if count == self.size[0] * self.size[1] * self.size[2]:\r\n data = np.array(data)\r\n data = np.reshape(data, (self.size[2], self.size[1], self.size[0]), order=\"C\")\r\n break\r\n elif typeVal == '*CON':\r\n data = np.full((self.size[2], self.size[1], self.size[0]), val)\r\n\r\n if add:\r\n self.add_data(data, prop)\r\n self.out_props[prop] = data\r\n return data", "def read_properties(lines, options=None, scr=None):\n\n reader = None\n read_files = True\n\n if options is None:\n reader = read_properties_opt\n\n elif \"vfukui\" in options:\n reader = read_properties_fukui\n read_files = False\n\n elif \"vomega\" in options:\n reader = read_properties_omega\n read_files = False\n\n elif \"opt\" in options or \"ohess\" in options:\n reader = read_properties_opt\n\n else:\n reader = read_properties_sp\n\n properties = reader(lines)\n\n if scr is not None and read_files:\n # Parse file properties\n charges = get_mulliken_charges(scr=scr)\n bonds, bondorders = get_wbo(scr=scr)\n\n properties[\"mulliken_charges\"] = charges\n properties.update(get_cm5_charges(lines)) # Can return {} if not GFN1\n properties[\"bonds\"] = bonds\n properties[\"bondorders\"] = bondorders\n\n if \"vibspectrum\" in os.listdir(scr):\n properties[\"frequencies\"] = get_frequencies(scr=scr)\n\n return properties", "def agline(line):\n\n vals = {}\n x = ['date', 'movie', 'offset', 'well', 'gen', 'flarem', 'flares', \n 'chargem', 'charges', 'charget', 'escdm', 'escds', 'escddur', 'escmm', 'escms', \n 'escmdur']\n y = line.strip('\\n').split(',')[0:16]\n z = zip(x, y)\n\n for item in z:\n vals[item[0]] = item[1]\n\n return(vals)", "def test_linelist():\n # __init__() creates a LineList with 0 lines.\n linelist = LineList()\n assert isinstance(linelist, LineList)\n assert len(linelist) == 0\n inputs = []\n for iline, vslstr in enumerate(vald_short_line_strings):\n assert len(linelist) == iline\n vsl = ValdShortLine(vslstr)\n inputs.append(vsl)\n\n # Append SmeLine and ValdShortLine objects (add ValdLongLine).\n # __getitem__() returns the object just appended.\n if iline % 2 == 0:\n linelist.append(vsl)\n assert linelist[iline] == vsl\n else:\n smeline = SmeLine(vsl.species, vsl.wlcent, vsl.excit, vsl.loggf,\n vsl.gamrad, vsl.gamqst, vsl.gamvw)\n linelist.append(smeline)\n assert linelist[iline] == smeline\n\n # __len__() returns number of appended lines.\n assert len(linelist) == len(vald_short_line_strings)\n\n # Exercise __str__() to make 
sure it returns a value.\n assert type(str(linelist)) is str\n\n # Properties return lists of values equal to the input values.\n assert isinstance(linelist.species, list)\n assert len(linelist.species) == len(vald_short_line_strings)\n assert linelist.species == [line.species for line in inputs]\n assert linelist.wlcent == [line.wlcent for line in inputs]\n assert linelist.excit == [line.excit for line in inputs]\n assert linelist.loggf == [line.loggf for line in inputs]\n assert linelist.gamrad == [line.gamrad for line in inputs]\n assert linelist.gamqst == [line.gamqst for line in inputs]\n assert linelist.gamvw == [line.gamvw for line in inputs]\n\n # Exceptions\n with raises(TypeError, match='line in LineList has invalid type'):\n linelist[0] = 'invalid type'\n with raises(TypeError, match='line in LineList has invalid type'):\n linelist.append('invalid type')", "def __init__(self, model, line, segments = None, influence = None, \r\n strength = 1, variables = [], priors=[]):\r\n\r\n import numpy as np\r\n from scipy.interpolate import interp1d\r\n import copy\r\n \r\n self.model = model\r\n model.elementlist.append(self)\r\n \r\n self.variables = variables\r\n self.priors = priors\r\n \r\n # ---------------------------------------------------------------------\r\n # Subdivide the provided no flow boundary into #segments pieces\r\n \r\n self.line_raw = copy.copy(line)\r\n \r\n if segments is None:\r\n \r\n self.segments = line.shape[0]-1\r\n \r\n else:\r\n self.segments = segments\r\n \r\n if self.segments < self.line_raw.shape[0]-1:\r\n \r\n raise Exception('Number of segments '+str(self.segments)+\" mustn't be smaller than number of line points \"+str(line.shape[0])+'.')\r\n \r\n if self.segments > self.line_raw.shape[0]:\r\n \r\n # Subdivide the line\r\n self.line = self.subdivide_line(line,self.segments)\r\n self.line_c = copy.copy(self.line[:,0] + 1j*self.line[:,1])\r\n else:\r\n \r\n self.line = self.line_raw.copy()\r\n self.line_c = self.line[:,0] + 1j*self.line[:,1]\r\n \r\n # Also get the normal vector components to each segment\r\n self.line_nvec = self.line[:,1] - 1j*self.line[:,0]\r\n self.line_nvec = self.line_nvec/np.abs(self.line_nvec)\r\n\r\n # --------------------------------------------------------------------- \r\n \r\n \r\n \r\n \r\n self.strength = np.ones(self.segments)*strength\r\n \r\n if influence is None:\r\n self.influence = self.model.domain_radius*2\r\n else:\r\n self.influence = influence\r\n \r\n \r\n self.Zi = []\r\n self.offset_outside = []\r\n self.L = []\r\n self.zc = []\r\n self.segment_nvec = []\r\n self.head_target = []\r\n \r\n for seg in range(self.segments):\r\n \r\n self.L += [np.abs(self.line_c[seg+1] - self.line_c[seg])]\r\n \r\n influence_pt = (self.line_c[seg+1]-self.line_c[seg])*self.influence/self.L[seg] + self.line_c[seg]\r\n Z = (2*influence_pt-(self.line_c[seg]+self.line_c[seg+1]))/(self.line_c[seg+1]-self.line_c[seg])\r\n self.Zi += [copy.copy(Z)]\r\n \r\n self.zc += [(self.line_c[seg]+self.line_c[seg+1])/2]\r\n \r\n # Calculate the normal vector to this segment\r\n self.segment_nvec += [(self.line_c[seg]-self.line_c[seg+1])]\r\n self.segment_nvec[-1]= [np.imag(self.segment_nvec[-1])-1j*np.real(self.segment_nvec[-1])]\r\n \r\n part1 = np.nan_to_num((Z+1)*np.log(Z+1))\r\n part2 = np.nan_to_num((Z-1)*np.log(Z-1))\r\n self.offset_outside += [self.L[seg] / (4*np.pi) * (part1 - part2)]\r\n \r\n # Convert list of segment centers to array\r\n self.zc = np.asarray(self.zc)\r\n \r\n \r\n # Check if the prior matches the number of 
parameters\r\n if len(self.priors) != len(self.variables):\r\n raise Exception('Number of priors must match number of unknown variables. Number of priors: '+str(self.priors)+' / Number of unknown variables: '+str(len(self.variables)))\r\n \r\n # Go through all elements\r\n if len(self.variables) > 0:\r\n # There are some model variables specified\r\n for idx,var in enumerate(self.variables):\r\n self.model.num_params += 1\r\n exec(\"self.model.params += [self.%s]\" % var)\r\n self.model.priors += [self.priors[idx]]\r\n self.model.variables += [var]\r\n if 'name' in list(self.priors[idx].keys()):\r\n self.model.param_names += [self.priors[idx]['name']] \r\n else: \r\n self.model.param_names += ['unknown']", "def update_lines(num, ax, fargs):\n \n dataLines, lines = fargs\n for line, data in zip(lines, dataLines):\n # NOTE: there is no .set_data() for 3 dim data...\n line.set_data(data[0:2, :num])\n line.set_3d_properties(data[2, :num])\n return lines", "def buildLine(self):\n self.clearLineshape()\n if len(self.components)==0:\n y = np.zeros(len(self.x))\n self.lineshape = y\n else:\n '''for component in self.components:\n y = np.array([component.function(x) for x in self.x])\n self.lineshape = np.add(self.lineshape,y)'''\n self.buffer._sum()", "def read_ext_prop(self, fname, prop_title, mult=1):\r\n print('Reading ' + prop_title + ' input')\r\n data = []\r\n count = 0\r\n modify = False\r\n with open(fname, \"r\") as fp:\r\n for line in fp:\r\n if not line[:1].isdigit():\r\n if line.startswith('*MOD'):\r\n modify = True\r\n continue # it's a keyword\r\n item = line.split()\r\n if modify:\r\n i = int(item[0])-1\r\n j = int(item[1])-1\r\n K = [int(x)for x in item[2].split(':')]\r\n value = float(item[-1])\r\n for k in range(K[0]-1,K[1]):\r\n data[k,j,i] = value\r\n break\r\n for attr in item:\r\n if \"*\" in attr:\r\n item = attr.split(\"*\")\r\n for i in range(0, int(item[0])):\r\n data.append(float(item[1]) * mult)\r\n count += 1\r\n else:\r\n data.append(float(attr) * mult)\r\n count += 1\r\n # If true, all values have been read\r\n if count == self.size[0] * self.size[1] * self.size[2]:\r\n data = np.array(data)\r\n data = np.reshape(data, (self.size[2], self.size[1], self.size[0]), order=\"C\")\r\n continue\r\n self.add_data(data, prop_title)\r\n self.out_props[prop_title] = data", "def isLineData(self, line):\n\n if line is None or line.strip().startswith('#'):\n return False, None, 0\n\n dataType = self.getDataType()\n\n if dataType == 'Y':\n # Y with 1 column\n try:\n yValue = float(line)\n\n return True, 'Y', 1\n except:\n pass\n\n # Y with comma 2 to 5 column\n try:\n yValueList = []\n yValueList = line.split(',')\n\n if len(yValueList) > 1 and len(yValueList) <= 5:\n newYValues = []\n for yValue in yValueList:\n try:\n yValue = float(yValue)\n newYValues.append(yValue)\n except ValueError:\n pass\n\n return True, 'Y', len(newYValues)\n except:\n pass\n\n # Y with space 2 to 5 column\n try:\n yValueList = []\n yValueList = line.split()\n\n if len(yValueList) > 1 and len(yValueList) <= 5:\n for yValue in yValueList:\n yValue = float(yValue)\n\n return True, 'Y', len(yValueList)\n except:\n pass\n elif dataType == 'XY':\n # XY with comma\n try:\n (xValue, yValue) = line.split(',')\n\n xValue = float(xValue)\n yValue = float(yValue)\n\n return True, 'XY', 2\n except:\n pass\n\n # XY with comma\n try:\n xValue, yValue, dummy = line.split(',')\n\n xValue = float(xValue)\n yValue = float(yValue)\n\n return True, 'XY', 2\n except:\n pass\n\n # XY with space\n try:\n (xValue, yValue) = 
line.split()\n\n xValue = float(xValue)\n yValue = float(yValue)\n\n return True, 'XY', 2\n except:\n pass\n else:\n # Y with 1 column\n try:\n yValue = float(line)\n\n return True, 'Y', 1\n except:\n pass\n\n # Y with comma 2 to 5 column\n try:\n yValueList = []\n yValueList = line.split(',')\n\n if len(yValueList) > 1 and len(yValueList) <= 5:\n numberValues = 0\n for yValue in yValueList:\n try:\n yValue = float(yValue)\n numberValues += 1\n except ValueError:\n pass\n\n return True, 'Y', numberValues\n except:\n pass\n\n # Y with space 2 to 5 column\n try:\n yValueList = []\n yValueList = line.split()\n\n if len(yValueList) > 1 and len(yValueList) <= 5:\n for yValue in yValueList:\n yValue = float(yValue)\n\n return True, 'Y', len(yValueList)\n except:\n pass\n\n # XY with comma\n try:\n (xValue, yValue) = line.split(',')\n\n xValue = float(xValue)\n yValue = float(yValue)\n\n return True, 'XY', 2\n except:\n pass\n\n # XY with comma\n try:\n xValue, yValue, dummy = line.split(',')\n\n xValue = float(xValue)\n yValue = float(yValue)\n\n return True, 'XY', 2\n except:\n pass\n\n # XY with space\n try:\n (xValue, yValue) = line.split()\n\n xValue = float(xValue)\n yValue = float(yValue)\n\n return True, 'XY', 2\n except:\n pass\n\n return False, None, 0", "def get_education_data(line_objs):\n result = []\n tmp = []\n start = False\n for idx, line in enumerate(line_objs):\n if line.get('footer'):\n print(\"ignore footer line\")\n continue\n line_label = line.get('label')\n line_category = line.get('category')\n if line_label == 'title':\n if line_category == 'education':\n start = True\n continue\n else:\n start = False\n if line_label == 'title-in-line':\n if line_category == 'education':\n start = True\n else:\n start = False\n if start:\n tmp.append(line)\n else:\n if tmp:\n result.append(tmp)\n tmp = []\n if idx == len(line_objs) - 1:\n if tmp:\n result.append(tmp)\n\n for tmp in result:\n for idx, line in enumerate(tmp):\n if idx > 0:\n prev_line = tmp[idx-1]\n else:\n prev_line = None\n if prev_line:\n line_x, line_y = _get_x_y(line)\n prev_line_x, prev_line_y = _get_x_y(prev_line)\n if line_y == prev_line_y:\n line['cen'] = [prev_line_x, line_y]\n print(\"changed x\")\n\n for tmp in result:\n sorted_x = sorted([line.get('cen')[0] for line in tmp])\n min_left_margin = sorted_x[0]\n max_left_margin = sorted_x[-1]\n for line in tmp:\n pattern_feature = get_feature_pattern(line, min_left_margin, max_left_margin)\n line['pattern_feature'] = pattern_feature\n return result", "def calc_line( values, a, c):\n line=[]\n for val in values:\n line.append( a * val + c)\n return line", "def get_experience_data(line_objs):\n result = []\n tmp = []\n start = False\n title_in_line = False\n for idx, line in enumerate(line_objs):\n if line.get('footer'):\n print(\"ignore footer line\")\n continue\n line_label = line.get('label')\n line_category = line.get('category')\n if line_label == 'title':\n if line_category == 'experience':\n start = True\n continue\n else:\n start = False\n if line_label == 'title-in-line':\n if line_category == 'experience':\n start = True\n else:\n start = False\n if start:\n tmp.append(line)\n else:\n if tmp:\n result.append(tmp)\n tmp = []\n if idx == len(line_objs) - 1:\n if tmp:\n result.append(tmp)\n\n for tmp in result:\n for idx, line in enumerate(tmp):\n if idx > 0:\n prev_line = tmp[idx-1]\n else:\n prev_line = None\n if prev_line:\n line_x, line_y = _get_x_y(line)\n prev_line_x, prev_line_y = _get_x_y(prev_line)\n if line_y == prev_line_y:\n line['cen'] = 
[prev_line_x, line_y]\n print(\"changed x\")\n\n for tmp in result:\n sorted_x = sorted([line.get('cen')[0] for line in tmp])\n min_left_margin = sorted_x[0]\n max_left_margin = sorted_x[-1]\n for line in tmp:\n pattern_feature = get_feature_pattern(line, min_left_margin, max_left_margin)\n line['pattern_feature'] = pattern_feature\n return result", "def update_lineprops(imageprop):\n db = 'sage'\n j = call_responder('sage', 'cvterms?cv_term=' + imageprop)\n if len(j['cvterm_data']) < 1:\n logger.critical(\"Could not find line property %s\", imageprop)\n sys.exit(-1)\n typeid = str(j['cvterm_data'][0]['id'])\n input = open(ARG.FILE, \"r\")\n for line in input:\n line = line.strip()\n iname = line.split('_')[-1]\n logger.info(iname)\n cursor = READ['image'] % (iname)\n COUNT['read'] += 1\n try:\n CURSOR[db].execute(cursor)\n except MySQLdb.Error as err:\n sql_error(err)\n rows = CURSOR[db].fetchall()\n if len(rows) > 1:\n logger.error(\"Non-unique UID %s for image %s\", iname, line)\n COUNT['multiple'] += 1\n continue\n elif len(rows) == 0:\n logger.error(\"No image found for UID %s image %s\", iname, line)\n COUNT['notfound'] += 1\n continue\n for row in rows:\n image_id = row[0]\n cursor2 = WRITE['imageprop'] % (image_id, typeid)\n logger.debug(cursor2)\n try:\n CURSOR[db].execute(cursor2)\n COUNT['imageprop'] += 1\n except MySQLdb.Error as err:\n logger.error(\"Could not update row in image_data_mv\")\n sql_error(err)\n cursor2 = WRITE['imagedatamv'] % (ARG.IMAGEPROP, image_id)\n logger.debug(cursor2)\n try:\n CURSOR[db].execute(cursor2)\n COUNT['imagedatamv'] += 1\n except MySQLdb.Error as err:\n logger.error(\"Could not update row in image_data_mv\")\n sql_error(err)\n if ARG.WRITE:\n CONN[db].commit()\n print(\"Images read: %d\" % (COUNT['read']))\n print(\"Images not found: %d\" % (COUNT['notfound']))\n print(\"Images with nonspecific UIDs: %d\" % (COUNT['multiple']))\n if ARG.WRITE or ARG.DEBUG:\n print(\"Rows deleted from image_property: %d\" % (COUNT['imageprop']))\n print(\"Rows updated in image_data_mv: %d\" % (COUNT['imagedatamv']))", "def _get_line_vals(self, record, line_field, fields):\n line_field, max_row = get_line_max(line_field)\n lines = record[line_field]\n if max_row > 0 and len(lines) > 3800 :#and len(lines) > max_row:\n raise Exception(\n _('Records in %s exceed max record allowed!') % line_field)\n vals = dict([(field, []) for field in fields])\n # Get field condition & aggre function\n field_cond_dict = {}\n aggre_func_dict = {}\n field_format_dict = {}\n pair_fields = [] # I.e., ('debit${value and . 
or .}@{sum}', 'debit')\n for field in fields:\n temp_field, eval_cond = get_field_condition(field)\n temp_field, field_format = get_field_format(temp_field)\n raw_field, aggre_func = get_field_aggregation(temp_field)\n # Dict of all special conditions\n field_cond_dict.update({field: eval_cond})\n aggre_func_dict.update({field: aggre_func})\n field_format_dict.update({field: field_format})\n # --\n pair_fields.append((field, raw_field))\n # --\n for line in lines:\n for field in pair_fields: # (field, raw_field)\n value = self._get_field_data(field[1], line)\n if type(value) == type(''):\n value = re.sub(r\"[\u001e\u0006]\", '', value)\n # Case Eval\n eval_cond = field_cond_dict[field[0]]\n if eval_cond: # Get eval_cond of a raw field\n eval_context = {'float_compare': float_compare,\n 'time': time,\n 'datetime': dt,\n 'date': date,\n 'value': value,\n 'object': line,\n 'model': self.env[record._name],\n 'env': self.env,\n 'context': self._context,\n }\n # value = str(eval(eval_cond, eval_context))\n # Test removing str(), coz some case, need resulting number\n value = eval(eval_cond, eval_context)\n # --\n vals[field[0]].append(value)\n return (vals, aggre_func_dict, field_format_dict)", "def get_mv_line_vals(self, line, *args, **kwargs):\n return {\n 'ref': line.get('ref', '/'),\n 'name': line.get('label', line.get('ref', '/')),\n 'date': line.get('date', datetime.datetime.now().date()),\n 'transaction_ref': line.get('transaction_id', '/'),\n 'debit': line.get('debit', 0.0),\n 'credit': line.get('credit', 0.0),\n }", "def parse_calc_cmd(self, line):\n self.E_str = \"parse_calc_cmd\"\n # Clean up the line\n line, any_vars = self.find_vars_in_str(line)\n words = line.split()\n _, calc_type, _, var_name, _, new_var_name = words\n\n # Get the variable to calculate the property with\n Var = getattr(self, var_name)\n\n # Check the required metadata has been set\n required_metadata = f_dicts.calc_fncs[calc_type].required_metadata\n Var = getattr(self, var_name)\n for attr in required_metadata:\n if attr not in Var.metadata and attr not in f_dicts.calc_fncs[calc_type]._defaults:\n err_msg = f\"'{attr}' required for calculation of '{calc_type}'\"\n err_msg += \"\\n\\nPlease set it with the following syntax:\\n\\t\"\n err_msg += f\"{var_name}['{attr}'] = <value>\"\n err_msg += f\" or by using a set command.\"\n self.print_error(err_msg)\n\n\n Calc_Obj = f_dicts.calc_fncs[calc_type](Var)\n\n Calc_Obj.calc()\n\n # Create a new variable type\n New_Var = inp_types.Variable(Calc_Obj.name, Calc_Obj, Calc_Obj.metadata)\n setattr(self, new_var_name, New_Var)\n if new_var_name not in self.variables:\n self.variables.append(new_var_name)", "def read_line(l):\n return [read_float(l[s]) for s in slices['data']]", "def getContourProperties(self, contour, properties=[]):\r\n # Initial variables.\r\n failInInput = False\r\n props = {}\r\n\r\n for prop in properties:\r\n prop = str(prop).lower()\r\n\r\n if prop == \"approximation\":\r\n props.update({\"Approximation\" : self.__CalculateApproximation(contour)})\r\n if prop == \"area\":\r\n props.update({\"Area\" : self.__CalculateArea(contour)})\r\n elif prop == \"boundingbox\":\r\n props.update({\"BoundingBox\" : self.__CalculateBoundingBox(contour)})\r\n elif prop == \"centroid\":\r\n props.update({\"Centroid\" : self.__CalculateCentroid(contour)})\r\n elif prop == \"circle\":\r\n props.update({\"Circle\" : self.__CalculateCircle(contour)})\r\n elif prop == \"circularity\":\r\n props.update({\"Circularity\" : self.__CalculateCircularity(contour)})\r\n elif prop 
== \"convexhull\":\r\n props.update({\"ConvexHull\" : self.__CalculateConvexHull(contour)})\r\n elif prop == \"extend\":\r\n props.update({\"Extend\" : self.__CalculateExtend(contour)})\r\n elif prop == \"ellipse\":\r\n props.update({\"Ellipse\" : self.__CalculateEllipse(contour)})\r\n elif prop == \"isconvex\":\r\n props.update({\"IsConvex\" : self.__IsConvex(contour)})\r\n elif prop == \"length\":\r\n props.update({\"Length\" : self.__CalculateLength(contour)})\r\n elif prop == \"moments\":\r\n props.update({\"Moments\" : self.__CalculateMoments(contour)})\r\n elif prop == \"perimeter\":\r\n props.update({\"Perimeter\" : self.__CalculatePerimeter(contour)})\r\n elif prop == \"rotatedbox\":\r\n props.update({\"RotatedBox\" : self.__CalculateRotatedBox(contour)})\r\n elif failInInput:\r\n pass\r\n else:\r\n print(\"\\t--\" * 20)\r\n print(\"\\t*** PROPERTY ERROR \" + prop + \" DOES NOT EXIST ***\")\r\n print(\"\\tTHIS ERROR MESSAGE WILL ONLY BE PRINTED ONCE\")\r\n print(\"\\--\" * 20)\r\n failInInput = True\r\n\r\n return props", "def getProperties(properties =['electrical_props', '__description'], \r\n sproperty ='electrical_props'):\r\n #------------------------------------\r\n from .database import GeoDataBase\r\n #-----------------------------------\r\n def _fs (v): \r\n \"\"\" Sanitize value and put on list \r\n :param v: value \r\n :Example:\r\n \r\n >>> _fs('(416.9, 100000.0)'))\r\n ...[416.9, 100000.0]\r\n \"\"\"\r\n try : \r\n v = float(v)\r\n except : \r\n v = tuple([float (ss) for ss in \r\n v.replace('(', '').replace(')', '').split(',')])\r\n return v\r\n # connect to geodataBase \r\n try : \r\n _dbObj = GeoDataBase()\r\n except: \r\n _logger.debug('Connection to database failed!')\r\n else:\r\n _gammaVal = _dbObj._retreive_databasecolumns(properties)\r\n if sproperty in properties: \r\n indexEprops = properties.index(sproperty )\r\n try:\r\n _gammaVal [indexEprops] = list(map(lambda x:_fs(x),\r\n _gammaVal[indexEprops]))\r\n except TypeError:\r\n _gammaVal= list(map(lambda x:_fs(x),\r\n _gammaVal))\r\n return _gammaVal", "def computeProp(self):\n self.chem = {}\n for key in self.config.C:\n if key in ['P', 'T', 'Z', 'DZ']:\n continue\n self.chem[key] = chemistry.ConstituentProperties(key)\n\n # nAtm = len(self.gas[self.config.C['P']])\n self.property = []\n for op in self.config.LP:\n self.property.append([])\n zOffset = 0.0\n iOffset = 0\n psep = 1.0E6\n for i, zv in enumerate(self.gas[self.config.C['Z']]): # find the nearest z value at p_ref\n P = self.gas[self.config.C['P']][i]\n if abs(P - self.config.p_ref) < psep:\n psep = abs(P - self.config.p_ref)\n iOffset = i\n zOffset = self.gas[self.config.C['Z']][iOffset]\n z_at_p_ref = self.config.Req\n\n for i, zv in enumerate(self.gas[self.config.C['Z']]):\n T = self.gas[self.config.C['T']][i]\n P = self.gas[self.config.C['P']][i]\n self.property[self.config.LP['P']].append(P)\n self.property[self.config.LP['Z']].append(zv)\n rr = z_at_p_ref + zv - zOffset\n # note that this is the \"actual\"z along equator referenced to planet center (aka radius)\n self.property[self.config.LP['R']].append(rr)\n # ##set mean amu\n amulyr = 0.0\n for key in self.chem:\n amulyr += self.chem[key].amu * self.gas[self.config.C[key]][i]\n self.property[self.config.LP['AMU']].append(amulyr)\n # ##set GM pre-calc (normalized further down) and get lapse rate\n if not i:\n self.property[self.config.LP['GM']].append(0.0)\n self.property[self.config.LP['LAPSE']].append(0.0)\n self.property[self.config.LP['LAPSEP']].append(0.0)\n else:\n rho = (amulyr * P) 
/ (chemistry.R * T)\n dr = abs(zv - self.gas[self.config.C['Z']][i - 1])\n dV = 4.0 * np.pi * (rr**2) * dr\n dM = 1.0e11 * rho * dV\n GdM = self.property[self.config.LP['GM']][i - 1] + chemistry.GravConst * dM\n # in km3/s2\n # mass added as you make way into atmosphere by radius r (times G)\n self.property[self.config.LP['GM']].append(GdM)\n dT = abs(T - self.gas[self.config.C['T']][i - 1])\n dP = abs(P - self.gas[self.config.C['P']][i - 1])\n self.property[self.config.LP['LAPSE']].append(dT / dr)\n self.property[self.config.LP['LAPSEP']].append(dT / dP)\n # ##set refractivity and index of refraction\n refrlyr = 0.0\n for key in self.chem:\n refrlyr += self.chem[key].refractivity(T=T) * self.gas[self.config.C[key]][i]\n refrlyr = refrlyr * P * (293.0 / T)\n self.property[self.config.LP['REFR']].append(refrlyr)\n nlyr = refrlyr / 1.0E6 + 1.0\n self.property[self.config.LP['N']].append(nlyr)\n\n # ##Now need to normalize GM to planet and calculate scale height (H)\n GMnorm = self.property[self.config.LP['GM']][iOffset] # G*(Mass added by p_ref)\n for i, mv in enumerate(self.property[self.config.LP['GM']]):\n gm = self.config.GM_ref - (mv - GMnorm)\n self.property[self.config.LP['GM']][i] = gm\n little_g = gm / self.property[self.config.LP['R']][i]**2\n m_bar = self.property[self.config.LP['AMU']][i]\n T = self.gas[self.config.C['T']][i]\n self.property[self.config.LP['H']].append((chemistry.R * T) /\n (little_g * m_bar) / 1000.0)\n self.property[self.config.LP['g']].append(little_g)\n self.property = np.array(self.property)", "def col_positions(self, line_type):\n\t\t# TODO: For peak fault current in make calculation should maximum of both methods be used\n\t\tcols = dict()\n\t\tif line_type == self.current:\n\t\t\tcols[self.ik11] = 0\n\t\t\tcols[self.ibsym] = 2\n\t\t\t# # Values no longer obtained from here since these relate to the values obtained by the sum of the\n\t\t\t# # calculated DC values rather than thevenin impedance as required by G74\n\t\t\t# #cols[self.idc] = 4\n\t\t\t# #cols[self.ibasym] = 5\n\t\t\tcols[self.idc_method1] = 4\n\t\t\tcols[self.ibasym_method1] = 5\n\t\t\tcols[self.ip_method1] = 6\n\n\t\t\t# Expected length of this list of floats\n\t\t\texpected_length = 7\n\t\telif line_type == self.impedance:\n\t\t\tcols[self.r] = 0\n\t\t\tcols[self.x] = 1\n\t\t\tcols[self.v_prefault] = 2\n\t\t\t# Obtaining the DC, asym and peak values from the second row (THEVENIN ROW) is used since this\n\t\t\t# aligns with the requirements of the G74 standard rather to use the thevenin impedance\n\t\t\tcols[self.idc_method2] = 4\n\t\t\tcols[self.ibasym_method2] = 5\n\t\t\tcols[self.ip_method2] = 6\n\t\t\t# Not possible to export this data since in some cases get a result returned which says infinity\n\t\t\t# #cols[self.idc0] = 4\n\t\t\t# #cols[self.ibasym0] = 5\n\t\t\t# #cols[self.ip0] = 6\n\n\t\t\t# Expected length of this list of floats\n\t\t\texpected_length = 7\n\t\telse:\n\t\t\traise ValueError(\n\t\t\t\t(\n\t\t\t\t\t'The line_type <{}> provided does not match the available options of:\\n'\n\t\t\t\t\t'\\t - {}\\n'\n\t\t\t\t\t'\\r - {}\\n'\n\t\t\t\t\t'Check the code!'\n\t\t\t\t).format(line_type, self.current, self.impedance)\n\t\t\t)\n\n\t\treturn cols, expected_length", "def read_prop(self, fname, attr_name):\r\n print('Reading ' + attr_name + ' input')\r\n temp = []\r\n with open(fname, \"r\") as fp:\r\n for line in fp:\r\n item = line.split()\r\n if len(item) > 0:\r\n if item[0] != \"--\":\r\n tag = item[0]\r\n break\r\n\r\n for line in fp:\r\n attribute = line.split()\r\n if 
attribute:\r\n if attribute[0] != \"--\":\r\n if attribute[-1] != \"/\":\r\n for c in attribute:\r\n if '*' in c:\r\n cc = c.split('*')\r\n for i in range(int(cc[0])):\r\n temp.append(cc[-1])\r\n else:\r\n temp.append(c)\r\n else:\r\n attribute.pop()\r\n for c in attribute:\r\n if '*' in c:\r\n cc = c.split('*')\r\n for i in range(int(cc[0])):\r\n temp.append(cc[-1])\r\n else:\r\n temp.append(c)\r\n break\r\n\r\n # #attribute = fp.readline().split()[-1]\r\n # attribute = fp.readline().split()\r\n # print(attribute)\r\n # if attribute[0] != \"--\":\r\n # self.Prop[tag] = attribute\r\n # print(\"loading\", attribute)\r\n # for line in fp:\r\n # if line.split():\r\n # if line.split()[0] != \"--\":\r\n # if line.split()[-1] != \"/\":\r\n # temp += line.split()\r\n # else:\r\n # temp += line.split()[0:-1]\r\n # break\r\n print(temp)\r\n data = np.zeros((self.ne * self.nn * self.nz), dtype=float)\r\n count = 0\r\n for item in temp:\r\n if \"*\" in item:\r\n ct = (int)(item.split(\"*\")[0])\r\n vl = (float)(item.split(\"*\")[1])\r\n data[count:count + ct] = vl\r\n count += ct\r\n else:\r\n data[count] = (float)(item)\r\n count += 1\r\n\r\n data = np.reshape(data, (self.ne, self.nn, self.nz), order=\"F\")\r\n\r\n # Add to VTK grid\r\n ac = vtk.vtkDoubleArray()\r\n ac.SetName(attr_name)\r\n for iac in data.flatten(order='F'):\r\n ac.InsertNextTuple1(iac)\r\n self.Grid.GetCellData().AddArray(ac)\r\n\r\n return data", "def layer_properties(freq_vec, material):\n # name of the material\n material_name = material[0]\n # thickness of the material (reshape with freq shape, in a tuple, to\n # allow the sum with the tuple of material properties)\n thickness = (np.array( [material[1]]*len(freq_vec) ), )\n # check if we have to pass extra arguments for non homogenous material\n if material_name == 'meta':\n param = material[2:]\n else:\n param = ()\n # read/compute material properties\n prop = mat.properties(material_name, freq_vec, *param)\n\n return thickness + prop", "def process_line(line):\n action = {'+': operator.add, '-': operator.sub}\n nums = [int(num) for num in re.findall(r'-?\\d+', line)[1:]] # extracts numbers\n ops = re.findall(r' ([+-]) ', line) # extracts operators\n total = nums[0]\n for op, num in zip(ops, nums[1:]):\n total = action[op](total, num)\n return total", "def mdAveragePropertiesList(self):\n\t\tpass", "def _determine_properties(self, paramdict):\n for var in paramdict:\n if is_dimensionless(paramdict[var]):\n self._all_params_unit[var] = \"none\"\n yield lems.Property(var, \"none\")\n else:\n dim = _determine_dimension(paramdict[var])\n self._all_params_unit[var] = dim\n yield lems.Property(var, dim)", "def lhco_line(self):\n if not self.check_def(['eta','phi','pt','mass','pid']): \n sys.exit('Particle error: some attribute not defined')\n\n jet=[1,2,3,4,5,6,21]\n inv_list=[12,14,16,18,1000022,1000023,1000024,1000025,1000035]\n\n #define pid-> type\n pid_to_type={11:1,-11:1,13:2,-13:2,15:3,-15:3,22:0}\n for data in jet:\n pid_to_type[data]=4\n pid_to_type[-data]=4\n for data in inv_list:\n pid_to_type[data]=6\n pid_to_type[-data]=6\n\n\n \n type=''\n for key in pid_to_type.keys():\n if self.pid==key:\n type=pid_to_type[key]\n break\n \n if type=='':\n print 'Warning unknown type'\n return ''\n\n text =' '+str(type) #type LHCO\n text+=' '+str(self.eta) #ETA\n text+=' '+str(self.phi) #PHI\n text+=' '+str(self.pt) #PT\n text+=' '+str(self.mass) #JMASS\n if self.pid in [11,13]: #NTRK\n text+=' -1' \n else:\n text+=' 1'\n if self.pid in [-5,5]: #BTAG\n text+=' 2'\n else:\n text+=' 
0'\n text+=' 0' #HAD/EM\n text+=' 0' #DUMMY 1\n text+=' 0' #DUMMY 2\n \n return text", "def grid_line(object):\n def __init__(self, casenum):\n self.casenum = casenum\n \n def getList(self):\n \"\"\"\n to get the whole list of grid line data\n \"\"\"\n lineList = get_array(self.casenum, 'branch')\n self.lineList = lineList\n return lineList\n \n def getLineNum(self):\n \"\"\"\n :return: number of lines\n \"\"\"\n self.lineNum = len(self.getList())\n return self.lineNum\n \n def getFBus(self):\n \"\"\"\n :return:list of \"from\" bus numbers\n \"\"\"\n FBus = []\n for e in self.getList():\n FBus.append(e[0])\n self.FBus = FBus\n return self.Bus\n \n def getTBus(self):\n \"\"\"\n :return: list of \"to\" bus numbers\n \"\"\"\n TBus = []\n for e in self.getList():\n TBus.append(e[1])\n self.TBus = TBus\n return self.TBus", "def paint(lines: list):\n for i, line in enumerate(lines):\n if line == '':\n continue\n if re.match(r'\\d{8}', line[:8]):\n lines[i] = paint_date(line)\n elif line[0] == '*':\n lines[i] = paint_accomplishment(line)\n return lines", "def _get_tip_lineage(line):\n fields = line.strip().split('; ')\n tipname = fields[0]\n lineage = fields[4::2]\n percentages = map(percentage, fields[5::2])\n return tipname, lineage, percentages", "def get_line_wavelengths():\n line_wavelengths = OrderedDict() ; line_ratios = OrderedDict()\n \n line_wavelengths['PaB'] = [12821]\n line_ratios['PaB'] = [1.]\n line_wavelengths['Ha'] = [6564.61]\n line_ratios['Ha'] = [1.]\n line_wavelengths['Hb'] = [4862.68]\n line_ratios['Hb'] = [1.]\n line_wavelengths['Hg'] = [4341.68]\n line_ratios['Hg'] = [1.]\n line_wavelengths['Hd'] = [4102.892]\n line_ratios['Hd'] = [1.]\n \n line_wavelengths['OIII-4363'] = [4364.436]\n line_ratios['OIII-4363'] = [1.]\n line_wavelengths['OIII'] = [5008.240, 4960.295]\n line_ratios['OIII'] = [2.98, 1]\n \n # Split doublet, if needed\n line_wavelengths['OIII4959'] = [4960.295]\n line_ratios['OIII4959'] = [1]\n line_wavelengths['OIII5007'] = [5008.240]\n line_ratios['OIII5007'] = [1]\n \n line_wavelengths['OII'] = [3727.092, 3729.875]\n line_ratios['OII'] = [1, 1.] \n \n line_wavelengths['OI-6302'] = [6302.046, 6363.67]\n line_ratios['OI-6302'] = [1, 0.33]\n\n line_wavelengths['NeIII'] = [3869]\n line_ratios['NeIII'] = [1.]\n line_wavelengths['NeV'] = [3346.8]\n line_ratios['NeV'] = [1.]\n line_wavelengths['NeVI'] = [3426.85]\n line_ratios['NeVI'] = [1.]\n \n line_wavelengths['SIII'] = [9068.6, 9530.6][::-1]\n line_ratios['SIII'] = [1, 2.44][::-1]\n \n # Split doublet, if needed\n line_wavelengths['SIII9068'] = [9068.6]\n line_ratios['SIII9068'] = [1]\n line_wavelengths['SIII9531'] = [9530.6]\n line_ratios['SIII9531'] = [1]\n \n line_wavelengths['SII'] = [6718.29, 6732.67]\n line_ratios['SII'] = [1., 1.] 
\n \n line_wavelengths['HeII'] = [4687.5]\n line_ratios['HeII'] = [1.]\n line_wavelengths['HeI-5877'] = [5877.2]\n line_ratios['HeI-5877'] = [1.]\n line_wavelengths['HeI-3889'] = [3889.5]\n line_ratios['HeI-3889'] = [1.]\n line_wavelengths['HeI-1083'] = [10830.]\n line_ratios['HeI-1083'] = [1.]\n \n line_wavelengths['MgII'] = [2799.117]\n line_ratios['MgII'] = [1.]\n \n line_wavelengths['CIV-1549'] = [1549.480]\n line_ratios['CIV-1549'] = [1.]\n line_wavelengths['CIII-1908'] = [1908.734]\n line_ratios['CIII-1908'] = [1.]\n line_wavelengths['OIII-1663'] = [1665.85]\n line_ratios['OIII-1663'] = [1.]\n line_wavelengths['HeII-1640'] = [1640.4]\n line_ratios['HeII-1640'] = [1.]\n \n line_wavelengths['NII'] = [6549.86, 6585.27]\n line_ratios['NII'] = [1., 3]\n line_wavelengths['NIII-1750'] = [1750.]\n line_ratios['NIII-1750'] = [1.]\n line_wavelengths['NIV-1487'] = [1487.]\n line_ratios['NIV-1487'] = [1.]\n line_wavelengths['NV-1240'] = [1240.81]\n line_ratios['NV-1240'] = [1.]\n\n line_wavelengths['Lya'] = [1215.4]\n line_ratios['Lya'] = [1.]\n \n line_wavelengths['Lya+CIV'] = [1215.4, 1549.49]\n line_ratios['Lya+CIV'] = [1., 0.1]\n \n line_wavelengths['Ha+SII'] = [6564.61, 6718.29, 6732.67]\n line_ratios['Ha+SII'] = [1., 1./10, 1./10]\n line_wavelengths['Ha+SII+SIII+He'] = [6564.61, 6718.29, 6732.67, 9068.6, 9530.6, 10830.]\n line_ratios['Ha+SII+SIII+He'] = [1., 1./10, 1./10, 1./20, 2.44/20, 1./25.]\n\n line_wavelengths['Ha+NII+SII+SIII+He'] = [6564.61, 6549.86, 6585.27, 6718.29, 6732.67, 9068.6, 9530.6, 10830.]\n line_ratios['Ha+NII+SII+SIII+He'] = [1., 1./(4.*4), 3./(4*4), 1./10, 1./10, 1./20, 2.44/20, 1./25.]\n \n line_wavelengths['OIII+Hb'] = [5008.240, 4960.295, 4862.68]\n line_ratios['OIII+Hb'] = [2.98, 1, 3.98/6.]\n \n line_wavelengths['OIII+Hb+Ha'] = [5008.240, 4960.295, 4862.68, 6564.61]\n line_ratios['OIII+Hb+Ha'] = [2.98, 1, 3.98/10., 3.98/10.*2.86]\n\n line_wavelengths['OIII+Hb+Ha+SII'] = [5008.240, 4960.295, 4862.68, 6564.61, 6718.29, 6732.67]\n line_ratios['OIII+Hb+Ha+SII'] = [2.98, 1, 3.98/10., 3.98/10.*2.86*4, 3.98/10.*2.86/10.*4, 3.98/10.*2.86/10.*4]\n\n line_wavelengths['OIII+OII'] = [5008.240, 4960.295, 3729.875]\n line_ratios['OIII+OII'] = [2.98, 1, 3.98/4.]\n \n line_wavelengths['OII+Ne'] = [3729.875, 3869]\n line_ratios['OII+Ne'] = [1, 1./5]\n \n return line_wavelengths, line_ratios", "def lines():\n line_dict = {}\n #\n line_dict['ArI'] = 2**0\n line_dict['HgI'] = 2**1\n line_dict['KrI'] = 2**2\n line_dict['NeI'] = 2**3\n line_dict['XeI'] = 2**4\n line_dict['CdI'] = 2**5\n line_dict['ZnI'] = 2**6\n line_dict['HeI'] = 2**7\n line_dict['OH_R24000'] = 2**8\n line_dict['OH_triplespec'] = 2**9\n line_dict['CuI'] = 2**10\n line_dict['ArII'] = 2**11\n line_dict['OH_XSHOOTER'] = 2**12\n line_dict['OH_GNIRS'] = 2**13\n line_dict['OH_NIRES'] = 2**14\n line_dict['ThAr_XSHOOTER_VIS'] = 2**15\n line_dict['OH_GMOS'] = 2**16\n line_dict['OH_MODS'] = 2**17\n line_dict['ThAr_MagE'] = 2**18 # R=4100\n line_dict['OH_FIRE_Echelle'] = 2**19 # R=6000\n line_dict['Ar_IR_GNIRS'] = 2**20 # R=6000\n line_dict['FeI'] = 2**21\n line_dict['FeII'] = 2**22\n line_dict['UNKNWN'] = 2**23\n line_dict['Ar_IR_MOSFIRE'] = 2 ** 24\n line_dict['Ne_IR_MOSFIRE'] = 2 ** 25\n line_dict['OH_MOSFIRE_Y'] = 2 ** 26\n line_dict['OH_MOSFIRE_J'] = 2 ** 27\n line_dict['OH_MOSFIRE_H'] = 2 ** 28\n line_dict['OH_MOSFIRE_K'] = 2 ** 29\n line_dict['ThAr_XSHOOTER_UVB'] = 2**30\n #\n return line_dict", "def get_compound_properties(path):\n filepointer = open(path)\n charge = None\n NE = None\n E_HF = None\n dipole = None\n 
read_dipole = False\n for line in filepointer:\n if read_dipole:\n read_dipole = False\n dipole = [float(value) for value in line.split(' ') if '.' in value]\n dipole = np.linalg.norm(dipole)\n elif 'Charge' in line and not charge:\n charge = line.split(' ')[-1].rstrip('\\n')\n elif 'Number of electrons' in line and not NE:\n NE = line.split(' ')[-1].rstrip('\\n')\n elif 'Total Energy' in line and not E_HF:\n E_HF = line.split(' ')[-1].rstrip('\\n')\n elif 'Dipole Moment' in line and not dipole:\n read_dipole = True\n if charge and NE and E_HF and dipole:\n break\n return [charge, NE, dipole, E_HF]", "def _parse_line(self, line):\n with open(self._manifest.path, 'r') as manifest_file:\n if isinstance(line, str):\n assert line in self.BASE_INFORMATION.keys(), \\\n 'An attempt to get non-existent information from the manifest'\n for _ in range(self.BASE_INFORMATION[line]):\n fline = manifest_file.readline()\n return json.loads(fline)[line]\n else:\n assert self._index, 'No prepared index'\n offset = self._index[line]\n manifest_file.seek(offset)\n properties = manifest_file.readline()\n parsed_properties = ImageProperties(json.loads(properties))\n self._json_item_is_valid(**parsed_properties)\n return parsed_properties", "def map_data(self, data, properties):\n # look for width\n if 'width' in properties:\n self.width = data[properties.index('width')]\n self.options.append('line width=' + str(self.width))\n # look for tint\n if 'tint' in properties:\n self.tint = data[properties.index('tint')]\n color = self.tint_color + \"!\" + str(self.tint) + \"!\" + self.base_color\n else:\n color = self.base_color\n self.options.append('draw=' + color)\n # look for opacity\n if 'opacity' in properties:\n self.opacity = data[properties.index('opacity')]\n self.options.append('opacity=' + str(self.opacity))", "def process_property(prop):\n output = {}\n output['Property'] = prop['PropertyAddress']\n output['Sale date'] = convert_date(prop['DateSold'])\n output['Sale price'] = convert_prices(prop['SalePrice'])\n output['Rates value'] = convert_prices(prop['CapitalValue'])\n return output", "def line_parser(path):\n lines = []\n with open(path, 'r') as input:\n lines = [line.rstrip().split(',') for line in input]\n lines = [\n [[float(x1), float(y1)],\n [float(x2), float(y2)]] \n for x1, y1, x2, y2 in lines]\n return lines", "def update_lineprops(imageprop):\n db = 'sage'\n j = call_responder('sage', 'cvterms?cv_term=' + imageprop)\n if len(j['cvterm_data']) < 1:\n logger.critical(\"Could not find line property %s\", imageprop)\n sys.exit(-1)\n cursor = READ['unsynced'] % (imageprop, imageprop)\n print(\"Syncing image properties for %s (%s) on %s\" %\n (imageprop, j['cvterm_data'][0]['display_name'], db))\n try:\n CURSOR[db].execute(cursor)\n rows = CURSOR[db].fetchall()\n except MySQLdb.Error as err:\n sql_error(err)\n for row in rows:\n image_id = row[0]\n value = row[1]\n logger.debug(\"Missing %s (%s) for image %s\", imageprop,\n value, image_id)\n cursor2 = WRITE['update'] % (imageprop, value, image_id)\n logger.debug(cursor2)\n try:\n CURSOR[db].execute(cursor2)\n except MySQLdb.Error as err:\n logger.error(\"Could not update row in image_data_mv\")\n sql_error(err)\n COUNT['update'] += 1\n if ARG.TRIGGER:\n logger.debug(WRITE['refresh'] % (image_id))\n try:\n CURSOR[db].execute(WRITE['refresh'] % (image_id))\n except MySQLdb.Error as err:\n logger.error(\"Could not update rows in image_property\")\n sql_error(err)\n COUNT['triggered'] += CURSOR[db].rowcount\n cursor = READ['deleted'] % (imageprop, 
imageprop, imageprop)\n try:\n CURSOR[db].execute(cursor)\n rows = CURSOR[db].fetchall()\n except MySQLdb.Error as err:\n sql_error(err)\n for row in rows:\n logger.warning(\"Image ID %s has a %s (%s) in image_data_mv, but not in image_property\", row[0], imageprop, row[1])\n if ARG.WRITE:\n CONN[db].commit()\n print(\"Unsynced records: %d\" % (len(rows)))\n print(\"Updated records: %d\" % (COUNT['update']))\n print(\"Triggered updates: %d\" % (COUNT['triggered']))", "def points (p, line: str) -> list:\n direction = line [0]\n steps = list (range (1, 1 + int (F.tail (line))))\n return F.map (point (p, direction)) (steps)", "def __init__(self, model, line, segments = None,head_target = 0,\r\n variables = [], priors=[]):\r\n\r\n import numpy as np\r\n from scipy.interpolate import interp1d\r\n import copy\r\n \r\n # Append this element to the specified model\r\n self.model = model\r\n model.elementlist.append(self)\r\n model.linear_solver = True\r\n\r\n # ---------------------------------------------------------------------\r\n # Subdivide the provided no flow boundary into segments pieces\r\n \r\n # Complexify the line, if it wasn't already complex\r\n line = self.complexify(line)\r\n \r\n # The subdivision algorith requires the line coordinates as a real N-by-2 matrix\r\n line = np.column_stack((\r\n np.real(line)[:,np.newaxis],\r\n np.imag(line)[:,np.newaxis]))\r\n \r\n self.line_raw = copy.copy(line)\r\n if segments is None:\r\n self.segments = line.shape[0]-1\r\n else:\r\n self.segments = segments\r\n \r\n if self.segments < self.line_raw.shape[0]-1:\r\n raise Exception('Prescribed number of line segments '+str(self.segments)+\" mustn't be smaller than base number of segments \"+str(line.shape[0]-1)+'.')\r\n \r\n if self.segments > self.line_raw.shape[0]-1:\r\n \r\n # Subdivide the line\r\n self.line = self.subdivide_line(line,self.segments)\r\n self.line_c = self.line[:,0] + 1j*self.line[:,1]\r\n \r\n else:\r\n \r\n self.line = self.line_raw.copy()\r\n self.line_c = self.line[:,0] + 1j*self.line[:,1]\r\n \r\n # Also get the normal vector components to each segment\r\n self.line_nvec = self.line[:,1] - 1j*self.line[:,0]\r\n self.line_nvec = self.line_nvec/np.abs(self.line_nvec)\r\n\r\n # ---------------------------------------------------------------------\r\n \r\n # Get strength parameters for each vertex\r\n self.strength = np.ones(self.segments)\r\n \r\n \r\n self.zc = []\r\n self.segment_nvec = []\r\n self.L = []\r\n \r\n for seg in range(self.segments):\r\n \r\n self.zc += [(self.line_c[seg]+self.line_c[seg+1])/2]\r\n \r\n # Calculate the normal vector to this segment\r\n self.segment_nvec += [(self.line_c[seg]-self.line_c[seg+1])]\r\n self.segment_nvec[-1]= [np.imag(self.segment_nvec[-1])-1j*np.real(self.segment_nvec[-1])]\r\n \r\n self.L += [np.abs(self.line_c[seg+1] - self.line_c[seg])]\r\n \r\n self.zc = np.asarray(self.zc)\r\n \r\n # Extract target variables\r\n self.variables = variables\r\n self.priors = priors\r\n \r\n self.L = np.asarray(self.L)\r\n \r\n \r\n # Check if the prior matches the number of parameters\r\n if len(self.priors) != len(self.variables):\r\n raise Exception('Number of priors must match number of unknown variables. 
Number of priors: '+str(self.priors)+' / Number of unknown variables: '+str(len(self.variables)))\r\n \r\n # Go through all elements\r\n if len(self.variables) > 0:\r\n # There are some model variables specified\r\n for idx,var in enumerate(self.variables):\r\n self.model.num_params += 1\r\n exec(\"self.model.params += [self.%s]\" % var)\r\n self.model.priors += [self.priors[idx]]\r\n self.model.variables += [var]\r\n if 'name' in list(self.priors[idx].keys()):\r\n self.model.param_names += [self.priors[idx]['name']] \r\n else: \r\n self.model.param_names += ['unknown']", "def line(x0: float, y0: float, x1: float, y1: float) -> LineCollection:\n return LineCollection([(complex(x0, y0), complex(x1, y1))])", "def IntersectWithLine(self, , , p_float_6, p_float_7, p_float=..., p_float=..., p_float=..., *args, **kwargs):\n ...", "def do_Promo_line_parse (Promo_line, line_number, filehash) :\n result = [filehash,\n line_number,\n Promo_line[0:8].strip(),\n Promo_line[9:13].strip(),\n Promo_line[14:19].strip(),\n Promo_line[20:26].strip().lstrip('0'), # Spec indicates numerical field\n Promo_line[27:30].strip().lstrip('0'), # Spec indicates numerical field\n Promo_line[31:40].strip(),\n Promo_line[41:49].strip(),\n Promo_line[50:].strip()\n ]\n return result\n # Having the line number passed in is ugly, but kind of works :/\n # Having all the field extraction explicit is kind of ugly too...\n # We're using the hash here to link? Yeah, that's because Python\n # doesn't know what the autonumbered load table is up to in the\n # DB when it starts to coalesce the raw files together.", "def parse_line(self, atline: List, list_of_lines: List, part: PART, afix: AFIX, resi: RESI) -> None:\n uvals = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]\n self.name = atline[0][:4] # Atom names are limited to 4 characters\n for n, u in enumerate(atline[6:12]):\n uvals[n] = float(u)\n self.uvals_orig = uvals[:]\n self.set_uvals(uvals)\n self._line_numbers = list_of_lines\n self.part = part\n self.afix = afix\n self.resi = resi\n self._get_part_and_occupation(atline)\n self.x, self.y, self.z = self._get_atom_coordinates(atline)\n self.xc, self.yc, self.zc = self._cell.o * Array(self.frac_coords)\n if abs(self.uvals[1]) > 0.0 and self.uvals[2] == 0.0 and self.shx.hklf: # qpeaks are always behind hklf\n self.peak_height = uvals[1]\n self.qpeak = True\n if self.shx.end: # After 'END' can only be Q-peaks!\n self.qpeak = True\n self.sfac_num = int(atline[1])\n self.shx.fvars.set_fvar_usage(self.fvar)\n self.Ucif = self.set_ucif(uvals)\n # TODO: I am still unsure if this these are correct values:\n # self.Ustar = self.Ucif * self._cell.N * self._cell.N.T\n # self.Ucart = self.Ustar * self._cell.o * self._cell.o.T\n # self.Ueq = self.set_ueq(uvals)\n # self.Uiso = self.Ueq\n # transformed_u = self.transform_u_by_symmetry(2)\n # print(self.name, [round(x, 6) for x in transformed_u], self.frac_coords)", "def isline(l):\n return isinstance(l,list) and len(l) == 2 \\\n and ispoint(l[0]) and ispoint(l[1])", "def Flowline_CSV(filename, nlines=None, has_width=False, flip_order=True):\n \n f = open(filename,'r')\n \n header = f.readline() #header line\n hdr = header.strip('\\r\\n')\n keys = hdr.split(',') #get names of variables\n #keys[-1] = keys[-1].strip('\\r\\n')\n \n data = {k : [] for k in keys} #end of line has hidden characters, so 'point_m' does not get read\n #data['Line number'] = []\n data['Length_ID'] = collections.OrderedDict() #new dictionary that counts how many points (i.e. lines of file) are in each flowline. 
Must be ordered for later iteration!\n #if nlines is not None:\n # data['Lineslist'] = [[] for k in range(nlines)] \n data['Lineslist'] = [] #initialize as empty list\n \n lines = f.readlines()\n f.close()\n \n temp = []\n j = 0\n for i,l in enumerate(lines):\n linstrip = l.strip('\\r\\n')\n parts = linstrip.split(',')\n \n #data['Line-number'].append(parts[0])\n #data['x-coord'].append(parts[1])\n #data['y-coord'].append(parts[2])\n \n x_coord = float(parts[1])\n y_coord = float(parts[2])\n \n if parts[0] not in data['Length_ID'].keys(): #finding out where lines separate \n temp = []\n data['Lineslist'].append(temp) #initialize new empty array that can be modified in-place later\n data['Length_ID'][parts[0]] = 1\n j+=1 \n else:\n data['Length_ID'][parts[0]] += 1\n #if xbounds[0]<x_coord<xbounds[1]: #taking out values outside of map area\n # if ybounds[0]<y_coord<ybounds[1]: \n \n if has_width:\n width = float(parts[3])\n temp.append((x_coord, y_coord, width))\n else:\n temp.append((x_coord, y_coord))\n \n data['Lineslist'][j-1] = np.array(temp) #need to modify an existing array rather than append to keep correct indexing\n\n #data['Lineslist'][j] = np.array(temp) \n \n if nlines is None:\n nlines = len(data['Length_ID'].keys())\n \n if flip_order: \n centrelines_list = [np.array(data['Lineslist'][j])[::-1] for j in range(nlines)] #making arrays, reversed to start at terminus rather than peak\n else:\n centrelines_list = [np.array(data['Lineslist'][j]) for j in range(nlines)] # arrays already start at terminus\n\n \n return centrelines_list", "def _doPerLine(self, lines, function):\n\t\trtnLines = []\n\t\tfor line in lines:\n\t\t\trtnLines.append(function(line))\n\t\treturn rtnLines", "def calculate_derived_properties(slice_df, r_keys):\n symm_tensor_array = slice_df[r_keys].to_numpy()\n reynolds_stress = symmetric_tensor_array_to_tensor_stack(symm_tensor_array)\n tke = calculate_tke(reynolds_stress)\n anistropy_tensor = calculate_anisotropy_tensor(reynolds_stress)\n second_invariant = calculate_second_invariant(anistropy_tensor)*(-1)\n third_invariant = calculate_third_invariant(anistropy_tensor)\n df = slice_df.copy()\n derived_properties_keys = [\"k/U_inf^2\", \"-II\", \"III\"]\n derived_properties_arrays = [tke, second_invariant, third_invariant]\n for key, array in zip(derived_properties_keys, derived_properties_arrays):\n df[key] = array\n return df", "def mapper(self, _, line):\n linea = line.split()\n causa, fallecidos = linea[0], linea[1]\n fallecidos_f = float(fallecidos)\n \n yield causa, (1, round(fallecidos_f))", "def get_same_type_lines(self, tile_grid, min_count=3):\n all_line_members = []\n\n # Check for vertical lines\n for x in range(COLS_COUNT):\n same_type_list = []\n last_tile_type = None\n for y in range(ROWS_COUNT):\n tile_type, sprite = tile_grid[x, y]\n if last_tile_type == tile_type:\n same_type_list.append((x, y))\n # Line end because type changed or edge reached\n if tile_type != last_tile_type or y == ROWS_COUNT - 1:\n if len(same_type_list) >= min_count:\n all_line_members.extend(same_type_list)\n last_tile_type = tile_type\n same_type_list = [(x, y)]\n\n # Check for horizontal lines\n for y in range(ROWS_COUNT):\n same_type_list = []\n last_tile_type = None\n for x in range(COLS_COUNT):\n tile_type, sprite = tile_grid[x, y]\n if last_tile_type == tile_type:\n same_type_list.append((x, y))\n # Line end because of type change or edge reached\n if tile_type != last_tile_type or x == COLS_COUNT - 1:\n if len(same_type_list) >= min_count:\n 
all_line_members.extend(same_type_list)\n last_tile_type = tile_type\n same_type_list = [(x, y)]\n\n # Remove duplicates\n all_line_members = list(set(all_line_members))\n return all_line_members", "def get_properties():", "def load_regular_coord_by_line(line):\n elems = line.split('\\t')\n if len(elems) < 4:\n elems = line.split(',')\n if len(elems) < 4:\n elems = line.split(' ')\n\n [X1, Y1, W, H] = elems[0:4]\n coord_regular = [int(X1), int(Y1), int(W), int(H)]\n return coord_regular", "def process_hough_lines(lines, min_slope=0.4, max_slope =0.7):\n processed_lines = []\n if len(lines) > 0:\n for line in lines:\n for x1, y1, x2, y2 in line:\n # Skip vertical/horizontal lines\n if(x2 == x1 or y2 == y1):\n continue\n # Check slope of lines\n calc_slope = abs((y2 - y1) / (x2 - x1))\n if calc_slope > min_slope and calc_slope < max_slope:\n processed_lines.append(line)\n\n return processed_lines", "def compute_smile_prop(smile):\n\n def compute_for_one(smi):\n\n \"\"\"\n Computes properties for a single smile sequence\n\n Inputs \n smi (str) : A sequence of smile characters\n Outputs\n prop (list): Computed properties, \"Not exist\" if properties cannot be computed\n \"\"\"\n\n try:\n mol=Chem.MolFromSmiles(smi) \n prop = [Descriptors.ExactMolWt(mol), Descriptors.MolLogP(mol), QED.qed(mol)]\n except:\n prop = 'Not exist!'\n return prop\n\n \n if isinstance(smile, (list, tuple)):\n all_list = []\n for s in list(smile):\n all_list.append(compute_for_one(s))\n props = all_list\n\n elif isinstance(smile, str):\n props = compute_for_one(smile) \n else:\n print(f\"Input must be a string or list, Instead got {type(smile)}\")\n \n return props", "def _mass_properties_no_xref(model, elements, masses, reference_point, is_cg): # pragma: no cover\n mass = 0.\n cg = array([0., 0., 0.])\n I = array([0., 0., 0., 0., 0., 0., ])\n for pack in [elements, masses]:\n for element in pack:\n try:\n p = element.Centroid_no_xref(model)\n except:\n #continue\n raise\n\n try:\n m = element.Mass_no_xref(model)\n except:\n # PLPLANE\n pid_ref = model.Property(element.pid)\n if pid_ref.type == 'PSHELL':\n model.log.warning('p=%s reference_point=%s type(reference_point)=%s' % (\n p, reference_point, type(reference_point)))\n raise\n model.log.warning(\"could not get the inertia for element/property\\n%s%s\" % (\n element, element.pid_ref))\n continue\n (x, y, z) = p - reference_point\n x2 = x * x\n y2 = y * y\n z2 = z * z\n I[0] += m * (y2 + z2) # Ixx\n I[1] += m * (x2 + z2) # Iyy\n I[2] += m * (x2 + y2) # Izz\n I[3] += m * x * y # Ixy\n I[4] += m * x * z # Ixz\n I[5] += m * y * z # Iyz\n mass += m\n cg += m * p\n\n if mass:\n cg /= mass\n\n # only transform if we're calculating the inertia about the cg\n if is_cg:\n xyz_ref = reference_point\n xyz_ref2 = cg\n I = transform_inertia(mass, cg, xyz_ref, xyz_ref2, I)\n return mass, cg, I", "def get_particles_props(self, *props, array=None):\n n = len(props)\n if not isinstance(array, np.ndarray):\n array = self.particles\n if n == 0:\n return []\n elif n == 1:\n key = props[0]\n properties = np.empty(len(array))\n else:\n properties = np.empty((n, len(array)))\n \n for ip, particle in enumerate(array):\n if n == 1:\n properties[ip] = particle[key]\n else:\n for i, key in enumerate(props):\n properties[i, ip] = particle[key]\n return properties", "def vertices_from_lines(lines):\n count = len(lines)\n print(\"Getting vertices 1/3\")\n pb = pbar.ProgressBar(count)\n vertices = []\n# print(\"getting vertices from line\")\n for line in lines:\n pb +=1\n 
vertices.extend(list(line.coords))\n del pb\n return [Point(p) for p in set(vertices)]", "def f(line):\n # get positive index pairs\n pos_pair, sen_len = get_pairs(line)\n\n # total data\n line_anchor, line_label, line_cls = [], [], []\n sample_indexes = []\n for word_idx in range(sen_len):\n\n # get anchors and labels for each line\n anchors, labels, cls, s_indexes = k_anchors(pos_pair, sen_len, word_idx)\n\n # append word reuslt\n line_anchor += anchors\n line_label += labels\n line_cls += cls\n sample_indexes += s_indexes\n\n # return total data\n return line_anchor, line_label, line_cls, sample_indexes", "def process_line(line):\n\n name_comp_list = []\n givenname_comp_list = []\n surname_comp_list = []\n geocode_comp_list = []\n locality_comp_list = []\n date1_comp_list = []\n date2_comp_list = []\n\n # Split the line into the basic fields - - - - - - - - - - - - - - - - - - -\n #\n if (config.in_file_type in ['CSV','CSVQ','TAB','TABQ']):\n # Comma or tabulator separated\n try:\n line_list = config.line_parser.parse(line)\n except:\n log_message('CSV line parsing failed with inout: '+line,'err')\n\n if (len(line_list) < config.input_len):\n log_message('Input line does not contain enough fields,' +\\\n 'fill up with empty fields','warn')\n while (len(line_list) < config.input_len):\n line_list.append('')\n\n config.curr_line_list = line_list # Save current line list\n\n # Extract fields into different component lists - - - - - - - - - - - - - -\n #\n if (config.input_component['name'] != []): # Extract name fields\n for i in config.input_component['name']:\n name_comp_list.append(line_list[i])\n\n else: # Extract givenname and surname into separate components - - - - - -\n if (config.input_component['givenname'] != []): # Extract g-name fields\n for i in config.input_component['givenname']:\n givenname_comp_list.append(line_list[i])\n\n if (config.input_component['surname'] != []): # Extract surname fields\n for i in config.input_component['surname']:\n surname_comp_list.append(line_list[i])\n\n if (config.input_component['geocode'] != []): # Extract geocode fields\n for i in config.input_component['geocode']:\n geocode_comp_list.append(line_list[i])\n\n if (config.input_component['locality'] != []): # Extract locality fields\n for i in config.input_component['locality']:\n locality_comp_list.append(line_list[i])\n\n if (config.input_component['date1'] != []): # Extract date1 fields\n for i in config.input_component['date1']:\n date1_comp_list.append(line_list[i])\n\n if (config.input_component['date2'] != []): # Extract date2 fields\n for i in config.input_component['date2']:\n date2_comp_list.append(line_list[i])\n\n elif (config.in_file_type == 'COL'): # Column based input file - - - - - - -\n\n if (len(line) < config.input_len):\n log_message('Input line is not long enough, fill up with spaces','warn')\n line += ' '*(config.input_len-len(line))\n\n if (config.input_component['name'] != []): # Extract name fields\n for (col_start,length) in config.input_component['name']:\n name_comp_list.append(line[col_start,col_start+length])\n\n else: # Extract givenname and surname into separate components - - - - - -\n if (config.input_component['givenname'] != []): # Extract g-name fields\n for (col_start,length) in config.input_component['givenname']:\n givenname_comp_list.append(line[col_start,col_start+length])\n\n if (config.input_component['surname'] != []): # Extract surname fields\n for (col_start,length) in config.input_component['surname']:\n 
surname_comp_list.append(line[col_start,col_start+length])\n\n if (config.input_component['geocode'] != []): # Extract geocode fields\n for (col_start,length) in config.input_component['geocode']:\n geocode_comp_list.append(line[col_start,col_start+length])\n\n if (config.input_component['locality'] != []): # Extract locality fields\n for (col_start,length) in config.input_component['locality']:\n locality_comp_list.append(line[col_start,col_start+length])\n\n if (config.input_component['date1'] != []): # Extract date1 fields\n for (col_start,length) in config.input_component['date1']:\n date1_comp_list.append(line[col_start,col_start+length])\n\n if (config.input_component['date2'] != []): # Extract date2 fields\n for (col_start,length) in config.input_component['date2']:\n date2_comp_list.append(line[col_start,col_start+length])\n\n # elif (config.in_file_type == 'SQL'): # - - - - - - - - - - - - - - - - - -\n\n ################################\n # Add later: SQL database access\n ################################\n\n msg = [' Component basic field lists:', \\\n ' Name: '+str(name_comp_list), \\\n ' Given name: '+str(givenname_comp_list), \\\n ' Surname: '+str(surname_comp_list), \\\n ' Geocode: '+str(geocode_comp_list), \\\n ' Locality: '+str(locality_comp_list), \\\n ' Date1: '+str(date1_comp_list), \\\n ' Date2: '+str(date2_comp_list)]\n log_message(msg,'v2')\n\n name_comp = ''\n givenname_comp = ''\n surname_comp = ''\n geocode_comp = ''\n locality_comp = ''\n date1_comp = ''\n date2_comp = ''\n\n # Now clean and then concatenate component lists into strings - - - - - - - -\n #\n if (name_comp_list != []): # Name component\n name_comp = name_comp_list[0] # Start with first field in list\n\n for f in name_comp_list[1:]: # Loop over following fields (if any)\n if (f != ''):\n if (config.input_space_sep['name'] == 1):\n sep = ' ' # Set separator to space between fields\n else:\n sep = '' # No space between fields\n\n # Check field spilling only if space separator is set to ' ' \n #\n if (sep == ' ') and (config.input_check_spilling['name'] == 1):\n sep = check_field_spill(name_comp, f)\n\n name_comp = name_comp+sep+f # Append separator and field\n\n if (givenname_comp_list != []): # Givenname component - - - - - - - - - - -\n givenname_comp = givenname_comp_list[0] # Start with first field in list\n\n for f in givenname_comp_list[1:]: # Loop over following fields (if any)\n if (f != ''):\n if (config.input_space_sep['givenname'] == 1):\n sep = ' ' # Set separator to space between fields\n else:\n sep = '' # No space between fields\n\n # Check field spilling only if space separator is set to ' ' \n #\n if (sep == ' ') and (config.input_check_spilling['givenname'] == 1):\n sep = check_field_spill(givenname_comp, f)\n\n givenname_comp = givenname_comp+sep+f # Append separator and field\n\n if (surname_comp_list != []): # Surname component - - - - - - - - - - - - -\n surname_comp = surname_comp_list[0] # Start with first field in list\n\n for f in surname_comp_list[1:]: # Loop over following fields (if any)\n if (f != ''):\n if (config.input_space_sep['surname'] == 1):\n sep = ' ' # Set separator to space between fields\n else:\n sep = '' # No space between fields\n\n # Check field spilling only if space separator is set to ' ' \n #\n if (sep == ' ') and (config.input_check_spilling['surname'] == 1):\n sep = check_field_spill(surname_comp, f)\n\n surname_comp = surname_comp+sep+f # Append separator and field\n\n if (geocode_comp_list != []): # Geocode component - - - - - - - - - - - - -\n 
geocode_comp = geocode_comp_list[0] # Start with first field in list\n\n for f in geocode_comp_list[1:]: # Loop over following fields (if any)\n if (f != ''):\n if (config.input_space_sep['geocode'] == 1):\n sep = ' ' # Set separator to space between fields\n else:\n sep = '' # No space between fields\n\n # Check field spilling only if space separator is set to ' ' \n #\n if (sep == ' ') and (config.input_check_spilling['geocode'] == 1):\n sep = check_field_spill(geocode_comp, f)\n\n geocode_comp = geocode_comp+sep+f # Append separator and field\n\n if (locality_comp_list != []): # Locality component - - - - - - - - - - - -\n locality_comp = locality_comp_list[0] # Start with first field in list\n\n for f in locality_comp_list[1:]: # Loop over following fields (if any)\n if (f != ''):\n if (config.input_space_sep['locality'] == 1):\n sep = ' ' # Set separator to space between fields\n else:\n sep = '' # No space between fields\n\n # Check field spilling only if space separator is set to ' ' \n #\n if (sep == ' ') and (config.input_check_spilling['locality'] == 1):\n sep = check_field_spill(locality_comp, f)\n\n locality_comp = locality_comp+sep+f # Append separator and field\n\n if (date1_comp_list != []): # Date1 component - - - - - - - - - - - - - - -\n date1_comp = date1_comp_list[0] # Start with first field in list\n\n for f in date1_comp_list[1:]: # Loop over following fields (if any)\n if (f != ''):\n if (config.input_space_sep['date1'] == 1):\n sep = ' ' # Set separator to space between fields\n else:\n sep = '' # No space between fields\n\n # Check field spilling only if space separator is set to ' ' \n #\n if (sep == ' ') and (config.input_check_spilling['date1'] == 1):\n if (date1_comp[-1] != ' ') and (f[0] != ' '):\n tmp_list0 = date1_comp.split()\n tmp_list1 = f.split()\n check_word = tmp_list0[-1]+tmp_list1[0]\n\n if (check_word in ['jan','feb','mar','apr','may','jun','jul','aug', \\\n 'sep','oct','nov','dec','january','february','march','april', \\\n 'may','june','july','august','september','october','november', \\\n 'december']):\n\n sep = '' # Set separator to no space\n msg = ' Correct date1 word spilling: \"'+date1_comp+'\",\"'+f+'\"'\n log_message(msg,'v1')\n\n date1_comp = date1_comp+sep+f # Append separator and field\n\n if (date2_comp_list != []): # Date2 component - - - - - - - - - - - - - - -\n date2_comp = date2_comp_list[0] # Start with first field in list\n\n for f in date2_comp_list[1:]: # Loop over following fields (if any)\n if (f != ''):\n if (config.input_space_sep['date2'] == 1):\n sep = ' ' # Set separator to space between fields\n else:\n sep = '' # No space between fields\n\n # Check field spilling only if space separator is set to ' ' \n #\n if (sep == ' ') and (config.input_check_spilling['date2'] == 1):\n if (date2_comp[-1] != ' ') and (f[0] != ' '):\n tmp_list0 = date1_comp.split()\n tmp_list1 = f.split()\n check_word = tmp_list0[-1]+tmp_list1[0]\n\n if (check_word in ['jan','feb','mar','apr','may','jun','jul','aug', \\\n 'sep','oct','nov','dec','january','february','march','april', \\\n 'may','june','july','august','september','october','november', \\\n 'december']):\n\n sep = '' # Set separator to no space\n msg = ' Correct date1 word spilling: \"'+date1_comp+'\",\"'+f+'\"'\n log_message(msg,'v1')\n\n date2_comp = date2_comp+sep+f # Append separator and field\n\n # Check if name component is given or givenname and surname separately - - -\n #\n if (config.input_component['givenname'] != []) or \\\n (config.input_component['surname'] != []):\n 
name_comp = [givenname_comp, surname_comp]\n\n msg = [' Components:', \\\n ' Name: \"'+str(name_comp)+'\"', \\\n ' Geocode: \"'+geocode_comp+'\"', \\\n ' Locality: \"'+locality_comp+'\"', \\\n ' Date1: \"'+date1_comp+'\"', \\\n ' Date2: \"'+date2_comp+'\"']\n log_message(msg,'v1')\n\n return [name_comp, geocode_comp, locality_comp, date1_comp, date2_comp]", "def line_picker(p1, mouseevent):\n if mouseevent.xdata is None: return False, dict()\n xdata = p1.get_xdata()\n ydata = p1.get_ydata()\n maxd = 0.5\n d = np.sqrt((xdata-mouseevent.xdata)**2.)\n\n ind = np.nonzero(np.less_equal(d, maxd))\n if len(ind):\n pickx = np.take(xdata, ind)\n picky = np.take(ydata, ind)\n props = dict(ind=ind, pickx=pickx, picky=picky)\n i = pickx-1\n if i.size!=0:\n print '*'*50\n try:\n print 'PDB file {0}\\nExperiment type: {1}'.format(files[i],k)\n except TypeError:\n pass\n print 'clashscore_probe: {0:.4f}\\nwithout_sym_nb_clashscore: {1:.4f}\\ntotal_nb_clashscore: {2:.4f}'.format(y1[i],y2[i],y3[i])\n return True, {}\n else:\n return False, dict()", "def data_shapes(self):", "def actualize_properties(self):\n\n\t\tself.a_max_coord = np.array((\t\t\t# Computes the maximal coordinates\n\t\t\tmax(self.a_atoms[\"coord_x\"]),\t\t# For the x axis\n\t\t\tmax(self.a_atoms[\"coord_y\"]),\t\t# For the y axis\n\t\t\tmax(self.a_atoms[\"coord_z\"])\t\t# For the z axis\n\t\t))\n\t\tself.a_min_coord = np.array((\t\t\t# Computes the minimal coordinates\n\t\t\tmin(self.a_atoms[\"coord_x\"]),\t\t# For the x axis\n\t\t\tmin(self.a_atoms[\"coord_y\"]),\t\t# For the y axis\n\t\t\tmin(self.a_atoms[\"coord_z\"])\t\t# For the z axis\n\t\t))", "def parse_triload(self, lines_str):\n self.clean_load_buffer()\n self.all_loads_list = re.findall(r'object\\s*triplex_load.*?{(.*?)}',lines_str,flags=re.DOTALL)\n\n for cur_obj_str in self.all_loads_list:\n cur_obj_s_list = re.findall(r'.*constant_power_12\\s*(.*?);',cur_obj_str,flags=re.DOTALL)\n\n cur_obj_p_sum = 0\n for cur_ph_s_str in cur_obj_s_list:\n cur_obj_p_sum += complex(cur_ph_s_str).real\n\n self.all_loads_p_list.append(cur_obj_p_sum)", "def __split_line_in_molspec(self, line):\n frg = 0\n if line.find('=') != -1:\n myline = line.replace('(',' ').replace(')',' ').replace('=',' ')\n items = myline.split()\n atomname = items[0]\n frg = int(items[2])\n coord = [ float(items[3]), float(items[4]), float(items[5]) ]\n else:\n myline = line\n items = myline.split()\n if len(items) > 4:\n frg = int(items[4])\n atomname = items[0]\n coord = [ float(items[1]), float(items[2]), float(items[3]) ]\n rec = {'name': atomname, 'coord': coord, 'frg': frg}\n return rec", "def line_util(self, **kwargs):\n outputs = self._util(**kwargs)\n return outputs", "def __parseLine(line):\n\n # extract name\n name_len = line.index(\" \")\n name = line[:name_len]\n line = line[name_len + 3:]\n\n # array-ize 'electron' val\n elec_pos = line.index(\"electron\") + 9\n line = line[:elec_pos] + '[' + line[elec_pos:].replace(' ', ',') + ']'\n\n # quote 'small' val\n line = line.replace(' ', '')\n line = line.replace('small:', 'small:\"').replace(',molar', '\",molar')\n\n # quote all keys\n for i in [\"position\", \"number\", \"small\", \"molar\", \"electron\"]:\n line = line.replace(i, '\"' + i + '\"')\n\n return eval('{\"name\":\"' + name + '\",' + line + '}')", "def plotProperty(self, x, y, z = [], idx = None, col = 1, row = 1, N = 1, ax = None,\\\n save = False, dpi = 100, format = \"pdf\", verbose = 1, handle = False,\\\n translation = None, title = None, other = None, ab = [],\\\n m = \"o\", ms = 2, leg = True, 
ylim = None, xlim = None, xscale = \"linear\",\\\n yscale = \"linear\", **kwargs):\n\n if idx is None: idx = np.arange(self.atoms.shape[0])\n if translation is None: translation = [0]\n if isinstance(translation, (int, np.integer)): translation = [translation]\n \n if type(x) == str: x = [x]\n if type(y) == str: y = [y]\n if type(z) == str: z = [z]\n if len(x) != len(y):\n string = \"Length x (%i) and y (%i) must be the same\" % (len(x), len(y))\n ut.infoPrint(string)\n return\n\n if len(z) > 0 and len(x) != len(z):\n string = \"Length x (%i) and y (%i) and z (%i) must be the same\"\\\n % (len(x), len(y), len(z))\n ut.infoPrint(string)\n return\n\n m = kwargs.pop(\"marker\", m)\n ls = kwargs.pop(\"linestyle\", \"none\")\n ms = kwargs.pop(\"markersize\", ms)\n\n if len(m) == 1: m = m * len(x)\n if isinstance(ab, (int, np.integer)): ab = [ab]\n\n x_data, x_lbl, x_leg = self.getData(idx = idx, var = x, ab = ab, translation = translation,\\\n compact = True, verbose = verbose, other = other)\n y_data, y_lbl, y_leg = self.getData(idx = idx, var = y, ab = ab, translation = translation,\\\n compact = True, verbose = verbose, other = other)\n if len(x_data) != len(y_data): return\n\n if len(z) > 0:\n z_data, z_lbl, z_leg = self.getData(idx = idx, var = z, ab = ab, translation = translation,\\\n compact = True, verbose = verbose, other = other)\n\n if len(x_data) != len(y_data) != len(z_data) or z_data == []: return\n else:\n z_data = None\n\n hP = []\n if not handle:\n hFig = plt.figure()\n hAx = plt.subplot(row, col, N)\n else:\n hAx = ax\n\n if z_data is None:\n\n kwargs.pop(\"vmin\", None)\n kwargs.pop(\"vmax\", None)\n kwargs.pop(\"colormap\", None)\n\n for i in range(len(x_data)):\n\n tP = hAx.plot(x_data[i].T, y_data[i].T, linestyle = ls, marker = m[i],\\\n markersize = ms, **kwargs)\n\n [hP.append(lines) for lines in tP]\n\n if leg:\n ncol = 1\n if len(x_leg) > len(y_leg):\n if len(x_leg) > 5: ncol = 2\n hAx.legend(x_leg, ncol = ncol)\n else:\n if len(y_leg) > 5: ncol = 2\n hAx.legend(y_leg, ncol = ncol)\n\n else:\n zmin = np.min([np.min(i) for i in z_data])\n zmax = np.max([np.max(i) for i in z_data])\n\n cm = kwargs.pop(\"colormap\", \"plasma\")\n cmap = plt.cm.get_cmap(cm)\n vmin = kwargs.pop(\"vmin\", zmin)\n vmax = kwargs.pop(\"vmax\", zmax)\n c = kwargs.pop(\"color\", 'b')\n lw = kwargs.pop(\"linewidth\", 1.2)\n\n\n for i in range(len(x_data)):\n\n if np.ndim(x_data[i]) == 1: x_data[i] = x_data[i][None, :]\n if np.ndim(y_data[i]) == 1: y_data[i] = y_data[i][None, :]\n if np.ndim(z_data[i]) == 1: z_data[i] = z_data[i][None, :]\n\n if (np.shape(z_data[i]) != np.shape(x_data[i])) and\\\n (np.shape(z_data[i]) != np.shape(y_data[i])) and\\\n (z_data[i].shape[0] != 1):\n string = \"Ambiguous z data %s with x %s and y %s\"\\\n % (np.shape(z_data[i]), np.shape(x_data[i]), np.shape(y_data[i]))\n ut.infoPrint(string)\n return\n \n j,k,l = (0, 0, 0)\n for ii, t in enumerate(translation):\n\n tP = hAx.scatter(x_data[i][j, :], y_data[i][k, :], c = z_data[i][l, :],\\\n vmin = vmin, vmax = vmax, cmap = cmap, marker = m[i],\\\n label = \"\", s = ms, linewidth = lw, **kwargs)\n\n hP.append(tP)\n\n if np.shape(x_data[i])[0] > 1: j += 1\n if np.shape(y_data[i])[0] > 1: k += 1\n if np.shape(z_data[i])[0] > 1: l += 1\n\n if leg:\n ncol = 1\n if len(x_leg) > len(y_leg):\n if len(x_leg) > 4: ncol = 2\n hAx.legend(x_leg, ncol = ncol)\n else:\n if len(y_leg) > 4: ncol = 2\n hAx.legend(y_leg, ncol = ncol)\n \n if not handle: plt.colorbar(hP[0], label = z_lbl[0])\n\n if ylim is not None:\n hAx.set_ylim(bottom = 
ylim[0], top = ylim[1])\n if xlim is not None:\n hAx.set_xlim(left = xlim[0], right = xlim[1])\n\n hAx.set_yscale(yscale)\n hAx.set_xscale(xscale)\n hAx.set_xlabel(x_lbl[0])\n hAx.set_ylabel(y_lbl[0])\n if title is None:\n hAx.set_title(self.filename)\n else:\n hAx.set_title(title)\n\n if handle: \n return\n\n \"\"\"Annotating plot marker\"\"\"\n hP[0].set_pickradius(2)\n anP = hAx.plot([], [], marker = 'o', ms = 6, color = 'k', mew = 2, mfc = 'None',\\\n linestyle = 'None')\n\n plt.tight_layout()\n\n \"\"\"Function to allow clickable points to display information\"\"\"\n def click(event):\n if event.inaxes == hAx:\n\n for line in hP:\n cont, ind = line.contains(event)\n if cont:\n break\n\n if cont:\n if z_data is not None:\n x = line.get_offsets()[:, 0]\n y = line.get_offsets()[:, 1]\n else:\n x, y = line.get_data()\n\n xSel = x[ind[\"ind\"]]\n ySel = y[ind[\"ind\"]]\n\n pPos = hAx.transData.transform((xSel, ySel))\n pDist = np.linalg.norm(pPos - [[event.x, event.y]], axis = 1)\n index = ind[\"ind\"][np.argmin(pDist)]\n anP[0].set_data(x[ind[\"ind\"]], y[ind[\"ind\"]])\n for n, i in enumerate(ind[\"ind\"]):\n string = \"Idx: %i (%.4f, %.4f) | Nr Points: %i\"\\\n % (idx[i], x[i], y[i], len(ind[\"ind\"]))\n\n if n == 0: \n print(\"=\" * len(string))\n print(string)\n if n == len(ind[\"ind\"]) - 1: \n print(\"=\" * len(string))\n\n hFig.canvas.draw_idle()\n else:\n anP[0].set_data([], [])\n hFig.canvas.draw_idle()\n\n if save:\n if save is True:\n ut.save_fig(filename = \"PropertyPlot.%s\" % format, format = format,\\\n dpi = dpi, verbose = verbose)\n else:\n ut.save_fig(filename = save, format = format, dpi = dpi,\\\n verbose = verbose)\n plt.close()\n else:\n hFig.canvas.mpl_connect(\"button_release_event\", click)\n plt.show()", "def test_constructor_with_value(self):\n line = D1Line(self.test_line)\n self.assertEqual((line.gender,\n line.event_swimmer_id,\n line.last_name,\n line.first_name,\n line.nick_name,\n line.middle_initial,\n line.uss_num,\n line.team_swimmer_id,\n line.date_of_birth,\n line.age),\n (\"F\",\n 14081,\n \"Reed\",\n \"Laramie\",\n \"\",\n \"J\",\n \"021100LARJREED\",\n 1019,\n datetime.date(2000, 2, 11),\n 9))", "def getLineInformation(line):\n \n pass", "def make_points(self,image,line):\n print(\"This is line inside make_points: \",line)\n try:\n slope, intercept = line\n y1 = int(image.shape[0]) # bottom of the image\n y2 = int(y1*3/5) # slightly lower than the middle\n x1 = int((y1 - intercept)/slope)\n x2 = int((y2 - intercept)/slope)\n return [[x1, y1, x2, y2]]\n except:\n return None", "def updated_fixed_properties(cls, obj):\n out = super(ObjMetaschemaType, cls).updated_fixed_properties(obj)\n # Constrain dependencies for indexes into other elements\n depend_map = {'vertex_index': 'vertices', 'vertex_indices': 'vertices',\n 'texcoord_index': 'texcoords',\n 'normal_index': 'normals'}\n check_depends = {'lines': ['texcoord_index'],\n 'faces': ['texcoord_index', 'normal_index'],\n 'surfaces:vertex_indices': ['texcoord_index', 'normal_index']}\n for e, props in check_depends.items():\n sube = None\n if ':' in e:\n e, sube = e.split(':')\n if not ((e in obj) and isinstance(obj[e], (list, tuple))):\n continue\n req_flags = {k: False for k in props}\n for o in obj[e]:\n if sum(req_flags.values()) == len(props):\n break\n if isinstance(o, dict):\n assert(sube)\n if (((sube not in o) or (not isinstance(o[sube], (list, tuple)))\n or (len(o[sube]) == 0) or (not isinstance(o[sube][0], dict)))):\n continue\n for p in props:\n if p in o[sube][0]:\n req_flags[p] = True\n 
elif isinstance(o, (list, tuple)):\n if (len(o) == 0) or (not isinstance(o[0], dict)):\n continue\n for p in props:\n if p in o[0]:\n req_flags[p] = True\n # Set dependencies\n for p in req_flags.keys():\n if not req_flags[p]:\n continue\n if depend_map[p] not in out['dependencies'][e]:\n out['dependencies'][e].append(depend_map[p])\n # Contrain indices on number of elements refered to\n if ('vertices' in obj) and isinstance(obj['vertices'], (list, tuple)):\n out['definitions']['curve']['properties']['vertex_indices']['items'][\n 'maximum'] = len(obj['vertices']) - 1\n if ('params' in obj) and isinstance(obj['params'], (list, tuple)):\n out['definitions']['curve2D']['items']['maximum'] = len(obj['params']) - 1\n for e in ['line', 'face', 'surface']:\n if e == 'surface':\n iprop = out['definitions'][e]['properties']['vertex_indices'][\n 'items']['properties']\n else:\n iprop = out['definitions'][e]['items']['properties']\n for k, e_depends in depend_map.items():\n if k in iprop:\n if (e_depends in obj) and isinstance(obj[e_depends], (list, tuple)):\n iprop[k]['maximum'] = len(obj[e_depends]) - 1\n return out", "def _diagnostic_meta_properties_renderer(\n cls, result: Optional[ExpectationValidationResult] = None, **kwargs: dict\n ) -> Union[list, List[str], List[list]]:\n\n if not result:\n return []\n custom_property_values = []\n meta_properties_to_render: Optional[dict] = None\n if result and result.expectation_config:\n meta_properties_to_render = result.expectation_config.kwargs.get(\n \"meta_properties_to_render\"\n )\n if meta_properties_to_render:\n for key in sorted(meta_properties_to_render.keys()):\n meta_property = meta_properties_to_render[key]\n if meta_property:\n try:\n # Allow complex structure with . usage\n assert isinstance(\n result.expectation_config, ExpectationConfiguration\n )\n obj = result.expectation_config.meta[\"attributes\"]\n keys = meta_property.split(\".\")\n for i in range(0, len(keys)):\n # Allow for keys with a . 
in the string like {\"item.key\": \"1\"}\n remaining_key = \"\".join(keys[i:])\n if remaining_key in obj:\n obj = obj[remaining_key]\n break\n else:\n obj = obj[keys[i]]\n\n custom_property_values.append([obj])\n except KeyError:\n custom_property_values.append([\"N/A\"])\n return custom_property_values", "def plot_lines(line_list, line_width=1.0):\n \n for line in line_list: \n start_lat, end_lat, start_lon, end_lon, color, style, input_projection, resolution = line\n \n assert style in list(line_style_dict.keys())\n assert resolution in ['high', 'low']\n\n start_lat = float(start_lat)\n start_lon = float(start_lon)\n end_lat = float(end_lat)\n end_lon = float(end_lon)\n\n lons = iris.analysis.cartography.wrap_lons(numpy.array([start_lon, end_lon]), 0, 360)\n # FIXME: start=0 might not work for all input/output projection combos\n\n if resolution == 'low':\n lats = numpy.array([start_lat, end_lat]) \n elif resolution == 'high':\n assert start_lat == end_lat or start_lon == end_lon, \\\n \"High res lines need constant lat or lon in reference coordinate system\"\n\n if start_lat == end_lat:\n lons = numpy.arange(lons[0], lons[-1] + 0.5, 0.5)\n lats = numpy.repeat(start_lat, len(lons))\n else:\n lats = numpy.arange(start_lat, end_lat + 0.5, 0.5)\n lons = numpy.repeat(lons[0], len(lats))\n\n plt.plot(lons, lats, \n linestyle=line_style_dict[style], \n color=color, linewidth=line_width,\n transform=input_projections[input_projection])", "def __init__(self, lines):\n\t\tself.lines = lines\n\t\tself.points = set()\n\t\tfor l in lines:\n\t\t\tif not l.a in self.points:\n\t\t\t\tself.points.add(l.a)\n\t\t\tif not l.b in self.points:\n\t\t\t\tself.points.add(l.b)", "def parse_curves_line(L):\n data = L.split()\n if len(data) != len(column_names['curves']):\n print(\"curves line {} does not have 12 fields, skipping\".format(L))\n return\n label, record = parse_line_label_cols(L)\n\n record['conductor_ideal'] = data[4]\n record['conductor_norm'] = N = ZZ(data[5])\n record['conductor_norm_factors'] = N.support()\n\n record['ainvs'] = data[6]\n record['jinv'] = data[7]\n record['disc'] = disc = data[8]\n if \".\" in disc:\n print(\"Old disc: {}\".format(disc))\n disc = \"({})\".format(ZZ(RR(disc[1:-1])))\n print(\"New disc: {}\".format(disc))\n record['disc'] = disc\n record['normdisc'] = ZZ(data[9])\n from sage.all import sqrt\n record['root_analytic_conductor'] = sqrt(0.00798504020212804*float(N)**(1.0/float(record['degree']))*float(record['abs_disc']))\n #print('root_analytic_conductor = {}'.format(record['root_analytic_conductor']))\n\n eqn = data[10]\n # the reason for doing the following is for the unique field\n # 2.2.5.1 where the field generator is not a single character such\n # as 'a' or 'i' but is '\\phi', and we don't want to have '\\phix'\n # in a latex string (and also do not want any whitespace).\n if \"{x}\" not in eqn:\n eqn = eqn.replace('x', '{x}').replace('y', '{y}')\n record['equation'] = eqn\n\n record['cm'] = cm = ZZ(data[11]) if data[11] != '?' 
else '?'\n # The 'cm_type' column holds +1 for a curve with rational, -1 for\n # potential, 0 if no CM\n if cm:\n if 'CM' in label:\n record['cm_type'] = +1\n else:\n record['cm_type'] = -1\n else:\n record['cm_type'] = 0\n bc = data[12][1:-1]\n record['base_change'] = [str(lab) for lab in bc.split(\",\")] if bc else []\n record['q_curve'] = (data[13] == '1')\n return label, record", "def general(self):\n return -self.line[[0, 2]] / self.line[1]", "def read_model_performances(lines):\n performances = {}\n patients = [str(x) for x in range(13)]\n current_model = ''\n for line in lines:\n words = line.split(' ')\n if (len(words) == 10) and (words[0] == 'starting'):\n if words[-1][:-1].split('/')[0] not in performances.keys():\n performances[words[-1][:-1].split('/')[0]] = []\n current_model = words[-1][:-1].split('/')[0]\n if (len(words) == 2) and (words[0] in patients):\n performances[current_model].append(float(words[1][:-1]))\n\n return performances", "def make_lineprofile(npix,rstar,xc,vgrid,A,veq,linewidth):\n vc=(np.arange(npix)-xc)/rstar*veq\n vs=vgrid[np.newaxis,:]-vc[:,np.newaxis]\n profile=1.-A*np.exp( -(vs*vs)/2./linewidth**2)\n return profile", "def linear_attenuation_coefficient(self, lines):\n wl = lines.to(\"nm\", \"spectroscopy\").magnitude\n if isarray(wl):\n return [self.getExtinctionCoefficient(l) for l in wl]\n else:\n return self.getExtinctionCoefficient(wl)", "def _get_parameter_lines_from_hough_lines(lines):\n mb = []\n for i in range(len(lines)):\n rho = lines[i][0]\n theta = lines[i][1]\n cos_theta = np.cos(theta)\n sin_theta = np.sin(theta)\n m = (-1.0) * (cos_theta / sin_theta)\n b = rho / sin_theta\n mb.append((m,b))\n return mb", "def get_properties_code(self, obj):\n # called only from generate_code_ctor when creating a class constructor to get the first lines\n # otherwise properties are part of the code returned by get_code\n prop_lines = []\n self._reset_vars()\n\n self._prepare_tmpl_content(obj)\n for line in self.tmpl_props:\n prop_lines.append(line % self.tmpl_dict)\n return prop_lines", "def update(self, line, autoscale=False):\n # compute knl as function of s\n values = {p: np.zeros(self.S.size) for p in self.on_y_unique}\n orders = {p: order(p) for p in self.on_y_unique}\n Smax = line.get_length()\n for name, el, s0, s1 in iter_elements(line):\n if hasattr(el, \"knl\"):\n if 0 <= s0 <= Smax:\n mask = (self.S >= s0) & (self.S < s1)\n else:\n # handle wrap around\n mask = (self.S >= s0 % Smax) | (self.S < s1 % Smax)\n for knl, n in orders.items():\n if n <= el.order:\n values[knl][mask] += el.knl[n]\n\n # plot\n s = self.factor_for(\"s\")\n changed = []\n for i, ppp in enumerate(self.on_y):\n for j, pp in enumerate(ppp):\n for k, p in enumerate(pp):\n art = self.artists[i][j][k]\n y = self.factor_for(p) * values[p]\n if self.filled:\n art.get_paths()[0].vertices[1 : 1 + y.size, 1] = y\n else:\n art.set_data((s * self.S, y))\n changed.append(art)\n\n if autoscale:\n ax = self.axis(i, j)\n if self.filled: # At present, relim does not support collection instances.\n ax.update_datalim(\n mpl.transforms.Bbox.union(\n [a.get_datalim(ax.transData) for a in self.artists[i][j]]\n )\n )\n else:\n ax.relim()\n ax.autoscale()\n ax.set(xlim=(s * np.min(self.S), s * np.max(self.S)))\n\n return changed", "def airglow_line_components(self, vaclines, wave_range, disp_range):\n\n AA = []\n for line in vaclines:\n AA.append(np.exp(-0.5*((wave_range-line)/disp_range)**2))\n return np.vstack(AA)", "def parse_line(self, line):\n\n kv_match = self.kv_rex.match(line)\n\n if 
kv_match:\n kv_dict = kv_match.groupdict()\n kv_key = kv_dict['key']\n kv_value = kv_dict['value']\n\n if 'time_' in kv_key:\n kv_unit = kv_dict['unit']\n\n if not kv_key in self.kv_times:\n self.kv_times[kv_key] = {'unit': kv_unit, 'values': []}\n\n self.kv_times[kv_key]['values'].append(float(kv_value))\n else:\n if not kv_key in self.kv_counts:\n self.kv_counts[kv_key] = 0.0\n\n self.kv_counts[kv_key] += float(kv_value)", "def evaluation_cc(self, property='clustering-coeff'):\n\n if property == 'clustering-coeff':\n rw_cc = [np.mean(clustering_coef_wu(self.rw_data[t])) for t in range(0, self.T)]\n smth_cc = [np.mean(clustering_coef_wu(self.smth_data[t])) for t in range(0, self.T)]\n elif property == 'transitivity':\n rw_cc = [np.mean(transitivity_wu(self.rw_data[t])) for t in range(0, self.T)]\n smth_cc = [np.mean(transitivity_wu(self.smth_data[t])) for t in range(0, self.T)]\n elif property == 'coreness':\n rw_cc = [np.mean(core.core_periphery_dir(self.rw_data[t])) for t in range(0, self.T)]\n smth_cc = [np.mean(core.core_periphery_dir(self.smth_data[t])) for t in range(0, self.T)]\n elif property == 'assortativity':\n rw_cc = [np.mean(core.assortativity_wei(self.rw_data[t], 0)) for t in range(0, self.T)]\n smth_cc = [np.mean(core.assortativity_wei(self.smth_data[t], 0)) for t in range(0, self.T)]\n elif property == 'modularity':\n rw_cc, _ = get_number_of_components(self.rw_data)\n smth_cc, _ = get_number_of_components(self.smth_data)\n elif property == 'path_length':\n rw_cc = [charpath(rw)[0] for rw in self.rw_data]\n smth_cc = [charpath(sm)[0] for sm in self.smth_data]\n\n # rw_cc_ent = get_entropy_list(rw_cc)\n # smth_cc_ent = get_entropy_list(smth_cc)\n\n return rw_cc, smth_cc", "def powerline_all(osm_path): \n return retrieve(osm_path,'lines',['power', 'voltage'])", "def __init__(self, line):\n # Might want to redo this line later to\n # exclude universal \"GLEAM \" prefix\n self.name = line[:line.index(\"|\")]\n line = line[line.index(\"|\") + 1:]\n \n self.ra = line[:line.index(\"|\")]\n self._format_ra()\n line = line[line.index(\"|\") + 1:]\n\n self.dec = line[:line.index(\"|\")]\n self._format_dec()\n line = line[line.index(\"|\") + 1:]\n\n self.flux_by_frq = {}\n\n # we extract and record fluxes according to expected_frequencies\n # at the same time, we convert mJy -> Jy\n for expected_frq in expected_frequencies:\n try:\n self.flux_by_frq[expected_frq] = \\\n float(line[:line.index(\"|\")].strip()) / 1000\n except ValueError:\n warning = \"Missing flux value for: \" + self.name + \\\n \" at frequency: \" + str(expected_frq) + \" MHz.\"\n w.warn(warning)\n self.flux_by_frq[expected_frq] = np.NaN\n line = line[line.index(\"|\") + 1:]\n\n try:\n self.alpha = float(line[:line.index(\"|\")])\n except ValueError:\n warning = \"Missing spectral index for: \" + self.name\n w.warn(warning)\n self.alpha = np.NaN", "def lines(\n self,\n lines: LineReturn,\n line_type: LineType,\n ax: figure | int = 0,\n color: str = \"C0\",\n alpha: float = 1.0,\n linewidth: float = 1,\n ) -> None:\n fig = self._get_figure(ax)\n color = self._convert_color(color)\n xs, ys = lines_to_bokeh(lines, line_type)\n if len(xs) > 0:\n fig.multi_line(xs, ys, line_color=color, line_alpha=alpha, line_width=linewidth)", "def set_properties(self):\n for key in self.prop_source.params_data:\n property_grid = self.interpolate(\n self.grid.xmin,\n self.grid.ymin,\n self.grid.dx,\n self.grid.dy,\n self.grid.nx,\n self.grid.ny,\n self.prop_source.params_data[key]['x'],\n self.prop_source.params_data[key]['y'],\n 
self.prop_source.params_data[key]['z']\n )\n\n self.properties[key] = self.normalize(\n property_grid,\n self.prop_source.params[key]['min'],\n self.prop_source.params[key]['max'],\n self.grid.ibound_mask\n )", "def _amount_line(self, cr, uid, ids, prop, unknow_none, unknow_dict):\n res = {}\n tax_obj = self.pool.get('account.tax')\n cur_obj = self.pool.get('res.currency')\n for line in self.browse(cr, uid, ids):\n price = line.price_unit * (1-(line.discount or 0.0)/100.0)\n taxes = tax_obj.compute_all(cr, uid, line.invoice_line_tax_id, price, line.quantity, product=line.product_id, partner=line.invoice_id.partner_id)\n res[line.id] = taxes['total'] + line.variation_amount\n if line.invoice_id:\n cur = line.invoice_id.currency_id\n res[line.id] = cur_obj.round(cr, uid, cur, res[line.id])\n return res", "def properties(self) -> Optional[pulumi.Input['LineChannelPropertiesArgs']]:\n return pulumi.get(self, \"properties\")", "def ProcessLine(line, rules, processing, previous_line_data):\n line_data = {'line':line, 'line_offset':processing['offset_processed']}\n \n # Update with always-included data, like glob keys, and the component\n line_data.update(processing['data'])\n \n # Test if this line is multi-line (positive test)\n is_multi_line = False\n for rule in rules:\n if rule.get('multi line regex test', False):\n if re.match(rule['regex'], line):\n is_multi_line = True\n break\n # Negative regex test\n for rule in rules:\n if rule.get('multi line regex not', False):\n if re.match(rule['regex'], line):\n is_multi_line = True\n break\n \n # If this is multi_line and we have a real previous line to embed this data in\n if is_multi_line and previous_line_data != None:\n #print 'Multiline: %s' % line\n if 'multiline' not in previous_line_data:\n previous_line_data['multiline'] = []\n \n previous_line_data['multiline'].append(line)\n\n\n # Only process rules on first lines (not multi lines), and return the line_data to be the next line's previous_line_data\n if not is_multi_line:\n #print line\n \n # Start with: We havent found a match yet\n match_found = False\n \n for item in rules:\n # Skip the multi-line regext test/not rules\n if item.get('multi line regex test', False) or item.get('multi line regex not', False):\n continue\n \n # Break out our terms for this rule item\n terms = re.findall('%\\((.*?)\\)s', item['regex'])\n #print item['regex']\n #print terms\n \n regex = item['regex']\n \n # Pre-processing step, to remove any conflicting characters with the rest of the regex which need to be escaped/sanitized\n for term in terms:\n regex = regex.replace('%%(%s)s' % term, 'MATCHMATCHMATCH')\n \n regex = SanitizeRegex(regex)\n regex = regex.replace('MATCHMATCHMATCH', '(.*?)')\n \n #print '--- %s' % item['id']\n #print regex\n #print line\n \n regex_result = re.findall(regex, line)\n #print regex_result\n if regex_result:\n \n # Python does something stupid with multiple variables, so pull them out of the embedded tuple it adds to the list\n if type(regex_result[0]) == tuple:\n regex_result = regex_result[0]\n \n for count in range(0, len(terms)):\n #print '%s: %s: %s' % (count, terms[count], regex_result[count])\n line_data[terms[count]] = regex_result[count]\n \n #print regex\n #print 'MATCHED! 
%s' % regex\n #print regex_result\n \n match_found = True\n \n # Save the line match ID, so we can reference it for markup/state information\n line_data['__rule_id__'] = item['id']\n \n break\n \n return line_data\n \n # Else, this is multi-line, so return it to continue to be the next line's previous_line_data\n else:\n #TODO(g): Save this multi-line data every time? Otherwise when does it get saved out?\n pass\n \n return previous_line_data", "def line(points):\n return LineString(points)", "def detect_properties(path):\n client = vision.ImageAnnotatorClient()\n\n with io.open(path, 'rb') as image_file:\n content = image_file.read()\n\n image = vision.types.Image(content=content)\n\n response = client.image_properties(image=image)\n props = response.image_properties_annotation\n\n return props", "def typed_line(line, parser):\n user, item, rating = parser(line)\n return int(user), int(item), float(rating)", "def parse_isoclass_line(L):\n data = L.split()\n if len(data) != len(column_names['isoclass']):\n print(\"isoclass line {} does not have 6 fields, skipping\".format(L))\n return\n label, record = parse_line_label_cols(L)\n\n record['isogeny_matrix'] = mat = [[int(a) for a in r.split(\",\")]\n for r in data[4][2:-2].split(\"],[\")]\n record['class_size'] = len(mat)\n record['class_deg'] = max(max(r) for r in mat)\n record['all_iso_degs'] = dict([[n+1, sorted(list(set(row)))] for n, row in enumerate(mat)]) \n record['trace_hash'] = ZZ(data[5])\n\n # NB Every curve in the class has the same 'isogeny_matrix',\n # 'class_size', 'class_deg', and the for the i'th curve in the\n # class (for i=1,2,3,...) its 'isogeny_degrees' column is\n # all_iso_degs[i].\n\n return label, record", "def glcmProps(P, prop='contrast'):\n\n (num_level, num_level2, num_dist, num_angle) = P.shape\n assert num_level == num_level2\n assert num_dist > 0\n assert num_angle > 0\n\n # create weights for specified property\n I, J = np.ogrid[0:num_level, 0:num_level]\n if prop == 'contrast':\n weights = (I - J) ** 2\n elif prop in ['ASM', 'energy', 'correlation']:\n pass\n elif prop == 'mean':\n weights, _ = np.mgrid[0:num_level, 0:num_level]\n elif prop == 'dissimilarity':\n weights = np.abs(I - J)\n elif prop == 'homogeneity':\n weights = 1. / (1. 
+ (I - J) ** 2)\n else:\n raise ValueError('%s is an invalid property' % (prop))\n\n # compute property for each GLCM\n if prop == 'energy':\n asm = np.apply_over_axes(np.sum, (P ** 2), axes=(0, 1))[0, 0]\n results = np.sqrt(asm)\n elif prop == 'ASM':\n results = np.apply_over_axes(np.sum, (P ** 2), axes=(0, 1))[0, 0]\n elif prop == 'correlation':\n results = np.zeros((num_dist, num_angle), dtype=np.float64)\n I = np.array(range(num_level)).reshape((num_level, 1, 1, 1))\n J = np.array(range(num_level)).reshape((1, num_level, 1, 1))\n diff_i = I - np.apply_over_axes(np.sum, (I * P), axes=(0, 1))[0, 0]\n diff_j = J - np.apply_over_axes(np.sum, (J * P), axes=(0, 1))[0, 0]\n\n std_i = np.sqrt(np.apply_over_axes(np.sum, (P * (diff_i) ** 2),\n axes=(0, 1))[0, 0])\n std_j = np.sqrt(np.apply_over_axes(np.sum, (P * (diff_j) ** 2),\n axes=(0, 1))[0, 0])\n cov = np.apply_over_axes(np.sum, (P * (diff_i * diff_j)),\n axes=(0, 1))[0, 0]\n\n # handle the special case of standard deviations near zero\n mask_0 = std_i < 1e-15\n mask_0[std_j < 1e-15] = True\n results[mask_0] = 1\n\n # handle the standard case\n mask_1 = mask_0 == False\n results[mask_1] = cov[mask_1] / (std_i[mask_1] * std_j[mask_1])\n elif prop in ['contrast', 'dissimilarity', 'homogeneity', 'mean']:\n weights = weights.reshape((num_level, num_level, 1, 1))\n results = np.apply_over_axes(np.sum, (P * weights), axes=(0, 1))[0, 0]\n\n return results" ]
[ "0.59829354", "0.58867705", "0.56100065", "0.56093836", "0.53970766", "0.5322736", "0.5254309", "0.5200317", "0.5133646", "0.51039666", "0.5046336", "0.50146914", "0.5009376", "0.4989455", "0.4974461", "0.49629325", "0.4949585", "0.49364555", "0.49333733", "0.49223906", "0.49219412", "0.49080393", "0.4897002", "0.4884336", "0.4859388", "0.48399433", "0.4830907", "0.4824195", "0.48237228", "0.48209774", "0.47898135", "0.4789604", "0.47858316", "0.47812378", "0.47713068", "0.4767731", "0.47634745", "0.47462904", "0.47454092", "0.47434512", "0.4741303", "0.47328874", "0.47264782", "0.47160175", "0.47049454", "0.46991163", "0.4694768", "0.46884635", "0.46870843", "0.46784532", "0.46761325", "0.46752617", "0.466973", "0.4646377", "0.464454", "0.46412587", "0.46394908", "0.46230152", "0.46162954", "0.46160215", "0.46104434", "0.4610132", "0.46095657", "0.4608153", "0.45998812", "0.459778", "0.45967555", "0.45941508", "0.45922017", "0.459205", "0.45897081", "0.45862672", "0.45859188", "0.4573148", "0.4564447", "0.45624295", "0.45531508", "0.45396507", "0.45380428", "0.45365041", "0.45305097", "0.45299253", "0.4529474", "0.45281982", "0.45225486", "0.45223027", "0.45212227", "0.4515362", "0.45145437", "0.45142996", "0.45091006", "0.45078626", "0.4506469", "0.44979078", "0.4490645", "0.44830605", "0.4482358", "0.44802657", "0.4478906", "0.44722858", "0.44706744" ]
0.0
-1
Show dialog of RETRY,SKIP,ABORT
def retryskipabort(message, timeout=20): root = tk.Tk() root.geometry("400x200") root.title("Exception handle") root.eval('tk::PlaceWindow %s center' % root.winfo_pathname(root.winfo_id())) root.attributes("-topmost", True) _kvs = {"result": "abort"} def cancel_timer(*args): root.after_cancel(_kvs['root']) root.title("Manual") def update_prompt(): cancel_timer() def f(result): def _inner(): _kvs['result'] = result root.destroy() return _inner tk.Label(root, text=message).pack(side=tk.TOP, fill=tk.X, pady=10) frmbtns = tk.Frame(root) tk.Button(frmbtns, text="Skip", command=f('skip')).pack(side=tk.LEFT) tk.Button(frmbtns, text="Retry", command=f('retry')).pack(side=tk.LEFT) tk.Button(frmbtns, text="ABORT", command=f('abort')).pack(side=tk.LEFT) frmbtns.pack(side=tk.BOTTOM) prompt = tk.StringVar() label1 = tk.Label(root, textvariable=prompt) #, width=len(prompt)) label1.pack() deadline = time.time() + timeout def _refresh_timer(): leftseconds = deadline - time.time() if leftseconds <= 0: root.destroy() return root.title("Test will stop after " + str(int(leftseconds)) + " s") _kvs['root'] = root.after(500, _refresh_timer) _kvs['root'] = root.after(0, _refresh_timer) root.bind('<Button-1>', cancel_timer) root.mainloop() return _kvs['result']
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def askContinue(parent,message,continueKey,title=_('Warning')):\r\n #--ContinueKey set?\r\n if _settings.get(continueKey): return wx.ID_OK\r\n #--Generate/show dialog\r\n dialog = wx.Dialog(parent,defId,title,size=(350,200),style=wx.DEFAULT_DIALOG_STYLE|wx.RESIZE_BORDER)\r\n icon = wx.StaticBitmap(dialog,defId,\r\n wx.ArtProvider_GetBitmap(wx.ART_WARNING,wx.ART_MESSAGE_BOX, (32,32)))\r\n gCheckBox = checkBox(dialog,_(\"Don't show this in the future.\"))\r\n #--Layout\r\n sizer = vSizer(\r\n (hSizer(\r\n (icon,0,wx.ALL,6),\r\n (staticText(dialog,message,style=wx.ST_NO_AUTORESIZE),1,wx.EXPAND|wx.LEFT,6),\r\n ),1,wx.EXPAND|wx.ALL,6),\r\n (gCheckBox,0,wx.EXPAND|wx.LEFT|wx.RIGHT|wx.BOTTOM,6),\r\n (hSizer( #--Save/Cancel\r\n spacer,\r\n button(dialog,id=wx.ID_OK),\r\n (button(dialog,id=wx.ID_CANCEL),0,wx.LEFT,4),\r\n ),0,wx.EXPAND|wx.LEFT|wx.RIGHT|wx.BOTTOM,6),\r\n )\r\n dialog.SetSizer(sizer)\r\n #--Get continue key setting and return\r\n result = dialog.ShowModal()\r\n if gCheckBox.GetValue():\r\n _settings[continueKey] = 1\r\n return result in (wx.ID_OK,wx.ID_YES)", "def _launch_click_through_dialog(self):\n text = \"The port test did not complete successfully. If you are certain that you really did forward the port and would like to continue anyway, you can do so.\\\n Otherwise, you may want to try again.\"\n self.controller.show_msgbox(text, title=\"Do You Really Want to Do That?\", cb=self._click_through_dialog_cb, buttons=(gtk.STOCK_CANCEL, 0, gtk.STOCK_OK, 1), width=300)", "def __continue(self, *args):\n return Menu.CONTINUE", "def askOk(parent,message,title=''):\r\n return askStyled(parent,message,title,wx.OK|wx.CANCEL)", "def __onConfirmNo(self):\n self.__confDlg.reject()", "def click_continue(self):\n self.click_element(self.continue_button_selector)", "def ask_dialog(self, title=\"\", vars=[], help=\"\"):\n\t\tpass", "def CONTINUE(self):\n self.update_status(self.continue_state)\n self.pause_state = None", "def aborting(self):\n \n pass", "def onAccepted():\n dialog.done(1)", "def run(self):\n # show the dialog\n self.dlg.show()\n # Run the dialog event loop\n result = self.dlg.exec_()\n\n # See if OK was pressed\n if result:\n # Do something useful here - delete the line containing pass and\n # substitute with your code.\n pass", "def continue_button(self):\r\n self.update_settings()\r\n self.is_pause = False\r\n self.is_step = False\r\n if self.continue_call is not None:\r\n self.wm.after(1, self.continue_call)", "def run(self):\n # show the dialog\n self.dlg.show()\n # Run the dialog event loop\n result = self.dlg.exec_()\n # See if OK was pressed\n if result:\n # Do something useful here - delete the line containing pass and\n # substitute with your code.\n pass", "def run(self):\n # show the dialog\n self.dlg.show()\n # Run the dialog event loop\n result = self.dlg.exec_()\n # See if OK was pressed\n if result:\n # Do something useful here - delete the line containing pass and\n # substitute with your code.\n pass", "def run(self):\n # show the dialog\n self.dlg.show()\n # Run the dialog event loop\n result = self.dlg.exec_()\n # See if OK was pressed\n if result:\n # Do something useful here - delete the line containing pass and\n # substitute with your code.\n pass", "def askRestart():\n restart = None\n def Onclose(button):\n nonlocal restart\n if button == \"Redémarrer\":\n restart = True\n elif button == \"Redémarrer plus tard\" or button == 'WM_DELETE_WINDOW':\n restart = False\n else :\n restart = False\n\n f = Dialog(master = None, title = \"Redémarrage requis\", 
buttons = (\"Redémarrer\", \"Redémarrer plus tard\"), defaultbutton = \"Redémarrer\", exitButton = (\"Redémarrer\", \"Redémarrer plus tard\", 'WM_DELETE_WINDOW'), command = Onclose)\n\n l = Label(f, text = \"Une ou plusieurs de vos modifications nécéssitent un redémarrage\\nde l'applicationpour être correctement appliquées.\\n\\nVoulez-vous redémarrer l'application maintenant ?\")\n l.pack()\n\n f.activateandwait()\n return restart", "def __continue(self, *args, **kwargs):\n self.__should_load = False\n return Menu.CONTINUE", "def confirm_action(message):\n if not click.confirm(message + \" Continue?\"):\n logger.info(\"User cancels action. Exiting...\")\n exit(0)\n else: return", "def command_continue(self):\n self.step_continue = True", "def confirm(self, action):\n title = \"%s : P L E A S E C O N F I R M\" % action\n question_text = \"<html><b>%s - PLEASE CONFIRM.</b><br/>\"\\\n \"<br/>Do you want to %s %s recordings for the following project?\"\\\n \"<br/><br/>PROJECT : %s\"\\\n \"<br/>CLIENT : %s\"\\\n \"<br/>DATE : %s<br/></html>\" % (\n action.upper(),\n action,\n \" & \".join(self.selected_formats),\n self.recordings_table.project_details()[2],\n self.recordings_table.project_details()[3],\n self.recordings_table.project_details()[0]\n )\n\n self.hide()\n if action == 'upload':\n self.confirmation_dialog.setText(title, question_text)\n self.confirmation_dialog.exec_()\n self.show()\n\n if self.confirmation_dialog.cancelled:\n return (False, False)\n\n return (True, self.confirmation_dialog.immediate_upload)\n else:\n self.confirmation_dialog.showQuestion(title, question_text)\n self.show()\n return self.confirmation_dialog.copy_confirmed", "def showOk(parent,message,title=''):\r\n return askStyled(parent,message,title,wx.OK)", "def continue_command(self):\n cycles_cont = self.on_spin_cont(wx.SpinCtrl)\n global global_cycles_completed\n # check that this function has been called on pressing continue button\n text = \"\".join(\n [_(u\"continue_command function has been called, number of cycles is: \"), str(cycles_cont)])\n if self.state == 0:\n self.canvas_2d.render(text, True)\n else:\n self.canvas_3d.render()\n if cycles_cont is not None: # if the number of cycles provided is valid\n if global_cycles_completed == 0:\n print(_(u\"Error! Nothing to continue. 
Run first.\"))\n elif self.run_network(cycles_cont):\n global_cycles_completed += cycles_cont\n print(\" \".join([_(u\"Continuing for\"), str(cycles_cont), _(u\"cycles.\"), _(u\"Total:\"), str(\n global_cycles_completed)]))", "def ask_ok_cancel(message=\"\", title=None):\n return dialog(\"ask_ok_cancel\", message=message, title=title)", "def ask_ok(prompt, retries=4, complaint='Yes or no, please!'):\n while True:\n ok = input(prompt)\n if ok in ('y', 'ye', 'yes', 'Y', 'Ye', 'Yes'):\n return True\n if ok in ('n', 'no', 'nop', 'nope', 'N', 'No'):\n return False\n retries = retries - 1\n if retries < 0:\n raise IOError('refusenik user')\n print(complaint)", "def ask_ok(prompt, retries=4, complaint=\"Yes or No please!\"):\n while True:\n ok_string = raw_input(prompt + \" \")\n if ok_string in ('y', 'ye', 'yes'):\n print(\"You have chosen yes.\")\n return True\n elif ok_string in ('n', 'no', 'nop', 'nope'):\n print(\"You have chosen no.\")\n return False\n retries -= 1\n print(complaint)\n if retries <= 0:\n raise IOError('refusenik user')", "def message_restart():\n if dialog_yes_no(32014):\n xbmc.executebuiltin('RestartApp')", "def PresentDialog_Confirm_Call( message ):\n return call( message, [ 'Ok', 'Cancel' ] )", "def resetConfirm(self):\n\n ## Check if exposure is in progress\n if self.thread.isRunning():\n QtGui.QMessageBox.warning(self, \"Exposure warning.\", \"Exposure in progress, unable to close program.\", QtGui.QMessageBox.Ok)\n return\n\n else:\n reply = QtGui.QMessageBox.question(self, 'Confirmation','Are you sure you want to reset the STA3800 controller?',\n QtGui.QMessageBox.Yes | QtGui.QMessageBox.No,\n QtGui.QMessageBox.No)\n\n if reply == QtGui.QMessageBox.Yes:\n self.reset()", "def cancel(self):\n with self.handle_alert(confirm=False):\n self.q(css='button#confirm').first.click()", "def cancel(self):\n self.succeeded = False\n self.reject()", "def cancel(self):\n self.succeeded = False\n self.reject()", "def cancel(self):\n self.succeeded = False\n self.reject()", "def cancel(self):\n self.succeeded = False\n self.reject()", "def confirmation_failed(self):", "def oops(self):\n QMessageBox.information(self, 'Error', \"Ada yang salah...\", QMessageBox.Ok, QMessageBox.Ok)", "def on_close(self, evt):\n wx.Dialog.Show(self, False)\n evt.Skip()", "def confirm():\n\t\traise NotImplementedError", "def proceed(self):\n pass", "def _create_continue_and_abort_button():\r\n continue_button = Ctk.CButton(password_window, bg='gray15', highlight_color='gray50',\r\n pressing_color='gray12', width=250, height=30,\r\n text=self.language.refactor('Log In'),\r\n font=('Helvetica', 14), fg='white', command=_check_password)\r\n continue_button.place(x=250, y=149)\r\n\r\n continue_button = Ctk.CButton(password_window, bg='gray15', highlight_color='gray50',\r\n pressing_color='gray12', width=249.5, height=30,\r\n text=self.language.refactor('Cancel'),\r\n font=('Helvetica', 14), fg='white', command=_set_is_open_to_false)\r\n continue_button.place(x=0, y=149)", "def Confirm(self):\r\n \r\n global references\r\n self.from_ed = self.ed_result.get(\"1.0\",'end-1c')\r\n references.append(self.from_ed)\r\n self.confirm_b.configure(state = 'disabled')\r\n self.discard_b.configure(state = 'disabled')\r\n self.finalresult.configure(state = 'normal')\r\n self.finalresult.delete('1.0', END)\r\n \r\n self.final()", "def reject(self):\r\n QtGui.QDialog.reject(self)", "def card_failure(self): \n handles = self.driver.window_handles\n while len(handles) != 3:\n handles = self.driver.window_handles\n 
self.driver.switch_to_window(handles[2]) \n WebDriverWait(self.driver, 30).until(EC.visibility_of_element_located((By.CSS_SELECTOR,'.success')))\n self.driver.find_element_by_class_name(\"danger\").click()\n self.driver.switch_to_window(handles[0])", "def on_fail(utterance):\n\n self.log.info(\"Utterance: {}\".format(utterance))\n\n user_response = self.ask_yesno('try.again')\n\n return 'try.again' if user_response is None else 'okay'", "def exitConfirm():\n\n confirm = showDialogBox('Exit the game now?', 'question', 'yesno', 'no')\n if confirm == 'yes':\n raise SystemExit", "def ask_yes_no(message=\"\", title=None):\n return dialog(\"ask_yes_no\", message=message, title=title)", "def accept_cancel(self):\n self.ok = False\n self.destroy()", "def waiting_confirmation(self):", "def __window_confirm(self, text):\n return True", "def cancel(self): #$NON-NLS-1$\r", "def continue_shopping(self):\n self._back_btn.click()", "def work_finished():\r\n tkMessageBox.showinfo(\"Yes...\", \"Dokonano konwersji danych. \\n Życzę miłego dnia.\")", "def doContinue(self, message, razzy):\n return", "def do_continue(self, arg):\n if not self.nosigint:\n print('Resuming program, press Ctrl-C to relaunch debugger.', file=self.stdout)\n return super().do_continue(arg)", "def check_continue(config: SimpleNamespace, prev: str=None, next: str=None) -> None:\n if config.general.debug_mode:\n if prev and next:\n print(f'\\n{prev.upper()} phase completed. Next up: {next.upper()} phase.')\n x = input('\\nDo you want to continue y/n? ')\n if x not in ['yes', 'y', '']:\n print()\n sys.exit(0)", "def confirm_start(self, player=str):\n self.clear_screen()\n print(\"\\n\" * 11)\n pass_text = \"Pass the device to \" + player\n print(f\"{pass_text : ^100}\")\n input(f\"{'Press ENTER when ready.' : ^100}\")\n return self.stop_game", "def forward( self ):\n super( ConfirmationScreen, self ).forward()\n \n self._current_option = self._current_option + 1\n if self._current_option >= len( self._options ):\n self._current_option = 0", "def runAskOkDialog(self, c: Cmdr, title: str, message: str=None, text: str=\"Ok\") -> None:\n if g.unitTesting:\n return\n dialog = QtWidgets.QMessageBox(c and c.frame.top)\n dialog.setWindowTitle(title)\n if message:\n dialog.setText(message)\n dialog.setIcon(Information.Information)\n dialog.addButton(text, ButtonRole.YesRole)\n try:\n c.in_qt_dialog = True\n dialog.raise_()\n dialog.exec_()\n finally:\n c.in_qt_dialog = False", "def cancel(self):\n return self.RES_OK", "def popup(self):\r\n return self.exec_() == QDialog.Accepted", "def decision(question):\n return click.confirm(question, show_default=True)", "def abort(self):\n self.write(\":ABORT\")", "def retry(self):\n # XXX: check whether it is possible to distingish \n # between the error conditions and set meaningfull exitcode\n return False", "def okButton(self):\n \n self.answer=\"ok\"\n self.top.destroy()", "def accept_review_step_skip(driver):\n labels = driver.find_elements_by_tag_name(\"label\")\n label = labels[7]\n label.click()\n button = driver.find_element_by_class_name(ALERT_CLASS_NAME)\n button.send_keys(\"\\n\")\n time.sleep(1.5)", "def click_win_dispute_cancel_button(self):\n self.click_element(self.win_dispute_cancel_button_locator)\n try:\n self.dismiss_alert_pop_up()\n except:\n pass\n self.wait_for_ajax_spinner_load()", "def successful_unlock_eng(self):\n choice = input(\"Do you want to start the repair now? 
[Y/N]: \")\n if choice.lower() == 'y':\n print(\"Repair in process..\")\n else:\n self.successful_unlock_eng()", "def _doAbort(self):\n self._cmdAbort()", "def prompt_restart_required(self):\n restart_opts = self.restart_options\n changed_opts = self.changed_options\n options = [restart_opts[o] for o in changed_opts if o in restart_opts]\n\n if len(options) == 1:\n msg_start = _(\"EZCAD needs to restart to change the following \"\n \"setting:\")\n else:\n msg_start = _(\"EZCAD needs to restart to change the following \"\n \"settings:\")\n msg_end = _(\"Do you wish to restart now?\")\n\n msg_options = u\"\"\n for option in options:\n msg_options += u\"<li>{0}</li>\".format(option)\n\n msg_title = _(\"Information\")\n msg = u\"{0}<ul>{1}</ul><br>{2}\".format(msg_start, msg_options, msg_end)\n answer = QMessageBox.information(self, msg_title, msg,\n QMessageBox.Yes | QMessageBox.No)\n if answer == QMessageBox.Yes:\n self.restart()", "def buttonOK_Clicked( self, event ):\n\t\tself.EndModal(wx.ID_OK)", "def sgnCancel(self):\n\n self.uiCloseWindow()", "def ask_continue():\n i = input(\"Please ensure your System Dependencies are met. Continue? [y/N] \")\n if i in (\"\", \"N\", \"n\"):\n out_error(\"Please install system dependencies to continue\")\n exit(1)", "def cancelButton(self):\n \n self.answer=-1\n self.top.destroy()", "def confirm_with_abort() -> None:\n\n click.confirm(\n \"Are you sure you want to drop the users table?\",\n abort=True\n )\n\n click.echo(\"We have gotten to this point, so the user has confirmed.\")", "def resume():\n if checkIfAllFixed():\n disableTaskControls()\n globs.ERREVENT.set()\n globs.DISP_SCREEN.updateScreen(globs.DISP_SCREEN.progress)\n else:\n globs.CONTROLLER.taskMonitor.errorBox.append(\"WARNING! Fix All Errors Before Resuming\")\n globs.CONTROLLER.taskMonitor.errorBox.append(\"\")", "def confirm(self):\n\t\t# TODO: write the current control scheme to config.ini\n\t\tdefault_controls = self.default_controls()\n\t\tconfig = ConfigParser.RawConfigParser()\n\t\tconfig.add_section('controls')\n\t\tconfig.add_section('default_controls')\n\t\tfor i in xrange(len(CONTROLS_OPTIONS) - 2): \n\t\t\tconfig.set('controls', CONTROLS_OPTIONS[i], self.control_map[i])\n\t\t\tconfig.set('default_controls', CONTROLS_OPTIONS[i], default_controls[i] )\n\t\twith open('config.ini', 'wb') as configfile: config.write(configfile)\n\t\tself.player.current_level.screen_manager.switch_to_options_screen(self.player)", "def is_skip(self):\n\n return self.severity == AlertSeverity.TOLERABLE and self.kind == AlertKind.ABORTION", "def Return():\n confirm_frame.forget()\n self.LoadLogInWindow()", "def onCheckConnectionError(self):\r\n\r\n # show the error message\r\n msgBox = QMessageBox(self)\r\n msgBox.setWindowTitle(conf_parser.get(\"APP\", \"name\"))\r\n msgBox.setText(\"Internet connection not detected.\")\r\n msgBox.setStandardButtons(QMessageBox.Retry | QMessageBox.Close)\r\n msgBox.setDefaultButton(QMessageBox.Close)\r\n ret = msgBox.exec()\r\n\r\n # interact user\r\n if(ret == QMessageBox.Close):\r\n # exit program\r\n sys.exit()\r\n if(ret == QMessageBox.Retry):\r\n # retry connection\r\n self.thread = threading.Thread(target=self.checkServerThread)\r\n self.thread.setDaemon(True)\r\n self.thread.start()\r\n self.pros = 0\r\n self.check_timer.start(100)", "def iscanceled(*args):", "def abort(self):\n print(\"abort\")", "def FailureMessage(self, message, caption):\n wx.MessageBox(message, caption, style=wx.OK|wx.ICON_ERROR)", "def handleOkTeaser(self):\n self.dialog.destroy()\n del 
self.dialog\n place = base.cr.playGame.getPlace()\n if place:\n place.fsm.request('walk')", "def Prompt(self,message):\n\t\tself.acad.ActiveDocument.Utility.Prompt(message)", "def again():\r\n\tif_again = ask_yes_no(\"\\nDo you want play again (press y or n) \")\r\n\tif if_again == \"y\":\r\n\t\tprint(\"\\n\\n\")\r\n\t\tmain()\r\n\telif if_again == \"n\":\r\n\t\tprint(\"\\nThank you for your time spared for this game. Good bye!\")", "def tryAgain(request, msg=None, url=None, buttonText=None,\n title=None):\n if msg is None:\n msg = \"Please try again\"\n if url is None:\n url = \"javascript:history.back()\"\n if buttonText is None:\n buttonText = \"Try Again\"\n if title is None:\n title = \"Try Again\"\n context = {\"msg\": msg,\n \"url\": url,\n \"button_text\": buttonText,\n \"title\": title}\n return render(request, \"dbkeeper/try_again.html\", context)", "def report_unable_to_resume(self):\n self.to_screen('[download] Unable to resume')", "def restart():\n msg = messagebox.showinfo('YES!', \"You're Right\")\n window.destroy()\n game()", "def _confirm(message):\n result = ''\n while result not in ('y', 'n'):\n try:\n result = raw_input('%s Continue (y/n)? ' % message)\n except EOFError:\n result = 'n'\n return result == 'y'", "def cancel():", "def _do_fail_retry(self, event):\n if self._retries > 0:\n self._retries -= 1\n self._state_machine.retry()\n else:\n self._state_machine.abort(result=event.result)", "def _do_fail_retry(self, event):\n if self._retries > 0:\n self._retries -= 1\n self._state_machine.retry()\n else:\n self._state_machine.abort(result=event.result)", "def button_ack(self):\n self.connect_button = 0", "def on_cancel(self, *args):\n self.response(Gtk.ResponseType.CANCEL)", "def cancel(self):", "def cancel(self):", "def cancel(self):", "def run(self):\n # show the dialog\n self.dlg.show()\n # Run the dialog event loop\n \n self.integrado = True\n QSettings().setValue('integrando', 'True')\n \n # See if OK was pressed\n result = self.dlg.exec_()\n if result:\n # Do something useful here - delete the line containing pass and\n # substitute with your code.\n pass", "def user_warning(self, message, caption='Warning!'):\n dlg = wx.MessageDialog(self, message, caption,\n wx.OK | wx.CANCEL | wx.ICON_WARNING)\n if self.show_dlg(dlg) == wx.ID_OK:\n continue_bool = True\n else:\n continue_bool = False\n dlg.Destroy()\n return continue_bool", "def user_warning(self, message, caption='Warning!'):\n dlg = wx.MessageDialog(self, message, caption,\n wx.OK | wx.CANCEL | wx.ICON_WARNING)\n if self.show_dlg(dlg) == wx.ID_OK:\n continue_bool = True\n else:\n continue_bool = False\n dlg.Destroy()\n return continue_bool", "def paymentfailed_cod(self):\n WebDriverWait(self.driver, 20).until(EC.visibility_of_element_located((By.CSS_SELECTOR, self.CSS_RETRY_PAYMENT_COD)))\n self.driver.find_element_by_css_selector(self.CSS_RETRY_PAYMENT_COD).click()\n time.sleep(4)" ]
[ "0.64992744", "0.63597643", "0.6132208", "0.5936326", "0.5899217", "0.5896812", "0.5886796", "0.5824935", "0.5818185", "0.57966095", "0.5755113", "0.5753816", "0.5747904", "0.5747904", "0.5747904", "0.5747384", "0.574394", "0.57047844", "0.5692576", "0.56602055", "0.5655979", "0.56510407", "0.56236774", "0.5618869", "0.55880123", "0.5577658", "0.55672836", "0.556163", "0.55531967", "0.5525044", "0.5525044", "0.5525044", "0.5525044", "0.55101794", "0.5506297", "0.5491945", "0.5488197", "0.5485925", "0.54842997", "0.5472232", "0.54561406", "0.5454604", "0.5446218", "0.5435028", "0.5434487", "0.5422849", "0.5415241", "0.54133236", "0.5406169", "0.5401887", "0.53963184", "0.53958255", "0.5389289", "0.5378562", "0.5376013", "0.5373427", "0.536846", "0.5358849", "0.5338935", "0.5335633", "0.5334111", "0.5330931", "0.5328567", "0.5327754", "0.5319954", "0.5314947", "0.531473", "0.53091335", "0.5307228", "0.530659", "0.53029686", "0.5302177", "0.53007233", "0.52929384", "0.5286434", "0.5284132", "0.5278262", "0.5273506", "0.52629983", "0.5262665", "0.5262064", "0.5249965", "0.52425206", "0.5234708", "0.5229845", "0.5224563", "0.52190334", "0.52166456", "0.5197348", "0.5190228", "0.5190228", "0.518256", "0.5177841", "0.51769024", "0.51769024", "0.51769024", "0.51761305", "0.517383", "0.517383", "0.51694566" ]
0.57067364
17
Retrieves users' notifications based on current `auth_token`
def get_notifications( self, all: bool = False, participating: bool = False, since: Optional[datetime] = None, before: Optional[datetime] = None, per_page: int = 10, page: int = 1, ) -> List[Notification]: raw_res = self._notifications( all=all, participating=participating, since=since, before=before, per_page=per_page, page=page, ) return Notification.load_from_json_str(raw_res)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get(self):\n user_id = get_jwt_identity()\n user = find_user(user_id)\n\n args = pagination_parser.parse_args()\n return get_notifications(user, args['from_id'])", "def get_user_notifications(self, login):", "async def get_user_notifications(self):\n self._old_notifications = self.user_notifications # important for keeping track of what is new.\n\n async with self.web_session.get(self._api_notifications_url, headers=self._headers) as resp:\n if self.check_status(resp.status, self._api_notifications_url):\n data = await resp.json()\n self.user_notifications = create_notification_objects(data.get('notifications'))\n for user_notification in self.user_notifications:\n self.all_notifications[user_notification.id] = user_notification\n return self.user_notifications", "def list_notifications():\n token = request.args.get('token')\n user = User.query.filter_by(token=token).first()\n\n if user is None:\n return jsonify({\"error\": \"Access Denied!\"})\n\n # Filter Posts so the user doesn't have to filter it\n notifications = Notifications.query.filter_by(user_id=user.id).order_by(desc('created'))\n result = notification_schema.dump(notifications)\n\n # Notifications have been read delete them\n toDelete = Notifications.query.filter_by(user_id=user.id)\n toDelete.delete()\n\n return jsonify({\n \"notifications\": result\n })", "def notifications(request):\r\n user = request.user\r\n if user.is_authenticated():\r\n notifications = models.Notification.objects.by_user(user).unseen()\r\n return {\r\n 'unseen_notifications': notifications\r\n }\r\n else:\r\n return {}", "def get(self):\n FetchNotifications.__init__(self)\n kwargs = self.parser.parse_args()\n query = self.notifications_db.construct_lucene_complex_query([\n ('target_role', {'value': self.role}),\n ('targets', {'value': self.username, 'join_operator': 'OR'}),\n ('group', {'value': kwargs['group'], 'join_operator': 'AND'})])\n notifications = self.notifications_db.full_text_search('search', query, page=kwargs['page'], limit=10, sort=\"\\_id\")\n self.set_seen(notifications)\n self.logger.info(\"Fetched notifications\")\n return {'notifications': notifications}", "def get_user_messages_by_token(token):\n session = get_session_by_token(token)\n if not session['success']:\n return session\n\n return get_user_messages_by_email(token, session['data']['user'])", "def get_notifications(self, new=True):\n url = (\"https://api.imgur.com/3/account/{0}/\"\n \"notifications\".format(self.name))\n resp = self._imgur._send_request(url, params=locals(), needs_auth=True)\n msgs = [Message(msg_dict, self._imgur, has_fetched=True) for msg_dict\n in resp['messages']]\n replies = [Comment(msg_dict, self._imgur, has_fetched=True) for\n com_dict in resp['replies']]\n return {'messages': msgs, 'replies': replies}", "def get(self):\n # TODO this endpoint returns null is instead of respoinse message when token is not in the header, read about error handling to solve this issue\n return get_users()", "def get(self):\n CheckNotifications.__init__(self)\n stats = self.notifications_db.get('notification_stats')['stats']\n if self.username in stats:\n return stats[self.username]\n return {'All': 0, 'unseen': 0}", "def get_all_users():\n token = request.headers.get('token')\n\n # Token Validation\n token_valid, response = is_token_valid(token)\n if not token_valid:\n return response\n token_username = response\n\n # Privilege handling\n if token_username != 'admin':\n return jsonify({'message': \"You aren't allowed to access this\"}), 404\n\n return 
jsonify(list(Users.values())), 200", "async def fetch_notifications(self, type=\"all\", sent=False, after=None):\n\n logging.debug(\"Fetching user notifications\")\n\n params = {\n \"type\": type,\n \"sent\": sent\n }\n\n if after is not None:\n params[\"after\"] = after\n\n notifs = await self.client.request.get(\"/auth/user/notifications\")\n return [self.client.BaseNotification.build_notification(\n self.client, notif, self.loop) for notif in notifs[\"data\"]]", "def get_notification(limit=10, username=None):\n if username is not None:\n user_code = get_user_code(username)\n if user_code is None:\n return make_response(jsonify({'error': 'User does not exists'}), 400)\n u = models.Log.query.filter_by(user_code=user_code).order_by(models.Log.timestamp.desc()).limit(limit).all()\n else:\n u = models.Log.query.order_by(models.Log.timestamp.desc()).limit(limit).all()\n notifications = []\n for i in u:\n parameters = extract_parameters(i.parameters)\n item_name = get_item_name(i.item_code)\n username = get_username(i.user_code)\n if i.action == 'U':\n notifications.append(\"%s: %s %s %s of item %s(code:%s)\" %(i.timestamp, username, \"Updated\", parameters, item_name, i.item_code))\n elif i.action == 'D':\n notifications.append(\"%s: %s %s %s of item %s(code:%s)\" %(i.timestamp, username, \"Deleted\", \"entry\", item_name, i.item_code))\n elif i.action == 'IV':\n notifications.append(\"%s: %s %s %s of item %s(code:%s)\" %(i.timestamp, username, \"Inserted\", \"new variants\", item_name, i.item_code))\n else:\n notifications.append(\"%s: %s %s %s of item %s(code:%s)\" %(i.timestamp, username, \"Inserted\", \"new entry\", item_name, i.item_code))\n return make_response(jsonify(notifications))", "def get_notifications(self):\n res = self.get_object(\"/integrationServices/v3/notification\")\n return res.get(\"notifications\", [])", "async def get_notifications(\n self,\n profile_id: UUID,\n older_than: Optional[dt.datetime] = Query(None),\n limit: Optional[int] = Query(10, ge=1, le=20)):\n return await self._service.find_notifications_by_profile_id(\n profile_id, older_than=older_than, limit=limit)", "def get_notifications(self, from_id):\n def _filter_noop(_):\n return True\n\n return get_page(\n mongoengine_model=Notification,\n extra_query_args={\n 'owner': self\n },\n extra_filter_func=_filter_noop,\n from_id=from_id,\n page_count=NotificationPageSize\n )", "def get_notifications(self: object, *args, parameters: dict = None, **kwargs) -> dict:\n # [GET] https://assets.falcon.crowdstrike.com/support/api/swagger.html#/recon/GetNotificationsV1\n return process_service_request(\n calling_object=self,\n endpoints=Endpoints,\n operation_id=\"GetNotificationsV1\",\n keywords=kwargs,\n params=handle_single_argument(args, parameters, \"ids\")\n )", "def _notifications(\n self,\n all: bool = False,\n participating: bool = False,\n since: Optional[datetime] = None,\n before: Optional[datetime] = None,\n page: int = 1,\n per_page: int = 10,\n ) -> str:\n headers = {\n \"Authorization\": \"token {}\".format(self.auth_token),\n \"accept\": \"application/vnd.github.v3+json\",\n }\n params = {\n \"all\": \"true\" if all else \"false\",\n \"participating\": \"true\" if participating else \"false\",\n \"page\": page,\n \"per_page\": per_page,\n }\n if since is not None:\n params[\"since\"] = since.isoformat()\n if before is not None:\n params[\"before\"] = before.isoformat()\n if per_page > 100:\n raise Exception(\n \"Github API support maximum 100 notifications per page for api calls\"\n )\n res = 
request(\"GET\", self.NOTIFICATIONS_URL, headers=headers, params=params)\n return res.text", "async def update_cache_from_notification(self) -> List[Notification]:\n new_notifications = []\n try:\n notifications = await self.get_user_notifications()\n\n if not notifications:\n return new_notifications\n\n new_notifications = self.get_new_notifications()\n for notification in new_notifications:\n await self.__manage_notification_posts(notification)\n except Exception as e:\n if self.verbose:\n print(f\"Failed to update Weverse Cache - {e}\")\n return new_notifications", "def get_notifications(self):\n return self.ws.events['notifications']", "def list_webhooks(self):\n response = requests.get(\n '%spreferences/notifications' % self._url,\n **self._auth\n )\n\n if response.status_code == 401:\n raise MoipAuthorizationException(response.json())\n else:\n pretty_print(response.json())\n return response.json()", "def get(self):\n SearchNotifications.__init__(self)\n kwargs = self.parser.parse_args()\n search = '*' + kwargs['search'] + '*'\n query = self.notifications_db.construct_lucene_complex_query([\n ('target_role', {'value': self.role}),\n ('targets', {'value': self.username, 'join_operator': 'OR'}),\n ('action_objects', {'value': search, 'join_operator': 'AND', 'open_parenthesis': True}),\n ('title', {'value': search, 'join_operator': 'OR', 'close_parenthesis': True})])\n notifications = self.notifications_db.full_text_search('search', query, page=kwargs['page'], limit=10, sort=\"\\_id\")\n self.set_seen(notifications)\n self.logger.info(\"Searched text %s in notifications\" % search)\n return notifications", "def get_emails(self, token):\n user_email_url = get_config('login.github.emails_info_url')\n headers = {\n \"Authorization\": \"token %s\" % token\n }\n email_info_resp = get_remote(user_email_url, headers)\n email_list = json.loads(email_info_resp)\n\n return email_list", "def notifications(apiKey):\r\n if apiKey is None and os.path.exists(KEY_FILE):\r\n apiKey = _get_saved_key(apiKey)\r\n url = '{}/notifications'.format(USGS_API_ENDPOINT)\r\n payload = {\r\n \"jsonRequest\": payloads.notifications(apiKey)\r\n }\r\n logger.debug(\"API call URL: {}\".format(url))\r\n logger.debug(\"API call payload: {}\".format(payload))\r\n response = requests.post(url, payload).json()\r\n logger.debug(\"Received response:\\n{}\".format(json.dumps(response, indent=4)))\r\n _catch_usgs_error(response)\r\n\r\n return response", "def get_messages(self, new=True):\n url = (\"https://api.imgur.com/3/account/{0}/notifications/\"\n \"messages\".format(self.name))\n result = self._imgur._send_request(url, params=locals(),\n needs_auth=True)\n return [Notification(msg_dict, self._imgur, has_fetched=True) for\n msg_dict in result]", "def get_recent_notifications(user_id):\n raw_notifications = cacheAPI._get_notifications(user_id)\n response = [{\n 'user_name': notification['sender_name'],\n 'user_url': reverse('user-profile',\n args=[notification['sender_id']]),\n 'user_avatar': get_thumbnail(\n 'img/users/thumbnails/' + \\\n str(notification['sender_id']) + 't.jpg',\n '25x25',\n crop='center'\n ).url,\n 'message': MESSAGES[notification['notification_type']],\n 'item_name': notification.get('item_name', ''),\n 'item_url': (reverse(notification['item_type']+'-profile',\n args=[slugify(notification.get('item_name', 'none'))])\n if notification.get('item_type') else ''),\n 'time_since': timesince(notification['timestamp'])\n } for notification in raw_notifications]\n\n return simplejson.dumps(response)", "def 
get_notifications(user_id, page):\n notifications = \\\n Notification.objects.select_related('sender',\n 'sender__userprofile',\n 'item',\n 'item__movie') \\\n .filter(recipient=user_id) \\\n .order_by('-created_at')\n paginator = Paginator(notifications, 20)\n\n try:\n next_page = paginator.page(page).next_page_number()\n paginator.page(next_page)\n except (EmptyPage, InvalidPage):\n next_page = ''\n\n response = [_generate_notification_response(notification, next_page) \n for notification in paginator.page(page)]\n\n return simplejson.dumps(response)", "def show_notifications(request):\n user = request.user\n notifications = Notification.objects.filter(user=user).order_by('-date')\n\n # change loaded notifications to 'seen'\n Notification.objects.filter(user=user, is_seen=False).update(is_seen=True)\n\n template = loader.get_template('notifications.html')\n\n context = {\n 'notifications': notifications,\n }\n\n return HttpResponse(template.render(context, request))", "def hbtn_api_user(hbtn_auth_token):\n url = 'https://intranet.hbtn.io/users/me.json'\n resp = requests.get(url, params={'auth_token': hbtn_auth_token})\n return resp.json()", "def get_all_user_notifications(self, user_id):\n dao = NotificationsDAO()\n notifications = dao.get_all_user_notifications(user_id)\n result_list = []\n for row in notifications:\n result = self.build_notifications_dict(row)\n result_list.append(result)\n return jsonify(Notifications=result_list)", "def notifications(id):\n return core.query(schema.notify, id)", "def notifications(self):\r\n return notifications.Notifications(self)", "def notifications(self):\r\n return notifications.Notifications(self)", "def get(self):\n app_id = app_identity.get_application_id()\n users = User.query(User.email != None)\n for user in users:\n games = Game.query(Game.user == user.key, Game.game_over == False)\n if games:\n subject = \"This is a reminder!\"\n body = \"Hello {}, you have some unfinished games.\".format(\n user.name)\n\n mail.send_mail('noreply@{}.appspot.com'.format(app_id),\n user.email, subject, body)", "def notify_users_of_reminders():\n\n #Get current date into dd/mm/YYYY format.\n now = datetime.datetime.now()\n todays_date = now.strftime(\"%d/%m/%Y\")\n\n #Get current time and convert it to hh:mm.\n todays_time = now.strftime(\"%H:%M\")\n print(todays_time)\n\n #Select all notifications from the database based on that date and time.\n notifications_query = \"\"\"SELECT user, reminder_msg FROM reminders WHERE (date=%s AND time=%s);\"\"\"\n\n #Setup our parameters\n notifications_params = (todays_date, todays_time)\n\n #TODO: Add in cursor.\n #TODO: Run query and get reminder data.\n #TODO: Loop over returned rows, and notify users with send_message_to_irc()", "def send_notifications_to_all_users(sender,\n instance,\n created, *args, **kwargs):\n\n if instance and created:\n users_followers = instance.author.followers.all()\n\n link = f\"\"\"{os.getenv(\"HEROKU_BACKEND_URL\")}/articles/\\n\"\"\"\n f\"\"\"{instance.slug}\"\"\"\n for user in users_followers:\n if user.get_notifications:\n uuid = urlsafe_base64_encode(force_bytes(user)\n ).decode(\"utf-8\")\n subscription = f'{os.getenv(\"HEROKU_BACKEND_URL\")}/api/' +\\\n 'v1/users/' +\\\n f'unsubscribe/{uuid}/'\n sender = os.getenv('EMAIL_HOST_USER')\n email = user.email\n email_subject = \"Author's Haven Email Notification\"\n message = render_to_string('create_article.html', {\n 'title': email_subject,\n 'username': user.username,\n 'link': link,\n 'subscription': subscription\n })\n\n 
send_mail(email_subject, '', sender, [\n email, ], html_message=message)\n notify.send(instance.author, recipient=user,\n verb='A user you follow has a new post',\n action_object=instance)", "def _get_notifications(self):\r\n student = self._student('GET')\r\n if student is None:\r\n self._error_response()\r\n\r\n else:\r\n self._success_response({\r\n 'student_sub_count': self.server.DUMMY_DATA['student_sub_count'],\r\n 'count_required': student.num_required,\r\n 'count_graded': student.num_graded,\r\n 'count_available': student.num_pending\r\n })", "def notification():\n # pop-up notification\n notifies = NotifyModel.get_notify(current_user.get_id())\n return jsonify(notifications=notifies)", "def get(self, request, format=None):\n user_social_auth = UserSocialAuth.objects.get(user=self.request.user)\n credentials = AccessTokenCredentials(user_social_auth.extra_data['access_token'],\n 'my-user-agent/1.0')\n http = httplib2.Http()\n http = credentials.authorize(http)\n service = discovery.build('gmail', 'v1', credentials=credentials)\n results = service.users().messages().list(userId='me').execute()\n messages = []\n for result in results['messages'][:100]:\n \n msg = service.users().messages().get(userId='me', id=result['id']).execute()\n subject = ''\n _from = ''\n for header in msg['payload']['headers']:\n if header['name'] == 'Subject':\n subject = header['value']\n elif header['name'] == 'From':\n _from = header['value']\n messages.append({'subject': subject, 'from': _from})\n \n return Response(messages)", "def users():\n access_token = session['access_token']\n return \"%s\" % list_users(access_token)", "def get_user_messages_by_email(token, email):\n response = get_user_data_by_token(token)\n if not response['success']:\n return response\n\n response = get_user_data_by_email(token, email)\n if not response['success']:\n return response\n\n data = query_db('SELECT * FROM Messages WHERE recipient = ? 
ORDER BY timestamp DESC', [email])\n\n for row in data:\n del row['recipient']\n del row['timestamp']\n\n return {'success': True, 'message': 'User messages retrieved', 'data': data, 'code': 200}", "def get_conversation_list(request):\n collected_values = {}\n\n # Only accept GET requests for this endpoint\n if request.method != 'GET':\n collected_values[\"success\"] = False\n collected_values[\"errmsg\"] = \"Wrong HTTP verb\"\n return JsonResponse(collected_values, status=400)\n\n # Extract params\n uid = request.GET['uid']\n token = request.GET['token']\n limit = int(request.GET['limit']) # Force a limiter to see how many users to get\n\n # Check if the token is valid\n is_valid, collected_values[\"token\"] = check_auth(uid, token, timezone.now())\n if not is_valid:\n collected_values[\"success\"] = False\n collected_values[\"errmsg\"] = \"Invalid Token\"\n return JsonResponse(collected_values, status=400)\n\n # Maybe cache or find better way of getting most recent id's messaged\n # Do a walkthrough of all messages and count totals\n # Potential Improvement is to keep a mapping of all messages sent from users to users\n users = {}\n msg_sent = Messages.objects.filter(user_id=uid).order_by('-created_at')[:limit]\n msg_recieved = Messages.objects.filter(other_id=uid).order_by('-created_at')[:limit]\n for msg in msg_sent:\n if users.get(msg.other_id) is None:\n users[msg.other_id] = 1\n else:\n users[msg.other_id] += 1\n for msg in msg_recieved:\n if users.get(msg.user_id) is None:\n users[msg.user_id] = 1\n else:\n users[msg.user_id] += 1\n\n # Collect return values\n collected_values[\"users\"] = users\n collected_values[\"success\"] = True\n\n LOGGER.info(\"Get Conversation List Result: %s\", collected_values)\n return JsonResponse(collected_values, status=200)", "async def get(self):\n await self.handle_request(self.users_api, 1)", "def lookup_users(self):\n return self.slack_users", "def get(self):\n return get_all_users()", "def get(self):\n return get_all_users()", "def get(self):\n return get_all_users()", "def get(self):\n return get_all_users()", "def list_users(access_token):\n request_url = OKTA_URL + \"api/v1/users\"\n headers = {\"Authorization\": \"Bearer \" + access_token}\n group_request = requests.get(request_url, headers=headers).json()\n return group_request", "def _get(self, *args, **kwargs):\n return self.deserialize_notifications(self.request.session.get(self.session_key)), True", "def get_notifications(ceilometer, base_id):\n\n _filter = [{\"field\": \"base_id\", \"op\": \"eq\", \"value\": base_id}]\n # limit is hardcoded in this code state. 
Later that will be changed via\n # connection string usage\n return [n.to_dict()\n for n in ceilometer.events.list(_filter, limit=100000)]", "def get_global_instant_notification_subscribers(self):\n subscriber_set = set()\n\n global_subscriptions = EmailFeedSetting.objects.filter(\n feed_type = 'q_all',\n frequency = 'i'\n )\n\n #segment of users who have tag filter turned off\n global_subscribers = User.objects.filter(\n email_tag_filter_strategy = const.INCLUDE_ALL\n )\n subscriber_set.update(global_subscribers)\n\n #segment of users who want emails on selected questions only\n subscriber_set.update(\n self.get_global_tag_based_subscribers(\n subscription_records = global_subscriptions,\n tag_mark_reason = 'good'\n )\n )\n\n #segment of users who want to exclude ignored tags\n subscriber_set.update(\n self.get_global_tag_based_subscribers(\n subscription_records = global_subscriptions,\n tag_mark_reason = 'bad'\n )\n )\n return subscriber_set", "def users_unauthed_endpoint():\n if not config.DEBUG:\n limit_to_localhost()\n return jsonify(user_ids=get_unauthed_users())", "def query_notifications(self: object, parameters: dict = None, **kwargs) -> dict:\n # [GET] https://assets.falcon.crowdstrike.com/support/api/swagger.html#/recon/QueryNotificationsV1\n return process_service_request(\n calling_object=self,\n endpoints=Endpoints,\n operation_id=\"QueryNotificationsV1\",\n keywords=kwargs,\n params=parameters\n )", "def notification_listener(self, interval=60):\n while True:\n for notification in self.get_notifications():\n yield notification\n time.sleep(interval)", "def notification_list(request):\n try:\n validator = NotificationListValidator(request.GET)\n valid = validator.validate() # Validate the request\n if valid:\n current_user_id = request.user_id\n page_limit = int(request.GET['page_limit'])\n page_offset = int(request.GET['page_offset'])\n\n # notification listing\n notification_list = Notification.objects.filter(user_id=current_user_id).all().order_by('-created_on')[page_offset:page_limit+page_offset]\n serializer = NoitifcationListSerializer(notification_list, many=True)\n\n # set is_read = 1\n Notification.objects.filter(user_id=current_user_id).update(\n is_read=1\n )\n \n return Response({'data':serializer.data}, status=status.HTTP_200_OK)\n else:\n return Response({'error':requestErrorMessagesFormate(validator.get_message())}, status=status.HTTP_200_OK)\n except Exception as exception:\n logerror('notifications/views.py/notification_list', str(exception))\n return Response({'error':str(exception)}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)", "def get_users_to_notify():\n db = sqlite3.connect(database)\n cursor = db.cursor()\n query = \"SELECT chat_id, notify_at, restaurant_id FROM notifications WHERE status=1\"\n users = cursor.execute(query)\n result = users.fetchall()\n selected_users = [\n {\"chat_id\": list(user)[0], \"time\": list(user)[1], \"restaurant\": list(user)[2]}\n for user in result\n ]\n db.close()\n\n return selected_users", "def get_subscribed_to_newsletter(self, queryset=None):\n if queryset is None:\n queryset = super(RevolvUserProfileManager, self).get_queryset()\n subscribed_users = queryset.filter(\n subscribed_to_newsletter=True\n ).order_by('user__date_joined')\n return subscribed_users", "def test_retrieve_notifications_list(client):\n create_user_response = create_user(client, TEST_USER_NAME, TEST_USER_PASS)\n assert create_user_response.status_code == HttpStatus.created_201.value\n\n new_notification_message_one = 'The winners will be 
announced in 1 minute'\n new_notification_category_one = 'Information'\n post_response = create_notification(\n client,\n new_notification_message_one,\n 15,\n new_notification_category_one)\n assert post_response.status_code == HttpStatus.created_201.value\n assert Notification.query.count() == 1\n\n new_notification_message_two = 'There is a problem with one score'\n new_notification_category_two = 'Error'\n post_response = create_notification(\n client,\n new_notification_message_two,\n 10,\n new_notification_category_two)\n assert post_response.status_code == HttpStatus.created_201.value\n assert Notification.query.count() == 2\n\n get_first_page_url = url_for('service.notificationlistresource', _external=True)\n get_first_page_response = client.get(\n get_first_page_url,\n headers=get_authentication_headers(TEST_USER_NAME, TEST_USER_PASS))\n assert get_first_page_response.status_code == HttpStatus.ok_200.value\n\n get_first_page_response_data = json.loads(\n get_first_page_response.get_data(as_text=True))\n assert get_first_page_response_data['count'] == 2\n assert get_first_page_response_data['previous'] is None\n assert get_first_page_response_data['next'] is None\n assert get_first_page_response_data['results'] is not None\n assert len(get_first_page_response_data['results']) == 2\n assert get_first_page_response_data['results'][0]['message'] == \\\n new_notification_message_one\n assert get_first_page_response_data['results'][1]['message'] == \\\n new_notification_message_two\n\n get_second_page_url = url_for('service.notificationlistresource', page=2)\n get_second_page_response = client.get(\n get_second_page_url,\n headers=get_authentication_headers(TEST_USER_NAME, TEST_USER_PASS))\n assert get_second_page_response.status_code == HttpStatus.ok_200.value\n\n get_second_page_response_data = json.loads(\n get_second_page_response.get_data(as_text=True))\n assert get_second_page_response_data['previous'] is not None\n assert get_second_page_response_data['previous'] == url_for(\n 'service.notificationlistresource', page=1)\n assert get_second_page_response_data['next'] is None\n assert get_second_page_response_data['results'] is not None\n assert len(get_second_page_response_data['results']) == 0", "def get_all_tasks(username):\n if not username:\n return []\n task_notif_list = []\n for obj in TaskNotification.objects.filter(username=username):\n task_notif_list.append({\n 'id': obj.task_id,\n 'name': obj.name,\n 'status': obj.status,\n 'payload': obj.payload\n })\n return task_notif_list", "def getUser(self, authenticationToken):\r\n pass", "def get_data_of_token_holder(token):\n response = requests.get(\n f'{GITHUB_API_URL}/user',\n headers={\n 'Accept': 'application/vnd.github.v3+json',\n 'Authorization': f'token {token}',\n },\n )\n response.raise_for_status()\n return response.json()", "def notify_users(title, message, currentusers=[]):\n if args.test_mode:\n send_pushover_notification(config['pushoverusers'][args.test_mode], \"[TEST] \" + title, message)\n else:\n for u in config['pushoverusers'].keys(): # list of names for those with pushover\n if u not in currentusers:\n send_pushover_notification(config['pushoverusers'][u], title, message)\n time.sleep(0.5) # be nice to the api", "def command_list(self):\n # Get buckets\n project_bucket_mappings = {\n 'all-of-us-rdr-prod': PUBSUB_NOTIFICATION_BUCKETS_PROD,\n 'all-of-us-rdr-stable': PUBSUB_NOTIFICATION_BUCKETS_STABLE,\n 'all-of-us-rdr-sandbox': PUBSUB_NOTIFICATION_BUCKETS_SANDBOX,\n }\n\n bucket_list = [self.args.bucket] if 
self.args.bucket else project_bucket_mappings[self.gcp_env.project]\n\n notifications_dict = {\n \"notifications\": []\n }\n\n for bucket_name in bucket_list:\n # call storage api\n client = storage.Client()\n bucket = client.get_bucket(bucket_name)\n notifications = bucket.list_notifications(client)\n\n for notification in notifications:\n # Skip the default topic notification (which won't have an integer ID\"\n try:\n id_int = int(notification.notification_id)\n except ValueError:\n continue\n\n if self.args.id and self.args.id != id_int:\n continue\n\n output_dict = dict()\n\n try:\n output_dict['bucket'] = bucket_name\n output_dict['id'] = notification.notification_id\n output_dict['topic_name'] = notification.topic_name\n output_dict['topic_project'] = notification.topic_project\n output_dict['payload_format'] = notification.payload_format\n output_dict['object_name_prefix'] = notification._properties['object_name_prefix']\n output_dict['event_types'] = notification.event_types\n except KeyError:\n pass\n\n notifications_dict['notifications'].append(output_dict)\n\n pprint(notifications_dict)\n\n return 0", "async def get(self):\n await self.handle_request(self.chats_user_api, 1)", "def user_data(self, access_token, *args, **kwargs):\n data = {'method': 'users.getInfo', 'session_key': access_token}\n return mailru_api(data)[0]", "def get(self, auth):\n admin = access_control.is_admin(auth[\"id\"])\n if admin[\"success\"] is not True:\n return make_response(jsonify({\n \"msg\": auth_error_messages[\"403\"],\n \"status_code\": 403\n }), 403)\n incidents_data = incident_db.get_incidents()\n if isinstance(incidents_data, str):\n return make_response(jsonify({\n \"msg\": incidents_data,\n \"status_code\": 404\n }), 404)\n return make_response(jsonify({\n \"data\": incidents_data,\n \"msg\": self.messages[\"read\"],\n \"status_code\": 200\n }), 200)", "def notifications_processor(request):\r\n\r\n now = datetime.now()\r\n today = date.today()\r\n\r\n # DISABLED--seems confusing to have different behavior\r\n # On Fridays, get notified for the weekend and next Monday\r\n #\r\n # weekday = today.weekday()\r\n # if weekday == 4:\r\n # days_diff = 4\r\n # else:\r\n\r\n # Get notified for classes on the next day\r\n days_diff = 2\r\n\r\n end_day = today + timedelta(days=days_diff)\r\n end_datetime = datetime.combine(end_day, time(0, 0))\r\n \r\n if request.user.is_authenticated:\r\n lessons = Lesson.objects.filter(teacher=request.user, notified=False, start_at__gte=now, start_at__lt=end_datetime)\r\n\r\n # Combine all classes into one message\r\n messages = \"\"\r\n\r\n for lesson in lessons:\r\n lesson.notified = True\r\n lesson.save()\r\n lesson_start_at = datetime.strftime(lesson.start_at, \"%a, %b. 
%d, %I:%M %p\")\r\n messages += f\"{lesson.student.name}'s class on {lesson_start_at}<br>\"\r\n\r\n if messages != \"\":\r\n Notification.objects.create(teacher=request.user,\r\n message=messages,\r\n due_at=end_datetime)\r\n \r\n notifications = Notification.objects.filter(teacher=request.user, is_new=True)\r\n confirmations = Confirmation.objects.filter(teacher=request.user, is_new=True)\r\n\r\n for c in confirmations:\r\n c.is_new = False\r\n c.save()\r\n\r\n confirmations = confirmations[:1]\r\n \r\n return {'notifications': notifications,\r\n 'confirmations': confirmations}\r\n \r\n return {'notifications': [],\r\n 'confirmations': []}", "def getUsers(client, req):\n client.sendTarget(req[\"id\"], key=\"get.users\", payload={\"payload\": magic.users})", "def get_users(self):\r\n\t\tlogger.debug(\"Fetch users\")\r\n\t\t\r\n\t\treturn login.get_users()", "def get_users(session, access_token, request_params={}):\n endpoint = \"https://graph.microsoft.com/v1.0/users\"\n r = session.get(endpoint, headers={\"Authorization\": \"Bearer \" + access_token})\n response = json.loads(r.text)\n return response[\"value\"]", "def _get_notification_data(\n self, current_notification, last_notification\n ): # pylint: disable=unused-argument\n return {}", "def getCurrentUserNewsfeed():\n if not g.user:\n return redirect(url_for('login'))\n return getUserNewsfeed(g.user)", "async def notify_users():\n if not app['websockets']:\n return\n\n message = build_user_message()\n await wait([user.send(message) for user in app['websockets']])", "async def get_user(\n token: str = Depends(get_token), users: UserRepository = Depends()\n) -> User:\n user = await users.get(token=token)\n if user:\n return user\n raise HTTPException(status_code=403, detail=\"Invalid token\")", "def get_users():\n current_user = get_jwt_identity()\n\n if not current_user:\n print('uri=/login error=\"Missing user\"', flush=True)\n return jsonify(message=\"Missing user\"), 400\n\n if not Administrator.is_administrator(current_user):\n print('non-admin user error', flush=True)\n return jsonify(message=\"Forbidden\"), 403\n\n try:\n users = User.get_users()\n print(users, flush=True)\n return jsonify(message='{}'.format(json.dumps(users))), 200\n\n except Exception as e:\n print(e, flush=True)\n return jsonify(message='{}'.format(e)), 501", "def get_users(self):\n url = \"%s/api/v1/users\" % self.subdomain\n req = request.get(url, headers=self.api_headers)\n if request.ok(req):\n response_json = req.json()\n return response_json[\"users\"]\n else:\n return None", "def get_by_frequency(frequency, user_email):\n return filter_by_prefix(PendingAlert.all(), frequency + ':' +\n user_email + ':')", "def get_every_user():\r\n connect(\"mongodb://vcm-3594.vm.duke.edu:27017/heart_rate_app\")\r\n user_list = get_all_users()\r\n return_dict = {\r\n \"user_emails\": user_list\r\n }\r\n return jsonify(return_dict),200", "def user_list_friend_requests(self):\n email_token = auth.current_user()[0]\n friend_emails = self.friend_database.get_friend_requests(email_token)\n friends = [self.auth_server.profile_query(email) for email in friend_emails]\n return json.dumps(friends), 200", "def getInterestedUsers():", "def get_global_tag_based_subscribers(\n self,\n tag_mark_reason = None,\n subscription_records = None\n ):\n if tag_mark_reason == 'good':\n email_tag_filter_strategy = const.INCLUDE_INTERESTING\n user_set_getter = User.objects.filter\n elif tag_mark_reason == 'bad':\n email_tag_filter_strategy = const.EXCLUDE_IGNORED\n user_set_getter = 
User.objects.exclude\n else:\n raise ValueError('Uknown value of tag mark reason %s' % tag_mark_reason)\n\n #part 1 - find users who follow or not ignore the set of tags\n tag_names = self.get_tag_names()\n tag_selections = MarkedTag.objects.filter(\n tag__name__in = tag_names,\n reason = tag_mark_reason\n )\n subscribers = set(\n user_set_getter(\n tag_selections__in = tag_selections\n ).filter(\n notification_subscriptions__in = subscription_records\n ).filter(\n email_tag_filter_strategy = email_tag_filter_strategy\n )\n )\n\n #part 2 - find users who follow or not ignore tags via wildcard selections\n #inside there is a potentially time consuming loop\n if askbot_settings.USE_WILDCARD_TAGS:\n #todo: fix this \n #this branch will not scale well\n #because we have to loop through the list of users\n #in python\n if tag_mark_reason == 'good':\n empty_wildcard_filter = {'interesting_tags__exact': ''}\n wildcard_tags_attribute = 'interesting_tags'\n update_subscribers = lambda the_set, item: the_set.add(item)\n elif tag_mark_reason == 'bad':\n empty_wildcard_filter = {'ignored_tags__exact': ''}\n wildcard_tags_attribute = 'ignored_tags'\n update_subscribers = lambda the_set, item: the_set.discard(item)\n\n potential_wildcard_subscribers = User.objects.filter(\n notification_subscriptions__in = subscription_records\n ).filter(\n email_tag_filter_strategy = email_tag_filter_strategy\n ).exclude(\n **empty_wildcard_filter #need this to limit size of the loop\n )\n for potential_subscriber in potential_wildcard_subscribers:\n wildcard_tags = getattr(\n potential_subscriber,\n wildcard_tags_attribute\n ).split(' ')\n\n if tags_match_some_wildcard(tag_names, wildcard_tags):\n update_subscribers(subscribers, potential_subscriber)\n\n return subscribers", "def unread_forum_topics(context):\n request = context.get('request', None)\n\n # Get all topics\n all_forum_topics = Topic.objects.all()\n\n # Retrieve the unread topics\n return TrackingHandler(request=request).get_unread_topics(all_forum_topics, request.user)", "def get_queryset(self):\n uuid = self.kwargs.get(\"uuid\")\n user = self.request.user\n\n return models.NotificationSetting.objects.filter(user=user, uuid=uuid)", "def authenticate_with_users_service(gauth_token):\n return requests.post(\n app.config['USERS_ENDPOINT'] + 'authenticate',\n data={'gauth_token': gauth_token})", "def get_all_users():", "def get_all_users():\n return jsonify(admin.get_all_users(current_app.scoped_session()))", "def get_slack_users(users=[]):\n api_call = slack_client.api_call(\"users.list\")\n if api_call.get('ok'):\n # retrieve all users so we can find our bot\n users = api_call.get('members')\n return users", "def get_user_subscriptions(self, use_threading=False):\r\n \r\n subs = self.get_subscriptions(use_threading)\r\n return list(filter(lambda obj: isinstance(obj, User), subs))", "def test_get_users_eligible_for_fist_notification(self):\n # Given:\n self.batch_setup()\n # When:\n _datetime_24_months_ago = datetime.utcnow() - timedelta(days=750)\n criteria = {\"last_login_date\": _datetime_24_months_ago}\n criteria_one = {\"account_creation_date\": _datetime_24_months_ago}\n self.update_test_data(self.user_0, criteria)\n self.update_test_data(self.user_2, criteria)\n self.update_test_data(self.user_1, criteria_one)\n self.update_test_data(self.user_3, criteria_one)\n response = self.client.get(\"/api/batch/account/users/eligible-for-first-notification\", headers=self.headers)\n # Then:\n self.assertTrue(200, response.status_code)\n users = 
response.get_json()\n self.assertEqual(4, len(users))\n self.assertIn(self.user_0, users)\n self.assertIn(self.user_2, users)\n self.assertIn(self.user_1, users)\n self.assertIn(self.user_3, users)", "def test_tenants_tenant_id_notifications_get(self):\n pass", "def getInfo(notification):", "def get_users(settings, d):\n if d['env']:\n users = settings['USERS_PRODUCTION']\n else:\n users = settings['USERS_DEVELOPMENT']\n return users", "def fetch_all_users():\n url = \"{}/workspace/{}/users\".format(V1_API_URL, WORKSPACE_ID)\n responses = requests.get(url, headers=HEADERS)\n return [\n {\n \"acronym\": user[\"name\"].lower(),\n \"clockify_id\": user[\"id\"],\n \"email\": user[\"email\"].lower(),\n }\n for user in responses.json()\n ]", "def get_push_access_tokens(orcids):\n return (\n db.session.query(UserIdentity.id, RemoteToken.access_token)\n .filter(\n RemoteToken.id_remote_account == RemoteAccount.id,\n RemoteAccount.user_id == UserIdentity.id_user,\n UserIdentity.id.in_(orcids),\n cast(RemoteAccount.extra_data, JSONB).contains({\"allow_push\": True}),\n )\n .all()\n )", "def get_user_data_by_token(token, include_hash=False):\n session = get_session_by_token(token)\n if not session['success']:\n return session\n\n return get_user_data_by_email(token, session['data']['user'], include_hash)", "def get_all():\n return PushManager.query.all()", "def test_topic_notification_list_paginate(self):\n topic2 = utils.create_topic(self.category)\n comment2 = utils.create_comment(topic=topic2)\n topic_notification2 = TopicNotification.objects.create(\n user=self.user, topic=topic2,\n comment=comment2, is_active=True,\n action=COMMENT)\n\n utils.login(self)\n response = self.client.get(\n reverse('spirit:topic:notification:index'))\n self.assertEqual(\n list(response.context['notifications']),\n [topic_notification2, ])", "def get_users():\n return Response(f\"{User.get_all_users()}\", 200, mimetype='text/plain')", "def get_current_user():\n token = request.headers['token']\n decoded_token = decode_token(token)\n userId = decoded_token[\"userId\"]\n for user_obj in users_table:\n if user_obj.userId == userId:\n return {\"userId\": userId, \"isAdmin\": user_obj.isAdmin}" ]
[ "0.72785807", "0.70493317", "0.6741458", "0.6740879", "0.638995", "0.6368157", "0.62646663", "0.6193626", "0.60971195", "0.6062733", "0.5904651", "0.58698857", "0.58336437", "0.5826497", "0.58177674", "0.5768257", "0.5741692", "0.5732743", "0.5713906", "0.56797826", "0.5661132", "0.5618027", "0.55854285", "0.5567836", "0.55509406", "0.55436176", "0.5517269", "0.5514081", "0.54330105", "0.54215175", "0.54209816", "0.54111564", "0.54111564", "0.54076004", "0.54026806", "0.53985995", "0.53930986", "0.53844744", "0.53784096", "0.53781766", "0.53634006", "0.5353067", "0.5327449", "0.53264403", "0.5322007", "0.5322007", "0.5322007", "0.5322007", "0.53081554", "0.5303797", "0.5299769", "0.5289654", "0.5285288", "0.5282479", "0.5278636", "0.5270188", "0.5268274", "0.52676296", "0.52490544", "0.52450645", "0.5232066", "0.5223703", "0.5221362", "0.5220174", "0.52015835", "0.5173368", "0.51500833", "0.5142103", "0.5138538", "0.5138015", "0.51338625", "0.51222575", "0.5122167", "0.5121744", "0.51121444", "0.5106241", "0.51041484", "0.50996953", "0.5097488", "0.5095767", "0.50915074", "0.5086215", "0.508224", "0.50704134", "0.506828", "0.50670123", "0.5065993", "0.5062869", "0.5053687", "0.50478613", "0.50421035", "0.50361043", "0.50333965", "0.5032284", "0.50300974", "0.5013589", "0.50129455", "0.50011885", "0.49990526", "0.49919763" ]
0.54637754
28
API call for getting notifications
def _notifications(
    self,
    all: bool = False,
    participating: bool = False,
    since: Optional[datetime] = None,
    before: Optional[datetime] = None,
    page: int = 1,
    per_page: int = 10,
) -> str:
    headers = {
        "Authorization": "token {}".format(self.auth_token),
        "accept": "application/vnd.github.v3+json",
    }
    params = {
        "all": "true" if all else "false",
        "participating": "true" if participating else "false",
        "page": page,
        "per_page": per_page,
    }
    if since is not None:
        params["since"] = since.isoformat()
    if before is not None:
        params["before"] = before.isoformat()
    if per_page > 100:
        raise Exception(
            "Github API support maximum 100 notifications per page for api calls"
        )
    res = request("GET", self.NOTIFICATIONS_URL, headers=headers, params=params)
    return res.text
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_notifications(self: object, *args, parameters: dict = None, **kwargs) -> dict:\n # [GET] https://assets.falcon.crowdstrike.com/support/api/swagger.html#/recon/GetNotificationsV1\n return process_service_request(\n calling_object=self,\n endpoints=Endpoints,\n operation_id=\"GetNotificationsV1\",\n keywords=kwargs,\n params=handle_single_argument(args, parameters, \"ids\")\n )", "def adc_api_notification_resource():\n json = request.get_json(force=True)\n return jsonify(adc.notification_resource(json))", "def get(self):\n user_id = get_jwt_identity()\n user = find_user(user_id)\n\n args = pagination_parser.parse_args()\n return get_notifications(user, args['from_id'])", "def notifications(apiKey):\r\n if apiKey is None and os.path.exists(KEY_FILE):\r\n apiKey = _get_saved_key(apiKey)\r\n url = '{}/notifications'.format(USGS_API_ENDPOINT)\r\n payload = {\r\n \"jsonRequest\": payloads.notifications(apiKey)\r\n }\r\n logger.debug(\"API call URL: {}\".format(url))\r\n logger.debug(\"API call payload: {}\".format(payload))\r\n response = requests.post(url, payload).json()\r\n logger.debug(\"Received response:\\n{}\".format(json.dumps(response, indent=4)))\r\n _catch_usgs_error(response)\r\n\r\n return response", "def query_notifications(self: object, parameters: dict = None, **kwargs) -> dict:\n # [GET] https://assets.falcon.crowdstrike.com/support/api/swagger.html#/recon/QueryNotificationsV1\n return process_service_request(\n calling_object=self,\n endpoints=Endpoints,\n operation_id=\"QueryNotificationsV1\",\n keywords=kwargs,\n params=parameters\n )", "def notifications(id):\n return core.query(schema.notify, id)", "def getInfo(notification):", "def get_user_notifications(self, login):", "def notification():\n # pop-up notification\n notifies = NotifyModel.get_notify(current_user.get_id())\n return jsonify(notifications=notifies)", "def get_notifications(self):\n res = self.get_object(\"/integrationServices/v3/notification\")\n return res.get(\"notifications\", [])", "def get_notifications_detailed(self: object, *args, parameters: dict = None, **kwargs) -> dict:\n # [GET] https://assets.falcon.crowdstrike.com/support/api/swagger.html#/recon/GetNotificationsDetailedV1\n return process_service_request(\n calling_object=self,\n endpoints=Endpoints,\n operation_id=\"GetNotificationsDetailedV1\",\n keywords=kwargs,\n params=handle_single_argument(args, parameters, \"ids\")\n )", "def get_notifications(self, new=True):\n url = (\"https://api.imgur.com/3/account/{0}/\"\n \"notifications\".format(self.name))\n resp = self._imgur._send_request(url, params=locals(), needs_auth=True)\n msgs = [Message(msg_dict, self._imgur, has_fetched=True) for msg_dict\n in resp['messages']]\n replies = [Comment(msg_dict, self._imgur, has_fetched=True) for\n com_dict in resp['replies']]\n return {'messages': msgs, 'replies': replies}", "def command_list(self):\n # Get buckets\n project_bucket_mappings = {\n 'all-of-us-rdr-prod': PUBSUB_NOTIFICATION_BUCKETS_PROD,\n 'all-of-us-rdr-stable': PUBSUB_NOTIFICATION_BUCKETS_STABLE,\n 'all-of-us-rdr-sandbox': PUBSUB_NOTIFICATION_BUCKETS_SANDBOX,\n }\n\n bucket_list = [self.args.bucket] if self.args.bucket else project_bucket_mappings[self.gcp_env.project]\n\n notifications_dict = {\n \"notifications\": []\n }\n\n for bucket_name in bucket_list:\n # call storage api\n client = storage.Client()\n bucket = client.get_bucket(bucket_name)\n notifications = bucket.list_notifications(client)\n\n for notification in notifications:\n # Skip the default topic notification 
(which won't have an integer ID\"\n try:\n id_int = int(notification.notification_id)\n except ValueError:\n continue\n\n if self.args.id and self.args.id != id_int:\n continue\n\n output_dict = dict()\n\n try:\n output_dict['bucket'] = bucket_name\n output_dict['id'] = notification.notification_id\n output_dict['topic_name'] = notification.topic_name\n output_dict['topic_project'] = notification.topic_project\n output_dict['payload_format'] = notification.payload_format\n output_dict['object_name_prefix'] = notification._properties['object_name_prefix']\n output_dict['event_types'] = notification.event_types\n except KeyError:\n pass\n\n notifications_dict['notifications'].append(output_dict)\n\n pprint(notifications_dict)\n\n return 0", "def get_notifications(self):\n return self.ws.events['notifications']", "async def get_notification(self, second_try: bool = False) -> Optional[Dict[str, Any]]:\n return await self.sac.call(\n method=\"GET\", resource=NOTIFICATION_RESOURCE, timeout=API_TIMEOUT * 2\n )", "def notify(*values):\r\n data = {\"value\"+str(i+1): value for i, value in enumerate(values[:3])}\r\n\r\n response = requests.request(\"POST\", notification_url, data=data)\r\n response.raise_for_status()", "def get_notifications(ceilometer, base_id):\n\n _filter = [{\"field\": \"base_id\", \"op\": \"eq\", \"value\": base_id}]\n # limit is hardcoded in this code state. Later that will be changed via\n # connection string usage\n return [n.to_dict()\n for n in ceilometer.events.list(_filter, limit=100000)]", "def list_notifications():\n token = request.args.get('token')\n user = User.query.filter_by(token=token).first()\n\n if user is None:\n return jsonify({\"error\": \"Access Denied!\"})\n\n # Filter Posts so the user doesn't have to filter it\n notifications = Notifications.query.filter_by(user_id=user.id).order_by(desc('created'))\n result = notification_schema.dump(notifications)\n\n # Notifications have been read delete them\n toDelete = Notifications.query.filter_by(user_id=user.id)\n toDelete.delete()\n\n return jsonify({\n \"notifications\": result\n })", "def get_notification(limit=10, username=None):\n if username is not None:\n user_code = get_user_code(username)\n if user_code is None:\n return make_response(jsonify({'error': 'User does not exists'}), 400)\n u = models.Log.query.filter_by(user_code=user_code).order_by(models.Log.timestamp.desc()).limit(limit).all()\n else:\n u = models.Log.query.order_by(models.Log.timestamp.desc()).limit(limit).all()\n notifications = []\n for i in u:\n parameters = extract_parameters(i.parameters)\n item_name = get_item_name(i.item_code)\n username = get_username(i.user_code)\n if i.action == 'U':\n notifications.append(\"%s: %s %s %s of item %s(code:%s)\" %(i.timestamp, username, \"Updated\", parameters, item_name, i.item_code))\n elif i.action == 'D':\n notifications.append(\"%s: %s %s %s of item %s(code:%s)\" %(i.timestamp, username, \"Deleted\", \"entry\", item_name, i.item_code))\n elif i.action == 'IV':\n notifications.append(\"%s: %s %s %s of item %s(code:%s)\" %(i.timestamp, username, \"Inserted\", \"new variants\", item_name, i.item_code))\n else:\n notifications.append(\"%s: %s %s %s of item %s(code:%s)\" %(i.timestamp, username, \"Inserted\", \"new entry\", item_name, i.item_code))\n return make_response(jsonify(notifications))", "def get_notification(self, id):\n url = \"https://api.imgur.com/3/notification/{0}\".format(id)\n resp = self._send_request(url)\n return Notification(resp, self)", "def _get(self, *args, **kwargs):\n 
return self.deserialize_notifications(self.request.session.get(self.session_key)), True", "def test_retrieve_notifications_list(client):\n create_user_response = create_user(client, TEST_USER_NAME, TEST_USER_PASS)\n assert create_user_response.status_code == HttpStatus.created_201.value\n\n new_notification_message_one = 'The winners will be announced in 1 minute'\n new_notification_category_one = 'Information'\n post_response = create_notification(\n client,\n new_notification_message_one,\n 15,\n new_notification_category_one)\n assert post_response.status_code == HttpStatus.created_201.value\n assert Notification.query.count() == 1\n\n new_notification_message_two = 'There is a problem with one score'\n new_notification_category_two = 'Error'\n post_response = create_notification(\n client,\n new_notification_message_two,\n 10,\n new_notification_category_two)\n assert post_response.status_code == HttpStatus.created_201.value\n assert Notification.query.count() == 2\n\n get_first_page_url = url_for('service.notificationlistresource', _external=True)\n get_first_page_response = client.get(\n get_first_page_url,\n headers=get_authentication_headers(TEST_USER_NAME, TEST_USER_PASS))\n assert get_first_page_response.status_code == HttpStatus.ok_200.value\n\n get_first_page_response_data = json.loads(\n get_first_page_response.get_data(as_text=True))\n assert get_first_page_response_data['count'] == 2\n assert get_first_page_response_data['previous'] is None\n assert get_first_page_response_data['next'] is None\n assert get_first_page_response_data['results'] is not None\n assert len(get_first_page_response_data['results']) == 2\n assert get_first_page_response_data['results'][0]['message'] == \\\n new_notification_message_one\n assert get_first_page_response_data['results'][1]['message'] == \\\n new_notification_message_two\n\n get_second_page_url = url_for('service.notificationlistresource', page=2)\n get_second_page_response = client.get(\n get_second_page_url,\n headers=get_authentication_headers(TEST_USER_NAME, TEST_USER_PASS))\n assert get_second_page_response.status_code == HttpStatus.ok_200.value\n\n get_second_page_response_data = json.loads(\n get_second_page_response.get_data(as_text=True))\n assert get_second_page_response_data['previous'] is not None\n assert get_second_page_response_data['previous'] == url_for(\n 'service.notificationlistresource', page=1)\n assert get_second_page_response_data['next'] is None\n assert get_second_page_response_data['results'] is not None\n assert len(get_second_page_response_data['results']) == 0", "def send_notification_by_get(request, notification_type, phone_number_receiver, phone_number_sender):\n return try_to_send_notification(notification_type, phone_number_sender, phone_number_receiver)", "def notification_list(request):\n try:\n validator = NotificationListValidator(request.GET)\n valid = validator.validate() # Validate the request\n if valid:\n current_user_id = request.user_id\n page_limit = int(request.GET['page_limit'])\n page_offset = int(request.GET['page_offset'])\n\n # notification listing\n notification_list = Notification.objects.filter(user_id=current_user_id).all().order_by('-created_on')[page_offset:page_limit+page_offset]\n serializer = NoitifcationListSerializer(notification_list, many=True)\n\n # set is_read = 1\n Notification.objects.filter(user_id=current_user_id).update(\n is_read=1\n )\n \n return Response({'data':serializer.data}, status=status.HTTP_200_OK)\n else:\n return 
Response({'error':requestErrorMessagesFormate(validator.get_message())}, status=status.HTTP_200_OK)\n except Exception as exception:\n logerror('notifications/views.py/notification_list', str(exception))\n return Response({'error':str(exception)}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)", "def adc_api_notification_workflow():\n json = request.get_json(force=True)\n return jsonify(adc.notification_workflow(json))", "def list_webhooks(self):\n response = requests.get(\n '%spreferences/notifications' % self._url,\n **self._auth\n )\n\n if response.status_code == 401:\n raise MoipAuthorizationException(response.json())\n else:\n pretty_print(response.json())\n return response.json()", "def get(self):\n FetchNotifications.__init__(self)\n kwargs = self.parser.parse_args()\n query = self.notifications_db.construct_lucene_complex_query([\n ('target_role', {'value': self.role}),\n ('targets', {'value': self.username, 'join_operator': 'OR'}),\n ('group', {'value': kwargs['group'], 'join_operator': 'AND'})])\n notifications = self.notifications_db.full_text_search('search', query, page=kwargs['page'], limit=10, sort=\"\\_id\")\n self.set_seen(notifications)\n self.logger.info(\"Fetched notifications\")\n return {'notifications': notifications}", "async def get_user_notifications(self):\n self._old_notifications = self.user_notifications # important for keeping track of what is new.\n\n async with self.web_session.get(self._api_notifications_url, headers=self._headers) as resp:\n if self.check_status(resp.status, self._api_notifications_url):\n data = await resp.json()\n self.user_notifications = create_notification_objects(data.get('notifications'))\n for user_notification in self.user_notifications:\n self.all_notifications[user_notification.id] = user_notification\n return self.user_notifications", "def get_notifications(self, from_id):\n def _filter_noop(_):\n return True\n\n return get_page(\n mongoengine_model=Notification,\n extra_query_args={\n 'owner': self\n },\n extra_filter_func=_filter_noop,\n from_id=from_id,\n page_count=NotificationPageSize\n )", "def _get_notifications(self):\r\n student = self._student('GET')\r\n if student is None:\r\n self._error_response()\r\n\r\n else:\r\n self._success_response({\r\n 'student_sub_count': self.server.DUMMY_DATA['student_sub_count'],\r\n 'count_required': student.num_required,\r\n 'count_graded': student.num_graded,\r\n 'count_available': student.num_pending\r\n })", "def cmd_notification_all(client, args):\n notifications_all = client.get_notifications(args.new)\n notifications_all['messages'] = [message.__dict__ for message in\n notifications_all['messages']]\n formatted_replies = []\n for reply in notifications_all['replies']:\n formatted_reply = reply.__dict__\n formatted_reply['content'] = format_comment_tree(formatted_reply['content'])\n formatted_replies.append(formatted_reply)\n notifications_all['replies'] = formatted_replies\n generate_output({'notifications_all': notifications_all}, args.output_file)", "def action_show():\n try:\n notification = read_notification()\n except IOError:\n raise HTTPResponse(body=\"Error reading notification IO\", status=400)\n except:\n raise HTTPResponse(body=\"Unexpected error\", status=400)\n \n if notification is not None:\n return dict(msg=\"\", notification=notification.to_json())\n else:\n return dict(msg=\"No notification\")", "def get(self, request, *args, **kwargs):\n timer = Timer()\n try:\n notice_json = get_json_for_notices(\n request, get_notices_for_current_user(request))\n\n 
log_api_call(timer, request, \"Get Notices\")\n return self.json_response(notice_json)\n except Exception:\n return handle_exception(logger, timer, traceback)", "def notifications(request):\r\n user = request.user\r\n if user.is_authenticated():\r\n notifications = models.Notification.objects.by_user(user).unseen()\r\n return {\r\n 'unseen_notifications': notifications\r\n }\r\n else:\r\n return {}", "def send_notification(id_number, note_type):\n auth = login() \n if note_type == 'ULON':\n url = ulon_url + str(id_number)\n r = requests.get(url, auth=auth)\n return r.status_code", "def notifications(self):\r\n return notifications.Notifications(self)", "def notifications(self):\r\n return notifications.Notifications(self)", "def get_notifications_translated(self: object, *args, parameters: dict = None, **kwargs) -> dict:\n # [GET] https://assets.falcon.crowdstrike.com/support/api/swagger.html#/recon/GetNotificationsTranslatedV1\n return process_service_request(\n calling_object=self,\n endpoints=Endpoints,\n operation_id=\"GetNotificationsTranslatedV1\",\n keywords=kwargs,\n params=handle_single_argument(args, parameters, \"ids\")\n )", "def test_tenants_tenant_id_notifications_get(self):\n pass", "def notification(title, body, email):\n ACCESS_TOKEN = \"o.5ls4UBW48oQ6bm5VI6ABbiySEjIS9enC\"\n data_send = {\"type\": \"note\", \"title\": title, \"body\": body, \"email\":email}\n resp = requests.post('https://api.pushbullet.com/v2/pushes', data=json.dumps(data_send),\n headers={'Authorization': 'Bearer ' + ACCESS_TOKEN,\n 'Content-Type': 'application/json'})", "def get_notifications(\n self,\n all: bool = False,\n participating: bool = False,\n since: Optional[datetime] = None,\n before: Optional[datetime] = None,\n per_page: int = 10,\n page: int = 1,\n ) -> List[Notification]:\n raw_res = self._notifications(\n all=all,\n participating=participating,\n since=since,\n before=before,\n per_page=per_page,\n page=page,\n )\n return Notification.load_from_json_str(raw_res)", "async def get_notifications(\n self,\n profile_id: UUID,\n older_than: Optional[dt.datetime] = Query(None),\n limit: Optional[int] = Query(10, ge=1, le=20)):\n return await self._service.find_notifications_by_profile_id(\n profile_id, older_than=older_than, limit=limit)", "def test_create_and_retrieve_notification(client):\n create_user_response = create_user(client, TEST_USER_NAME, TEST_USER_PASS)\n assert create_user_response.status_code == HttpStatus.created_201.value\n\n new_notification_message = 'Welcome to the eSports Competition'\n new_notification_category = 'Information'\n post_response = create_notification(\n client,\n new_notification_message,\n 15,\n new_notification_category)\n assert post_response.status_code == HttpStatus.created_201.value\n assert Notification.query.count() == 1\n\n # The notification should have created a new notification category as well\n assert NotificationCategory.query.count() == 1\n\n post_response_data = json.loads(post_response.get_data(as_text=True))\n assert post_response_data['message'] == new_notification_message\n\n new_notification_url = post_response_data['url']\n get_response = client.get(\n new_notification_url,\n headers=get_authentication_headers(TEST_USER_NAME, TEST_USER_PASS))\n assert get_response.status_code == HttpStatus.ok_200.value\n\n get_response_data = json.loads(get_response.get_data(as_text=True))\n assert get_response_data['message'] == new_notification_message\n assert get_response_data['notification_category']['name'] == \\\n new_notification_category", 
"def get_notifications(user_id, page):\n notifications = \\\n Notification.objects.select_related('sender',\n 'sender__userprofile',\n 'item',\n 'item__movie') \\\n .filter(recipient=user_id) \\\n .order_by('-created_at')\n paginator = Paginator(notifications, 20)\n\n try:\n next_page = paginator.page(page).next_page_number()\n paginator.page(next_page)\n except (EmptyPage, InvalidPage):\n next_page = ''\n\n response = [_generate_notification_response(notification, next_page) \n for notification in paginator.page(page)]\n\n return simplejson.dumps(response)", "def _get_notification_data(\n self, current_notification, last_notification\n ): # pylint: disable=unused-argument\n return {}", "def check_notifications(func):\n\n def _check_notifications(ctx, *args, **kwargs):\n config = None\n client = None\n # protect against early cli failures\n if ctx.obj and 'config' in ctx.obj and 'client' in ctx.obj:\n config = ctx.obj['config']\n client = ctx.obj['client']\n\n res = func(ctx, *args, **kwargs)\n\n if client and config:\n notifications_resp = client.get_notifications(config.username)\n notification_json = notifications_resp.json()\n urgent_notifications = notification_json[\"urgent_count\"]\n if urgent_notifications > 0:\n logger.info(uxstring.UxString.unread_notifications.format(urgent_notifications))\n\n return res\n\n return functools.update_wrapper(_check_notifications, func)", "def test_get(db, session): # pylint: disable=unused-argument\n # get from method for notf-user\n user_id = 'notf-user'\n method_res = Notification.get(user_id)\n query_res = get_user_notifications(session, user_id) # check if the results are really same\n assert len(query_res) == len(method_res)", "def notification(self, sid):\r\n return notifications.Notification(self, sid)", "def changes(self):\n log.debug(\"==>\")\n notifications = self.get_subscription_notifications()\n responses = [gnmi_pb2.SubscribeResponse(update=notif)\n for notif in notifications]\n log.debug(\"<== responses=%s\", responses)\n return responses", "def get_notification():\n condition.acquire()\n if not notifications:\n ret = condition.wait(2)\n if not ret:\n condition.release()\n raise TimeoutError(\"Timed out while waiting for notification\")\n\n notice = notifications.pop(0)\n condition.release()\n return notice", "def create_notification(notification_name):\n url = CMX_URL + '/api/config/v1/notification'\n print('CMX URL and Resource: ', url)\n payload = {\n \"name\": notification_name,\n \"rules\": [\n {\n \"conditions\": [\n {\n \"condition\": \"inout.deviceType == client\"\n },\n {\n \"condition\": \"inout.in/out == in\"\n },\n {\n \"condition\": \"inout.hierarchy == DevNetCampus>DevNetBuilding>DevNetZone\"\n }\n ]\n }\n ],\n \"subscribers\": [\n {\n \"receivers\": [\n {\n \"uri\": \"http://128.107.70.29:8010\",\n \"messageFormat\": \"JSON\",\n \"qos\": \"AT_MOST_ONCE\"\n }\n ]\n }\n ],\n \"enabled\": True,\n \"enableMacScrambling\": True,\n \"macScramblingSalt\": \"listening\",\n \"notificationType\": \"InOut\"\n }\n header = {'content-type': 'application/json', 'accept': 'application/json'}\n notification_response = requests.put(url, data=json.dumps(payload), headers=header, auth=CMX_AUTH, verify=False)\n print('Notification Status Code: ', notification_response.status_code)\n return notification_response.status_code", "def get_recent_notifications(user_id):\n raw_notifications = cacheAPI._get_notifications(user_id)\n response = [{\n 'user_name': notification['sender_name'],\n 'user_url': reverse('user-profile',\n 
args=[notification['sender_id']]),\n 'user_avatar': get_thumbnail(\n 'img/users/thumbnails/' + \\\n str(notification['sender_id']) + 't.jpg',\n '25x25',\n crop='center'\n ).url,\n 'message': MESSAGES[notification['notification_type']],\n 'item_name': notification.get('item_name', ''),\n 'item_url': (reverse(notification['item_type']+'-profile',\n args=[slugify(notification.get('item_name', 'none'))])\n if notification.get('item_type') else ''),\n 'time_since': timesince(notification['timestamp'])\n } for notification in raw_notifications]\n\n return simplejson.dumps(response)", "def list_quota_notifications(self, qid, **kwargs):\n\n all_params = ['qid']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method list_quota_notifications\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'qid' is set\n if ('qid' not in params) or (params['qid'] is None):\n raise ValueError(\"Missing the required parameter `qid` when calling `list_quota_notifications`\")\n\n\n resource_path = '/platform/1/quota/quotas/{Qid}/notifications'.replace('{format}', 'json')\n path_params = {}\n if 'qid' in params:\n path_params['Qid'] = params['qid']\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json'])\n\n # Authentication setting\n auth_settings = ['basic_auth']\n\n response = self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='QuotaNotificationsExtended',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def get_call_api(url, payload, headers):\n return requests.request(\"GET\", url, headers=headers, data=payload)", "def get(self, request, pk, format=None):\n\n user = self.get_user(pk=pk)\n serializer = TimeReminderSerializer(user.time_reminders, many=True)\n \n if 'get_current' not in request.GET: #this means we just want all of the time reminders\n return Response(serializer.data)\n\n #else, we want to see if any time reminders should be going off\n time_to_display = []\n for element in serializer.data:\n reminder_time = element['time']\n curr_time = datetime.datetime.now()\n\n datetime_of_reminder = datetime.datetime.strptime(reminder_time, \"%Y-%m-%dT%H:%M:00Z\")\n test = datetime_of_reminder - curr_time #get time between event and now\n seconds = test.total_seconds()\n if (seconds < 35):\n time_to_display.append(element) #if less than 35 seconds in between, send out notification\n\n return Response(time_to_display)", "def test_notification(self, mock):\n mock.register_uri(\n CONST_HTTP_METHOD_POST,\n pyflume.constants.URL_OAUTH_TOKEN,\n text=load_fixture(CONST_TOKEN_FILE),\n )\n mock.register_uri(\n \"get\",\n pyflume.constants.API_NOTIFICATIONS_URL.format(user_id=CONST_USER_ID),\n text=load_fixture(\"notification.json\"),\n )\n flume_auth = pyflume.FlumeAuth(\n CONST_USERNAME,\n CONST_PASSWORD,\n CONST_CLIENT_ID,\n CONST_CLIENT_SECRET,\n CONST_FLUME_TOKEN,\n )\n\n flume_notifications = 
pyflume.FlumeNotificationList(flume_auth)\n notifications = flume_notifications.get_notifications()\n assert len(notifications) == 1 # noqa: S101\n assert notifications[0][CONST_USER_ID] == 1111 # noqa: S101,WPS432\n assert flume_notifications.has_next # noqa: S101\n\n mock.register_uri(\n \"get\",\n flume_notifications.next_page,\n text=load_fixture(\"notification_next.json\"),\n )\n\n notifications_next = flume_notifications.get_next_notifications()\n assert len(notifications_next) == 1 # noqa: S101\n assert notifications_next[0][CONST_USER_ID] == 1111 # noqa: S101,WPS432\n assert flume_notifications.has_next is False # noqa: S101\n\n mock.register_uri(\n \"get\",\n pyflume.constants.API_NOTIFICATIONS_URL.format(user_id=CONST_USER_ID),\n text=load_fixture(\"notification_nopage.json\"),\n )\n\n notifications_nopage = flume_notifications.get_notifications()\n assert len(notifications_nopage) == 1 # noqa: S101\n assert notifications_nopage[0][CONST_USER_ID] == 1111 # noqa: S101,WPS432\n assert flume_notifications.has_next is False # noqa: S101", "def cmd_notification_id(client, args):\n notification = client.get_notification(args.notification_id)\n notification = notification.__dict__\n if 'comment' in notification['content']:\n notification['content'] = format_comment_tree(notification['content'])\n generate_output({'notification': notification})", "async def fetch_notifications(self, type=\"all\", sent=False, after=None):\n\n logging.debug(\"Fetching user notifications\")\n\n params = {\n \"type\": type,\n \"sent\": sent\n }\n\n if after is not None:\n params[\"after\"] = after\n\n notifs = await self.client.request.get(\"/auth/user/notifications\")\n return [self.client.BaseNotification.build_notification(\n self.client, notif, self.loop) for notif in notifs[\"data\"]]", "def get_quota_notification(self, quota_notification_id, qid, **kwargs):\n\n all_params = ['quota_notification_id', 'qid']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method get_quota_notification\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'quota_notification_id' is set\n if ('quota_notification_id' not in params) or (params['quota_notification_id'] is None):\n raise ValueError(\"Missing the required parameter `quota_notification_id` when calling `get_quota_notification`\")\n # verify the required parameter 'qid' is set\n if ('qid' not in params) or (params['qid'] is None):\n raise ValueError(\"Missing the required parameter `qid` when calling `get_quota_notification`\")\n\n\n resource_path = '/platform/1/quota/quotas/{Qid}/notifications/{QuotaNotificationId}'.replace('{format}', 'json')\n path_params = {}\n if 'quota_notification_id' in params:\n path_params['QuotaNotificationId'] = params['quota_notification_id']\n if 'qid' in params:\n path_params['Qid'] = params['qid']\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json'])\n\n # Authentication setting\n auth_settings = ['basic_auth']\n\n response = self.api_client.call_api(resource_path, 
'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='QuotaNotifications',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "async def update_cache_from_notification(self) -> List[Notification]:\n new_notifications = []\n try:\n notifications = await self.get_user_notifications()\n\n if not notifications:\n return new_notifications\n\n new_notifications = self.get_new_notifications()\n for notification in new_notifications:\n await self.__manage_notification_posts(notification)\n except Exception as e:\n if self.verbose:\n print(f\"Failed to update Weverse Cache - {e}\")\n return new_notifications", "def metrics_get(period):\n return flask.jsonify({\"message\": \"noop\"}), 200", "def sendAllNotifications():\n delta = prefs.getDaysToNotifyMinistriesQuestionsPendingResponse()\n date = datetime.date.today()\n sendNotificationToMinistry(date)\n sendNotificationToClerksOffice(date)\n sendNotificationToMP(date)", "def fetch_my_notifications_sql(request):\n input_json, output_json = request, {}\n try:\n sql = sql_fetch_cursor(\"sp_fetch_my_notifications\", 'invoice_ref',\n ['invoice_ref', input_json['profile_id']])\n return sql\n except Exception as ex:\n output_json = dict(zip(['Status', 'Message', 'Payload'], ['Failure',\n f\"Error while fetching details: {ex}\", None]))\n return output_json", "def test_update_notification(client):\n create_user_response = create_user(client, TEST_USER_NAME, TEST_USER_PASS)\n assert create_user_response.status_code == HttpStatus.created_201.value\n\n new_notification_message_one = 'Fortnite has a new winner'\n new_notification_category_one = 'Information'\n post_response = create_notification(\n client,\n new_notification_message_one,\n 30,\n new_notification_category_one)\n assert post_response.status_code == HttpStatus.created_201.value\n assert Notification.query.count() == 1\n\n post_response_data = json.loads(post_response.get_data(as_text=True))\n new_notification_url = post_response_data['url']\n new_displayed_times = 1\n data = {'displayed_times': new_displayed_times}\n patch_response = client.patch(\n new_notification_url,\n headers=get_authentication_headers(TEST_USER_NAME, TEST_USER_PASS),\n data=json.dumps(data))\n assert patch_response.status_code == HttpStatus.ok_200.value\n\n get_response = client.get(\n new_notification_url,\n headers=get_authentication_headers(TEST_USER_NAME, TEST_USER_PASS))\n assert get_response.status_code == HttpStatus.ok_200.value\n\n get_response_data = json.loads(get_response.get_data(as_text=True))\n assert get_response_data['displayed_times'] == new_displayed_times", "def notification_config(self) -> 'outputs.NotificationConfigResponse':\n return pulumi.get(self, \"notification_config\")", "def notification_interface():\n return render_template(\"notifications.html\")", "def get_all_user_notifications(self, user_id):\n dao = NotificationsDAO()\n notifications = dao.get_all_user_notifications(user_id)\n result_list = []\n for row in notifications:\n result = self.build_notifications_dict(row)\n result_list.append(result)\n return jsonify(Notifications=result_list)", "def get_webhooks():\n response = requests.get(f'{KAZOO_SERVER}:8000/v2/webhooks', headers=HEADERS)\n\n return response", "def get_messages(self, new=True):\n url = (\"https://api.imgur.com/3/account/{0}/notifications/\"\n \"messages\".format(self.name))\n result = self._imgur._send_request(url, params=locals(),\n needs_auth=True)\n 
return [Notification(msg_dict, self._imgur, has_fetched=True) for\n msg_dict in result]", "def get(self):\n SearchNotifications.__init__(self)\n kwargs = self.parser.parse_args()\n search = '*' + kwargs['search'] + '*'\n query = self.notifications_db.construct_lucene_complex_query([\n ('target_role', {'value': self.role}),\n ('targets', {'value': self.username, 'join_operator': 'OR'}),\n ('action_objects', {'value': search, 'join_operator': 'AND', 'open_parenthesis': True}),\n ('title', {'value': search, 'join_operator': 'OR', 'close_parenthesis': True})])\n notifications = self.notifications_db.full_text_search('search', query, page=kwargs['page'], limit=10, sort=\"\\_id\")\n self.set_seen(notifications)\n self.logger.info(\"Searched text %s in notifications\" % search)\n return notifications", "def _send_req(self, notify_request, content_type, subscription):\n if content_type == \"json\":\n feed = urlopen(\n Request(\n subscription[\"reference\"],\n self.json_writer.serialize(notify_request),\n {\n \"Content-Type\": \"application/json\",\n \"Authorization\": \"OAuth \" + self._get_access_token(subscription[\"reference\"])\n }\n ),\n timeout=10)\n if feed.getcode() == 401:\n # Access_token has expired\n self.access_token = None\n feed = urlopen(\n Request(\n subscription[\"reference\"],\n self.json_writer.serialize(notify_request),\n {\n \"Content-Type\": \"application/json\",\n \"Authorization\": \"OAuth \" + self._get_access_token(subscription[\"reference\"])\n }\n ),\n timeout=10)\n\n else:\n feed = urlopen(\n Request(\n subscription[\"reference\"],\n self.xml_writer.serialize(notify_request),\n {\"Content-Type\": \"application/xml\"}\n ),\n timeout=10)\n return feed", "def notify(self, **kwargs):\n return self.send(kwargs)", "def notify(self, **kwargs):\n return self.send(kwargs)", "def get_notifications(self, section_id):\n request = NotificationsRequest(section_id=section_id)\n notifications_reply = self.stub.GetNotifications(request, timeout=5)\n assert isinstance(notifications_reply, NotificationsReply)\n return notifications_reply.section", "def update_notifications(self: object, body: dict) -> dict:\n # [PATCH] https://assets.falcon.crowdstrike.com/support/api/swagger.html#/recon/UpdateNotificationsV1\n return process_service_request(\n calling_object=self,\n endpoints=Endpoints,\n operation_id=\"UpdateNotificationsV1\",\n body=body\n )", "def sendNotification(noti : Notification) -> NotificationReport:\n report : NotificationReport = NotificationReport(**noti.dict(), failed=[], succeded=[])\n sendLimit = 100 #Expo limit\n tokens = noti.tokens\n noti.tokens = tokens[:sendLimit]\n tokens = tokens[sendLimit:]\n\n while (len(noti.tokens) > 0):\n messages = createMessages(noti)\n (succededTokens, failedTokens) = publishMessages(messages)\n report.succeded = report.succeded + succededTokens\n report.failed = report.failed + failedTokens\n noti.tokens = tokens[:sendLimit]\n tokens = tokens[sendLimit:]\n\n return report", "def get_notifications() -> INotifications:\n notifiers = {\"django\": DjangoNotifier, \"govuk-notify\": GovUKNotifyEmail}\n notifier = getattr(settings, \"NOTIFIER\", \"django\")\n notifier_class = notifiers[notifier]\n return notifier_class()", "def get():\n return jsonify({'events': 'Events API'}), 200", "def query_notifs(user, **kwargs):\n notif_type = kwargs.get(\"notif_type\", None)\n notif_unread = None\n if notif_type == \"unread\":\n notif_unread = 1\n elif notif_type == \"read\":\n notif_unread = 0\n page = kwargs.get(\"page\", 0)\n max_items = 
kwargs.get(\"max_items\", 5)\n if page and max_items:\n start_item = (page-1)*max_items\n end_item = page*max_items\n else:\n start_item = \"\"\n end_item = \"\"\n\n notif_query = \"\"\"\n SELECT a.*\n FROM notifications_notification a \n WHERE (a.recipient_id = %(user_id)s \n \"\"\"\n if notif_unread != None:\n notif_query += \"\"\"AND a.unread = %(notif_unread)s \"\"\"\n notif_query += \"\"\"\n AND ( NOT EXISTS (\n SELECT 1 \n FROM notifications_notification b\n WHERE b.target_object_id = a.target_object_id \n AND b.timestamp > a.timestamp\n AND b.recipient_id=%(user_id)d\n \"\"\"\n if notif_unread != None:\n notif_query += \"\"\"AND b.unread = %(notif_unread)s \"\"\"\n notif_query += \"\"\"\n ) ) )\n GROUP BY a.target_object_id \n ORDER BY a.unread DESC, a.timestamp DESC\n \"\"\"\n if start_item >= 0 and end_item :\n notif_query += \"LIMIT %(start_item)d,%(end_item)s\"\n\n notif_query = notif_query.replace(\"\\n\", \"\") % { \"user_id\" : user.id, \n \"notif_unread\" : notif_unread,\n \"start_item\" : start_item, \n \"end_item\" : end_item,\n }\n notif_list = Notification.objects.raw(notif_query)\n return notif_list", "def delete_notifications(self: object, *args, parameters: dict = None, **kwargs) -> dict:\n # [DELETE] https://assets.falcon.crowdstrike.com/support/api/swagger.html#/recon/DeleteNotificationsV1\n return process_service_request(\n calling_object=self,\n endpoints=Endpoints,\n operation_id=\"DeleteNotificationsV1\",\n keywords=kwargs,\n params=handle_single_argument(args, parameters, \"ids\")\n )", "async def find_notification(db_session: Session, notification_id: int):\n notification = await NotificaitonCRUD.find_notification_by_id(db_session, notification_id=notification_id)\n return notification", "def get(self):\n FetchActionObjects.__init__(self)\n kwargs = self.parser.parse_args()\n notification_id = kwargs['notification_id']\n mcm_notification = self.notifications_db.get(notification_id)\n action_objects_results = self.fetch_action_objects(mcm_notification[\"action_objects\"], mcm_notification[\"object_type\"], kwargs['page'], kwargs['limit'])\n self.logger.info(\"Fetched action objects for notification %s\" % notification_id)\n return action_objects_results", "def update_notifications_status(request):\n input_json = request\n try:\n update_record_var = update_record(IndividualNotifications, input_json['individual_notification_id'],\n notification_status=input_json['notification_status'])\n return update_record_var\n except Exception as ex:\n output_json = dict(\n zip(['Status', 'Message', 'Payload'], ['Failure', f'Unable to update Notification Status.{ex}', None]))\n return output_json", "def get(self, request, *args, **kwargs):\n return HttpResponse('The notification service is active. 
Real ' \\\n 'notifications must use the POST method.')", "def send_notification(self):\n s1 = System()\n b1 = Books(\"1984\", \"George Orwell\", \"Harvill Secker\", \"1949\", \"0123456789123\")\n m1 = Members(\"Richard\", \"Blackmore\", \"14-04-1945\", \"Weston\")\n s1.send_notification(\"Please return book\")\n self.assertEqual(m1.get_notifications(), None)\n s1.add_resource(b1)\n s1.lending_process(b1, m1)\n s1.send_notification(\"Please return book\")\n self.assertEqual(m1.get_notifications(), \"-Please return boo- \")", "def notifications_processor(request):\r\n\r\n now = datetime.now()\r\n today = date.today()\r\n\r\n # DISABLED--seems confusing to have different behavior\r\n # On Fridays, get notified for the weekend and next Monday\r\n #\r\n # weekday = today.weekday()\r\n # if weekday == 4:\r\n # days_diff = 4\r\n # else:\r\n\r\n # Get notified for classes on the next day\r\n days_diff = 2\r\n\r\n end_day = today + timedelta(days=days_diff)\r\n end_datetime = datetime.combine(end_day, time(0, 0))\r\n \r\n if request.user.is_authenticated:\r\n lessons = Lesson.objects.filter(teacher=request.user, notified=False, start_at__gte=now, start_at__lt=end_datetime)\r\n\r\n # Combine all classes into one message\r\n messages = \"\"\r\n\r\n for lesson in lessons:\r\n lesson.notified = True\r\n lesson.save()\r\n lesson_start_at = datetime.strftime(lesson.start_at, \"%a, %b. %d, %I:%M %p\")\r\n messages += f\"{lesson.student.name}'s class on {lesson_start_at}<br>\"\r\n\r\n if messages != \"\":\r\n Notification.objects.create(teacher=request.user,\r\n message=messages,\r\n due_at=end_datetime)\r\n \r\n notifications = Notification.objects.filter(teacher=request.user, is_new=True)\r\n confirmations = Confirmation.objects.filter(teacher=request.user, is_new=True)\r\n\r\n for c in confirmations:\r\n c.is_new = False\r\n c.save()\r\n\r\n confirmations = confirmations[:1]\r\n \r\n return {'notifications': notifications,\r\n 'confirmations': confirmations}\r\n \r\n return {'notifications': [],\r\n 'confirmations': []}", "def aggregate_notifications(self: object, body: dict) -> dict:\n # [POST] https://assets.falcon.crowdstrike.com/support/api/swagger.html#/recon/AggregateNotificationsV1\n return process_service_request(\n calling_object=self,\n endpoints=Endpoints,\n operation_id=\"AggregateNotificationsV1\",\n body=body\n )", "def test_notification_batch(self):\n req = '''[{\"jsonrpc\": \"2.0\", \"method\": \"notify_hello\", \"params\": [7]},\n {\"jsonrpc\": \"2.0\", \"method\": \"notify_hello\", \"params\": [7]}\n ]'''\n resp = ''\n status = 204\n r_status, r_resp = self.exec_handler(req)\n self.assertEqual(r_status, status)\n self.assertEqual(r_resp, resp)", "async def get(self):\n await self.handle_request(self.chats_api, 1)", "def get_replies(self, new=True):\n url = (\"https://api.imgur.com/3/account/{0}/\"\n \"notifications/replies\".format(self.name))\n return self._imgur._send_request(url, needs_auth=True)", "def get(self):\n CheckNotifications.__init__(self)\n stats = self.notifications_db.get('notification_stats')['stats']\n if self.username in stats:\n return stats[self.username]\n return {'All': 0, 'unseen': 0}", "def send(id):\n try:\n userIds = json.loads(request.data)\n notification = Notification.objects.filter(id=id).to_json()\n if not notification:\n return jsonify({\n \"message\": \"BAD REQUEST\",\n \"success\": False\n }), 400\n ### Call Send Notification ###\n log = {\n \"notificationId\": id,\n \"userIds\": userIds\n }\n NotificationLog(**log).save()\n return jsonify({\n 
\"message\": \"Send Successfully\",\n \"success\": True\n }), 200\n except BaseException as e:\n print(e)\n return e.message, 400", "def get(self):\n # req = requests.get(\"\")\n # data = req.json()\n # print(data)\n query = Event.query.all()\n event_first_bill_pay(data=query, event_type=\"bill payment\")\n threading.Thread(target=event_notify_user, args=(\"Notify User\", query,)).start()\n threading.Thread(target=event_post_feedback, args=(\"Post Feeback\",)).start()\n return query", "def find_user_notifications_sql(request):\n input_json, output_json = request, {}\n try:\n sql = sql_fetch_cursor(\"sp_find_user_notifications \", 'invoice_ref',\n ['invoice_ref', input_json['profile_id']])\n return sql\n except Exception as ex:\n output_json = dict(zip(['Status', 'Message', 'Payload'], ['Failure',\n f\"Error while fetching details: {ex}\", None]))\n return output_json", "def get(frequency, user_email, subject_name):\n return PendingAlert.get_by_key_name(frequency + ':' + user_email +\n ':' + subject_name)", "def update_notifications_as_seen():\n try:\n update_var = IndividualNotifications.objects.filter(notification_status=1).update(notification_status=2)\n output_json = dict(zip(['Status', 'Message', 'Payload'],\n ['Success', 'Notification was updated successfully', None]))\n return output_json\n except Exception as ex:\n output_json = dict(\n zip(['Status', 'Message', 'Payload'], ['Failure', f'Unable to update Notification Status.{ex}', None]))\n return output_json", "def action_delete():\n try:\n deleted = delete_notification()\n except:\n raise HTTPResponse(body=\"Unexpected error\", status=400)\n \n if deleted:\n return dict(msg=\"Notification deleted\")\n else:\n return dict(msg=\"No notification to delete\")", "def handle_notification(self, type, notification):\n print \"Notified ! 
%s\"%type\n if type != \"contentInstances\":\n return super(NGSI_10, self).handle_notification(type, notification)\n\n if not notification[\"currentNrOfInstances\"]:\n return False\n\n container_id = notification[\"subscriptionsReference\"].rsplit(\"/\", 2)[0].rpartition(\"/\")[-1]\n app_id = notification[\"subscriptionsReference\"].rsplit(\"/\", 4)[0].rpartition(\"/\")[-1]\n\n app_type, app_id = self.split_app_id(app_id)\n subscriptions = self.db.find_container_subscriptions(app_type, app_id, attribute=container_id)\n if subscriptions:\n data = self._get_context_attribute_value(notification)\n \"\"\"\n notify_request = NotifyContextRequest(\n contextResponseList = [\n ContextElementResponse(\n statusCode = 200,\n contextElement = ContextElement(\n entityId = EntityId(type = app_type, id = app_id, isPattern = False),\n contextAttributeList = [\n ContextAttribute(\n name = data[\"name\"] or container_id,\n contextValue = data[\"contextValue\"],\n metadata = data[\"metadata\"],\n type = data[\"type\"]\n )\n ]\n )\n )\n ]\n )\n \"\"\"\n update_request = UpdateContextRequest(\n contextElementList=[\n ContextElement(\n entityId=EntityId(type=app_type, id=app_id, isPattern=False),\n contextAttributeList=[\n ContextAttribute(\n name=data[\"name\"] or container_id,\n contextValue=str(data[\"contextValue\"]),\n metadata=data[\"metadata\"],\n type=data[\"type\"]\n )\n ]\n )\n ],\n updateAction=\"UPDATE\"\n )\n\n self._send_notifications(subscriptions, update_request)\n\n #raise Exception(latest, container_id, app_id)\n\n return True", "def get_subscriptions(self):\n url = '{}/v2/subscriptions'.format(self.url)\n r = requests.get(url, headers=self.headers_v2)\n return r.json()", "def NotifyPushEvent(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')" ]
[ "0.7631317", "0.70797265", "0.7076586", "0.695784", "0.6907027", "0.6840182", "0.6796614", "0.6696409", "0.6680031", "0.6636662", "0.6489067", "0.64789927", "0.6347963", "0.6330483", "0.6290125", "0.6276469", "0.62763685", "0.626309", "0.6229939", "0.61899424", "0.6159832", "0.6143638", "0.61166435", "0.60950494", "0.6055462", "0.6054534", "0.60352945", "0.60332966", "0.6011349", "0.60101867", "0.6005911", "0.59980166", "0.59647834", "0.59575206", "0.5950377", "0.5924937", "0.5924937", "0.59155864", "0.5909232", "0.59077287", "0.5836699", "0.5794635", "0.57911456", "0.57540965", "0.57089794", "0.56980264", "0.56772125", "0.56687015", "0.56489176", "0.5648424", "0.56341755", "0.5623457", "0.56167215", "0.5565365", "0.5561478", "0.55460125", "0.55395365", "0.5507957", "0.5500542", "0.5497666", "0.5490478", "0.5488916", "0.5484263", "0.5481149", "0.547942", "0.5475239", "0.54735297", "0.5459959", "0.5459092", "0.5455508", "0.5454515", "0.5451908", "0.5451908", "0.54415345", "0.54329824", "0.54214877", "0.5406937", "0.5405207", "0.5397797", "0.53908366", "0.5389153", "0.53793657", "0.5357354", "0.53568876", "0.5352485", "0.535194", "0.53513503", "0.5339466", "0.53361464", "0.5326705", "0.5306735", "0.5303793", "0.52956575", "0.5293578", "0.5289969", "0.5288034", "0.52868706", "0.52855533", "0.5283376", "0.52634704" ]
0.6299571
14
Just a Hello World function
def hello(): return 'Hello, World!'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def hello_world():\n\n return \"Hello World\"", "def hello():\r\n return 'Hello World!'", "def hello():\n print(\"Hello World\")", "def hello():\n print(\"Hello World.\")", "def print_hello_world():\n print(\"Hello World\")", "def hello_world():\n return 'Hello World!'", "def hello():\n print('Hello world!')", "def hello_world():\n return \"Hello world!\"", "def hello():\n return 'Hello World!'", "def hello():\n return 'Hello World!'", "def hello():\n return 'Hello World!'", "def hello():\n print \"Hello, World!\"", "def hello():\n print(\"GoodBye World\")", "def hello():\n return \"Hello, World!\"", "def say_hello():\n return \"Hello World!\"", "def hello():\n print(\"Hello world, again it sucks\")", "def hello():\n print (\"Hello from Joseph Cheney\")", "def hello():\n return \"Hello, world\"", "def hello_world():\n return 'Hello Folks, I prefer cloudrun!!!\\n'", "def main():\n hello()", "def hello_world():\n return 'MAIN FUNCTION CALLED READY FOR STUFF'", "def say_hello():\n return \"Hello!\"", "def say_hello():\n return \"Hello!\"", "def greet():\n return \"Hello!\"", "def greet():\n return \"Hello!\"", "def hello(): #status: WIP\r\n pass", "def sayhi():\n\tprint(\"hello, python\")", "def hello():\n return \"Hello\"", "def main():\n if len(sys.argv) < 2:\n print \"usage:\" \\\n \"python ./helloworld.py username\"\n return -1\n\n username = sys.argv[1]\n echo_hello(username)", "def hello():\n return 'Hello I like to make AI Apps'", "def say_hello():\n print(\"Assalamualaikum.\")", "def hello():\n return \"Hello\"", "def hello_world():\n return \"\"", "def hello():\n return 'Hello World hahaha !!!!!!'", "def greet_user():\n print(\"Hello!\")", "def greet_user():\n print(\"Hello!\")", "def greet_user():\n print(\"Hello!\")", "def greet_user():\n print(\"Hello\")", "def greet_user():\r\n print(\"hello!\")", "def hello(name):\n print(\"Hello {}\".format(name))", "def index():\n return \"Hello, world!\"", "def hello(name):\n print('hello, {}'.format(name))", "def entry_point() -> None:\n args = parse_args()\n print(hello(args.name))", "def hello():\n return 'Hello HBNB!'", "def usage():\n print(\"Hello World!\")", "def main():\n click.echo(\"Hello, world!\")", "def main() -> None:\n parser = argparse.ArgumentParser(description='Say hello')\n parser.add_argument('name', help='Name to greet')\n args = parser.parse_args()\n print(greet(args.name))", "def main():\r\n print(\"JoJo\")", "def index(self):\n return \"Hello World!\"", "def hello():\n retrun", "def do_hello(self, args):\n if len(args) == 0:\n name = 'stranger'\n else:\n name = args\n print \"Hello, %s\" % name", "def hey_there(name):\n return print('Hello ' + name + '!')", "def greet_guest():\n print('Welcome')", "def intro_test():\n print (\"hello\")", "def sayhi(name):\n print(\"Hello, \" + str(name))", "def get(self):\n return 'Hello World!'", "def index():\n return \"Hello!\"", "def say_hello():\n return \"Static File Server says hello!\"", "def intro_test():\n print(\"hello\")", "def sayHello(self, name):\n print(\"Python>> Hello,\", name, \"!\")", "def get_hello():\n\n return \"Hello\"", "def home():\n return \"hello, world\"", "def hello(self, msg, args):\n yield \"Hello, world!\"", "def main():\n example()", "def main():\n example()", "def main():\n example()", "def main():\n example()", "def greeting(x):\n\tprint(\"Halo \"+ x)", "def get(self):\n self.write(\"Hello, world!\")", "def greeting():\n print('\\n',\n '=' * 45,\n \"\\n Welcome to this Module-Creation-Test-Program!!!\",\n '\\n',\n '=' * 45)", "def say_hi(self):\n 
print(\"Hi there, everyone!\")", "def hello():\n\n usage_msg = \"<br/>\\n\".join([\"Welcome to WildLife: The REST APIs for \"\n \"ZooKeeper!<br/>\",\n hello.__doc__.replace(\"\\n\", \"<br/>\\n\")])\n\n return make_response(usage_msg, 200)", "def main():\n return", "def greet(name, msg='Good Morning !'):\n print(\"Hello\", name , \".\", msg)", "def main(parameters):\n\n name = parameters.get(\"name\", \"World\")\n\n return \"Hello \" + name", "def home():\n return \"Hello!\"", "def main():\n pass", "def main() -> None:", "def main() -> None:", "def main() -> None:", "def main() -> None:", "def main() -> None:\n return", "def say_hello():\n\n return render_template(\"hello.html\")", "def say_hello():\n\n return render_template(\"hello.html\")", "def say_hello():\n\n return render_template(\"hello.html\")", "def say_hello():\n\n return render_template(\"hello.html\")", "def say_hello():\n\n return render_template(\"hello.html\")", "def hello(name=''):\n if not name:\n return \"Hello, World!\"\n else:\n return \"Hello, {}!\".format(name)", "def greeting(name):\n print(\"\")\n print(\n \"Welcome to Mister Sweet's Mad Lib Story Telling Journey \" + name +\n \" :)\")\n print(\"\")", "def main():\n print(\"It works!!! ;-)\")\n ###TODO### do something with the various methods/functions of this file", "def hello_printer():\n print('Hello Mustafa')\n print(\"Last Time\")", "def greet_user(username):\r\n print(\"Hello, \" + username + \"!\")", "def greet_user(username):\n print(\"Hello, \" + username + \"!\")", "def main():\n print(\"Everythin is ok\")", "def test():\n print \"hello\"", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass" ]
[ "0.8689984", "0.86293316", "0.8605817", "0.8595374", "0.85618377", "0.85127413", "0.85047764", "0.8472644", "0.84356874", "0.84356874", "0.84356874", "0.8434335", "0.838036", "0.83421856", "0.833462", "0.8314161", "0.8306801", "0.82825506", "0.81582916", "0.805908", "0.7990553", "0.7927622", "0.7927622", "0.79240036", "0.79240036", "0.7908839", "0.7893728", "0.7853924", "0.7852377", "0.78490907", "0.7842101", "0.78272474", "0.78180236", "0.77570325", "0.76953936", "0.76953936", "0.76953936", "0.7681185", "0.7671827", "0.7579788", "0.756388", "0.75105864", "0.7491583", "0.74841666", "0.74011797", "0.73670715", "0.73535883", "0.7352761", "0.7337219", "0.72519916", "0.7225253", "0.72119546", "0.7211518", "0.71959937", "0.71772146", "0.7176092", "0.7168615", "0.71678835", "0.7162118", "0.71481234", "0.7080569", "0.706744", "0.7006016", "0.69911", "0.69911", "0.69911", "0.69911", "0.6984285", "0.6976131", "0.6972194", "0.6946594", "0.69454646", "0.6918342", "0.69045025", "0.68941617", "0.6891918", "0.68830353", "0.68728167", "0.68728167", "0.68728167", "0.68728167", "0.6852132", "0.6847953", "0.6847953", "0.6847953", "0.6847953", "0.6847953", "0.6839982", "0.68154955", "0.6805342", "0.6760272", "0.6760014", "0.6734141", "0.6729846", "0.672388", "0.67209786", "0.67209786", "0.67209786", "0.67209786" ]
0.8384513
13
Function used to reinject values back into forms for accessing by themes
def build_custom_user_fields( form_cls, include_entries=False, fields_kwargs=None, field_entries_kwargs=None, blacklisted_items=(), ): if fields_kwargs is None: fields_kwargs = {} if field_entries_kwargs is None: field_entries_kwargs = {} fields = [] new_fields = UserFields.query.filter_by(**fields_kwargs).all() user_fields = {} # Only include preexisting values if asked if include_entries is True: for f in UserFieldEntries.query.filter_by(**field_entries_kwargs).all(): user_fields[f.field_id] = f.value for field in new_fields: if field.name.lower() in blacklisted_items: continue form_field = getattr(form_cls, f"fields[{field.id}]") # Add the field_type to the field so we know how to render it form_field.field_type = field.field_type # Only include preexisting values if asked if include_entries is True: initial = user_fields.get(field.id, "") form_field.data = initial if form_field.render_kw: form_field.render_kw["data-initial"] = initial else: form_field.render_kw = {"data-initial": initial} fields.append(form_field) return fields
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def form_tweaks(self):\n pass", "def _replace_fields(self):\n for name, value in self._cleaned_data.items():\n setattr(self, name, value)", "def render_form():", "def get_context_data(self, **kwargs):\n form_context = {}\n for form_key, form_class in self.get_form_classes().items():\n if form_key not in kwargs:\n form = self.get_form(form_key)\n form.fields[\"multiform_key\"] = forms.CharField(widget=forms.HiddenInput())\n form_context[form_key] = form\n else:\n kwargs[form_key].fields[\"multiform_key\"] = forms.CharField(\n widget=forms.HiddenInput()\n )\n return super().get_context_data(**kwargs, **form_context)", "def copyform(form, settings):\r\n for name, value in form.iteritems():\r\n setattr(settings, name, value)\r\n settings.commit()", "def load_theme_values(self): \n pass", "def populate_form(self, **kwargs):\n for name, value in kwargs.items():\n self.populate_field(name, value)", "def prepare(self, form):\n \n return form", "def set_form(self, form):\n self.parameters = form", "def _extra_context(self):\r\n return {\r\n 'input_type': self.html_input_type,\r\n 'choices': self.choices\r\n }", "def make_form(self):", "def render_custom_fields(form):\n return {\n 'form': form,\n }", "def updateCode(self, form):\n #\n # Make sure the code structure has the right context\n form.code_structure.classname = self.form_class_name\n form.code_structure.superclasses = self.form_super_classes\n form.code_structure.allow_new_style_class = self.allow_new_style_class\n #\n # Convert it to Python code\n self.code_structure = form.code_structure", "def load(self):\n for field in self.fields:\n value = self.siteconfig.get(field)\n self.fields[field].initial = value\n\n if field in self.disabled_fields:\n self.fields[field].widget.attrs['disabled'] = 'disabled'", "def _reload(self):\n ConfigModel = apps.get_model('djconfig.Config')\n cache = {}\n data = dict(\n ConfigModel.objects\n .all()\n .values_list('key', 'value'))\n\n # populate cache with initial form values,\n # then with cleaned database values,\n # then with raw database file/image paths\n for form_class in self._registry:\n empty_form = form_class()\n cache.update({\n name: field.initial\n for name, field in empty_form.fields.items()})\n form = form_class(data={\n name: _deserialize(data[name], field)\n for name, field in empty_form.fields.items()\n if name in data and not isinstance(field, forms.FileField)})\n form.is_valid()\n cache.update({\n name: _unlazify(value)\n for name, value in form.cleaned_data.items()\n if name in data})\n # files are special because they don't have an initial value\n # and the POSTED data must contain the file. 
So, we keep\n # the stored path as is\n # TODO: see if serialize/deserialize/unlazify can be used for this instead\n cache.update({\n name: data[name]\n for name, field in empty_form.fields.items()\n if name in data and isinstance(field, forms.FileField)})\n\n cache['_updated_at'] = data.get('_updated_at')\n self._cache = cache", "def resubmit(self, _):\n kw = {}\n for k, v in self.form.items():\n if v.edit_text != PARAM_DEFAULT_VALUE:\n kw[k] = v.edit_text\n try:\n self.execution.update_job_params(**kw)\n except QMapError as e:\n self.widget.original_widget = urwid.Text(e)", "def _populate_widgets(self):\n\n if self.parent.session is None:\n # No point populating the widgets with the default values from the\n # SMH file because these will be updated when a session is loaded.\n return\n\n keys = (\"function\", \"order\", \"low_sigma_clip\", \"high_sigma_clip\",\n \"knot_spacing\", \"max_iterations\")\n self._cache = {\n \"input\": {}\n }\n for key in keys:\n self._cache[\"input\"][key] \\\n = self.parent.session.setting((\"normalization\", key))\n\n # Continuum masks.\n self._cache[\"masks\"] \\\n = self.parent.session.setting((\"normalization\", \"masks\"))\n self._cache[\"default_mask\"] \\\n = self.parent.session.setting((\"normalization\", \"default_mask\")) \\\n or self._cache[\"masks\"].keys()[0]\n\n\n # Put these values into the widgets.\n self.low_sigma_clip.setText(\n str(self._cache[\"input\"][\"low_sigma_clip\"]))\n self.high_sigma_clip.setText(\n str(self._cache[\"input\"][\"high_sigma_clip\"]))\n self.knot_spacing.setText(str(\n self._cache[\"input\"][\"knot_spacing\"]))\n\n functions = [self.function.itemText(i).lower() \\\n for i in range(self.function.count())]\n self.function.setCurrentIndex(functions.index(\n self._cache[\"input\"][\"function\"]))\n\n # Normalization order.\n orders = [int(self.order.itemText(i)) \\\n for i in range(self.order.count())]\n self.order.setCurrentIndex(orders.index(\n self._cache[\"input\"][\"order\"]))\n\n # Normalization maximum iterations.\n norm_max_iters = [int(self.norm_max_iter.itemText(i)) \\\n for i in range(self.norm_max_iter.count())]\n self.norm_max_iter.setCurrentIndex(norm_max_iters.index(\n self._cache[\"input\"][\"max_iterations\"]))\n\n # Mask names.\n for name in self._cache[\"masks\"].keys():\n self.continuum_mask.addItem(name)\n\n self.continuum_mask.setCurrentIndex(\n self._cache[\"masks\"].keys().index(\n self._cache[\"default_mask\"]))\n\n self.order_slide.setMaximum(len(self.parent.session.input_spectra) - 1)\n self.current_order_label.setText(\"Order 1 of {}\".format(\n len(self.parent.session.input_spectra)))\n\n # Draw the widgets.\n try:\n self.order_slide.setValue(0)\n self.update_order_index(0)\n self.update_continuum_mask(refresh=False)\n self.fit_continuum(clobber=False)\n self.draw_order(refresh=False)\n self.draw_continuum(refresh=True)\n\n except (AttributeError, KeyError):\n # HACK\n # when loading a fresh session, it will skip all those blocks\n # I think this is okay?\n pass\n return None", "def _postprocessing(self):\n # (in)validate\n if len(self._var_names) == 0:\n self.invalidate()\n else:\n self.put_param('is_valid', True)\n \n # set type\n self.put_param('type', 'Generic')", "def your_reservation_defaults(self, defaults):\n\n default_email = self.email()\n if default_email:\n defaults['email'] = self.email()\n\n data = self.additional_data()\n\n if not data:\n return defaults\n\n for form in data:\n if form in self.context.formsets:\n for field in data[form]['values']:\n defaults[\"%s.%s\" % (form, 
field['key'])] = field['value']\n\n return defaults", "def map_from_app(self, app):\n # Store app etag in form\n self.etag.data = app.get('_etag', '')\n\n # Keep the use_custom_identity checked if it was\n if app.get('assumed_account_id', None) and app.get('assumed_role_name', None):\n self.use_custom_identity.data = True\n\n super(EditAppForm, self).map_from_app(app)\n\n self.env_ro.data = self.env.data\n self.role_ro.data = self.role.data", "def get_context_data(self, **kwargs):\n context= super(Reasignar,self).get_context_data(**kwargs)\n current_us = self.get_object().userstory_set.all()\n formset= self.UserStoryFormset(self.request.POST if self.request.method == 'POST' else None, initial=[{'userStory':us, 'flujo':us.actividad.flujo, 'desarrollador':us.desarrollador} for us in current_us])\n self.__filtrar_formset__(formset)\n context['current_action'] = 'Editar'\n context['formset'] = formset\n return context", "def initial_form_data(self, request, step, form):\n return None", "def edit_flavor(thing, request, form_class=FlavorEditForm):\n\n form = form_class(csrf_context=request.session)\n form.summary.data = thing.summary\n form.description.data = thing.description\n form.notes.data = thing.notes\n form.edit_time.data = timestamp(thing.effect)\n\n if hasattr(form, 'energy'):\n form.energy.data = thing.energy\n\n return {'form': form, 'thing': thing}", "def update_fields(self):\n if hasattr(self.day, \"body_composition\"):\n for f in self.get_fields():\n name = f.get_field().name\n value = getattr(self.day.body_composition, name, None)\n if value is not None:\n f.set_field(value)\n else:\n f.set_field(\"\")", "def get_context_with_form(self):\n self.context['form'] = {\n 'profile': ProfileEditForm(),\n 'avatar': AvatarForm(),\n 'cover': CoverForm(),\n 'introduction': IntroductionForm(),\n 'statement': StatementForm(),\n 'experience': ExperienceForm(),\n 'work': WorkForm(),\n 'image': ImageForm(),\n 'url': UrlForm(),\n 'education': EducationForm(),\n }\n self.context['form']['introduction'] = IntroductionForm(\n initial={'introduction': self.context['introduction'].introduction}\n )\n self.context['form']['statement'] = StatementForm(\n initial={'statement': self.context['statement'].statement}\n )\n return self.context", "def initialise(self, FormClass, *args, **kwargs):\n self.taxonomy = kwargs.pop('taxonomy', None)\n # The original/old category, for update view case\n self.old_category = kwargs.pop('category', None)\n super(FormClass, self).__init__(*args, **kwargs)", "def parameters():\n\n params.update()\n # print('reached params.update')\n\n form = ParameterForm(\n request.form,\n obj=params\n )\n\n if request.method == 'POST' and form.validate():\n if form.sync_time.data:\n params.ctrl_date = datetime.now()\n params.ctrl_time = datetime.today().strftime('%H:%M')\n params.ctrl_weekday = datetime.today().isoweekday()\n form.ctrl_date.process(\n MultiDict(\n [('ctrl_date', params.ctrl_date.strftime(form.ctrl_date.format))]\n )\n )\n form.ctrl_time.process_data(params.ctrl_time)\n form.ctrl_weekday.process(\n MultiDict(\n [('ctrl_weekday', params.ctrl_weekday)]\n )\n )\n params.ctrl_date = form.ctrl_date.data\n params.ctrl_time = form.ctrl_time.data\n params.weekday = form.ctrl_weekday.data\n params.set_datetime()\n flash('Time synched')\n\n elif form.set_time.data:\n params.ctrl_date = form.ctrl_date.data\n params.ctrl_time = form.ctrl_time.data\n params.weekday = form.ctrl_weekday.data\n params.set_datetime()\n flash('set time called')\n\n elif form.set_temp.data:\n 
params.temp_a_day = form.temp_a_day.data\n params.temp_a_night = form.temp_a_night.data\n params.temp_a_antiice = form.temp_a_antiice.data\n\n params.temp_b_day = form.temp_b_day.data\n params.temp_b_night = form.temp_b_night.data\n params.temp_b_antiice = form.temp_b_antiice.data\n params.set_temp_heating()\n flash('set temperatures called')\n\n elif form.set_steepness.data:\n params.steepness_a = form.steepness_a.data\n params.steepness_b = form.steepness_b.data\n params.set_steepness()\n flash('set steepness called')\n\n elif form.set_sumwin.data:\n params.temp_sum_win = form.temp_sum_win.data\n params.set_temp_sumwin()\n flash('set sum/win temp')\n\n elif form.set_ecs.data:\n params.temp_ecs_day = form.temp_ecs_day.data\n params.temp_ecs_night = form.temp_ecs_night.data\n params.set_temp_ecs()\n flash('set warmwater temp')\n\n elif form.set_boiler.data:\n params.temp_boiler_min = form.temp_boiler_min.data\n params.temp_boiler_min = form.temp_boiler_min.data\n params.set_temp_boiler()\n flash('set boiler temp')\n\n elif form.refresh_button.data:\n # enforce to reload the form by redirect and call 'GET' requests\n return redirect(url_for('parameters'))\n else:\n flash('whats going on here', 'error')\n else:\n display_form_errors(form)\n\n return render_template('parameters.html', form=form, params=params, user=current_user)", "def update_values(self):\n for key in self.inputs.keys():\n value = self.inputs[key]['entry'].get()\n self.inputs[key]['value'] = value", "def updateFields(self):\n super(AdminRulesForm, self).updateFields()\n self.fields['improved_templates'].widgetFactory = CheckBoxFieldWidget\n self.fields['iframe_enabled'].widgetFactory = SingleCheckBoxFieldWidget\n self.fields['js_enabled'].widgetFactory = SingleCheckBoxFieldWidget\n self.fields['inline_images'].widgetFactory = SingleCheckBoxFieldWidget", "def make_fields(self):\n for name, prop in self.edit:\n instance_value = self.model.get(name)\n post_value = self.data[name] if (self.data and self.data.has_key(name)) else instance_value\n form_field_class = self.get_field_type(prop)\n form_field = form_field_class(model=self.model, property=prop, name=name, instance_value=instance_value, post_value=post_value)\n self.add(form_field)", "def formfield(self, **kwargs):\n if self.plugin_class:\n self._choices = self.plugin_class.get_all_choices(field=self)\n return super(TemplateNameField, self).formfield(**kwargs)", "def get_context_data(self, **kwargs):\n if 'form' not in kwargs:\n kwargs['form'] = self.get_form(self.form_class)\n return super(OrganizerDataSetUpdate, self).get_context_data(**kwargs)", "def form_CustomisedFormLayoutFields(request):\n schema = schemaish.Structure()\n schema.add( 'firstName', schemaish.String())\n schema.add( 'surname', schemaish.String())\n schema.add( 'age', schemaish.Integer())\n schema.add( 'sex', schemaish.String())\n\n form = formish.Form(schema, 'form')\n\n return form", "def get_change_form_context(self, context):\n form = context['form']\n request = self.request\n model = self.admin.model\n opts = model._meta\n app_label = opts.app_label\n obj = context.get('object', None)\n form_url = self.get_form_url(context)\n\n view_on_site_url = self.admin.get_view_on_site_url(obj)\n fieldsets = self.get_fieldsets()\n formsets, inline_instances = self.admin._create_formsets(\n request, obj, change=not self.hide_inline_formsets\n )\n readonly_fields = self.get_readonly_fields()\n admin_form = admin_helpers.AdminForm(\n form,\n list(fieldsets),\n self.admin.get_prepopulated_fields(request, obj),\n 
readonly_fields,\n model_admin=self.admin,\n )\n media = self.admin.media + admin_form.media\n\n # The inline formset code is copied from django's code. It has\n # not been used in practice yet and has no tests\n inline_formsets = self.admin.get_inline_formsets(\n request, formsets, inline_instances, obj\n )\n for inline_formset in inline_formsets: # pragma: no cover\n media = media + inline_formset.media\n\n has_editable_inline_admin_formsets = True if inline_formsets else False\n has_file_field = admin_form.form.is_multipart() or any(\n admin_formset.formset.is_multipart()\n for admin_formset in inline_formsets\n )\n\n # The admin admin also sets this variable\n request.current_app = self.admin.admin_site.name\n\n return {\n **self.admin.admin_site.each_context(request),\n 'title': self.display_name,\n 'adminform': admin_form,\n 'original': obj,\n 'is_popup': False,\n 'to_field': None,\n 'media': media,\n 'inline_admin_formsets': inline_formsets,\n 'errors': admin_helpers.AdminErrorList(form, formsets),\n 'preserved_filters': self.admin.get_preserved_filters(request),\n 'add': False,\n 'change': bool(obj),\n 'has_view_permission': self.admin.has_view_permission(\n request, obj\n ),\n 'has_add_permission': self.admin.has_add_permission(request),\n 'has_change_permission': self.admin.has_change_permission(\n request, obj\n ),\n 'has_delete_permission': self.admin.has_delete_permission(\n request, obj\n ),\n 'has_editable_inline_admin_formsets': (\n has_editable_inline_admin_formsets\n ),\n 'has_file_field': has_file_field,\n 'has_absolute_url': view_on_site_url is not None,\n 'absolute_url': view_on_site_url,\n 'form_url': form_url,\n 'opts': opts,\n 'content_type_id': (\n admin_options.get_content_type_for_model(self.admin.model).pk\n ),\n 'save_as': self.save_as,\n 'save_on_top': self.save_on_top,\n 'to_field_var': admin_options.TO_FIELD_VAR,\n 'is_popup_var': admin_options.IS_POPUP_VAR,\n 'app_label': app_label,\n 'hide_object_tools': self.hide_object_tools,\n }", "def template_CustomisedFormLayout(request):", "def fl_set_form_atactivate(ptr_flform, pyfn_FormAtactivate, userdata):\n #FL_FORM_ATACTIVATE = cty.CFUNCTYPE(None, cty.POINTER(xfdata.FL_FORM), \\\n # cty.c_void_p)\n _fl_set_form_atactivate = library.cfuncproto(\n library.load_so_libforms(), \"fl_set_form_atactivate\", \\\n xfdata.FL_FORM_ATACTIVATE, [cty.POINTER(xfdata.FL_FORM),\n xfdata.FL_FORM_ATACTIVATE, cty.c_void_p], \\\n \"\"\"FL_FORM_ATACTIVATE fl_set_form_atactivate(FL_FORM * form,\n FL_FORM_ATACTIVATE cb, void * data) \"\"\")\n library.check_if_flinitialized()\n library.verify_flformptr_type(ptr_flform)\n library.verify_function_type(pyfn_FormAtactivate)\n cfn_FormAtactivate = xfdata.FL_FORM_ATACTIVATE(pyfn_FormAtactivate)\n ptr_vdata = library.convert_userdata_to_ptrvoid(userdata)\n library.keep_cfunc_refs(cfn_FormAtactivate, pyfn_FormAtactivate)\n library.keep_elem_refs(ptr_flform, userdata, ptr_vdata)\n retval = _fl_set_form_atactivate(ptr_flform, cfn_FormAtactivate, \\\n ptr_vdata)\n return retval", "def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n placeholders = {\n 'group_id': 'group_id',\n 'first_name': 'first_name',\n 'starter': 'starter',\n 'main': 'main',\n 'dessert': 'dessert',\n 'special_diet': 'special_diet',\n 'requirements': 'requirements',\n }\n\n for field in self.fields:\n placeholder = placeholders[field]\n self.fields[field].widget.attrs['placeholder'] = placeholder\n self.fields[field].widget.attrs['class'] = 'stripe-style-input'", "def _form_for_type(request, C, defn, 
add_id_and_rev=False):\n form = build(defn, C, add_id_and_rev=add_id_and_rev,\n widget_registry=_widget_registry(request))\n form.renderer = request.environ['restish.templating'].renderer\n return form", "def __init__(field, form, content):", "def __init__(field, form, content):", "def get_form_kwargs(self):\n self.object = self.get_object()\n kwargs = super().get_form_kwargs()\n return kwargs", "def setvariables(self, request, contextvars, thevars):\n postdata = {}\n if request.POST:\n postdata = dict(request.POST.dict())\n for var in thevars:\n if postdata.get(\"custom_\"+var):\n contextvars[var] = postdata.get(\"custom_\"+var)\n else:\n try:\n contextvars[var] = thevars[var]\n except Exception:\n pass\n return contextvars", "def apply(self):\n self.name = self.name_box.get()\n self.url = self.url_box.get()\n self.cancelled = False", "def __init__(self, *args, **kwargs):\n self.helper = FormHelper()\n self.helper.layout = Layout(\n Fieldset(\n 'The above are database values.',\n 'value'\n ),\n ButtonHolder(\n Submit('submit', 'Click Here!', css_class='button white')\n )\n )\n super().__init__(*args, **kwargs)\n self.helper.form_show_labels = False\n self.fields['value'].initial = random.randint(-100, 100)", "def updateTheme(self):\n self.myUpdate(stateDict=None)", "def get_form_kwargs(self, i):\n return dict(request=self.request)", "def updateSettingsUI(self):\n\n pass", "def get_initial(self, **kwargs):\n\n # Get an entity\n entity = get_entity(\n self.request.session.get('token', False),\n self.kwargs['aiid'],\n self.kwargs['entity_name']\n )\n\n # Prepare data for the form\n # TODO: should be a better way to do it in the form itself?\n entity['entity_values'] = settings.TOKENFIELD_DELIMITER.join(\n entity['entity_values']\n )\n\n self.initial = entity\n\n return super(EntitiesUpdateView, self).get_initial(**kwargs)", "def reinit (self):\n #for name, obj in inspect.getmembers (self):\n ## if isinstance (obj, RField):\n # self.keyvals[name] = obj.default\n inspect.getmembers (self)", "def pop_form(env):\n if 'wsgi.input' not in env:\n return None\n post_env = env.copy()\n post_env['QUERY_STRING'] = ''\n form = cgi.FieldStorage(\n fp=env.pop('wsgi.input'),\n environ=post_env,\n keep_blank_values=True\n )\n return {k: form[k].value for k in form}", "def built_form(self, built_form):\n\n self._built_form = built_form", "def test_fieldValueTypes(self):\n # tests for \"method\" and \"datetime\" values follow later on ...\n # booleans are not tested yet\n\n factory = self.root.manage_addProduct['Formulator']\n factory.manage_add('form', 'ValueTest')\n factory.manage_add('form2', 'ValueTest')\n form = self.root.form\n form.manage_addField('int_field', 'Test Integer Field', 'IntegerField')\n form.manage_addField('float_field', 'Test Float Field', 'FloatField')\n form.manage_addField('date_field', 'Test Date Field', 'DateTimeField')\n form.manage_addField('list_field', 'Test List Field', 'ListField')\n form.manage_addField(\n 'multi_field',\n 'Test Checkbox Field',\n 'MultiCheckBoxField')\n form.manage_addField('link_field', 'Test Link Field', 'LinkField')\n form.manage_addField('empty_field', 'Test Empty Field', 'StringField')\n int_field = form.int_field\n float_field = form.float_field\n date_field = form.date_field\n list_field = form.list_field\n multi_field = form.multi_field\n link_field = form.link_field\n empty_field = form.empty_field\n\n # XXX editing fields by messing with a fake request\n # -- any better way to do this?\n # (could assign to \"values\" directly ...)\n\n 
default_values = {'field_title': 'Test Title',\n 'field_display_width': '92',\n 'field_required': 'checked',\n 'field_enabled': 'checked',\n }\n try:\n form_values = default_values.copy()\n form_values.update({'field_default': 'None',\n 'field_required': '',\n })\n empty_field.manage_edit(REQUEST=TestRequest(form=form_values))\n\n form_values = default_values.copy()\n form_values.update({'field_default': '42',\n 'field_enabled': 'checked'})\n int_field.manage_edit(REQUEST=TestRequest(form=form_values))\n\n form_values = default_values.copy()\n form_values.update({'field_default': '1.7'})\n float_field.manage_edit(REQUEST=TestRequest(form=form_values))\n\n # XXX cannot test \"defaults to now\", as this may fail randomly\n form_values = default_values.copy()\n form_values.update({'field_input_style': 'list',\n 'field_input_order': 'mdy',\n 'field_date_only': '',\n 'field_css_class': 'test_css',\n 'field_time_separator': '$'})\n date_field.manage_edit(REQUEST=TestRequest(form=form_values))\n\n form_values = default_values.copy()\n form_values.update({'field_default': 'foo',\n 'field_size': '1',\n 'field_items': 'Foo | foo\\n Bar | bar'})\n list_field.manage_edit(REQUEST=TestRequest(form=form_values))\n\n form_values = default_values.copy()\n form_values.update(\n {'field_default': 'foo',\n 'field_size': '3',\n 'field_items': 'Foo | foo\\n Bar | bar\\nBaz | baz',\n 'field_orientation': 'horizontal',\n 'field_view_separator': '<br />\\n'})\n multi_field.manage_edit(REQUEST=TestRequest(form=form_values))\n\n form_values = default_values.copy()\n form_values.update({'field_default': 'http://www.absurd.org',\n 'field_required': '1',\n 'field_check_timeout': '5.0',\n 'field_link_type': 'external',\n })\n link_field.manage_edit(REQUEST=TestRequest(form=form_values))\n\n except ValidationError as e:\n self.fail('error when editing field %s; error message: %s' %\n (e.field_id, e.error_text))\n\n form2 = self.root.form2\n\n xml = formToXML(form)\n XMLToForm(xml, form2)\n\n self.assertEqualForms(form, form2)\n\n request = TestRequest()\n request.form['field_int_field'] = '42'\n request.form['field_float_field'] = '2.71828'\n request.form['subfield_date_field_month'] = '11'\n request.form['subfield_date_field_day'] = '11'\n # This field only allows ten years in the future, today 2023-03-14\n request.form['subfield_date_field_year'] = '2033'\n request.form['subfield_date_field_hour'] = '09'\n request.form['subfield_date_field_minute'] = '59'\n request.form['field_list_field'] = 'bar'\n request.form['field_multi_field'] = ['bar', 'baz']\n request.form['field_link_field'] = 'http://www.zope.org'\n try:\n result1 = form.validate_all(request)\n except FormValidationError as e:\n # XXX only render first error ...\n self.fail('error when editing form1, field %s; error message: %s' %\n (e.errors[0].field_id, e.errors[0].error_text))\n\n try:\n result2 = form2.validate_all(request)\n except FormValidationError as e:\n # XXX only render first error ...\n self.fail('error when editing form1, field %s; error message: %s' %\n (e.errors[0].field_id, e.errors[0].error_text))\n self.assertEqual(result1, result2)\n self.assertEqual(42, result2['int_field'])\n self.assertEqual(2.71828, result2['float_field'])\n\n # check link field timeout value\n self.assertEqual(link_field.get_value('check_timeout'),\n form2.link_field.get_value('check_timeout'))\n\n # XXX not tested: equal form validation failure on invalid input", "def form(self, form):\n\n self._form = form", "def form(self, form):\n\n self._form = form", "def 
_set_attributes(self):", "def custom_actions(self, form_entry, request=None):", "def translate_from(original_form, source):\r\n # retrieve the correct translation dictionary\r\n source_dict = get_dict(source)\r\n # recreate the form with the translated values\r\n common_form = {}\r\n for key in source_dict.keys():\r\n if source_dict[key] in original_form.keys():\r\n common_form[key] = original_form[source_dict[key]]\r\n else:\r\n common_form[key] = \"\"\r\n return common_form", "def get_form_kwargs(self):\n\n press_release_obj = PressRelease.objects.get(\n id=self.kwargs['press_release_pk'])\n\n kwargs = super(PressReleaseUpdateView, self).get_form_kwargs()\n kwargs.update(\n {'field': self.kwargs['field'],\n 'press_release_obj': press_release_obj}\n )\n\n return kwargs", "def form_data(self, clear=[], **kwargs):\n form_data = {\n 'payer_name': 'William Williams',\n 'billing_address': '1 Main Street',\n 'billing_city': 'Anytown',\n 'country': 'USA',\n 'billing_state': 'MD',\n 'billing_zip': '20852',\n 'payment_type': 'CreditCard',\n 'project_code': '15-4FF',\n 'payment_amount': '3000',\n 'information_consent': True,\n }\n for key in clear:\n del form_data[key]\n for key, value in kwargs.items():\n form_data[key] = value\n return form_data", "def __call__(self, *args, **kwargs):\n \n # Store the panel args, in case other methods want to use them.\n self.panel_args = args\n self.panel_kwargs = kwargs\n \n # Then do the default.\n data = super(FormPanel, self).__call__()\n \n # Reset the form if told to.\n if data.get('reset_form'):\n form = data['form_instance']\n data['render_form'] = lambda: form.render(self.default_appstruct)\n \n # And append the flash queue.\n data['flash_queue'] = self.flash_queue\n return data", "def render_form(form):\n return {\n 'form': form,\n }", "def get_context_data(self):\n return {\"form\": self.get_form()}", "def resubmit(self, _):\n kw = {}\n for k,v in self.form.items():\n if v.edit_text != PARAM_DEFAULT_VALUE:\n kw[k] = v.edit_text\n if self.callback is None:\n try:\n self.job.resubmit(**kw)\n except QMapError as e:\n self.widget.original_widget = urwid.Text(e)\n else:\n self.callback(**kw)", "def customize_fields(self, fields):\n\n for field in fields.values():\n\n field_type = type(field.field)\n\n if field_type is List or field_type is Set:\n field.widgetFactory = CheckBoxFieldWidget\n\n elif field_type is Choice:\n field.widgetFactory = RadioFieldWidget", "def setup_initial_values(self, init_params={}):\n for row in self.panel[1:]:\n for widget in row:\n if widget.name in init_params:\n widget.value = init_params[widget.name]", "def __init__(self, *args, **kwargs):\n super(ProfileForm, self).__init__(*args, **kwargs) \n for field in self.fields:\n self.fields[field].widget.attrs.update(\n {\n 'class': 'form-control',\n }\n )", "def restore_defaults(self):\n # Entry Widgets\n self.name.delete(0, tk.END)\n self.num_invest.delete(0, tk.END)\n self.num_flows.delete(0, tk.END)\n\n self.name.insert(0, self.ini_name)\n self.num_invest.insert(0, self.ini_num_invest)\n self.num_flows.insert(0, self.ini_num_flows)\n\n # Checkboxes\n self.rand_data.set(self.ini_rand_data)\n self.mult.set(self.ini_mult)\n self.inflation.set(self.ini_inflation)\n self.taxes.set(self.ini_taxes)\n self.uncertainty.set(self.ini_uncertainty)\n\n # Radio Buttons\n self.depreciation.set(self.ini_depreciation)\n self.distribution.set(self.ini_distribution)\n self.estimate.set(self.ini_estimate)", "def test_prep_overrides(self):\n original_data = self.form.data\n test_data = 
original_data.copy()\n test_data._mutable = False\n self.form.data = test_data # copied only to allow tear-down reverting to original.\n original_fields = self.form.fields\n test_fields = original_fields.copy()\n self.form.fields = test_fields # copied to allow tear-down reverting to original.\n original_get_overrides = self.form.get_overrides\n def replace_overrides(): return self.formfield_attrs_overrides\n self.form.get_overrides = replace_overrides\n original_alt_field_info = getattr(self.form, 'alt_field_info', None)\n self.form.alt_field_info = {}\n overrides = self.formfield_attrs_overrides.copy()\n DEFAULT = overrides.pop('_default_')\n expected_attrs = {}\n for name, field in test_fields.items():\n attrs = field.widget.attrs.copy()\n if isinstance(field.widget, (RadioSelect, CheckboxSelectMultiple, CheckboxInput, )):\n pass # update if similar section in prep_fields is updated.\n attrs.update(overrides.get(name, {}))\n # TODO: setup structure for using default or defined version for all CharFields.\n no_resize = overrides.get(name, {}).pop('no_size_override', False)\n no_resize = True if isinstance(field.widget, (HiddenInput, MultipleHiddenInput)) else no_resize\n if no_resize:\n expected_attrs[name] = attrs\n continue # None of the following size overrides are applied for this field.\n if isinstance(field.widget, Textarea):\n width_attr_name = 'cols'\n default = DEFAULT.get('cols', None)\n display_size = attrs.get('cols', None)\n if 'rows' in DEFAULT:\n height = attrs.get('rows', None)\n height = min((DEFAULT['rows'], int(height))) if height else DEFAULT['rows']\n attrs['rows'] = str(height)\n if default: # For textarea, we always override. The others depend on different conditions.\n display_size = display_size or default\n display_size = min((int(display_size), int(default)))\n elif issubclass(field.__class__, CharField):\n width_attr_name = 'size' # 'size' is only valid for input types: email, password, tel, text\n default = DEFAULT.get('size', None) # Cannot use float(\"inf\") as an int.\n display_size = attrs.get('size', None)\n else: # This field does not have a size setting.\n width_attr_name, default, display_size = None, None, None\n input_size = attrs.get('maxlength', None)\n possible_size = [int(ea) for ea in (display_size or default, input_size) if ea]\n # attrs['size'] = str(int(min(float(display_size), float(input_size)))) # Can't use float(\"inf\") as an int.\n if possible_size and width_attr_name:\n attrs[width_attr_name] = str(min(possible_size))\n expected_attrs[name] = attrs\n # Expected:\n # formfield_attrs_overrides = {\n # '_default_': {'size': 15, 'cols': 20, 'rows': 4, },\n # 'first': {'maxlength': 191, 'size': 20, },\n # 'second': {'maxlength': 2, }, # 'size': 2,\n # 'last': {'maxlength': 2, 'size': 5, },\n # }\n result_fields = self.form.prep_fields()\n result_attrs = {name: field.widget.attrs.copy() for name, field in result_fields.items()}\n first_maxlength = expected_attrs['first']['maxlength'] # overrides['first']['maxlength']\n first_size = expected_attrs['first']['size'] # overrides['first']['size']\n second_maxlength = expected_attrs['second']['maxlength'] # overrides['second']['maxlength']\n last_maxlength = expected_attrs['last']['maxlength'] # overrides['last']['maxlength']\n last_size = expected_attrs['last']['size'] # overrides['last']['size']\n\n self.assertEqual(first_maxlength, result_fields['first'].widget.attrs.get('maxlength', None))\n self.assertEqual(first_size, result_fields['first'].widget.attrs.get('size', None))\n 
self.assertEqual(second_maxlength, result_fields['second'].widget.attrs.get('maxlength', None))\n self.assertEqual(last_maxlength, result_fields['last'].widget.attrs.get('maxlength', None))\n self.assertEqual(last_size, result_fields['last'].widget.attrs.get('size', None))\n for key, val in expected_attrs.items():\n self.assertEqual(val, result_attrs[key])\n self.assertDictEqual(expected_attrs, result_attrs)\n\n self.form.alt_field_info = original_alt_field_info\n if original_alt_field_info is None:\n del self.form.alt_field_info\n self.form.fields = original_fields\n self.form.data = original_data\n self.form.get_overrides = original_get_overrides", "def __init__(self, *args, **kwargs):\n super(CustomAuthenticationForm, self).__init__(*args, **kwargs)\n for field in self.fields:\n self.fields[field].widget.attrs.update(\n {\n 'class': 'form-control',\n }\n )", "def get_form_kwargs(self):\n\n rating_decision_obj = RatingDecision.objects.get(\n id=self.kwargs['rating_decision_pk'])\n\n kwargs = super(RatingDecisionUpdateView, self).get_form_kwargs()\n kwargs.update(\n {'field': self.kwargs['field'],\n 'rating_decision_obj': rating_decision_obj}\n )\n\n return kwargs", "def setContentData(self, content):\n original = content\n if IVersionedObject.providedBy(original):\n content = original.get_editable()\n if content is None:\n self.widgetFactoryFactory = SMIDisplayWidgetFactory\n content = original.get_previewable()\n\n super(SMIEditForm, self).setContentData(content)", "def minimal_form_data():\n\n form_data = { \n 'status': '0',\n 'title': 'Recurso de teste',\n 'description': 'Recurso para testes',\n 'abstract': 'Resumo',\n \n 'main-descriptor-content_type-object_id-TOTAL_FORMS': '0', \n 'main-descriptor-content_type-object_id-INITIAL_FORMS': '0',\n\n 'main-keyword-content_type-object_id-TOTAL_FORMS': '0', \n 'main-keyword-content_type-object_id-INITIAL_FORMS': '0',\n\n 'main-resourcethematic-content_type-object_id-TOTAL_FORMS': '0',\n 'main-resourcethematic-content_type-object_id-INITIAL_FORMS': '0',\n }\n\n return form_data", "def get_form(self, form_class=None):\n if self._form_instance:\n return self._form_instance\n self._form_instance = super().get_form(form_class)\n return self.get_form(form_class)", "def theme_change(request):\n if request.method == \"POST\":\n form = ThemeForm(request.POST, instance=request.user.settings)\n if form.is_valid():\n form.save()\n return home(request, \"Theme Changed Successfully\")\n else:\n form = ThemeForm(instance=request.user.settings)\n \n ctx = _make_context(request, \"theme_form\", form)\n \n return TemplateResponse(request, \"users/index.html\", ctx)", "def _reload_values(self):\r\n raise NotImplementedError", "def _revert(self):\n self.kwargs[\"collect\"].reset_scan_form_data()", "def update_fields(state, **kwargs):\n return state._replace(**kwargs)", "def formfield(self, **kwargs):\n defaults = {\n 'form_class': LocalizedIntegerFieldForm\n }\n\n defaults.update(kwargs)\n return super().formfield(**defaults)", "def post_build(self):", "def formField(self, value):\n raise NotImplementedError('Can\\'t create an abstract preference!!!')", "def get_context_data(self, **kwargs):\n context= super(UpdateSprintView,self).get_context_data(**kwargs)\n current_us = self.get_object().userstory_set.all()\n formset= self.UserStoryFormset(self.request.POST if self.request.method == 'POST' else None)\n self.__filtrar_formset__(formset)\n context['current_action'] = 'Editar'\n context['formset'] = formset\n return context", "def 
get_initial(self):\n\t\n\t#Getting the initial data and setting it\n initial = super(UpdateView, self).get_initial()\n\timage_ref = default_value.get_setting('compute', 'image_ref') \n flavor_ref = default_value.get_setting('compute', 'flavor_ref')\n initial.update({'test_id': self.kwargs['test_id'], 'image_ref': image_ref, 'flavor_ref': flavor_ref})\n return initial", "def reload_text(self):\n self.language = LANGUAGE.get(self.lang)\n self.setWindowTitle(self.language[\"change_pass\"])\n self.username_label.setText(self.language[\"username\"])\n self.password_label.setText(self.language[\"password\"])\n self.password_text.setPlaceholderText(self.language[\"new_pass\"])\n self.save_button.setText(self.language[\"save\"])\n self.cancel_button.setText(self.language[\"cancel\"])", "def get_form(self, request, obj=None, **kwargs):\n form = super().get_form(request, obj, **kwargs)\n if hasattr(self, 'work_ids'):\n form.base_fields['works'].initial = self.work_ids\n return form", "def __init__(self, *args, **kwargs):\n super(HiddenModelObjectInputForm, self).__init__(*args, **kwargs)\n self.fields['model'].choices = get_registered_models(\n ignore=IGNORED_MODELS\n )", "def set_cleaned_data(self, request, step, data):\n self._get_state(request).form_data[step.slug] = data", "def get_form(self, request, obj=None, **kwargs):\n form = super().get_form(request, obj, **kwargs)\n for name, field in form.base_fields.items():\n field.widget.attrs['autocomplete'] = 'off' \n return form", "def updateFrom(self, form):\n #\n # The main properties of the form\n d = self._rsc['application']['backgrounds'][0]\n self.name = form.name\n d['name'] = form.name\n d['title'] = form.Caption\n\n #\n # Add menu height to form height if it is needed\n if form._getControlsOfType(\"Menu\"):\n height_modifier = form.HeightModifier + form.MenuHeight\n else:\n height_modifier = form.HeightModifier\n\n d['size'] = (form.ClientWidth/twips_per_pixel, form.ClientHeight/twips_per_pixel+height_modifier)\n d['position'] = (form.ClientLeft/twips_per_pixel, form.ClientTop/twips_per_pixel)\n\n #\n # The components (controls) on the form\n c = self._rsc['application']['backgrounds'][0]['components']\n\n for cmp in form._getControlList():\n obj = form._get(cmp)\n entry = obj._getControlEntry()\n if entry:\n c += entry\n\n #\n # The menus\n m = []\n self._rsc['application']['backgrounds'][0]['menubar']['menus'] = m\n\n self.addMenus(form, m)", "def procesos(self):\n for name, value in self.cleaned_data.items():\n yield (name, value)", "def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n placeholders = {\n \"first_name\": \"First Name\",\n \"last_name\": \"Last Name\",\n \"default_phone_num\": \"Phone Number\",\n \"default_passport_num\": \"Passport Number\",\n }\n\n self.fields[\"default_phone_num\"].widget.attrs[\"autofocus\"] = True\n for field in self.fields:\n placeholder = placeholders[field]\n self.fields[field].widget.attrs[\"placeholder\"] = placeholder\n self.fields[field].widget.attrs[\n \"class\"\n ] = \"border-black rounded-0 \\\n all-form-input\"\n self.fields[field].label = False\n self.helper = FormHelper()\n self.helper.form_tag = True\n self.helper.layout = Layout(\n Div(\n Field(\n \"first_name\",\n ),\n Field(\n \"last_name\",\n ),\n Field(\n \"default_phone_num\",\n ),\n Field(\n \"default_passport_num\",\n ),\n ),\n ButtonHolder(\n Submit(\"submit\", \"Save\", css_class=\"m-0 btn btn-outline\"),\n ),\n )", "def test_set_alt_data_collection(self):\n names = list(self.test_data.keys())[1:-1]\n 
alt_values = {name: f\"alt_value_{name}\" for name in self.test_initial} # some, but not all, will be used.\n original_form_data = self.form.data\n test_data = self.test_data.copy()\n test_data.update({k: v for k, v in self.test_initial.items() if get_html_name(self.form, k) not in names})\n test_data._mutable = False\n self.form.data = test_data\n initial_data = test_data.copy()\n expected_result = {k: v for k, v in alt_values.items() if get_html_name(self.form, k) not in names}\n expected_data = test_data.copy()\n expected_data.update(expected_result)\n expect_updates = any(self.data_is_initial(name) for name in initial_data)\n test_input = {name: (self.form.fields[name], val) for name, val in alt_values.items()}\n result = self.form.set_alt_data(test_input)\n\n self.assertDictEqual(expected_result, result)\n self.assertDictEqual(expected_data, self.form.data)\n self.assertNotEqual(initial_data, self.form.data)\n self.assertTrue(expect_updates)\n self.assertIsNot(test_data, self.form.data)\n\n self.form.data = original_form_data", "def _refresh(self):\n # if we have all the values we need to hookup to the URL\n for key in self.DBMSettings.keys():\n if not key.startswith(LOCALCHAR):\n self.DBMSettings[key] = self._urldict()[key]", "def form_valid(self, form):\n label = form.cleaned_data[\"label\"]\n\n if \"objects\" not in self.request.session:\n self.request.session[\"objects\"] = OrderedDict()\n if \"forms\" not in self.request.session:\n self.request.session[\"forms\"] = OrderedDict()\n\n self.request.session[\"objects\"].update({label: form.halomod_obj})\n self.request.session[\"forms\"].update({label: form.data})\n\n return super().form_valid(form)", "def _get_form_field_instances(self, form_element_entry=None, origin=None,\n kwargs_update_func=None, return_func=None,\n extra={}, request=None, form_entry=None,\n form_element_entries=None, **kwargs):\n # For the moment, this piece of code has to be present here.\n return_func_results = self.get_origin_return_func_results(\n return_func, form_element_entry, origin\n )\n if return_func_results:\n return return_func_results\n\n # Get form field instances (as defined by ``get_form_field_instances``\n # methods in plugins). In DEBUG mode raise an exception if something\n # goes wrong. Otherwise - skip the element.\n if DEBUG:\n form_field_instances = self.get_form_field_instances(\n request=request,\n form_entry=form_entry,\n form_element_entries=form_element_entries,\n **kwargs\n )\n else:\n try:\n form_field_instances = self.get_form_field_instances(\n request=request,\n form_entry=form_entry,\n form_element_entries=form_element_entries,\n **kwargs\n )\n except AttributeError as err:\n return []\n\n processed_field_instances = []\n for field_name, Field, field_kwargs in form_field_instances:\n Widget = None\n if isinstance(Field, (list, tuple)):\n Field, Widget = Field\n\n # Consider using context for resolving some variables.\n # For instance, if user is logged in, ``request.user.username``\n # as an initial value should put the current users' username\n # as initial value in the form.\n if 'initial' in field_kwargs and field_kwargs['initial']:\n try:\n\n # For security reasons we're not using the original request\n # here.\n stripped_request = StrippedRequest(request)\n context = RequestContext(stripped_request)\n\n # In order to be sure, that no accidental sensitive data\n # is exposed in the forms, we only vales from the\n # fobi specific context processor. 
By automatically\n # force-prefixing all dynamic value definitions with\n # \"fobi_dynamic_values.\" string. See the docs for\n # more (\"Dynamic initial values\" section).\n initial = field_kwargs['initial']\n\n # For the moment, only string types are dynamic\n if isinstance(initial, string_types):\n # Strip down the whitespaces we don't need.\n initial = re.sub(\"{{\\s+\", \"{{\", initial)\n initial = re.sub(\"\\s+}}\", \"}}\", initial)\n\n # Prefix all {{ variable }} occurrences with\n # \"fobi_dynamic_values.\" so that there's no risk of\n # exposing sensitive data. Further security of\n # template context processor variables within\n # \"fobi_dynamic_values.\" is a developer responsibility.\n initial = re.sub(\"{{\", \"{{fobi_dynamic_values.\",\n initial)\n # Strip loading or executing any complicated template\n # tags.\n initial = re.sub(\"{%.*%}\", \"\", initial)\n\n field_kwargs['initial'] = \\\n Template(initial).render(context)\n\n except Exception as err:\n logger.debug(err)\n\n # Data to update field instance kwargs with\n kwargs_update = self.get_origin_kwargs_update_func_results(\n kwargs_update_func,\n form_element_entry,\n origin,\n extra=extra,\n widget_cls=Widget\n )\n\n # if 'widget' in field_kwargs:\n # field_kwargs['widget'] = assemble_form_field_widget_class(\n # base_class=field_kwargs['widget'],\n # plugin=self\n # )\n if kwargs_update:\n field_kwargs.update(kwargs_update)\n\n processed_field_instances.append(\n (field_name, Field(**field_kwargs))\n )\n\n return processed_field_instances", "def replace(self, **kwargs: Any) -> UiField:\n return dc.replace(self, **kwargs)", "def update_control_widgets(self):\n logger.info(f'Loading settings: {self.settings_dict}')\n for k, section in self.settings_dict.items():\n for setting_name, value in section.items():\n self.set_control_value(setting_name, value)", "def getModifyForm(request):\n\t\n\tlogger = logging.getLogger(__name__)\n\t\n\tcontext = {}\n\t\n\ttry:\n\t\t# Get a complete list of sensors.\n\t\tcontext['allsensors'] = Sensor.objects.all()\n\t\n\texcept Sensor.DoesNotExist:\n\t\tlogger.warning(\"No sensors found.\")\n\t\traise Http404\n\t\n\t# Send to template.\n\treturn render(request, 'tuning/modifyForm.tpl', context)", "def process_show_form(self, request, step, form):\n pass", "def get_ui_field_behaviour() -> Dict:\n return {\n \"hidden_fields\": ['schema', 'port', 'extra', 'host'],\n \"relabeling\": {\n 'login': 'Fivetran API Key',\n 'password': 'Fivetran API Secret',\n },\n \"placeholders\": {\n 'login': 'api key',\n 'password': 'api secret',\n },\n }", "def _get_plugin_form_data(self, fields):\n form_data = {}\n for field, default_value in fields:\n try:\n form_data.update(\n {field: self.plugin_data.get(field, default_value)}\n )\n except Exception as err:\n logger.debug(\n \"Error in class %s. 
Details: %s\",\n self.__class__.__name__,\n str(err)\n )\n return form_data", "def set_field_attributes(fields, errors):\n for field in fields:\n field_instance = fields[field]\n widget = field_instance.widget\n if isinstance(field_instance, forms.DateField) and isinstance(widget, forms.TextInput):\n field_instance.format = '%d/%m/%Y'\n add_class_to_widget(widget, 'date')\n widget.attrs['type'] = 'text'\n elif isinstance(field_instance, forms.DateTimeField):\n field_instance.format = '%d/%m/%Y %H:%M'\n if isinstance(widget, forms.TextInput):\n add_class_to_widget(widget, 'datetime')\n elif isinstance(widget, DatetimeInput):\n add_class_to_widget(widget.widgets[0], 'date')\n elif isinstance(field_instance, forms.FloatField) and isinstance(widget, forms.TextInput):\n add_class_to_widget(widget, 'float')\n elif isinstance(field_instance, forms.IntegerField) and isinstance(widget, forms.TextInput):\n add_class_to_widget(widget, 'int')\n elif isinstance(field_instance, forms.CharField) and isinstance(widget, forms.TextInput):\n add_class_to_widget(widget, 'char')\n\n if isinstance(widget, forms.CheckboxSelectMultiple):\n add_class_to_widget(widget, 'checkbox-multiple-select')\n\n if field in errors:\n add_class_to_widget(widget, 'with_errors')\n if 'title' not in widget.attrs:\n widget.attrs['title'] = '; '.join(errors[field])\n\n add_class_to_widget(widget, 'form-control')" ]
[ "0.7147261", "0.62756866", "0.5939157", "0.57501096", "0.5736135", "0.57243216", "0.5698981", "0.55505884", "0.55351985", "0.5531961", "0.55112284", "0.54195195", "0.54063284", "0.539079", "0.5381074", "0.53664094", "0.53374904", "0.5327692", "0.5265599", "0.5257497", "0.5251901", "0.52337426", "0.5199848", "0.5192469", "0.51825714", "0.5180995", "0.517033", "0.5162611", "0.5162036", "0.51362395", "0.5131131", "0.5123595", "0.51127356", "0.51124835", "0.5104992", "0.508327", "0.5082343", "0.508225", "0.5077516", "0.5077516", "0.50767344", "0.50645936", "0.5062824", "0.5050327", "0.5042399", "0.50421554", "0.5034192", "0.5030597", "0.50276005", "0.5026773", "0.50172615", "0.5016615", "0.5014549", "0.5014549", "0.50107", "0.5000226", "0.49970323", "0.49947062", "0.49929273", "0.4992481", "0.49866277", "0.4978561", "0.49711913", "0.4963355", "0.49594608", "0.4949954", "0.49471664", "0.49451688", "0.49412742", "0.49389407", "0.49387452", "0.491995", "0.4912789", "0.4912143", "0.49114442", "0.49077475", "0.49075955", "0.49067566", "0.49040943", "0.48985472", "0.48909765", "0.48904246", "0.48895687", "0.48884368", "0.48868293", "0.48861948", "0.48782995", "0.48703444", "0.4870101", "0.4869329", "0.48674235", "0.48667073", "0.48662442", "0.48602012", "0.48599362", "0.4859354", "0.48572886", "0.48572132", "0.4853717", "0.48502034", "0.48497027" ]
0.0
-1
Function used to attach form fields to wtforms. Not really a great solution, but it is approved by wtforms.
def attach_custom_user_fields(form_cls, **kwargs):
    new_fields = UserFields.query.filter_by(**kwargs).all()
    for field in new_fields:
        validators = []
        if field.required:
            validators.append(InputRequired())
        if field.field_type == "text":
            input_field = StringField(
                field.name, description=field.description, validators=validators
            )
        elif field.field_type == "boolean":
            input_field = BooleanField(
                field.name, description=field.description, validators=validators
            )
        setattr(form_cls, f"fields[{field.id}]", input_field)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def form_CustomisedFormLayoutFields(request):\n schema = schemaish.Structure()\n schema.add( 'firstName', schemaish.String())\n schema.add( 'surname', schemaish.String())\n schema.add( 'age', schemaish.Integer())\n schema.add( 'sex', schemaish.String())\n\n form = formish.Form(schema, 'form')\n\n return form", "def individual_formfields():\n # Instantiate Consent Tracker\n consent = s3db.auth_Consent(processing_types = VOL_CONSENT_OPTIONS)\n\n formfields = [utable.first_name,\n utable.last_name,\n Field(\"addr_L3\",\n label = T(\"Location\"),\n requires = IS_IN_SET(districts_and_uk),\n ),\n Field(\"addr_street\",\n label = T(\"Street Address\"),\n ),\n Field(\"addr_postcode\",\n label = T(\"Postcode\"),\n ),\n Field(\"mobile\",\n label = T(\"Contact Number (Preferred)\"),\n requires = IS_PHONE_NUMBER_MULTI(),\n comment = DIV(_class = \"tooltip\",\n _title = \"%s|%s\" % (T(\"Contact Number (Preferred)\"),\n T(\"Ideally a Mobile Number, so that we can send you Text Messages.\")),\n ),\n ),\n Field(\"home\",\n label = T(\"Contact Number (Secondary)\"),\n requires = IS_EMPTY_OR(IS_PHONE_NUMBER_MULTI()),\n ),\n utable.email,\n utable[passfield],\n # Password Verification Field\n Field(\"password_two\", \"password\",\n label = auth_messages.verify_password,\n requires = IS_EXPR(\"value==%s\" % \\\n repr(request.vars.get(passfield)),\n error_message = auth_messages.mismatched_password,\n ),\n ),\n\n # Skills\n s3db.hrm_multi_skill_id(empty = False,\n label = T(\"Volunteer Offer\"),\n ),\n Field(\"skills_details\",\n label = T(\"Please specify details\"),\n ),\n Field(\"certificates\", \"list:string\",\n label = T(\"Qualifications\"),\n requires = IS_IN_SET(certificates, multiple=True),\n widget = S3MultiSelectWidget(header=\"\",\n selectedList=3),\n ),\n Field(\"experience\",\n label = T(\"Skills and Experience\"),\n widget = lambda f, v: \\\n s3_comments_widget(f, v, _placeholder = \"e.g. Co-ordination, Event Management, PCV qualified.\")\n ),\n Field(\"resources\",\n label = T(\"Offers of Resources\"),\n widget = lambda f, v: \\\n s3_comments_widget(f, v, _placeholder = \"e.g. Minibus.\")\n ),\n Field(\"where_operate\", \"list:string\",\n label = T(\"Where would you be willing to volunteer?\"),\n requires = IS_IN_SET(districts, multiple=True),\n widget = S3MultiSelectWidget(header=\"\",\n selectedList=3),\n ),\n Field(\"travel\", \"integer\",\n label = T(\"Willing to Travel?\"),\n requires = IS_IN_SET({0: T(\"No\"),\n 1: T(\"Yes\"),\n }),\n widget = lambda f, v: \\\n SQLFORM.widgets.radio.widget(f, v,\n style=\"divs\"),\n ),\n Field(\"slots\", \"list:string\",\n label = T(\"Times\"),\n requires = IS_IN_SET(slots, multiple=True),\n widget = S3MultiSelectWidget(header=\"\",\n selectedList=3),\n ),\n Field(\"significant_physical\", \"integer\",\n label = T(\"That require significant physical activity (including lifting and carrying) and may involve being outdoors (e.g. clean up of affected properties)\"),\n requires = IS_IN_SET({0: T(\"No\"),\n 1: T(\"Yes\"),\n }),\n widget = lambda f, v: \\\n SQLFORM.widgets.radio.widget(f, v,\n style=\"divs\"),\n ),\n Field(\"some_physical\", \"integer\",\n label = T(\"That require some physical activity and may involve being outdoors (e.g. door knocking)\"),\n requires = IS_IN_SET({0: T(\"No\"),\n 1: T(\"Yes\"),\n }),\n widget = lambda f, v: \\\n SQLFORM.widgets.radio.widget(f, v,\n style=\"divs\"),\n ),\n Field(\"little_physical\", \"integer\",\n label = T(\"That require little physical activity and are based indoors (e.g. 
preparing refreshments)\"),\n requires = IS_IN_SET({0: T(\"No\"),\n 1: T(\"Yes\"),\n }),\n widget = lambda f, v: \\\n SQLFORM.widgets.radio.widget(f, v,\n style=\"divs\"),\n ),\n Field(\"health_details\",\n label = T(\"If you wish, you can give us some further information on any fitness, medical or mobility issues that might limit the kind of activities you are able to volunteer for; this will help us to suggest suitable opportunities for you\"),\n ),\n Field(\"faith_requirements\", \"integer\",\n label = T(\"Do you have any faith requirements that you would like help with if you are coming to Support Cumbria?\"),\n requires = IS_IN_SET({0: T(\"No\"),\n 1: T(\"Yes\"),\n }),\n widget = lambda f, v: \\\n SQLFORM.widgets.radio.widget(f, v,\n style=\"divs\"),\n ),\n Field(\"faith_requirements_details\",\n label = T(\"If Yes please outline\"),\n ),\n Field(\"emergency_contact_name\",\n label = T(\"Contact Name\"),\n requires = IS_NOT_EMPTY(),\n ),\n Field(\"emergency_contact_number\",\n label = T(\"Contact Number\"),\n requires = IS_PHONE_NUMBER_MULTI(),\n ),\n Field(\"emergency_contact_relationship\",\n label = T(\"Relationship\"),\n requires = IS_NOT_EMPTY(),\n ),\n Field(\"workplace\", \"integer\",\n label = T(\"Are you volunteering under your workplace volunteering scheme?\"),\n requires = IS_IN_SET({0: T(\"No\"),\n 1: T(\"Yes\"),\n }),\n widget = lambda f, v: \\\n SQLFORM.widgets.radio.widget(f, v,\n style=\"divs\"),\n ),\n Field(\"workplace_details\",\n label = T(\"If yes please name your employer\"),\n ),\n Field(\"dbs\", \"integer\",\n label = T(\"Are you DBS checked?\"),\n requires = IS_IN_SET({0: T(\"No\"),\n 1: T(\"Yes\"),\n }),\n widget = lambda f, v: \\\n SQLFORM.widgets.radio.widget(f, v,\n style=\"divs\"),\n ),\n #Field(\"convictions\", \"integer\",\n # label = T(\"Do you have any unspent convictions?\"),\n # comment = T(\"Please tick 'Yes' if you have any convictions that are not yet spent under the Rehabilitation of Offenders Act 1974. The term 'convictions' is used to refer to any sentence or disposal issued by a court. If all your convictions are spent, you can tick 'No'. 
If you're not sure if your convictions are unspent or spent, you can use a tool available at www.disclosurecalculator.org.uk and read guidance at hub.unlock.org.uk/roa\"),\n # requires = IS_IN_SET({0: T(\"No\"),\n # 1: T(\"Yes\"),\n # }),\n # widget = lambda f, v: \\\n # SQLFORM.widgets.radio.widget(f, v,\n # style=\"divs\"),\n # ),\n # Consent (GDPR + FOC)\n Field(\"consent\",\n label = T(\"Consent\"),\n widget = consent.widget,\n ),\n ]\n\n required_fields = [\"first_name\",\n \"last_name\",\n \"addr_L3\",\n \"addr_street\",\n \"addr_postcode\",\n \"mobile\",\n \"emergency_contact\",\n \"where_operate\",\n ]\n\n return formfields, required_fields", "def form_tweaks(self):\n pass", "def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n placeholders = {\n \"first_name\": \"First Name\",\n \"last_name\": \"Last Name\",\n \"default_phone_num\": \"Phone Number\",\n \"default_passport_num\": \"Passport Number\",\n }\n\n self.fields[\"default_phone_num\"].widget.attrs[\"autofocus\"] = True\n for field in self.fields:\n placeholder = placeholders[field]\n self.fields[field].widget.attrs[\"placeholder\"] = placeholder\n self.fields[field].widget.attrs[\n \"class\"\n ] = \"border-black rounded-0 \\\n all-form-input\"\n self.fields[field].label = False\n self.helper = FormHelper()\n self.helper.form_tag = True\n self.helper.layout = Layout(\n Div(\n Field(\n \"first_name\",\n ),\n Field(\n \"last_name\",\n ),\n Field(\n \"default_phone_num\",\n ),\n Field(\n \"default_passport_num\",\n ),\n ),\n ButtonHolder(\n Submit(\"submit\", \"Save\", css_class=\"m-0 btn btn-outline\"),\n ),\n )", "def make_fields(self):\n for name, prop in self.edit:\n instance_value = self.model.get(name)\n post_value = self.data[name] if (self.data and self.data.has_key(name)) else instance_value\n form_field_class = self.get_field_type(prop)\n form_field = form_field_class(model=self.model, property=prop, name=name, instance_value=instance_value, post_value=post_value)\n self.add(form_field)", "def add_field(self, name, value):\n self.form_fields.append((name, value))\n return", "def add_field(self, name, value):\n self.form_fields.append((name, value))\n return", "def add_field(self, name, value):\n self.form_fields.append((name, value))\n return", "def render_custom_fields(form):\n return {\n 'form': form,\n }", "def formfields():\n\n T = current.T\n request = current.request\n\n auth = current.auth\n auth_settings = auth.settings\n auth_messages = auth.messages\n\n utable = auth_settings.table_user\n passfield = auth_settings.password_field\n\n # Last name is required\n utable.last_name.requires = IS_NOT_EMPTY(error_message=T(\"input required\"))\n\n # Don't check for duplicate email (will be done in onvalidation)\n # => user might choose to use the current email address of the account\n # => if registration key or code are invalid, we don't want to give away\n # any existing email addresses\n utable.email.requires = [IS_EMAIL(error_message = auth_messages.invalid_email),\n IS_LOWER(),\n ]\n\n # Instantiate Consent Tracker\n consent = ConsentTracking(processing_types=[\"STORE\", \"RULES_ISS\"])\n\n # Form fields\n formfields = [utable.first_name,\n utable.last_name,\n utable.email,\n utable[passfield],\n Field(\"password_two\", \"password\",\n label = auth_messages.verify_password,\n requires = IS_EXPR(\"value==%s\" % \\\n repr(request.vars.get(passfield)),\n error_message = auth_messages.mismatched_password,\n ),\n comment = DIV(_class = \"tooltip\",\n _title = \"%s|%s\" % 
(auth_messages.verify_password,\n T(\"Enter the same password again\"),\n ),\n ),\n ),\n Field(\"code\",\n label = T(\"Registration Code\"),\n requires = IS_NOT_EMPTY(),\n ),\n Field(\"consent\",\n label = T(\"Consent\"),\n widget = consent.widget,\n ),\n ]\n\n\n # Required fields\n required_fields = [\"first_name\",\n \"last_name\",\n ]\n\n return formfields, required_fields", "def test_make_form_field():", "def formfields(cls):\n\n T = current.T\n request = current.request\n\n auth = current.auth\n auth_settings = auth.settings\n auth_messages = auth.messages\n\n utable = auth_settings.table_user\n passfield = auth_settings.password_field\n\n # Instantiate Consent Tracker\n consent = ConsentTracking(processing_types=[\"SHARE\", \"RULES_PRO\", \"TPNDO\"])\n\n # Last name is required\n utable.last_name.requires = IS_NOT_EMPTY(error_message=T(\"input required\"))\n\n #ltable = s3db.gis_location\n\n # Lookup projects with provider self-registration\n projects = cls.selectable_projects()\n\n # Lookup site services\n services = cls.selectable_services()\n\n # Lookup applicable organisation types\n org_types = applicable_org_types(None, group=TESTSTATIONS, represent=True)\n\n # Form fields\n formfields = [# -- User account ---\n utable.first_name,\n utable.last_name,\n utable.email,\n utable[passfield],\n\n # Password Verification Field\n Field(\"password_two\", \"password\",\n label = auth_messages.verify_password,\n requires = IS_EXPR(\"value==%s\" % \\\n repr(request.vars.get(passfield)),\n error_message = auth_messages.mismatched_password,\n ),\n comment = DIV(_class = \"tooltip\",\n _title = \"%s|%s\" % (auth_messages.verify_password,\n T(\"Enter the same password again\"),\n ),\n ),\n ),\n # -- Test Station ---\n Field(\"organisation\",\n label = T(\"Name\"),\n requires = [IS_NOT_EMPTY(), IS_LENGTH(60)],\n comment = DIV(_class = \"tooltip\",\n _title = \"%s|%s\" % (T(\"Test Station Name\"),\n T(\"Specify the name of the test station (max 60 characters)\"),\n ),\n ),\n ),\n Field(\"organisation_type\", \"integer\",\n label = T(\"Organization Type\"),\n requires = IS_IN_SET(org_types),\n ),\n\n # -- Address --\n Field(\"location\", \"json\",\n widget = LocationSelector(\n levels = (\"L1\", \"L2\", \"L3\", \"L4\"),\n required_levels = (\"L1\", \"L2\", \"L3\"),\n show_address = True,\n address_required = True,\n show_postcode = True,\n postcode_required = True,\n show_map = True,\n ),\n ),\n # -- Service Offer --\n Field(\"opening_times\",\n label = T(\"Opening Hours\"),\n requires = IS_NOT_EMPTY(),\n ),\n Field(\"service_mode\", \"integer\",\n label = T(\"Service Mode\"),\n requires = IS_IN_SET(cls.selectable_services_modes()),\n ),\n Field(\"services\", \"list:integer\",\n label = T(\"Services\"),\n requires = IS_IN_SET(services,\n multiple = True,\n zero = None,\n ),\n widget = WithAdvice(S3GroupedOptionsWidget(cols=1),\n # Widget intro from CMS\n text = (\"org\",\n \"facility\",\n \"SiteServiceIntro\",\n ),\n ),\n ),\n # -- Contact and Appointments --\n Field(\"facility_phone\",\n label = T(\"Telephone\"),\n requires = IS_EMPTY_OR(IS_PHONE_NUMBER_MULTI()),\n ),\n Field(\"facility_email\",\n label = T(\"Email\"),\n requires = IS_EMPTY_OR(IS_EMAIL()),\n ),\n Field(\"facility_website\",\n label = T(\"Website\"),\n ),\n Field(\"booking_mode\", \"integer\",\n label = T(\"Appointments via\"),\n requires = IS_EMPTY_OR(IS_IN_SET(\n cls.selectable_booking_modes(),\n )),\n ),\n Field(\"comments\", \"text\",\n label = T(\"Comments\"),\n widget = s3_comments_widget,\n ),\n\n # -- Administrative --\n 
Field(\"projects\", \"list:integer\",\n label = T(\"Programs\"),\n requires = [IS_IN_SET(projects,\n multiple = True,\n zero = None,\n ),\n IS_NOT_EMPTY(),\n ],\n widget = WithAdvice(S3GroupedOptionsWidget(cols=1),\n # Widget intro from CMS\n text = (\"org\",\n \"organisation\",\n \"ProjectParticipationIntro\",\n ),\n ),\n ),\n # -- Privacy and Consent --\n Field(\"consent\",\n label = T(\"Consent\"),\n widget = consent.widget,\n ),\n ]\n\n # Required fields\n required_fields = [\"first_name\",\n \"last_name\",\n ]\n\n # Subheadings\n subheadings = ((0, T(\"User Account\")),\n (5, T(\"Test Station\")),\n (7, T(\"Address\")),\n (8, T(\"Service Offer\")),\n (11, T(\"Contact and Appointments\")),\n (16, T(\"Administrative\")),\n (17, \"%s / %s\" % (T(\"Privacy\"), T(\"Terms of Service\"))),\n )\n\n # Geocoder\n current.response.s3.scripts.append(\"/%s/static/themes/RLP/js/geocoderPlugin.js\" % request.application)\n\n return formfields, required_fields, subheadings", "def __init__(self, *args, **kwargs):\n super(SignupForm, self).__init__(*args, **kwargs)\n self.fields['email'].required = True\n self.fields['first_name'].required = True\n self.fields['password'].widget = forms.PasswordInput() \n\n for field in self.fields:\n self.fields[field].widget.attrs.update(\n {\n 'class': 'form-control',\n }\n )", "def add_field(self, name, value):\n if not isinstance(value, str):\n value = json.dumps(value, ensure_ascii=False)\n self.form_fields.append((name, value))\n return", "def make_form(self):", "def __init__(field, form, content):", "def __init__(field, form, content):", "def formfields(form, *fields, **kwargs):\n from bigfoot import elements\n wrapper_class = kwargs.pop('wrapper_class', elements.ElementSet)\n field_class = kwargs.pop('field_class', elements.FormField)\n if not fields:\n fields = form.fields.keys()\n res = [field_class(form, field, **kwargs) for field in fields]\n if wrapper_class:\n res = wrapper_class(*res)\n return res", "def add_field(self, **kwargs):\n field = {\n 'name': kwargs.get('name'),\n 'value': kwargs.get('value'),\n 'inline': kwargs.get('inline', False)\n }\n\n self.fields.append(field)", "def form_RestishExample(request):\n form = formish.Form(SimpleSchema())\n form['comments'].widget = formish.TextArea()\n return form", "def __init__(self, *args, **kwargs):\n super(CustomAuthenticationForm, self).__init__(*args, **kwargs)\n for field in self.fields:\n self.fields[field].widget.attrs.update(\n {\n 'class': 'form-control',\n }\n )", "def _generate_form_fields(self):\n params = list(filter(lambda x: (x.precedence is None or x.precedence >= 0) and not x.constant,\n self.param.params().values()))\n for p in sorted(params, key=lambda p: p.precedence or 9999):\n # TODO: Pass p.__dict__ as second argument instead of arbitrary\n p_name = p.name\n\n # Preserve param tuple type.\n if self.data:\n if isinstance(getattr(self.param, p.name), tuple):\n p.default = tuple(self.data.getlist(p.name))\n\n # Preserve initial options for Selector\n if isinstance(self.param.params()[p_name], (param.FileSelector, param.MultiFileSelector)):\n p.default = \"\"\n\n self.fields[p_name] = self.widget_map[type(p)](self.param, p, p.name)\n self.fields[p_name].label = p.name.replace(\"_\", \" \").title()\n if self.read_only is None:\n widget_attribute = {'class': 'form-control'}\n else:\n # TODO: Should this be readonly instead of disable?\n widget_attribute = {'class': 'form-control', 'disabled': self.read_only}\n self.fields[p_name].widget.attrs.update(widget_attribute)\n 
self.fields[p_name].required = not self.param.params()[p_name].allow_None\n self.fields[p_name].disabled = self.param.params()[p_name].constant\n self.fields[p_name].help_text = self.param.params()[p_name].doc\n # self.fields = self.base_fields", "def addProductFields(form, forCreation=False, restWriter=None, hasOptions=False):\n form.addField('code', formal.String(required=True, strip=True))\n form.addField('title', formal.String(required=True, strip=True))\n\n images = formal.Group('images')\n form.add( images )\n images.add( formal.Field('mainImage', formal.File(required=forCreation), \n widgetFactory=formal.widgetFactory( formal.FileUploadWidget,\n convertibleFactory=contenttypeutil.KeyToFileConverter,\n originalKeyIsURL=True),description='click to change') )\n images.add( formal.Field('ndgrad', formal.File(), \n widgetFactory=formal.widgetFactory( formal.FileUploadWidget,\n convertibleFactory=contenttypeutil.KeyToFileConverter,\n originalKeyIsURL=True),description='click to change') )\n\n\n availability = formal.Group('availability')\n form.add( availability )\n\n availability.add( formal.Field('show', formal.Boolean()))\n availability.add( formal.Field('available', formal.Boolean()) )\n availability.add( formal.Field('availabilityDescription', formal.String()) )\n\n metadata = formal.Group('metadata')\n form.add( metadata )\n\n metadata.add( formal.Field('date', formal.Date(), formal.widgetFactory(formal.DatePartsInput, dayFirst=True)))\n metadata.add( formal.Field('location', formal.String()) )\n \n lensOptions = [\n \"80mm Schneider Super Symmar XL f/4.5\",\n \"110mm Schneider Super Symmar XL f/5.6\",\n \"150mm Rodenstock Sironar S f/5.6\",\n \"240mm Fujinon A f/9\",\n \"360mm Nikkor T*ED f/8\",\n \"360mm Nikkor T*ED f/11\",\n ]\n metadata.add( formal.Field('lens', formal.String(),formal.widgetFactory(formal.SelectOtherChoice, options=lensOptions) ) )\n \n # this is a redundant field... need to remove if possible\n metadata.add( formal.Field('speedaperture', formal.String()) )\n \n speedOptions = ['1/500', '1/250','1/125','1/60','1/30','1/15','1/8','1/4','1/2','1s','2s','4s','8s','15s','30s','1m','2m']\n metadata.add( formal.Field('speed', formal.String(),formal.widgetFactory(formal.SelectOtherChoice, options=speedOptions),description='If you enter a text value please use the same format as the existing values e.g. 
6s, 1/3, 2m' ) )\n \n \n apertureOptions = ['f/5.6','f/6.3','f/8','f/8⅓','f/8½','f/8⅔','f/16','f/16⅓','f/16½','f/16⅔','f/22','f/22⅓','f/22½','f/22⅔','f/32','f/32⅓','f/32½','f/32⅔','f/45','f/45⅓','f/45½','f/45⅔']\n metadata.add( formal.Field('aperture', formal.String(),formal.widgetFactory(formal.SelectOtherChoice, options=apertureOptions) ) ) \n metadata.add( formal.Field('tiltswing', formal.String()) )\n metadata.add( formal.Field('fronttilt', formal.Integer()) )\n metadata.add( formal.Field('reartilt', formal.Integer()) )\n metadata.add( formal.Field('risefall', formal.String()) )\n ndfilters = ['0.3S','0.45S','0.6S','0.75S','0.9S','0.3H','0.45H','0.6H','0.75H','0.9H']\n metadata.add( formal.Field('ndfilters', formal.String(),formal.widgetFactory(formal.SelectOtherChoice, options=ndfilters)) )\n otherfilters=['81A','81B','81C','Polariser']\n metadata.add( formal.Field('otherfilters', formal.String(), formal.widgetFactory(formal.SelectOtherChoice, options=otherfilters)) )\n\n \n \n \n data_strings = [\n (0, '-'),\n (1, '*'),\n (2, '**'),\n (3, '***'),\n (4, '****'),\n (5, '*****'),\n ] \n \n metadata.add( formal.Field('rating', formal.Integer(), formal.widgetFactory(formal.SelectChoice, options=data_strings)) )\n\n\n description = formal.Group('description')\n form.add( description )\n parsers = [('markdown','MarkDown'),('xhtml','XHTML'),('plain','Plain Text')]\n description.add( formal.Field('summary', formal.RichTextType(required=True),\n widgetFactory=formal.widgetFactory(richtextarea.RichTextArea, parsers=parsers),\n cssClass=' '.join(['imagepicker','preview','itemselector']) ) )\n description.add( formal.Field('description', formal.RichTextType(required=True),\n widgetFactory=formal.widgetFactory(richtextarea.RichTextArea, parsers=parsers),\n cssClass=' '.join(['imagepicker','preview','itemselector']) ) )\n description.add( formal.Field('categories', formal.Sequence(formal.String()), \n widgetFactory=categorieswidget.FormalCheckboxTreeMultichoice ) )\n\n\n\n if not hasOptions:\n pricing = formal.Group('pricing')\n form.add( pricing )\n pricing.add( formal.Field('price', formal.Decimal(required=True)) )\n\n\n seo = formal.Group('seo')\n form.add( seo )\n seo.add( formal.Field('titleTag', formal.String()) )\n seo.add( formal.Field('metaDescription', formal.String()) )\n seo.add( formal.Field('metaKeywords', formal.String()) )", "def render_form(form: wtforms.Form) -> Markup:\n # the defaults for checkboxes and submits are weird and the API limited,\n # hence this hacky fix\n checkboxes = [field.name for field in form if isinstance(field.widget, wtforms.widgets.CheckboxInput)]\n submits = [field.name for field in form if isinstance(field.widget, wtforms.widgets.SubmitInput)]\n return (\n wtforms_bootstrap5.RendererContext()\n .form()\n .default_field(\n row_class=\"row mb-3\",\n label_class=\"form-label col-sm-3 col-form-label\",\n field_wrapper_class=\"col-sm-9\",\n field_wrapper_enabled=True,\n )\n .field(\n *checkboxes,\n wrapper_class=\"offset-sm-3 col-sm-9\",\n wrapper_enabled=True,\n field_wrapper_enabled=False,\n )\n .field(\n *submits,\n field_wrapper_class=\"offset-sm-3 col-sm-9\",\n field_wrapper_enabled=True,\n )\n ).render(form)", "def boots_field(field):\n\n field.field.widget.attrs['placeholder'] = field.label\n\n if type(field.field) in BOOTSTRAP_TEMPLATE_SWITCH:\n t = template.loader.get_template(BOOTSTRAP_TEMPLATE_SWITCH[type(field.field)])\n else:\n t = template.loader.get_template(\"bootstrap_tags/form_field.html\")\n\n return t.render(template.Context({\"field\": 
field}))", "def __init__(self, *args, **kwargs):\n super(ProfileForm, self).__init__(*args, **kwargs) \n for field in self.fields:\n self.fields[field].widget.attrs.update(\n {\n 'class': 'form-control',\n }\n )", "def get_form(self, request, obj=None, **kwargs):\n defaults = {}\n if obj is None:\n defaults.update(\n {\"form\": self.add_form, \"fields\": flatten_fieldsets(self.add_fieldsets)}\n )\n defaults.update(kwargs)\n return super().get_form(request, obj, **defaults)", "def render_form():", "def __init__(self, *args, **kwargs):\n kwargs.pop('widget_syntax')\n\n super(TemplateForm, self).__init__( *args, **kwargs)\n print self.fields", "def model_form_factory(base=Form, meta=ModelFormMeta, **defaults):\n\n class ModelForm(six.with_metaclass(meta, base)):\n \"\"\"\n A function that returns SQLAlchemy session. This should be\n assigned if you wish to use Unique validator. If you are using\n Flask-SQLAlchemy along with WTForms-Alchemy you don't need to\n set this.\n \"\"\"\n get_session = None\n\n class Meta:\n model = None\n\n default = None\n\n #: Whether or not to skip unknown types. If this is set to True,\n #: fields with types that are not present in FormGenerator type map\n #: will be silently excluded from the generated form.\n #:\n #: By default this is set to False, meaning unknown types throw\n #: exceptions when encountered.\n skip_unknown_types = defaults.pop('skip_unknown_types', False)\n\n #: Whether or not to assign all fields as optional, useful when\n #: creating update forms for patch requests\n all_fields_optional = defaults.pop('all_fields_optional', False)\n\n validators = defaults.pop('validators', {})\n\n #: A dict with keys as field names and values as field arguments.\n field_args = defaults.pop('field_args', {})\n\n #: A dict with keys as field names and values as widget options.\n widget_options = defaults.pop('widget_options', {})\n\n #: Whether or not to include only indexed fields.\n only_indexed_fields = defaults.pop('only_indexed_fields', False)\n\n #: Whether or not to include primary keys.\n include_primary_keys = defaults.pop('include_primary_keys', False)\n\n #: Whether or not to include foreign keys. By default this is False\n #: indicating that foreign keys are not included in the generated\n #: form.\n include_foreign_keys = defaults.pop('include_foreign_keys', False)\n\n #: Whether or not to strip string fields\n strip_string_fields = defaults.pop('strip_string_fields', False)\n\n #: Whether or not to include datetime columns that have a default\n #: value. A good example is created_at column which has a default\n #: value of datetime.utcnow.\n include_datetimes_with_default = defaults.pop(\n 'include_datetimes_with_default', False\n )\n\n #: The default validator to be used for not nullable columns. 
Set\n #: this to `None` if you wish to disable it.\n not_null_validator = defaults.pop(\n 'not_null_validator',\n InputRequired()\n )\n\n #: A dictionary that overrides not null validation on type level.\n #: Keys should be valid SQLAlchemy types and values should be valid\n #: WTForms validators.\n not_null_validator_type_map = defaults.pop(\n 'not_null_validator_type_map',\n ClassMap(\n [(sa.String, [InputRequired(), DataRequired()])]\n )\n )\n\n #: Default email validator\n email_validator = Email\n\n #: Default length validator\n length_validator = Length\n\n #: Default unique validator\n unique_validator = Unique\n\n #: Default number range validator\n number_range_validator = NumberRange\n\n #: Default date range validator\n date_range_validator = DateRange\n\n #: Default time range validator\n time_range_validator = TimeRange\n\n #: Default optional validator\n optional_validator = Optional\n\n #: Which form generator to use. Only override this if you have a\n #: valid form generator which you want to use instead of the\n #: default one.\n form_generator = defaults.pop(\n 'form_generator', FormGenerator\n )\n\n #: Default date format\n date_format = defaults.pop('date_format', '%Y-%m-%d')\n\n #: Default datetime format\n datetime_format = defaults.pop(\n 'datetime_format', '%Y-%m-%d %H:%M:%S'\n )\n\n #: Dictionary of SQLAlchemy types as keys and WTForms field classes\n #: as values. The key value pairs of this dictionary override\n #: the key value pairs of FormGenerator.TYPE_MAP.\n #:\n #: Using this configuration option one can easily configure the\n #: type conversion in class level.\n type_map = defaults.pop('type_map', ClassMap())\n\n #: Whether or not to raise InvalidAttributExceptions when invalid\n #: attribute names are given for include / exclude or only\n attr_errors = defaults.pop('attr_errors', True)\n\n #: Additional fields to include in the generated form.\n include = defaults.pop('include', [])\n\n #: List of fields to exclude from the generated form.\n exclude = defaults.pop('exclude', [])\n\n #: List of fields to only include in the generated form.\n only = defaults.pop('only', [])\n\n def __init__(self, *args, **kwargs):\n \"\"\"Sets object as form attribute.\"\"\"\n\n self._obj = kwargs.get('obj', None)\n super(ModelForm, self).__init__(*args, **kwargs)\n\n if defaults:\n raise UnknownConfigurationOption(\n list(defaults.keys())[0]\n )\n\n return ModelForm", "def populate_form(self, **kwargs):\n for name, value in kwargs.items():\n self.populate_field(name, value)", "def get_connection_form_widgets():\n from flask_appbuilder.fieldwidgets import (\n BS3TextFieldWidget,\n BS3PasswordFieldWidget,\n )\n from wtforms import StringField\n\n return {\n \"extra__ewah_amazon_ads__lwa_client_id\": StringField(\n \"AWS LWA Client ID\", widget=BS3TextFieldWidget()\n ),\n \"extra__ewah_amazon_ads__lwa_client_secret\": StringField(\n \"AWS LWA CLient Secret\", widget=BS3PasswordFieldWidget()\n ),\n \"extra__ewah_amazon_ads__region\": StringField(\n \"Amazon Ads Region (one of: NA, EU, FE)\", widget=BS3TextFieldWidget()\n ),\n \"extra__ewah_amazon_ads__refresh_token\": StringField(\n \"Refresh Token\", widget=BS3PasswordFieldWidget()\n ),\n }", "def _create_forms_py(self, form_class_name):\n return '''\n \"\"\"Configuration forms for the extension.\"\"\"\n\n from django import forms\n from djblets.extensions.forms import SettingsForm\n\n\n class %(form_class_name)s(SettingsForm):\n my_field_1 = forms.CharField()\n my_field_2 = forms.BooleanField()\n ''' % {\n 'form_class_name': 
form_class_name,\n }", "def __init__(self, *args, **kw):\n super(SignupFormExtra, self).__init__(*args, **kw)\n # Put the first and last name at the top\n new_order = self.fields.keyOrder[:-2]\n new_order.insert(0, 'first_name')\n new_order.insert(1, 'last_name')\n self.fields.keyOrder = new_order", "def add_fields(self, fields):\n for label, data in fields.items():\n self[label] = data", "def show_fieldsetform(form):\n return {'form': form, 'required_fields': True}", "def set_field_attributes(fields, errors):\n for field in fields:\n field_instance = fields[field]\n widget = field_instance.widget\n if isinstance(field_instance, forms.DateField) and isinstance(widget, forms.TextInput):\n field_instance.format = '%d/%m/%Y'\n add_class_to_widget(widget, 'date')\n widget.attrs['type'] = 'text'\n elif isinstance(field_instance, forms.DateTimeField):\n field_instance.format = '%d/%m/%Y %H:%M'\n if isinstance(widget, forms.TextInput):\n add_class_to_widget(widget, 'datetime')\n elif isinstance(widget, DatetimeInput):\n add_class_to_widget(widget.widgets[0], 'date')\n elif isinstance(field_instance, forms.FloatField) and isinstance(widget, forms.TextInput):\n add_class_to_widget(widget, 'float')\n elif isinstance(field_instance, forms.IntegerField) and isinstance(widget, forms.TextInput):\n add_class_to_widget(widget, 'int')\n elif isinstance(field_instance, forms.CharField) and isinstance(widget, forms.TextInput):\n add_class_to_widget(widget, 'char')\n\n if isinstance(widget, forms.CheckboxSelectMultiple):\n add_class_to_widget(widget, 'checkbox-multiple-select')\n\n if field in errors:\n add_class_to_widget(widget, 'with_errors')\n if 'title' not in widget.attrs:\n widget.attrs['title'] = '; '.join(errors[field])\n\n add_class_to_widget(widget, 'form-control')", "def add_field(self, name, hint_text, helper_text=\"\", required=False):\n field = MyTextField(hint_text=hint_text, helper_text=helper_text, required=required)\n self.add_widget(field)\n self.fields[name] = field", "def insert_onlineformfield_from_form(dbo, username, post):\n formfieldid = db.get_id(dbo, \"onlineformfield\")\n sql = db.make_insert_sql(\"onlineformfield\", ( \n ( \"ID\", db.di(formfieldid)),\n ( \"OnlineFormID\", post.db_integer(\"formid\")),\n ( \"FieldName\", post.db_string(\"fieldname\")),\n ( \"FieldType\", post.db_integer(\"fieldtype\")),\n ( \"Label\", post.db_string(\"label\")),\n ( \"DisplayIndex\", post.db_integer(\"displayindex\")),\n ( \"Mandatory\", post.db_boolean(\"mandatory\")),\n ( \"Lookups\", post.db_string(\"lookups\")),\n ( \"Tooltip\", post.db_string(\"tooltip\"))\n ))\n db.execute(dbo, sql)\n audit.create(dbo, username, \"onlineformfield\", str(formfieldid))\n return formfieldid", "def update_form_fields(form, required=None, hidden=None, validators=None, fields_data=None):\n required = required or []\n hidden = hidden or []\n validators = validators or {}\n fields_data = fields_data or {}\n\n # Mark fields as required on both the form and widget\n for field in required:\n form.fields[field].required = True\n form.fields[field].widget.is_required = True\n\n # Mark fields as hidden on the widget\n for field in hidden:\n form.fields[field].widget = HiddenInput()\n\n # Set validators on fields.\n for field, data in validators.items():\n form.fields[field].validators = data\n\n # Update field and widget attributes.\n for field, data in fields_data.items():\n if data.get(\"attributes\", None):\n widget = form.fields[field].widget\n field = form.fields[field]\n\n # Special case, allow for the assignment 
of a different input type.\n if data[\"attributes\"].get(\"type\"):\n widget.input_type = data[\"attributes\"].pop(\n \"type\", widget.input_type\n )\n\n # Widgets for the most part make use of a dictionary structure, so\n # just update the dictionary blindly.\n widget.attrs.update(data[\"attributes\"])\n\n # Fields make use of instance attributes, so it requires a\n # different approach.\n for attr, val in data[\"attributes\"].items():\n setattr(field, attr, val)", "def get_form_fields(url):\n page = urlopen(url)\n soup = BeautifulSoup(page)\n form = soup.form\n\n # Setting up data structure\n form_data = dict(fields=[])\n form_attr = dict(form.attrs)\n\n form_data['title'] = soup.h1 and soup.h1.text or soup.title.text\n form_data['action'] = urljoin(url, form_attr['action'])\n form_data['method'] = form_attr['method']\n \n # Get a list of the entry labels\n labels = form.findAll(['label'], {\"class\": \"ss-q-title\"})\n\n label_contents = []\n for label in labels:\n label_contents.append({label.attrs[1][0]: label.attrs[1][1], 'contents': label.contents[0]})\n \n #print(label_contents)\n \n #\n # Handle text input boxes\n #\n textboxes = form.findAll(['input'], {\"type\": \"text\"})\n \n #textbox_description = {}\n\n for textbox in textboxes: \n textbox_description = {} \n for index, label in enumerate(label_contents):\n if label_contents[index]['for'] == textbox['id']:\n #print(label_contents[index]['contents'].strip())\n textbox_description['label'] = label_contents[index]['contents'].strip()\n break\n \n abbreviated_attributes = dict((k,v) for (k,v) in textbox.attrs if k == \"type\" or k == \"name\")\n # abbreviated_attributes = {k : v for k in textbox.attrs} # 2.7 and above\n \n # Merge abbreviated attributes with textbox description\n textbox_description = dict(textbox_description.items() + abbreviated_attributes.items())\n \n form_data['fields'].append(textbox_description)\n \n #\n # Handle the textareas\n #\n textareas = form.findAll(['textarea'])\n \n for textarea in textareas:\n textarea_description = {}\n for index, label in enumerate(label_contents):\n if label_contents[index]['for'] == textarea['id']:\n textarea_description['label'] = label_contents[index]['contents'].strip()\n break\n \n abbreviated_attributes = dict((k,v) for (k,v) in textarea.attrs if k == \"name\")\n abbreviated_attributes['type'] = textarea.name\n \n textarea_description = dict(textarea_description.items() + abbreviated_attributes.items())\n \n form_data['fields'].append(textarea_description)\n \n \"\"\"\n Ignore groups of checkboxes for now\n \n ####\n # Handle groups of checkboxes\n ####\n \n checkboxes = form.findAll(['input'], {'type': 'checkbox'})\n\n # Get your checkbox groups\n checkbox_groups = []\n for checkbox in checkboxes:\n if checkbox['name'] not in checkbox_groups:\n checkbox_groups.append(checkbox['name'])\n\n checkbox_questions = {}\n\n for group in checkbox_groups:\n checkbox_questions[group] = {'label': {}, 'options': []}\n \n for checkbox in checkboxes:\n for group in checkbox_groups:\n if checkbox['name'] == group:\n checkbox_questions[group]['options'].append({'attributes': dict(checkbox.attrs)})\n \n # Handle the label\n checkbox_name_pieces = checkbox['name'].split('.')\n checkbox_name_map = checkbox_name_pieces[0] + '_' + checkbox_name_pieces[1]\n \n for label in label_contents:\n if label['for'] == checkbox_name_map:\n checkbox_questions[group]['label'] = label\n page_data['form_contents'].append({'checkbox_groups': checkbox_questions})\n \"\"\"\n \n return form_data", "def 
_replace_fields(self):\n for name, value in self._cleaned_data.items():\n setattr(self, name, value)", "def add_field(self, field, field_data):\n self.extra_fields[field] = field_data", "def mongofield_to_formfield(mongo_field, widget=None, **kwargs):\n if mongo_field.choices:\n return forms.ChoiceField(\n choices = mongo_field.choices,\n widget = widget,\n label = mongo_field.verbose_name,\n required = mongo_field.required,\n help_text = mongo_field.help_text\n )\n if mongo_field.validation and callable(mongo_field.validation):\n validators = [mongo_field.validation]\n else:\n validators = []\n if mongo_field.__class__.__name__ == 'StringField':\n if mongo_field.regex:\n validators.append(RegexValidator(regex = mongo_field.regex))\n return forms.CharField(\n label = mongo_field.verbose_name,\n max_length = mongo_field.max_length,\n min_length = mongo_field.min_length,\n validators = validators,\n widget = widget,\n required = mongo_field.required,\n help_text = mongo_field.help_text\n )\n elif mongo_field.__class__.__name__ == 'IntField':\n return forms.IntegerField(\n min_value = mongo_field.min_value,\n max_value= mongo_field.max_value,\n validators = validators,\n widget = widget,\n label = mongo_field.verbose_name,\n required = mongo_field.required,\n help_text = mongo_field.help_text\n )\n elif mongo_field.__class__.__name__ == 'FloatField':\n return forms.FloatField(\n max_value = mongo_field.max_value,\n min_value = mongo_field.min_value,\n validators = validators,\n widget = widget,\n label = mongo_field.verbose_name,\n required = mongo_field.required,\n help_text = mongo_field.help_text\n )\n elif mongo_field.__class__.__name__ == 'BooleanField':\n return forms.BooleanField(\n widget = widget,\n label = mongo_field.verbose_name,\n required = mongo_field.required,\n help_text = mongo_field.help_text\n )\n elif mongo_field.__class__.__name__ == 'DateTimeField':\n return forms.DateTimeField(\n validators = validators,\n widget = widget,\n label = mongo_field.verbose_name,\n required = mongo_field.required,\n help_text = mongo_field.help_text\n )\n elif mongo_field.__class__.__name__ == 'DecimalField':\n return forms.DecimalField(\n max_value = mongo_field.max_value,\n min_value = mongo_field.min_value,\n validators = validators,\n widget = widget,\n label = mongo_field.verbose_name,\n required = mongo_field.required,\n help_text = mongo_field.help_text\n )\n elif mongo_field.__class__.__name__ == 'URLField':\n return forms.URLField(\n verify_exists = mongo_field.verify_exists,\n validators = validators,\n widget = widget,\n label = mongo_field.verbose_name,\n required = mongo_field.required,\n help_text = mongo_field.help_text\n )\n elif mongo_field.__class__.__name__ == 'FileField':\n return forms.FileField(\n validators = validators,\n widget = widget,\n label = mongo_field.verbose_name,\n required = mongo_field.required,\n help_text = mongo_field.help_text\n )\n elif mongo_field.__class__.__name__ == 'EmailField':\n return forms.EmailField(\n validators = validators,\n widget = widget,\n label = mongo_field.verbose_name,\n required = mongo_field.required,\n help_text = mongo_field.help_text\n )\n elif mongo_field.__class__.__name__ == 'ImageField':\n return forms.ImageField(\n validators = validators,\n widget = widget,\n label = mongo_field.verbose_name,\n required = mongo_field.required,\n help_text = mongo_field.help_text\n )\n else:\n return None", "def build_custom_user_fields(\n form_cls,\n include_entries=False,\n fields_kwargs=None,\n field_entries_kwargs=None,\n 
blacklisted_items=(),\n):\n if fields_kwargs is None:\n fields_kwargs = {}\n if field_entries_kwargs is None:\n field_entries_kwargs = {}\n\n fields = []\n new_fields = UserFields.query.filter_by(**fields_kwargs).all()\n user_fields = {}\n\n # Only include preexisting values if asked\n if include_entries is True:\n for f in UserFieldEntries.query.filter_by(**field_entries_kwargs).all():\n user_fields[f.field_id] = f.value\n\n for field in new_fields:\n if field.name.lower() in blacklisted_items:\n continue\n\n form_field = getattr(form_cls, f\"fields[{field.id}]\")\n\n # Add the field_type to the field so we know how to render it\n form_field.field_type = field.field_type\n\n # Only include preexisting values if asked\n if include_entries is True:\n initial = user_fields.get(field.id, \"\")\n form_field.data = initial\n if form_field.render_kw:\n form_field.render_kw[\"data-initial\"] = initial\n else:\n form_field.render_kw = {\"data-initial\": initial}\n\n fields.append(form_field)\n return fields", "def ajax_extra_fields_form(request, host_type_id, prefix=\"\", blank=False):\n try:\n host_type = get_object_or_404(HostType, pk=host_type_id)\n except:\n return HttpResponse(\"<tr></tr>\")\n form = AdditionnalFieldForm(host_type=host_type, blank=bool(blank),\n prefix=prefix)\n return HttpResponse(form.as_table())", "def _form(self, r, widget, **attr):\n\n widget_get = widget.get\n\n label = widget_get(\"label\", \"\")\n # Activate if-required\n #if label and isinstance(label, str):\n if label:\n label = current.T(label)\n icon = widget_get(\"icon\", \"\")\n if icon:\n icon = ICON(icon)\n\n context = widget_get(\"context\", None)\n tablename = widget_get(\"tablename\", None)\n resource, context = self._resolve_context(r, tablename, context)\n\n # Widget filter option\n widget_filter = widget_get(\"filter\", None)\n if widget_filter:\n resource.add_filter(widget_filter)\n\n record = resource.select([\"id\"], limit=1, as_rows=True).first()\n if record:\n record_id = record.id\n else:\n record_id = None\n\n if record_id:\n readonly = not current.auth.s3_has_permission(\"update\", tablename, record_id)\n else:\n readonly = not current.auth.s3_has_permission(\"create\", tablename)\n\n sqlform = widget.get(\"sqlform\", None)\n if not sqlform:\n sqlform = resource.get_config(\"crud_form\")\n if not sqlform:\n from ..ui import S3SQLDefaultForm\n sqlform = S3SQLDefaultForm()\n\n get_config = current.s3db.get_config\n if record_id:\n # Update form\n onvalidation = get_config(tablename, \"create_onvalidation\") or \\\n get_config(tablename, \"onvalidation\")\n onaccept = get_config(tablename, \"create_onaccept\") or \\\n get_config(tablename, \"onaccept\")\n else:\n # Create form\n onvalidation = get_config(tablename, \"create_onvalidation\") or \\\n get_config(tablename, \"onvalidation\")\n onaccept = get_config(tablename, \"create_onaccept\") or \\\n get_config(tablename, \"onaccept\")\n\n form = sqlform(request = r,\n resource = resource,\n record_id = record_id,\n readonly = readonly,\n format = \"html\",\n onvalidation = onvalidation,\n onaccept = onaccept,\n )\n _class = self._lookup_class(r, widget)\n\n # Render the widget\n output = DIV(H4(icon,\n label,\n _class = \"profile-sub-header\",\n ),\n DIV(form,\n _class = \"form-container thumbnail\",\n ),\n _class = _class,\n )\n\n return output", "def formField(type_, caption, action, object_='', instruction='', *args, **kwargs):\n html = '<div class=\"block\">'\n html += '<strong>%s</strong>' % caption\n if instruction:\n html += 
elementInstruc(instruction)\n html += '</div>'\n html += '<div class=\"block\">'\n if type_ == 'select':\n html += select(action, object_, *args, **kwargs)\n elif type_ == 'richTextArea':\n html += richTextArea(action+object_, *args, **kwargs)\n elif type_ == 'textArea':\n html += textArea(action+object_, *args, **kwargs)\n elif type_ == 'textInput':\n html += textInput(action+object_, *args, **kwargs)\n elif type_ == 'checkbox':\n html += checkbox(*args, **kwargs)\n html += '</div>'\n return html", "def get_connection_form_widgets() -> dict[str, Any]:\n from flask_appbuilder.fieldwidgets import BS3TextFieldWidget\n from flask_babel import lazy_gettext\n from wtforms import PasswordField, StringField\n\n return {\n \"fully_qualified_namespace\": StringField(\n lazy_gettext(\"Fully Qualified Namespace\"), widget=BS3TextFieldWidget()\n ),\n \"credential\": PasswordField(lazy_gettext(\"Credential\"), widget=BS3TextFieldWidget()),\n }", "def form_String(request):\n schema = schemaish.Structure()\n schema.add('myStringField', schemaish.String())\n form = formish.Form(schema, 'form')\n return form", "def __init__(self, *args, **kw):\n super(SignupFormExtra, self).__init__(*args, **kw)", "def prepare(self, form):\n \n return form", "def fl_addto_form(ptr_flform):\n _fl_addto_form = library.cfuncproto(\n library.load_so_libforms(), \"fl_addto_form\",\n cty.POINTER(xfdata.FL_FORM), [cty.POINTER(xfdata.FL_FORM)],\\\n \"\"\"FL_FORM * fl_addto_form(FL_FORM * form)\"\"\")\n library.check_if_flinitialized()\n library.verify_flformptr_type(ptr_flform)\n library.keep_elem_refs(ptr_flform)\n retval = _fl_addto_form(ptr_flform)\n return retval", "def get_extra_payload(form):", "def formfield(self, **kwargs): # pylint:disable=arguments-differ\n from .forms import CompositeTypeField\n\n defaults = {\n \"form_class\": CompositeTypeField,\n \"model\": self.Meta.model,\n }\n defaults.update(kwargs)\n\n return super().formfield(**defaults)", "def form_RequiredStringAndFile(request):\n schema = schemaish.Structure()\n schema.add('required', schemaish.String(validator=validatish.Required()))\n schema.add('myFileField', schemaish.File())\n form = formish.Form(schema, 'form')\n form['myFileField'].widget = formish.FileUpload(\n filestore=CachedTempFilestore(),\n show_image_thumbnail=True,\n image_thumbnail_default='/images/nouploadyet.png',\n show_download_link=True\n )\n return form", "def __init__(self, *args, **kwargs):\n super(RegisterForm, self).__init__(*args, **kwargs)\n\n self.fields['nick'].widget.attrs.update({\n 'label': 'Přezdívka',\n 'placeholder': 'Mirek'\n })\n\n self.fields['name'].widget.attrs.update({\n 'label': 'Jméno',\n 'placeholder': 'Mirek'\n })\n\n self.fields['surname'].widget.attrs.update({\n 'label': 'Příjmení',\n 'placeholder': 'Dušín'\n })\n\n self.fields['email'].widget.attrs.update({\n 'label': 'E-mail',\n 'placeholder': 'mirek@rychlesipy.cz'\n })\n\n self.fields['age'].widget.attrs.update({'label': 'Věk'})\n self.fields['age'].initial = 18\n\n self.fields['race'].widget.attrs.update({'label': 'Rasa'})\n self.fields['race'].queryset = Race.objects.filter(\n active=True).only('id', 'name')\n\n self.fields['group'].widget.attrs.update({\n 'label': 'Skupina',\n 'placeholder': 'Rychlé Šípy'\n })\n\n for field in self.fields.keys():\n self.fields[field].widget.attrs.update({\n 'required': self.fields[field].required,\n 'title': '',\n 'class': 'form-control'\n })", "def __init__(self, *args, **kwargs):\n # defaults to \"text\" and \"identity\"\n \n self.text_name = kwargs.pop('text_name', 
'text')\n self.identity_name = kwargs.pop('identity_name', 'identity')\n \n super(EnvayaSMSIncomingForm, self).__init__(*args, **kwargs)\n \n #The following two fields are non-mandatory because actions other than 'incoming' won't have them.\n self.fields[self.text_name] = forms.CharField(required=False)\n self.fields[self.identity_name] = forms.CharField(required=False)\n\n self.fields['phone_number'] = forms.CharField() #which envaya phone forwarded us the msg?\n self.fields['action'] = forms.CharField() #what is the action?", "def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n placeholders = {\n 'business_name': 'Please enter your business name',\n 'user_type': 'Please select the type of user',\n 'phone': 'Phone Number',\n 'postcode': 'Postcode',\n 'city': 'City',\n 'street_address': 'Street Address',\n 'street_address2': 'Street Address 2',\n 'county': 'County',\n 'country': 'Country'\n }\n\n # to force cursor to start in business name field\n self.fields['business_name'].widget.attrs['autofocus'] = True\n for field in self.fields:\n placeholder = placeholders[field]\n self.fields[field].widget.attrs['placeholder'] = False\n self.fields[field].label = placeholder\n self.fields[field].widget.attrs['class'] = 'form-control'", "def addattrs(field, my_attrs):\n my_attrs = my_attrs.split(',')\n my_attrs = dict([attr.split('=') for attr in my_attrs])\n return field.as_widget(attrs=my_attrs)", "def _wrap_form(self, parent_form_class):\n steptitle = pd_mf(u'Add ${name}',\n mapping={'name': self.fti.Title()})\n\n form_class = self._create_form_class(parent_form_class, steptitle)\n\n form_class.__name__ = 'WizardForm: %s' % parent_form_class.__name__\n return form_class", "def add_fields(self, *fields: Field):\n self.fields.extend(fields)", "def listfield_formfield_method(self, **kwargs):\r\n #Largely lifted straight from Field.formfield() in django.models.__init__.py\r\n defaults = {'required': not self.blank, 'label': capfirst(self.verbose_name), 'help_text': self.help_text}\r\n if self.has_default(): #No idea what this does\r\n if callable(self.default):\r\n True\r\n defaults['initial'] = self.default\r\n defaults['show_hidden_initial'] = True\r\n else:\r\n defaults['initial'] = self.get_default()\r\n #if self.choices:\r\n if self.choices:\r\n form_field_class = forms.MultipleChoiceField\r\n defaults['choices'] = self.choices\r\n else:\r\n form_field_class = ListFormField\r\n defaults.update(**kwargs)\r\n return form_field_class(**defaults)", "def form(title, fields, methods, data, icon=None, id=None, **context):\n f = {}\n\n for field_id, _field in fields.items():\n f[field_id] = {\"value\": data.get(field_id, None)}\n f[field_id].update(_field)\n\n result = {\n \"class\": \"form\",\n \"title\": title,\n \"fields\": f,\n \"icon\": icon,\n \"methods\": {method_id: _method for method_id, _method in methods.items()},\n \"context\": context\n }\n\n if id:\n result[\"id\"] = id\n\n return result", "def decorator(field):\n field._jeeves_label_for = field_names\n return field", "def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n placeholders = {\n 'group_id': 'group_id',\n 'first_name': 'first_name',\n 'starter': 'starter',\n 'main': 'main',\n 'dessert': 'dessert',\n 'special_diet': 'special_diet',\n 'requirements': 'requirements',\n }\n\n for field in self.fields:\n placeholder = placeholders[field]\n self.fields[field].widget.attrs['placeholder'] = placeholder\n self.fields[field].widget.attrs['class'] = 'stripe-style-input'", "def 
register_form(self, fid, cls, *args, **keywords):\n keywords[\"name\"] = self.ftitle(keywords[\"name\"])\n self._forms.append((fid, cls, args, keywords, keywords.pop(\"title\"), keywords.pop(\"shortcut\"),\n \"on_load_{}\".format(cls.__name__.lower())))", "def show_fieldsetform_nrf(form):\n return {'form': form, 'required_fields': False}", "def __init__(self, *args, **kwargs):\n\n\t\tsuper(SchadensmeldungForm, self).__init__(*args, **kwargs)\n\n\t\tfor field in self.fields:\n\t\t\tself.fields[field].widget.attrs.update({\n\t\t\t\t'class': 'form-control'\n\t\t\t\t})", "def __init__(self, *args, **kwargs):\n super(UserCreationForm, self).__init__(*args, **kwargs)\n self.fields[\"first_name\"].required = True\n self.fields[\"last_name\"].required = True\n self.fields[\"email\"].required = True", "def __init__(self, *args, **kwargs):\n super(MonkeyTyperForm, self).__init__(*args, **kwargs)\n self.fields['typedField'].widget = forms.Textarea(attrs={'cols': self.instance.dieImage.bitWidth+2,\n 'rows': self.instance.dieImage.bitHeight+2,\n 'style': 'font-family:monospace;'})", "def field_wrapper(field):\n return {'field': field}", "def get_minimal_add_form(self):\r\n\r\n # Get all the required fields and make a modelform for it.\r\n exclude_fields = []\r\n\r\n for field in self.model._meta.fields:\r\n if field.blank:\r\n exclude_fields.append(field.name)\r\n\r\n instance_form = modelform_factory(self.model, form=self.add_form,\r\n exclude=exclude_fields)\r\n\r\n return instance_form", "def _add_fields(self, fields):\n for field in fields:\n self.add(field)", "def introFieldWidgetFactory(field, request):\n return widget.FieldWidget(field, IntroWidget(request))", "def get_form(self, request, obj=None, **kwargs):\n defaults = {}\n if obj is None:\n defaults['form'] = self.add_form\n defaults.update(kwargs)\n return super(UserAdmin, self).get_form(request, obj, **defaults)", "def customize_fields(self, fields):\n\n for field in fields.values():\n\n field_type = type(field.field)\n\n if field_type is List or field_type is Set:\n field.widgetFactory = CheckBoxFieldWidget\n\n elif field_type is Choice:\n field.widgetFactory = RadioFieldWidget", "def get_form(self, request, obj=None, **kwargs):\n defaults = {}\n if obj is None:\n defaults['form'] = self.add_form\n defaults.update(kwargs)\n return super(EventAdmin, self).get_form(request, obj, **defaults)", "def __init__(self, corp_app, field_objs, *args, **kwargs):\n self.corp_app = corp_app\n self.field_objs = field_objs\n super(CorpMembForm, self).__init__(*args, **kwargs)\n\n for field in field_objs:\n if field.field_type not in ['section_break', 'page_break']:\n if field.field_name:\n field_key = field.field_name\n else:\n field_key = \"field_%s\" % field.id\n\n # if field is display only, remove it from the form\n # for example, on the edit page, we\n # set corporate_membership_type\n # and payment_method as the display only fields\n if hasattr(field, 'display_only') and field.display_only:\n del self.fields[field_key]\n else:\n # get field class and set field initial\n self.fields[field_key] = field.get_field_class()\n if ((not field.field_name) \\\n or field.field_name == 'authorized_domains') \\\n and self.instance:\n initial = field.get_value(self.instance)\n if field.field_type in [\n 'MultipleChoiceField/django.forms.CheckboxSelectMultiple',\n 'MultipleChoiceField']:\n if initial:\n self.fields[field_key].initial = [\n item.strip() for item in initial.split(',')]\n else:\n self.fields[field_key].initial = initial\n\n #self.fields['captcha'] = 
CaptchaField(label=_('Type the code below'))", "def initialize(context):\n # register field classes\n FieldRegistry.registerField(StandardFields.StringField,\n 'www/StringField.gif')\n FieldRegistry.registerField(StandardFields.CheckBoxField,\n 'www/CheckBoxField.gif')\n FieldRegistry.registerField(StandardFields.IntegerField,\n 'www/IntegerField.gif')\n FieldRegistry.registerField(StandardFields.TextAreaField,\n 'www/TextAreaField.gif')\n FieldRegistry.registerField(StandardFields.RawTextAreaField,\n 'www/TextAreaField.gif')\n FieldRegistry.registerField(StandardFields.LinesField,\n 'www/LinesField.gif')\n FieldRegistry.registerField(StandardFields.ListField,\n 'www/ListField.gif')\n FieldRegistry.registerField(StandardFields.MultiListField,\n 'www/MultiListField.gif')\n FieldRegistry.registerField(StandardFields.RadioField,\n 'www/RadioField.gif')\n FieldRegistry.registerField(StandardFields.MultiCheckBoxField,\n 'www/MultiCheckBoxField.gif')\n FieldRegistry.registerField(StandardFields.PasswordField,\n 'www/PasswordField.gif')\n FieldRegistry.registerField(StandardFields.EmailField,\n 'www/EmailField.gif')\n FieldRegistry.registerField(StandardFields.PatternField,\n 'www/PatternField.gif')\n FieldRegistry.registerField(StandardFields.FloatField,\n 'www/FloatField.gif')\n FieldRegistry.registerField(StandardFields.DateTimeField,\n 'www/DateTimeField.gif')\n FieldRegistry.registerField(StandardFields.FileField,\n 'www/FileField.gif')\n FieldRegistry.registerField(StandardFields.LinkField,\n 'www/LinkField.gif')\n FieldRegistry.registerField(StandardFields.LabelField,\n 'www/LabelField.gif')\n\n # some helper fields\n FieldRegistry.registerField(HelperFields.ListTextAreaField)\n FieldRegistry.registerField(HelperFields.MethodField)\n FieldRegistry.registerField(HelperFields.TALESField)\n\n # some other fields\n FieldRegistry.registerField(\n EmailLinesField.EmailLinesField, 'www/EmailField.gif')\n FieldRegistry.registerField(\n InterfaceField.InterfaceField, 'www/BasicField.gif')\n\n # obsolete field (same as helper; useable but not addable)\n FieldRegistry.registerField(\n StandardFields.RangedIntegerField, 'www/RangedIntegerField.gif')\n\n # register the form itself\n context.registerClass(\n Form.ZMIForm,\n constructors=(Form.manage_addForm,\n Form.manage_add),\n icon='www/Form.gif')\n\n # make Dummy Fields into real fields\n FieldRegistry.initializeFields()\n\n # do initialization of Form class to make fields addable\n Form.initializeForm(FieldRegistry)", "def update_onlineformfield_from_form(dbo, username, post):\n formfieldid = post.integer(\"formfieldid\")\n sql = db.make_update_sql(\"onlineformfield\", \"ID=%d\" % formfieldid, ( \n ( \"FieldName\", post.db_string(\"fieldname\")),\n ( \"FieldType\", post.db_integer(\"fieldtype\")),\n ( \"Label\", post.db_string(\"label\")),\n ( \"DisplayIndex\", post.db_integer(\"displayindex\")),\n ( \"Mandatory\", post.db_boolean(\"mandatory\")),\n ( \"Lookups\", post.db_string(\"lookups\")),\n ( \"Tooltip\", post.db_string(\"tooltip\"))\n ))\n preaudit = db.query(dbo, \"SELECT * FROM onlineformfield WHERE ID = %d\" % formfieldid)\n db.execute(dbo, sql)\n postaudit = db.query(dbo, \"SELECT * FROM onlineformfield WHERE ID = %d\" % formfieldid)\n audit.edit(dbo, username, \"onlineformfield\", audit.map_diff(preaudit, postaudit))", "def __init__(self, *args, **kwargs):\n super(CreateForm, self).__init__(*args, **kwargs)\n\n # set form fields order\n self.fields.keyOrder = ['to_user', 'subject', 'message']", "def set_up_fields(self, fields):\n 
self.fields = {\n 'name': self.name,\n 'email': self.email\n }\n for key in fields.keys():\n # special keys first, not to be used in the template\n if key.upper() == 'CC':\n self.is_cc = fields[key]\n elif key.upper() == 'BCC':\n self.is_bcc = fields[key]\n else:\n self.fields[key] = fields[key]", "def show_sendform(form):\n return {'form': form, 'required_fields': True}", "def db_fields(self):", "def fields(self):\n ...", "def _form_for_type(request, C, defn, add_id_and_rev=False):\n form = build(defn, C, add_id_and_rev=add_id_and_rev,\n widget_registry=_widget_registry(request))\n form.renderer = request.environ['restish.templating'].renderer\n return form", "def form_Password(request):\n schema = schemaish.Structure()\n schema.add('Password', schemaish.String())\n\n form = formish.Form(schema, 'form')\n form['Password'].widget = formish.Password()\n return form", "def IntroFieldWidget(field, request):\n return introFieldWidgetFactory(field, request)", "def __init__(self, handler=None, formdata=None, obj=None, prefix='', **kwargs):\n if handler:\n self._handler = handler\n super(Form, self).__init__(formdata=TornadoInputWrapper(self._handler), obj=obj, prefix=prefix, **kwargs)", "def add_field(request):\n try:\n bpo_topic = BPOTopic.objects.get(id=ObjectId(request.POST.get('topic_id')))\n except Exception as e:\n return HttpResponseBadRequest(e)\n else:\n input_type = request.POST.get('type')\n input_label = request.POST.get('label')\n if input_type == 'date_input':\n bpo_field = BPODateField(\n label=input_label,\n type=input_type,\n )\n elif input_type == 'number_input':\n bpo_field = BPOFloatField(\n label=input_label,\n type=input_type\n )\n elif input_type == 'auto_sum':\n bpo_field = BPOAutoSumField(\n label=input_label,\n type=input_type,\n sub_fields={}\n )\n elif input_type == 'multiple_choice':\n bpo_field = BPOMultipleChoiceField(\n label=input_label,\n type=input_type,\n choices=['New Choice 1', 'New Choice 2']\n )\n else:\n bpo_field = BPOField(\n label=input_label,\n type=input_type,\n )\n bpo_field.save()\n bpo_field_id = ObjectId(bpo_field.id)\n if not bpo_topic.fields:\n bpo_topic.fields = [bpo_field_id]\n else:\n bpo_topic.fields.append(bpo_field_id)\n bpo_topic.save()\n return HttpResponse(json.dumps(bpo_field.to_json), mimetype=\"application/x-javascript\")", "def formfield(self, form_class=HstoreCheckboxInput, **kwargs):\n defaults = {\n 'required': not self.blank,\n 'label': capfirst(self.verbose_name),\n 'help_text': self.help_text,\n 'html_attrs': self.html_attrs,\n }\n\n if self.has_default():\n defaults['initial'] = self.default\n\n if self.choices:\n # Fields with choices get special treatment.\n include_blank = (self.blank or\n not (self.has_default() or 'initial' in kwargs))\n defaults['choices'] = self.get_choices(include_blank=include_blank)\n defaults['coerce'] = self.to_python\n if self.null:\n defaults['empty_value'] = \"\"\n\n defaults.update(kwargs)\n formfield = form_class(**defaults)\n\n if self.html_attrs:\n formfield.widget.build_attrs(self.html_attrs)\n\n return formfield", "def get_connection_form_widgets() -> dict:\n from wtforms import StringField\n from flask_appbuilder.fieldwidgets import BS3TextFieldWidget\n\n return {\n \"extra__ewah_metabase__http_string\": StringField(\n \"Use http instead of https?\",\n widget=BS3TextFieldWidget(),\n )\n }", "def render_fields(form, args):\n output = \"\"\n fields = get_fields(form, args)\n for field in fields:\n output += render_field(field)\n return mark_safe(output)", "def get_form_fields(self, 
exclude_html=False):\n fields = []\n for field in self.form.get_prep_value():\n\n field_type = field['type']\n if exclude_html and field_type == consts.FIELD_TYPE_HTML:\n continue\n\n html_value = field['value'].get('html', None)\n if html_value:\n html_value = RichText(html_value)\n\n rule_action = ''\n rules = field['value'].get('rules', None)\n if rules:\n conditions = rules.get('conditions', None)\n if len(conditions):\n rule_action = rules.get('action')\n\n form_field = self.form_field(\n field_type=field['type'],\n label=field['value']['label'],\n required=field['value'].get('required', False),\n choices=field['value'].get('choices', None),\n help_text=field['value'].get('help_text', None),\n default_value=field['value'].get('default_value', None),\n empty_label=field['value'].get('empty_label', None),\n max_length=field['value'].get('max_length', None),\n display_side_by_side=field['value'].get('display_side_by_side', False),\n display_checkbox_label=field['value'].get('display_checkbox_label', False),\n html_value=html_value,\n rule_action=rule_action,\n )\n\n fields.append(form_field)\n\n return fields", "def __call__(self, func):\n # Set or extend the function's \"custom_fields\" attribute\n func.required_fields = getattr(func, \"required_fields\", {})\n func.required_fields[self.fieldname] = self.input_type\n # The decorated function is unchanged\n return func", "def render_field(field, form, form_style, context, template=None, labelclass=None, show_labels=None, layout_object=None, attrs=None, template_pack=TEMPLATE_PACK):\n if field is None:\n return ''\n with KeepContext(context):\n if hasattr(field, 'render'):\n return force_text(field.render(form, form_style, context, template_pack=template_pack))\n else:\n # In Python 2 form field names cannot contain unicode characters without ASCII mapping\n if PY2:\n # This allows fields to be unicode strings, always they don't use non ASCII\n try:\n if isinstance(field, text_type):\n field = field.encode('ascii').decode()\n # If `field` is not unicode then we turn it into a unicode string, otherwise doing\n # str(field) would give no error and the field would not be resolved, causing confusion\n else:\n field = text_type(field)\n\n except (UnicodeEncodeError, UnicodeDecodeError):\n raise Exception(\"Field '%s' is using forbidden unicode characters\" % field)\n\n # Injecting HTML attributes into field's widget, Django handles rendering these\n bound_field = get_bound_field(context, form, field)\n if not bound_field:\n return ''\n field_instance = bound_field.field\n if attrs is not None:\n widgets = getattr(field_instance.widget, 'widgets', [field_instance.widget])\n\n # We use attrs as a dictionary later, so here we make a copy\n list_attrs = attrs\n if isinstance(attrs, dict):\n list_attrs = [attrs] * len(widgets)\n\n for index, (widget, attr) in enumerate(zip(widgets, list_attrs)):\n if hasattr(field_instance.widget, 'widgets'):\n if 'type' in attr and attr['type'] == \"hidden\":\n field_instance.widget.widgets[index].is_hidden = True\n field_instance.widget.widgets[index] = field_instance.hidden_widget()\n\n field_instance.widget.widgets[index].attrs.update(attr)\n else:\n if 'type' in attr and attr['type'] == \"hidden\":\n field_instance.widget.is_hidden = True\n field_instance.widget = field_instance.hidden_widget()\n\n field_instance.widget.attrs.update(attr)\n\n if hasattr(form, 'rendered_fields'):\n if field not in form.rendered_fields:\n form.rendered_fields.add(field)\n else:\n if not FAIL_SILENTLY:\n raise Exception(\"A field 
should only be rendered once: %s\" % field)\n else:\n logging.warning(\"A field should only be rendered once: %s\" % field, exc_info=sys.exc_info())\n\n if field_instance is None:\n html = ''\n else:\n if template is None:\n if form.crispy_field_template is None:\n template = default_field_template(template_pack)\n else: # FormHelper.field_template set\n template = get_template(form.crispy_field_template)\n else:\n template = get_template(template)\n\n # We save the Layout object's bound fields in the layout object's `bound_fields` list\n if layout_object is not None:\n if hasattr(layout_object, 'bound_fields') and isinstance(layout_object.bound_fields, list):\n layout_object.bound_fields.append(bound_field)\n else:\n layout_object.bound_fields = [bound_field]\n\n context.update({\n 'field': bound_field,\n 'labelclass': labelclass,\n 'flat_attrs': flatatt(attrs if isinstance(attrs, dict) else {}),\n })\n if show_labels is not None:\n context['form_show_labels'] = show_labels\n html = template.render(context)\n\n return force_text(html)", "def updateFields(self):\n super(AdminRulesForm, self).updateFields()\n self.fields['improved_templates'].widgetFactory = CheckBoxFieldWidget\n self.fields['iframe_enabled'].widgetFactory = SingleCheckBoxFieldWidget\n self.fields['js_enabled'].widgetFactory = SingleCheckBoxFieldWidget\n self.fields['inline_images'].widgetFactory = SingleCheckBoxFieldWidget", "def render_field(field, form, form_style, context, template, labelclass=None, layout_object=None, attrs=None):\n FAIL_SILENTLY = False #getattr(settings, 'CRISPY_FAIL_SILENTLY', True)\n\n if not hasattr(form, 'rendered_fields'):\n form.rendered_fields = set()\n\n if hasattr(field, 'render'):\n return field.render(form, form_style, context)\n else:\n # This allows fields to be unicode strings, always they don't use non ASCII\n try:\n if isinstance(field, basestr):\n field = str(field)\n # If `field` is not unicode then we turn it into a unicode string, otherwise doing\n # str(field) would give no error and the field would not be resolved, causing confusion \n else:\n field = str(field)\n \n except (UnicodeEncodeError, UnicodeDecodeError):\n raise Exception(\"Field '%s' is using forbidden unicode characters\" % field)\n\n try:\n # Injecting HTML attributes into field's widget, Django handles rendering these\n field_instance = form.fields[field]\n if attrs is not None:\n field_instance.widget.attrs.update(attrs)\n except KeyError:\n if not FAIL_SILENTLY:\n raise Exception(\"Could not resolve form field '%s'.\" % field)\n else:\n field_instance = None\n logging.warning(\"Could not resolve form field '%s'.\" % field, exc_info=sys.exc_info())\n \n if not field in form.rendered_fields:\n form.rendered_fields.add(field)\n else:\n if not FAIL_SILENTLY:\n raise Exception(\"A field should only be rendered once: %s\" % field)\n else:\n logging.warning(\"A field should only be rendered once: %s\" % field, exc_info=sys.exc_info())\n\n if field_instance is None:\n html = ''\n else:\n bound_field = BoundField(form, field_instance, field)\n\n template = get_template(template)\n\n # We save the Layout object's bound fields in the layout object's `bound_fields` list\n if layout_object is not None:\n layout_object.bound_fields.append(bound_field)\n\n context.update({'field': bound_field, 'labelclass': labelclass, 'flat_attrs': flatatt(attrs or {})})\n html = template.render(context)\n\n return html", "def fl_addto_formbrowser(ptr_flobject, ptr_flform):\n _fl_addto_formbrowser = library.cfuncproto(\n 
library.load_so_libforms(), \"fl_addto_formbrowser\",\n cty.c_int, [cty.POINTER(xfdata.FL_OBJECT),\n cty.POINTER(xfdata.FL_FORM)],\n \"\"\"int fl_addto_formbrowser(FL_OBJECT * ob, FL_FORM * form)\"\"\")\n library.check_if_flinitialized()\n library.verify_flobjectptr_type(ptr_flobject)\n library.verify_flformptr_type(ptr_flform)\n library.keep_elem_refs(ptr_flobject, ptr_flform)\n retval = _fl_addto_formbrowser(ptr_flobject, ptr_flform)\n return retval" ]
[ "0.698078", "0.6600586", "0.659325", "0.6585638", "0.6533927", "0.6412825", "0.6412825", "0.6412825", "0.629907", "0.62634355", "0.62466544", "0.6142649", "0.6102095", "0.60763484", "0.60400486", "0.5991732", "0.5991732", "0.5946256", "0.5897828", "0.5873216", "0.58706456", "0.5866613", "0.5850554", "0.5831659", "0.5808978", "0.5762365", "0.57622373", "0.5726104", "0.5725978", "0.5718044", "0.57108295", "0.56972754", "0.5688911", "0.5668392", "0.5658076", "0.56542546", "0.56346065", "0.56242305", "0.5609186", "0.5602134", "0.5594882", "0.5575164", "0.5573675", "0.55665445", "0.55410516", "0.5536648", "0.55358905", "0.5523766", "0.5520959", "0.55007774", "0.5499924", "0.54849637", "0.5472186", "0.54675007", "0.5450101", "0.54463077", "0.5442748", "0.5442057", "0.5437748", "0.5424948", "0.5422803", "0.54131037", "0.54105693", "0.5402336", "0.54020375", "0.53964126", "0.5395508", "0.5385171", "0.53761935", "0.53752047", "0.53676724", "0.5358742", "0.53575504", "0.535594", "0.53473794", "0.5345711", "0.5344555", "0.53418046", "0.53328466", "0.53297263", "0.532149", "0.5311036", "0.53108996", "0.53101283", "0.5300348", "0.5286924", "0.5281398", "0.5280828", "0.5280501", "0.52733445", "0.5270313", "0.5264804", "0.5264519", "0.5263771", "0.52633864", "0.5253768", "0.52519935", "0.5244578", "0.5239548", "0.523067" ]
0.6660687
1
Build the appropriate field so we can render it via the extra property. Add field_type so Jinja knows how to render it.
def build_registration_code_field(form_cls):
    # Only expose the field when a registration code is configured.
    if Configs.registration_code:
        field = getattr(form_cls, "registration_code")  # noqa B009
        # Tag the field with a field_type so the Jinja template knows how to render it.
        field.field_type = "text"
        return [field]
    else:
        return []
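A minimal, hypothetical sketch (not part of the record above, and independent of the form library its snippet assumes) of how a Jinja2 template could branch on the field_type attribute that the function sets; the SimpleNamespace stand-in field and the markup are illustrative assumptions, not taken from the source:

from types import SimpleNamespace
from jinja2 import Environment

# Illustrative stand-in for the field object the function above would return.
field = SimpleNamespace(field_type="text", name="registration_code", label="Registration Code")

env = Environment(autoescape=True)
template = env.from_string(
    "{% for f in fields %}"
    "{% if f.field_type == 'text' %}"
    '<label>{{ f.label }}</label> <input type="text" name="{{ f.name }}">'
    "{% endif %}"
    "{% endfor %}"
)
print(template.render(fields=[field]))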
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def render_field(field, bulk_nullable=False, label=None):\n return {\n 'field': field,\n 'label': label or field.label,\n 'bulk_nullable': bulk_nullable,\n }", "def field_wrapper(field):\n return {'field': field}", "def build_standard_field(self, field_name, model_field_type):\n field_mapping = self.serializer_field_mapping\n field_class = field_mapping[model_field_type]\n field_kwargs = get_field_kwargs(field_name, model_field_type)\n\n if \"choices\" in field_kwargs:\n # Fields with choices get coerced into `ChoiceField`\n # instead of using their regular typed field.\n field_class = self.serializer_choice_field\n # Some model fields may introduce kwargs that would not be valid\n # for the choice field. We need to strip these out.\n # Eg. models.DecimalField(max_digits=3, decimal_places=1, choices=DECIMAL_CHOICES)\n valid_kwargs = {\n \"read_only\",\n \"write_only\",\n \"required\",\n \"default\",\n \"initial\",\n \"source\",\n \"label\",\n \"help_text\",\n \"style\",\n \"error_messages\",\n \"validators\",\n \"allow_null\",\n \"allow_blank\",\n \"choices\",\n }\n for key in list(field_kwargs):\n if key not in valid_kwargs:\n field_kwargs.pop(key)\n\n if not issubclass(field_class, fields.CharField) and not issubclass(\n field_class, fields.ChoiceField\n ):\n # `allow_blank` is only valid for textual fields.\n field_kwargs.pop(\"allow_blank\", None)\n\n return field_class, field_kwargs", "def field(self, fields, field):\n\n values = self.values(fields)\n\n if not self.satisfied(fields, field, values):\n if field[\"name\"] in fields.values:\n del fields.values[field[\"name\"]]\n return\n\n default = field.get(\"default\")\n\n if isinstance(default, str):\n default = self.env.from_string(field[\"default\"]).render(**values)\n\n fields.append({**field, \"default\": default})", "def __createField(self, field):\n name = field['name']\n fType = field['type']\n fieldLength = None\n if 'shape' in name.lower():\n return\n elif \"String\" in fType:\n fieldType = \"TEXT\"\n fieldLength = field['length']\n elif \"Date\" in fType:\n fieldType = \"DATE\"\n elif \"SmallInteger\" in fType:\n fieldType = \"SHORT\"\n elif \"Integer\" in fType:\n fieldType = \"LONG\"\n elif \"Double\" in fType:\n fieldType = \"DOUBLE\"\n elif \"Single\" in fType:\n fieldType = \"FLOAT\"\n else:\n fieldType = \"Unknown\"\n featureClass = self.featureClassLocation + \"\\\\\" + self.name\n validatedName = arcpy.ValidateFieldName(name, self.featureClassLocation)\n arcpy.AddField_management(in_table=featureClass, field_name=name, field_type=fieldType, field_length=fieldLength)", "def createField(schemaName, field):\n# print(field.domain)\n# print(field.name, field.domain if isinstance(field.domain, str) else field.domain.type)\n# print(field.__dict__)\n return \"\\\"{name}\\\" {type_}\".format(\n name = field.name,\n type_ = '\"' + schemaName + '\".\"' + field.domain + '\"' if isinstance(field.domain, str) else getType(field.domain)\n )", "def boots_field(field):\n\n field.field.widget.attrs['placeholder'] = field.label\n\n if type(field.field) in BOOTSTRAP_TEMPLATE_SWITCH:\n t = template.loader.get_template(BOOTSTRAP_TEMPLATE_SWITCH[type(field.field)])\n else:\n t = template.loader.get_template(\"bootstrap_tags/form_field.html\")\n\n return t.render(template.Context({\"field\": field}))", "def add_field(self, field, field_data):\n self.extra_fields[field] = field_data", "def make_fields(self):\n for name, prop in self.edit:\n instance_value = self.model.get(name)\n post_value = self.data[name] if (self.data and 
self.data.has_key(name)) else instance_value\n form_field_class = self.get_field_type(prop)\n form_field = form_field_class(model=self.model, property=prop, name=name, instance_value=instance_value, post_value=post_value)\n self.add(form_field)", "def gen_config_field(name_of_field, name_of_type, the_type):\n return _gen_basic_field(name_of_field, name_of_type, the_type)", "def create_field(dj_field, **kwargs):\n if isinstance(dj_field, dj_models.OneToOneField):\n return field.OneToOne.from_dj_field(dj_field, **kwargs)\n elif isinstance(dj_field, dj_models.ForeignKey):\n return field.ForeignKey.from_dj_field(dj_field, **kwargs)\n elif isinstance(dj_field, dj_models.ManyToManyField):\n return field.ManyToMany.from_dj_field(dj_field, **kwargs)\n else:\n return field.Field.from_dj_field(dj_field, **kwargs)", "def render_field(field, template=None):\n classes = ['field', field.field.__class__.__name__]\n if field.errors:\n classes.append('with_errors')\n if field.field.required:\n classes.append('required')\n \n if field.help_text:\n help_text_html = u'<p class=\"help\">%s</p>' % field.help_text\n else:\n help_text_html = u''\n \n field_templates = [\n 'forms/%s.html' % field.field.__class__.__name__,\n 'forms/field.html',\n ]\n if template:\n if '.' in template:\n field_templates.insert(0, template)\n else:\n field_templates.insert(0, 'forms/%s_field.html' % template)\n data = {\n 'classes': \" \".join(classes),\n 'label': field.label_tag(),\n 'errors': field.errors,\n 'field': field,\n 'help_text': help_text_html,\n 'field_id': '#TODO'\n }\n rendered_field = render_to_string(field_templates, data)\n return mark_safe(rendered_field)", "def add_field(self, field_data):\n def_field = {'id':None,\n 'ref':None,\n 'posx':'0',\n 'posy':'0',\n 'size':'50',\n 'text_orientation':'H',\n 'visible':'V',\n 'text_align':'L',\n 'props':'CNN'\n }\n\n field = dict(list(def_field.items()) + list(field_data.items()))\n #field['id'] = str(len(self.fields))\n\n self.fields.append(field)\n return field", "def build_custom_user_fields(\n form_cls,\n include_entries=False,\n fields_kwargs=None,\n field_entries_kwargs=None,\n blacklisted_items=(),\n):\n if fields_kwargs is None:\n fields_kwargs = {}\n if field_entries_kwargs is None:\n field_entries_kwargs = {}\n\n fields = []\n new_fields = UserFields.query.filter_by(**fields_kwargs).all()\n user_fields = {}\n\n # Only include preexisting values if asked\n if include_entries is True:\n for f in UserFieldEntries.query.filter_by(**field_entries_kwargs).all():\n user_fields[f.field_id] = f.value\n\n for field in new_fields:\n if field.name.lower() in blacklisted_items:\n continue\n\n form_field = getattr(form_cls, f\"fields[{field.id}]\")\n\n # Add the field_type to the field so we know how to render it\n form_field.field_type = field.field_type\n\n # Only include preexisting values if asked\n if include_entries is True:\n initial = user_fields.get(field.id, \"\")\n form_field.data = initial\n if form_field.render_kw:\n form_field.render_kw[\"data-initial\"] = initial\n else:\n form_field.render_kw = {\"data-initial\": initial}\n\n fields.append(form_field)\n return fields", "def get_field(self, field):\n return self.extra_fields[field]", "def to_field(obj):\r\n\r\n\r\n if isinstance(obj, Field):\r\n field = obj\r\n else:\r\n d = { \"storage_type\": \"unknown\" }\r\n\r\n if isinstance(obj, basestring):\r\n d[\"name\"] = obj\r\n elif type(obj) == tuple or type(obj) == list:\r\n d[\"name\"] = obj[0]\r\n try:\r\n d[\"storage_type\"] = obj[1]\r\n try:\r\n 
d[\"analytical_type\"] = obj[2]\r\n except:\r\n pass\r\n except:\r\n pass\r\n else: # assume dictionary\r\n d[\"name\"] = obj[\"name\"]\r\n d[\"label\"] = obj.get(\"label\")\r\n d[\"storage_type\"] = obj.get(\"storage_type\")\r\n d[\"analytical_type\"] = obj.get(\"analytical_type\")\r\n d[\"adapter_storage_type\"] = obj.get(\"adapter_storage_type\")\r\n\r\n if \"analytical_type\" not in d:\r\n storage_type = d.get(\"storage_type\")\r\n if storage_type:\r\n deftype = default_analytical_types.get(storage_type)\r\n d[\"analytical_type\"] = deftype or \"typeless\"\r\n else:\r\n d[\"analytical_type\"] = \"typeless\"\r\n\r\n field = Field(**d)\r\n return field", "def build_string_field(self, field_name: str, field: dict):\n\n field_layout = [sg.Text(self.build_label_text(field_name, field), size=(15, 1)),\n sg.InputText(field.get(\"default\"), key=field_name)]\n\n return field_layout", "def add(self, field_create_information):\n field = Field.create_field_from_type(self.context, field_create_information)\n self.add_child(field)\n qry = CreateEntityQuery(self, field, field)\n self.context.add_query(qry)\n return field", "def make_field(field):\n\n if \"time\" in field:\n return TimeField(field)\n if \"zd\" in field:\n return RadianField(field)\n else:\n return SimpleField(field)", "def _gen_basic_field(name_of_field, name_of_type, the_type):\n def validate(self, x):\n return None if x is None else the_type(x)\n\n doc = \"A field which can be {name_of_type} or None\".format(name_of_type=name_of_type)\n\n return Field(name_of_field, (), {'validate': validate, '__doc__': doc})", "def __init__(self, field):\n super().__init__()\n self.field = str(field)", "def output_field(self):\n Field = self.original_field.__class__\n if isinstance(self.original_field, fields.CharField):\n return Field(max_length=self.original_field.max_length)\n\n return Field()", "def get_field(entry, field):\n\n if field.name in entry.field_dict:\n if field.choices:\n return getattr(entry.object, \"get_%s_display\" % field.name)()\n return entry.field_dict[field.name]\n else:\n return settings.TEMPLATE_STRING_IF_INVALID", "def add_field(self, **kwargs):\n field = {\n 'name': kwargs.get('name'),\n 'value': kwargs.get('value'),\n 'inline': kwargs.get('inline', False)\n }\n\n self.fields.append(field)", "def create_field(self, label, value_type, key=None):\n payload = self._build_params(label=label, value_type=value_type, key=key)\n return Field.deserialize(self._post('fields', None, payload))", "def _create_fields(property_, alias_dictionary):\n fields = []\n # Only generate properties that have a field template\n if property_['field_template'] is not None:\n # If the property is independent, add the single-bit sized isInherited flag\n # to the list of Fields as well.\n if property_['independent']:\n fields.append(_create_inherited_flag_field(property_))\n\n fields.append(_create_property_field(property_, alias_dictionary))\n\n return fields", "def _generate(self, custom_data: typing.Dict) -> typing.Dict:\n info = {}\n for field in self.fields:\n if field.name in custom_data:\n info[field.name] = custom_data[field.name]\n else:\n info[field.name] = field.generate(info)\n\n return info", "def __update_custom_fieldtype_settings(self,\n eachfield, #field etree\n ):\n\n # xml attributes\n TYPE = \"type\"\n READABLE = \"readable\"\n WRITABLE = \"writable\"\n LABEL = \"label\"\n HINT = \"comment\"\n DEFAULT = \"default\"\n LINES = \"lines\"\n BOXES = \"boxes\"\n HASOPTIONS = \"has_options\"\n\n fieldtype = eachfield.attrib.get(TYPE)\n 
field_property = self.custom_fieldtype_properties.get(fieldtype, {})\n\n cust_fieldtype = fieldtype_property.get(\"fieldtype\", None)\n cust_readable = fieldtype_property.get(\"readable\", None)\n cust_writable = fieldtype_property.get(\"writable\", None)\n cust_label = fieldtype_property.get(\"label\", None)\n cust_hint = fieldtype_property.get(\"hint\", None)\n cust_default = fieldtype_property.get(\"default\", None)\n cust_lines = fieldtype_property.get(\"lines\", None)\n cust_boxes = fieldtype_property.get(\"boxes\", None)\n cust_has_options = fieldtype_property.get(\"has_options\", None)\n cust_options = fieldtype_property.get(\"options\", None)\n \n if cust_fieldtype:\n if cust_fieldtype != None:\n eachfield.set(TYPE, cust_fieldtype)\n if cust_readable != None:\n eachfield.set(READABLE, cust_readable)\n if cust_writable != None:\n eachfield.set(WRITABLE, cust_writable)\n if cust_label != None:\n eachfield.set(LABEL, cust_label)\n if cust_hint != None:\n eachfield.set(HINT, cust_hint)\n if cust_default != None:\n eachfield.set(DEFAULT, cust_default)\n if cust_lines != None:\n eachfield.set(LINES, cust_lines)\n if cust_boxes != None:\n eachfield.set(BOXES, cust_boxes)\n if cust_has_options != None:\n eachfield.set(HASOPTIONS, cust_has_options)\n if cust_options != None:\n opt_available = eachfield.getchildren()\n if len(opt_available) == 0:\n eachfield.append(cust_options)\n elif len(opt_available) == 1:\n eachfield.remove(opt_available[0])\n eachfield.append(cust_options)", "def render_field(field, form, form_style, context, template, labelclass=None, layout_object=None, attrs=None):\n FAIL_SILENTLY = False #getattr(settings, 'CRISPY_FAIL_SILENTLY', True)\n\n if not hasattr(form, 'rendered_fields'):\n form.rendered_fields = set()\n\n if hasattr(field, 'render'):\n return field.render(form, form_style, context)\n else:\n # This allows fields to be unicode strings, always they don't use non ASCII\n try:\n if isinstance(field, basestr):\n field = str(field)\n # If `field` is not unicode then we turn it into a unicode string, otherwise doing\n # str(field) would give no error and the field would not be resolved, causing confusion \n else:\n field = str(field)\n \n except (UnicodeEncodeError, UnicodeDecodeError):\n raise Exception(\"Field '%s' is using forbidden unicode characters\" % field)\n\n try:\n # Injecting HTML attributes into field's widget, Django handles rendering these\n field_instance = form.fields[field]\n if attrs is not None:\n field_instance.widget.attrs.update(attrs)\n except KeyError:\n if not FAIL_SILENTLY:\n raise Exception(\"Could not resolve form field '%s'.\" % field)\n else:\n field_instance = None\n logging.warning(\"Could not resolve form field '%s'.\" % field, exc_info=sys.exc_info())\n \n if not field in form.rendered_fields:\n form.rendered_fields.add(field)\n else:\n if not FAIL_SILENTLY:\n raise Exception(\"A field should only be rendered once: %s\" % field)\n else:\n logging.warning(\"A field should only be rendered once: %s\" % field, exc_info=sys.exc_info())\n\n if field_instance is None:\n html = ''\n else:\n bound_field = BoundField(form, field_instance, field)\n\n template = get_template(template)\n\n # We save the Layout object's bound fields in the layout object's `bound_fields` list\n if layout_object is not None:\n layout_object.bound_fields.append(bound_field)\n\n context.update({'field': bound_field, 'labelclass': labelclass, 'flat_attrs': flatatt(attrs or {})})\n html = template.render(context)\n\n return html", "def _make_field(index, 
field_desc, names):\n field_schema = schema_from_json_data(\n json_data=field_desc['type'],\n names=names,\n )\n other_props = (\n dict(filter_keys_out(items=field_desc, keys=FIELD_RESERVED_PROPS)))\n return Field(\n data_type=field_schema,\n name=field_desc['name'],\n index=index,\n has_default=('default' in field_desc),\n default=field_desc.get('default', _NO_DEFAULT),\n order=field_desc.get('order', None),\n doc=field_desc.get('doc', None),\n other_props=other_props,\n )", "def PopulateCommonFieldValues(self, field, mojom_field):\n field.name = mojom_field.decl_data.short_name\n field.kind = self.KindFromMojom(mojom_field.type)\n field.attributes = self.AttributesFromMojom(mojom_field)", "def _new_field(self):\n field = self.domain.new_field()\n return field", "def add_deal_field(embed_dict, info_dict, field_key, heading):\n if field_key in info_dict:\n field_dict = info_dict[field_key]\n field = {\n 'name': heading,\n 'value': \"{value} ({percent} off) on {store}\".format(\n value=field_dict.get('value', '??'),\n percent=field_dict.get('percent', '0%'), # TODO don't default to 0% off\n store=field_dict.get('store', '_unknown_')\n ),\n }\n embed_dict['fields'].append(field)", "def create_field(self, field, dim_translation=None):\n raise NotImplementedError", "def render_custom_fields(form):\n return {\n 'form': form,\n }", "def makeField(self,field_name,field_type,field_precision,field_scale,field_length):\n \n new_field = self.GP.CreateObject(\"field\")\n new_field.Name = field_name\n new_field.Type = field_type\n new_field.Precision = field_precision\n new_field.Scale = field_scale\n new_field.Length = field_length\n new_field.IsNullable = True\n \n return new_field", "def customize_fields(self, fields):\n\n for field in fields.values():\n\n field_type = type(field.field)\n\n if field_type is List or field_type is Set:\n field.widgetFactory = CheckBoxFieldWidget\n\n elif field_type is Choice:\n field.widgetFactory = RadioFieldWidget", "def render_field(field, form, form_style, context, template=None, labelclass=None, show_labels=None, layout_object=None, attrs=None, template_pack=TEMPLATE_PACK):\n if field is None:\n return ''\n with KeepContext(context):\n if hasattr(field, 'render'):\n return force_text(field.render(form, form_style, context, template_pack=template_pack))\n else:\n # In Python 2 form field names cannot contain unicode characters without ASCII mapping\n if PY2:\n # This allows fields to be unicode strings, always they don't use non ASCII\n try:\n if isinstance(field, text_type):\n field = field.encode('ascii').decode()\n # If `field` is not unicode then we turn it into a unicode string, otherwise doing\n # str(field) would give no error and the field would not be resolved, causing confusion\n else:\n field = text_type(field)\n\n except (UnicodeEncodeError, UnicodeDecodeError):\n raise Exception(\"Field '%s' is using forbidden unicode characters\" % field)\n\n # Injecting HTML attributes into field's widget, Django handles rendering these\n bound_field = get_bound_field(context, form, field)\n if not bound_field:\n return ''\n field_instance = bound_field.field\n if attrs is not None:\n widgets = getattr(field_instance.widget, 'widgets', [field_instance.widget])\n\n # We use attrs as a dictionary later, so here we make a copy\n list_attrs = attrs\n if isinstance(attrs, dict):\n list_attrs = [attrs] * len(widgets)\n\n for index, (widget, attr) in enumerate(zip(widgets, list_attrs)):\n if hasattr(field_instance.widget, 'widgets'):\n if 'type' in attr and attr['type'] == 
\"hidden\":\n field_instance.widget.widgets[index].is_hidden = True\n field_instance.widget.widgets[index] = field_instance.hidden_widget()\n\n field_instance.widget.widgets[index].attrs.update(attr)\n else:\n if 'type' in attr and attr['type'] == \"hidden\":\n field_instance.widget.is_hidden = True\n field_instance.widget = field_instance.hidden_widget()\n\n field_instance.widget.attrs.update(attr)\n\n if hasattr(form, 'rendered_fields'):\n if field not in form.rendered_fields:\n form.rendered_fields.add(field)\n else:\n if not FAIL_SILENTLY:\n raise Exception(\"A field should only be rendered once: %s\" % field)\n else:\n logging.warning(\"A field should only be rendered once: %s\" % field, exc_info=sys.exc_info())\n\n if field_instance is None:\n html = ''\n else:\n if template is None:\n if form.crispy_field_template is None:\n template = default_field_template(template_pack)\n else: # FormHelper.field_template set\n template = get_template(form.crispy_field_template)\n else:\n template = get_template(template)\n\n # We save the Layout object's bound fields in the layout object's `bound_fields` list\n if layout_object is not None:\n if hasattr(layout_object, 'bound_fields') and isinstance(layout_object.bound_fields, list):\n layout_object.bound_fields.append(bound_field)\n else:\n layout_object.bound_fields = [bound_field]\n\n context.update({\n 'field': bound_field,\n 'labelclass': labelclass,\n 'flat_attrs': flatatt(attrs if isinstance(attrs, dict) else {}),\n })\n if show_labels is not None:\n context['form_show_labels'] = show_labels\n html = template.render(context)\n\n return force_text(html)", "def field_type(self):\n return \"\"", "def get_field_info(self, field):\n field_info = super(SimpleMetadata, self).get_field_info(field)\n if self.should_detail_choices(field, field_info):\n field_info['choices'] = [\n {\n 'value': choice_value,\n 'display_name': force_text(choice_name, strings_only=True)\n }\n for choice_value, choice_name in field.choices.items()\n ]\n\n return field_info", "def build_date_field(self, field_name: str, field: dict):\n now = (datetime.datetime.now()).strftime(\n self.config.get(\"date_format\"))\n field_layout = [sg.Text(self.build_label_text(field_name, field), size=(15, 1)),\n sg.InputText(now, key=field_name,\n enable_events=False, visible=True),\n sg.CalendarButton('Calendar', target=field_name,\n key='CALENDAR', format=(self.config.get(\"date_format\")))]\n\n return field_layout", "def get_field(self, bib_entry, field):\n output = bib_entry.fields[field] if field in bib_entry.fields else \"\"\n return self.strip_braces(output)", "def getFieldValue(self, field, no_html=False, external_id=True, depth=1, optimize=False):\n if field[\"type\"] == \"category\":\n if field[\"config\"][\"settings\"][\"multiple\"]:\n values = []\n for category in field[\"values\"]:\n values.append(category[\"value\"][\"text\"])\n return values\n else:\n return field[\"values\"][0][\"value\"][\"text\"]\n elif field[\"type\"] == \"image\":\n values = []\n for image in field['values']:\n values.append([image[\"value\"][\"mimetype\"], image[\"value\"][\"file_id\"]])\n return values\n elif field[\"type\"] == \"date\":\n return field[\"values\"][0]\n elif field[\"type\"] == \"app\":\n itemID = field[\"values\"][0][\"value\"][\"item_id\"]\n appID = field[\"values\"][0][\"value\"][\"app\"][\"app_id\"]\n if depth<=0:\n return itemID\n else:\n if optimize:#Si es necesario optimizar la carga del item\n try: #Intenta buscar la lista de items como un atributo en self\n items = 
getattr(self, str(appID))\n except AttributeError:\n #Como no los encontró, crea una nueva PodioAPI con la appID de destino y le pide los items\n if self.client:\n nested_api = self\n else:\n try:\n nested_api = self.__class__(appID)\n except: #TODO: Especificar la excepcion que es de tipo \"DoesNotExist\"\n raise Exception(\"Hubo un error creando el nuevo objeto 'PodioApi' para el item relacionado con app_id %s. Por favor agregar el app_id y el app_token de esa aplicacion a la base de datos\" % appID)\n items = nested_api.get_filtered_items(None, depth=depth-1)\n #Luego crea el atributo para que esta llamada no se repita\n setattr(self, str(appID), items)\n #Ya teniendo a todos los items, busca entre la lista aquel cuya ID es igual al item ID de la referencia, y lo pone como valor del campo.\n item = None\n for i in items:\n if i[\"item\"] == int(itemID):\n item = i\n\n else:\n data = self._client.Item.find(int(itemID))\n if not external_id:\n item = self.make_dict(data, external_id=external_id, depth=depth-1)\n else:\n item = self.makeDict(data, nested=True)\n return item\n elif field[\"type\"] == \"text\":\n text = field[\"values\"][0][\"value\"]\n if no_html and field[\"config\"][\"settings\"][\"format\"] == 'html':\n print (text.encode('utf-8'))\n html_text = BeautifulSoup(text, \"html5lib\")\n for p_tag in html_text.find_all('p'):\n p_tag.unwrap()\n for br_tag in html_text.find_all('br'):\n br_tag.name=\"text:line-break\"\n html_text.find('html').unwrap()\n html_text.find('head').unwrap()\n html_text.find('body').unwrap()\n text = unicode(html_text)\n #text = strip_tags(text)\n return text\n elif field[\"type\"] == \"embed\":\n return field[\"values\"][0][\"embed\"][\"url\"]\n else:\n #print field[\"type\"]\n return field[\"values\"][0][\"value\"]", "def _add_type_specific_repr_fields(self, repr_parts):", "def _generate_form_fields(self):\n params = list(filter(lambda x: (x.precedence is None or x.precedence >= 0) and not x.constant,\n self.param.params().values()))\n for p in sorted(params, key=lambda p: p.precedence or 9999):\n # TODO: Pass p.__dict__ as second argument instead of arbitrary\n p_name = p.name\n\n # Preserve param tuple type.\n if self.data:\n if isinstance(getattr(self.param, p.name), tuple):\n p.default = tuple(self.data.getlist(p.name))\n\n # Preserve initial options for Selector\n if isinstance(self.param.params()[p_name], (param.FileSelector, param.MultiFileSelector)):\n p.default = \"\"\n\n self.fields[p_name] = self.widget_map[type(p)](self.param, p, p.name)\n self.fields[p_name].label = p.name.replace(\"_\", \" \").title()\n if self.read_only is None:\n widget_attribute = {'class': 'form-control'}\n else:\n # TODO: Should this be readonly instead of disable?\n widget_attribute = {'class': 'form-control', 'disabled': self.read_only}\n self.fields[p_name].widget.attrs.update(widget_attribute)\n self.fields[p_name].required = not self.param.params()[p_name].allow_None\n self.fields[p_name].disabled = self.param.params()[p_name].constant\n self.fields[p_name].help_text = self.param.params()[p_name].doc\n # self.fields = self.base_fields", "def get_expected_format(self, setup):\n form = setup.pop('form', self.form)\n as_type = setup['as_type']\n setup.update(attrs='')\n alt_field_info = {}\n if issubclass(self.form_class, FormOverrideMixIn):\n size_default = form.get_overrides().get('_default_', {}).get('size', None)\n override_attrs = '' if not size_default else f'size=\"{size_default}\" '\n setup.update(attrs=override_attrs)\n alt_field_info = 
self.form.get_alt_field_info()\n if issubclass(self.form_class, FocusMixIn): # has method: assign_focus_field\n focused = getattr(self.form, 'given_focus', None) or getattr(self.form, 'named_focus', None)\n if not focused:\n ls = [name for name, field in self.form.fields.items()\n if not field.disabled and not isinstance(field.widget, (HiddenInput, MultipleHiddenInput))]\n focused = ls[0] if ls else None\n if focused: # Using alt_field_info to track assigning focus here, but 'autofocus' is not a field property.\n alt_field_info[focused] = alt_field_info.get(focused, {})\n alt_field_info[focused].update({'autofocus': True})\n field_formats = FIELD_FORMATS.copy()\n if issubclass(self.form_class, ComputedUsernameMixIn):\n name_for_email = form.name_for_email or form._meta.model.get_email_field() or 'email'\n name_for_user = form.name_for_user or form._meta.model.USERNAME_FIELD or 'username'\n if 'email' in field_formats:\n field_formats[name_for_email] = field_formats.pop('email')\n if 'username' in field_formats:\n field_formats[name_for_user] = field_formats.pop('username')\n order = ['first_name', 'last_name', name_for_email, name_for_user, 'password1', 'password2', ]\n form.order_fields(order)\n form_list, hidden_list = [], []\n top_errors = form.non_field_errors().copy() # If data not submitted, this will trigger full_clean method.\n if issubclass(self.form_class, FormFieldsetMixIn):\n setup['error_kwargs'] = self.make_error_kwargs(setup)\n if top_errors:\n html_args = setup['error_kwargs']['html_args']\n col_attr = ' id=\"top_errors\"'\n row_attr = ''\n data = ' '.join(top_errors)\n form_col_count = setup['error_kwargs']['form_col_count']\n error_row = form.make_headless_row(html_args, data, form_col_count, col_attr, row_attr)\n form_list.append(error_row)\n elif top_errors:\n error_row = self.error_format(as_type, top_errors, **setup.get('error_kwargs', {}))\n form_list.append(error_row)\n\n for name, field in form.fields.items():\n if isinstance(field.widget, (HiddenInput, MultipleHiddenInput, )):\n hide_re = DEFAULT_RE.copy()\n hide_re.update({'name': name, 'input_type': 'hidden', 'end_tag': ''})\n hide_re['attrs'] = f'value=\"{field.initial}\" '\n txt = BASE_INPUT % hide_re\n hidden_list.append(txt) # TODO: Account for hidden field errors being added to top errors.\n continue\n cur_replace = DEFAULT_RE.copy()\n cur_replace.update({'name': name, 'pretty': field.label or pretty_name(name)})\n cur_replace['required'] = REQUIRED if field.required else ''\n if field.disabled:\n cur_replace['required'] += 'disabled '\n cur_replace['attrs'] = self.get_format_attrs(name, field, alt_field_info)\n if isinstance(field, EmailField) and name not in field_formats:\n cur_replace['input_type'] = 'email'\n elif isinstance(field.widget, Textarea):\n cur_replace['initial'] = getattr(field, 'initial', None) or ''\n attrs = ''\n cols = field.widget.attrs.get('cols', None)\n rows = field.widget.attrs.get('rows', None)\n if cols:\n attrs += f'cols=\"{cols}\" '\n if rows:\n attrs += f'rows=\"{rows}\" '\n cur_replace['attrs'] = attrs\n field_formats[name] = AREA_TXT\n elif isinstance(field.widget, (CheckboxSelectMultiple, RadioSelect)):\n input_type = 'radio' if isinstance(field.widget, RadioSelect) else 'checkbox'\n required = REQUIRED if field.required else ''\n if isinstance(field.widget, CheckboxSelectMultiple):\n required = ''\n options_re = {'name': name, 'required': required, 'input_type': input_type}\n option_list = []\n for num, each in enumerate(field.choices):\n val, display = each\n opt_replace 
= options_re.copy()\n opt_replace.update({'num': str(num), 'val': str(val), 'display_choice': str(display)})\n option = OTHER_OPTION_TXT % opt_replace\n option_list.append(option)\n cur_replace['options'] = ''.join(option_list)\n field_formats[name] = RADIO_TXT if isinstance(field.widget, RadioSelect) else CHECK_TXT\n elif isinstance(field, BooleanField) or isinstance(field.widget, CheckboxInput):\n cur_replace['input_type'] = 'checkbox'\n cur_replace['attrs'] = ''\n if field.initial or form.data.get(get_html_name(form, name), None):\n cur_replace['last'] = ' checked'\n elif isinstance(field.widget, (Select, SelectMultiple)):\n option_list = []\n for num, each in enumerate(field.choices):\n val, display = each\n option = OPTION_TXT % {'val': str(val), 'display_choice': str(display)}\n option_list.append(option)\n cur_replace['options'] = ''.join(option_list)\n cur_replace['multiple'] = MULTIPLE\n if not isinstance(field.widget, SelectMultiple):\n cur_replace['multiple'] = ''\n cur_replace['required'] = ''\n field_formats[name] = SELECT_TXT\n field_error = form.errors.get(name, None)\n if field_error:\n error_string = self.error_format(as_type, field_error, **setup.get('error_kwargs', {}))\n if as_type == 'as_table':\n cur_replace['label_end'] += error_string\n elif as_type in ('as_ul', 'as_fieldset'):\n cur_replace['start_tag'] += error_string\n elif as_type == 'as_p':\n cur_replace['start_tag'] = error_string + cur_replace['start_tag']\n else:\n cur_replace['error'] = error_string\n txt = field_formats.get(name, DEFAULT_TXT) % cur_replace\n form_list.append(txt)\n str_hidden = ''.join(hidden_list)\n if len(form_list) > 0:\n last_row = form_list[-1]\n default_re = DEFAULT_RE.copy()\n default_re.update({'attrs': '%(attrs)s', 'end_tag': str_hidden + '%(end_tag)s'})\n form_list[-1] = last_row % default_re\n else:\n form_list.append(str_hidden)\n expected = ''.join(form_list) % setup\n return expected.strip()", "def api_field_from_django_field(cls, f, default=CharField):\n if isinstance(f, JSONField):\n return JSONApiField\n \n return super(PandaModelResource, cls).api_field_from_django_field(f, default)", "def editable_metadata_fields(self):\r\n def jsonify_value(field, json_choice):\r\n if isinstance(json_choice, dict):\r\n json_choice = dict(json_choice) # make a copy so below doesn't change the original\r\n if 'display_name' in json_choice:\r\n json_choice['display_name'] = get_text(json_choice['display_name'])\r\n if 'value' in json_choice:\r\n json_choice['value'] = field.to_json(json_choice['value'])\r\n else:\r\n json_choice = field.to_json(json_choice)\r\n return json_choice\r\n\r\n def get_text(value):\r\n \"\"\"Localize a text value that might be None.\"\"\"\r\n if value is None:\r\n return None\r\n else:\r\n return self.runtime.service(self, \"i18n\").ugettext(value)\r\n\r\n metadata_fields = {}\r\n\r\n # Only use the fields from this class, not mixins\r\n fields = getattr(self, 'unmixed_class', self.__class__).fields\r\n\r\n for field in fields.values():\r\n\r\n if field.scope != Scope.settings or field in self.non_editable_metadata_fields:\r\n continue\r\n\r\n # gets the 'default_value' and 'explicitly_set' attrs\r\n metadata_fields[field.name] = self.runtime.get_field_provenance(self, field)\r\n metadata_fields[field.name]['field_name'] = field.name\r\n metadata_fields[field.name]['display_name'] = get_text(field.display_name)\r\n metadata_fields[field.name]['help'] = get_text(field.help)\r\n metadata_fields[field.name]['value'] = field.read_json(self)\r\n\r\n # We support the 
following editors:\r\n # 1. A select editor for fields with a list of possible values (includes Booleans).\r\n # 2. Number editors for integers and floats.\r\n # 3. A generic string editor for anything else (editing JSON representation of the value).\r\n editor_type = \"Generic\"\r\n values = field.values\r\n if isinstance(values, (tuple, list)) and len(values) > 0:\r\n editor_type = \"Select\"\r\n values = [jsonify_value(field, json_choice) for json_choice in values]\r\n elif isinstance(field, Integer):\r\n editor_type = \"Integer\"\r\n elif isinstance(field, Float):\r\n editor_type = \"Float\"\r\n elif isinstance(field, List):\r\n editor_type = \"List\"\r\n elif isinstance(field, Dict):\r\n editor_type = \"Dict\"\r\n elif isinstance(field, RelativeTime):\r\n editor_type = \"RelativeTime\"\r\n metadata_fields[field.name]['type'] = editor_type\r\n metadata_fields[field.name]['options'] = [] if values is None else values\r\n\r\n return metadata_fields", "def prep_field(request, obj, field, manyToManySep=';'):\n if '__' in field:\n bits = field.split('__')\n field = bits.pop()\n\n for bit in bits:\n obj = getattr(obj, bit, None)\n\n if obj is None:\n return \"\"\n\n attr = getattr(obj, field)\n\n if isinstance(attr, (FieldFile,) ):\n attr = request.build_absolute_uri(attr.url)\n\n output = attr() if callable(attr) else attr\n\n if isinstance(output, (list, tuple, QuerySet)):\n output = manyToManySep.join([str(item) for item in output])\n\n return str(output) if output else \"\"", "def field(base : SetupVal, field_name : str) -> SetupVal:\n if not isinstance(base, SetupVal):\n raise ValueError('field expected a SetupVal, but got {base!r}')\n if not isinstance(field_name, str):\n raise ValueError('field expected a str, but got {field_name!r}')\n return FieldVal(base, field_name)", "def handle_field(self, obj, field):\n self.indent(3)\n internal_type = field.get_internal_type()\n attrs = {\n \"id\": field.name,\n \"resname\": field.name,\n \"restype\": \"x-%s\" % internal_type,\n \"translate\": \"no\",\n }\n if internal_type in (\"CharField\", \"TextField\"):\n attrs[\"translate\"] = \"yes\"\n\n if internal_type == \"CharField\":\n attrs[\"size-unit\"] = \"char\"\n attrs[\"maxwidth\"] = str(field.max_length)\n\n self.xml.startElement(\"trans-unit\", attrs)\n self.indent(4)\n self.xml.startElement(\"source\", {})\n # Get a \"string version\" of the object's data.\n if getattr(obj, field.name) is not None:\n self.xml.characters(field.value_to_string(obj))\n else:\n self.xml.addQuickElement(\"None\")\n\n self.xml.endElement(\"source\")\n self.indent(3)\n self.xml.endElement(\"trans-unit\")", "def extend_or_add_fields(cls, subfields, dbmanager, flag_mixin_atroot, propname, proplabel):\n import mdbmodel_fieldset\n if (flag_mixin_atroot):\n # prepare extra fields that will be added at root; this doesnt actually create any prerequisites\n cls.extend_fields(subfields)\n else:\n # add a special sub table that will contain some fields, using a helper class object attached to us\n # create (AND REGISTER) the new helper object\n backrefname = cls.get_dbtablename_pure()\n mdbmodel_fieldset.MewloDbFieldset.make_fieldset_dbobjectclass(cls, propname, proplabel, backrefname, dbmanager, subfields)", "def introFieldWidgetFactory(field, request):\n return widget.FieldWidget(field, IntroWidget(request))", "def add_field(self, name, value):\n if not isinstance(value, str):\n value = json.dumps(value, ensure_ascii=False)\n self.form_fields.append((name, value))\n return", "def __init__(field, form, content):", "def 
__init__(field, form, content):", "def test_make_form_field():", "def get_field_info(self, field):\n field_info = {}\n serializer = field.parent\n\n if isinstance(field, serializers.ManyRelatedField):\n field_info[\"type\"] = self.type_lookup[field.child_relation]\n else:\n field_info[\"type\"] = self.type_lookup[field]\n\n try:\n serializer_model = serializer.Meta.model\n field_info[\"relationship_type\"] = self.relation_type_lookup[\n getattr(serializer_model, field.field_name)\n ]\n except KeyError:\n pass\n except AttributeError:\n pass\n else:\n field_info[\"relationship_resource\"] = get_related_resource_type(field)\n\n field_info[\"required\"] = getattr(field, \"required\", False)\n\n attrs = [\n \"read_only\",\n \"write_only\",\n \"label\",\n \"help_text\",\n \"min_length\",\n \"max_length\",\n \"min_value\",\n \"max_value\",\n \"initial\",\n ]\n\n for attr in attrs:\n value = getattr(field, attr, None)\n if value is not None and value != \"\":\n field_info[attr] = force_str(value, strings_only=True)\n\n if getattr(field, \"child\", None):\n field_info[\"child\"] = self.get_field_info(field.child)\n elif getattr(field, \"fields\", None):\n field_info[\"children\"] = self.get_serializer_info(field)\n\n if (\n not field_info.get(\"read_only\")\n and not field_info.get(\"relationship_resource\")\n and hasattr(field, \"choices\")\n ):\n field_info[\"choices\"] = [\n {\n \"value\": choice_value,\n \"display_name\": force_str(choice_name, strings_only=True),\n }\n for choice_value, choice_name in field.choices.items()\n ]\n\n if (\n hasattr(serializer, \"included_serializers\")\n and \"relationship_resource\" in field_info\n ):\n field_info[\"allows_include\"] = (\n field.field_name in serializer.included_serializers\n )\n\n return field_info", "def add_field(request):\n try:\n bpo_topic = BPOTopic.objects.get(id=ObjectId(request.POST.get('topic_id')))\n except Exception as e:\n return HttpResponseBadRequest(e)\n else:\n input_type = request.POST.get('type')\n input_label = request.POST.get('label')\n if input_type == 'date_input':\n bpo_field = BPODateField(\n label=input_label,\n type=input_type,\n )\n elif input_type == 'number_input':\n bpo_field = BPOFloatField(\n label=input_label,\n type=input_type\n )\n elif input_type == 'auto_sum':\n bpo_field = BPOAutoSumField(\n label=input_label,\n type=input_type,\n sub_fields={}\n )\n elif input_type == 'multiple_choice':\n bpo_field = BPOMultipleChoiceField(\n label=input_label,\n type=input_type,\n choices=['New Choice 1', 'New Choice 2']\n )\n else:\n bpo_field = BPOField(\n label=input_label,\n type=input_type,\n )\n bpo_field.save()\n bpo_field_id = ObjectId(bpo_field.id)\n if not bpo_topic.fields:\n bpo_topic.fields = [bpo_field_id]\n else:\n bpo_topic.fields.append(bpo_field_id)\n bpo_topic.save()\n return HttpResponse(json.dumps(bpo_field.to_json), mimetype=\"application/x-javascript\")", "def generate_object_field(symbols, length, object, field):\n string = generate_random_string(symbols, length)\n try:\n instance = object.objects.get(**{field:string})\n if instance:\n string = generate_object_field(symbols, length, object, field)\n except object.DoesNotExist:\n pass\n return string", "def get_extra_field_value(field_type, payment):\n try:\n return payment.extra_fields.get(field_type=field_type).value\n except ExtraField.DoesNotExist:\n return mark_safe('&nbsp;')", "def subfield():\n return Subfield()", "def add_field(self, table, field, field_type, field_subtype=None, properties=None):\n self._check_field(table, field, 
exists=False)\n\n field_details = {\n 'type': field_type\n }\n\n if field_subtype:\n field_details['subtype'] = field_subtype\n\n if properties:\n field_details.update(properties)\n\n self._metadata['tables'][table]['fields'][field] = field_details", "def build_expression(self, field_name, sub_field_name, null_value):\n expression = ''\n if field_name:\n expression = \"${object.\" + field_name\n if sub_field_name:\n expression += \".\" + sub_field_name\n if null_value:\n expression += \" or '''%s'''\" % null_value\n expression += \"}\"\n return expression", "def _init_fields(self):\n if self._fields is None:\n M.mset('U', \"^\") # DBS Calls Require this\n f = self._fields = {}\n attrs = self.fieldnames = {}\n fieldid = \"0\"\n while 1:\n # Subscript 0 is field description, .1 is the title, 3 is help\n fieldid, info, title, fieldhelp = M.ddwalk(self._fileid, fieldid)\n #fieldid, info, title, fieldhelp = M.mexec(\n # \"\"\"set s0=$order(^DD(s2,s0)) Q:s0'=+s0 s s1=$G(^DD(s2,s0,0)),s3=$G(^DD(s2,s0,.1)),s4=$G(^DD(s2,s0,3))\"\"\",\n # M.INOUT(str(fieldid)), M.INOUT(\"\"), str(self._fileid), M.INOUT(\"\"), M.INOUT(\"\"))\n if fieldid == \"\" or fieldid[0] not in \"0123456789.\":\n break\n\n info = info.split(\"^\", 4) \n label = self._clean_label(info[0])\n try:\n ftype = info[1]\n except:\n ftype = None\n if ftype:\n finst = None\n for klass in FIELD_TYPES:\n if klass.isa(ftype):\n finst = f[fieldid] = klass(fieldid, label, info)\n finst.fileid = self.fileid\n finst.ownerdd = self\n attrs[label] = fieldid\n break\n if finst is None:\n print finst, \"FIELD [%s], spec [%s] was not identified\" % (label, ftype)\n continue\n finst.title = title\n finst.fieldhelp = fieldhelp\n else:\n assert finst, \"FIELD [%s] %s has no fieldspec\" % (label, info)\n\n return self._fields", "def createFormatMap(self, form, renderable, **extras):\n\n fmtmap = renderable.__dict__.copy()\n fmtmap.update(extras)\n\n def replaceVars(match):\n\n try:\n var = match.group()[2:-1]\n if var and var.endswith(\":lexical\"):\n var = var[:-len(\":lexical\")]\n value = form.getFieldValue(var, lexical=True) or ''\n else:\n value = form.getFieldValue(var) or ''\n\n if not isinstance(value, str):\n if not hasattr(value, \"decode\"):\n value = str(value)\n value = value.decode('utf-8')\n return value\n except:\n return match.group()\n\n # process labels and hints\n if 'label' in fmtmap and fmtmap['label'] != None:\n fmtmap['label'] = VAREXP.sub(replaceVars, fmtmap['label'])\n if 'hint' in fmtmap and fmtmap['hint'] != None:\n fmtmap['hint'] = VAREXP.sub(replaceVars, fmtmap['hint'])\n if 'text' in fmtmap and fmtmap['text'] != None:\n fmtmap['text'] = VAREXP.sub(replaceVars, fmtmap['text'])\n if 'placeholder' in fmtmap and fmtmap['placeholder'] != None:\n fmtmap['placeholder'] = VAREXP.sub(replaceVars,\n fmtmap['placeholder'])\n\n # defaults\n extra_classes = {'relevant': True, 'required': False,\n 'readonly': False, 'error': False}\n\n # Let's see whether we got properties here...\n try:\n if hasattr(renderable, 'bind') and renderable.bind:\n # Requiredness\n if form.model.isRequired(renderable.bind, form.data):\n extra_classes[\"required\"] = True\n\n if not form.model.isRelevant(renderable.bind, form.data):\n extra_classes[\"relevant\"] = False\n\n # Read only\n if form.model.isReadonly(renderable.bind, form.data):\n extra_classes[\"readonly\"] = True\n\n elif hasattr(renderable, 'getRenderables') and \\\n callable(renderable.getRenderables):\n\n # Group relevance\n if not form.model.isGroupRelevant(renderable, form.data):\n 
extra_classes[\"relevant\"] = False\n\n except:\n pass\n\n if extras.get(\"errors\", None) and \\\n hasattr(renderable, 'bind') and renderable.bind and \\\n extras['errors'].get(renderable.bind, None):\n\n extra_classes['error'] = True\n\n if getattr(renderable, 'alert', ''):\n fmtmap['alert'] = renderable.alert\n else:\n fmtmap['alert'] = \"; \".join(extras['errors'][renderable.bind])\n\n else:\n\n fmtmap['alert'] = ''\n\n if \"extra_classes\" in fmtmap:\n fmtmap['extra_classes'] = \" \".join([fmtmap['extra_classes']] + \\\n [key for key in\n list(extra_classes.keys())\n if extra_classes[key]])\n else:\n fmtmap['extra_classes'] = \" \".join([key for key in\n list(extra_classes.keys()) if\n extra_classes[key]])\n\n fmtmap['type'] = self.getType(renderable)\n\n return fmtmap", "def field(self) -> 'outputs.PreventionStoredInfoTypeLargeCustomDictionaryBigQueryFieldField':\n return pulumi.get(self, \"field\")", "def __update_custom_field_settings(self,\n eachfield, #field etree\n resourcetablename,\n fieldname\n ):\n\n # xml attributes\n TYPE = \"type\"\n READABLE = \"readable\"\n WRITABLE = \"writable\"\n LABEL = \"label\"\n HINT = \"comment\"\n DEFAULT = \"default\"\n LINES = \"lines\"\n BOXES = \"boxes\"\n HASOPTIONS = \"has_options\"\n\n unikey = \"%s__%s\" % (resourcetablename, fieldname)\n field_property = self.custom_field_properties.get(unikey, {})\n\n cust_fieldtype = field_property.get(\"fieldtype\", None)\n cust_readable = field_property.get(\"readable\", None)\n cust_writable = field_property.get(\"writable\", None)\n cust_label = field_property.get(\"label\", None)\n cust_hint = field_property.get(\"hint\", None)\n cust_default = field_property.get(\"default\", None)\n cust_lines = field_property.get(\"lines\", None)\n cust_boxes = field_property.get(\"boxes\", None)\n cust_has_options = field_property.get(\"has_options\", None)\n cust_options = field_property.get(\"options\", None)\n\n if cust_fieldtype:\n if cust_fieldtype != None:\n eachfield.set(TYPE, cust_fieldtype)\n if cust_readable != None:\n eachfield.set(READABLE, cust_readable)\n if cust_writable != None:\n eachfield.set(WRITABLE, cust_writable)\n if cust_label != None:\n eachfield.set(LABEL, cust_label)\n if cust_hint != None:\n eachfield.set(HINT, cust_hint)\n if cust_default != None:\n eachfield.set(DEFAULT, cust_default)\n if cust_lines != None:\n eachfield.set(LINES, cust_lines)\n if cust_boxes != None:\n eachfield.set(BOXES, cust_boxes)\n if cust_has_options != None:\n eachfield.set(HASOPTIONS, cust_has_options)\n if cust_options != None:\n opt_available = eachfield.getchildren()\n if len(opt_available) == 0:\n eachfield.append(cust_options)\n elif len(opt_available) == 1:\n eachfield.remove(opt_available[0])\n eachfield.append(cust_options)", "def _get_field_type_converter(pipeline_builder):\n converter_config = [\n {\n 'fields': ['/id'],\n 'targetType': 'LONG',\n 'dataLocale': 'en,US'\n }\n ]\n field_type_converter = pipeline_builder.add_stage('Field Type Converter')\n field_type_converter.set_attributes(conversion_method='BY_FIELD',\n field_type_converter_configs=converter_config)\n return field_type_converter, pipeline_builder", "def _uifield_from_dataclass(field: dc.Field) -> UiField:\n default = field.default if field.default is not dc.MISSING else Undefined\n dfactory = (\n field.default_factory if field.default_factory is not dc.MISSING else None\n )\n extra = {k: v for k, v in field.metadata.items() if k in _UI_FIELD_NAMES}\n\n return UiField(\n name=field.name,\n type=field.type,\n default=default,\n 
default_factory=dfactory,\n _native_field=field,\n **extra,\n )", "def _create_fields(self, init=None):\n\t\t# don't require the user to define this, hardcode it in\n\t\tif \"id\" not in self.fields:\n\t\t\tself.fields[\"id\"] = int\n\n\t\tif self.__fields is None:\n\t\t\tself.__fields = {}\n\t\tif self.__field_types is None:\n\t\t\tself.__field_types = self.fields.copy()\n\n\t\tfor k,v in self.fields.iteritems():\n\t\t\tif type(v) is type:\n\t\t\t\t# do NOT instantiate this at this moment, leave the values\n\t\t\t\t# as None\n\t\t\t\tv = None\n\t\t\telse:\n\t\t\t\tself.__field_types[k] = v.__class__\n\n\t\t\tif init is not None and k in init:\n\t\t\t\tcls = self._get_class(self.__field_types[k])\n\n\t\t\t\t# make sure it's the appropriate type\n\t\t\t\t# also don't try to cast it to something if it is None\n\t\t\t\tif init[k] is not None:\n\t\t\t\t\tif cls is unicode:\n\t\t\t\t\t\tv = cls(init[k]).encode(\"utf-8\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tv = cls(init[k])\n\t\t\t\telse:\n\t\t\t\t\tv = None\n\n if k in self.__fields and self.__fields[k] is not None and v is None:\n continue\n\n\t\t\tself.__fields[k] = v\n\n\t\t# add any non-defined fields to self.__fields\n\t\tif init and self.accept_all_fields:\n\t\t\tfor k,v in init.iteritems():\n\t\t\t\tif k not in self.__fields:\n\t\t\t\t\tself.__fields[k] = v\n\n\t\tif init is not None and \"attachments\" in init:\n\t\t\tself._create_attachments(init[\"attachments\"])\n\n\t\tif init:\n\t\t\tself._add_std_fields(init)", "def make_specialfields(unique_id,id1,id2,size,fieldtext,hgf_field,help_text,sbmfield,config,typ,inst):\n\tspecialfields = config[\"default\"][\"specialfields\"]\n\tif \"specialfields\" in config[inst].keys():\n\t\tif hgf_field in config[inst][\"specialfields\"].keys():\n\t\t\tspecialfields = config[inst][\"specialfields\"]\n\t\telse: \n\t\t\twarning(\"Please define %s in specialfields. we take %s from the default\" %(hgf_field,hgf_field))\n\t\t\n\telse: \n\t\twarning(\"Please define specialfields under config['%s']. 
we take specialfields from default\" %inst)\n\t\t\t\n\tvalues = specialfields[hgf_field] #get special values for radio buttons\n\tgroupclass = get_groupclass(sbmfield[2])\n\tmog = \"\" # this variable is set for group mandatory fields\n\tif groupclass != \"\": mog = \"MOG\"\n\tif sbmfield[2].startswith(\"m\"): #fieldlevel\n\t\tif unique_id == sbmfield[0].replace(\"hgf_\",\"\"): #no marccode but Input-field \n\t\t\tspanclass = '<span class=\"MG MG%(unique_id)s %(mog)s\"> <label for=\"I%(unique_id)s\" class=\"L%(unique_id)s ML\">%(fieldtext)s</label> %(help_text)s'\n\t\t\tinputclass = 'class=\"MI %s\"' % groupclass\n\t\t\tinputfield = get_input(values,inputclass,typ) \n\t\telse:\n\t\t\tspanclass = '<span class=\"MG%(id2)s G%(id2)s MG%(id1)s G%(id1)s MG G %(mog)s\"><label for=\"I%(unique_id)s\" class=\"ML%(id2)s L%(id2)s ML%(id1)s L%(id1)s ML L\">%(fieldtext)s</label> %(help_text)s'\n\t\t\tinputclass = 'class=\"MI%(id2)s I%(id2)s MI%(id1)s I%(id1)s MI I %s\"' %groupclass\n\t\t\tinputfield = get_input(values,inputclass,typ) \n\telse:\n\t\tif unique_id == sbmfield[0].replace(\"hgf_\",\"\"): #no marccode but Input-field\n\t\t\tspanclass = '<span class=\"G G%(unique_id)s\"> <label for=\"I%(unique_id)s\" class=\"L%(unique_id)s L\">%(fieldtext)s</label> %(help_text)s'\n\t\t\tinputclass = 'class=\"I %s\"' %groupclass\n\t\t\tinputfield = get_input(values,inputclass,typ) \t \n\t\telse:\n\t\t\tspanclass = '<span class=\"G%(id2)s G%(id1)s G\"> <label for=\"I%(unique_id)s\" class=\"L%(id2)s L%(id1)s L\">%(fieldtext)s</label> %(help_text)s'\n\t\t\tinputclass = 'class=\"I%(id2)s I%(id1)s I %s\"' %groupclass\n\t\t\tinputfield = get_input(values,inputclass,typ) \n\tend = '</span>'\n\tspan_field = spanclass + inputfield + end\n\tspan_field = span_field %{'id1':id1,'id2':id2,'unique_id':unique_id,'size':size,'fieldtext':fieldtext,'hgf_name':hgf_field,'help_text':help_text,'inputclass':inputclass,'mog':mog}\n\treturn span_field", "def create_user_defined_fielddesc(sbmfield,config,inst):\n\tel_dict = {\"alephcode\":0,\\\n\t\t\t\"marccode\":1,\\\n\t\t\t\"type\":2,\\\n\t\t\t\"size\":3,\\\n\t\t\t\"rows\":4,\\\n\t\t\t\"cols\":5,\\\n\t\t\t\"maxlength\":6,\\\n\t\t\t\"val\":7,\\\n\t\t\t\"fidesc\":8,\\\n\t\t\t\"cd\":9,\\\n\t\t\t\"md\":10,\\\n\t\t\t\"modifytext\":11,\\\n\t\t\t\"fddfi2\":12,\\\n\t\t\t\"cookie\":13}\n\t\n\tsbm_dict = {\"fieldname\":0,\\\n\t\t\t \"fielddesc\":1,\\\n\t\t\t \"mo\":2,\\\n\t\t\t \"order\":3,\\\n\t\t\t \"placeholder\":4}\n\t\n\thgf_field = sbmfield[sbm_dict[\"fieldname\"]]\n\tif hgf_field.startswith(\"hgf\"): \n\t\telement = config[\"fielddesc\"][hgf_field] # we have to read the fielddescriptor from confg file, because all fielddescriptors in database will be redefined to \"user defined fields\" at the end of this function\n\telse: \n\t\tif hgf_field in config[\"default_form\"]: element = get_field_from_sbmfielddesc(hgf_field)[1:]\n\t\telse: return \"\",\"O\" #non hgf-fields (defined collections,...)\n\tplaceholder = \"\" #initialise\n\tfieldlabel = \"\" #initialise\n\tif len(sbmfield) == sbm_dict[\"placeholder\"] +1: placeholder = sbmfield[sbm_dict[\"placeholder\"]] #get placeholder\n\t\n\tif hgf_field == \"hgf_start\": \t\t\n \t# define a fieldset which can then be used for internal element\n \t# placement relative to that div so we end up with a table-less\n \t# form doing arrangement entirely in CSS\n\t\tif read_javascript_includes():\n\t\t\tfieldlabel = read_javascript_includes()\n\t\tfieldlabel += '<fieldset id=\"submissionfields\"><legend id=\"submissionlegend\">%s</legend><div 
id=\"loadingMsg\"><img src=\"/img/search.png\" alt=\"Loading...\" />Loading data. Please stand by... </div>' %sbmfield[sbm_dict[\"fielddesc\"]]\n\t\treturn fieldlabel,sbmfield[sbm_dict[\"mo\"]].upper()\n\t\t\n\tif hgf_field == \"hgf_end\":\n \t\t# close the main fieldset\n\t\tfieldlabel = '</fieldset>'\n\t\treturn fieldlabel,sbmfield[sbm_dict[\"mo\"]].upper()\n\t\t\n\tif hgf_field == \"hgf_comment\": #technical field\n\t\tif sbmfield[1] == \"hidden\": pass# 'hidden' is generated by create_mask function\n\t\telse:\t \n\t\t\tfieldlabel = \"<span class=\\\"Comment\\\" id=\\\"hgf_comment\\\">%s</span>\" % sbmfield[sbm_dict[\"fielddesc\"]] \n\t\t\treturn fieldlabel,sbmfield[sbm_dict[\"mo\"]].upper()\n\t\t\t\n\tif hgf_field == \"hgf_preview\": #mathjax title preview\n\t\tfieldlabel = \"\"\n\t\treturn fieldlabel,sbmfield[sbm_dict[\"mo\"]].upper()\n\t\n\tif element[el_dict[\"marccode\"]] == \"\": #no marccode\n\t\tunique_id = sbmfield[sbm_dict[\"fieldname\"]] # i.e. hgf_import is Input-field, but not MARC\n\t\tid1 = \"\"\n\t\tid2 = \"\"\n\telse : \n\t\tid1 = element[el_dict[\"marccode\"]][0:3]\n\t\tid2 = element[el_dict[\"marccode\"]]\n\t\tunique_id = hgf_field.replace(\"hgf_\",\"\")\n\tsize,rows,cols = element[3:6]\n\tvalue = element[el_dict[\"val\"]]\n\tif value == \"NULL\": value = \"\"\n\tfieldtext = sbmfield[sbm_dict[\"fielddesc\"]]\n\tfieldtype = \"D\" #change fieldtype to user defined input. IMPORTANT: whole information about the field (spans, fieldname, input-field, textarea) are stored in the fieldlabel in the sbmFIELD herefore fidesc in sbmFIELDDESC has to be \"\" and eltype \"D\")\n\t\n\t\n\tif inst != \"default\":\n\t\tsuffix = \"#\" + inst.upper() + \"_font\" # suffix for twiki page at GSI\t\n\telse: suffix = \"\"\n\t#Insert Helptext#\n\twiki_base = \"\"\n\tif (\"CFG_HGF_WIKI_BASE_URL\" in globals()):\n \t# Twiki needs all page titles to start with a capital letter.\n \t# Therefore, capitalize() the uniq_id when constructing the URL.\n \t\twiki_base = CFG_HGF_WIKI_BASE_URL \n else:\n\t\twiki_base = \"http://invenio-wiki.gsi.de/cgi-bin/view/Main/\"\n\thelp_text = '<span class=\"Helptext\" id=\"%(unique_id)s%(suffix)s\"><a href=\"%(wiki_base)s%(unique_id)s%(suffix)s\" alt=\"Help\" target=\"_blank\"><img src=\"/img/hgfinfo.png\"></a></span>' %{'unique_id':unique_id.capitalize(),\"suffix\":suffix,\"wiki_base\":wiki_base}\n\n\tmog = \"\" #this variable is set for group dependent mandatory fields \n\tif element[el_dict[\"type\"]].upper() == \"I\": #Input text box\n\t\tgroupclass = get_groupclass(sbmfield[sbm_dict[\"mo\"]]) #get groupclass in case of fieldlevel=m1,m2,m3...... 
if no groupclass, then return \"\"\n\t\tif groupclass != \"\": mog = \"MOG\"\n\t\tif sbmfield[sbm_dict[\"mo\"]].lower().startswith(\"m\"):#fieldlevel\n\t\t\tfieldlabel = '<span class=\"MG%(id2)s G%(id2)s MG%(id1)s G%(id1)s MG G %(mog)s\"><label for=\"I%(unique_id)s\" class=\"L%(unique_id)s ML%(id2)s L%(id2)s ML%(id1)s L%(id1)s ML L\">%(fieldtext)s</label> %(help_text)s <input name=\"%(hgf_name)s\" placeholder=\"%(placeholder)s\" id=\"I%(unique_id)s\" class=\"MI%(id2)s I%(id2)s MI%(id1)s I%(id1)s MI I %(groupclass)s\"></input></span>' % {'id1':id1,'id2':id2,'unique_id':unique_id,'size':size,'fieldtext':fieldtext,'hgf_name':hgf_field,'help_text':help_text,'groupclass':groupclass,'mog':mog,'placeholder':placeholder}\n\t\telse: \t\n\t\t\tif unique_id == sbmfield[sbm_dict[\"fieldname\"]]: #no marccode but Input-field\n\t\t\t\tfieldlabel = '<span class=\"G G%(unique_id)s %(mog)s\"> <label for=\"I%(unique_id)s\" class=\"L%(unique_id)s L\">%(fieldtext)s</label> %(help_text)s <input name=\"%(hgf_name)s\" placeholder=\"%(placeholder)s\" id=\"I%(unique_id)s\" class=\"I %(groupclass)s\"></input> </span>' % {'unique_id':unique_id,'size':size,'fieldtext':fieldtext,'hgf_name':hgf_field,'help_text':help_text,'groupclass':groupclass,'mog':mog,'placeholder':placeholder}\n\t\t\telse:\n\t\t\t\tfieldlabel = '<span class=\"G%(id2)s G%(id1)s G %(mog)s\"> <label for=\"I%(unique_id)s\" class=\"L%(id2)s L%(id1)s L\">%(fieldtext)s</label> %(help_text)s <input name=\"%(hgf_name)s\" placeholder=\"%(placeholder)s\" id=\"I%(unique_id)s\" class=\"I%(id2)s I%(id1)s I %(groupclass)s\"></input> </span>' % {'id1':id1,'id2':id2,'unique_id':unique_id,'size':size,'fieldtext':fieldtext,'hgf_name':hgf_field,'help_text':help_text,'groupclass':groupclass,'mog':mog,'placeholder':placeholder}\n\telif element[el_dict[\"type\"]].upper() == \"T\":\t# Textarea\n\t\tgroupclass = get_groupclass(sbmfield[sbm_dict[\"mo\"]])\n\t\tif groupclass != \"\": mog = \"MOG\"\n\t\tif sbmfield[sbm_dict[\"mo\"]].lower().startswith(\"m\"):#fieldlevel\n\t\t\tfieldlabel = '<span class=\"MG%(id2)s G%(id2)s MG%(id1)s G%(id1)s MG G %(mog)s\"> <label for=\"I%(unique_id)s\" class=\"ML%(id2)s L%(id2)s ML%(id1)s L%(id1)s ML L\" >%(fieldtext)s</label> %(help_text)s <textarea name=\"%(hgf_name)s\" placeholder=\"%(placeholder)s\" id=\"I%(unique_id)s\" class=\"MI%(id2)s I%(id2)s MI%(id1)s I%(id1)s MI I %(groupclass)s\" cols=\"%(cols)s\" rows=\"%(rows)s\"></textarea> </span>' % {'id1':id1,'id2':id2,'unique_id':unique_id,'size':size,'rows':rows,'cols':cols,'fieldtext':fieldtext,'hgf_name':hgf_field,'help_text':help_text,'groupclass':groupclass,'mog':mog,'placeholder':placeholder}\n\t\telse:\n\t\t\tfieldlabel = '<span class=\"G%(id2)s G%(id1)s G G%(unique_id)s %(mog)s\"> <label for=\"I%(unique_id)s\" class=\"L%(id2)s L%(id1)s L\">%(fieldtext)s</label> %(help_text)s <textarea name=\"%(hgf_name)s\" placeholder=\"%(placeholder)s\" id=\"I%(unique_id)s\" class=\"I%(id2)s I%(id1)s I %(groupclass)s\" cols=\"%(cols)s\" rows=\"%(rows)s\"></textarea> </span>' % {'id1':id1,'id2':id2,'unique_id':unique_id,'size':size,'rows':rows,'cols':cols,'fieldtext':fieldtext,'hgf_name':hgf_field,'help_text':help_text,'groupclass':groupclass,'mog':mog,'placeholder':placeholder}\n\telif element[el_dict[\"type\"]].upper() == \"H\": #hidden field\n\t\tif unique_id == sbmfield[sbm_dict[\"fieldname\"]]:\n\t\t\tfieldlabel = '<span class=\"G\"> <label for=\"I%(unique_id)s\" class=\"L%(unique_id)s L\"></label> <input type=\"hidden\" name=\"%(hgf_name)s\" id=\"I%(unique_id)s\" value=\"%(value)s\" 
class=\"I\"></input> </span>' % {'unique_id':unique_id,'value':value,'hgf_name':hgf_field}\n\t\telse:\n\t\t\tfieldlabel = '<span class=\"G%(id2)s G%(id1)s G\"> <label for=\"I%(unique_id)s\" class=\"L%(unique_id)s L%(id2)s L%(id1)s L\"></label> <input type=\"hidden\" name=\"%(hgf_name)s\" id=\"I%(unique_id)s\" value=\"%(value)s\" class=\"I%(id2)s I%(id1)s I\"></input> </span>' % {'id1':id1,'id2':id2,'unique_id':unique_id,'value':value,'hgf_name':hgf_field}\n\telif element[el_dict[\"type\"]].upper() == \"F\": #File field\n\t\tgroupclass = get_groupclass(sbmfield[sbm_dict[\"mo\"]])\n\t\tif groupclass != \"\": mog = \"MOG\"\n\t\tif sbmfield[sbm_dict[\"mo\"]].startswith(\"m\"):#fieldlevel\n\t\t\tif unique_id == sbmfield[sbm_dict[\"fieldname\"]]: #no marccode but Input-field\n\t\t\t\tfieldlabel = '<span class=\"MG MG%(unique_id)s %(mog)s\"> <label for=\"I%(unique_id)s\" class=\"L%(unique_id)s ML\">%(fieldtext)s</label> %(help_text)s <input type=\"file\" name=\"%(hgf_name)s\" placeholder=\"%(placeholder)s\" id=\"I%(unique_id)s\" class=\"MI %(groupclass)s\"></input> </span>' % {'unique_id':unique_id,'size':size,'fieldtext':fieldtext,'hgf_name':hgf_field,'help_text':help_text,'groupclass':groupclass,'mog':mog,'placeholder':placeholder}\n\t\t\telse:\t\n\t\t\t\tfieldlabel = '<span class=\"MG%(id2)s G%(id2)s MG%(id1)s G%(id1)s MG G %(mog)s\"><label for=\"I%(unique_id)s\" class=\"ML%(id2)s L%(id2)s ML%(id1)s L%(id1)s ML L\">%(fieldtext)s</label> %(help_text)s <input type=\"file\" name=\"%(hgf_name)s\" placeholder=\"%(placeholder)s\" id=\"I%(unique_id)s\" class=\"MI%(id2)s I%(id2)s MI%(id1)s I%(id1)s MI I %(groupclass)s\"></input></span>' % {'id1':id1,'id2':id2,'unique_id':unique_id,'size':size,'fieldtext':fieldtext,'hgf_name':hgf_field,'help_text':help_text,'groupclass':groupclass,'mog':mog,'placeholder':placeholder}\n\t\telse: \t\n\t\t\tif unique_id == sbmfield[sbm_dict[\"fieldname\"]]: #no marccode but Input-field\n\t\t\t\tfieldlabel = '<span class=\"G G%(unique_id)s\"> <label for=\"I%(unique_id)s\" class=\"L%(unique_id)s L\">%(fieldtext)s</label> %(help_text)s <input type=\"file\" name=\"%(hgf_name)s\" placeholder=\"%(placeholder)s\" id=\"I%(unique_id)s\" class=\"I\"></input> </span>' % {'unique_id':unique_id,'size':size,'fieldtext':fieldtext,'hgf_name':hgf_field,'help_text':help_text,'placeholder':placeholder}\n\t\t\telse:\n\t\t\t\tfieldlabel = '<span class=\"G%(id2)s G%(id1)s G\"> <label for=\"I%(unique_id)s\" class=\"L%(id2)s L%(id1)s L\">%(fieldtext)s</label> %(help_text)s <input name=\"%(hgf_name)s\" placeholder=\"%(placeholder)s\" type=\"file\" id=\"I%(unique_id)s\" class=\"I%(id2)s I%(id1)s I\"></input> </span>' % {'id1':id1,'id2':id2,'unique_id':unique_id,'size':size,'fieldtext':fieldtext,'hgf_name':hgf_field,'help_text':help_text,'placeholder':placeholder}\n\telif element[el_dict[\"type\"]].upper() == \"C\": #check box\n\t\tfieldlabel = make_specialfields(unique_id,id1,id2,size,fieldtext,hgf_field,help_text,sbmfield,config,\"checkbox\",inst)\n\telif element[el_dict[\"type\"]].upper() == \"R\": #Radio button Warninig invenio default for \"R\" would be Response Element\n\t\tfieldlabel = make_specialfields(unique_id,id1,id2,size,fieldtext,hgf_field,help_text,sbmfield,config,\"radio\",inst)\n\telse: \treturn \"\",\"O\" #other hgf-field with marccode (if exists)\n\t\n\teltype = get_eltype_from_sbmfielddesc(hgf_field)\n\tfidesc = \"\"\n\tmodification_text = fieldlabel #modification text\n\tif eltype != fieldtype: update_eltype_in_sbmfielddesc(hgf_field,fieldtype,modification_text,fidesc) 
#redefine fielddescriptor in database\n\t\n\tif len(sbmfield[sbm_dict[\"mo\"]])>1: fieldlevel = sbmfield[sbm_dict[\"mo\"]][0].upper() #prevent submitting irregular values into DB for fieldlevel, only M,O possible \n\telse: fieldlevel = sbmfield[sbm_dict[\"mo\"]].upper() \n\treturn fieldlabel,fieldlevel", "def build_label_text(field_name: str, field: dict):\n\n label = \"\"\n if \"required\" in field:\n label = \" * \" if field.get(\"required\") else \"\"\n\n # If we don't have a label defined, used the field name\n if \"label\" not in field:\n field.update({\"label\": field_name.upper()})\n\n label += field[\"label\"]\n\n return label", "def render_fields(form, args):\n output = \"\"\n fields = get_fields(form, args)\n for field in fields:\n output += render_field(field)\n return mark_safe(output)", "def generate_field_name(container, field):\n if \"standard_name\" in container.fields[field]:\n field_name = container.fields[field][\"standard_name\"]\n elif \"long_name\" in container.fields[field]:\n field_name = container.fields[field][\"long_name\"]\n else:\n field_name = str(field)\n field_name = field_name.replace(\"_\", \" \")\n field_name = field_name[0].upper() + field_name[1:]\n return field_name", "def format_data(self, _item_fields, special=None):\n\n if special:\n _item_fields[\"special\"] = special\n\n return _item_fields", "def fields(self, forge, values):\n\n values[\"forge\"] = forge['id']\n\n fields = opengui.Fields(\n values=values,\n fields=FIELDS,\n ready=True\n )\n\n fields[\"forge\"].description = forge[\"description\"]\n\n if os.path.exists(\"/opt/service/forge/fields.yaml\"):\n with open(\"/opt/service/forge/fields.yaml\", \"r\") as fields_file:\n fields.extend(yaml.safe_load(fields_file).get(\"fields\", []))\n\n for field in forge.get(\"input\", {}).get(\"fields\", []):\n if field[\"name\"] in RESERVED:\n raise Exception(f\"field name '{field['name']}' is reserved\")\n self.field(fields, field)\n\n return fields", "def _build_field_request(self):\n include = []\n for field in self.fields:\n if self.fields[field] is True:\n include.append(field)\n include = '&printFields=' + ','.join(include)\n return include", "def __init__(self, field_info):\n self.field_info = field_info", "def render(self, **kwargs):\r\n return h.text_field(self.name, value=self.value, **kwargs)", "def _uifield_from_attrs(field: Attribute) -> UiField:\n from attrs import NOTHING, Factory\n\n default = field.default if field.default is not NOTHING else Undefined\n default_factory = None\n if isinstance(default, Factory):\n default_factory = default.factory\n default = Undefined\n\n extra = {k: v for k, v in field.metadata.items() if k in _UI_FIELD_NAMES}\n\n return UiField(\n name=field.name,\n type=field.type,\n default=default,\n default_factory=default_factory,\n _native_field=field,\n **extra,\n )", "def attach_custom_user_fields(form_cls, **kwargs):\n new_fields = UserFields.query.filter_by(**kwargs).all()\n for field in new_fields:\n validators = []\n if field.required:\n validators.append(InputRequired())\n\n if field.field_type == \"text\":\n input_field = StringField(\n field.name, description=field.description, validators=validators\n )\n elif field.field_type == \"boolean\":\n input_field = BooleanField(\n field.name, description=field.description, validators=validators\n )\n\n setattr(form_cls, f\"fields[{field.id}]\", input_field)", "def createField(selected_layer, newFieldName, newFieldType):\r\n field = ogr.FieldDefn(newFieldName, newFieldType)\r\n selected_layer.CreateField(field)", 
"def _create_user_defined_field(address_book, field_type, field_value):\n field_name = FieldFactory(\n address_book, IPostalAddress, field_type, u'distance').__name__\n return PostalAddressFactory(\n UpdateablePersonFactory(address_book),\n **{field_name: field_value, 'set_as_default': True})", "def crispy_tbx_field(parser, token):\n token = token.split_contents()\n field = token.pop(1)\n attrs = {}\n\n # We need to pop tag name, or pairwise would fail\n token.pop(0)\n for attribute_name, value in pairwise(token):\n attrs[attribute_name] = value\n\n return CrispyGDSFieldNode(field, attrs)", "def add_field(self, name, fieldType=\"C\", size=\"50\", decimal=0, default=\" \"):\n if not size:\n size = \"50\"\n field_name = unicodedata.normalize('NFKD', name).encode('ascii', 'ignore')\n self.w.field(field_name, fieldType, str(size), decimal) #field name cannot be unicode, must be str()\n self.fields.append([field_name, fieldType, int(size), int(decimal)])\n self.field_names.append(name)\n self.field_indices[name] = len(self.fields) - 1\n for rec in self.records:\n rec.append(default)\n\n self.__isBuilt = False", "def format_field(self, value, spec):\n cache = Cache()\n if spec == \"co\":\n # if cache(re.match(\"(.*)co$\", spec)):\n value = co_join(value)\n spec = \"s\"\n # cache.output.group(1) + \"s\"\n elif cache(re.match(r\"^sub(\\d?)_?(.*)$\", spec)):\n depth = (1 if cache.output.group(1) == \"\" else\n int(cache.output.group(1)))\n value = \"\\n\".join([\n \"{0}{1} = {2}\".format(depth * \" \", key, val)\n for key, val in value.items()])\n if cache.output.group(2) != \"\":\n value = (\n depth * \"[\" + cache.output.group(2) + depth * \"]\" + \"\\n\" +\n value)\n spec = \"s\"\n return super(Format, self).format_field(value, spec)", "def field(title, _type, style, validation=None, **data):\n result = {\n \"title\": title,\n \"type\": _type,\n \"style\": style,\n \"validation\": validation\n }\n result.update(data)\n return result", "def form_CustomisedFormLayoutFields(request):\n schema = schemaish.Structure()\n schema.add( 'firstName', schemaish.String())\n schema.add( 'surname', schemaish.String())\n schema.add( 'age', schemaish.Integer())\n schema.add( 'sex', schemaish.String())\n\n form = formish.Form(schema, 'form')\n\n return form", "def add_field(self, field_name, label, description, type, function=None):\n new_field = {\n \"label\": label,\n \"description\": description,\n \"type\": type,\n }\n if function is not None:\n new_field[\"source\"] = \"function\"\n self.fields[field_name] = function\n else:\n new_field[\"source\"] = \"system\"\n self.fields[field_name] = \"No value\"\n self.description[\"fields\"][\"values\"][field_name] = new_field\n\n # update MongoDB\n #self.mongo_client.cps2_project.objects.update_one(\n #{\"_id\": self.mongo_id},\n #{\"$set\": {\"fields.values.\" + field_name: new_field,\n #\"last_modified.value\": str(datetime.utcnow())}\n #}\n #)\n print(\"Added a new field called \\\"\" + field_name + \"\\\" and updated MongoDB.\")", "def set_up_fields(self, fields):\n self.fields = {\n 'name': self.name,\n 'email': self.email\n }\n for key in fields.keys():\n # special keys first, not to be used in the template\n if key.upper() == 'CC':\n self.is_cc = fields[key]\n elif key.upper() == 'BCC':\n self.is_bcc = fields[key]\n else:\n self.fields[key] = fields[key]", "def _get_FIELD_display(self, field):\n value = getattr(self, field.attname)\n if value is None:\n return\n template = ''\n template += '{:d}' if field.decimals == 0 else '{:.%sf}' % field.decimals\n 
template += ' ' if field.spaced_display else ''\n template += '{!s:s}'\n return template.format(value, field.unit)", "def render(self, field, key, value, REQUEST, render_prefix=None):\n return self._render(field, key, value, REQUEST, render_prefix=render_prefix)", "def field_creator(self, size=(1600, 900)) -> None:\n # size[0] - строк; size[1] - столбцов\n # 80 - оптимальное значение на данный момент\n with open(normpath('levels/level/lvl1' + '_field.py'), 'w') as f:\n f.write('Field = [' + '\\n')\n for i in range(0, size[1], settings.precision):\n buf_mas = list()\n for j in range(0, size[0], settings.precision):\n tmp = False\n for item in self.objects:\n if (item.collidepoint(j, i)):\n tmp = True\n break\n if tmp:\n buf_mas.append(-1)\n else:\n buf_mas.append(0)\n f.write(' '+str(buf_mas) + ',\\n')\n f.write(']')", "def to_field(self, **kwargs):\n if not self.field_klass:\n raise ValueError('Please, supply `field_klass` attribute first.')\n\n field_kwargs = {}\n\n for arg in self.field_args:\n value = kwargs[arg] if arg in kwargs else getattr(self, arg)\n field_kwargs.update({arg: value})\n\n field_kwargs.update(**self.field_kwargs)\n return self.field_klass(**field_kwargs)", "def merge_sbmfield(doctype,config,inst,field,order_index):\n\tsbmfield = [field]\n\tfield_parts = config[inst][doctype][field]\n\tfor i in range(len(field_parts)): #length should be 4\n\t\tif field_parts[i] == \"-\":\n\t\t\tif i==2: #order\n\t\t\t\tpart = order_index\n\t\t\telse:\n\t\t\t\tif field in config[\"default_form\"].keys():\n\t\t\t\t\tif i >= len(config[\"default_form\"][field]): part = \"\" #reffering to a default_form field, which is not defined \n\t\t\t\t\telse: part = config[\"default_form\"][field][i] #take the default if \"-\"\n\t\t\t\telse: part = \"\"\n\t\t\t\tif part == \"-\": \n\t\t\t\t\tpart = \"\" #should not happen\n\t\t\t\t\twarning(\"field: %s is not defined in default_form and refers to '-'\" %field)\n\t\telse: part = field_parts[i]\n\t\tsbmfield.append(part)\n\treturn sbmfield", "def field():\n field = Field()\n field.type = 'TextLine'\n return field", "def _db_field(self):\n return self.specific._db_field({\n 'verbose_name': self.verbose_name,\n 'help_text': self.help_text,\n 'blank': not self.required,\n 'null': not self.required,\n 'unique': self.unique,\n 'primary_key': self.primary_key,\n 'db_index': self.index or None,\n })", "def get_custom_fields_for_model(content_type, filterable_only=False, bulk_edit=False):\n field_dict = OrderedDict()\n kwargs = {'obj_type': content_type}\n if filterable_only:\n kwargs['is_filterable'] = True\n custom_fields = CustomField.objects.filter(**kwargs)\n\n for cf in custom_fields:\n field_name = 'cf_{}'.format(str(cf.name))\n\n # Integer\n if cf.type == CF_TYPE_INTEGER:\n field = forms.IntegerField(required=cf.required, initial=cf.default)\n\n # Boolean\n elif cf.type == CF_TYPE_BOOLEAN:\n choices = (\n (None, '---------'),\n (1, 'True'),\n (0, 'False'),\n )\n if cf.default.lower() in ['true', 'yes', '1']:\n initial = 1\n elif cf.default.lower() in ['false', 'no', '0']:\n initial = 0\n else:\n initial = None\n field = forms.NullBooleanField(required=cf.required, initial=initial,\n widget=forms.Select(choices=choices))\n\n # Date\n elif cf.type == CF_TYPE_DATE:\n field = forms.DateField(required=cf.required, initial=cf.default, help_text=\"Date format: YYYY-MM-DD\")\n\n # Select\n elif cf.type == CF_TYPE_SELECT:\n choices = [(cfc.pk, cfc) for cfc in cf.choices.all()]\n if not cf.required or bulk_edit or filterable_only:\n choices = [(None, 
'---------')] + choices\n field = forms.TypedChoiceField(choices=choices, coerce=int, required=cf.required)\n\n # URL\n elif cf.type == CF_TYPE_URL:\n field = LaxURLField(required=cf.required, initial=cf.default)\n\n # Text\n else:\n field = forms.CharField(max_length=255, required=cf.required, initial=cf.default)\n\n field.model = cf\n field.label = cf.label if cf.label else cf.name.replace('_', ' ').capitalize()\n if cf.description:\n field.help_text = cf.description\n\n field_dict[field_name] = field\n\n return field_dict" ]
[ "0.6215978", "0.6009022", "0.5988297", "0.59636", "0.5934223", "0.590157", "0.5843997", "0.58085185", "0.5790124", "0.5744198", "0.5619677", "0.56059074", "0.5605764", "0.55969447", "0.55805063", "0.55735326", "0.55544263", "0.55516434", "0.55483365", "0.5527651", "0.5524747", "0.5523143", "0.54927224", "0.5488375", "0.5472581", "0.5471565", "0.54587746", "0.5453662", "0.5430207", "0.54124993", "0.5369384", "0.53440887", "0.5337527", "0.5320969", "0.5302084", "0.528662", "0.5276439", "0.52701604", "0.5262268", "0.52452344", "0.5242927", "0.52245784", "0.5224134", "0.52199805", "0.5219799", "0.5192224", "0.5189843", "0.51886094", "0.5184957", "0.51849437", "0.518364", "0.51804036", "0.51697594", "0.51620066", "0.5144053", "0.5144053", "0.5143866", "0.5143363", "0.5142958", "0.51363224", "0.51356614", "0.51255935", "0.51226115", "0.5120845", "0.5111733", "0.51108366", "0.50925404", "0.5092516", "0.5088824", "0.50886613", "0.5070121", "0.505704", "0.5055384", "0.505054", "0.50412047", "0.50362915", "0.50330347", "0.5024486", "0.5004333", "0.5004304", "0.49969113", "0.49909154", "0.4989382", "0.49770543", "0.49744236", "0.4973932", "0.49708998", "0.49702772", "0.49617073", "0.4960804", "0.49604297", "0.49576533", "0.49560815", "0.49483094", "0.49451002", "0.49443492", "0.49317715", "0.49278766", "0.49269652", "0.4926026" ]
0.51042384
66
If we have a registration code required, we attach it to the form similar to attach_custom_user_fields
def attach_registration_code_field(form_cls): if Configs.registration_code: setattr( # noqa B010 form_cls, "registration_code", StringField( "Registration Code", description="Registration code required to create account", validators=[InputRequired()], ), )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def registration(request, code=None):\n if request.method == \"POST\":\n form = RegistrationForm(request.POST)\n if form.is_valid():\n form.save()\n return redirect(\"confirm.html\")\n else:\n form = RegistrationForm()\n return render(request, 'registration.html', {'form': form, 'code': code})", "def build_registration_code_field(form_cls):\n if Configs.registration_code:\n field = getattr(form_cls, \"registration_code\") # noqa B009\n field.field_type = \"text\"\n return [field]\n else:\n return []", "def handle_register(self, code):\n\n if code in self.factory.users_codes and not self.factory.debug and False:\n self._snd(u\"Codigo Ya Registrado\\nIntroduzca codigo\")\n return\n\n if self.get_odoo_connexion(code):\n self.state = \"tasks\"\n self.menu1_tasks()\n return\n else:\n self._snd(u\"No se pudo establecer\\n la conexion.\\nIntroduzca codigo\")", "def user_register():\n \n data = user_obj.user_register(request.forms) \n return data", "def register(request):\n if request.method == 'GET':\n form = CustomUserCreationForm()\n elif request.method == 'POST':\n form = CustomUserCreationForm( data=request.POST )\n\n if form.is_valid():\n user = form.save( commit=False )\n # we can make any last second changes to the user\n user.save()\n return redirect( '/' )\n\n context = {'form': form}\n return render( request, 'register.html', context )", "def register(request):\n if not settings.BMAT_ALLOW_REGISTER:\n return render(request, \"users/no_register.html\", {})\n \n if request.method == \"GET\":\n return render(request, \"users/register.html\", {\"form\":CustomUserCreationForm()})\n \n elif request.method == \"POST\":\n f = CustomUserCreationForm(data=request.POST)\n \n if not f.is_valid():\n return render(request, \"users/register.html\", {\"form\":f})\n \n u = f.save(commit=False)\n \n u.email = f.cleaned_data.get(\"email\", \"\")\n u.save()\n \n u = authenticate(username=u.username, password=f.cleaned_data[\"password1\"])\n alogin(request, u)\n \n return redirect(\"/\")", "def __init__(self, *args, **kw):\n super(SignupFormExtra, self).__init__(*args, **kw)", "def auto_register(request,backend=None,error_msgs=''):\r\n # Check if a username is provided\r\n username_form = forms.AutoRegisterForm()\r\n if request.method == 'POST' and request.POST.get('username'):\r\n name = setting('SOCIAL_AUTH_PARTIAL_PIPELINE_KEY', 'partial_pipeline')\r\n username_form = forms.AutoRegisterForm(request.POST)\r\n if username_form.is_valid():\r\n username = username_form.cleaned_data['username']\r\n try:\r\n interface.get_user_without_password(username)\r\n error_msgs ='That username is already in use.'\r\n except DoesNotExistError:\r\n request.session['saved_username'] = request.POST['username']\r\n backend = request.session[name]['backend']\r\n return redirect('socialauth_complete', backend=backend)\r\n name = setting('SOCIAL_AUTH_PARTIAL_PIPELINE_KEY', 'partial_pipeline')\r\n backend=request.session[name]['backend']\r\n return render_to_response('accounts/auto_register.html', {'backend' : backend, 'error_msgs' : error_msgs, 'username_form' : username_form}, RequestContext(request))", "def create_register_user(self, data, user_type):\n data.pop('password_confirm')\n data['user_type'] = user_type\n user = User.objects.create_user(**data)\n return user", "def registration_code(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"registration_code\")", "def register(request, key):\n profile = cpm.UserProfile.objects.filter(\n activation_key=key)\n\n if not profile.exists() or 
profile[0].user.is_active:\n hero_title = 'Hmm... that registration key is invalid.'\n return render_err_msg(request, hero_title)\n\n user = profile[0].user\n\n if request.POST:\n reg_form = RegForm(request.POST)\n if reg_form.is_valid():\n user.is_active = True\n user.first_name = reg_form.cleaned_data['first_name']\n user.last_name = reg_form.cleaned_data['last_name']\n user.set_password(reg_form.cleaned_data['password'])\n\n pic_url = put_profile_pic(\n reg_form.cleaned_data['pic_url'], user.profile)\n if pic_url:\n user.profile.pic_url = pic_url\n\n user.profile.class_year = reg_form.cleaned_data['class_year']\n\n alt_emails = request.POST.getlist('alt_email')\n for alt_email in alt_emails:\n if alt_email:\n user.profile.add_email(alt_email)\n\n user.save()\n user.profile.save()\n\n user = auth.authenticate(username=user.username,\n password=reg_form.cleaned_data['password'])\n if user is not None:\n if user.is_active:\n auth.login(request, user)\n # Redirect to a success page.\n return redirect('/')\n\n else:\n reg_form = RegForm()\n\n template_values = {\n 'page_title': 'register',\n 'form': reg_form,\n 'user': user,\n }\n\n return render_to_response('register.html',\n template_values, request)", "def register(request):\n registered = False\n if request.method == 'POST':\n user_form = UserForm(data=request.POST)\n profile_form = UserProfileInfoForm(data=request.POST)\n if user_form.is_valid() and profile_form.is_valid():\n user = user_form.save()\n user.set_password(user.password)\n user.save()\n profile = profile_form.save(commit=False)\n profile.user = user\n profile.save()\n registered = True\n else:\n print(user_form.errors,profile_form.errors)\n else:\n user_form = UserForm()\n profile_form = UserProfileInfoForm()\n return render(request,'footBallApp/registration.html',\n {'user_form':user_form,\n 'profile_form':profile_form,\n 'registered':registered})", "def register_user(request, extra_context=None):\r\n if request.user.is_authenticated():\r\n return redirect(reverse('dashboard'))\r\n if settings.FEATURES.get('AUTH_USE_CERTIFICATES_IMMEDIATE_SIGNUP'):\r\n # Redirect to branding to process their certificate if SSL is enabled\r\n # and registration is disabled.\r\n return external_auth.views.redirect_with_get('root', request.GET)\r\n\r\n context = {\r\n 'course_id': request.GET.get('course_id'),\r\n 'email': '',\r\n 'enrollment_action': request.GET.get('enrollment_action'),\r\n 'name': '',\r\n 'running_pipeline': None,\r\n 'platform_name': microsite.get_value(\r\n 'platform_name',\r\n settings.PLATFORM_NAME\r\n ),\r\n 'selected_provider': '',\r\n 'username': '',\r\n }\r\n\r\n if extra_context is not None:\r\n context.update(extra_context)\r\n\r\n if context.get(\"extauth_domain\", '').startswith(external_auth.views.SHIBBOLETH_DOMAIN_PREFIX):\r\n return render_to_response('register-shib.html', context)\r\n\r\n # If third-party auth is enabled, prepopulate the form with data from the\r\n # selected provider.\r\n if settings.FEATURES.get('ENABLE_THIRD_PARTY_AUTH') and pipeline.running(request):\r\n running_pipeline = pipeline.get(request)\r\n current_provider = provider.Registry.get_by_backend_name(running_pipeline.get('backend'))\r\n overrides = current_provider.get_register_form_data(running_pipeline.get('kwargs'))\r\n overrides['running_pipeline'] = running_pipeline\r\n overrides['selected_provider'] = current_provider.NAME\r\n context.update(overrides)\r\n\r\n return render_to_response('register.html', context)", "def __init__(self, *args, **kwargs):\n super(RegisterForm, 
self).__init__(*args, **kwargs)\n\n self.fields['nick'].widget.attrs.update({\n 'label': 'Přezdívka',\n 'placeholder': 'Mirek'\n })\n\n self.fields['name'].widget.attrs.update({\n 'label': 'Jméno',\n 'placeholder': 'Mirek'\n })\n\n self.fields['surname'].widget.attrs.update({\n 'label': 'Příjmení',\n 'placeholder': 'Dušín'\n })\n\n self.fields['email'].widget.attrs.update({\n 'label': 'E-mail',\n 'placeholder': 'mirek@rychlesipy.cz'\n })\n\n self.fields['age'].widget.attrs.update({'label': 'Věk'})\n self.fields['age'].initial = 18\n\n self.fields['race'].widget.attrs.update({'label': 'Rasa'})\n self.fields['race'].queryset = Race.objects.filter(\n active=True).only('id', 'name')\n\n self.fields['group'].widget.attrs.update({\n 'label': 'Skupina',\n 'placeholder': 'Rychlé Šípy'\n })\n\n for field in self.fields.keys():\n self.fields[field].widget.attrs.update({\n 'required': self.fields[field].required,\n 'title': '',\n 'class': 'form-control'\n })", "def register(self, form):\n new_user = form.save(commit=False)\n username_field = getattr(new_user, 'USERNAME_FIELD', 'username')\n # Save lowercased email as username.\n setattr(new_user, username_field, form.cleaned_data['email'].lower())\n new_user.first_name = form.cleaned_data['first_name']\n new_user.last_name = form.cleaned_data['last_name']\n new_user.save()\n new_user = authenticate(username=getattr(new_user, username_field), password=form.cleaned_data['password1'])\n login(self.request, new_user)\n user_registered.send(sender=self.__class__, user=new_user, request=self.request)\n profile, _ = Profile.objects.get_or_create(user=new_user)\n self.request.session['signed_up'] = True\n profile.payment_plan = int(form.cleaned_data['payment_plan'])\n profile.company_name = form.cleaned_data['company']\n profile.phone = form.cleaned_data['phone']\n profile.save(update_fields=['payment_plan', 'company_name', 'phone'])\n if profile.payment_plan != Profile.PAYMENT_PLAN_FREE:\n messages.add_message(self.request, messages.INFO,\n 'Congratulations! We won\\'t charge you for this plan for now.')\n return new_user", "def setup(request, template='socialregistration/setup.html',\n form_class=UserForm, extra_context=dict(), claim_form_class=ClaimForm):\n try:\n social_user = request.session['socialregistration_user']\n social_profile = request.session['socialregistration_profile']\n except KeyError:\n return render_to_response(\n template, dict(error=True), context_instance=RequestContext(request))\n\n if not GENERATE_USERNAME:\n # User can pick own username\n if not request.method == \"POST\":\n form = form_class(social_user, social_profile,)\n else:\n form = form_class(social_user, social_profile, request.POST)\n try:\n if form.is_valid():\n form.save()\n user = form.profile.authenticate()\n user.set_unusable_password() # we want something there, but it doesn't need to be anything they can actually use - otherwise a password must be assigned manually before the user can be banned or any other administrative action can be taken\n user.save()\n login(request, user)\n\n if 'socialregistration_user' in request.session: del request.session['socialregistration_user']\n if 'socialregistration_profile' in request.session: del request.session['socialregistration_profile']\n\n return HttpResponseRedirect(_get_next(request))\n except ExistingUser:\n # see what the error is. 
if it's just an existing user, we want to let them claim it.\n if 'submitted' in request.POST:\n form = claim_form_class(\n request.session['socialregistration_user'],\n request.session['socialregistration_profile'],\n request.POST\n )\n else:\n form = claim_form_class(\n request.session['socialregistration_user'],\n request.session['socialregistration_profile'],\n initial=request.POST\n )\n\n if form.is_valid():\n form.save()\n\n user = form.profile.authenticate()\n login(request, user)\n\n if 'socialregistration_user' in request.session: del request.session['socialregistration_user']\n if 'socialregistration_profile' in request.session: del request.session['socialregistration_profile']\n\n return HttpResponseRedirect(_get_next(request))\n\n extra_context['claim_account'] = True\n\n extra_context.update(dict(form=form))\n\n return render_to_response(template, extra_context,\n context_instance=RequestContext(request))\n \n else:\n # Generate user and profile\n social_user.username = str(uuid.uuid4())[:30]\n social_user.save()\n social_user.set_unusable_password() # we want something there, but it doesn't need to be anything they can actually use - otherwise a password must be assigned manually before the user can be banned or any other administrative action can be taken\n social_user.save()\n\n social_profile.content_object = social_user\n social_profile.save()\n\n # Authenticate and login\n user = social_profile.authenticate()\n login(request, user)\n\n # Clear & Redirect\n if 'socialregistration_user' in request.session: del request.session['socialregistration_user']\n if 'socialregistration_profile' in request.session: del request.session['socialregistration_profile']\n return HttpResponseRedirect(_get_next(request))", "def register(request):\n form = RegistrationForm()\n if request.is_ajax():\n # If the request is an AJAX request, then we want to handle\n # the team assignment and return the result as data.\n form = RegistrationForm(request.POST)\n if form.is_valid():\n user_data = form.cleaned_data\n user_data['username'] = user_data['username'].lower()\n user_data['quest_id'] = user_data['username']\n user = None\n users = CustomUser.objects.filter(username__exact=user_data['quest_id'])\n if users.count() > 0:\n user = users[0]\n else:\n user = None\n\n if user is None or user.team is None:\n team_assignment = sorting_hat.find_pink_tie_team_assignment(user_data)\n user_data.pop('quest_id')\n if user is None:\n user = CustomUser(**user_data)\n else:\n user.first_name = user_data['first_name']\n user.last_name = user_data['last_name']\n user.is_active = True\n user.team = team_assignment\n user.save()\n if user.is_first_year:\n return json_response({ 'valid': True, 'team': user.team.id })\n return json_response({ 'valid': False })\n return render(request, 'registration/register.html', context=RequestContext(request, { 'form' : form, 'team': request.user.team }))", "def register_form():\n\n return render_template(\"register.html\")", "def register_form():\n\n # A dictionary of language options with it's keys. Key as html option id\n # dict[key] as language options. 
\n lang_option = {\"en\": \"English\", \"sv\": \"Swedish\", \"zh-CN\": \"Chinese\", \n \"es\": \"Spanish\", \"fr\": \"French\", \"ru\": \"Russian\"}\n\n\n return render_template(\"register.html\", lang_option=lang_option)", "def show_register_form():\n return render_template(\"register-form.html\")", "def registration():\n registration_page = Registration()\n registration_page.registration_main_page()", "def register(email, display_name=None):", "def register_form():\n\n return render_template(\"register-form.html\")", "def validate_registration(registration_code):\n aaa.validate_registration(registration_code)\n return 'Thanks. <a href=\"/login\">Go to login</a>'", "def registration_code(self):\n return self._regcode", "def register_user():\n pass", "def add_user():\n\n return render_template('register-form.html')", "def form_valid(self, form, request):\n data = form.data\n\n # Password hashing\n password = make_password(data.get('password1'))\n\n # Checkbox has value 'on' instead of True\n volunteer = False\n flag = data.get('volunteer')\n if flag is not None and flag != 'false' and flag != 'False':\n volunteer = True\n\n # Break first_name and last_name\n names = data.get('name').strip().split(' ')\n first_name = names[0]\n last_name = ''\n if len(names) > 1:\n last_name = ' '.join(names[1:])\n\n err = self.register(data.get('username'), data.get('email'), data.get(\n 'phone_number'), volunteer, password, first_name, last_name)\n return err", "def register(request):\n register_form = UserCreationForm()\n return render(request, 'metro_app/register.html', {'form': register_form})", "def _register_user(request_form):\n idnr = request_form['idnr']\n\n if user_exists(idnr):\n raise UserAlreadyExistsError(idnr)\n\n response = elster_client.send_unlock_code_request_with_elster(request_form, request.remote_addr)\n request_id = escape(response['elster_request_id'])\n\n create_user(idnr, request_form['dob'].strftime(\"%d.%m.%Y\"), request_id)", "def test_registration_form(client):\n form_data = {\n \"email\": \"test@user.com\",\n \"password1\": \"abcd1234!\",\n \"password2\": \"abcd1234!\",\n }\n\n form = CustomUserRegistrationForm(data=form_data)\n assert form.is_valid()", "def account_register(request, next_url='dashboard'):\n siteconfig = SiteConfiguration.objects.get_current()\n auth_backends = get_enabled_auth_backends()\n\n if (auth_backends[0].supports_registration and\n siteconfig.get(\"auth_enable_registration\")):\n response = register(request, next_page=reverse(next_url),\n form_class=RegistrationForm)\n\n if request.user.is_authenticated():\n # This will trigger sending an e-mail notification for\n # user registration, if enabled.\n user_registered.send(sender=None, user=request.user)\n\n return response\n\n return HttpResponseRedirect(reverse(\"login\"))", "def mk_custom_field(self):\n msg = \"Do you want to generate a minefield with these options?\"\n self.popup.set_text(msg)\n self.popup.set_title(\"GENERATE FIELD?\")\n self.set_focused_ui(self.popup)\n self.popup.set_enabled(True)", "def create_account(request, post_override=None): # pylint: disable-msg=too-many-statements\r\n js = {'success': False} # pylint: disable-msg=invalid-name\r\n\r\n post_vars = post_override if post_override else request.POST\r\n extra_fields = getattr(settings, 'REGISTRATION_EXTRA_FIELDS', {})\r\n\r\n if settings.FEATURES.get('ENABLE_THIRD_PARTY_AUTH') and pipeline.running(request):\r\n post_vars = dict(post_vars.items())\r\n post_vars.update({'password': pipeline.make_random_password()})\r\n\r\n # if 
doing signup for an external authorization, then get email, password, name from the eamap\r\n # don't use the ones from the form, since the user could have hacked those\r\n # unless originally we didn't get a valid email or name from the external auth\r\n DoExternalAuth = 'ExternalAuthMap' in request.session\r\n if DoExternalAuth:\r\n eamap = request.session['ExternalAuthMap']\r\n try:\r\n validate_email(eamap.external_email)\r\n email = eamap.external_email\r\n except ValidationError:\r\n email = post_vars.get('email', '')\r\n if eamap.external_name.strip() == '':\r\n name = post_vars.get('name', '')\r\n else:\r\n name = eamap.external_name\r\n password = eamap.internal_password\r\n post_vars = dict(post_vars.items())\r\n post_vars.update(dict(email=email, name=name, password=password))\r\n log.debug(u'In create_account with external_auth: user = %s, email=%s', name, email)\r\n\r\n # Confirm we have a properly formed request\r\n for a in ['username', 'email', 'password', 'name']:\r\n if a not in post_vars:\r\n js['value'] = _(\"Error (401 {field}). E-mail us.\").format(field=a)\r\n js['field'] = a\r\n return JsonResponse(js, status=400)\r\n\r\n if extra_fields.get('honor_code', 'required') == 'required' and \\\r\n post_vars.get('honor_code', 'false') != u'true':\r\n js['value'] = _(\"To enroll, you must follow the honor code.\").format(field=a)\r\n js['field'] = 'honor_code'\r\n return JsonResponse(js, status=400)\r\n\r\n # Can't have terms of service for certain SHIB users, like at Stanford\r\n tos_required = (\r\n not settings.FEATURES.get(\"AUTH_USE_SHIB\") or\r\n not settings.FEATURES.get(\"SHIB_DISABLE_TOS\") or\r\n not DoExternalAuth or\r\n not eamap.external_domain.startswith(\r\n external_auth.views.SHIBBOLETH_DOMAIN_PREFIX\r\n )\r\n )\r\n\r\n if tos_required:\r\n if post_vars.get('terms_of_service', 'false') != u'true':\r\n js['value'] = _(\"You must accept the terms of service.\").format(field=a)\r\n js['field'] = 'terms_of_service'\r\n return JsonResponse(js, status=400)\r\n\r\n # Confirm appropriate fields are there.\r\n # TODO: Check e-mail format is correct.\r\n # TODO: Confirm e-mail is not from a generic domain (mailinator, etc.)? 
Not sure if\r\n # this is a good idea\r\n # TODO: Check password is sane\r\n\r\n required_post_vars = ['username', 'email', 'name', 'password']\r\n required_post_vars += [fieldname for fieldname, val in extra_fields.items()\r\n if val == 'required']\r\n if tos_required:\r\n required_post_vars.append('terms_of_service')\r\n\r\n for field_name in required_post_vars:\r\n if field_name in ('gender', 'level_of_education'):\r\n min_length = 1\r\n else:\r\n min_length = 2\r\n\r\n if len(post_vars[field_name]) < min_length:\r\n error_str = {\r\n 'username': _('Username must be minimum of two characters long'),\r\n 'email': _('A properly formatted e-mail is required'),\r\n 'name': _('Your legal name must be a minimum of two characters long'),\r\n 'password': _('A valid password is required'),\r\n 'terms_of_service': _('Accepting Terms of Service is required'),\r\n 'honor_code': _('Agreeing to the Honor Code is required'),\r\n 'level_of_education': _('A level of education is required'),\r\n 'gender': _('Your gender is required'),\r\n 'year_of_birth': _('Your year of birth is required'),\r\n 'mailing_address': _('Your mailing address is required'),\r\n 'goals': _('A description of your goals is required'),\r\n 'city': _('A city is required'),\r\n 'country': _('A country is required')\r\n }\r\n js['value'] = error_str[field_name]\r\n js['field'] = field_name\r\n return JsonResponse(js, status=400)\r\n\r\n max_length = 75\r\n if field_name == 'username':\r\n max_length = 30\r\n\r\n if field_name in ('email', 'username') and len(post_vars[field_name]) > max_length:\r\n error_str = {\r\n 'username': _('Username cannot be more than {0} characters long').format(max_length),\r\n 'email': _('Email cannot be more than {0} characters long').format(max_length)\r\n }\r\n js['value'] = error_str[field_name]\r\n js['field'] = field_name\r\n return JsonResponse(js, status=400)\r\n\r\n try:\r\n validate_email(post_vars['email'])\r\n except ValidationError:\r\n js['value'] = _(\"Valid e-mail is required.\").format(field=a)\r\n js['field'] = 'email'\r\n return JsonResponse(js, status=400)\r\n\r\n try:\r\n validate_slug(post_vars['username'])\r\n except ValidationError:\r\n js['value'] = _(\"Username should only consist of A-Z and 0-9, with no spaces.\").format(field=a)\r\n js['field'] = 'username'\r\n return JsonResponse(js, status=400)\r\n\r\n # enforce password complexity as an optional feature\r\n if settings.FEATURES.get('ENFORCE_PASSWORD_POLICY', False):\r\n try:\r\n password = post_vars['password']\r\n\r\n validate_password_length(password)\r\n validate_password_complexity(password)\r\n validate_password_dictionary(password)\r\n except ValidationError, err:\r\n js['value'] = _('Password: ') + '; '.join(err.messages)\r\n js['field'] = 'password'\r\n return JsonResponse(js, status=400)\r\n\r\n # Ok, looks like everything is legit. 
Create the account.\r\n try:\r\n with transaction.commit_on_success():\r\n ret = _do_create_account(post_vars)\r\n except AccountValidationError as e:\r\n return JsonResponse({'success': False, 'value': e.message, 'field': e.field}, status=400)\r\n\r\n (user, profile, registration) = ret\r\n\r\n dog_stats_api.increment(\"common.student.account_created\")\r\n create_comments_service_user(user)\r\n\r\n context = {\r\n 'name': post_vars['name'],\r\n 'key': registration.activation_key,\r\n }\r\n\r\n # composes activation email\r\n subject = render_to_string('emails/activation_email_subject.txt', context)\r\n # Email subject *must not* contain newlines\r\n subject = ''.join(subject.splitlines())\r\n message = render_to_string('emails/activation_email.txt', context)\r\n\r\n # don't send email if we are doing load testing or random user generation for some reason\r\n if not (settings.FEATURES.get('AUTOMATIC_AUTH_FOR_TESTING')):\r\n from_address = microsite.get_value(\r\n 'email_from_address',\r\n settings.DEFAULT_FROM_EMAIL\r\n )\r\n try:\r\n if settings.FEATURES.get('REROUTE_ACTIVATION_EMAIL'):\r\n dest_addr = settings.FEATURES['REROUTE_ACTIVATION_EMAIL']\r\n message = (\"Activation for %s (%s): %s\\n\" % (user, user.email, profile.name) +\r\n '-' * 80 + '\\n\\n' + message)\r\n send_mail(subject, message, from_address, [dest_addr], fail_silently=False)\r\n else:\r\n user.email_user(subject, message, from_address)\r\n except Exception: # pylint: disable=broad-except\r\n log.warning('Unable to send activation email to user', exc_info=True)\r\n js['value'] = _('Could not send activation e-mail.')\r\n # What is the correct status code to use here? I think it's 500, because\r\n # the problem is on the server's end -- but also, the account was created.\r\n # Seems like the core part of the request was successful.\r\n return JsonResponse(js, status=500)\r\n\r\n # Immediately after a user creates an account, we log them in. They are only\r\n # logged in until they close the browser. 
They can't log in again until they click\r\n # the activation link from the email.\r\n login_user = authenticate(username=post_vars['username'], password=post_vars['password'])\r\n login(request, login_user)\r\n request.session.set_expiry(0)\r\n\r\n # TODO: there is no error checking here to see that the user actually logged in successfully,\r\n # and is not yet an active user.\r\n if login_user is not None:\r\n AUDIT_LOG.info(u\"Login success on new account creation - {0}\".format(login_user.username))\r\n\r\n if DoExternalAuth:\r\n eamap.user = login_user\r\n eamap.dtsignup = datetime.datetime.now(UTC)\r\n eamap.save()\r\n AUDIT_LOG.info(\"User registered with external_auth %s\", post_vars['username'])\r\n AUDIT_LOG.info('Updated ExternalAuthMap for %s to be %s', post_vars['username'], eamap)\r\n\r\n if settings.FEATURES.get('BYPASS_ACTIVATION_EMAIL_FOR_EXTAUTH'):\r\n log.info('bypassing activation email')\r\n login_user.is_active = True\r\n login_user.save()\r\n AUDIT_LOG.info(u\"Login activated on extauth account - {0} ({1})\".format(login_user.username, login_user.email))\r\n\r\n dog_stats_api.increment(\"common.student.account_created\")\r\n redirect_url = try_change_enrollment(request)\r\n\r\n # Resume the third-party-auth pipeline if necessary.\r\n if settings.FEATURES.get('ENABLE_THIRD_PARTY_AUTH') and pipeline.running(request):\r\n running_pipeline = pipeline.get(request)\r\n redirect_url = pipeline.get_complete_url(running_pipeline['backend'])\r\n\r\n response = JsonResponse({\r\n 'success': True,\r\n 'redirect_url': redirect_url,\r\n })\r\n\r\n # set the login cookie for the edx marketing site\r\n # we want this cookie to be accessed via javascript\r\n # so httponly is set to None\r\n\r\n if request.session.get_expire_at_browser_close():\r\n max_age = None\r\n expires = None\r\n else:\r\n max_age = request.session.get_expiry_age()\r\n expires_time = time.time() + max_age\r\n expires = cookie_date(expires_time)\r\n\r\n response.set_cookie(settings.EDXMKTG_COOKIE_NAME,\r\n 'true', max_age=max_age,\r\n expires=expires, domain=settings.SESSION_COOKIE_DOMAIN,\r\n path='/',\r\n secure=None,\r\n httponly=None)\r\n return response", "def register(request):\n # Redirect succesful logins to `next` if set.\n # Failing that `redirect_url`.\n # Failing that, LOGIN_REDIRECT_URL from settings.py.\n redirect_uri = post_or_get(\n request, 'next', fallback=post_or_get(\n request, 'redirect_url', fallback=settings.LOGIN_REDIRECT_URL))\n redirect_absolute_uri = add_query_params_to_url(\n request.build_absolute_uri(redirect_uri),\n {'auth_user': request.user.get_username()})\n\n if request.method == 'POST':\n form = RegistrationForm(request.POST)\n # Fails if the form is not valid\n user = datahub_register_user(form)\n if user is not None and user.is_active:\n django_login(request, user)\n # Append auth_user to redirect_uri so apps like Kibitz can\n # pull the username out of the redirect. 
This should be\n # removed when Thrift is removed from DataHub.\n redirect_uri = add_query_params_to_url(\n redirect_uri, {'auth_user': request.user.get_username()})\n return HttpResponseRedirect(redirect_uri)\n else:\n form = RegistrationForm()\n\n providers = provider_details()\n context = RequestContext(request, {\n 'request': request,\n 'user': request.user,\n 'form': form,\n 'providers': providers,\n 'next': redirect_uri,\n 'absolute_next': redirect_absolute_uri})\n return render_to_response('register.html', context_instance=context)", "def validate(cls, key):\n\n T = current.T\n\n def register_onvalidation(form):\n\n code = form.vars.get(\"code\")\n\n account = cls.account(key, code)\n if not account:\n form.errors[\"code\"] = T(\"Invalid Registration Code\")\n return\n\n email = form.vars.get(\"email\")\n\n from gluon.validators import ValidationError\n auth = current.auth\n utable = auth.settings.table_user\n dbset = current.db(utable.id != account.id)\n requires = IS_NOT_IN_DB(dbset, \"%s.email\" % utable._tablename)\n try:\n requires.validate(email)\n except ValidationError:\n form.errors[\"email\"] = auth.messages.duplicate_email\n return\n\n onvalidation = current.auth.settings.register_onvalidation\n if onvalidation:\n from gluon.tools import callback\n callback(onvalidation, form, tablename=\"auth_user\")\n\n return register_onvalidation", "def registration(request):\n if request.user.is_authenticated:\n # logged in users can't go to registration page, send them back to challenges page\n messages.error(request, 'You are already a registered user.')\n return redirect(reverse('challenges'))\n\n if request.method == \"POST\":\n registration_form = UserRegistrationFrom(request.POST)\n if registration_form.is_valid():\n registration_form.save()\n # login user automatically\n user = auth.authenticate(username=request.POST['username'],\n password=request.POST['password1'])\n\n if user:\n # first time need to create a profile and set up service_level as Free and add order\n product = user.profile.get_product_level()\n profile = Profile.objects.get(user=user)\n profile.product_level = product\n profile.save()\n\n # create order for free product\n Order.objects.create(\n user=user,\n product=product,\n total=product.price,\n payment_status='payment_collected',\n )\n\n # auto login newly created user\n auth.login(user=user, request=request)\n messages.success(request, \"You have successfully registered.\")\n\n return redirect(reverse('index'))\n else:\n messages.error(request, 'Unable to register your account at this time')\n\n else:\n registration_form = UserRegistrationFrom()\n\n return render(request, 'registration.html', {\"registration_form\": registration_form})", "def _signup(request, eamap, retfun=None):\r\n # save this for use by student.views.create_account\r\n request.session['ExternalAuthMap'] = eamap\r\n\r\n if settings.FEATURES.get('AUTH_USE_CERTIFICATES_IMMEDIATE_SIGNUP', ''):\r\n # do signin immediately, by calling create_account, instead of asking\r\n # student to fill in form. 
MIT students already have information filed.\r\n username = eamap.external_email.split('@', 1)[0]\r\n username = username.replace('.', '_')\r\n post_vars = dict(username=username,\r\n honor_code=u'true',\r\n terms_of_service=u'true')\r\n log.info('doing immediate signup for %s, params=%s', username, post_vars)\r\n student.views.create_account(request, post_vars)\r\n # should check return content for successful completion before\r\n if retfun is not None:\r\n return retfun()\r\n else:\r\n return redirect('/')\r\n\r\n # default conjoin name, no spaces, flattened to ascii b/c django can't handle unicode usernames, sadly\r\n # but this only affects username, not fullname\r\n username = re.sub(r'\\s', '', _flatten_to_ascii(eamap.external_name), flags=re.UNICODE)\r\n\r\n context = {'has_extauth_info': True,\r\n 'show_signup_immediately': True,\r\n 'extauth_domain': eamap.external_domain,\r\n 'extauth_id': eamap.external_id,\r\n 'extauth_email': eamap.external_email,\r\n 'extauth_username': username,\r\n 'extauth_name': eamap.external_name,\r\n 'ask_for_tos': True,\r\n }\r\n\r\n # Some openEdX instances can't have terms of service for shib users, like\r\n # according to Stanford's Office of General Counsel\r\n uses_shibboleth = (settings.FEATURES.get('AUTH_USE_SHIB') and\r\n eamap.external_domain.startswith(SHIBBOLETH_DOMAIN_PREFIX))\r\n if uses_shibboleth and settings.FEATURES.get('SHIB_DISABLE_TOS'):\r\n context['ask_for_tos'] = False\r\n\r\n # detect if full name is blank and ask for it from user\r\n context['ask_for_fullname'] = eamap.external_name.strip() == ''\r\n\r\n # validate provided mail and if it's not valid ask the user\r\n try:\r\n validate_email(eamap.external_email)\r\n context['ask_for_email'] = False\r\n except ValidationError:\r\n context['ask_for_email'] = True\r\n\r\n log.info('EXTAUTH: Doing signup for %s', eamap.external_id)\r\n\r\n return student.views.register_user(request, extra_context=context)", "def signup(request):\n try:\n registered = False\n if request.method == 'POST':\n user_form = UserForm(data=request.POST)\n user_form.username = request.POST['email']\n profile_form = UserProfileInfoForm(data=request.POST)\n if user_form.is_valid() and profile_form.is_valid():\n user = User()\n user.first_name = request.POST.get('first_name')\n user.last_name = request.POST.get('last_name')\n user.email = request.POST.get('email')\n user.username = request.POST.get('email')\n user.set_password(request.POST.get('password'))\n user.save()\n profile = profile_form.save(commit=False)\n profile.user = user\n profile.save()\n registered = True\n else:\n print(user_form.errors,profile_form.errors)\n else:\n user_form = UserForm()\n profile_form = UserProfileInfoForm()\n return render(request,'accounts/registration.html',\n {'user_form':user_form,\n 'profile_form':profile_form,\n 'registered':registered})\n except Exception as e:\n return HttpResponse(e, status=500)", "def register(request):\r\n if request.method == 'POST':\r\n form = bforms.UserCreationForm(request.POST)\r\n if form.is_valid():\r\n form.save()\r\n return HttpResponseRedirect('/accounts/login/')\r\n if request.method == 'GET':\r\n form = bforms.UserCreationForm()\r\n payload = {'form':form}\r\n return render(request, 'registration/create_user.html', payload)", "def validate(self, data):\n user_type = 3\n return validate_register_user(self, data, user_type)", "def signup(self, request, user):\n pass", "def __init__(self, *args, **kwargs):\n super(UserCreationForm, self).__init__(*args, **kwargs)\n 
self.fields[\"first_name\"].required = True\n self.fields[\"last_name\"].required = True\n self.fields[\"email\"].required = True", "def registration_form():\n\n return render_template(\"/registration_form.html\")", "def signup():", "def signup(self, code):\n log.info(\"Confirming user with username : \" + self.__username)\n path = 'user'\n signup_info = {\n 'user_name': self.__username,\n \"verification_code\": code\n }\n signup_url = serverconfig.HOST + path\n\n try:\n log.debug(\"Confirm user request url : \" + signup_url)\n response = requests.post(url=signup_url,\n data=json.dumps(signup_info),\n headers=self.__request_header,\n verify=configmanager.CERT_FILE)\n log.debug(\"Confirm user response : \" + response.text)\n response.raise_for_status()\n except requests.exceptions.SSLError:\n raise SSLError\n except requests.exceptions.ConnectionError:\n raise NetworkError\n except Exception:\n raise Exception(response.text)\n log.info(\"Signup successful.\")\n return True", "def register(request, success_url=None,\r\n form_class=RegistrationForm, profile_callback=None,\r\n template_name='registration/registration_form.html',\r\n extra_context=None):\r\n if request.method == 'POST':\r\n form = form_class(data=request.POST, files=request.FILES)\r\n if form.is_valid():\r\n new_user = form.save(profile_callback=profile_callback)\r\n # success_url needs to be dynamically generated here; setting a\r\n # a default value using reverse() will cause circular-import\r\n # problems with the default URLConf for this application, which\r\n # imports this file.\r\n return HttpResponseRedirect(success_url or reverse('registration_complete'))\r\n else:\r\n form = form_class()\r\n \r\n if extra_context is None:\r\n extra_context = {}\r\n context = RequestContext(request)\r\n for key, value in extra_context.items():\r\n context[key] = callable(value) and value() or value\r\n return render_to_response(template_name,\r\n { 'form': form },\r\n context_instance=context)", "def register(self):\n first_name = self.first_name_entry.get()\n insertion = self.insertion_entry.get()\n last_name = self.last_name_entry.get()\n zip_code = self.zip_entry.get()\n streetnumber = self.streetnumber_entry.get()\n email = self.email_entry.get()\n\n # Validate input\n if self.is_valid(first_name, last_name, zip_code, streetnumber, email):\n d = self.convert(first_name, insertion, last_name, zip_code, streetnumber, email)\n\n check = User(self.ov).register(d['first_name'], d['insertion'], d['last_name'], d['zip_code'], d['streetnumber'], d['email'])\n\n if check:\n user = User(self.ov)\n\n self.frame.pack_forget()\n MainScreen(self.master, user)\n\n return True\n else:\n return False", "def register(request):\n phone_number = request.POST.get('phone_number')\n first_name = request.POST.get('first_name')\n last_name = request.POST.get('last_name')\n password = request.POST.get('password1')\n password_confirm = request.POST.get('password2')\n\n if not (phone_number and first_name and last_name and password and password_confirm):\n res_body = {\n \"error\": \"phone_number or first_name or last_name or password or password_confirm not provided\"\n }\n return JsonResponse(res_body, status=400)\n\n if User.objects.filter(phone_number=phone_number).exists():\n res_body = {\n \"error\": \"A user with this phone_number does exist\"\n }\n return JsonResponse(res_body, status=400)\n\n form = UserCreationForm(data={\n \"phone_number\": phone_number,\n \"first_name\": first_name,\n \"last_name\": last_name,\n \"password1\": password,\n 
\"password2\": password_confirm\n })\n if form.is_valid():\n new_user = form.save()\n login(request, new_user)\n\n res_body = {\n \"success\": \"User {} successfully registered.\".format(new_user.get_full_name())\n }\n return JsonResponse(res_body, status=201)\n else:\n res_body = {\n 'error': 'provided form is invalid'\n }\n return JsonResponse(res_body, status=400)", "def registration(request):\n cart = Cart(request)\n if request.method == \"POST\": # после отправки формы\n regform = UserCreationForm(request.POST)\n if regform.is_valid(): #валидация полей формы\n reg_f = regform.save(commit=False) # не сохраняем автоматически данные формы\n reg_f.is_staff = False # запрещен вход в административный раздел\n reg_f.is_active = True # активный пользователь\n reg_f.is_superuser = False # не является суперпользователем\n reg_f.date_joined = datetime.now() # дата регистрации\n reg_f.last_login = datetime.now() # дата последней авторизации\n\n reg_f.save() # сохраняем изменения после добавления данных\n\n return redirect('login') # переадресация на главную страницу после регистрации\n else:\n regform = UserCreationForm() # создание объекта формы для ввода данных нового пользователя\n\n assert isinstance(request, HttpRequest)\n return render(\n request,\n 'app/registration.html',\n {\n 'regform': regform, # передача формы в шаблон веб-страницы\n 'cart': cart,\n 'year':datetime.now().year,\n }\n)", "def __init__(self, *args, **kw):\n super(SignupFormExtra, self).__init__(*args, **kw)\n # Put the first and last name at the top\n new_order = self.fields.keyOrder[:-2]\n new_order.insert(0, 'first_name')\n new_order.insert(1, 'last_name')\n self.fields.keyOrder = new_order", "def save(self):\n # First save the parent form and get the user.\n new_user = super(SignupFormExtra, self).save()\n\n new_user.first_name = self.cleaned_data['first_name']\n new_user.last_name = self.cleaned_data['last_name']\n new_user.save()\n\n # Userena expects to get the new user from this form, so return the new\n # user.\n return new_user", "def register(self, form):\n new_user = self.create_inactive_user(form)\n signals.user_registered.send(\n sender=self.__class__, user=new_user, request=self.request\n )\n return new_user", "def register(request):\n backend = get_registration_backend()\n form_class = backend.get_form_class(request)\n template_name = backend.get_registration_template()\n\n if request.method == 'POST':\n form = form_class(data=request.POST, files=request.FILES)\n if form.is_valid():\n new_user = backend.register(request, **form.cleaned_data)\n response = backend.post_registration_redirect(request, new_user)\n #keep the post behaviour exactly the same as django facebook\n\n return response\n else:\n form = form_class()\n\n context = RequestContext(request)\n context['form'] = form\n response = render_to_response(template_name, context_instance=context)\n\n return response", "def create_phone_signup_code(self, phone, password):\r\n code = self.random_code(settings.CODE_LENGTH)\r\n # if phone in [\"+77753721232\", \"+77752470125\", \"+77074443333\", \"+77076799939\"]:\r\n # code = \"4512\"\r\n # else:\r\n # code = \"%0.4d\" % random.randint(0, 9999)\r\n\r\n # mobizonproxy.send_sms(phone, text=u\"{} - Код активации для Pillowz365\".format(code))\r\n activation = Activation(phone=phone,\r\n to_reset=False,\r\n password=make_password(password),\r\n code=code)\r\n activation.save()\r\n return activation", "def attach_custom_user_fields(form_cls, **kwargs):\n new_fields = 
UserFields.query.filter_by(**kwargs).all()\n for field in new_fields:\n validators = []\n if field.required:\n validators.append(InputRequired())\n\n if field.field_type == \"text\":\n input_field = StringField(\n field.name, description=field.description, validators=validators\n )\n elif field.field_type == \"boolean\":\n input_field = BooleanField(\n field.name, description=field.description, validators=validators\n )\n\n setattr(form_cls, f\"fields[{field.id}]\", input_field)", "def order_submitted(request, order_id):\n profile_details = ['first_name', 'last_name', 'running_club', 'address_line_1', 'address_line_2', 'address_line_3', 'town_or_city', 'county', 'postcode']\n details_to_update = False\n marketing_opted_in = False\n order_id = order_id\n # registration_form = UserRegistrationForm(request.POST or None, initial={'email': request.session['email']})\n profile_details = {\n 'email' : request.session.get('email', None), \n 'running_club' : request.session.get('running_club', None),\n 'first_name' : request.session.get('first_name', None), \n 'last_name' : request.session.get('last_name', None),\n 'address_line_1' : request.session.get('address_line_1', None),\n 'address_line_2' : request.session.get('address_line_2', None), \n 'address_line_3' : request.session.get('address_line_3', None), \n 'town_or_city' : request.session.get('town_or_city', None), \n 'county' : request.session.get('county', None), \n 'postcode' : request.session.get('postcode', None) \n }\n if request.method == \"POST\":\n registration_form = UserRegistrationForm(request.POST)\n if registration_form.is_valid():\n registration_form.save()\n user = auth.authenticate(username=request.POST['email'],\n password=request.POST['password1']) \n if user: \n auth.login(user=user, request=request)\n user.profile.running_club = profile_details['running_club']\n user.first_name = profile_details['first_name']\n user.last_name = profile_details['last_name']\n user.profile.address_line_1 = profile_details['address_line_1']\n user.profile.address_line_2 = profile_details['address_line_2']\n user.profile.address_line_3 = profile_details['address_line_3']\n user.profile.town_or_city = profile_details['town_or_city']\n user.profile.county = profile_details['county']\n user.profile.postcode = profile_details['postcode']\n user.profile.save()\n messages.success(request, \"You have successfully registered!\")\n else:\n messages.error(request, \"Could not register\")\n return redirect(reverse('index'))\n else:\n messages.error(request, \"Unable to register your account at this time\") \n else:\n registration_form = UserRegistrationForm(request.POST or None, initial={'email': request.session['email']}) \n return render(request, 'order_submitted.html', {\n 'details_to_update': details_to_update, \n 'marketing_opted_in': marketing_opted_in, \n 'registration_form': registration_form,\n \"order_id\": order_id,\n })", "def register_page(request):\n if request.method == \"POST\":\n form = UserCreationForm(request.POST)\n if form.is_valid():\n form.save()\n data = {'is_ok': True}\n else:\n data = {'is_ok': False, 'msg': form.errors}\n return JsonResponse(data)\n else:\n form = UserCreationForm()\n return render(request, 'register.html', {'form': form})", "def register(request):\n\n data = request.POST.copy() or None\n\n user_creation_form = auth_forms.UserCreationForm(data)\n if user_creation_form.is_bound:\n if user_creation_form.is_valid():\n user = user_creation_form.save()\n\n user = authenticate(username=user.username, 
password=user_creation_form.cleaned_data['password1'])\n login(request, user)\n\n return HttpResponseRedirect(reverse('user', args=(user.username,)))\n\n context = {\n 'user_creation_form': user_creation_form,\n }\n req_ctx = RequestContext(request, context)\n\n return render_to_response('register.html', req_ctx)", "def get_register_form_data(cls, pipeline_kwargs):\r\n # Details about the user sent back from the provider.\r\n details = pipeline_kwargs.get('details')\r\n\r\n # Get the username separately to take advantage of the de-duping logic\r\n # built into the pipeline. The provider cannot de-dupe because it can't\r\n # check the state of taken usernames in our system. Note that there is\r\n # technically a data race between the creation of this value and the\r\n # creation of the user object, so it is still possible for users to get\r\n # an error on submit.\r\n suggested_username = pipeline_kwargs.get('username')\r\n\r\n return {\r\n 'email': cls.get_email(details) or '',\r\n 'name': cls.get_name(details) or '',\r\n 'username': suggested_username,\r\n }", "def signup(request):\r\n return {}", "def create_user_profile(sender, instance, created, **kwargs):\n if created:\n form = PasswordResetForm({'email': instance.email})\n\n if form.is_valid():\n current_site = get_current_site(request=None)\n request = HttpRequest()\n request.META['HTTP_HOST'] = current_site.domain\n form.save(\n request=request,\n use_https=False,\n from_email=settings.DEFAULT_FROM_EMAIL,\n html_email_template_name='registration/new_user_html_password_reset_email.html')", "def signup_form(request):\n return {'signup_form': UserForm()}", "def register(request):#, success_url=reverse('registrationsuccess')):\n\tif request.method == 'POST':\n\t\tform = RegistrationForm(request.POST)\n\t\tif form.is_valid():\n\t\t\tnew_user = RegistrationProfile.objects.create_inactive_user(username=form.cleaned_data['username'],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tpassword=form.cleaned_data['password1'],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\temail=form.cleaned_data['email'],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tname=form.cleaned_data['first_name'],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tsurname=form.cleaned_data['last_name'])\n\t\t\treturn HttpResponseRedirect(reverse('registrationsuccess'))\n\telse:\n\t\tform = RegistrationForm()\n\treturn render_to_response(request, 'registration/registration_form.html', {'form': form })", "def should_be_register_form(self) -> None:\n assert self.is_element_present(*LoginPageLocators.REGISTER_FORM), \"Register form is not presented\"", "def signup(request):\n if request.method == 'POST':\n form = SignUpForm(request.POST)\n if form.is_valid():\n user = form.save(commit=False)\n user.is_active = False\n user.save()\n current_site = get_current_site(request)\n subject = 'Activate Your neighwatch Account'\n message = render_to_string('registration/activation_email.html', {\n 'user': user,\n 'domain': current_site.domain,\n 'uid': urlsafe_base64_encode(force_bytes(user.pk)),\n 'token': account_activation_token.make_token(user),\n })\n user.email_user(subject, message)\n return redirect('account_activation_sent')\n else:\n form = SignUpForm()\n return render(request, 'registration/registration_form.html', {'form': form})", "def Register(self):\n if not self.firstname.text() or \\\n not self.lastname.text() or not self.password.text():\n self.popUp.setText(\"Some fields empty, please fill them\")\n self.popUp.exec_()\n else:\n val = (\n self.username.text(),\n self.firstname.text(),\n self.lastname.text(),\n 
self.password.text(),\n \"Y\"\n )\n if DB.register_account(val): # Check if account is in DB\n self.popUp.setText(\"Registered successfully, please log in\")\n self.popUp.exec_()\n self.displayUi = LoginScreen()\n self.hide()\n self.displayUi.show()\n else:\n self.popUp.setText(\"Whoops, something went wrong!\\\n Please try again\")\n self.popUp.exec_() # popup error wrong username/password", "def save(self, profile_callback=None):\n\n # First, save the parent form\n new_user = super(BodbRegistrationForm, self).save(profile_callback=profile_callback)\n\n # Update user with first, last names\n new_user.first_name = self.cleaned_data['first_name']\n new_user.last_name = self.cleaned_data['last_name']\n new_user.save()\n\n # Update profile with affiliation\n profile = new_user.get_profile()\n profile.affiliation = self.cleaned_data['affiliation']\n profile.save()\n\n cache.set('%d.profile' % new_user.id, profile)\n\n return new_user", "def register():\n\n return render_template(\"auth/registerHere.html\")", "def register():\n form = RegistrationForm()\n if form.validate_on_submit():\n expert_data = Expert(first_name=form.first_name.data,\n last_name=form.last_name.data,\n username=form.username.data,\n title_id=form.title_id.data.id,\n affiliation_id=form.affiliation_id.data.id,\n discipline=form.discipline.data,\n uni_work=form.uni_work.data,\n country=form.country.data,\n specialization=form.specialization.data,\n personal_descr=form.personal_descr.data,\n permission_mention=form.permission_mention.data.name,\n permission_add_question=form.permission_add_question.data.name,\n email=form.email.data,\n password=form.password.data)\n\n # add employee to the database\n db.session.add(expert_data)\n db.session.commit()\n flash('You have successfully registered! 
You may now login.')\n\n # redirect to the login page\n return redirect(url_for('auth.login'))\n\n # load registration template\n return render_template('auth/register.html', form=form, title='Register')", "def register():\n\n if current_user is not None and current_user.is_authenticated():\n return redirect(url_for(\"user.profile\"))\n\n if current_app.config[\"RECAPTCHA_ENABLED\"]:\n from flaskbb.auth.forms import RegisterRecaptchaForm\n form = RegisterRecaptchaForm(request.form)\n else:\n from flaskbb.auth.forms import RegisterForm\n form = RegisterForm(request.form)\n\n if form.validate_on_submit():\n user = form.save()\n login_user(user)\n\n flash((\"Thanks for registering\"), \"success\")\n return redirect(url_for(\"user.profile\", username=current_user.username))\n return render_template(\"auth/register.html\", form=form)", "def registration():\n form = forms.RegistrationForm()\n if form.validate_on_submit():\n models.User.create_user(username=form.username.data,\n email=form.email.data,\n password=form.password.data)\n return redirect(url_for('login'))\n return render_template(\"register.html\", form=form)", "def test_register_information(self):\n form = UserRegistrationForm(\n {\n 'email': 'testing@gmail.com', \n 'username': 'Name', \n 'password1': 'SuperSecretPassword', \n 'password2': 'SuperSecretPassword'\n })\n self.assertTrue(form.is_valid())", "def custom_profile_fields(self, custom_profile_fields):\n\n self._custom_profile_fields = custom_profile_fields", "def register(request):\n context, preferences = _get_context_pref()\n\n if request.method == 'POST':\n form = KursAnmeldungForm(request.POST)\n #self.page_msg(self.request.POST)\n if form.is_valid():\n # Create, but don't save the new instance.\n new_entry = form.save(commit=False)\n\n rnd_hash = crypt.get_new_seed()\n new_entry.verify_hash = rnd_hash\n\n new_entry.log(request, \"created\")\n\n # Save the new instance.\n new_entry.save()\n\n # save many-to-many data\n form.save_m2m()\n\n _send_verify_email(request, preferences, new_entry, rnd_hash, new_entry)\n\n # Save new log entries\n new_entry.save()\n\n new_location = reverse(\"KursAnmeldung-register_done\")\n return HttpResponseRedirect(new_location)\n else:\n form = KursAnmeldungForm()\n\n context[\"form\"] = form\n return context", "def users_register(self):\n content = request.form\n if not USERS_REGISTER_MANDATORY_FIELDS.issubset(content.keys()):\n self.logger.debug((messages.MISSING_FIELDS_ERROR % (USERS_REGISTER_MANDATORY_FIELDS - set(content.keys()))))\n return messages.ERROR_JSON % (\n messages.MISSING_FIELDS_ERROR % (USERS_REGISTER_MANDATORY_FIELDS - set(content.keys()))), 400\n photo = None\n if 'photo' in request.files:\n photo = Photo.from_bytes(request.files['photo'].stream)\n try:\n self.auth_server.user_register(email=content[\"email\"], fullname=content[\"fullname\"],\n phone_number=content[\"phone_number\"], photo=photo,\n plain_password=content[\"password\"])\n except UserAlreadyRegisteredError:\n self.logger.debug(messages.USER_ALREADY_REGISTERED_MESSAGE % content[\"email\"])\n return messages.ERROR_JSON % messages.USER_ALREADY_REGISTERED_MESSAGE % content[\"email\"], 400\n except InvalidRegisterFieldError as e:\n self.logger.debug(str(e))\n return messages.ERROR_JSON % str(e), 400\n return messages.SUCCESS_JSON, 200", "def save(self):\n # First save the parent form and get the user.\n new_user = super(SignupFormExtra, self).save()\n\n # Get the profile, the `save` method above creates a profile for each\n # user because it calls the manager 
method `create_user`.\n # See: https://github.com/django-userena-ce/django-userena-ce/blob/master/userena/managers.py#L65\n profile = new_user.my_profile\n profile.gender = self.cleaned_data['gender']\n profile.education = self.cleaned_data['education']\n profile.birthday = self.cleaned_data['birthday']\n profile.annual_income = self.cleaned_data['annual_income']\n profile.save()\n\n # Userena expects to get the new user from this form, so return the new\n # user.\n return new_user", "def signup(**kwargs):\n\n pass", "def activate_profile(field, code, request):\n try:\n activation = ActivationProfile.objects.get(**{field:code})\n except ActivationProfile.DoesNotExist:\n messages.error(request, _('Activation code expired or not valid!'))\n return False\n if timezone.now() < activation.valid_through:\n activation.user.is_active = True\n activation.user.set_unusable_password()\n activation.user.save()\n if request.user.is_anonymous():\n if field == 'token':\n user = authenticate(username=activation.user.username, token=activation.token)\n elif field == 'sms_key':\n user = authenticate(username=activation.user.username, code=activation.sms_key)\n else:\n user = None\n activation.delete()\n if user:\n login(request, user)\n messages.success(request, _(\"\"\"Profile activated successfully! You should change your password!\"\"\"))\n return True\n else:\n return False\n else:\n messages.success(request, _(\"\"\"You already have an account!\"\"\"))\n return False", "def create_custom_user(sender, instance, signal, created, **kwargs):\n from gpsfun.main.User.models import GPSFunUser\n if created:\n GPSFunUser.objects.create(user=instance)\n instance.gpsfunuser.save()", "def user_register(request):\n DEBUG = False\n form = Form(request, RegistrationSchema)\n #mailer = get_mailer(request)\n\n # create a random string for email verification procedure\n # http://stackoverflow.com/questions/2257441/\n # python-random-string-generation-with-upper-case-letters-and-digits\n N = 6\n randomstring = ''.join(random.choice(string.ascii_uppercase\n + string.digits) for x in range(N))\n #print \" -- the random string: \" + randomstring\n\n URL = \"localhost:6543\"\n # ToDo XXX change this to be more generic\n\n if 'form.submitted' in request.POST and not form.validate():\n # form didn't validate\n request.session.flash('form does not validate!')\n if DEBUG: # pragma: no cover\n print \"submitted, but not validated\"\n else: # pragma: NO COVER # just for debugging, RLY\n if DEBUG:\n print \"form.submitted was not seen\"\n pass\n\n if 'form.submitted' in request.POST and form.validate():\n # ready for registration!\n #request.session.flash('form validated!')\n username = unicode(form.data['username'])\n\n message = Message(\n subject=\"C3S: confirm your email address\",\n sender=\"noreply@c-3-s.org\",\n recipients=[form.data['email']],\n body=\"Hello, \" + form.data['surname'] + \", \\n\"\n \"Please confirm your email address by clicking this link: \\n\"\n \"http://\" + URL + \"/user/confirm/\" + randomstring + \"/\"\n + form.data['username'] + \" \\n\"\n \"Thanks!\")\n msg_accountants = Message(\n subject=\"[C3S] new member registration\",\n sender=\"noreply@c-3-s.org\",\n recipients=['christoph@infinipool.com'],\n body=\"Hello \\n\"\n \"A new member has registered with your site: \\n\"\n \"Username: \" + form.data['username'] + \" \\n\"\n \"First name: \" + form.data['surname'] + \" \\n\"\n \"Last name: \" + form.data['lastname'] + \" \\n\"\n \"Email: \" + form.data['email'] + \" \\n\"\n \"Thanks!\")\n\n user = 
User(\n username=username,\n password=unicode(form.data['password']),\n surname=unicode(form.data['surname']),\n lastname=unicode(form.data['lastname']),\n email=unicode(form.data['email']),\n email_is_confirmed=False,\n email_confirm_code=unicode(randomstring),\n phone=unicode(form.data['phone']),\n fax=unicode(form.data['fax']),\n )\n user.set_address(street=unicode(form.data['street']),\n number=unicode(form.data['number']),\n postcode=unicode(form.data['postcode']),\n city=unicode(form.data['city']),\n country=unicode(form.data['country']),\n )\n\n user_group = Group.get_Users_group()\n user.groups = [user_group]\n\n # dbsession.add(user)\n dbsession.flush(user)\n\n #\n # boto stuff: creating a bucket for that user\n # don't do that -- we better have one bucket for all tracks...\n #\n # from boto.exception import S3CreateError, BotoServerError\n # try:\n # c3sI2Conn.create_named_bucket(username)\n # request.session.flash(u'created bucket for ' + username)\n # except BotoServerError, e:\n # print(\"There was an error: \" + str(e) )\n # except S3CreateError, e:\n # print(\"There was an error: \" + str(e) )\n #\n # send email\n try:\n if DEBUG: # pragma: no cover\n print(\"sending email........\")\n else:\n pass\n #mailer.send(message)\n #mailer.send(msg_accountants)\n\n # instead of sending mails, we inform in-browser\n request.session.flash(\n 'DEBUG: not sending email. to test email confirmation view, '\n 'append this to URL to confirm email: /user/confirm/'\n + randomstring + '/'\n + str(user.username) + '/' + str(form.data['email']))\n except: # pragma: no cover\n print \"could not send email. no mail configured?\"\n\n # remember who this was == sign in user == log her in\n headers = remember(request, username)\n\n redirect_url = route_url('home', request)\n\n return HTTPFound(location=redirect_url, headers=headers)\n\n return {'form': FormRenderer(form), }", "def register():\r\n form = RegisterForm(request.form)\r\n\r\n if request.method == 'POST' and form.validate():\r\n new_user = User(form.email.data, form.password.data)\r\n g.session.add(new_user)\r\n g.session.commit()\r\n\r\n new_profile = Profile(form.first_name.data, form.last_name.data, new_user.id)\r\n g.session.add(new_profile)\r\n g.session.commit()\r\n # TODO: make it async\r\n if current_app.config[\"REQUIRE_EMAIL_CONFIRMATION\"]:\r\n send_confirmation(new_user)\r\n new_user.init_folders()\r\n logout_user()\r\n return redirect(url_for(\".login\"))\r\n return render_template(\"account/register_user.pug\", form=form)", "def register():\n (status, userRecord) = cs411_user.registerUser(\n request.form.get('username', 'USER NAME IS MISSING'),\n request.form.get('password', 'PASSWORD IS MISSING'),\n request.form.get('Email', 'EMAIL IS MISSING'),\n request.form.get('UFirst_Name', 'FIRST NAME IS MISSING'),\n request.form.get('ULast_Name', 'LAST NAME IS MISSING')\n )\n if status is False: raise InvalidUsage(userRecord[\"message\"], 403)\n else: return prepJSON(userRecord)", "def registerDesigner():\n\tif request.method == 'POST':\n\t\tdata = request.form.to_dict(flat=False)\n\t\tstate, result = cloud.designerRegister(data)\n\n\t\tif state == Cloud.error.SUCCESS:\n\t\t\treturn result, status.HTTP_200_OK\n\n\t\telif state == Cloud.error.ERROR:\n\t\t\treturn {'Error': result}, status.HTTP_400_BAD_REQUEST\n\n\t\telif state == Cloud.error.USER_EXISTS:\n\t\t\treturn {'Error':result}, status.HTTP_200_OK\n\t\t\n\t\telse:\n\t\t\treturn result, status.HTTP_400_BAD_REQUEST", "def set_user_register(self, *args, **kwargs):\n return 
_uhd_swig.usrp_source_set_user_register(self, *args, **kwargs)", "def save(self, commit=True):\n\n email_local_part = self.cleaned_data['email'].split('@')[0]\n username_start = email_local_part[:5] if len(email_local_part) >= 5 else email_local_part\n self.instance.username = username_start + ''.join(\n [choice(ascii_letters) for _ in range(30 - len(username_start))])\n\n return super(RegisterForm, self).save(commit=commit)", "def save_model(self, request, obj, form, change):\n if not change:\n if form.is_valid():\n user = form.save()\n user.identity = Users.SUPERVISOR\n user.set_password(form.data.get('password'))\n user.iCode = InviteCls.encode_invite_code(user.id)\n user.save()\n UserExtra.objects.create(uid=user)\n UserBase.objects.create(\n uid=user,\n phone=user.username\n )\n UserBusiness.objects.create(uid=user)\n else:\n super().save_model(request, obj, form, change)", "def form_valid(self, form):\n # Switching between temporary registration and main registration is easy with the is_active attribute.\n # The withdrawal process will also improve if you only set is_active to False.\n user = form.save(commit=False)\n user.is_active = False\n user.save()\n\n # Send activation URL\n current_site = get_current_site(self.request)\n domain = current_site.domain\n context = {\n 'protocol': 'https' if self.request.is_secure() else 'http',\n 'domain': domain,\n 'token': dumps(user.pk),\n 'user': user,\n }\n\n subject = render_to_string('register/mail_template/create/subject.txt', context)\n message = render_to_string('register/mail_template/create/message.txt', context)\n\n user.email_user(subject, message)\n return redirect('register:user_create_done')", "def post_registration_redirect(self, request, user):\n\t\treturn ('registration_complete', (), {})", "def __init__(self, *args, **kwargs):\n super(SignupForm, self).__init__(*args, **kwargs)\n self.fields['email'].required = True\n self.fields['first_name'].required = True\n self.fields['password'].widget = forms.PasswordInput() \n\n for field in self.fields:\n self.fields[field].widget.attrs.update(\n {\n 'class': 'form-control',\n }\n )", "def render_custom_fields(form):\n return {\n 'form': form,\n }", "def on_user_create(self, user):", "def registerPage(request):\n if request.user.is_authenticated:\n return redirect('indexPage')\n form = PersonalUserCreationForm()\n if request.method == 'POST':\n form = PersonalUserCreationForm(request.POST)\n if form.is_valid():\n form.save()\n user = form.cleaned_data.get('username')\n messages.success(\n request, 'Un nouveau compte vient d\\'être créé pour ' + user\n )\n return redirect('loginPage')\n context.update({'form': form})\n return render(request, 'register.html', context)", "def __init__(self, request=None, *args, **kwargs):\n # self.request = request\n\n super(RegistrationForm, self).__init__(*args, **kwargs)", "def on_signup(self, data):\n self.participant_id = data[\"participant\"][\"id\"]", "def register(self):\r\n if self.fields_not_empty(request, [\"first_name\", \"last_name\", \"age\", \"CPR\", \"email\", \"phone_number\", \"password\", \"confirm_password\"]):\r\n return jsonify({\"error\": \"Some fields are empty\"}), 400\r\n user = self.create_user_object(request)\r\n if request.form.get(\"password\") != request.form.get(\"confirm_password\"):\r\n return jsonify({\"error\": \"Passwords did not match\"}), 400\r\n db.insert_one(user)\r\n return self.start_session(user)", "def register():\n register_form = RegisterForm() # We're only getting stuff from JSON now\n if not 
register_form.validate():\n return jsonify({\n \"errors\": register_form.errors.items(),\n \"success\": False,\n \"user\": None,\n \"sent_json\": request.json\n })\n\n user = User.create(username=request.json['username'], password=request.json['password'])\n\n g.user = user\n\n return jsonify({\n \"errors\": [],\n \"success\": True,\n \"user\": g.user.username,\n \"sent_json\": request.json\n })", "def register(request, success_url=settings.PREFIX_URL + '/accounts/register/complete/',\n form_class=RegistrationForm,\n template_name='registration/registration_form.html'):\n if hasattr(settings, 'SHOW_DATASET_INTERFACE_OPTIONS') and settings.SHOW_DATASET_INTERFACE_OPTIONS:\n show_dataset_interface = settings.SHOW_DATASET_INTERFACE_OPTIONS\n else:\n show_dataset_interface = False\n\n if request.method == 'POST':\n form = form_class(data=request.POST)\n if form.is_valid():\n new_user = form.save()\n request.session['username'] = new_user.username\n request.session['first_name'] = new_user.first_name\n request.session['last_name'] = new_user.last_name\n request.session['email'] = new_user.email\n groups_of_user = [ g.name.replace('_',' ') for g in new_user.groups.all() ]\n request.session['groups'] = groups_of_user\n\n if hasattr(settings, 'SHOW_DATASET_INTERFACE_OPTIONS') and settings.SHOW_DATASET_INTERFACE_OPTIONS:\n list_of_datasets = request.POST.getlist('dataset[]')\n if '' in list_of_datasets:\n list_of_datasets.remove('')\n\n from django.contrib.auth.models import Group, User\n group_manager = Group.objects.get(name='Dataset_Manager')\n\n motivation = request.POST.get('motivation_for_use', '') # motivation is a required field in the form\n\n # send email to each of the dataset owners\n from django.core.mail import send_mail\n current_site = Site.objects.get_current()\n\n for dataset_name in list_of_datasets:\n # the datasets are selected via a pulldown list, they should exist\n dataset_obj = Dataset.objects.get(name=dataset_name)\n\n # Notify the dataset owners about (accepted) request for access\n owners_of_dataset = dataset_obj.owners.all()\n\n if dataset_obj.is_public:\n\n # Give user access to view the database\n assign_perm('can_view_dataset', new_user, dataset_obj)\n\n for owner in owners_of_dataset:\n\n groups_of_user = owner.groups.all()\n if not group_manager in groups_of_user:\n # this owner can't manage users\n continue\n\n subject = render_to_string('registration/dataset_to_owner_new_user_given_access_subject.txt',\n context={'dataset': dataset_name,\n 'site': current_site})\n # Email subject *must not* contain newlines\n subject = ''.join(subject.splitlines())\n\n message = render_to_string('registration/dataset_to_owner_new_user_given_access.txt',\n context={'dataset': dataset_name,\n 'new_user_username': new_user.username,\n 'new_user_firstname': new_user.first_name,\n 'new_user_lastname': new_user.last_name,\n 'new_user_email': new_user.email,\n 'motivation': motivation,\n 'site': current_site})\n\n # for debug purposes on local machine\n if settings.DEBUG_EMAILS_ON:\n print('owner of dataset: ', owner.username, ' with email: ', owner.email)\n print('message: ', message)\n print('setting: ', settings.DEFAULT_FROM_EMAIL)\n\n send_mail(subject, message, settings.DEFAULT_FROM_EMAIL, [owner.email])\n\n else:\n\n for owner in owners_of_dataset:\n\n groups_of_user = owner.groups.all()\n if not group_manager in groups_of_user:\n # this owner can't manage users\n continue\n\n from django.core.mail import send_mail\n current_site = Site.objects.get_current()\n\n subject = 
render_to_string('registration/dataset_to_owner_user_requested_access_subject.txt',\n context={'dataset': dataset_name,\n 'site': current_site})\n # Email subject *must not* contain newlines\n subject = ''.join(subject.splitlines())\n\n message = render_to_string('registration/dataset_to_owner_user_requested_access.txt',\n context={'dataset': dataset_name,\n 'new_user_username': new_user.username,\n 'new_user_firstname': new_user.first_name,\n 'new_user_lastname': new_user.last_name,\n 'new_user_email': new_user.email,\n 'motivation': motivation,\n 'site': current_site})\n\n # for debug purposes on local machine\n if settings.DEBUG_EMAILS_ON:\n print('owner of dataset: ', owner.username, ' with email: ', owner.email)\n print('message: ', message)\n print('setting: ', settings.DEFAULT_FROM_EMAIL)\n\n send_mail(subject, message, settings.DEFAULT_FROM_EMAIL, [owner.email])\n\n\n request.session['requested_datasets'] = list_of_datasets\n return HttpResponseRedirect(success_url)\n else:\n # error messages\n messages.add_message(request, messages.ERROR, ('Error processing your request.'))\n # for ff in form.visible_fields():\n # if ff.errors:\n # print('form error in field ', ff.name, ': ', ff.errors)\n # messages.add_message(request, messages.ERROR, ff.errors)\n\n # create a new empty form, this deletes the erroneous fields\n # form = form_class()\n else:\n # this insures that a preselected dataset is available if we got here from Dataset Details\n form = form_class(request=request)\n return render(request,template_name,{ 'form': form,\n 'SHOW_DATASET_INTERFACE_OPTIONS': show_dataset_interface })", "def register():\n\n if request.method == 'POST':\n new_account = Account(fullname = request.form['fullname'],\n email = request.form['email'],\n username = request.form['username'],\n password = request.form['password'])\n \n new_account.save()\n return \"Welcome\"\n else:\n return render_template('register.html')", "def register_user_view(request, account_type):\n if not _is_valid_account_type(account_type):\n raise Http404(\"Page not found.\")\n\n if request.method == \"POST\":\n user_model_form = UserSignupForm(request.POST, prefix=\"user\")\n profile_form = ProfileSignupForm(request.POST, prefix=\"profile\")\n account_type_form = ACCOUNT_TYPE_FORMS[account_type](\n request.POST,\n prefix=(account_type)\n )\n\n if (\n user_model_form.is_valid() and\n profile_form.is_valid() and\n account_type_form.is_valid()\n ):\n\n user = user_model_form.save()\n\n # TODO: Decide whether we would like to continue to parse one form\n # to build user and profile models. If so, recommend removing post-\n # save receiver for user profile in .models.\n\n profile_form = ProfileSignupForm(\n request.POST,\n prefix=\"profile\",\n instance=user.profile\n )\n profile_form.save()\n\n account_type_instance = account_type_form.save(commit=False)\n account_type_instance.profile = user.profile\n account_type_instance.save()\n\n _send_registration_email(request, user, account_type)\n\n return redirect('activate_notification')\n\n elif request.method == \"GET\":\n user_model_form = UserSignupForm(prefix=\"user\")\n profile_form = ProfileSignupForm(prefix=\"profile\")\n account_type_form = ACCOUNT_TYPE_FORMS[account_type](\n prefix=account_type\n )\n\n return render(\n request,\n 'mentorship_profile/register.html',\n {\n 'user_form': user_model_form,\n 'profile_form': profile_form,\n 'account_type_form': account_type_form\n }\n )" ]
[ "0.6838169", "0.66669655", "0.6514536", "0.6208826", "0.6137646", "0.6134475", "0.6013371", "0.6006505", "0.6004355", "0.5999936", "0.5989894", "0.5950262", "0.59496284", "0.5925366", "0.59166074", "0.58668554", "0.58395535", "0.5811587", "0.5776874", "0.5757824", "0.57516545", "0.57429975", "0.5727455", "0.5722573", "0.57154155", "0.5713376", "0.57004833", "0.56962436", "0.56723225", "0.5664976", "0.56482625", "0.5634919", "0.5633861", "0.5630894", "0.56139666", "0.5605681", "0.5596647", "0.55962014", "0.5595959", "0.5590375", "0.558628", "0.55857366", "0.5582862", "0.55775315", "0.55744076", "0.5571812", "0.55676544", "0.55388826", "0.55336434", "0.5530489", "0.5529287", "0.55272394", "0.5514732", "0.5505611", "0.54973066", "0.54880226", "0.54878134", "0.5485113", "0.5479753", "0.5475523", "0.54744387", "0.5464715", "0.54642767", "0.54639924", "0.5460506", "0.5440495", "0.5428641", "0.5423138", "0.5421519", "0.541232", "0.5401995", "0.5398715", "0.5396769", "0.5382721", "0.53825986", "0.5375803", "0.5375277", "0.5372685", "0.537144", "0.5366751", "0.5361332", "0.5358024", "0.5358022", "0.53556746", "0.5354932", "0.53385967", "0.5332329", "0.5321325", "0.53114885", "0.53106177", "0.5302026", "0.5294324", "0.5294237", "0.52914447", "0.5290134", "0.52797645", "0.5276968", "0.5271461", "0.5269611", "0.5262902" ]
0.78658634
0
Custom init to persist the obj parameter to the rest of the form
def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) obj = kwargs.get("obj") if obj: self.obj = obj
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, *args, **kwargs):\n\n self._obj = kwargs.get('obj', None)\n super(ModelForm, self).__init__(*args, **kwargs)", "def __init__(self, obj, attribs):\n self.obj = obj\n self.attribs = attribs\n if self.obj:\n self._save()", "def __init__(self, obj, field, value):\n self._object = obj\n self._field = field\n self._value = value", "def __init__(self,obj):\n self.nature_libelle = obj['NatureLibelle']\n self.ins_nom = obj['InsNom']\n self.ins_numero_install = obj['InsNumeroInstall']\n self.equipement_id = obj['EquipementId']", "def __init__(field, form, content):", "def __init__(field, form, content):", "def init(self, obj):\n obj_dict = {'name': obj.get_obj_name(),\n 'properties': obj.get_obj_properties(),\n 'actions': []}\n\n self.log_data[obj.get_obj_id()] = obj_dict", "def __init__(self, attck_obj = None, **kwargs):\n\n self.attck_obj = attck_obj\n\n self.id = super(AttckTools, self)._set_id(kwargs)\n self.name = super(AttckTools, self)._set_attribute(kwargs, 'name')\n self.alias = super(AttckTools, self)._set_attribute(kwargs, 'aliases')\n self.description = super(AttckTools, self)._set_attribute(kwargs, 'description')\n self.reference = super(AttckTools, self)._set_reference(kwargs)\n self.created = super(AttckTools, self)._set_attribute(kwargs, 'created')\n self.modified = super(AttckTools, self)._set_attribute(kwargs, 'modified')\n self.stix = super(AttckTools, self)._set_attribute(kwargs, 'id')\n self.type = super(AttckTools, self)._set_attribute(kwargs, 'type')\n self.wiki = super(AttckTools, self)._set_wiki(kwargs)\n self.contributor = super(AttckTools, self)._set_attribute(kwargs, 'contributor')", "def __init__(self, **kwargs):\n fields = get_fields(type(self))\n fields = dict((field.field_name, field) for field in fields)\n for name, value in kwargs.items():\n object.__setattr__(self, name, value)\n \n # Get the default values\n if kwargs:\n for name, field in fields.items():\n if not field.auto_increment and not name in kwargs:\n default = field.default\n if default is None:\n raise ValueError(\"the field {} of model {} has no \" \\\n \"default value\".format(field.field_name,\n type(self)))\n elif callable(default):\n default = default(self)\n\n object.__setattr__(self, name, default)\n \n # If named parameters were specified, save the object\n if kwargs and Model.data_connector:\n with Model.data_connector.u_lock:\n Model.data_connector.add_object(self)", "def __init__(self, obj, *args, **kwargs):\n self.obj_ = obj\n super(ArtificialRV, self).__init__(*args, **kwargs)", "def __init__(self, handler=None, formdata=None, obj=None, prefix='', **kwargs):\n if handler:\n self._handler = handler\n super(Form, self).__init__(formdata=TornadoInputWrapper(self._handler), obj=obj, prefix=prefix, **kwargs)", "def from_obj(self, obj):\n self.__obj = obj\n self.__obj.swagger_types = self.swagger_types\n self.__obj.swagger_map = self.swagger_map", "def __init__(self, *args, **kwargs):\n\t\t\n\t\tinstance = kwargs.get('instance', None)\n\t\tinitial = kwargs.pop('initial', None)\n\t\tif instance is not None:\n\t\t\tif initial is None:\n\t\t\t\tinitial = {}\n\t\t\t\tinitial['ingredient_name'] = instance.ingredient.name\n\t\t\t\tinitial['unit_name'] = instance.unit.name\n\t\t\tif initial is not None:\n\t\t\t\tkwargs['initial'] = initial\n\t\tsuper(RecipeIngredientForm, self).__init__(*args, **kwargs)", "def __init__(self, **params):\n self.__object = object_param(**params)", "def __init__ (self, *k, **kw):\n self.newobj = True\n self.keyvals = {}\n self.locals = []\n self.reinit 
()\n for i in self.locals:\n fieldobj = object.__getattribute__(self, i)\n if kw.has_key (i):\n self.keyvals[i] = kw[i]\n else:\n if fieldobj.required == True:\n if fieldobj.default is not None:\n self.keyvals[i] = fieldobj.default\n else:\n raise Exception (\"Need a default value for %s\" % (i))", "def __init__(self, **kwargs):\n self.__dict__.update(kwargs)", "def __init__(self, **kwargs):\n self.__dict__.update(kwargs)", "def __init__( self, **kwargs ):\n self.__dict__.update( kwargs )", "def __init__(self):\n self.model = self.load_model()\n self.form_html = self.create_form_html()", "def __init__(self, instance=None):\n self.instance = instance\n\n for name, field in self.hidden_fields.items():\n self.hidden_fields[name] = getattr(self.instance, name)", "def __init__(self, *args, **kwargs):\n super(HiddenModelObjectInputForm, self).__init__(*args, **kwargs)\n self.fields['model'].choices = get_registered_models(\n ignore=IGNORED_MODELS\n )", "def full_init_self(self, db, field_name, model):\n if not self.db:\n self.__class__.db = db\n\n self.field_name = field_name\n self.model = model # property", "def __init__(self, *args, **kwargs):\n user = None\n if 'user' in kwargs:\n user = kwargs.pop('user')\n super(PersonForm, self).__init__(*args, **kwargs)\n if user:\n self.fields['username'].initial = user.username\n self.fields['first_name'].initial = user.first_name\n self.fields['last_name'].initial = user.last_name\n self.fields['email_address'].initial = user.email\n self.fields.keyOrder = [\n 'id', 'username', 'first_name', 'middle_name', 'last_name',\n 'email_address', 'gender',\n 'new_password', 'confirm_new_password', 'signature',\n 'signature_html', 'time_zone', 'language', 'show_signatures',\n 'avatar', 'autosubscribe', 'comment'\n ]", "def __init__(self, **kwargs):\n\t\t# unparse input\t\t\n\t\tif 'obj' in kwargs: \n\t\t\tself.obj = kwargs.pop('obj')\n\t\t\t# sanity check\n\t\t\tif 'dir_obj' in kwargs:\n\t\t\t\tif self.obj.dir_obj != kwargs.pop('dir_obj'):\n\t\t\t\t\traise Exception(\"[operator] conflicting dir_obj entered\")\n\t\telse: \n\t\t\tself.obj = obsObj(**kwargs)\n\n\t\tself.ra = self.obj.ra\n\t\tself.dec = self.obj.dec\n\t\tself.dir_obj = self.obj.dir_obj\n\n\t\t# sanity check\n\t\tif self.dir_obj is None:\n\t\t\traise TypeError('dir_obj not specified')", "def __init__(self, *args, **kwargs):\n super(AddEventForm, self).__init__(*args)\n\n if kwargs.get('current_user') is not None:\n self.fields['speakers'].initial = kwargs.get('current_user')\n\n self.fields['speakers'].label_from_instance = self.label_from_instance", "def __init__(self, obj=None, key=None):\n d = _get(self, \"__dict__\")\n d[\"_obj\"] = obj\n d[\"__key__\"] = key", "def __init__(self, obj):\n self.obj = obj\n self._pkcache = {}\n self._idcache = obj.__class__.__instance_cache__\n self._typecache = defaultdict(dict)\n self.init()", "def __init__(__self__, *,\n object_id: Optional[pulumi.Input[str]] = None):\n if object_id is not None:\n pulumi.set(__self__, \"object_id\", object_id)", "def __init__(self, initial_params, save_name=\"model_param.joblib\"):\n super().__init__()\n self.initial_params = initial_params\n self.save_name = save_name", "def init(self):\n # IMPORTANT: create a new gob database model entry for this object\n self.gobify()", "def __init__(self, dict=None, **kwargs):\n dict = self.validate(dict)\n super().__init__(dict=dict, **kwargs)", "def __init__(self, obj):\n from lxml import objectify\n try:\n self.root = objectify.fromstring(obj.data)\n except:\n # try something else\n 
self.root = objectify.fromstring(obj)\n self.obj = obj", "def __init__(self):\n self.constant_fields = {}\n self.post_score_renames = {}\n self.form = None\n self.form_field_regex = None\n self.field_count = None\n\n self.set_generic_fields()\n self.set_specific_fields()\n self.set_post_score_renames()", "def new(self, obj):\n pass", "def __init__( self\n , _o_data\n ):\n self.o_data = _o_data", "def __init__(self, obj):\n if isinstance(obj, str):\n # The schema given is some kind of handle which we try to open\n self.data = self._get_schema_content(obj)\n else:\n self.data = obj", "def __init__(self, _dict=None, **kwargs):\n \n if _dict is not None:\n self.__dict__.update(_dict)\n self.__dict__.update(kwargs)", "def set_init_args(self, args_obj):\n if self and self[0][0] == '__init__':\n print(\"Only one __init__ step is allowed\")\n return\n self.insert(0, ('__init__', args_obj))", "def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n placeholders = {\n \"first_name\": \"First Name\",\n \"last_name\": \"Last Name\",\n \"default_phone_num\": \"Phone Number\",\n \"default_passport_num\": \"Passport Number\",\n }\n\n self.fields[\"default_phone_num\"].widget.attrs[\"autofocus\"] = True\n for field in self.fields:\n placeholder = placeholders[field]\n self.fields[field].widget.attrs[\"placeholder\"] = placeholder\n self.fields[field].widget.attrs[\n \"class\"\n ] = \"border-black rounded-0 \\\n all-form-input\"\n self.fields[field].label = False\n self.helper = FormHelper()\n self.helper.form_tag = True\n self.helper.layout = Layout(\n Div(\n Field(\n \"first_name\",\n ),\n Field(\n \"last_name\",\n ),\n Field(\n \"default_phone_num\",\n ),\n Field(\n \"default_passport_num\",\n ),\n ),\n ButtonHolder(\n Submit(\"submit\", \"Save\", css_class=\"m-0 btn btn-outline\"),\n ),\n )", "def __init__(self, **kwargs):\n assert self.__class__.get_primary() != None\n for field in self.fields()+self.tables():\n value = object.__getattribute__(self, field)\n instance = value.__class__(*value.args, **value.kwargs)\n instance.model = self\n object.__setattr__(self, field, instance)\n \n for key, val in kwargs.iteritems():\n self.__setattr__(key, val)\n \n self._retrieved = False", "def __init__(self, *args, **kwargs):\n\n # Les lignes suivantes permettent de modifier les label d'un champ dans la page\n super(ModelForm, self).__init__(*args, **kwargs)\n self.fields[\"nom_de_l_evenement\"].label = \"Nom de l'évènement\"\n self.fields[\"date_de_l_evenement\"].label = \"Date de l'évènement\" # utiliser plutôt l'attribut label comme pour AbonnementEvenementForm\n self.fields[\"fichier\"].label = \"Photo(s)\"", "def _init(self, store_value):\n self.store_value = store_value", "def __init__(self, opt):\n # init will store opt into the object.\n super().__init__(opt)\n\n # variable is tripped once a model is requested to save.\n self.save_trip = False", "def __init__(self, *args, **kwargs):\n self.store = dict()\n self.update(dict(*args, **kwargs))", "def _construct_form(self, i, **kwargs):\n defaults = {'auto_id': self.auto_id, 'prefix': self.add_prefix(i)}\n if self.is_bound:\n defaults['data'] = self.data\n defaults['files'] = self.files\n if self.initial:\n try:\n defaults['initial'] = self.initial[i]\n except IndexError:\n pass\n # Allow extra forms to be empty.\n if i >= self.initial_form_count():\n defaults['empty_permitted'] = True\n defaults.update(kwargs)\n form = self.form(self.params[len(self.params) - i - 1][1], self.params[len(self.params) - i - 1][0], i, **defaults) #passando o 
params[i] para o form[i]\n self.add_fields(form, i)\n return form", "def __init__(self):\n self._params = None", "def __init__(self, **kwargs):\n\t\tself.vars = kwargs\n\t\tself.old_vars = None", "def from_dict(cls, obj):\r\n raise NotImplementedError", "def __init__(self, request=None, instance=None, *args, **kwargs):\n #debug_instance(request.customer)\n if request is None:\n raise ValueError(\"Request must not be None\")\n if instance:\n raise ValueError(\"Pass in 'request' instead of 'instance'\")\n\n # for save()\n self.request = request\n\n if request.customer.is_visitor():\n request.customer = CustomerModel.objects.get_or_create_from_request(request)\n super(SubscribeForm, self).__init__(instance=request.customer, *args, **kwargs)", "def __init__(self, qobj):\n self._qobj = qobj\n self._configuration = None # IMPLEMENT for your backend", "def __init__(self, **kwargs):\n # loop over the given kwargs\n for key, value in kwargs.items():\n # treat them like attribute assignments\n setattr(self, key, value)", "def __init__(self, form):\n\t\tself.form = form\n\t\tself.id = form[0]\n\t\t#last name plus the first initial of first name\n\t\tself.name = (form[11] + '_' + form[12][0]).lower()", "def __init__(self, *args, **kwargs):\n if kwargs:\n self.top_bid = kwargs.pop('top_bid')\n self.min_bid = kwargs.pop('min_bid')\n super(BidForm, self).__init__(*args, **kwargs)", "def __init__(self, data={}):\n self._update_(data)", "def form_valid(self, form):\n label = form.cleaned_data[\"label\"]\n\n if \"objects\" not in self.request.session:\n self.request.session[\"objects\"] = OrderedDict()\n if \"forms\" not in self.request.session:\n self.request.session[\"forms\"] = OrderedDict()\n\n self.request.session[\"objects\"].update({label: form.halomod_obj})\n self.request.session[\"forms\"].update({label: form.data})\n\n return super().form_valid(form)", "def __init__(self, **attributes):\n self.set(**attributes)", "def __init__(self, **kwargs):\n # TODO: see if i can remove keyword args\n super().__init__()\n self._updateData = {}", "def __init__(self, text='', _id=None):\n self._id = _id\n self.text = text\n self.form = TodoForm()", "def __init__(self, **kwargs):\n pass", "def __init__(self, **kwargs):\n pass", "def __init__(self, **kwargs):\n pass", "def __init__(self, adict):\n\n self.__dict__.update(adict)\n\n for k, v in adict.items():\n if isinstance(v, dict):\n self.__dict__[k] = ParamObject(v)", "def __init__(self, **kwargs):\n for name, default in self.schema.items():\n # Try to copy the default value with jsonalize's copy(), else use\n # copy.deepcopy()\n try:\n default_copy = copy(default)\n except (JSONAlizeError, TypeError):\n default_copy = deepcopy(default)\n setattr(self, name, \n unserialize(kwargs.pop(name, default_copy)))\n if kwargs:\n raise NameError(\"unknown parameters passed to constructor: %s\" %\n \", \".join(kwargs.keys()))", "def __init__(self, field_info):\n self.field_info = field_info", "def __init__(self, **kw):\r\n self.__dict__.update(kw)", "def __init__(self, from_dict: dict = None):\n for name, field in self.fields.items():\n setattr(self, name, field.default_value)\n\n if from_dict:\n if not isinstance(from_dict, dict):\n raise RuntimeError(\"Param from_dict must be a dictionary object\")\n for field, value in from_dict.items():\n setattr(self, field, value)", "def __init__(self, **kwargs):\n self.__kwargs = kwargs", "def __init__(self, obj):\n self._store = {}\n self.obj = weakref.proxy(obj)", "def get_form(self, request, obj=None, **kwargs):\n if not obj:\n 
kwargs['form'] = ASCreationForm\n return super().get_form(request, obj, **kwargs)", "def __init__(self):\r\n \r\n # Instantiate a data access object \r\n # Contains methods to access the database\r\n self.emp_dao = StaffDAO()\r\n\r\n # Instantiate a validation object\r\n # Contains methods to validate input fields\r\n self.validator = Validation()\r\n\r\n # Form fields\r\n # Instantiate stringvars - hold data entered in fields of form\r\n self.staff_id = tk.StringVar()\r\n self.first_name = tk.StringVar()\r\n self.last_name = tk.StringVar()\r\n self.title = tk.StringVar()\r\n self.email = tk.StringVar()\r\n self.sex = tk.StringVar()\r\n self.contact_no = tk.IntVar()\r\n\r\n # List of employee ids - lb for listbox\r\n self.lb_ids = None\r\n\r\n # Messagebox title\r\n self.mb_title_bar = \"Staff CRUD\"\r\n\r\n pass", "def setup_object(obj):\n for key, conf, value in obj.get_config_vars():\n obj[key] = raw_input_default_config(conf, default=value, obj=obj)", "def __init__(self, obj, parent=None):\n # Call the constructor of the parent class\n logger.info('%s initialization' % obj.name)\n super(self.__class__,self).__init__(obj, parent)\n\n logger.info('Component initialized')", "def __init__(self, **kwargs):\n super(Model, self).__init__(**kwargs)\n\n for (key, value) in kwargs.iteritems():\n # use setattr so that validation is triggered\n setattr(self, key, value)", "def attach(self, obj):\n self.Object = obj.Object", "def set_form(self, form):\n self.parameters = form", "def populate_obj(self, obj):\n for name, field in iteritems(self._fields):\n if name == 'studies':\n for study_form in self.studies.entries:\n study_form.form.populate_obj(\n obj.studies[study_form.study_id.data])\n else:\n field.populate_obj(obj, name)", "def get_form_kwargs(self):\n self.object = self.get_object()\n kwargs = super().get_form_kwargs()\n return kwargs", "def __init__(self, uid, arbor=None, root=False):\n self.uid = uid\n self.arbor = weakref.proxy(arbor)\n if root:\n self.root = -1\n self.field_data = FieldContainer(arbor)\n else:\n self.root = None", "def __init__(self, **kwargs):\n for key, value in kwargs.items():\n setattr(self, key, value)", "def get_initial(self):\n obj = self.get_object()\n print(obj)\n if obj is not None:\n initial_data = model_to_dict(obj)\n print(initial_data)\n initial_data.update(model_to_dict(obj))\n print(initial_data)\n return initial_data\n else:\n return super().get_initial()", "def get_initial(self):\n obj = self.get_object()\n print(obj)\n if obj is not None:\n initial_data = model_to_dict(obj)\n print(initial_data)\n initial_data.update(model_to_dict(obj))\n print(initial_data)\n return initial_data\n else:\n return super().get_initial()", "def __init__(self, name, obj=None):\n super().__init__(name)\n if obj != None:\n self.appendPhandle(obj)", "def __init__(self, *args):\n this = _ida_hexrays.new_lvar_saved_infos_t(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, attck_obj = None, **kwargs):\n\n self.attck_obj = attck_obj\n\n self.id = super(AttckMalware, self)._set_id(kwargs)\n self.created_by_ref = super(AttckMalware, self)._set_attribute(kwargs, 'created_by_ref')\n self.name = super(AttckMalware, self)._set_attribute(kwargs, 'name')\n self.aliases = super(AttckMalware, self)._set_list_items(kwargs, 'x_mitre_aliases')\n self.platforms = super(AttckMalware, self)._set_list_items(kwargs, 'x_mitre_platforms')\n self.labels = super(AttckMalware, self)._set_list_items(kwargs, 'labels')\n self.description = super(AttckMalware, 
self)._set_attribute(kwargs, 'description')\n self.external_references = super(AttckMalware, self)._set_reference(kwargs)\n self.created = super(AttckMalware, self)._set_attribute(kwargs, 'created')\n self.modified = super(AttckMalware, self)._set_attribute(kwargs, 'modified')\n self.stix = super(AttckMalware, self)._set_attribute(kwargs, 'id')\n self.type = super(AttckMalware, self)._set_attribute(kwargs, 'type')\n self.wiki = super(AttckMalware, self)._set_wiki(kwargs)\n self.contributor = super(AttckMalware, self)._set_list_items(kwargs, 'x_mitre_contributors')\n self.revoked = super(AttckMalware, self)._set_attribute(kwargs, 'revoked')", "def __init__(self, *args, **kwargs):\n if 'instance' in kwargs:\n initial = kwargs.setdefault('initial', {})\n # The widget for a ModelMultipleChoiceField expects a list of primary key for the selected data.\n initial['members'] = [\n t.pk for t in kwargs['instance'].recipient_set.all()\n ]\n\n forms.ModelForm.__init__(self, *args, **kwargs)", "def __init__(self, **kwargs):\n for key, val in kwargs.items():\n setattr(self, key, val)", "def __init__(self, **kwargs):\n for key, val in kwargs.items():\n setattr(self, key, val)", "def __init__(self, **kwargs):\n for key, val in kwargs.items():\n setattr(self, key, val)", "def __init__(self, obj, api_prefix):\n self._obj = obj\n self._api_prefix = api_prefix", "def save(self, obj):", "def get_form(self, request, obj=None, **kwargs):\n defaults = {}\n if obj is None:\n defaults.update(\n {\"form\": self.add_form, \"fields\": flatten_fieldsets(self.add_fieldsets)}\n )\n defaults.update(kwargs)\n return super().get_form(request, obj, **defaults)", "def __init__(self, **kwargs):\n self.data_dict = dict()\n self.data_list = dict()\n self.user_id = kwargs[\"user_id\"]", "def __init__(self, request=None, *args, **kwargs):\n self.request = request\n self.user_cache = None\n super(AuthForm, self).__init__(*args, **kwargs)", "def __init__ (self, d):\n try:\n self.__dict__.update (d.__dict__)\n except:\n self.__dict__.update (d)", "def get_form(self, request, obj=None, **kwargs):\n form = super().get_form(request, obj, **kwargs)\n if hasattr(self, 'work_ids'):\n form.base_fields['works'].initial = self.work_ids\n return form", "def __post_init__(self):\n pass", "def __init__(__self__, *,\n email: str,\n name: str,\n object_id: Optional[str] = None):\n pulumi.set(__self__, \"email\", email)\n pulumi.set(__self__, \"name\", name)\n if object_id is not None:\n pulumi.set(__self__, \"object_id\", object_id)", "def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n placeholders = {\n 'business_name': 'Please enter your business name',\n 'user_type': 'Please select the type of user',\n 'phone': 'Phone Number',\n 'postcode': 'Postcode',\n 'city': 'City',\n 'street_address': 'Street Address',\n 'street_address2': 'Street Address 2',\n 'county': 'County',\n 'country': 'Country'\n }\n\n # to force cursor to start in business name field\n self.fields['business_name'].widget.attrs['autofocus'] = True\n for field in self.fields:\n placeholder = placeholders[field]\n self.fields[field].widget.attrs['placeholder'] = False\n self.fields[field].label = placeholder\n self.fields[field].widget.attrs['class'] = 'form-control'", "def __init__(self, *args, **kwargs):\n\n self._caffe = kwargs.pop('caffe')\n\n kwargs.setdefault('label_suffix', '')\n super(ExpenseForm, self).__init__(*args, **kwargs)\n self.fields['name'].label = 'Nazwa'\n self.fields['company'].label = 'Firma'\n self.fields['company'].required = False\n 
self.fields['company'].queryset =\\\n Company.objects.filter(caffe=self._caffe)", "def _init_storage(self):\n if self._ is None:\n self._ = Parameters(self)" ]
[ "0.81270677", "0.7214874", "0.6869673", "0.66981107", "0.66653264", "0.66653264", "0.6648626", "0.65440965", "0.6481102", "0.6445112", "0.6415407", "0.6336561", "0.6326652", "0.63131803", "0.6255758", "0.6211104", "0.6211104", "0.62063265", "0.6182554", "0.613536", "0.6123644", "0.6113366", "0.60831493", "0.6076878", "0.6075449", "0.60648745", "0.6062343", "0.60353625", "0.60159445", "0.5991739", "0.59914494", "0.5970514", "0.59686905", "0.59507304", "0.59279925", "0.59234416", "0.59087884", "0.5896441", "0.5895253", "0.5888329", "0.5881343", "0.5881281", "0.5871344", "0.5866744", "0.5865307", "0.5849548", "0.5846876", "0.58397806", "0.58375907", "0.5834357", "0.58213544", "0.5820893", "0.58140725", "0.5810883", "0.58092517", "0.57865083", "0.57813394", "0.5780546", "0.5779552", "0.5779552", "0.5779552", "0.5777991", "0.5775021", "0.5773753", "0.5770344", "0.57689136", "0.5767329", "0.57628727", "0.5759644", "0.5758276", "0.5756064", "0.57430017", "0.5733189", "0.5732221", "0.573097", "0.57292295", "0.57284", "0.57211417", "0.5716769", "0.5715086", "0.5715086", "0.5705759", "0.57056093", "0.56994164", "0.56954527", "0.5691611", "0.5691611", "0.5691611", "0.5691019", "0.56860256", "0.5682936", "0.5681296", "0.56789714", "0.56729615", "0.5670263", "0.56593055", "0.56579214", "0.56517667", "0.5651316", "0.565011" ]
0.7258114
1
Present the first document in the queue for labeling
def main(): return render_template('doc.html', docid=queue.pop(0))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def first(self):\n\t\tif self.is_empty():\n\t\t\traise Empty('Queue is empty')\n\t\treturn self._head._element", "def first(self):\n if self.is_empty():\n raise ValueError('Queue is empty!')\n return self.root().element().value()", "def first(self):\r\n if self.is_empty():\r\n raise Empty(\"Queue is empty\")\r\n return self._head._element", "def first(self):", "def front(self):\n return self.queue[0] if not self.empty() else None", "def first(self):\n if self.head is None:\n raise Exception(\"nothing in queue\")\n return self.head.value", "def first(self):\n self._current_page = self._get_item(self._current_survey['pages'], 'position', 1)\n question = self._get_item(self._current_page['questions'], 'position', 1)\n self._current_question = self._format_question(question)", "def peek(self):\r\n return self.queue[0]", "def peek(self):\r\n return self.queue[0]", "def first(self):\n if self.is_empty():\n raise Empty('Queue is empty')\n return self._head._element # front aligned with head of list", "def Front(self):\r\n if (len(self.queue) >= 1):\r\n return self.queue[0]\r\n else:\r\n return -1", "def front(self):\n if self.is_empty():\n raise Exception(\"Queue is empty !!! Please add data to the Queue :) \")\n else:\n return self.data[0]", "def peek(self):\n return self.the_queue[0]", "def get_first(self):\n for u in self.user_order:\n if self.user_skip[u] == 0:\n return self.user_queue[u][0].obj\n return None", "def first(self):\n if self.is_empty():\n raise Empty(\"Queue undeflow.\")\n return self._head._element", "def peek(self):\n return self.queue[0]", "def first(self):\n if self.is_empty():\n raise Empty('Queue is empty')\n return self._data[self._front]", "def Front(self):\n return -1 if self.isEmpty() else self.queue[self.start]", "def peek(self):\r\n if self.size():\r\n return self.queue[0]\r\n else:\r\n return None", "def first(self):\r\n return self.__head", "def first_label(self):\n if self.labels:\n return self.labels[0]\n else:\n return None", "def getFront(self):\n\t\tfront = self.queue[self.front]\n\t\treturn front\n\t\tpass", "def show_one_document(idx: int):\n\n document = Document(connection=connection, cursor=cursor)\n document_description = document.get_document_by_id(document_id=idx)\n\n task = Task(connection=connection, cursor=cursor)\n all_document_tasks = task.get_task_by_document_id(document_id=idx)\n\n context = {\n 'document_description': document_description,\n 'all_document_tasks': all_document_tasks\n }\n\n return render_template('pages/document.html', **context)", "def __getitem__(self, doc_label):\n if doc_label not in self.docs:\n raise KeyError('document `%s` not found in corpus' % doc_label)\n return self.docs[doc_label]", "def first(self):\n return self.__head", "def curr_queue(self):\n pass", "def Front(self):\n if self.isEmpty():\n return -1\n else:\n return self.queue[0]", "def Front(self):\n if self.isEmpty():\n return -1\n else:\n return self.queue[0]", "def top(self):\n return self.queue[0]", "def document_view(document_id):\n doc = Document.query.filter(Document.id == document_id).first_or_404()\n queued = QueuedRequest.query.filter(QueuedRequest.document == doc).order_by(\n QueuedRequest.created_at\n ).all()\n queued = sorted(queued, key=lambda x: (x.priority, x.created_at))\n return render_template('admin/documents/view.html', document=doc, queued=queued)", "def first(self):\n if self.is_empty():\n raise Empty(\"Queue is empty.\")\n head = self._tail._next\n return head._element", "def getFirst(self, t):\n index = self._findFirst(t)\n if index 
>= 0:\n return self.jobs[index]\n else:\n return None", "def first(self):\n return self.deque[0]", "def one(self):\n try:\n return self[0]\n except IndexError:\n raise self.document.DoesNotExist", "def get_random_unlabelled_document(self):\n # Return a random unlabelled document or None\n try:\n return self.get_unlabelled_documents_queryset()\\\n .order_by('?')[0]\n except IndexError:\n return None", "def peek(self):\n\n return self._queue[0]", "def getFirstWorker(self):\n return self.entries[0]", "def peek(self):\n record = self.db.crawl_queue.find_and_modify(\n query={'status': self.WAITING},\n update={'$set': {'status': self.PROCESSING, 'timestamp': datetime.now()}}\n )\n if record:\n return record", "def peek(self) -> int: \n if not self.empty(): \n return self.queue[0] \n return None", "def start(self):\n try:\n return self.index[0]\n except:\n pass", "def peek(self):\n if not self.empty():\n return self.queue[-1]\n return None", "def Front(self):\n if self.count == 0:\n return -1\n return self.queue[self.headIndex]", "def get_next_batch_start(self):\n return None", "def first(self):\n if self.head:\n self.cursor = self.head\n return self.cursor\n return None", "def first(self):\n\n bq = self.bq.with_criteria(lambda q: q.slice(0, 1))\n return (\n bq.for_session(self.session)\n .params(self._params)\n ._using_post_criteria(self._post_criteria)\n ._iter()\n .first()\n )", "def first(self):\n self._ll_tree.first()", "def first(self) -> Optional[T]:\n if len(self.entry_finder) == 0:\n return None\n for (_, _, (item,)) in self.priority_queue:\n if item is not None:\n return cast(T, item)\n return None", "def showqueue(self, irc, msg, args):\n if len(self._queue) == 0:\n irc.reply(\"The queue is empty\", private=True)\n return\n pos = self._find_in_queue(msg.nick)\n if pos < 0:\n irc.reply(\"You're not in the queue, did your nick change?\",\n private=True)\n return\n irc.reply(\"You are queued at position %d\" % (pos + 1), private=True)", "def peek(self):\n if len(self.priority_queue.values()):\n nextkey = 0\n while nextkey not in self.priority_queue:\n nextkey += 1\n return self.priority_queue[nextkey][0]\n else:\n raise IndexError(\"There's nothing in your queue\")", "def top(self): # O(1)\n if not self.queue:\n return None\n return self.queue[0]", "def startDocument(self):\n pass", "def startDocument(self):\n pass", "def getfirstmessage(s,refconvdf):\r\n return refconvdf[(refconvdf.convid==s) & (refconvdf.part_type=='initial')].body.iloc[0]", "def front(queue):\n if empty_queue(queue):\n raise IndexError(\"Queue is empty!\")\n else:\n return queue.front.value", "def first(self):\n return _(self._[0])", "def peek(self):\n return self.first", "def next_message(self) -> Optional[MessageQueueItem]:\n if self._message_queue:\n return self._message_queue[0]\n return None", "def getFirst(self):\n if self.first != None:\n return self.first.filename\n else:\n return None", "def get_first(self):\n raise NotImplementedError(\"get_first: You should have implemented this method!\")", "def first_post(self):\r\n try:\r\n return self.post_set.all()[0]\r\n except IndexError:\r\n return None", "def at_first(self):\n return self._collection.at_first()", "def find_document(self):\n pass", "def first(self):\n try:\n data = self.get_cursor()[0]\n return self.from_(**self.prepare_data(data))\n except IndexError:\n return None", "def top(self):\n try: return self._queue[0]\n except IndexError: return 0", "def first(self):\n return self.head and self.head.value or None", "def 
find_start_recognition_message(self):\n messages = self.find_messages_by_type(\"StartRecognition\")\n assert len(messages) == 1\n return messages[0]", "def first(self):\n if self.ordered:\n queryset = self\n else:\n self._check_ordering_first_last_queryset_aggregation(method=\"first\")\n queryset = self.order_by(\"pk\")\n for obj in queryset[:1]:\n return obj", "def display_q(self: Qs, label: str = \"\") -> None:\n\n if label:\n print(label)\n\n for i, ket in enumerate(self.qs, start=1):\n print(f\"n={i}\")\n ket.display_q()\n print(\"\")", "def first(self):\n return self.begin and self.begin.value or None", "def get_document(name):\n document = [d for d in documents if d.name == name]\n if len(document) > 0:\n return document[0]", "def Front(self):\n if self.isEmpty():\n return -1\n else:\n return self.queue[self.front]", "def test_initialization_with_empty_list_first_node_check():\n queue = Queue([])\n assert queue._queue.first_node is None", "def _get_doc(results, index):\n return results[index]", "def peek_front(self):\n\n if self.items:\n return self.items[0]\n return None", "def Front(self) -> int:\n if self.count == 0:\n return -1\n return self.queue[self.headIndex]", "def fetch_next(self):\n if not self._buffer_size() and self.alive:\n # Return the Future, which resolves to number of docs fetched or 0.\n return self._get_more()\n elif self._buffer_size():\n future = self._framework.get_future(self.get_io_loop())\n future.set_result(True)\n return future\n else:\n # Dead\n future = self._framework.get_future(self.get_io_loop())\n future.set_result(False)\n return future", "def peek(self):\n if self.isEmpty(): \n raise Exception(\"Queue underflow\")\n return self._q[self._first]", "def peek(self): # total: O(1)\n return self._queue[self._start] #O(1)", "def head(self):\n if self.isquiet():\n raise QueueEmpty()\n\n qcurr = self.base + \".\" + str(self.curr)\n assert os.path.exists(qcurr)\n qt = open(qcurr, \"r\")\n data = qt.read()\n qt.close()\n return data", "def document(self):\n ...", "def __nextTask(self):\n self.activeWindow().nextTask()", "def first(self) -> Task:\n return self._tasks[0]", "def next_ele(self):\n\t\ttry:\n\t\t\tret = self._queue.get(block = True, timeout=0.5)\n\t\t\tself._queue.task_done()\n\t\t\treturn ret\n\t\texcept queue.Empty:\n\t\t\tif not self.is_running():\n\t\t\t\traise\n\t\t\telse:\n\t\t\t\treturn None", "def start(self):\n self.events[0].record()\n self.cur = 1", "def dequeue(self):", "def display_document_collection(self):\n display_document_collection_sitem = self.locator_finder_by_xpath(self.display_document_collection_id)\n display_document_collection_sitem.click()\n time.sleep(2)", "def assign_labels_first_left(document, label_encoder):\n for sentence in document.sentences:\n for word in sentence.words:\n sorted_probs = sorted(word.tokens[0].predictions)\n label_idx = np.argmax(sorted_probs[0][1], axis=-1)\n word.predicted_label = label_encoder.inv_label_map[label_idx]", "def peek(self):\n return self.list.head", "def getNextUntitled(self):\n self.nextUntitled += 1\n return str(self.nextUntitled)", "def front(self):\n if self.isEmpty():\n return None\n else:\n return self.__head.getPayload()", "def getFirstDocument(address=\"\", database=\"\", collection=\"\"):\n\n document = []\n client = connectMongo(address, database, collection)\n\n document.append(client.find_one())\n\n return document", "def enqueue(self,e):", "def find_one():\n fmter.tpl._straightline(\"one document\", 100)\n result = users.find_one({})\n print(type(result))\n 
ppt(result)\n \n fmter.tpl._straightline(\"none result\", 100)\n result = users.find_one({\"_id\": 100})\n print(type(result))\n ppt(result)", "def first_page(self):\n if self._start == 0:\n raise ValueError('Already at the first page.')\n self._start = 0", "def get_label_by_id(document_id):\n document = Documents.query.filter_by(id=document_id).first()\n if document:\n return document.label\n return document", "def label(self, input_doc=None):\n if input_doc == None:\n input_doc = self.stemmed_corpus\n X = self.vect.transform(input_doc)\n new_corpus = gensim.matutils.Sparse2Corpus(X, documents_columns=False)\n topics = self.ldamodel.get_document_topics(new_corpus)\n max_topic = []\n for tpc in list(topics):\n # get most relevant topic (tuple: 0 = topic, 1 = relevance distribution)\n max_topic.append(max(tpc,key=lambda item:item[1])[0]) \n return max_topic", "def front(self):\n if self.size() < 1:\n return None\n else:\n # TODO: Return min item from heap, if any\n ...", "def peek(self):\n if self.is_empty():\n raise ValueError('Queue underflow')\n return self.first.item", "def front(self) -> str:\n if not self._fully_loaded:\n self._load()\n return self._front", "def next(self):\n\n\t\t# TODO Check if there's a more efficient way to do this\n\t\tlist = PollQuestion.objects.filter(id__gt = self.id, poll = self.poll)\n\t\tlist = list.order_by('id')\n\n\t\tif len(list) < 1:\n\t\t\treturn None\n\n\t\treturn list[0]" ]
[ "0.59403706", "0.5902959", "0.57828766", "0.5667334", "0.56622183", "0.5638245", "0.56327254", "0.5631978", "0.5631978", "0.56066155", "0.56028295", "0.55972004", "0.55730265", "0.5565737", "0.55282176", "0.55227655", "0.55021554", "0.5459005", "0.545217", "0.54516345", "0.54198897", "0.5417603", "0.54121214", "0.54084414", "0.53787446", "0.5369334", "0.5358391", "0.5358391", "0.53351754", "0.53242934", "0.5320919", "0.5318945", "0.5314695", "0.53092057", "0.5298941", "0.5288811", "0.5263648", "0.5254796", "0.52528495", "0.5239339", "0.5207895", "0.51880264", "0.513222", "0.5124063", "0.5122433", "0.5122337", "0.5101964", "0.50919366", "0.5091236", "0.50864506", "0.5056815", "0.5056815", "0.5048114", "0.5047748", "0.50331324", "0.5032232", "0.50317377", "0.50222725", "0.50109965", "0.49867788", "0.49838227", "0.49799713", "0.49604988", "0.49580982", "0.4955815", "0.49537957", "0.49421775", "0.4942072", "0.49294654", "0.49281028", "0.4922917", "0.4914446", "0.49089068", "0.49057093", "0.48993844", "0.4892159", "0.48920545", "0.48838165", "0.48777542", "0.48722398", "0.4871107", "0.48667324", "0.48603064", "0.485954", "0.48475218", "0.48452654", "0.48398042", "0.48387498", "0.483848", "0.48307705", "0.48241138", "0.48125964", "0.47907048", "0.47888184", "0.47884881", "0.47808865", "0.47808045", "0.47795767", "0.47767594", "0.47747687" ]
0.5553572
14
Present a particular document for labeling
def contract(docid): return render_template('doc.html', docid=docid)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def XCAFDoc_DocumentTool_DocLabel(*args):\n return _XCAFDoc.XCAFDoc_DocumentTool_DocLabel(*args)", "def DocLabel(*args):\n return _XCAFDoc.XCAFDoc_DocumentTool_DocLabel(*args)", "def document(self):\n ...", "def doc(docid):\n\t\n\tdata = {'id':docid,\n\t\t\t'text':\"Some made up text for classification\"}\n\treturn _send(data)", "def documento():\r\n\tpass", "def edit_document():", "def show_one_document(idx: int):\n\n document = Document(connection=connection, cursor=cursor)\n document_description = document.get_document_by_id(document_id=idx)\n\n task = Task(connection=connection, cursor=cursor)\n all_document_tasks = task.get_task_by_document_id(document_id=idx)\n\n context = {\n 'document_description': document_description,\n 'all_document_tasks': all_document_tasks\n }\n\n return render_template('pages/document.html', **context)", "def build_document(self, labels_from_json):\n if not len(self.raw_labels):\n self.get_labels(labels_from_json)\n raw_text = self.instance_input_file.read()\n document = self.DOCUMENT_CLASS(self.identifier, title=self.identifier)\n document.build_from_text(raw_text, start_index=0)\n for start_index, end_index in self.raw_labels:\n document.add_label_for_position(\n 'claim', int(start_index), int(end_index))\n return document", "def find_document(self):\n pass", "def show_documentation(self):\n self.docs = documentation.Documentation()", "def document(self) -> str:\n return pulumi.get(self, \"document\")", "def document_detail(application_id, group_tag, document_model):\n return 'doc_detail_%s_%s_%s' % (str(application_id), str(document_model), group_tag)", "def onDocumentation(self):\n path = self.settings.path + 'doc\\\\report.pdf'\n os.startfile(path)", "def __getitem__(self, doc_label):\n if doc_label not in self.docs:\n raise KeyError('document `%s` not found in corpus' % doc_label)\n return self.docs[doc_label]", "def docs(self):\n self._doc_info = DocumentationURL()\n self._doc_info.show()", "def examine_document(self, action):\n doc = action[1] # this should have a document ID so we can pull out the correct document text\n screen = DocScreen('doc_title', 'doc_content goes here')\n\n return screen", "def doc(caesar, input):\n name = input.group(1)\n name = name.lower()\n\n if caesar.doc.has_key(name): \n caesar.reply(caesar.doc[name][0])\n if caesar.doc[name][1]: \n caesar.say('e.g. 
' + caesar.doc[name][1])", "def docs():", "def get_document_by_name(label, doc_type):\n return Documents.query.filter_by(type=doc_type, label=label).first()", "def build_document(self):\n pass", "def tests_ti_document_add_label(self):\n super().group_add_label()", "def StartDoc(*args, **kwargs):\n return _gdi_.DC_StartDoc(*args, **kwargs)", "def startDocument(self):\n pass", "def startDocument(self):\n pass", "def mark_plot_labels(app, document):\r\n for name, explicit in document.nametypes.iteritems():\r\n if not explicit:\r\n continue\r\n labelid = document.nameids[name]\r\n if labelid is None:\r\n continue\r\n node = document.ids[labelid]\r\n if node.tagname in ('html_only', 'latex_only'):\r\n for n in node:\r\n if n.tagname == 'figure':\r\n sectname = name\r\n for c in n:\r\n if c.tagname == 'caption':\r\n sectname = c.astext()\r\n break\r\n\r\n node['ids'].remove(labelid)\r\n node['names'].remove(name)\r\n n['ids'].append(labelid)\r\n n['names'].append(name)\r\n document.settings.env.labels[name] = \\\r\n document.settings.env.docname, labelid, sectname\r\n break", "def __call__(self, doc):\n return doc", "def doc(obj):\n return Documentation.fromObject(obj).first", "def GetDocument(self, *args, **kwargs):\n pass", "def django_show_docs():\r\n app = wingapi.gApplication\r\n app.ExecuteCommand('show-document', section=\"howtos/django\")", "def test_create_labelled_document(self):\n model_name = TestSingleLabelClassifierModel.get_name()\n\n document = Document.objects.create()\n self.assertFalse(LabelledDocument.objects.exists())\n\n url = reverse('django_learnit:document-labelling', kwargs={\n 'name': model_name,\n 'pk': document.pk\n })\n\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n\n form = response.context['form']\n self.assertIsInstance(form, SingleLabelClassifierForm)\n\n data = form.initial\n data['label'] = '1'\n\n self.client.post(url, data)\n\n labelled_document = LabelledDocument.objects.get_for_document(\n document, model_name)\n\n expected_value = LabelledDocument.serialize_value({\n 'label': '1'\n })\n\n self.assertEqual(labelled_document.value, expected_value)", "def classify_document(classification_file, classification_dict, document):\n\tdocument_dictionary = make_prob_dictionary(classification_file, classification_dict)\n\tdocument = read_doc(document)\n\tdoc_words = document[0]\n\tdoc_length = float(document[1])\n\tdocument = probabilities(doc_words, doc_length, classification_dict)\n\treturn document", "def get_label_by_id(document_id):\n document = Documents.query.filter_by(id=document_id).first()\n if document:\n return document.label\n return document", "def set_label_text(self):\n self.text = 'Pages to read: {}'.format(self.collection.get_required_pages())", "def documentdetail(request, docid):\n\tdocument = get_object_or_404(Document, pk=docid)\n\tif not request.user.has_perm('documents.view_document', obj=document):\n\t\treturn HttpResponse(loader.render_to_string('401.html',\n\t\t\tRequestContext(request, {'error_message':\n\t\t\t\t_(\"You are not allowed to view this document.\")})), status=401)\n\ttry:\n\t\trelated = document.content_type.get_object_for_this_type(id=document.object_id)\n\texcept:\n\t\trelated = ''\n\n\treturn render_to_response(\"documents/docinfo.html\", RequestContext(request, {\n\t\t'permissions_json': json.dumps(_perms_info(document, DOCUMENT_LEV_NAMES)),\n\t\t'document': document,\n\t\t'imgtypes': imgtypes,\n\t\t'related': related\n\t}))", "def detect_document(path):\n from google.cloud import 
vision\n client = vision.ImageAnnotatorClient()\n\n with io.open(path, 'rb') as image_file:\n content = image_file.read()\n\n image = vision.types.Image(content=content)\n\n response = client.document_text_detection(image=image)\n\n for page in response.full_text_annotation.pages:\n for block in page.blocks:\n #print('\\nBlock confidence: {}\\n'.format(block.confidence))\n for paragraph in block.paragraphs:\n for word in paragraph.words:\n word_text = ''.join([symbol.text for symbol in word.symbols])\n text.append(word_text.encode('utf-8'))\n #print(word_text)", "def document(self, **kw):\r\n \r\n for p in self.documents(**kw):\r\n return p", "def document_details(context, document):\n return {'document': document, 'request': context['request']}", "def view_document(self, database, collection, _id):\n r = self.__get_response(settings.VIW_DOC,\n {\"db\": database, \"col\": collection, \"id\": str(_id)})\n if r[\"status\"] == 200:\n return r[\"result\"]\n raise Exception(r[\"result\"][\"message\"])", "def open_document(self, id, **kw):\r\n\r\n #Getting the document directory path\r\n document_directory_path = request.env['npa.document'].get_document_directory_path()\r\n\r\n #Getting Document Record\r\n document_rec = request.env['npa.document'].search([('id','=',int(id))])\r\n #Checking if the file exists, and then fetching the document.\r\n if os.path.exists(document_rec.file_loc):\r\n with open(document_rec.file_loc, 'rb') as doc_file:\r\n filecontent = doc_file.read()\r\n if not filecontent:\r\n return request.not_found()\r\n else:\r\n if document_rec.file_name[-3:] == 'pdf':\r\n #Return the file and filename to the browser.\r\n return request.make_response(filecontent,\r\n [('Content-Type', 'application/pdf'),\r\n ('Content-Disposition', 'inline')])\r\n else:\r\n return request.make_response(filecontent,\r\n [('Content-Type', 'attachment'),\r\n ('Content-Disposition', 'inline')])\r\n else:\r\n msg = 'File document {0} not found in NFS server. 
Please check the file or upload again.'.format(document_rec.file_loc)\r\n return request.not_found(msg)", "def show_documents():\n\n document = Document(connection=connection, cursor=cursor)\n\n all_documents = document.get_all_documents()\n\n context = {\n 'all_documents': all_documents\n }\n\n return render_template('pages/tables/documents.html', **context)", "def _DocSim(self,df,a):\r\n #Obtain the descriptions of the two input courses.\r\n textA = df['description'][a]\r\n #Obtain the document embedding vector for each description.\r\n vectorA = self.DocVecModel.infer_vector([textA], alpha=0.1, min_alpha=0.0001, steps=300)\r\n return vectorA", "def lispdoc(self, irc, msg, args, num, req):\n self.googleq('http://lispdoc.com/', req, num, irc)", "def StartDoc(*args, **kwargs):\n return _gdi_.GraphicsContext_StartDoc(*args, **kwargs)", "def document_index():\n\n return render_template('admin/documents/index.html',\n path='/admin/documents' + ('/search' if 'search' in request.args else ''))", "def create_document(self, data):\n command = CreateDocumentFromOneOffixxTemplateCommand(self.context, data['title'], data['template'])\n return command.execute()", "def setup_document(document_name=\"fSCAD-Preview\"):\n preview_doc = None\n saved_camera = None\n saved_units = None\n for document in app().documents:\n if document.name == document_name:\n preview_doc = document\n break\n if preview_doc is not None:\n preview_doc.activate()\n saved_camera = app().activeViewport.camera\n saved_units = design().fusionUnitsManager.distanceDisplayUnits\n preview_doc.close(False)\n\n preview_doc = app().documents.add(adsk.core.DocumentTypes.FusionDesignDocumentType)\n preview_doc.name = document_name\n preview_doc.activate()\n if saved_camera is not None:\n is_smooth_transition_bak = saved_camera.isSmoothTransition\n saved_camera.isSmoothTransition = False\n app().activeViewport.camera = saved_camera\n saved_camera.isSmoothTransition = is_smooth_transition_bak\n app().activeViewport.camera = saved_camera\n if saved_units is not None:\n design().fusionUnitsManager.distanceDisplayUnits = saved_units\n design().designType = adsk.fusion.DesignTypes.DirectDesignType", "def getDocumentId(self): #$NON-NLS-1$\r", "def get_feature_string_by_document(self, _set, document):\n label = self.sets[_set][document]['label']\n line = \"{} \".format(label)\n for word in self.sets[_set][document]['words']:\n line += \"{}:{} \".format(self.dictionary[word]['id'],tfidf.tfidf(word, document, self))\n line += \"\\n\"\n return line", "def savedoc():\r\n document.save('QSDoc_{0}_{1}_{2}_{3}.docx'.format(args.server, year, month, day))", "def doc(request, doc_id):\n doc_obj = get_object_or_404(Document, pk=doc_id)\n author_person_objs = doc_obj.author_person.all()\n author_organization_objs = doc_obj.author_organization.all()\n recipient_person_objs = doc_obj.recipient_person.all()\n recipient_organization_objs = doc_obj.recipient_organization.all()\n cced_person_objs = doc_obj.cced_person.all()\n cced_organization_objs = doc_obj.cced_organization.all()\n page_objs = doc_obj.page_set.all()\n obj_dict = {\n 'doc_obj': doc_obj,\n 'author_person_objs': author_person_objs,\n 'author_organization_objs': author_organization_objs,\n 'recipient_person_objs': recipient_person_objs,\n 'recipient_orgaization_objs': recipient_organization_objs,\n 'cced_person_objs': cced_person_objs,\n 'cced_organization_objs': cced_organization_objs,\n 'page_objs': page_objs\n }\n return render(request, 'doc.jinja2', obj_dict)", "def fini_doc(self):\n raise 
NotImplementedError()", "def main():\n return render_template('doc.html', docid=queue.pop(0))", "def document_view(index_name, doc_type, doc_id):\n resp = es.get(index=index_name, doc_type=doc_type, id=doc_id)\n document = resp[\"_source\"]\n print(document)", "def __contains__(self, doc_label):\n return doc_label in self.docs", "def get_document(self, docid):\n raise NotImplementedError", "def get_doc(self, dtype, identity):\n if dtype == 'pii':\n doc = FullDoc(sd_pii = identity)\n elif dtype == 'doi':\n doc= FullDoc(doi = identity)\n\n if doc.read(ElsClient(self.API_list[0])):\n pass\n else:\n print (\"Read document failed.\")\n\n return doc", "def main(rc):\n with store_client(rc) as sclient:\n for doc in rc.documents:\n sclient.copydoc(doc)", "def open_document(filepath, show=True):\n\t\n\tk = krita.Krita.instance()\n\tprint('Debug: opening %s' % filepath)\n\tdoc = k.openDocument(filepath)\n\tif show:\n\t\tApplication.activeWindow().addView(doc)\n\treturn doc", "def tests_ti_document_get_label(self):\n super().group_get_label()", "def openDoc (self):\n fileName = QFileDialog.getOpenFileName(self,\n self.tr(\"Open File\"), \"\", \"All documents (*.%s;*.%s;*.%s;*.%s;*.%s;*.%s;*.%s);;Tests abstract (*.%s);;Tests unit (*.%s);;Tests suite (*.%s);;Tests plan (*.%s);;Tests global (*.%s);;Tests config (*.%s);;Tests data (*.%s)\" %\n ( TestAbstract.TYPE, TestUnit.TYPE, TestSuite.TYPE, TestPlan.TYPE, TestPlan.TYPE_GLOBAL, TestConfig.TYPE, TestData.TYPE, \n TestAbstract.TYPE, TestUnit.TYPE, TestSuite.TYPE, TestPlan.TYPE, TestPlan.TYPE_GLOBAL, TestConfig.TYPE, TestData.TYPE) )\n \n # new in v17.1\n if QtHelper.IS_QT5:\n _fileName, _type = fileName\n else:\n _fileName = fileName\n # end of new\n \n if not len(_fileName):\n return\n \n extension = str(_fileName).rsplit(\".\", 1)[1]\n if not ( extension.lower() in [ TestSuite.TYPE, TestPlan.TYPE, TestPlan.TYPE_GLOBAL, TestConfig.TYPE,\n TestData.TYPE, TestUnit.TYPE, TestAbstract.TYPE ] ):\n QMessageBox.critical(self, self.tr(\"Open Failed\") , self.tr(\"File not supported\") )\n return\n \n tmp = str(_fileName).rsplit(\"/\", 1)\n path = tmp[0]\n if len(tmp) > 1:\n _filename = tmp[1].rsplit(\".\", 1)[0]\n else:\n _filename = tmp[0].rsplit(\".\", 1)[0]\n self.newTab( path = path, filename = _filename, \n extension = extension, repoDest=UCI.REPO_UNDEFINED)", "def display_document_collection(self):\n display_document_collection_sitem = self.locator_finder_by_xpath(self.display_document_collection_id)\n display_document_collection_sitem.click()\n time.sleep(2)", "def full_text_doc_url(self):\n base_url = 'https://pic.datamade.us/chicago/document/'\n # base_url = 'http://127.0.0.1:5000/chicago/document/'\n \n if self.documents.filter(document_type='V').all():\n legistar_doc_url = self.documents.filter(document_type='V').first().document.url\n doc_url = '{0}?filename={2}&document_url={1}'.format(base_url, \n legistar_doc_url, \n self.identifier)\n return doc_url\n else:\n return None", "def setDocLabels(self, Y):\n GraphConjugate.setDocLabels(self, Y)\n self.lCluster = self.form_cluster(Y)\n \n self.addClusterToDoc(self.lCluster)\n traceln(\" %d cluster(s) found\" % (len(self.lCluster)))", "def beehive_make_doc(self):\n run_data = {\n u'tags':[u'doc'],\n u'local_package_path':self.local_package_path\n } \n self.ansible_playbook(u'docs', run_data, \n playbook=self.beehive_doc_playbook)", "def documentation():\n return render_template('help.html')", "def get_labels_docs(self):\n df_train = pd.read_csv(self.train_file, names=['label', 'title', 'doc'])\n 
df_test = pd.read_csv(self.test_file, names=['label', 'title', 'doc'])\n train_labels = df_train['label'].values\n train_docs = df_train['doc'].values\n test_labels = df_test['label'].values\n test_docs = df_test['doc'].values\n return train_labels, train_docs, test_labels, test_docs", "def XCAFDoc_DocumentTool_ShapesLabel(*args):\n return _XCAFDoc.XCAFDoc_DocumentTool_ShapesLabel(*args)", "async def create_doc(self, *args, **kwargs):\n pass", "def _create_document(result_dict):\n document = Document(\n name=result_dict['docname'],\n original_id=result_dict['itemid'],\n doctype=result_dict['doctype'],\n language=result_dict['languageisocode'],\n conclusion=result_dict['conclusion'],\n originatingbody=result_dict['originatingbody'],\n application=result_dict['application'],\n )\n return document", "def __call__(self, doc: Doc) -> Doc:\n return doc", "def __call__(self, doc: Doc) -> Doc:\n return doc", "def document(self, outputDir, docFormat=MARKDOWN):\n self.__masterFunctions.document(outputDir, docFormat)", "def create_new_doc(self, doc: Doc, min_prob: float = 0.25) -> Doc:\n\n # print(\"running on\", doc[:10])\n\n if not self.form_frequencies:\n raise RuntimeError(\n \"Cannot truecase without a dictionary of form frequencies\")\n\n tokens = []\n spaces = []\n doctext = doc.text\n for tok in doc:\n toktext = tok.text\n\n # We only change casing for words in Title or UPPER\n if tok.is_alpha and toktext[0].isupper():\n cond1 = tok.is_upper and len(toktext) > 2 # word in uppercase\n cond2 = toktext[0].isupper(\n ) and not tok.is_sent_start # titled word\n if cond1 or cond2:\n token_lc = toktext.lower()\n if token_lc in self.form_frequencies:\n frequencies = self.form_frequencies[token_lc]\n if frequencies.get(toktext, 0) < min_prob:\n alternative = sorted(\n frequencies.keys(), key=lambda x: frequencies[x])[-1]\n\n # We do not change from Title to to UPPER\n if not tok.is_title or not alternative.isupper():\n toktext = alternative\n\n tokens.append(toktext)\n\n # Spacy needs to know whether the token is followed by a space\n if tok.i < len(doc)-1:\n spaces.append(doctext[tok.idx+len(tok)].isspace())\n else:\n spaces.append(False)\n\n # Creates a new document with the tokenised words and space information\n doc2 = Doc(self.model.vocab, words=tokens, spaces=spaces) #type: ignore\n # print(\"finished with doc\", doc2[:10])\n return doc2", "def apiDocs():\n\treturn render_template('apiDocs.html')", "def detect_document(path):\n from google.cloud import vision\n import io\n client = vision.ImageAnnotatorClient()\n\n with io.open(path, 'rb') as image_file:\n content = image_file.read()\n image = vision.Image(content=content)\n\n response = client.document_text_detection(image=image)\n if response.error.message:\n raise Exception('{}'.format(response.error.message))\n return response.full_text_annotation.text", "def dummy(doc):\r\n return doc", "def test_display():\n builder = DocumentBuilder(\"test\")\n t0 = Token(\"foo\", 0)\n t1 = Token(\"bar\", 1)\n t2 = Token(\"baz\", 2)\n s1 = builder.create_sentence([t0, t1, t2])\n m0 = Mention.create(s1, [t0], NAME, MISC)\n m1 = Mention.create(s1, [t0, t1], NAME, ORG)\n m2 = Mention.create(s1, [t1, t2], NAME, MISC)\n m3 = Mention.create(s1, [t0, t1, t2], NAME, MISC)\n builder.add_mentions([m0, m1, m2, m3])\n system_doc = builder.build()\n\n builder = DocumentBuilder(\"test\")\n t0 = Token(\"foo\", 0)\n t1 = Token(\"bar\", 1)\n t2 = Token(\"baz\", 2)\n s1 = builder.create_sentence([t0, t1, t2])\n m0 = Mention.create(s1, [t0], NAME, MISC)\n m1 = 
Mention.create(s1, [t0, t1], NAME, ORG)\n m2 = Mention.create(s1, [t1, t2], NAME, MISC)\n m3 = Mention.create(s1, [t0, t1, t2], NAME, MISC)\n builder.add_mentions([m0, m1, m2, m3])\n gold_doc = builder.build()\n\n res = score_prf([gold_doc], [system_doc])\n res.print()", "def detect_document(path):\n from google.cloud import vision\n import io\n client = vision.ImageAnnotatorClient()\n\n with io.open(path, 'rb') as image_file:\n content = image_file.read()\n\n image = vision.types.Image(content=content)\n\n response = client.document_text_detection(image=image)\n\n for page in response.full_text_annotation.pages:\n for block in page.blocks:\n print('\\nBlock confidence: {}\\n'.format(block.confidence))\n\n for paragraph in block.paragraphs:\n print('Paragraph confidence: {}'.format(\n paragraph.confidence))\n\n for word in paragraph.words:\n word_text = ''.join([\n symbol.text for symbol in word.symbols\n ])\n print('Word text: {} (confidence: {})'.format(\n word_text, word.confidence))\n\n for symbol in word.symbols:\n print('\\tSymbol: {} (confidence: {})'.format(\n symbol.text, symbol.confidence))\n\n if response.error.message:\n raise Exception(\n '{}\\nFor more info on error messages, check: '\n 'https://cloud.google.com/apis/design/errors'.format(\n response.error.message))", "def update_document(self):\n pass", "def makeDocument(fontPath):\n\n f = Font(fontPath) # Get PageBot Font instance of Variable font.\n \n W = H = PageSize\n\n # Create a new document, default to the defined page size. \n doc = Document(w=W, h=H, originTop=False, title='Text Flow', autoPages=1)\n \n view = doc.getView()\n view.padding = 0 # Aboid showing of crop marks, etc.\n view.showPageCropMarks = True\n view.showPageRegistrationMarks = True\n view.showPageFrame = True\n view.showPagePadding = True\n view.showElementOrigin = False\n view.showElementDimensions = False\n \n # Get list of pages with equal y, then equal x. \n #page = doc[0][0] # Get the single page from te document.\n page = doc.getPage(0) # Get page on pageNumber, first in row (this is only one now).\n page.name = 'Page 1'\n page.padding = PagePadding\n \n fs = newFS(f.info.familyName + ' ' + f.info.styleName, \n style=dict(font=f.name, fontSize=18, textFill=0))\n _, th = textSize(fs)\n title = newTextBox(fs, conditions=[Top2Top(), Fit2Width()],\n parent=page, h=th*1.2)\n \n circle = VariableCircle(f, s=GLYPH_NAME, name='VariableCircleSpeciment',\n parent=page, padding=4, x=100, fontSize=64,\n maxW=W-2*PagePadding, minW=100, showAxisName=True, \n # Conditions make the element move to top-left of the page.\n # And the condition that there should be no overflow, otherwise the text box\n # will try to solve it. \n conditions=[Float2Top(), Fit2Bottom(), Center2Center()],\n # Position of the origin of the element. Just to show where it is.\n # Has no effect on the position conditions. 
\n yAlign=BOTTOM, xAlign=LEFT, fill=CIRCLE_ELEMENT_FILL, borders=0,\n )\n \n score = doc.solve() # Try to solve all pages.\n if score.fails:\n print score.fails\n\n # To avoid circular dependent conditions, we correct the position of the title\n # on left to that the position of the circle has become.\n title.pl = circle.x - page.pl\n \n return doc # Answer the doc for further doing.", "def show(args, syn):\n \n ent = syn.get(args.id, downloadFile=False)\n syn.printEntity(ent)", "def POSCAR_title(doc):\n com_for=doc['snl']\n formu=com_for['formula']\n return formu", "def doc(self):\n return {'_id': self._id,\n 'text': self.text}", "def hs_document_title(self):\n return self.__unicode__()", "def simulate_response(self, documents):", "def doc(self):\n doc = self.get('doc')\n if doc:\n from .config import defaults\n return defaults.types.doc(doc)", "def __newDocumentView(self):\n aw = self.activeWindow()\n if aw:\n self.newEditorView(aw.getFileName(), aw, aw.getFileType())", "def textdisplay(textTitle, analysis):\n try:\n global current_file\n with Database() as database:\n text_owner = database.getTextOwner(textTitle, session['username'])\n app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER + text_owner\n path = app.config['UPLOAD_FOLDER'] + '/objects/' + textTitle + '.txt'\n with open(path, 'rb') as f:\n current_file = pickle.load(f)\n analysed_texts = current_file.analysed_texts\n text_facts = current_file.stats\n keywords = ''\n for word in text_facts['Key Words']:\n keywords += word[0] + \", \"\n keywords = keywords[:-2]\n return render_template('textdisplay.html',\n title=current_file.title,\n texts=analysed_texts,\n text=analysed_texts[analysis],\n facts=text_facts,\n keywords=keywords,\n owner=text_owner,\n user=session['username'])\n except Exception as e:\n flash(\"Something went wrong, please try again\")\n return redirect(url_for('profile', username=session['username']))", "def document_status_label(document):\n labels = {\n Document.STATUS_NEW: 'danger',\n Document.STATUS_INREVIEW: 'warning',\n Document.STATUS_INAPPROVAL: 'info',\n Document.STATUS_APPROVED: 'success'\n }\n return ('<span id=\"doc_status_{0}\" class=\"label label-{1}\">{2}'\n '</span>'.format(\n document.pk,\n labels[document.status],\n document.get_status_display()))", "def forDisplay(self, doc):\n out = doc.copy()\n del out['_id']\n out['t'] = out['t'].astimezone(tzlocal()).isoformat()\n if not out['description'].strip():\n out['displayDescription'] = out['href']\n else:\n out['displayDescription'] = out['description']\n\n out['tagWords'] = [{'word': w} for w in out['tag'].split(None)]\n out['domain'] = urllib.parse.urlparse(out['href']).netloc\n out['editLink'] = 'addLink?' 
+ urllib.parse.urlencode(\n [('url', out['href'])])\n out['shareWith'] = [{'label': uri} for uri in doc.get('shareWith', [])]\n return out", "def get_document(self):\n return self.document", "def show_doc_attention(self, x):\n att_layer = self.model.get_layer('doc_attention')\n prev_tensor = att_layer.input\n\n dummy_layer = Lambda(\n lambda x: att_layer._get_attention_weights(x)\n )(prev_tensor)\n\n return Model(self.model.input, dummy_layer).predict(x)", "def document_index_load():\n\n return render_template('components/item-list.html', type='document', headless=True,\n items=loading_list(Document.query))", "def write_doc(self, docname: str, doctree: nodes.document) -> None:\n self.fix_ids(doctree)\n self.add_visible_links(doctree, self.config.epub_show_urls)\n super().write_doc(docname, doctree)", "def get_doc(self):\n return self.p", "def cloud_ai_document(self) -> 'outputs.GoogleCloudDocumentaiV1DocumentResponse':\n return pulumi.get(self, \"cloud_ai_document\")", "def document(cls):\n header = cls.document_header()\n content = cls.document_content()\n index = cls.document_index()\n package = cls.package().name\n filename = inspect.getsourcefile(cls)\n try:\n lines = inspect.getsourcelines(cls)\n except IOError:\n lines = ([], 0)\n line = lines[1]+1\n return DocEntry(\n header, content, index=index,\n package=package, filename=filename, line=line)", "def start(self) -> None:\n\n self.doc = self.doc + r'''\n \\documentclass[\n 10pt, % Main document font size\n a4paper, % Paper type, use 'letterpaper' for US Letter paper\n ]{scrartcl}\n\n \\usepackage{graphicx}\n \\usepackage{epstopdf}\n \\usepackage{float}\n \\usepackage[scale=0.75]{geometry} % Reduce document margins\n \\usepackage{hyperref}\n \\usepackage{longtable}\n\n \\begin{document}\n\n \\title{Automatic Exploratory Data Analysis} % The article title\n\n \\subtitle{Study Case} % Uncomment to display a subtitle\n\n \\author{Jacob} % The article author(s) - author affiliations need to be specified in the AUTHOR AFFILIATIONS block\\\n\n \\maketitle % Print the title/author/date block\n\n \\newpage\n \\tableofcontents % Print the table of contents\n\n \\newpage\n \\listoffigures % Print the list of figures\n\n \\newpage\n \\listoftables % Print the list of tables\n '''", "def _id(self, document):\n pass", "def show_sequence_label(self, event):\n c=self.seqframe\n box = c.bbox(CURRENT)\n x1=box[0]\n y1=box[1]\n x2=box[2]\n y2=box[3]\n items=[]\n #make selection rectangle one pixel larger to include rect and text\n items=c.find_enclosed(x1-1,y1-1,x2+1,y2+1)\n\n import tkFont\n sfont = tkFont.Font (family='Arial', size=12,weight='bold')\n for obj in items:\n c.tag_raise(obj)\n #if item is text, get recog sequence and display\n for name in c.gettags(obj):\n #ignore other tags, just get name of seq\n if name!='current' and name!='comparison_seq':\n obj=c.create_text(x2+3,y1-3,text=name,tags='seqlabel',\n font=sfont,width=120,anchor='nw')\n box = c.bbox(obj)\n rect = c.create_rectangle(box,tag='seqlabel',fill='yellow')\n c.lift(obj)\n\n return", "def documento(self):\n return self.persona.documento" ]
[ "0.6995251", "0.6902461", "0.6584978", "0.63693833", "0.6363506", "0.6272244", "0.6196975", "0.59724045", "0.59635377", "0.58901525", "0.586877", "0.58180165", "0.581586", "0.5797789", "0.57909936", "0.5767639", "0.57195663", "0.5712125", "0.5709976", "0.5680226", "0.5668638", "0.56455296", "0.56369495", "0.56369495", "0.56031656", "0.5575704", "0.55531466", "0.5548407", "0.5546323", "0.55428517", "0.55401295", "0.55035263", "0.5499346", "0.5492828", "0.549235", "0.54906005", "0.546433", "0.5445511", "0.54417264", "0.54378957", "0.5437714", "0.54295194", "0.5414892", "0.5391429", "0.53751457", "0.5366977", "0.536001", "0.53595805", "0.5359105", "0.5355602", "0.5347838", "0.53467363", "0.533907", "0.53238595", "0.53016764", "0.5300404", "0.5286907", "0.5284401", "0.5281017", "0.5278162", "0.5277076", "0.5270035", "0.5267709", "0.5263459", "0.52585185", "0.5257496", "0.5256455", "0.52393425", "0.5226812", "0.5217114", "0.5217114", "0.521328", "0.5209431", "0.5206757", "0.51975375", "0.51891065", "0.5184353", "0.5176421", "0.51622206", "0.51573294", "0.5152415", "0.5146256", "0.5139133", "0.5138558", "0.51358414", "0.5133621", "0.51258934", "0.5121396", "0.5111834", "0.5101294", "0.5098585", "0.509755", "0.50973505", "0.5093127", "0.50921184", "0.50912595", "0.5083419", "0.5083335", "0.5063564", "0.5055299", "0.50528127" ]
0.0
-1
Build a queue of docs to be labeled. Exclude those doc_cloud_ids that have already been labeled
def get_queue(filename):
    build_queue = [q.replace("\n", "") for q in open(filename)]
    build_queue = [l for l in build_queue
                   if not os.path.exists(SETTINGS.XML_LOCATION + l + ".xml")]
    build_queue = list(set(build_queue))  # dedupe
    build_queue.sort(key=sort_have_labels)
    return build_queue
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build_docs():\n docs = []\n for base_id in range(DOCUMENTS_PER_LEVEL):\n d = jina_pb2.Document()\n d.granularity = 0\n d.adjacency = 0\n d.id = base_id\n docs.append(d)\n iterate_build(d, 0, 2, 0, 2)\n return docs", "def keep_documents(self, idx):\n print('{} documents have been removed'.format(self.data.shape[0] - len(idx)))\n self.documents = [self.documents[i] for i in idx]\n self.labels = self.labels[idx]\n self.data = self.data[idx, :]", "def build(self):\n\t\tself.documents = self.get_items_to_index()\n\t\tself.build_index()", "def clean_docs(self,docs):\n\n # Remove numbers, but not words that contain numbers.\n docs = [[token for token in doc if not token.isnumeric()] for doc in docs]\n\n # Remove words that are only one character.\n docs = [[token for token in doc if len(token) > 1 and token not in stop_words] for doc in docs]\n\n # lemmatizer = WordNetLemmatizer()\n # docs = [[lemmatizer.lemmatize(token) for token in doc] for doc in docs]\n\n # Add bigrams and trigrams to docs (only ones that appear 20 times or more).\n bigram = Phrases(docs, min_count=20)\n for idx in range(len(docs)):\n for token in bigram[docs[idx]]:\n if '_' in token:\n # Token is a bigram, add to document.\n docs[idx].append(token)\n\n # Create a dictionary representation of the documents.\n dictionary = Dictionary(docs)\n\n # Filter out words that occur less than 20 documents, or more than 50% of the documents.\n dictionary.filter_extremes(no_below=20, no_above=0.5)\n\n # Bag-of-words representation of the documents.\n corpus = [dictionary.doc2bow(doc) for doc in docs]\n\n return docs,dictionary,corpus", "def get_unlabelled_documents_queryset(self):\n queryset = self.get_queryset()\n\n # Retrieve labelled IDs\n labelled_ids = self.get_labelled_documents_queryset()\\\n .values_list('document_id', flat=True)\n\n return queryset.exclude(pk__in=labelled_ids)", "def build_index(in_dir, out_dict, out_postings):\n print('indexing...')\n\n stemmer = stem.PorterStemmer()\n\n #Dictionary for saving our tokens and the reference to their postings list\n dictionary = dict()\n #Number of files that will be indexed\n num_files = 1000000\n #1. We have to open the reuters training docs directory and traverse it, opening each doc.\n #List all files in the dir and sort them by numerical order, to have sorted postings lists\n lst = os.listdir(in_dir)\n lst.sort(key=lambda f: int(re.sub(r'\\D', '', f)))\n\n #2. 
For each file in the dir:\n for filename in lst:\n #Open it\n f = open(in_dir+\"/\"+filename, \"r\")\n #Read it\n text = f.read()\n #Get the sentences in the file\n sentences = nltk.sent_tokenize(text)\n #This \" \" token will be used for NOT queries\n not_postings_list = dictionary.get(\" \", list())\n not_postings_list.append(int(filename))\n dictionary[\" \"] = not_postings_list\n\n for sentence in sentences:\n #For each sentence get the words that compose it\n words = nltk.word_tokenize(sentence)\n\n for word in words:\n \n word = word.lower()\n word = stemmer.stem(word)\n \n\n #For each word check if its already registered in the dictionary\n #If its not, a new postings list is created for that word\n #If its already registered, its postings list is retrieved\n postings_list = dictionary.get(word, list())\n \n #This is to check if the word is not registered and a postings list \n #was just created for it\n if(len(postings_list) == 0):\n #In that case save the postings list in the dictionary\n dictionary[word] = postings_list\n #Then add the file name (id) in which the word appears\n postings_list.append(int(filename))\n\n #If the word was already in the dictionary, we check that the last entry\n #in its posting list is not the same as the filename (id) we are currently checking\n #as we don't want duplicate doc ids in the postings list\n elif(postings_list[len(postings_list)-1] != int(filename)):\n #So if its the first time that it appears in the file we save the filename (id)\n postings_list.append(int(filename))\n\n #This is to limit the number of docs that will be indexed \n num_files -= 1 \n if(num_files <= 0): \n break\n \n #with open('ugly_dictionary.txt', 'w') as fp:\n #json.dump(dictionary, fp)\n #After checking all the words in the files, we have our dictionary with its postings lists\n # But we don't want to save the postings list with the dictionary as they can be quite large\n # Now we will traverse each word (key) in the dictionary, get its postings list and save it in a different file \n \n postings_list_file = open(out_postings, \"wb\") \n for word in dictionary:\n postings_list = dictionary[word]\n #Know the starting position\n postings_list_position = postings_list_file.tell()\n # Writing to file \n pickle.dump(postings_list, postings_list_file)\n #Replace postings list with reference to the position\n dictionary[word] = postings_list_position\n #Close the postings lists file\n postings_list_file.close() \n #Now open the dictionary file and save it\n \n with open(out_dict, 'wb') as dictionary_file:\n pickle.dump(dictionary, dictionary_file, protocol=pickle.HIGHEST_PROTOCOL)\n '''\n with open(out_dict, 'w') as fp:\n json.dump(dictionary, fp)\n '''", "def consume_data(self, data):\n # Get parameters\n logger_manager = data['logger_manager']\n doc_m = data['document_manager']\n message_id = data['message_id']\n documents = data['documents']\n to_remove_queue = data['to_remove_queue']\n duplicates = no_requestInTs = 0\n hash_set = set()\n\n for current_document in documents:\n\n # Mark to removal documents without requestInTs immediately (as of bug in xRoad software ver 6.22.0)\n if current_document['requestInTs'] is None and current_document['securityServerType'] is None:\n to_remove_queue.put(current_document['_id'])\n no_requestInTs += 1\n self.db_m.mark_as_corrected(current_document)\n \"\"\"\n :logger_manager.log_warning('no_requestInTs',\n :'_id : ObjectId(\\'' + str(current_document['_id']) + '\\'),\n :messageId : ' + str(current_document['messageId']))\n \"\"\"\n 
continue\n\n # Check if is batch duplicated\n current_document_hash = doc_m.calculate_hash(current_document)\n if current_document_hash in hash_set:\n # If yes, mark to removal\n to_remove_queue.put(current_document['_id'])\n duplicates += 1\n self.db_m.mark_as_corrected(current_document)\n \"\"\"\n :logger_manager.log_warning('batch_duplicated',\n :'_id : ObjectId(\\'' + str(current_document['_id']) + '\\'),\n :messageId : ' + str(current_document['messageId']))\n \"\"\"\n continue\n\n # Check if is database duplicated\n if self.db_m.check_if_hash_exists(current_document_hash):\n # If here, add to batch duplicate cache\n hash_set.add(current_document_hash)\n duplicates += 1\n self.db_m.mark_as_corrected(current_document)\n \"\"\"\n :logger_manager.log_warning('database_duplicated',\n :'_id : ObjectId(\\'' + str(current_document['_id']) + '\\'),\n :messageId : ' + str(current_document['messageId']))\n \"\"\"\n continue\n\n # Mark hash as seen\n hash_set.add(current_document_hash)\n # Find possible matching documents\n matching_documents = self.db_m.find_by_message_id(current_document)\n # Try to match the current document with possible pairs (regular)\n merged_document = doc_m.find_match(current_document, matching_documents)\n matching_type = ''\n\n if merged_document is None:\n # Try to match the current document with orphan-matching\n merged_document = doc_m.find_orphan_match(current_document, matching_documents)\n if merged_document is not None:\n matching_type = 'orphan_pair'\n else:\n matching_type = 'regular_pair'\n\n if merged_document is None:\n matching_type = 'orphan'\n if current_document['securityServerType'] == 'Producer':\n new_document = doc_m.create_json(None, current_document, None, current_document_hash, message_id)\n else:\n if current_document['securityServerType'] != 'Client':\n current_document['securityServerType'] = 'Client'\n new_document = doc_m.create_json(current_document, None, current_document_hash, None, message_id)\n\n new_document = doc_m.apply_calculations(new_document)\n new_document['correctorTime'] = database_manager.get_timestamp()\n new_document['correctorStatus'] = 'processing'\n new_document['matchingType'] = matching_type\n\n # Mark non-xRoad queries as 'done' instantly. 
No reason to wait matching pair\n if 'client' in new_document and new_document['client'] is not None and 'clientXRoadInstance' in new_document['client'] \\\n and new_document['client']['clientXRoadInstance'] is None:\n new_document['correctorStatus'] = 'done'\n new_document['matchingType'] = 'orphan'\n\n self.db_m.add_to_clean_data(new_document)\n\n else:\n\n if current_document['securityServerType'] == 'Client':\n\n if merged_document['client'] is None:\n merged_document['client'] = current_document\n merged_document = doc_m.apply_calculations(merged_document)\n merged_document['clientHash'] = current_document_hash\n merged_document['correctorTime'] = database_manager.get_timestamp()\n merged_document['correctorStatus'] = 'done'\n merged_document['matchingType'] = matching_type\n self.db_m.update_document_clean_data(merged_document)\n else:\n # This should never-ever happen in >= v0.4.\n msg = '[{0}] 2 matching clients for 1 producer: {1}'.format(self.worker_name, current_document)\n logger_manager.log_warning('corrector_merging', msg)\n\n else:\n\n if merged_document['producer'] is None:\n merged_document['producer'] = current_document\n merged_document = doc_m.apply_calculations(merged_document)\n merged_document['producerHash'] = current_document_hash\n merged_document['correctorTime'] = database_manager.get_timestamp()\n merged_document['correctorStatus'] = 'done'\n merged_document['matchingType'] = matching_type\n self.db_m.update_document_clean_data(merged_document)\n else:\n # This should never-ever happen in >= v0.4.\n msg = '[{0}] 2 matching producers for 1 client: {1}'.format(self.worker_name, current_document)\n logger_manager.log_error('corrector_merging', msg)\n\n self.db_m.mark_as_corrected(current_document)\n\n if no_requestInTs:\n msg = '[{0}] {1} document(s) without requestInTs present'.format(self.worker_name, no_requestInTs)\n logger_manager.log_warning('corrector_no_requestInTs', msg)\n\n return duplicates", "def preprocess(self, documents):\n\n # A dict storing the frequency of each word\n word_freq = {}\n\n # Iterate for each document\n for doc in documents:\n # Split the document into a list of words and iterate on it\n for w in extract_words(doc):\n # Update word frequencies\n '''YOUR CODE HERE'''\n if w not in word_freq.keys():\n word_freq[w] = 1\n else:\n word_freq[w] += 1\n\n ''' END CODE FOR THIS LOOP '''\n\n\n # A set of words with frequency less than 'self.min_freq'\n remove_words = set()\n\n # Check frequency of each word and add to 'remove_words'\n # if it's frequency is below self.min_freq\n\n ''' YOUR CODE HERE '''\n for w in word_freq.keys():\n if word_freq[w] < self.min_freq:\n remove_words.add(w)\n\n # Delete the words in 'remove_words' from 'word_freq'\n for w in remove_words:\n del word_freq[w]\n\n # Fill 'self.word_to_idx' and 'self.idx_to_word' for\n # each word in 'word_freq' (dicts are explained above)\n\n i = 0\n for w in word_freq.keys():\n self.word_to_idx[w] = i\n self.idx_to_word[i] = w \n i += 1\n\n ''' END YOUR CODE HERE '''", "def queue_all_texts(self, q, texts, window_size):\n for batch_num, batch in enumerate(self.yield_batches(texts)):\n q.put(batch, block=True)\n before = self._num_docs / self.log_every\n self._num_docs += sum(len(doc) - window_size + 1 for doc in batch)\n if before < (self._num_docs / self.log_every):\n logger.info(\n \"%d batches submitted to accumulate stats from %d documents (%d virtual)\",\n (batch_num + 1), (batch_num + 1) * self.batch_size, self._num_docs)", "def clean_duplicate_documents(self):\n title_k = 
lambda x: x.title\n for k, g in groupby(sorted(self.annotation_documents, key=title_k), title_k):\n g = list(g)\n if len(g) > 1:\n # check first if one is in test set\n to_remove = [x for x in g if x not in self.test]\n if (\n len(to_remove) > 1\n ): # if test is not matched, make subselection based on annotation unit count\n select_k = lambda x: (\n len(x.events) + len(x.sentiment_expressions),\n x.annotator_id != \"gilles\",\n )\n to_remove.sort(key=select_k, reverse=True)\n to_remove = to_remove[1:]\n for docrm in to_remove:\n self.annotation_documents.remove(docrm)\n if docrm in self.dev:\n self.dev.remove(docrm)\n elif docrm in self.test:\n self.test.remove(docrm)\n print(f\"Duplicate doc removed: {docrm}\")", "def prune_terms(docs, min_df=3):\n ###TODO\n final_list = []\n items_dict = defaultdict(lambda:0.0)\n for i in docs:\n for j in i:\n items_dict[j] = items_dict[j] + 1\n \n for i in docs:\n for j in list(i):\n if items_dict[j] < min_df:\n del i[j]\n if len(i) != 0:\n final_list.append(Counter(i))\n return final_list", "def preprocess(self, documents):\n\n # Store the total number of documents\n num_docs = np.float(len(documents))\n\n # A dict storing the frequency of each word across all documents\n total_word_freq = {}\n\n # A dict storing the number of documents that word appears in\n doc_word_freq = {}\n\n # Iterate over all documents\n for doc in documents:\n # Split the string into a list of words\n words = extract_words(doc)\n\n # Update the 'total_word_freq' dict using all words in 'words'\n for w in words:\n ''' YOUR CODE HERE '''\n if w not in total_word_freq.keys():\n total_word_freq[w] = 1\n else:\n total_word_freq[w] += 1\n\n ''' END CODE FOR THIS LOOP '''\n\n # Update the 'doc_word_freq' dict. Remember to only add '1' corresponding to\n # each word in a document. In case a word appears twice in a document, then\n # it should be ignored. 
We use the set() data structure to achieve this.\n for w in set(words):\n ''' YOUR CODE HERE '''\n if w not in doc_word_freq:\n doc_word_freq[w] = 1\n else:\n doc_word_freq[w] += 1\n\n ''' END CODE FOR THIS LOOP '''\n\n # A set of words with total frequency less than 'self.min_freq'\n remove_words = set()\n\n ''' YOUR CODE HERE '''\n\n # Check frequency of each word and add to 'remove_words'\n for w in total_word_freq.keys():\n if total_word_freq[w] < self.min_freq:\n remove_words.add(w)\n\n # Delete the words in 'remove_words' from 'total_word_freq' and\n # 'doc_word_freq'.\n for w in remove_words:\n del total_word_freq[w]\n del doc_word_freq[w]\n\n # Create a numpy array to store frequencies from which\n # we can create the 'self.idf' preprocessed numpy array.\n word_freq_tensor = np.zeros(len(doc_word_freq))\n\n # For each word in 'doc_word_freq' dict, update\n # 'self.word_to_idx' and 'self.idx_to_word' and\n # 'word_freq_tensor'.\n i = 0\n for w in doc_word_freq.keys():\n self.word_to_idx[w] = i \n self.idx_to_word[i] = w\n word_freq_tensor[i] = doc_word_freq[w]\n i+=1\n \n #print(word_freq_tensor.shape)\n #print(word_freq_tensor)\n # Calculate 'self.idf' (see hint.pdf for formula)\n self.idf = -1*np.log(word_freq_tensor/(len(documents)))\n ''' END YOUR CODE HERE '''", "def build_corpus(self):\n # #############################\n\n doc = metapy.index.Document()\n tok = metapy.analyzers.ICUTokenizer(suppress_tags=True)\n tok = metapy.analyzers.LowercaseFilter(tok)\n tok = metapy.analyzers.LengthFilter(tok, min=3, max=1000)\n tok = metapy.analyzers.Porter2Filter(tok)\n tok = metapy.analyzers.ListFilter(tok, \"lemur-stopwords.txt\", metapy.analyzers.ListFilter.Type.Reject)\n collection = -1\n\n with open(self.documents_path) as file:\n for num, line in enumerate(file):\n l = line.strip()\n c = int(l[0])\n l = l[2:]\n doc.content(l)\n tok.set_content(doc.content())\n if c != collection:\n self.documents.append([])\n collection = c\n self.documents[c].append([token for token in tok])\n self.number_of_collections = len(self.documents)\n self.number_of_documents = len(self.documents[0])\n #print(self.number_of_collections)\n #print(self.number_of_documents)\n #print(self.documents[0])", "def __init__(self, docs, freq_threshold= 2):\n BaseDoc2Vec.__init__(self) # initialize variables\n self.stopwords += self.additional_stopwords\n self.docs = []\n self.words = set()\n\n for doc in docs: # go through documents to record all words\n words = set()\n for word in self.cut_words(doc):\n self.words.add(word)\n words.add(word)\n self.docs.append(list(words))\n self.words = list(self.words)\n\n self.dfdict = dict([(wrd, 0) for wrd in self.words])\n for doc in self.docs: \n for word in doc:\n self.dfdict[word] += 1 # calculate word frequency\n \n # exclude words that appear less than threshold\n self.words = [word for word in self.words if self.dfdict[word] > freq_threshold]\n logging.info(f'{len(docs)} articles loaded, with word bag length: {len(self.words)}')", "def build(self):\n labelled_documents = self.get_labelled_documents_queryset()\n\n self.model = self.build_model(labelled_documents)\n self.save_model()", "def __init__(self, files, folder, storage_method=\"raw\", force_shorten=True,\n data_dir=\"./data_og_consecutive\", tokenizer_path=\"./\"):\n Doc.__init__(self, storage_method, force_shorten, data_dir, tokenizer_path)\n self.all_docs = []\n\n for f in tqdm(files):\n doc = {}\n with open(os.path.join(folder, f)) as fp:\n tos = json.load(fp)\n for section in tos:\n # Transform dict into X/y 
sample\n text = section[\"Text\"]\n label = section[\"Section\"]\n doc = self.add_to_section(text, label, doc)\n\n self.all_docs.append(doc)", "def generate_queue(self,pool):\n\t\tqueue = []\n\t\tfor ele in self.elements:\n\t\t\tif ele.pool == pool and ele.status == 'pending':\n\t\t\t\tele.abs_path = \"/%s/%s/%s/%s\" % (\n\t\t\t\t\tself.base_dir,\n\t\t\t\t\tself.parent_dir,\n\t\t\t\t\tself.project,\n\t\t\t\t\tele.filename\n\t\t\t\t\t)\n\t\t\t\tqueue.append(ele)\n\t\treturn queue", "def merge_docs(self):", "def filter_already_queued(queue, targets):\n\n current_targets = {x[0]: 0 for x in windowed_query(db.session.query(Target.target).filter(Target.queue == queue), Target.id)}\n targets = [tgt for tgt in targets if tgt not in current_targets]\n return targets", "def prune(self, upper, lower):\n # max_count = sorted([self.counts[key] for key in self.counts.keys()])[::-1][upper]\n max_count = upper\n\n print('Removed all words that occur less than {} times and more than {} times'.format(lower, upper))\n for i, doc in enumerate(self.docs):\n new_doc = []\n for word in doc:\n if self.counts[word] <= max_count and self.counts[word] > lower:\n new_doc.append(word)\n self.docs[i] = new_doc", "def excludeObsolete(self) -> 'ElementsRequestBuilder':\n ...", "def docs2ids(self):\n self.docs = [ [self.vocab[word] for word in doc] for doc in self.docs]", "def build_corpus(self):\n print(\"Inside the build_corpus >>>>>\")\n documentsCount = 0\n documents = self.documents\n\t\t\n with open(self.documents_path) as file:\n for documents in file.readlines():\n documents = documents.rstrip('}\\n ').strip('0\\t').strip('1\\t').split(' ')\n documentsCount = documentsCount +1\n self.documents.append(documents)\n\t\t\t\n self.number_of_documents = documentsCount", "def _createDocObjects(DocIDs):\n return [docObject(docId) for docId in DocIDs]", "def prune(mapq=30):\n\n mkdir(PRUNE_DIR)\n\n #\n # samtools filters:\n # -f 3: keep properly paired and mapped reads\n # -F 4: filter out unmapped reads\n # -F 8: filter out unmapped mates\n # -F 256: filter out secondary reads\n # -F 1024: filter out duplicates marked by Picard above\n # -F 2048: filter out supplementary reads\n #\n\n template = \"\"\"samtools view -b -h -F 4 -F 256 -F 1024 -F 2048 -q {mapq} {input_bam} {autosomes} > {output_bam}; samtools index {output_bam}\"\"\"\n\n printp(\"\"\"\\n# drmr:label prune\\n\"\"\")\n printp(\"\"\"# drmr:job nodes=1 processors=1 memory=4g time_limit=4h working_directory={}\"\"\".format(PRUNE_DIR))\n printp(\"\"\"\\n#\\n# prune the BAM files with marked duplicates down to properly paired\"\"\")\n printp(\"\"\"# and mapped primary autosomal alignments of good quality, for peak calling\\n#\\n\"\"\")\n\n for sample, info in DATA.items():\n for x in ['treatment', 'control']:\n input_bam = get_md_bam(sample, control = False) if x == 'treatment' else get_md_bam(sample, control = True)\n output_bam = get_pruned_bam(sample, control = False) if x == 'treatment' else get_pruned_bam(sample, control = True)\n autosomes = ' '.join(AUTOSOMAL_REFERENCES[get_genome(sample)])\n printp(template.format(**locals()), timed=True)\n\n printp(\"\"\"\\n# drmr:wait\"\"\")", "def preprocess(docs, nlp, min_length, min_counts, max_counts):\n\n def clean_and_tokenize(doc):\n text = ' '.join(doc.split()) # remove excessive spaces\n text = nlp(text, tag=True, parse=False, entity=False)\n #return [t.lemma_ for t in text if t.is_alpha and len(t) > 2 and not t.is_stop]\n return [t.lower_ for t in text if t.is_alpha and len(t) > 2 and not t.is_stop] # remove 
.lemma and add lower case operation\n\n tokenized_docs = [(i, clean_and_tokenize(doc)) for i, doc in tqdm(docs)]\n\n # remove short documents\n n_short_docs = sum(1 for i, doc in tokenized_docs if len(doc) < min_length)\n tokenized_docs = [(i, doc) for i, doc in tokenized_docs if len(doc) >= min_length]\n print('number of removed short documents:', n_short_docs)\n\n # remove some tokens\n counts = _count_unique_tokens(tokenized_docs)\n tokenized_docs = _remove_tokens(tokenized_docs, counts, min_counts, max_counts)\n n_short_docs = sum(1 for i, doc in tokenized_docs if len(doc) < min_length)\n tokenized_docs = [(i, doc) for i, doc in tokenized_docs if len(doc) >= min_length]\n print('number of additionally removed short documents:', n_short_docs)\n\n counts = _count_unique_tokens(tokenized_docs)\n encoder, decoder, word_counts = _create_token_encoder(counts)\n\n print('\\nminimum word count number:', word_counts[-1])\n print('this number can be less than MIN_COUNTS because of document removal')\n\n encoded_docs = _encode(tokenized_docs, encoder) # all the doc is encoded as indexes instead of words using ix2word\n return encoded_docs, decoder, word_counts", "def fill_batch_queue(self):\n\t\twhile True:\n\t\t\tif self._hps.mode.value != 'decode':\n\t\t\t\t# Get bucketing_cache_size-many batches of Examples into a list, then sort\n\t\t\t\tinputs = []\n\t\t\t\tfor _ in xrange(self._hps.batch_size.value * self._bucketing_cache_size):\n\t\t\t\t\tinputs.append(self._example_queue.get())\n\t\t\t\tinputs = sorted(inputs, key=lambda inp: inp.enc_len) # sort by length of encoder sequence\n\n\t\t\t\t# Group the sorted Examples into batches, optionally shuffle the batches, and place in the batch queue.\n\t\t\t\tbatches = []\n\t\t\t\tfor i in xrange(0, len(inputs), self._hps.batch_size.value):\n\t\t\t\t\tbatches.append(inputs[i:i + self._hps.batch_size.value])\n\t\t\t\t\n\t\t\t\tfor b in batches: # each b is a list of Example objects\n\t\t\t\t\tself._batch_queue.put(Batch(b, self._hps, self._vocab))\n\n\t\t\telse: # beam search decode mode\n\t\t\t\tex = self._example_queue.get()\n\t\t\t\tb = [ex for _ in xrange(self._hps.batch_size.value)]\n\t\t\t\tself._batch_queue.put(Batch(b, self._hps, self._vocab))", "def __init__(self, num_topics, corpus, stop_words, alpha=None, eta=0.1, max_iteration=10):\n if alpha:\n self.alpha = alpha\n else:\n self.alpha = float(50 / num_topics)\n self.eta = eta\n self.K = num_topics\n self._corpus = corpus\n self.max_iteration = max_iteration\n self.word2id = {}\n self.id2word = {}\n self.document = []\n index = 0\n for doc in corpus:\n word_count = {}\n temp_doc = []\n for word in doc:\n word = word.lower()\n if word not in stop_words and len(word) > 1 and not re.search(r'[0-9]', word):\n temp_doc.append(word)\n if word not in self.word2id.keys():\n self.word2id[word] = index\n self.id2word[index] = word\n index += 1\n if word in word_count.keys():\n word_count[word] += 1\n else:\n word_count[word] = 1\n self.document.append(temp_doc)\n # number of docs\n self.M = len(self._corpus)\n # number of words\n self.N = len(self.word2id)\n self.doc_topic_matrix = np.zeros([self.M, self.K], dtype=np.int8)\n self.topic_word_matrix = np.zeros([self.K, self.N], dtype=np.int8)\n self.topic_matrix = np.zeros(self.K, dtype=np.int8)\n self.current_word_topic_matrix = []", "def __init__(self):\n self.queue = []\n self.queue.append(Queue())\n self.queue.append(Queue())\n self.tag = 0 # using to record which queue contain the data", "def multiple_document_processing(self) -> List:\n batch_list 
= []\n for doc, idx in self.__documents:\n entities_idx = {'idx': idx}\n entities_result = self.create_entity(document=doc)\n word_cleaned = self.clean_words(doc)\n entities_idx[self.key_spacy_text] = str(word_cleaned)\n entities_idx.update(entities_result)\n batch_list.append(entities_idx)\n return batch_list", "def __post_init__(self) -> None:\n self.gtex += [None]\n self.bm += [None]\n self._q: queue.Queue = queue.Queue(maxsize=self.maxsize)", "def _docMapping(self):\n doc2quests = defaultdict(list)\n for q, d in self.quest2doc.items():\n doc2quests[d].append(q)\n return doc2quests", "def preprocess_docs():\n\n print(\"Getting started!\")\n stopwords.populate_stopwords(NLP, STOPWORD_URL)\n\n print(str.format(\"Using data dir:{}\", DATA_DIR))\n\n csv_file = open(os.path.join(DATA_DIR, 'PDFs.csv'))\n reader = csv.reader(csv_file, 'excel')\n rows = list(reader)\n\n filenames = [_get_filename(row) for row in rows]\n\n pool = Pool(multiprocessing.cpu_count())\n\n try:\n pool.map(_get_item, rows)\n pool.map(pdf.extract_text, filenames)\n docs = pool.map(_extract_questions, rows)\n docs = [d for d in docs if d is not None]\n\n _find_similar(docs, simdoc=compare.compare_doc_keywords)\n\n for doc in docs:\n if doc is None:\n continue\n doc.save_json()\n\n except KeyboardInterrupt:\n pool.terminate()\n print(\"You cancelled the program!\")\n sys.exit(1)\n\n print(\"Done\")", "def build(corpus: List[List[str]], size=5000, freq_cutoff=5):\n vocab = VocabEntry()\n word2freq = Counter(chain(*corpus))\n word2freq = {word: freq for word, freq in word2freq.items() if freq > freq_cutoff}\n words_selected = sorted(word2freq.keys(), key=lambda w: word2freq[w], reverse=True)[:size]\n for w in words_selected:\n vocab.add(w)\n print(\"vocabulary constructing completed, %d/%d words included......\" % (len(words_selected), len(word2freq)))\n return vocab", "def create_embedding(self):\n self.embedding = []\n\n for index in range(1,self.args.window_size+1):\n print(\"\\nOptimization round: \" +str(index)+\"/\"+str(self.args.window_size)+\".\")\n print(\"Creating documents.\")\n clean_documents = self.walk_extracts(index)\n print(\"Fitting model.\")\n model = Word2Vec(clean_documents,\n size = self.args.dimensions,\n window = 1,\n min_count = self.args.min_count,\n sg = 1,\n workers = self.args.workers)\n\n new_embedding = self.get_embedding(model)\n self.embedding = self.embedding +[new_embedding]\n self.embedding = np.concatenate(self.embedding, axis = 1)", "def build_index(self):\n\t\tix = self.create_index()\n\t\twriter = AsyncWriter(ix)\n\n\t\tfor i, document in enumerate(self.documents):\n\t\t\tif document:\n\t\t\t\twriter.add_document(**document)\n\t\t\tupdate_progress_bar(\"Building Index\", i, len(self.documents))\n\n\t\twriter.commit(optimize=True)", "def __init__(self):\n self.queues=[]", "def structure_PBDMS_annotations(documents, kb_data):\n \n doc_annotations = list()\n partial_func = partial(parse_PBDMS_doc, kb_data)\n \n with multiprocessing.Pool(processes=10) as pool:\n doc_annotations = pool.map(partial_func, documents)\n \n return doc_annotations", "def fill_example_queue(self):\n\t\tinput_gen = self.text_generator(data.example_generator(self._data, self._single_pass,self._device_id, data_as_tf_example=self._data_as_tf_example))\n\t\tcount = 0\n\t\tquery = None\n\t\tword_edge_list = None\n\t\tquery_edge_list = None\n\t\tif self._data_as_tf_example:\n\t\t\twhile True:\n\t\t\t\ttry:\n\t\t\t\t\t article, abstract, word_edge_list, query, query_edge_list, epoch_num = input_gen.next() # read the next 
example from file. article and abstract are both strings.\n\t\t\t\t\t #tf.logging.info(random.randint(1,101))\n\t\t\t\texcept StopIteration: # if there are no more examples:\n\t\t\t\t\ttf.logging.info(\"The example generator for this example queue filling thread has exhausted data.\")\n\t\t\t\t\tif self._single_pass:\n\t\t\t\t\t\ttf.logging.info(\"single_pass mode is on, so we've finished reading dataset. This thread is stopping.\")\n\t\t\t\t\t\tself._finished_reading = True\n\t\t\t\t\t\tbreak\n\t\t\t\t\telse:\n\t\t\t\t\t\traise Exception(\"single_pass mode is off but the example generator is out of data; error.\")\n\t\t\t\tabstract_sentences = [sent.strip() for sent in data.abstract2sents(abstract)] # Use the <s> and </s> tags in abstract to get a list of sentences.\n\t\t\t\texample = Example(article, abstract_sentences, self._vocab, self._hps, word_edge_list=word_edge_list, query=query, query_edge_list=query_edge_list, epoch_num=epoch_num, bert_vocab=self.bert_vocab)\n\t\t\t\tself._example_queue.put(example)\n\t\telse:\n\n\t\t\twhile True:\n\t\t\t\ttry:\n\t\t\t\t\tcurr_data = input_gen.next()\n\t\t\t\t\tcount = count + 1\n\t\t\t\t\tarticle = curr_data['article']\n\t\t\t\t\tabstract = curr_data['abstract'].strip()\n\t\t\t\t\tif self._hps.word_gcn.value:\n\t\t\t\t\t\tword_edge_list = curr_data['word_edge_list']\n\t\t\t\t\tif self._hps.query_encoder.value:\n\t\t\t\t\t\tquery = curr_data['query']\n\t\t\t\t\tif self._hps.query_gcn.value:\n\t\t\t\t\t\tquery_edge_list = curr_data['query_edge_list']\n\t\t\t\texcept Exception as e: # if there are no more examples:\n\t\t\t\t\ttf.logging.info(\"The example generator for this example queue filling thread has exhausted data.\")\n\t\t\t\t\tif self._single_pass:\n\t\t\t\t\t\ttf.logging.info(\n\t\t\t\t\t\t\t\"single_pass mode is on, so we've finished reading dataset. 
This thread is stopping.\")\n\t\t\t\t\t\tself._finished_reading = True\n\t\t\t\t\t\tbreak\n\t\t\t\t\telse:\n\t\t\t\t\t\ttf.logging.info(e)\n\t\t\t\t\t\traise Exception(\"single_pass mode is off but the example generator is out of data; error.\")\n\n\t\t\t\tabstract_sentences = [sent.strip() for sent in data.abstract2sents(abstract)] # Use the <s> and </s> tags in abstract to get a list of sentences.\n\t\t\t\texample = Example(article, abstract_sentences, self._vocab, self._hps, word_edge_list=word_edge_list, query=query, query_edge_list=query_edge_list, epoch_num=epoch_num)\n\t\t\t\tself._example_queue.put(example) # place the Example in the example queue.", "def clean_docs(self, docs):\n cleaned = [self.cleaning(doc) for doc in docs]\n print(cleaned[0])\n return cleaned", "def get_labelled_documents_queryset(self):\n from ..models import LabelledDocument\n\n return LabelledDocument.objects\\\n .filter(model_name=self.get_name())", "def filter(self, results):\r\n \r\n docs = self.docs & results.docs\r\n self.scored_list = [docnum for docnum in self.scored_list if docnum in docs]\r\n self.docs = docs", "def __init__(self, docs, dict_path= 'wordindex.npy'):\n super(NNModel, self).__init__()\n self.stopwords += self.additional_stopwords\n self.words = set(['OOB', 'UNK']) # OOB for out of boundary, UNK for unknown words\n self.docs = []\n\n for doc in docs:\n datum = []\n for word in self.cut_words(doc):\n self.words.add(word)\n datum.append(word)\n self.docs.append(datum)\n\n self.words = list(self.words)\n self.word2idx = dict([(self.words[i], i) for i in range(len(self.words))])\n logging.info(f'{len(docs)} articles loaded, with word bag length: {len(self.words)}')\n if dict_path != '': # save dict\n np.save(DATA_DIR + dict_path, self.word2idx)", "def ClearBatchQueue(self):\n\t\tself.batch_queue = gdata.contacts.data.ContactsFeed()", "def _initialize(self):\n for doc_index, doc in enumerate(self.document):\n temp_word_topic_matrix = []\n for word in doc:\n if word in self.word2id.keys():\n start_topic_index = np.random.randint(0, self.K)\n temp_word_topic_matrix.append(start_topic_index)\n self.doc_topic_matrix[doc_index, start_topic_index] += 1\n self.topic_word_matrix[start_topic_index, self.word2id[word]] += 1\n self.topic_matrix[start_topic_index] += 1\n self.current_word_topic_matrix.append(temp_word_topic_matrix)", "def get_all_candidates_in_queue(self):\n all_queued_ids = [t.gaid for t in self.c.select(queued=1)]\n all_relaxed_ids = [t.gaid for t in self.c.select(relaxed=1)]\n\n in_queue = [qid for qid in all_queued_ids\n if qid not in all_relaxed_ids]\n return in_queue", "def index_missing_objects(app_label, model_name, document_class, index_generation_time):\n model = apps.get_model(app_label, model_name)\n document = _get_document(model=model, document_class=document_class)\n query_string = \"{}__lte\".format(document.modified_model_field)\n queryset = (\n document().get_queryset().exclude(**{query_string: index_generation_time})\n )\n document().update(queryset.iterator())\n\n log.info(\n \"Indexed missing objects from model.\",\n count=queryset.count(),\n model=model.__name__,\n )\n\n # TODO: Figure out how to remove the objects from ES index that has been deleted", "def fetch_build_queue(self, planet=None):\n print(\"Not implemented yet!\")", "def clearQueue(targets):", "def __init__(self, docs, K):\n\n\t\tself.K = K\n\t\tself.D = len(docs)\n\t\tself.docs = docs\n\n\t\tdoc_list = list(itertools.chain(*docs))\n\t\tself.token_key = {}\n\t\tfor i,v in enumerate(set(doc_list)): 
self.token_key[v] = i\n\t\tself.V = len(self.token_key)\n\n\t\tself.tokens = np.array([self.token_key[t] for t in doc_list], dtype = np.int)\n\t\tself.N = self.tokens.shape[0]\n\t\tself.topic_seed = np.random.random_integers(0,K-1,self.N)\n\n\t\tself.docid = [[i]*len(d) for i,d in enumerate(docs)]\n\t\tself.docid = np.array(list(itertools.chain(*self.docid)), dtype = np.int)\n\n\t\tself.alpha = 50/self.K\n\t\tself.beta = 200/self.V", "def __init__(self, messages):\n self.pq = PriorityQueue()\n self.cover = []\n self.words_in_cover = set()\n\n # add message dictionary and process all messages (add to priority queue)\n self.message_corpus = messages\n # TODO: process messages prior to ingestion\n for msg_id in self.message_corpus.iterkeys():\n self.add_entry(msg_id)", "def generate_queue(train_idx, mode, mode2):\n user = train_idx.keys()\n train_queue = deque()\n if mode == 'random':\n initial_queue = {}\n for u in user:\n if mode2 == 'train':\n initial_queue[u] = deque(train_idx[u][1:])\n else:\n initial_queue[u] = deque(train_idx[u])\n queue_left = 1\n while queue_left > 0:\n np.random.shuffle(user)\n for j, u in enumerate(user):\n if len(initial_queue[u]) > 0:\n train_queue.append((u, initial_queue[u].popleft()))\n if j >= int(0.01 * len(user)):\n break\n queue_left = sum([1 for x in initial_queue if len(initial_queue[x]) > 0])\n elif mode == 'normal':\n for u in user:\n for i in train_idx[u]:\n train_queue.append((u, i))\n return train_queue", "def start_index(self, stem):\n with open(\n self.posting_and_dictionary_path + \"/docsStem\" if stem else self.posting_and_dictionary_path + \"/docs.txt\",\n \"w+\") as out:\n out.write(\"Number City NumOfUniqeTerms maxTf Date\\n\")\n out.close()\n\n stop_words = {}\n try:\n with open(self.corpus_path + \"/stop_words.txt\", \"r\") as sw:\n lines = sw.readlines()\n for line in lines:\n stop_words[line[:len(line) - 1]] = \"\"\n sw.close()\n\n except Exception:\n raise FileNotFoundError(\"the file stop_words.txt didn't found\")\n\n files_number = len(\n [word for word in os.listdir(self.corpus_path) if os.path.isdir(self.corpus_path + \"/\" + word)])\n s = files_number / 46\n tasks = []\n i = 0\n while i < int(s):\n index_element = IndexElement(i, self.corpus_path, self.posting_and_dictionary_path, stem, 46, stop_words)\n tasks.append(index_element)\n i += 1\n if files_number % 46 > 0:\n tasks.append(IndexElement(i, self.corpus_path, self.posting_and_dictionary_path, stem, files_number % 46,\n stop_words))\n starttime = time.time()\n pool = Pool(processes=(multiprocessing.cpu_count()))\n pool.map(self.index, tasks)\n print(time.time() - starttime)\n self.start_merge(stem)", "def _generate_batch_para(doc_ids, word_ids, batch_size, num_skips, window_size):\n data_index = 0\n assert batch_size % num_skips == 0\n assert num_skips <= 2 * window_size\n labels = np.ndarray(shape=(batch_size), dtype=np.int32)\n batch = np.ndarray(shape=(batch_size), dtype=np.int32)\n span = 2 * window_size + 1\n buffer = collections.deque(maxlen=span)\n buffer_para = collections.deque(maxlen=span)\n\n i = 0\n while data_index < len(word_ids):\n if len(buffer) == span and len(set(buffer_para)) == 1:\n target = window_size\n targets_to_avoid = [window_size]\n for j in range(num_skips):\n while target in targets_to_avoid:\n target = random.randint(0, span - 1)\n labels[i + j] = buffer[target]\n batch[i + j] = buffer[window_size]\n i += num_skips\n buffer.append(word_ids[data_index])\n buffer_para.append(doc_ids[data_index])\n data_index = (data_index + 1) % len(word_ids)\n if i 
== batch_size:\n yield batch, labels[:, None]\n i = 0\n labels = np.ndarray(shape=(batch_size), dtype=np.int32)\n batch = np.ndarray(shape=(batch_size), dtype=np.int32)", "def generate_batch():\n\n # Initialize variables\n example = np.zeros(self.batch_size)\n labels = np.zeros((self.batch_size, 1))\n alphas = np.zeros(self.batch_size)\n n_items = 0\n index = 0\n\n while index < len(data):\n reduced_window = random.randint(0, self.window_size)\n if data[index] is not None:\n\n left = max(0, index - self.window_size + reduced_window)\n right = min((index + self.window_size + 1 -\n reduced_window), len(data) - 1)\n for pos2 in range(left, right, 1):\n\n if n_items == self.batch_size:\n queue.put((example, labels, index))\n example = np.zeros(self.batch_size)\n labels = np.zeros((self.batch_size, 1))\n n_items = 0\n\n if pos2 != index and data[pos2] is not None:\n example[n_items] = data[pos2]\n labels[n_items] = data[index]\n alpha = self.learning_rate - \\\n (self.learning_rate - 0.001) * (index / self.n_words)\n alphas[n_items] = max(0.001, alpha)\n n_items += 1\n index += 1\n\n # Poison pills\n for _ in range(n_workers):\n queue.put(None)", "def parse_documents():\n\n\tcount_before = control.find().count()\n\n\tprint \"There are currently %i unprocessed records.\" % count_before\n\n\t#dispatch\n\t# executor = concurrent.futures.ThreadPoolExecutor(10)\n\t# futures = [executor.submit(analyze_message, document) for document in control.find()]\n\t# concurrent.futures.wait(futures)\n\n\tfor document in control.find():\n\t\tanalyze_message(document)\n\n\tcount_after = control.count()\n\tprint \"There are now %i stored records.\" % control.count()", "def clean_all_documents(cls):\n for index, text in enumerate(cls.documents):\n text_processed = cls.clean_document(text)\n cls.processed_documents.append(text_processed)", "def init_email_queue():\n g.setdefault('email_queue', [])", "def _build_task_queue(self, dt: datetime.datetime, scheduled_tasks: List[ScheduledTask]):\r\n self.task_queue = tuple([task for task in scheduled_tasks if task.is_scheduled_to_run(dt)])\r\n logging.info(f\"Task queue built, {len(self.task_queue)} tasks scheduled\")", "def relevant_docs_from_posting(self, query):\n relevant_docs = {}\n # postingLists = [self.FindPostingByTerm(term) for term in query] #list of posting file -->[idx,tweet id,tfi]\n for term in query:\n post = self.FindPostingByTerm_Binary(term)\n for p in post:\n tweet_id = p[1]\n if tweet_id not in relevant_docs.keys():\n relevant_docs[tweet_id] = {}\n relevant_docs[tweet_id][term] = p[2] * self.inverted_index[term][1] # wiq\n return relevant_docs", "def __init__(self, ixs):\n ixs = sorted(list(set(ixs)))\n self.ixs = ixs\n self.all_ids = fc.load_keys('../data/keys.json')\n self.ids = set(itemgetter(*ixs)(self.all_ids))\n corpus_files = glob('../data/documents/*')\n corpus_files.sort(key=natural_keys)\n self.corpus_files = corpus_files\n\n self.dictionary = Dictionary.load('../data/corpus.dict')\n self.token2id = self.dictionary.token2id\n logging.info(\"loading docs to RAM\")\n self.docs_ram_dict = self.push_docs_to_ram()\n\n self.wv = Word2Vec.load('../data/w2v_200_5_w8').wv\n\n logging.info(\"loading qdr model\")\n self.qdr = qdr.QueryDocumentRelevance.load_from_file('../data/qdr_model.gz')\n\n logging.info(\"loading mpk data\")\n with open('../data/all_mpk.pkl', 'rb') as f:\n self.all_mpk = pickle.load(f)\n self.mpk = ft.MPK()\n logging.info('data is loaded')", "def generate_batch_doc2VecC_tail(doc_ids, word_ids, doc_len, batch_size, window_size, 
sample_size):\n data_index = 0\n assert batch_size % window_size == 0\n span = window_size + 1\n buffer = collections.deque(maxlen=span)\n buffer_doc = collections.deque(maxlen=span)\n batches = np.ndarray(shape=(batch_size, window_size + 1), dtype=np.int32)\n labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)\n batch_doc = np.ndarray(shape=(batch_size, sample_size), dtype=np.int32)\n mask = [1] * span\n mask[-1] = 0\n i = 0\n\n while data_index < len(word_ids):\n if len(set(buffer_doc)) == 1 and len(buffer_doc) == span:\n doc_id = buffer_doc[-1]\n batches[i, :] = list(compress(buffer, mask)) + [doc_id]\n labels[i, 0] = buffer[-1]\n batch_doc[i, :] = random.sample(word_ids[doc_len[doc_id]:doc_len[doc_id + 1]],\n sample_size)\n i += 1\n buffer.append(word_ids[data_index])\n buffer_doc.append(doc_ids[data_index])\n data_index = (data_index + 1) % len(word_ids)\n if i == batch_size:\n yield batches, labels, batch_doc", "def load_objects(self, queue):\n pass", "def build(self,documents):\n\t\tself.vectorKeywordIndex = self.getVectorKeywordIndex(documents)\n\n\t\tself.documentVectors = [self.createVector(document) for document in documents]", "def buildCorpus(self, filename, stopwords_file=None):\n with open(filename, 'r') as infile:\n # use pattern.subs\n # doclines = [line.rstrip().lower().split(' ') for line in infile]\n doclines = [self.help_clean(line) for line in infile]\n n_docs = len(doclines)\n self.vocab = list({v for doc in doclines for v in doc})\n if stopwords_file:\n with open(stopwords_file, 'r') as stopfile:\n stops = stopfile.read().split()\n self.vocab = [x for x in self.vocab if x not in stops]\n self.vocab.sort()\n self.documents = []\n for i in range(n_docs):\n self.documents.append({})\n for j in range(len(doclines[i])):\n if doclines[i][j] in self.vocab:\n self.documents[i][j] = self.vocab.index(doclines[i][j])", "async def __call__(self, *args, **kwargs):\n if Builder.queue:\n try:\n successfully_built = await super().__call__(*args, **kwargs)\n if successfully_built:\n del Builder.queue[0]\n except Exception as e:\n msg = str(e).lower()\n logger.error(msg)\n finally:\n await self.__call__()", "def guide(self, doc_list=None):\r\n\r\n with pyro.plate(\"topics\", self.K) as k_vec:\r\n\r\n # Lambda => latent variable for the per-topic word q distribution\r\n Lamda = torch.stack([\r\n pyro.param(\r\n f\"lamda_q_{k}\",\r\n (1 + 0.01*(2*torch.rand(self.V)-1)),\r\n constraint=constraints.positive)\r\n for k in k_vec\r\n ])\r\n\r\n # Beta_q => per-topic word q distribtion\r\n Beta_q = pyro.sample(f\"beta\", dist.Dirichlet(Lamda))\r\n\r\n Theta_q = []\r\n for d in pyro.plate(\"documents\", self.D, subsample_size=self.S):\r\n\r\n # gamma => q for the per-doc topic vector\r\n gamma = pyro.param(f\"gamma_q_{d}\",\r\n (1+0.01*(2*torch.rand(self.K)-1))/self.K,\r\n constraint=constraints.positive)\r\n\r\n # theta_q => posterior per-doc topic vector\r\n theta_q = pyro.sample(f\"theta_{d}\", dist.Dirichlet(gamma))\r\n\r\n phi = pyro.param(\r\n f\"phi_q_{d}\",\r\n (1+0.01*(2*torch.rand(self.K)-1))/self.K,\r\n constraint=constraints.positive\r\n )\r\n\r\n with pyro.plate(f\"words_{d}\", self.N[d]) as w_vec:\r\n\r\n phi = torch.stack([\r\n pyro.param(\r\n f\"phi_q_{d}_{w}\",\r\n (1+0.01*(2*torch.rand(self.K)-1))/self.K,\r\n constraint=constraints.positive)\r\n for w in w_vec\r\n ])\r\n\r\n # assign a topic\r\n pyro.sample(f\"z_assignment_{d}\", dist.Categorical(phi))\r\n\r\n Theta_q.append(theta_q)\r\n\r\n Theta_q = torch.stack(Theta_q)\r\n\r\n return Beta_q, Theta_q", "def 
__init__(self):\n self.inverted_index = OrderedDict({})\n self.i=0\n self.unique_doc_ids=set()", "def do_build(self):\n self.build_queue = sorted(self.build_queue, key=lambda q: q['priority'])\n for item in self.build_queue:\n for i in range(0, item['count']['raw']):\n item['town'].owned.append(item['unit'])", "def requeue_changes(cls, queue):\n for c in sorted(cls.get_changes(), key=lambda c: 1 if fnmatch.fnmatch(c, \"*mini-buildd-build*\") else 0):\n LOG.info(\"Incoming: Re-queuing: {c}\".format(c=c))\n queue.put(c)", "def _clean_data(self, docs: []):\n print('Cleaning data...')\n preprocessed_data = []\n for doc in docs:\n if len(doc) <= constants.MIN_DOC_LENGTH:\n continue\n\n temp_doc = self._remove_urls(doc)\n temp_doc = self._remove_special_chars(temp_doc)\n temp_doc = self._transform_to_lowercase(temp_doc)\n temp_doc = self._remove_stopwords(temp_doc)\n\n preprocessed_data.append(temp_doc)\n\n return preprocessed_data", "def createPostingList():\n try :\n ##### peut etre à mettre dans une fonction\n file_reader_last_read_list = initialize_file_readers()\n for idx, file_reader_and_last_read in enumerate(file_reader_last_read_list):\n file_reader_last_read_list[idx]=read_line_and_update(file_reader_and_last_read=file_reader_and_last_read)\n current_word = min_top_word(file_reader_last_read_list=file_reader_last_read_list)\n final_file = open(POSTING_LIST_PATH, \"w\")\n ######\n\n doc_dict = get_doc_dict(DOC_LIST_PATH)\n nb_doc = len(doc_dict)\n\n ### autre function\n i = 0 \n while current_word != \"|||\": \n current_PL = current_word_PL(current_word=current_word, file_reader_last_read_list=file_reader_last_read_list,\\\n doc_dict=doc_dict, nb_doc=nb_doc ) \n curent_string = \"\"\n for key, value in current_PL.items():\n curent_string = \" \" + str(key) + \" \" + str(value) + curent_string\n curent_string = current_word + curent_string\n final_file.write(curent_string + \"\\n\")\n current_word = min_top_word(file_reader_last_read_list=file_reader_last_read_list)\n #if i %1000 == 0:\n #print(i/1000)\n i +=1\n ####\n \n final_file.close()\n close_file_readers(file_reader_last_read_list=file_reader_last_read_list)\n \n except Exception as ex:\n print(ex)\n final_file.close()\n close_file_readers(file_reader_last_read_list=file_reader_last_read_list)", "def _action_ondocuments(self, ids, action, status):\n docIDs = []\n# documents=[]\n documentType = self.env['plm.document']\n check=self._context.get('no_move_documents', False)\n if not check:\n for oldObject in self.browse(ids):\n for document in oldObject.linkeddocuments:\n if (document.id not in docIDs):\n if documentType.ischecked_in(document.id):\n docIDs.append(document.id)\n idMoves=move_workflow(documentType, docIDs, action, status)\n documentType.logging_workflow(idMoves, action, status)\n return docIDs", "def queueOff() -> None:\n\t\tLogging.enableQueue = False", "def preprocess_docs(self, docs_array):\n # Remove stopwords\n new_docs_list = []\n for doc in docs_array:\n temp = doc.lower()\n if \"\\\\\" in temp:\n temp = temp.replace(\"\\\\\", \"\")\n if \"&lt;FONT face\" in temp:\n temp = re.sub(r\"&lt;FONT face.*$\", \"\", doc)\n if \"a href=\" in temp:\n temp = re.sub(r\" ;a href.*/a&gt;\", \"\", doc)\n if \"A HREF\" in temp:\n temp = re.sub(r\" ;A HREF.*/A&gt;\", \"\", doc)\n if \"&lt\" in temp:\n temp = temp.replace(\"&lt\", \"\")\n if \"i&gt\" in temp:\n temp = temp.replace(\"i&gt\", \"\")\n if \" \" in temp:\n temp = temp.replace(\" \", \" \")\n if \"#151\" in temp:\n temp = temp.replace(\"#151\", \"\")\n if \"#36\" in 
temp:\n temp = temp.replace(\"#36\", \"\")\n if \"#39\" in temp:\n temp = temp.replace(\"#39\", \"\")\n new_docs_list.append(temp)\n return np.array(new_docs_list)", "def train(self, documents):\n ###DONE\n\n #entire vocab in document set D\n vocab_sod = set()\n vocab_pop = set()\n \n #Calcuates prior probabilities\n priorSOD = 0 #how many docs are spam\n priorPOP = 0 #how many docs are ham\n \n #Cacluates Tct\n term_freq_sod = {} #{term:occur, term:occur}\n term_freq_pop = {}\n \n #Tct'\n Tct_sod = 0 #Tct' = sum of (every term occurence in class c + 1)\n Tct_pop = 0\n \n for doc in documents: \n if 'sod' in doc.label:\n priorSOD += 1\n for token in doc.tokens:\n Tct_sod += 1\n if token in term_freq_sod.keys():\n term_freq_sod[token] = term_freq_sod[token] + 1\n else:\n term_freq_sod[token] = 1\n vocab_sod.add(token) \n else:\n priorPOP += 1\n for token in doc.tokens:\n Tct_pop += 1\n if token in term_freq_pop.keys():\n term_freq_pop[token] = term_freq_pop[token] + 1\n else:\n term_freq_pop[token] = 1\n vocab_pop.add(token)\n \n \n #endfor\n # | is for set join\n self.vocab = vocab_sod | vocab_pop #gets rid of duplicate words (those in both 'ham' and 'spam') \n \n #Tct Primes\n #tct' = term freq of all terms in class c + 1*(total terms)\n Tct_sod = Tct_sod + len(self.vocab) \n Tct_pop = Tct_pop + len(self.vocab) \n \n \n print(\"PriorSod: \" + str(priorSOD))\n print(\"PriorPop: \" + str(priorPOP))\n print(\"LEN Docum: \" + str(len(documents)))\n \n self.priorSOD = priorSOD / len(documents)\n self.priorPOP = priorPOP / len(documents)\n \n for term in self.vocab:\n if term in term_freq_pop.keys():\n self.cond_prob_pop[term] = (term_freq_pop[term] + 1) / Tct_pop\n else:\n self.cond_prob_pop[term] = 1 / Tct_pop\n \n if term in term_freq_sod.keys():\n self.cond_prob_sod[term] = (term_freq_sod[term] + 1) / Tct_sod\n else:\n self.cond_prob_sod[term] = 1 / Tct_sod\n \n \n pass", "def generate_wordclouds_for_document_topics(doc_topic_distrib, doc_labels, top_n, topic_labels='topic_{i1}',\n which_documents=None, return_images=True, **wordcloud_kwargs):\n return generate_wordclouds_from_distribution(doc_topic_distrib, row_labels=doc_labels, val_labels=topic_labels,\n top_n=top_n, which_rows=which_documents, return_images=return_images,\n **wordcloud_kwargs)", "def preprocess(self, df, maxlen = 169):\n \n vocabs = self.tk.word_index.keys()\n \n df1 = self.treat_na(df)\n df2 = self.remove_punc_sw(df1)\n df3 = self.remove_numbers(df2)\n df4 = self.lemma_pos(df3)\n df5 = self.bigram(df4)\n df6 = self.combine_bigrams(df5)\n \n new_docs = []\n \n for word_list in df6:\n \n if len(word_list) == 2 and word_list[0].lower() == 'noinfo' and word_list[1].lower() == 'noinfo':\n new_docs.append(list(np.zeros(maxlen)))\n \n else:\n new_word_list = []\n for word in word_list:\n if word not in vocabs:\n word = 'UNKNOWN_TOKEN'\n new_word_list.append(word)\n \n sequence = \" \".join(new_word_list)\n vectors = self.tk.texts_to_sequences([sequence])\n padded_vectors = pad_sequences(vectors, maxlen=maxlen, padding='post', truncating='post')\n \n new_docs.append(list(padded_vectors[0]))\n \n return new_docs", "def get_Pre_Succ(I):\n #Docs = I.docs\n #Docs_id = Docs.keys()\n Docs = I.getIndex().all_ids_\n Docs_id = [ int(float(k)) for k in Docs] \n N_pgs = len(Docs_id)\n Index_P = { id:idx for idx,id in enumerate(Docs_id)}\n Counter_Index_P = { idx:id for idx,id in enumerate(Docs_id)}\n \n print \"\\nBuilding Pi...\"\n Succ = { Index_P[p]:(I.getLinksForDoc(p),len(I.getLinksForDoc(p))) for p in Docs_id }\n P = {}\n for e in 
Succ:\n succ_e,l_e = Succ[e]\n for s in succ_e: \n if Index_P.get(s,\"Unknown_Doc_id\") not in P:\n P[Index_P.get(s,\"Unknown_Doc_id\")] = set()\n P[Index_P.get(s,\"Unknown_Doc_id\")].add(e) \n \n return P,Succ,Index_P,Counter_Index_P,N_pgs", "def generate_batch(batch_size, num_skips, skip_window):\n # global keyword gives this function access to global variable data_index\n global data_index\n assert batch_size % num_skips == 0 \n assert num_skips <= 2 * skip_window\n batch = np.ndarray(shape=(batch_size), dtype=np.int32)\n labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)\n span = 2 * skip_window + 1\n # Create a double-ended queue (both stack and queue) for word buffer\n # maxlen - keeping a fixed sliding window \n buffer = collections.deque(maxlen=span)\n for _ in range(span):\n # Shift the skipgram window to the left by 1\n buffer.append(data[data_index])\n # Increase data_index for next shift\n data_index = (data_index + 1) % len(data)\n for i in range(batch_size // num_skips):\n # target label at the center of the buffer \n target = skip_window \n # avoid the target word and later selected words\n targets_to_avoid = [ skip_window ]\n for j in range(num_skips):\n while target in targets_to_avoid:\n target = random.randint(0, span - 1)\n targets_to_avoid.append(target)\n # batch is the same word for current num_skip\n batch[i * num_skips + j] = buffer[skip_window]\n labels[i * num_skips + j, 0] = buffer[target]\n buffer.append(data[data_index])\n data_index = (data_index + 1) % len(data)\n return batch, labels", "def rocchio_doc_list(query_vector, corpus, topic):\n #create dict of vectors for each docid that contains\n #at least one non-zero term in query_vector\n inv_index = vsm_retrieval.get_inverted_index(corpus)\n doc_shortlist = dict()\n vector_len = len(query_vector)\n word_list = list(inv_index.keys())\n if corpus == cg.REUTERS:\n topic_docs = list(map(int, text_categorization.get_topic_dict()[topic]))\n else:\n topic_docs = list(range(0, 663))\n for index, weight in enumerate(query_vector):\n word = word_list[index]\n for doc_id in set(inv_index[word]).intersection(set(topic_docs)):\n if doc_id in doc_shortlist:\n #doc already added, just update weight entry for this word\n doc_shortlist[doc_id][index] = inv_index[word][doc_id]['weight']\n else:\n #doc not added yet add doc_id to shortlist,\n #initialize list to 0s for all words in query\n #update weight entry for current word\n entry = np.zeros(vector_len)\n entry[index] = inv_index[word][doc_id]['weight']\n doc_shortlist[doc_id] = entry\n\n return doc_shortlist", "def AutoQuakePycker_run():\n import glob\n from itertools import product\n import multiprocessing\n import logging\n import more_itertools as mit\n from munch import munchify\n import os\n import yaml\n from obspy import read_events, Catalog\n\n logger = multiprocessing.log_to_stderr(logging.DEBUG)\n # Read in config file\n with open(\"config.yaml\", \"r\") as ymlfile:\n cfg = munchify(yaml.safe_load(ymlfile))\n initial_cat = read_events(cfg.input.lassie_cat_file)\n\n # Read in station locations from file\n sta_list = [[l.split()[1], float(l.split()[3]), float(l.split()[4])] for l\n in open(\"NLLOC_run/run.in\", \"r\") if l.split()[0] == \"GTSRCE\"]\n sta_locs = {sta[0]: {\"lat\": sta[1], \"lon\": sta[2]} for sta in sta_list}\n if cfg.output.FORCE_RECALC is True:\n filelist = glob.glob(os.path.join(\"refined_events\", \"*.xml\"))\n for f in filelist:\n os.remove(f)\n if cfg.run.nproc == \"auto\":\n nproc = multiprocessing.cpu_count()\n else:\n nproc = 
cfg.run.nproc\n # Get events for which data currently is available\n cat_filter = Catalog()\n for n, event in enumerate(initial_cat):\n e_id = event.event_descriptions[0].text\n if e_id in os.listdir(\"{:}/\".format(cfg.input.DIR_TO_EVENTDIRS)):\n if (cfg.output.FORCE_RECALC is False and\n os.path.exists(\"refined_events/{:}.xml\".format(e_id))):\n print(\"Already have this evening ... skipping\")\n else:\n cat_filter.append(event)\n # Split catalogue across multiple processes and process in parallel\n cat_split = [i for i in mit.divide(nproc, cat_filter)]\n # process_events(cat_split[7], 7)\n pool = multiprocessing.Pool(processes=nproc)\n print(\"hello\")\n a = pool.starmap(process_events, product(cat_split, range(nproc), cfg,\n sta_locs))\n\n logger.debug(a)", "def build_DB(self, doc_files):\n\t\tcompteur=0\n\t\tdoc_name=doc_files+'doc_'+str(compteur)+'.txt'\n\t\twhile os.path.exists(doc_name):\n\t\t doc=Doc(doc_name)\n\t\t self.DB.add_doc(doc)\n\t\t compteur+=1\n\t\t doc_name=doc_files+'doc_'+str(compteur)+'.txt'\n\t\tprint \"Number of documents in the Data Base: \", self.DB.nb_doc_total\n\t\t#print self.DB.id2nbword\n\t\tself.dump_DB()", "def remove_from_queue(self, confid):\n\n queued_ids = self.c.select(queued=1, gaid=confid)\n ids = [q.id for q in queued_ids]\n self.c.delete(ids)", "def prefiltered_docs(self):\n relevant_docs = set()\n for term in self.query_terms:\n try:\n for entry in self.inv_idx.idx[term]:\n relevant_docs.add(entry.doc_id)\n except KeyError:\n pass\n return relevant_docs", "def search(self, docs: DocumentArray, **kwargs):\n cursor = self.connection.cursor()\n for doc in docs:\n # retrieve metadata\n cursor.execute(f'SELECT DOC FROM {self.table} WHERE ID = %s;', (doc.id,))\n result = cursor.fetchone()\n data = bytes(result[0])\n retrieved_doc = Document(data)\n retrieved_doc.pop('embedding')\n doc.MergeFrom(retrieved_doc)", "def labelize(docs, label_type='doc', class_labels=[], offset=0):\n # import gensim\n assert TDoc.isListOfTokens(docs, n_test=10), \"Ill-formated input docs: %s\" % docs\n\n TaggedDocument = gensim.models.doc2vec.TaggedDocument\n labeledDocs = []\n\n # testing\n labelx = []\n if len(class_labels) > 0: \n assert len(docs) == len(class_labels)\n # docLabels = [] # test uniqueness only\n\n counter = {l: 0 for l in np.unique(class_labels)}\n for i, doc in enumerate(docs): \n dID = counter[class_labels[i]]\n dID = dID + offset\n label = '%s_%s' % (class_labels[i], dID); labelx.append(label)\n labeledDocs.append(TaggedDocument(doc, [label, ]))\n \n # update document ID of the same class label\n counter[class_labels[i]] += 1\n else: \n for i, doc in enumerate(docs):\n dID = i + offset\n label = '%s_%s' % (label_type, dID); labelx.append(label)\n labeledDocs.append(TaggedDocument(doc, [label, ]))\n\n nuniq, ntotal = len(np.unique(labelx)), len(labelx)\n # print('labelize> n_uniq: %d =?= n_total: %d' % (nuniq, ntotal))\n assert len(np.unique(labelx)) == len(labelx), \"labels are not unique %d vs %d\" % (nuniq, ntotal)\n return labeledDocs", "def train_lda_topic_model_with_mallet(texts, path_mallet,\n terms_to_remove=[], num_topics=50,\n no_below=10, no_above=0.9,\n scoring=False, start=2, step=3):\n preprocessed_corpus = []\n print ('training of gensim corpus began')\n for i, text in enumerate(texts):\n if i == 0:\n # todo filter here\n text = text.split()\n\n # Additional filtering steps #\n \"\"\"\n filtered_text = [word for word in text if (word[0] in\n string.ascii_uppercase + string.ascii_lowercase)]\n\n filtered_text = [word for word in 
filtered_text if\n (word not in set(stopwords.words('english')))]\n preprocessed_corpus.append(filtered_text)\n \"\"\"\n\n dct = initialize_gensim_dictionary([text])\n else:\n text = text.split()\n # Additional filtering steps\n\n \"\"\"\n filtered_text = [word for word in text if (word[0] in\n string.ascii_uppercase + string.ascii_lowercase)]\n\n filtered_text = [word for word in filtered_text if\n (word not in set(stopwords.words('english')))]\n preprocessed_corpus.append(filtered_text)\n \"\"\"\n add_documents_to_gensim_dictionary(dct, [text])\n # todo:this is to be integrated to the building process\n\n if len(terms_to_remove) > 0:\n for term in terms_to_remove:\n dct.filter_tokens(bad_ids=[dct.token2id[term]])\n\n dct.filter_extremes(no_below=no_below, no_above=no_above)\n\n gensim_corpus = [dct.doc2bow(bag_of_word.split()) for bag_of_word in texts]\n print ('gensim corpus done')\n if scoring:\n\n coherence_values = []\n\n for n in range(start, num_topics, step):\n\n lda = LdaMallet(constants.PATH_TO_MALLET,\n gensim_corpus, id2word=dct,\n num_topics=n)\n coherencemodel = CoherenceModel(model=lda,\n texts=preprocessed_corpus,\n dictionary=dct, coherence='c_v')\n coherence_values.append(coherencemodel.get_coherence())\n\n return coherence_values\n\n else:\n lda = LdaMallet(constants.PATH_TO_MALLET, gensim_corpus,\n id2word=dct, num_topics=num_topics)\n # Visualize LDA results, poor results obtained.\n # from gensim.models.wrappers import ldamallet\n # lda_model = ldamallet.malletmodel2ldamodel(lda)\n # vis = pyLDAvis.gensim.prepare(lda_model, gensim_corpus, dct)\n # pyLDAvis.save_html(vis , 'test.html')\n return {'model': lda, 'corpus': gensim_corpus}", "def queue_on_project(self, queue_for_bfs, list_tasks_project):\n if not len(queue_for_bfs):\n return\n else:\n task = queue_for_bfs.popleft()\n for key_sub in task.subtasks:\n sub = self.get_task(key_sub)\n list_tasks_project.append(sub)\n queue_for_bfs.append(sub)\n self.save_task(sub)\n\n self.queue_on_project(queue_for_bfs, list_tasks_project)", "def preprocess(docs):\r\n # stop = set(stopwords.words('english'))\r\n tags = {'NN', 'NNS', 'NNP', 'NNP', 'NNPS', 'JJ', 'JJR', 'JJS'}\r\n for i in range(len(docs)):\r\n docs[i] = [(word.lower(), convert(tag)) for (word, tag) in nltk.pos_tag(nltk.word_tokenize(docs[i])) if tag in tags]\r\n return lemmatize_docs(docs)", "def clearQueueAll():", "def get_sources(queue, args, gedcom_data, dbid_map, apid_image_map):\n sources = {}\n dbid_list = []\n source_list = []\n logging.info(\"Generating updated source records\")\n gedcom = StringIO(gedcom_data)\n line = gedcom.readline()\n while line:\n if \"0 @S\" not in line:\n if \" _APID \" in line:\n dbid = line.split(\",\")[1].split(\":\")[0]\n if dbid not in dbid_list:\n dbid_list.append(dbid)\n if \" SOUR \" in line:\n source = line.split(\" \")[2].strip()\n if source not in source_list:\n source_list.append(source)\n line = gedcom.readline()\n continue\n apid = \"\"\n source = []\n source_data = [line]\n source_id = line.split(\" \")[1]\n if source_id not in source_list:\n logging.error(\"Found unreferenced source record %s\", source_id)\n line = gedcom.readline()\n continue\n line = gedcom.readline().strip()\n while line[0] != \"0\":\n source_data.append(line)\n if \"_APID\" in line:\n apid = line.strip().split(\" \")[2]\n dbid = apid.split(\":\").pop(0).split(\",\").pop(1)\n if dbid not in dbid_list:\n logging.error(\n \"Found unreferenced DBID record %s in source record %s\",\n dbid,\n source_id,\n )\n line = gedcom.readline()\n continue\n line 
= gedcom.readline().strip()\n if apid == \"\":\n sources.update({source_id: source_data})\n continue\n original = []\n publisher = []\n description = []\n if dbid in dbid_map:\n if \"publisher\" in dbid_map[dbid] and dbid_map[dbid][\"publisher\"] != \"\":\n publisher = build_note(dbid_map[dbid][\"publisher\"], keyword=\"PUBL\")\n if \"original\" in dbid_map[dbid] and dbid_map[dbid][\"original\"] != \"\":\n original = build_note(\n \"Original Data: {0}\".format(dbid_map[dbid][\"original\"]),\n keyword=\"NOTE\",\n )\n if \"description\" in dbid_map[dbid]:\n if dbid_map[dbid][\"description\"] not in [\"\", \"Learn more...\"]:\n description = build_note(\n dbid_map[dbid][\"description\"], keyword=\"NOTE\"\n )\n else:\n logging.error(\"Found DBID record %s with no data\", dbid)\n in_title = False\n in_publisher = False\n short_title = apid = \"\"\n for entry in source_data:\n if \" _APID \" in entry:\n apid = entry\n continue\n if in_title:\n if \" CONC \" in entry or \" CONT \" in entry:\n source.append(entry)\n continue\n in_title = False\n if short_title != \"\":\n source.append(\"1 ABBR {0}\".format(short_title))\n if in_publisher:\n if \" CONC \" in entry or \" CONT \" in entry:\n source.append(entry)\n continue\n in_publisher = False\n if args.source_url:\n source.append(\n \"1 NOTE https://search.ancestry.com/search/db.aspx?dbid={0}\".format(\n dbid\n )\n )\n if \"NOTE\" in entry and len(entry) < 8:\n continue\n if \"CONC\" in entry and len(entry) < 8:\n continue\n if \" PUBL \" in entry:\n if publisher != []:\n for item in publisher:\n source.append(item)\n else:\n source.append(entry)\n in_publisher = True\n continue\n if \" TITL \" in entry:\n if len(entry[7:].strip()) <= 60:\n short_title = entry[7:].strip()\n in_title = True\n source.append(entry)\n if original != []:\n for item in original:\n source.append(item)\n if description != []:\n for item in description:\n source.append(item)\n search = apid.split(\":\").pop(0) + \"::\"\n for entry in apid_image_map:\n if search in entry:\n source.append(\"1 OBJE {0}\".format(apid_image_map[entry]))\n if args.keep_apid:\n source.append(\"1 _APID {0}\".format(apid))\n sources.update({source_id: source})\n logging.info(\"Updated source records generated\")\n queue.put(sources)", "def generate_inverted_index(self, doc_id, tokenized_document):\n self.unique_doc_ids.add(doc_id)\n for t in tokenized_document:\n self.add_to_index(t, doc_id)", "def generate_bar_example(\n num_topics=10, num_documents=500, num_words_per_doc=100, alpha=1, beta=1, seed=None\n):\n\n width = 5\n\n vocab_size = width * width\n rng = random.Random()\n if seed is not None:\n rng.seed(seed)\n\n zeros = [[0 for i in range(width)] for j in range(width)]\n topic_squares = [zeros for i in range(num_topics)]\n for i in range(width):\n for j in range(width):\n topic_squares[i][i][j] = 1.0 / width\n for i in range(width):\n for j in range(width):\n topic_squares[width + i][j][i] = 1.0 / width\n topics = []\n for k in range(num_topics):\n topics.append(list(_itertools.chain(*topic_squares[k])))\n\n def weighted_choice(probs):\n total = sum(probs)\n r = rng.uniform(0, total)\n upto = 0\n for i, w in enumerate(probs):\n if upto + w > r:\n return i\n upto += w\n assert False, \"Shouldn't get here\"\n\n documents = []\n thetas = []\n for d in range(num_documents):\n doc = [0 for i in range(width * width)]\n topic_dist = [rng.gammavariate(1, 1) for k in range(num_topics)]\n topic_dist = [z / sum(topic_dist) for z in topic_dist]\n for i in range(num_words_per_doc):\n k = 
weighted_choice(topic_dist)\n w = weighted_choice(topics[k])\n doc[w] += 1\n thetas.append(topic_dist)\n documents.append(doc)\n\n sparse_documents = []\n for d in documents:\n sd = {}\n for i in range(width):\n for j in range(width):\n k = str(i) + \",\" + str(j)\n sd[k] = d[i * width + j]\n sparse_documents.append(sd)\n bow_documents = turicreate.SArray(sparse_documents)\n return bow_documents", "def dequeue(self):", "def init_ready_queue(inbound_counts, nodes):\n ready = [n for n in nodes if inbound_counts[n] == 0]\n return deque(ready) # to get popleft()", "def __init__(self, cfg, data_dir, train_files):\n self.cfg = cfg\n self.imgs, self.ids, self.anns = None, None, None\n self.data_dir = data_dir\n self.product_labels = {}\n print('loading annotations into memory...')\n tic = time.time()\n self.datasets = []\n if type(train_files) != list:\n train_files = [train_files]\n for train_file in train_files:\n labels_file = os.path.dirname(train_file)\n labels_file = os.path.join(labels_file, 'labels.txt')\n with open(labels_file, 'r') as f:\n self.product_names = {}\n for line in f:\n label, prod_name = line.split()\n self.product_labels[prod_name] = int(label)\n with open(train_file, 'r') as f:\n dataset = {}\n train_file_dir = os.path.dirname(train_file)\n for line in f:\n img, ann_file = line.split()\n img = os.path.join(train_file_dir, 'images',\n os.path.basename(img))\n ann_file = os.path.join(train_file_dir, 'annotations',\n os.path.basename(ann_file))\n dataset[img] = ann_file\n self.datasets.append(dataset)\n print('Done (t={:0.2f}s)'.format(time.time() - tic))\n self.create_index()", "def similar_docs(self, doc=None, docs=[], count=10):\n #import ipdb; ipdb.set_trace()\n if doc is not None:\n docs = [doc]\n docs = [text_utils.lemmatize_text(doc) for doc in docs]\n vec = self.vectorizer.transform(docs)\n tvec = self.transformer.transform(vec)\n sims, docids = self.knn.kneighbors(tvec, return_distance=True)\n #return [self.docs[docid] for docid in docids[0][:count]], [1-sim for sim in sims[0][:count]]\n results = []\n for idx in range(len(docids[0])):\n docid = docids[0][idx]\n results.append({\n \"id\": docid,\n \"text\": self.docs[docid],\n \"score\": 1-sims[0][idx], #distance to similarity\n })\n results = sorted(results, key=lambda x: -x[\"score\"])\n return results[:count]", "def run(self):\n name_desc = self.__class__.name_sphinx\n settings = self.state.document.settings\n env = settings.env if hasattr(settings, \"env\") else None\n docname = None if env is None else env.docname\n tag = self.options.get('tag', '').strip()\n n = self.__class__.node_class('')\n n[\"breftag\"] = tag\n n[\"brefsort\"] = self.options.get('sort', 'title').strip()\n n[\"brefsection\"] = self.options.get(\n 'section', True) in (True, \"True\", \"true\", 1, \"1\")\n n[\"brefcontents\"] = self.options.get(\n 'contents', False) in (True, \"True\", \"true\", 1, \"1\", \"\", None, \"None\")\n n['docname'] = docname\n if env is not None:\n targetid = 'index%slist-%s' % (name_desc,\n env.new_serialno('index%slist' % name_desc))\n targetnode = nodes.target('', '', ids=[targetid])\n return [targetnode, n]\n else:\n return [n]", "def disambiguate_all_not_disambiguated(\n celery_batch_size, total_records, indexing_queue_limit, disambiguation_queue_limit\n):\n with current_celery_app.connection_or_acquire() as conn:\n indexer_queue = conn.default_channel.queue_declare(\n queue=\"indexer_task\", passive=True\n )\n disambiguation_queue = conn.default_channel.queue_declare(\n queue=\"disambiguation\", 
passive=True\n )\n if (\n disambiguation_queue.message_count > disambiguation_queue_limit\n or indexer_queue.message_count > indexing_queue_limit\n ):\n click.echo(\"MQ queues are full, can't run disambiguation\")\n return\n not_disambiguated_records_search = _get_all_not_disambiguated_records_search()\n documents = not_disambiguated_records_search.scan()\n if total_records:\n documents = islice(documents, total_records)\n uuids = (document.meta.id for document in documents)\n _send_celery_group_disambiguation_task(uuids, celery_batch_size)" ]
[ "0.56112474", "0.5495247", "0.53238213", "0.52911776", "0.52044696", "0.5167606", "0.51129067", "0.5092232", "0.5007754", "0.5002671", "0.50004286", "0.49999186", "0.4982563", "0.4973685", "0.49599707", "0.4940291", "0.49395326", "0.4921682", "0.4908651", "0.49044877", "0.48902038", "0.48758358", "0.4832821", "0.48303953", "0.47716397", "0.47572058", "0.4745991", "0.4742138", "0.47371924", "0.47152492", "0.47132385", "0.47131625", "0.4688076", "0.46862122", "0.46833134", "0.46804723", "0.46698555", "0.46643546", "0.4663503", "0.46606654", "0.4659329", "0.465498", "0.4644416", "0.46425724", "0.46360552", "0.46324813", "0.4626831", "0.46264488", "0.46251085", "0.46239036", "0.46221966", "0.4621587", "0.461993", "0.46176565", "0.46024755", "0.45961905", "0.45927188", "0.45884228", "0.45766157", "0.45758662", "0.45754406", "0.45735583", "0.45731533", "0.4572288", "0.45721495", "0.4563361", "0.45544153", "0.4552927", "0.45491114", "0.45489508", "0.45430952", "0.45351616", "0.4529076", "0.45266864", "0.45246094", "0.45228133", "0.45221025", "0.45214298", "0.4519401", "0.4513785", "0.4513467", "0.45098794", "0.45098412", "0.45097324", "0.45097026", "0.45079345", "0.45058578", "0.45014307", "0.4501142", "0.44924486", "0.44911188", "0.44873267", "0.4484647", "0.44844404", "0.44818053", "0.44814134", "0.4475829", "0.44726935", "0.4467137", "0.4467082" ]
0.51931
5
Labeled tokens come back from the UI as JSON. This method pulls them from the json and dumps
def get_labels():
    json_request = request.json  # get the json from the server
    keys = sort_keys(json_request.keys())  # sort the keys (i.e. the token ids)
    labels = []
    for k in keys:  # get the labels that the user input to the UI
        val = (json_request[k]['text'], json_request[k]['value'])
        labels.append(val)
    return labels
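A minimal runnable sketch of the pattern the query/document pair above describes — pulling UI-labeled tokens out of a JSON payload keyed by token id. The payload shape, the sample values, and the numeric-sort behaviour of the sort_keys helper are assumptions for illustration; they are not taken from the dataset row itself.

# Illustrative sketch only: the payload shape, sample values, and the
# sort_keys helper below are assumed, not part of the dataset row above.
json_request = {
    "2": {"text": "Obama", "value": "PERSON"},
    "0": {"text": "Barack", "value": "PERSON"},
    "1": {"text": "Hussein", "value": "PERSON"},
}

def sort_keys(keys):
    # Order the token ids numerically so labels come back in token order.
    return sorted(keys, key=int)

def get_labels(payload):
    labels = []
    for k in sort_keys(payload.keys()):
        # Each entry carries the token text and the label the user assigned in the UI.
        labels.append((payload[k]["text"], payload[k]["value"]))
    return labels

print(get_labels(json_request))  # [('Barack', 'PERSON'), ('Hussein', 'PERSON'), ('Obama', 'PERSON')]

Sorting the keys with an integer comparison (rather than lexicographically) keeps token "10" after token "2", which is presumably why the original function sorts the ids before reading the labels back out.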
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tokens_json(self):\n token_id, secret = self.decoded_token\n token_row = self.unauthenticated_token_row\n tokens_encoded = Fernet(secret).decrypt(\n token_row.tokens_fernet.encode('ascii'))\n return json.loads(tokens_encoded.decode('ascii'))", "def tokens():\n pass", "def act(self):\n if not self.label_candidates:\n self.label_candidates = True\n for text in self.observation.get('label_candidates', ()):\n if text:\n tokens = self.tokenize(text)\n self.add_to_dict([self.get_template(tokens)])\n\n return {'id': self.getID()}", "def read_json(self):\n utterances, labels = [], []\n for log in self.log_json:\n for turn in log['turns']:\n utterance = turn['output']['transcript']\n label = turn['output']['dialog-acts'][0]['act']\n utterances.append(utterance)\n labels.append(label)\n\n return utterances, labels", "def serialize_tokens(json_obj):\n\t# load into memory\n\tres = json.dumps(json_obj)\n\twith open(config.TOKENPATH, \"w+\") as f:\n\t\tf.write(res)\n\treturn json_obj[\"access_token\"], json_obj[\"refresh_token\"]", "def get_terms(self):\n return json.loads(self.terms)", "def _parse_tokens(self, body):\n\n old_token = self.token\n old_json_token = self.json_token\n\n self.token = self._parse_token(body)\n self.json_token = self._parse_json_token(body)\n\n logger.debug('Token set to: %s (Old: %s)', self.token, old_token)\n logger.debug('JSON token set to: %s (Old: %s)', self.json_token,\n old_json_token)", "def parse(self, tokenizer):\n pass", "def __str__(self):\n return self.token", "def get_payloads(self, text):\n return [json.dumps({\n 'inputs': text,\n 'parameters': {'candidate_labels': self.action_labels,\n 'hypothesis_template': 'The action that the user wants to perform is {}.'}\n }),\n json.dumps({\n 'inputs': text,\n 'parameters': {'candidate_labels': self.location_labels,\n 'hypothesis_template': 'The user wants to work with {}.'}\n })]", "def toString(self):\n return self.tokens.toString()", "def get_labels(self, labels_from_json):\n self.raw_labels = labels_from_json", "def build_expected_user_labels_response(self):\n labels = [\n {\n \"key\": \"key1\",\n \"value\": \"value1\"\n },\n {\n \"key\": \"key2\",\n \"value\": \"value2\"\n }\n ]\n return labels", "def token(self) -> str:", "def _parse_individual_tokens(self, tokens: List[str]) -> List:\r\n objs = []\r\n\r\n for token in tokens:\r\n obj = self._parse_token(token)\r\n objs.append(obj)\r\n\r\n return objs", "def tokenize(self):\n\n self.feats = {\n 'features': [], # Lists of the `InputFeatures` objects.\n 'segments': [], # Segments of the phrase. 
0: Promoun, 1: A-term, 2: B-term \n 'df_ids': [], # DataFrame index.\n 'target_token_ids': [] # Indexes of the target term in the tokens lists.\n }\n unique_id = 0 # Unique ID of the dataset.\n for _, row in tqdm(self.df.iterrows()):\n segment_tokens = self.tokenize_single_row(row)\n for j, segment in enumerate(segment_tokens):\n if segment['target_token_index'] > 0:\n features = self.tokens_to_features(unique_id, segment['tokens'])\n unique_id += 1\n self.feats['features'].append(features)\n self.feats['segments'].append(j)\n self.feats['target_token_ids'].append(segment['target_token_index'] )\n self.feats['df_ids'].append(row.ID)", "def process_data(self, json_dict: dict):\n all_token_ids = []\n all_level_ids = []\n all_synset_ids = []\n all_lemma_ids = []\n all_is_highway = []\n all_targets = []\n\n def tokenize(lemma_):\n return self.tokenizer(\n lemma_,\n add_special_tokens=False,\n truncation=True,\n is_split_into_words=True,\n return_token_type_ids=False,\n ).input_ids\n\n def add_lemma(lemma_, abs_level_, synset_id_, is_highway_):\n lemma_token_ids = tokenize([lemma_])\n n_tokens_ = len(lemma_token_ids)\n token_ids.extend(lemma_token_ids)\n level_ids.extend([self.level_to_id[abs_level_]] * n_tokens_)\n synset_ids.extend([synset_id_] * n_tokens_)\n lemma_ids.extend([lemma_ids[-1] + 1] * n_tokens_)\n is_highway.extend([is_highway_] * n_tokens_)\n\n # Go through all JSON entries\n for synset in tqdm(json_dict.values()):\n token_ids = []\n level_ids = []\n synset_ids = [0]\n lemma_ids = [0]\n is_highway = []\n\n lemmas = [l.replace(\"_\", \" \") for l in synset[\"lemmas\"]]\n abs_level = (\"current\", \"current\")\n\n # Save all lemmas of the current node\n synset_token_ids = self.tokenizer.batch_encode_plus(lemmas,\n add_special_tokens=False,\n return_token_type_ids=False).input_ids\n all_targets.append(synset_token_ids)\n\n for level in (\"hypernyms\", \"hyponyms\"):\n for sub_synset in synset[level].values():\n if \"lemmas\" in sub_synset:\n lemmas = [l.replace(\"_\", \" \") for l in sub_synset[\"lemmas\"]]\n abs_level = (level, \"current\")\n synset_id = synset_ids[-1] + 1\n\n # Add the synset's lemma that is on highway\n highway_lemma = lemmas.pop(0)\n add_lemma(highway_lemma, abs_level, synset_id, True)\n\n # Add the synset's other lemmas\n for lemma in lemmas:\n add_lemma(lemma, abs_level, synset_id, False)\n\n for sub_level in (\"hypernyms\", \"hyponyms\"):\n for sub_sub_lemmas in sub_synset[sub_level].values():\n lemmas = [l.replace(\"_\", \" \") for l in sub_sub_lemmas]\n abs_level = (level, sub_level)\n synset_id = synset_ids[-1] + 1\n\n # Add the synset's lemma that is on highway\n highway_lemma = lemmas.pop(0)\n add_lemma(highway_lemma, abs_level, synset_id, True)\n\n # Add the synset's other lemmas\n for lemma in lemmas:\n add_lemma(lemma, abs_level, synset_id, False)\n\n # Append the global lists\n all_token_ids.append(token_ids)\n all_level_ids.append(level_ids)\n all_synset_ids.append(synset_ids[1:])\n all_lemma_ids.append(lemma_ids[1:])\n all_is_highway.append(is_highway)\n\n data = (\n all_token_ids,\n all_level_ids,\n all_synset_ids,\n all_lemma_ids,\n all_is_highway,\n all_targets\n )\n\n return data", "def json(self):\n return json.loads(self.text)", "def __init__(self):\n self.tokens = []", "def look_up_a_token():\n try:\n data = request.get_json(force=True)\n except Exception:\n data = None\n if data:\n tok = data['token']\n else:\n tok = request.headers.get('TOK_ID')\n request.data\n\n try:\n creation_time = int(round(datetime.timestamp(tokens[tok]), 0))\n 
issue_time = tokens[tok].isoformat()\n except Exception:\n _now = datetime.now(UTC)\n creation_time = int(round(datetime.timestamp(_now)))\n issue_time = _now.isoformat()\n tokens[tok] = _now\n expire_time = datetime.fromtimestamp(creation_time + 2764790)\n\n return jsonify({\n \"data\": {\n \"accessor\": \"8609694a-cdbc-db9b-d345-e782dbb562ed\",\n \"creation_time\": creation_time,\n \"creation_ttl\": 2764800,\n \"display_name\": \"fooname\",\n \"entity_id\": \"7d2e3179-f69b-450c-7179-ac8ee8bd8ca9\",\n \"expire_time\": expire_time.isoformat(),\n \"explicit_max_ttl\": 0,\n \"id\": tok,\n \"identity_policies\": [\n \"dev-group-policy\"\n ],\n \"issue_time\": issue_time,\n \"meta\": {\n \"username\": \"tesla\"\n },\n \"num_uses\": 0,\n \"orphan\": True,\n \"path\": \"auth/kubernetes/login\",\n \"policies\": [\n \"default\"\n ],\n \"renewable\": True,\n \"ttl\": 2764790\n }\n })", "def _parse_json(model, f_name):\n # get the word index dictionary corresponding to the feature model type\n if model == \"baseline\":\n word_dict = _parse_word_dict(\"baseline_dict.txt\")\n elif model == \"hashing\":\n word_dict = _parse_word_dict(\"hashing_dict.txt\")\n elif model == \"cluster\":\n word_dict = _parse_word_dict(\"cluster_dict.txt\")\n else:\n error(\"Unknown model type %s\" % model)\n\n if os.path.isfile(f_name):\n if _svm:\n model += \"svm\"\n out = open(\"datasets/%s_%s.txt\" % (f_name[f_name.rfind(\"/\") + 1:].split(\".\")[0], model), \"w\")\n with open(f_name) as f:\n for line in f:\n obj = json.loads(line)\n txt = obj[\"text\"]\n rat = obj[\"stars\"] if \"stars\" in obj else 0\n out.write(\"%d \\t\" % rat)\n features = []\n for t in _extract(txt):\n if t in word_dict:\n while len(features) <= word_dict[t]:\n features.append(0)\n features[word_dict[t]] += 1\n for i, c in enumerate(features):\n if c == 0:\n continue\n if _svm:\n i += 1\n out.write(\"%d:%d \" % (i, c))\n out.write(\"\\n\")\n out.close()\n else:\n error(\"parse json - not a file: %s\" % f_name)", "def denormalize_token_data(self, data):\n if not data:\n return\n\n return {\"oauth_token\": data.get(\"token\"),\n \"oauth_token_secret\": data.get(\"extra\")}", "def parse_tokens(self, tokens):\n for token in tokens:\n self.parse_token(token)", "def to_dict(self):\n return {\n 'token': self.token\n }", "def json_to_labels(data):\n labels = []\n for item in data:\n labels.append(Label(item['title'], item['color'], item['desc']))\n return labels", "def __get_token_data__(self):\n raise Exception(\"Implement me!\")", "def listTags(self, authenticationToken):\r\n pass", "def _parse_json_token(self, body):\n\n token_match = re.search('var\\s*jsonToken\\s*=[\\s\\']*([\\w-]+)', body)\n return token_match.group(1)", "def tokens(self):\n return self.__tokens", "def tokens(self):\n data, end = \\\n self.pat.traverse(lambda obj, *args: obj.tokens(*args),\n self.begin, self.data)\n return data", "def parse_jsons(self):\n return super(VegaGraphBar, self).parse_jsons()", "def _formatter(self, result):\n seclabels = []\n if 'seclabels' in result and result['seclabels'] is not None:\n for sec in result['seclabels']:\n sec = re.search(r'([^=]+)=(.*$)', sec)\n seclabels.append({\n 'provider': sec.group(1),\n 'label': sec.group(2)\n })\n\n result['seclabels'] = seclabels\n return result", "def json(self):\n\n return json.loads(self.text)", "def get_text_input(path):\n with open(path, 'r', encoding='utf8') as f:\n sent_dict = json.load(f)\n sents = [sent_dict[i] for i in sent_dict]\n tokenized_sents = [[word[0] for word in sent] for sent in sents]\n return 
tokenized_sents", "def process_tweet(json_data):\n text = json_data.get('text')\n\n # Strip URLs.\n for url in json_data.get('entities').get('urls', []):\n text = text.replace(url.get('url', ''), 'http')\n\n # Tokenize text.\n tokens = twitter_tokenizer.tokenize(text)\n\n # Remove punctuation and stopwords.\n tokens = [x for x in tokens if x not in punctuation_set and x not in stopwords_set]\n\n # Stem the tokens.\n if toggles['stem_tokens']:\n tokens = [stemmer.stem(x) for x in tokens]\n\n result = {}\n result['stemmed'] = tokens\n result['user'] = json_data.get('user')\n\n return result", "def create_tokens_li():\n cnt=0\n for file in docs:\n file_name = open(\"./corpus/\"+ str(file) + \".txt\")\n print(cnt)\n cnt+=1\n words = file_name.read()\n tokens_doc = nltk.word_tokenize(words)\n tokens_doc = [w.lower() for w in tokens_doc]\n #tokens_doc = [snowball_stemmer.stem(token) for token in tokens_doc]\n tokens_doc = [token for token in tokens_doc if token not in nltk.corpus.stopwords.words('english')]\n tokens_li.append(tokens_doc)\n\n\n #storing in json file\n with open('savers/tokens.json', 'w') as fp:\n json.dump(tokens_li, fp)", "def format_output(self):\n brat = Brat.convert_to_brat(self._input_filepath, 'results/brat.txt')\n output_file = self.__output_filepath.open(encoding='utf-8')\n # Assign BRAT span to each token from output\n terms = []\n i = 0\n multiword = False\n for token_tagged in output_file.readlines():\n token_tagged = token_tagged.split()\n if not token_tagged:\n break\n token = token_tagged[0]\n tag = token_tagged[1]\n term = {\n 'token': token,\n 'tag': tag\n }\n if token == brat[i]['token']:\n term['start'] = brat[i]['start']\n term['end'] = brat[i]['end']\n terms.append(term)\n i += 1\n elif ' ' in brat[i]['token'] and token in brat[i]['token']:\n multiword = True\n term['start'] = str(int(brat[i]['start']) + brat[i]['token'].index(token))\n term['end'] = str(int(term['start']) + len(token))\n terms.append(term)\n elif any([char in token for char in ['(', ')', ':', '/']]):\n multiword = True\n term['start'] = brat[i]['start']\n while brat[i]['token'] in token:\n i += 1\n term['end'] = brat[i]['end'],\n terms.append(term)\n elif multiword:\n i += 1\n if token != brat[i]['token']:\n raise Exception('Tokens does not match: {0} {1}'.format(token, brat[i]['token']))\n term['start'] = brat[i]['start']\n term['end'] = brat[i]['end']\n terms.append(term)\n multiword = False\n i += 1\n else:\n raise Exception('Tokens does not match: {0} {1}'.format(token, brat[i]['token']))\n # Generate key phrases from previous terms\n multiword_tags = [\n 'I-Grp_Enfermedad',\n 'B-Estructura_Corporal',\n 'I-Estructura_Corporal',\n 'B-Calificador',\n 'I-Calificador'\n ]\n for term in terms:\n if term['tag'] == 'O':\n continue\n if self._key_phrases != [] and int(self._key_phrases[-1]['span'][-1][1]) == (int(term['start']) - 1) \\\n and term['tag'] in multiword_tags:\n self._key_phrases[-1]['span'].append((term['start'], term['end']))\n self._key_phrases[-1]['term'] += ' ' + term['token']\n else:\n key_phrase = {\n 'span': [(term['start'], term['end'])],\n 'label': 'Concept',\n 'term': term['token'],\n }\n self._key_phrases.append(key_phrase)\n # Format span\n for key_phrase in self._key_phrases:\n span = map(lambda tup: '{0} {1}'.format(tup[0], tup[1]), key_phrase['span'])\n key_phrase['span'] = ';'.join(span)", "def load_info_from_file(filepath):\n with open(filepath, 'r') as f:\n tokens = json.load(f)\n\n return tokens", "def _parse_token_list(token_list):\n\n def current_leaf_add(key_f, 
value, tree_f, guid_stack_f):\n # Helper function to add to the current leaf we're visiting.\n current = tree_f\n for x in guid_stack_f:\n current = current[x]\n\n # Try/except/else added by Brandon to avoid duplicate keys in\n # an object.\n try:\n # Simply try to access the field.\n current[key_f]\n except KeyError:\n # Field doesn't exist, simply add the value.\n current[key_f] = value\n else:\n # Trying to add to an existing key is no bueno.\n # TODO: Raise a different exception here.\n raise UserWarning('Multiple properties with the same name '\n 'encountered while parsing! Property: {},'\n 'Value: {}, Already parsed: {}'\n .format(key_f, value, tree_f))\n\n def list_to_string(list_in):\n # Helper function to turn a list of strings into one string with some\n # decent formatting.\n if len(list_in) == 0:\n return ''\n else:\n return reduce(lambda x, y: str(x) + ' ' + str(y), list_in[1:-1])\n\n # Tree variables.\n tree = {}\n guid = 0\n guid_stack = []\n\n # reverse the token list as pop() is way more efficient than pop(0)\n token_list = list(reversed(token_list))\n\n def get_full_token():\n nonlocal token_list\n # Pop, then keep going until we have a full token (i.e. 'object house',\n # not just 'object')\n ft = []\n while ft == [] or ft[-1] not in ['{', ';', '}', '\\n', 'shape']:\n ft.append(token_list.pop())\n\n return ft\n\n # Initialize our \"full_token\" variable to make the nested function\n # below work without arguments.\n full_token = []\n\n def close_out_item():\n \"\"\"Nested helper function to be used if the last element in the\n full_token == '}'\n \"\"\"\n nonlocal tree\n nonlocal guid_stack\n nonlocal guid\n\n if len(full_token) > 1:\n current_leaf_add(full_token[0], list_to_string(full_token),\n tree, guid_stack)\n guid_stack.pop()\n\n def add_item_definition():\n \"\"\"Nested helper function to be used if the last element in the\n full_token == '{'\n \"\"\"\n nonlocal guid\n nonlocal guid_stack\n nonlocal tree\n\n current_leaf_add(guid, {}, tree, guid_stack)\n guid_stack.append(guid)\n guid += 1\n\n # Wrapping this current_leaf_add is defensive coding so we don't\n # crash on malformed glm files.\n if len(full_token) > 1:\n # Do we have a clock/object or else an embedded configuration\n # object?\n if len(full_token) < 4:\n # Add the item definition.\n current_leaf_add(full_token[0], full_token[-2], tree,\n guid_stack)\n elif len(full_token) == 4:\n # We likely have an embedded/nested object.\n current_leaf_add('omfEmbeddedConfigObject',\n full_token[0] + ' ' +\n list_to_string(full_token), tree,\n guid_stack)\n else:\n # Something is wrong.\n raise UserWarning('Malformed GridLAB-D model. 
Token: {}'\n .format(' '.join(full_token)))\n\n # All done.\n\n # Loop over the tokens.\n while token_list:\n # Get full token.\n full_token = get_full_token()\n\n # Work with what we've collected.\n if (full_token == ['\\n']) or (full_token == [';']):\n # Nothing to do.\n continue\n elif full_token == ['}']:\n close_out_item()\n elif full_token[0] == '#set':\n if full_token[-1] == ';':\n tree[guid] = {'omftype': full_token[0],\n 'argument': list_to_string(full_token)}\n else:\n tree[guid] = {'#set': list_to_string(full_token)}\n guid += 1\n elif full_token[0] == '#include':\n if full_token[-1] == ';':\n tree[guid] = {'omftype': full_token[0],\n 'argument': list_to_string(full_token)}\n else:\n tree[guid] = {'#include': list_to_string(full_token)}\n guid += 1\n elif full_token[0] == 'shape':\n while full_token[-1] not in ['\\n']:\n full_token.append(token_list.pop())\n full_token[-2] = ''\n current_leaf_add(full_token[0], list_to_string(full_token[0:-1]),\n tree, guid_stack)\n guid += 1\n elif (len(guid_stack) == 1) and ('class' in tree[guid_stack[0]]) \\\n and (len(full_token) > 1):\n # Intentionally narrow case for handling GridLAB-D classes.\n # Note this ONLY works for simple classes with property\n # definitions (e.g. \"double consensus_iterations;\").\n # Note this WILL NOT WORK for complex class definitions\n # which have anything other than simple properties. This is\n # because the complex classes have nested functions for\n # syncing, post-sync, etc. Not handling that here.\n # ALSO NOTE: This WILL NOT WORK for classes with\n # enumerations and sets, as those have curly braces...\n # http://gridlab-d.shoutwiki.com/wiki/Runtime_Class_User_Guide\n\n # Since we're just handling the simplest of class properties\n # here, do some assertions for safety.\n assert len(full_token) == 3, ('Malformed class token! Only simple'\n 'classes are supported!')\n assert full_token[-1] == ';', ('Malformed class token! Only simple'\n 'classes are supported!')\n\n # Add the type to the 'variable_types' field and add the\n # rest to the 'variable_names' field. Note this matches up\n # with how \"sorted_write\" will handle classes.\n v_type = full_token[0]\n v_name = full_token[1]\n tree_entry = tree[guid_stack[0]]\n try:\n tree_entry['variable_types'].append(v_type)\n except KeyError:\n tree_entry['variable_types'] = [v_type]\n\n try:\n tree_entry['variable_names'].append(v_name)\n except KeyError:\n tree_entry['variable_names'] = [v_name]\n\n elif full_token[-1] == '{':\n add_item_definition()\n elif full_token[-1] == '\\n' or full_token[-1] == ';':\n\n if guid_stack == [] and full_token != ['\\n'] and \\\n full_token != [';']:\n\n # Special case when we have zero-attribute items (like\n # #include, #set, module).\n tree[guid] = {'omftype': full_token[0],\n 'argument': list_to_string(full_token)}\n guid += 1\n elif len(full_token) > 1:\n # We process if it isn't the empty token (';')\n current_leaf_add(full_token[0], list_to_string(full_token),\n tree, guid_stack)\n elif full_token[0] == 'schedule':\n # Special code for those ugly schedule objects:\n if full_token[0] == 'schedule':\n while full_token[-1] not in ['}']:\n full_token.append(token_list.pop())\n tree[guid] = {'object': 'schedule', 'name': full_token[1],\n 'cron': ' '.join(full_token[3:-2])}\n guid += 1\n\n # this section will catch old glm format and translate it. 
Not in the most\n # robust way but should work for now.\n # NOTE: In an ideal world, double-looping would be avoided by doing\n # the work below while looping through the token list. Oh well -\n # the point of borrowing someone else's work is to avoid doing it\n # yourself.\n _fix_old_syntax(tree)\n\n return tree", "def tokens(cls, instance):\n token = super(TumblrOAuth, cls).tokens(instance)\n if token and 'access_token' in token:\n token = dict(tok.split('=')\n for tok in token['access_token'].split('&'))\n return token", "def tokens(self):\n return self._tokens", "def tokens(self):\n return self._tokens", "def tokens(self):\n return self._tokens", "def convert():\n if request.method == \"POST\":\n user_string = request.json['text']\n # flag stores whether previous translated word was hindi or not\n flag = request.json['flag']\n\n if user_string:\n # Call to translate function to process the string\n predictions = generate_predictions(transliterator_obj, user_string, eng_dict, hin_dict, classifier, flag)\n return json.dumps({\"lists\": predictions})\n\n else:\n # return empty list if user sends empty string\n return json.dumps({\"lists\": []})", "def get_labels(self):\n return [token.label for token in self.tokens]", "def features(self, tokens, index, history):\r\n # for more details see: http://nlpforhackers.io/named-entity-extraction/\r\n \r\n # init the stemmer\r\n stemmer = SnowballStemmer('english')\r\n\r\n # Pad the sequence with placeholders\r\n tokens = [('[START2]', '[START2]'), ('[START1]', '[START1]')] + list(tokens) + [('[END1]', '[END1]'), ('[END2]', '[END2]')]\r\n history = ['[START2]', '[START1]'] + list(history)\r\n\r\n # shift the index with 2, to accommodate the padding\r\n index += 2\r\n\r\n word, pos = tokens[index]\r\n prevword, prevpos = tokens[index - 1]\r\n prevprevword, prevprevpos = tokens[index - 2]\r\n nextword, nextpos = tokens[index + 1]\r\n nextnextword, nextnextpos = tokens[index + 2]\r\n\r\n previob = history[index - 1]\r\n \r\n\r\n return {\r\n 'word': word,\r\n 'pos': pos,\r\n 'lemma': stemmer.stem(word),\r\n\r\n 'next-word': nextword,\r\n 'next-pos': nextpos,\r\n 'next-lemma': stemmer.stem(nextword),\r\n\r\n 'next-next-lemma': stemmer.stem(nextnextword),\r\n\r\n 'prev-word': prevword,\r\n 'prev-pos': prevpos,\r\n 'prev-lemma': stemmer.stem(prevword),\r\n\r\n 'prev-iob': previob,\r\n\r\n }", "def parse(token):\n\n pass", "def text():\n return {\n \"@context\": \"http://www.w3.org/ns/anno.jsonld\",\n \"type\": \"Annotation\",\n \"body\": {\n \"creator\": \"user\",\n \"type\": \"TextualBody\",\n \"value\": \"string\"\n },\n \"generator\": {\n \"homepage\": \"http://mnemosyne.ml\",\n \"id\": \"string\",\n \"name\": \"Mnemosyne\",\n \"type\": \"Mnemosyne\"\n },\n \"target\": {\n \"id\": \"string\",\n \"type\": \"TextQuoteSelector\",\n \"exact\": \"string\",\n \"format\": \"string\",\n \"source\": \"string\",\n \"prefix\": 0,\n \"suffix\": 0,\n \"refinedBy\": {\n \"type\": \"TextPositionSelector\",\n \"start\": \"/div[2]\",\n \"end\": \"/div[2]\"\n },\n },\n }", "def _tokens(self):\n # get my renderer\n renderer = self.renderer\n # sign on\n yield \"\"\n yield renderer.commentLine(\"tokens\")\n # simple tokens\n yield from renderer.set(name=\"empty\")\n yield from renderer.set(name=\"comma\", value=\",\")\n yield from renderer.set(name=\"space\", value=\"$(empty) $(empty)\")\n\n # characters that don't render easily and make the makefile less readable\n yield from renderer.set(name=\"esc\", value='\"\\x1b\"')\n\n # all done\n return", "def 
predict_tokens(self, tokens):\n return", "def print_token(self):\n\n log.success(\"Your token : [{}]\".format(self.get_token()))", "def tokenize(doc):\n text = doc\n doc = doc.lower()\n doc = re.sub('[,;]', ' ', doc)\n doc = re.split('\\s+', doc)\n doc = sorted(list(filter(None, doc)))\n ent = le.stanfordTagger(text)\n print(ent)\n l = []\n for item in ent:\n if ent[item] in ['LOCATION', 'GPE','PERSON']:\n l.append(item)\n ent = l#ent = sorted(list(le.stanfordTagger(text).keys()))\n #print(ent)\n #ent = [e.lower() for e in ent]\n crime_type = fileCrimeClassify.extractCrimeWord(text, returnOnlyLabels=True)\n crime_type = [c.lower() for c in crime_type]\n #print(crime_type + ent)\n #print(doc)\n return doc, ent + crime_type", "def get_Tokens(self):\n return self._output.get('Tokens', None)", "def tokens(self):\r\n return Tokens(self)", "def dump_token_info(w):\r\n out = []\r\n lexer.input(w)\r\n while True:\r\n tok = lexer.token()\r\n if not tok: break\r\n out.append(tok)\r\n return out", "def get_tokens(self) -> List[str]:\n return self.tokens", "def token_labels(self) -> dict[str, str]:\n # This label is used to identify the site in the `skupper link status` command\n # self.name ({skupper_network.identifier}-{ns.cluster.name}-{ns.name}) can be longer than 63 characters\n # so use cluster.name-namespaced.name instead and trim it to 63 characters\n # a namespace can't be in more than one skupper network, so it's safe to omit the skupper network identifier\n return {\"token-receiver\": f\"{self.cluster.name}-{self.namespace.name}\"[0:63]}", "def load_value_words():\n return json.load(open(value_words()))", "def get_tokens(self, document):\n raise NotImplementedError()", "def tokens(self):\r\n return self.iter_tokens(self._blob)", "def _parse_tokens(self, result: dict, token_ind: int, depth: int=0):\n while token_ind < len(self._tokens):\n cur_token = self._tokens[token_ind]\n if cur_token == ',': # redundant commas that we simply ignore everywhere except list \"[x, y, z...]\"\n token_ind += 1\n continue\n if cur_token == '}':\n return token_ind + 1\n next_token = self._tokens[token_ind + 1]\n if next_token == '{':\n result[cur_token] = dict()\n token_ind = self._parse_tokens(result[cur_token], token_ind + 2, depth + 1)\n elif next_token == ':':\n next_next_token = self._tokens[token_ind + 2]\n if next_next_token == '[':\n result[cur_token] = list()\n token_ind = self._parse_list(result[cur_token], token_ind + 3)\n else:\n if cur_token not in result:\n result[cur_token] = self._tokens[token_ind + 2]\n else:\n if not isinstance(result[cur_token], list):\n old_val = result[cur_token]\n result[cur_token] = [old_val]\n result[cur_token].append(self._tokens[token_ind + 2])\n token_ind += 3\n else:\n raise Error('Wrong character \"{}\" in position {}'.format(next_token, token_ind))\n if depth != 0:\n raise Error('Input/output braces mismatch.')\n return token_ind", "def test_get_tokens():\n pass", "def next_token(self, context, token):", "def getTokens(self):\n return self.__token", "def tag(self, tokens):\n (yyhat, _) = self.tag_with_features(tokens)\n return yyhat", "def token(self):\n return self[\"token\"]", "def format_nested(token, word, start, end, groundtruth, prediction, delimiter='\\t'):\n out = ''\n for st, sw, ss, se, sg, sp in zip(token, word, start, end, groundtruth, prediction):\n out += format(st, sw, ss, se, sg, sp, delimiter) + '\\n'\n return out", "def get_tokens(self):\r\n return self.token_set", "def __str__(self):\n return str(self.__token)", "def tags(self):\r\n url = 
self.base_url + 'tags/'\r\n return json.loads(self.bb.load_url(url))", "def annotationlabel(request,action=None):\n\n username = request.session['username']\n mode1 = request.session['mode']\n auto_required = request.GET.get('ns_id', None)\n mode = NameSpace.objects.get(ns_id=mode1)\n\n # print('mode',mode1)\n usecase = request.session['usecase']\n # language = request.GET.get('language',request.session['language'])\n type = 'labels'\n\n if request.method == 'GET' and action.lower() == 'user_labels':\n\n \"\"\"GET request: given the report, the labels annotated by the user are returned\"\"\"\n\n language = request.GET.get('language', request.session['language'])\n user_get = request.GET.get('username',username)\n report_id = request.GET.get('report_id')\n report1 = Report.objects.get(id_report = report_id,language = language)\n # if auto_required == 'Robot':\n # mode = NameSpace.objects.get(ns_id=auto_required)\n if auto_required is not None:\n mode_1 = NameSpace.objects.get(ns_id=auto_required)\n else:\n mode_1 = mode\n json_dict = get_user_gt(user_get,mode_1,report1,language,'labels')\n return JsonResponse(json_dict,safe=False)\n\n elif request.method == 'GET' and action.lower() == 'all_labels':\n\n \"\"\" GET request: given the use case, all the labels associated to that usecase are returned. \"\"\"\n\n labels = AnnotationLabel.objects.filter(name=usecase).values('seq_number','label','annotation_mode')\n print(labels)\n json_dict = {}\n if len(labels) > 0:\n\n if mode1 == 'Human' or auto_required == 'Human':\n json_dict['labels'] = []\n for el in labels:\n json_val = {}\n if 'Manual' in el['annotation_mode']:\n # if int(el['seq_number']) > count: # i primi 20 sono inseriti automaticamente\n json_val['label'] = (el['label'])\n json_val['seq_number'] = (el['seq_number'])\n json_dict['labels'].append(json_val)\n if mode1 == 'Robot' or auto_required == 'Robot':\n json_dict['labels'] = []\n for el in labels:\n json_val = {}\n if 'Automatic' in el['annotation_mode']:\n json_val['label'] = (el['label'])\n json_val['seq_number'] = (el['seq_number'])\n json_dict['labels'].append(json_val)\n\n else:\n json_dict['labels'] = []\n\n json_dict['labels'] = sorted(json_dict['labels'], key=lambda json: json['seq_number'])\n print(json_dict)\n return JsonResponse(json_dict)\n\n elif request.method == 'POST' and action.lower() == 'delete':\n\n \"\"\"PSOT request: given the report, the labels the user annotated are removed together with the\n associated groundtruth.\"\"\"\n\n request_body_json = json.loads(request.body)\n report_id = request_body_json['report_id']\n user = User.objects.get(username=username,ns_id=mode)\n language = request.GET.get('language', request.session['language'])\n report1 = Report.objects.get(id_report=report_id,language = language)\n if user is None or report1 is None:\n json_response = {'error': 'An error occurred getting parameters.'}\n return json_response\n to_del = Associate.objects.filter(username=user, ns_id=mode, id_report=report1, language=language)\n if mode1 == 'Human':\n try:\n with transaction.atomic():\n\n if to_del.exists():\n json_response = delete_all_annotation(to_del, user, report1,language, type,mode)\n\n else:\n json_response = {'msg':'nothing to do'}\n\n except Exception as error:\n print(error)\n json_response = {'error': 'An error occurred saving the ground_truth and the labels'}\n return JsonResponse(json_response)\n else:\n return JsonResponse(json_response)\n else:\n json_response = restore_robot_annotation(report1, 'labels', user)\n return 
JsonResponse(json_response)\n\n\n if request.method == 'POST' and action.lower() == 'insert':\n\n \"\"\"PSOT request: given the report, the labels the user annotated are added in the database and a new \n JSON groundtruth is created. \"\"\"\n\n request_body_json = json.loads(request.body)\n report_id = request_body_json['report_id']\n user = User.objects.get(username=username,ns_id=mode)\n language = request.GET.get('language', request.session['language'])\n report1 = Report.objects.get(id_report=report_id,language = language)\n\n if user is None or report1 is None:\n json_response = {'error': 'An error occurred getting the parameters.'}\n return JsonResponse(json_response)\n\n labels_to_save = request_body_json['labels']\n # In this case the user manually deletes all the labels (NOT WITH CLEAR BUTTON) and saves.\n if len(labels_to_save) == 0 and mode1 == 'Human':\n\n \"\"\"If there are not labels to save, if there is a ground truth saved in the database, this is removed,\n otherwise no action is performed. \"\"\"\n\n rows = Associate.objects.filter(username = user,ns_id=mode, id_report = report1, language = language)\n if rows.exists():\n try:\n with transaction.atomic():\n json_response = delete_all_annotation(rows,user,report1,language,type,mode)\n\n except Exception as error:\n print(error)\n json_response = {'error': 'An error occurred.'}\n return JsonResponse(json_response, status=500)\n else:\n return JsonResponse(json_response)\n else:\n json_response = {'message': 'Nothing to save.'}\n return JsonResponse(json_response)\n\n if len(labels_to_save) == 0 and mode1 == 'Robot':\n\n \"\"\" If there are not labels to save and the name space is Robot no action is performed and the already \n existing ground-truth is kept \"\"\"\n to_del = Associate.objects.filter(id_report=report1, language=language, username=user, ns_id=mode)\n # print('RESTORE')\n json_response = restore_robot_annotation(report1, 'labels',user)\n return JsonResponse(json_response)\n\n update = True\n\n \"\"\" Check if the user's labels she inserted are as many as the rows already present in the db: \n if they are not: update the annotation: the old annotation is replaced with the new one\n if they are: check if the labels existing are those inserted, in this case nothing is done, otherwise \n the current groundtruth is updated. \"\"\"\n\n existing_rows = Associate.objects.filter(username = user,ns_id=mode, id_report =report1,language =language)\n if existing_rows.exists():\n if existing_rows.count() == len(labels_to_save):\n for label in labels_to_save:\n label1 = AnnotationLabel.objects.get(name=usecase, label=label['label'], seq_number=label['seq_number'])\n if not Associate.objects.filter(username=user,ns_id=mode, seq_number=label1.seq_number, label=label1,\n id_report=report1, language=language).exists():\n update = True\n break\n else:\n update = False\n if update == True:\n try:\n with transaction.atomic():\n # Remove all the existing labels inserted by the user for that report. 
The existing ground truth is kept untile the deletion is successful\n to_del = Associate.objects.filter(username=user,ns_id=mode, id_report=report1,language = language)\n delete_all_annotation(to_del,user,report1,language,type,mode)\n\n json_resp_labels = update_annotation_labels(labels_to_save,usecase,user,report1,language,mode)\n\n jsonDict = serialize_gt(type, usecase, username, report_id,language,mode)\n GroundTruthLogFile.objects.create(username=user,ns_id=mode, id_report=report1, language = language,\n gt_json=jsonDict, gt_type=type,insertion_time=Now())\n\n except (Exception) as error:\n print(error)\n print('rolled back')\n json_response = {'error': 'An error occurred saving the ground_truth '\n 'and the labels, the transaction rolledback'}\n return JsonResponse(json_response)\n\n else:\n return JsonResponse(json_resp_labels)\n else:\n if mode1 == 'Human':\n if not GroundTruthLogFile.objects.filter(gt_type='labels', username=user, ns_id=mode, id_report=report1,\n language=language).exists():\n js = serialize_gt('labels', usecase, username, report1.id_report, language, mode)\n\n GroundTruthLogFile.objects.create(gt_json=js, insertion_time=Now(), username=user,\n ns_id=mode, id_report=report1, language=language,\n gt_type='labels')\n\n ass = Associate.objects.filter(username=user, id_report=report1, language=language,\n ns_id=mode).values('label', 'seq_number')\n for el in ass:\n lab = AnnotationLabel.objects.get(label=el['label'], seq_number=el['seq_number'])\n Associate.objects.filter(username=user, ns_id=mode, label=lab, seq_number=lab.seq_number,\n id_report=report1, language=language).delete()\n Associate.objects.create(username=user, ns_id=mode, label=lab, seq_number=lab.seq_number,\n insertion_time=Now(), id_report=report1, language=language)\n\n json_response = {'message': 'ok'}\n else:\n json_response = {'message': 'no changes detected'}\n return JsonResponse(json_response)\n\n elif mode1 == 'Robot':\n\n \"\"\" In this section the name space Robot is handled: If the user is in the AUTOMATIC MODE and the labels\n she inserts are those annotated by the algorithm, this means that she agrees with the annotation of the \n Robot user. 
The annotation does not change, only the insertion time is changed.\"\"\"\n\n try:\n with transaction.atomic():\n # in questa sezione solo se la gt è uguale a prima, l'utente acconsente alla gt della macchina\n user_robot = User.objects.get(username='Robot_user', ns_id=mode)\n gt_robot = GroundTruthLogFile.objects.filter(username=user_robot, ns_id=mode,\n id_report=report1, language=language,\n gt_type='labels')\n\n gt = GroundTruthLogFile.objects.filter(username=user, ns_id=mode, id_report=report1,\n language=language,\n gt_type='labels')\n if gt_robot.count() == 1 and not gt.exists():\n # if gt_robot[0].insertion_time == gt[0].insertion_time:\n js = serialize_gt('labels', usecase, username, report1.id_report, language, mode)\n GroundTruthLogFile.objects.filter(username=user, ns_id=mode, id_report=report1,\n language=language,\n gt_type='labels').delete()\n\n GroundTruthLogFile.objects.create(gt_json=js, insertion_time=Now(), username=user,\n ns_id=mode, id_report=report1, language=language,\n gt_type='labels')\n\n ass = Associate.objects.filter(username=user, id_report=report1, language=language,\n ns_id=mode).values('label', 'seq_number')\n for el in ass:\n lab = AnnotationLabel.objects.get(label=el['label'], seq_number=el['seq_number'])\n Associate.objects.filter(username=user, ns_id=mode, label=lab,\n seq_number=lab.seq_number,\n id_report=report1, language=language).delete()\n Associate.objects.create(username=user, ns_id=mode, label=lab,\n seq_number=lab.seq_number,\n insertion_time=Now(), id_report=report1, language=language)\n\n except Exception as error:\n print(error)\n print('rolled back')\n json_response = {'error': 'An error occurred updating labels dates'}\n return JsonResponse(json_response)\n else:\n json_response = {'message': 'dates updated'}\n return JsonResponse(json_response)", "def json_friendly(self):", "def collect_ners(self, ents):\r\n collected_ners = []\r\n for token in ents:\r\n if token.label_ in self.ners_label:\r\n collected_ners.append(token.text + '/' + token.label_)\r\n return collected_ners", "def token_values(self):\n return self._token_values", "def _BuildTokens(self):\n # Set of supported keywords for a given platform. Values should be in\n # undercase form, eg, icmp_type (not icmp-type)\n supported_tokens = {'action',\n 'comment',\n 'destination_address',\n 'destination_address_exclude',\n 'destination_port',\n 'expiration',\n 'icmp_type',\n 'stateless_reply',\n 'name', # obj attribute, not token\n 'option',\n 'protocol',\n 'platform',\n 'platform_exclude',\n 'source_address',\n 'source_address_exclude',\n 'source_port',\n 'translated', # obj attribute, not token\n 'verbatim',\n }\n\n # These keys must be also listed in supported_tokens.\n # Keys should be in undercase form, eg, icmp_type (not icmp-type). 
Values\n # should be in dash form, icmp-type (not icmp_type)\n supported_sub_tokens = {\n 'option': {\n 'established',\n 'first-fragment',\n 'is-fragment',\n 'initial',\n 'rst',\n 'sample',\n 'tcp-established',\n },\n 'action': {\n 'accept',\n 'deny',\n 'next',\n 'reject',\n 'reject-with-tcp-rst',\n },\n 'icmp_type': set(list(Term.ICMP_TYPE[4].keys())\n + list(Term.ICMP_TYPE[6].keys()))\n }\n return supported_tokens, supported_sub_tokens", "def _tokenize(source):\n lines = source.split(\"\\n\")\n print(\n \"{type:<10}{string:<25} {start:^12} {end:^12}\".format(\n type=\"Type\", string=\"String\", start=\"Start\", end=\"End\"\n )\n )\n print(\"-\" * 60)\n for line in lines:\n tokens = collect_tokens(line)\n for token in tokens:\n print(token)", "def features(self, tokens, index, history):\r\n # for more details see: http://nlpforhackers.io/named-entity-extraction/\r\n\r\n # init the stemmer\r\n stemmer = SnowballStemmer('english')\r\n\r\n # Pad the sequence with placeholders\r\n tokens = [('[START2]', '[START2]'), ('[START1]', '[START1]')] + list(tokens) + [('[END1]', '[END1]'), ('[END2]', '[END2]')]\r\n history = ['[START2]', '[START1]'] + list(history)\r\n\r\n # shift the index with 2, to accommodate the padding\r\n index += 2\r\n\r\n word, pos = tokens[index]\r\n prevword, prevpos = tokens[index - 1]\r\n prevprevword, prevprevpos = tokens[index - 2]\r\n nextword, nextpos = tokens[index + 1]\r\n nextnextword, nextnextpos = tokens[index + 2]\r\n previob = history[index - 1]\r\n contains_dash = '-' in word\r\n contains_dot = '.' in word\r\n allascii = all([True for c in word if c in string.ascii_lowercase])\r\n\r\n allcaps = word == word.capitalize()\r\n capitalized = word[0] in string.ascii_uppercase\r\n\r\n prevallcaps = prevword == prevword.capitalize()\r\n prevcapitalized = prevword[0] in string.ascii_uppercase\r\n\r\n nextallcaps = nextword == nextword.capitalize()\r\n nextcapitalized = nextword[0] in string.ascii_uppercase\r\n\r\n return {\r\n 'word': word,\r\n 'lemma': stemmer.stem(word),\r\n 'pos': pos,\r\n 'all-ascii': allascii,\r\n\r\n 'next-word': nextword,\r\n 'next-lemma': stemmer.stem(nextword),\r\n 'next-pos': nextpos,\r\n\r\n 'next-next-word': nextnextword,\r\n 'next-next-pos': nextnextpos,\r\n\r\n 'prev-word': prevword,\r\n 'prev-lemma': stemmer.stem(prevword),\r\n 'prev-pos': prevpos,\r\n\r\n 'prev-prev-word': prevprevword,\r\n 'prev-prev-pos': prevprevpos,\r\n\r\n 'prev-iob': previob,\r\n\r\n 'contains-dash': contains_dash,\r\n 'contains-dot': contains_dot,\r\n\r\n 'all-caps': allcaps,\r\n 'capitalized': capitalized,\r\n\r\n 'prev-all-caps': prevallcaps,\r\n 'prev-capitalized': prevcapitalized,\r\n\r\n 'next-all-caps': nextallcaps,\r\n 'next-capitalized': nextcapitalized,\r\n }", "def create_tokens(dataframe):\n\n tokenize_dict = {}\n iterator = dataframe.to_dict('dict')['line']\n\n for key, val in iterator.items():\n tokenize_dict[key] = nltk.word_tokenize(val)\n\n for key, val in tokenize_dict.items():\n l = []\n for i in range(len(val)):\n if val[i] == '<':\n val[i] = ''.join(val[i:i+3])\n \n l = [e for e in val if e not in ('e1', 'e2', '/e1', '/e2', '>')]\n tokenize_dict[key] = ', '.join(str(s) for s in l)\n\n tokenize_dataframe = create_dataframe(tokenize_dict, ['token'])\n\n dataframe['tokens'] = tokenize_dataframe['token']\n\n return dataframe", "def parse(self):", "def parse_jsons(self):\n return super(VegaGraphScatter, self).parse_jsons()", "def tokens(self):\n return self._sentrep.tokens()", "def word_vecs(self, raw_label=False):\n utterances, labels = 
self.read_json()\n # print(utterances)\n # print(self.label_dict)\n utterances = [self.word2vec(u) for u in utterances]\n if raw_label:\n labels = labels\n else:\n labels = [self.label_dict[l] for l in labels]\n\n return utterances, labels", "def json(self):\n return self._parsejson(self.raw)", "def parse(self, response):", "def getToken(self):\n \n raise NotImplementedError", "def tokens(self):\n\t\tlabels_and_synonyms = list(itertools.chain.from_iterable(list(self.term_to_tokens.values())))\n\t\ttokens = set(list(itertools.chain.from_iterable([word_tokenize(x) for x in labels_and_synonyms])))\n\t\treturn(list(tokens))", "def post(self):\n data = request.json\n return analyze_text(data)", "def _extract_json_objects(self, text, decoder=JSONDecoder()):\n\n processText = \"\".join(text.split())\n pos = 0\n while True:\n match_c = processText.find('{', pos)\n match_s = processText.find('[', pos)\n dif = abs(match_c-match_s)\n if dif == 1:\n match = text.find('[', pos)\n else:\n match = text.find('{', pos)\n if match == -1:\n break\n try:\n result, index = decoder.raw_decode(text[match:])\n yield result\n pos = match + index\n except ValueError:\n pos = match + 1", "def tokenize(self, file_name):\n main_body = self.cast.nodes[0].body[-1]\n token_string = self.visit(main_body)\n\n variable_map = self.dump_var_map()\n value_map = self.dump_val_map()\n\n out_file = open(file_name, \"w\")\n out_file.write(f\"{token_string}\\n\")\n\n for var in variable_map:\n out_file.write(f\"{var}\\n\")\n\n for val in value_map:\n out_file.write(f\"{val}\\n\")", "def data():\n result = {}\n for thread in DATA.threads:\n result[thread] = [formatNode(node) for node in DATA.threads[thread].tree]\n return json.dumps({\n 'checkpoints': DATA.checkpoints,\n 'threads': result\n })", "def parse_response(text_response):\n ls_word = []\n if ('textAnnotations' in text_response):\n for text in text_response['textAnnotations']:\n boxes = {}\n boxes['label'] = text['description']\n boxes['x1'] = text['boundingPoly']['vertices'][0].get('x',0)\n boxes['y1'] = text['boundingPoly']['vertices'][0].get('y',0)\n boxes['x2'] = text['boundingPoly']['vertices'][1].get('x',0)\n boxes['y2'] = text['boundingPoly']['vertices'][1].get('y',0)\n boxes['x3'] = text['boundingPoly']['vertices'][2].get('x',0)\n boxes['y3'] = text['boundingPoly']['vertices'][2].get('y',0)\n boxes['x4'] = text['boundingPoly']['vertices'][3].get('x',0)\n boxes['y4'] = text['boundingPoly']['vertices'][3].get('y',0)\n boxes['w'] = boxes['x3'] - boxes['x1']\n boxes['h'] = boxes['y3'] - boxes['y1']\n ls_word.append(boxes)\n return ls_word", "def build_tokens(self):\n self.advance()\n while self.__token != \"\":\n self.__tokens.append(self.token_type())\n self.advance()", "def toLingDataToken(token):\n\n t = Token()\n\n t.set(\n id=token.i,\n word=token.orth_,\n lemma=token.lemma_,\n POS=token.tag_,\n SPOS=token.pos_,\n depID=token.dep,\n depStr=token.dep_,\n NE=token.ent_type_,\n foreign=token.is_oov\n )\n\n # setting features\n '''\n t.features = {}\n #print(t.POS)\n featureStr = translate(t.POS)\n # save string form of feature translation\n t.features['str'] = featureStr\n\n featureArr = featureStr.split(\"+\")\n #print(featureArr)\n # find the first feature\n i = 0\n while len(featureArr[i]) < 1:\n i += 1\n\n t.features['type'] = featureArr[i]\n if t.features['type'] in [\"N\"]:\n # look for number\n i += 1\n while i < len(featureArr):\n # this means it's probably a number declaration\n if len(featureArr[i]) < 4:\n t.features['number'] = featureArr[i]\n # and next 
feature could be type of noun\n if i + 1 < len(featureArr):\n t.features['isProper'] = featureArr[i + 1]\n break\n i += 1\n\n if t.features['type'] in [\"V\"]:\n # look for person and number\n i += 1\n while i < len(featureArr):\n # this means it's probably a person declaration\n if len(featureArr[i]) < 4:\n t.features['person'] = featureArr[i]\n # and next feature could be number\n if i + 1 < len(featureArr):\n t.features['number'] = featureArr[i + 1]\n break\n else:\n # probably a tense\n t.features['tense'] = featureArr[i]\n t.features['isParticiple'] = (\"Part\" in featureArr[i])\n\n i += 1\n #print(t.features)\n '''\n\n # setting wordType\n if token.tag_ == \"BES\": # copula\n t.set(wordType=4)\n elif token.pos_ == \"VERB\":\n t.set(wordType=1)\n elif token.pos_ == \"NOUN\" or token.pos_ == \"PROPN\":\n t.set(wordType=2)\n elif token.pos_ == \"PRON\":\n t.set(wordType=3)\n else:\n t.set(wordType=5)\n\n # spaCy does not have coreferencing...\n\n return t", "def xml2tokens(xml_tagged_sent, tokenized_sent, raw_sent):\n raw, entities = get_entities(xml_tagged_sent)\n if re.search(r\"ENAMEX\", raw):\n print(xml_tagged_sent)\n print(raw)\n # count += 1\n\n tokens, syllables = word_tokenize(tokenized_sent, raw_sent)\n level1_syl_tags = [\"O\" for i in range(len(syllables))]\n level2_syl_tags = [\"O\" for i in range(len(syllables))]\n level3_syl_tags = [\"O\" for i in range(len(syllables))]\n\n level1_token_tags = [\"O\" for i in range(len(tokens))]\n level2_token_tags = [\"O\" for i in range(len(tokens))]\n level3_token_tags = [\"O\" for i in range(len(tokens))]\n\n flag = False\n for entity in entities:\n value = entity[\"value\"]\n start = entity[\"start\"]\n end = entity[\"end\"]\n entity_type = entity[\"type\"]\n start_syl_id, end_syl_id = find_syl_index(start, end, syllables)\n start_tok_id, end_tok_id = find_tok_index(start_syl_id, end_syl_id, tokens)\n\n if start_syl_id != None and end_syl_id != None:\n if entity[\"level\"] == 1:\n level1_syl_tags[start_syl_id] = \"B-\" + entity_type\n for i in range(start_syl_id + 1, end_syl_id):\n level1_syl_tags[i] = \"I-\" + entity_type\n elif entity[\"level\"] == 2:\n level2_syl_tags[start_syl_id] = \"B-\" + entity_type\n for i in range(start_syl_id + 1, end_syl_id):\n level2_syl_tags[i] = \"I-\" + entity_type\n else:\n level3_syl_tags[start_syl_id] = \"B-\" + entity_type\n for i in range(start_syl_id + 1, end_syl_id):\n level3_syl_tags[i] = \"I-\" + entity_type\n else:\n print(\"{},{},\\\"{}\\\" in '{}' ({})\".format(start,end,value,raw,xml_tagged_sent))\n flag = True\n\n if start_tok_id != None and end_tok_id != None:\n if entity[\"level\"] == 1:\n level1_token_tags[start_tok_id] = \"B-\" + entity_type\n for i in range(start_tok_id+1, end_tok_id):\n level1_token_tags[i] = \"I-\" + entity_type\n elif entity[\"level\"] == 2:\n level2_token_tags[start_tok_id] = \"B-\" + entity_type\n for i in range(start_tok_id + 1, end_tok_id):\n level2_token_tags[i] = \"I-\" + entity_type\n else:\n level3_token_tags[start_tok_id] = \"B-\" + entity_type\n for i in range(start_tok_id + 1, end_tok_id):\n level3_token_tags[i] = \"I-\" + entity_type\n else:\n pass\n # print(\"{},{},\\\"{}\\\" in '{}' ({})\".format(start_syl_id, end_syl_id, value, raw, xml_tagged_sent))\n\n ret_syllables = list(zip([ s.text for s in syllables], level1_syl_tags, level2_syl_tags, level3_syl_tags))\n ret_tokens = list(zip( [tk.text for tk in tokens], level1_token_tags, level2_token_tags, level3_token_tags))\n return ret_syllables, ret_tokens, raw, flag", "def get_token_data(self):\n 
raise NotImplementedError('Method \"get_token_data\" must be implemented in any derived class')", "def _build_label(self):\n counter = Counter()\n _, labels = self.read_json()\n counter.update(labels)\n dictionary = dict()\n for i, word in enumerate(counter.most_common()):\n dictionary[word[0]] = i\n return dictionary", "def __call__(self, token_received, **kwargs) -> str:\n print(token_received, flush=True, end=\"\")\n return token_received", "def parse_feature(self, feature_key, lines):\n ...", "def make_token(self, data: object) -> str:\n return self.serializer.dumps(data)", "def parse_kved(class_code):\n convert_dict_to_json(finding_classcode(\n reading_json(\"kved.json\"), class_code))" ]
[ "0.5711956", "0.55669457", "0.5513183", "0.54622537", "0.52287966", "0.5219712", "0.5186664", "0.51845807", "0.5176791", "0.5157778", "0.5122742", "0.51104176", "0.5076709", "0.5054146", "0.50533843", "0.5040823", "0.5034359", "0.502462", "0.50213176", "0.49873635", "0.4974976", "0.4970196", "0.49564272", "0.49485597", "0.49437186", "0.4939112", "0.49060458", "0.4903211", "0.49003676", "0.48996854", "0.489965", "0.4898303", "0.48937014", "0.48833942", "0.48797652", "0.4879724", "0.48720038", "0.48677227", "0.4848673", "0.48470616", "0.48408312", "0.48408312", "0.48408312", "0.48281404", "0.48234487", "0.48230216", "0.48174313", "0.48141542", "0.48118156", "0.48102775", "0.48076156", "0.48066565", "0.47882628", "0.47701883", "0.47694758", "0.4760229", "0.47539222", "0.4750607", "0.4748305", "0.47438118", "0.47416186", "0.47343066", "0.47338307", "0.4733488", "0.47222292", "0.47211453", "0.4719817", "0.47163564", "0.4713787", "0.47126102", "0.47081158", "0.4705755", "0.4698596", "0.46964055", "0.4695509", "0.46877924", "0.46871334", "0.46870846", "0.4685951", "0.46857467", "0.46848184", "0.46835402", "0.4675182", "0.46727744", "0.4671554", "0.46532723", "0.4651352", "0.4642973", "0.46336046", "0.46309087", "0.46197823", "0.4619448", "0.46154565", "0.46129763", "0.46117693", "0.46031627", "0.46000212", "0.45992875", "0.4592622", "0.4590303" ]
0.65568817
0
The UI is requesting parserator's tags. If they've been processed, send them to the client side; else, send a bunch of blank tags
def tags(docid):
    page = request.args.get('page')
    filename = SETTINGS.LABELED_LOCATION + '/' + docid
    page_text = get_document_page(docid, page)
    if not os.path.isfile(filename):
        return spanify(page_text, page)
    else:
        with open(filename) as tokens_file:
            labels = json.load(tokens_file)
        return spanify(page_text, page, labels)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def process_tag_list(self, taglist):\r\n self.do_before()\r\n for tag in taglist:\r\n self.feed(tag)\r\n self.do_after()", "def get_all_tags():\n try:\n data = ReadTag().run()\n except Exception as ex:\n return jsonify({'code': '500','message':'Internal server error'})\n else:\n return jsonify({'code': '200','data': data})", "def tag_complete(request):\r\n params = request.GET\r\n\r\n if request.user:\r\n username = request.user.username\r\n else:\r\n username = None\r\n\r\n if 'current' in params and params['current'] != \"\":\r\n current_tags = params['current'].split()\r\n else:\r\n current_tags = None\r\n\r\n if 'tag' in params and params['tag']:\r\n tag = params['tag']\r\n\r\n tags = TagMgr.complete(tag,\r\n current=current_tags,\r\n username=username)\r\n else:\r\n tags = []\r\n\r\n # reset this for the payload join operation\r\n if current_tags is None:\r\n current_tags = []\r\n\r\n return _api_response(request, {\r\n 'current': \",\".join(current_tags),\r\n 'tags': [t.name for t in tags]\r\n })", "def unknown_starttag(self, tag, attrs):\n if tag in self.valid_tags:\n self.result.append('<' + tag)\n for k, v in attrs:\n if string.lower(k[0:2]) != 'on' and", "def _get_good_request_wo_tags(self):\r\n prms = {\r\n 'url': u'http://bmark.us',\r\n 'description': u'This is my bmark desc',\r\n 'extended': u'And some extended notes about it in full form',\r\n 'tags': u'',\r\n }\r\n\r\n req_params = urllib.urlencode(prms)\r\n res = self.app.post(\r\n '/api/v1/admin/bmark?api_key={0}'.format(self.api_key),\r\n params=req_params,\r\n )\r\n return res", "def handle_tags(self, request):\n \"\"\"\n @api {get} /tags List tags\n @apiName GetTags\n @apiGroup Misc\n @apiVersion 1.0.0\n\n @apiDescription List currenty used tags\n\n @apiSuccessExample {json} Example response:\n [\n \"tag1\",\n \"tag2\"\n ]\n \"\"\"\n\n headers = {\n 'Content-Type': 'application/javascript',\n 'Access-Control-Allow-Origin': '*'\n }\n\n tags = []\n\n for task in self.cluster.config.get('tasks').values():\n if 'tags' in task:\n tags += task['tags']\n\n tags = list(set(tags))\n\n return HTTPReply(code = 200, body = json.dumps(tags), headers = headers)", "def test_no_tags(self):\n self.request.log(\"Hello World\")\n self.request.end()\n entry = self.get_entry()\n assert len(entry['tags']) == 0", "def handle_starttag(self, tag, attrs):\n \n if self.intermediate_tags > 0:\n self.intermediate_tags += 1\n return\n \n self.filtering = self.typogrify._should_be_filtered(tag, attrs)\n self.intermediate_tags = 1 if not self.filtering else 0", "def on_parse(\n self,\n ) -> AsyncIteratorOrIterator[None]: # pragma: no cover # pyright: ignore\n yield None", "def tag(self, sent):\n # WORK HERE!!", "def unknown_starttag(self, tag, attrs):\n if tag in self.valid_tags:\n self.result = self.result + '<' + tag\n for k, v in attrs:\n if (string.lower(k[0:2]) != 'on' and\n string.lower(v[0:10]) != 'javascript'):\n self.result = '%s %s=\"%s\"' % (self.result, k, v)\n endTag = '</%s>' % tag\n self.endTagList.insert(0, endTag)\n self.result = self.result + '>'", "def process_tags(self):\n nolf = self.unixtext.replace(\"\\n\", \" \")\n res = EMERGENCY_RE.findall(nolf)\n if res:\n # TODO: this can be based off the IBW Tags too\n self.is_emergency = True\n match = WINDHAIL.match(nolf)\n if match:\n gdict = match.groupdict()\n self.windtag = gdict['wind']\n self.windtagunits = gdict['windunits']\n self.haildirtag = gdict['haildir']\n self.winddirtag = gdict['winddir']\n self.hailtag = gdict['hail']\n\n match = WINDTAG.match(nolf)\n if match:\n 
gdict = match.groupdict()\n self.winddirtag = gdict['winddir']\n self.windtag = gdict['wind']\n self.windtagunits = gdict['windunits']\n\n match = HAILTAG.match(nolf)\n if match:\n gdict = match.groupdict()\n self.haildirtag = gdict['haildir']\n self.hailtag = gdict['hail']\n\n match = TORNADOTAG.match(nolf)\n if match:\n gdict = match.groupdict()\n self.tornadotag = gdict['tornado']\n\n match = TORNADODAMAGETAG.match(nolf)\n if match:\n gdict = match.groupdict()\n self.tornadodamagetag = gdict['damage']\n\n match = WATERSPOUTTAG.match(nolf)\n if match:\n gdict = match.groupdict()\n self.waterspouttag = gdict['waterspout']\n\n for token in FLOOD_TAGS.findall(self.unixtext):\n self.flood_tags[token[0]] = token[1]", "def process_message(self, tag, value):\n return False", "def handle_request(self, request: dict) -> dict:\n \"\"\"Creating a sorted tuple of tags to be searched\"\"\"\n search_tags = tuple(sorted([tag['name'] for tag in request['selected_tags']]))\n\n # This is checking if the search tags matches exactly\n complete_match = self.complete_match.get(search_tags)\n if complete_match:\n return {\n \"snippet\": str(complete_match[0]),\n \"next_tags\": complete_match[1],\n \"status\": {\"code\": 0, \"msg\": \"Valid tags with snippet\"},\n \"selected_tags\": request['selected_tags']\n }\n\n partial_match = self.partial_match.get(search_tags)\n # This is checking if the search tags matches partially\n if partial_match:\n if isinstance(partial_match[-1], list):\n next_tags = partial_match[-1]\n partial_match.pop(-1)\n\n if partial_match and len(partial_match) == 1:\n return {\n \"snippet\": str(partial_match[0]),\n \"next_tags\": [],\n \"status\": {\"code\": 0, \"msg\": \"Valid tags with snippet\"},\n \"selected_tags\": request['selected_tags']\n }\n elif partial_match and len(partial_match) > 1:\n return {\n \"snippet\": None,\n \"next_tags\": next_tags,\n \"status\": {\"code\": 1, \"msg\": \"Valid tags but no snippet\"},\n \"selected_tags\": request['selected_tags']\n }\n # None of the tag is matching\n return {\n \"snippet\": None,\n \"next_tags\": [],\n \"status\": {\"code\": 2, \"msg\": \"Invalid tags\"},\n \"selected_tags\": request['selected_tags']\n }", "def handle_starttag(self, tag, attrs):\n try:\n if tag == \"article\": # Set flag for news feed parsing to true\n for name, value in attrs:\n if name == 'class' and 'grid_12 alpha enrichi' in value:\n self.article_section = True\n elif tag == \"a\" and self.article_section == True: # get a link from the news feed\n for name, value in attrs:\n if name == \"href\":\n if value not in self.links and \"/journaliste/\" not in value:\n self.links.append(value)\n elif tag == \"div\" and not self.article_body: # Set flag from article body to true\n for name, value in attrs:\n if name == 'id' and value == 'articleBody':\n self.article_body = True\n elif tag == 'div' and self.article_body: # Increment number of open div in the main div of article (used to determine when the main article div is closed)\n self.div_open_in_article_body += 1\n elif tag == 'p' and self.article_body: # Suspend aqcuisition for \"lire aussi\" section\n for name, value in attrs:\n if name == 'class' and value == 'lire':\n self.suspend_acquisition = True\n elif tag == 'section' and self.article_body:\n self.suspend_acquisition == True\n elif tag == 'iframe' and self.article_body:\n self.suspend_acquisition == True\n elif tag == 'body':\n for name, value in attrs:\n if name == \"class\":\n self.category = value\n except:\n pass", "def _canProcessTags(self, grammar, 
pos_tags):\n badTags = []\n for tag in pos_tags:\n if tag not in grammar.tags:\n badTags.append(tag)\n logger.debug(\"Grammar can't handle tag:\" + tag)\n if badTags:\n return False\n else:\n return True", "def tags():", "def cleanup(self):\n self.result.extend(self.endTagList)", "def parse(self, response):", "def process(self, request):\n pass", "def get_tags(request):\n try:\n tags = []\n for tag in Tag.objects.all():\n tags.append({\"title\": tag.title, \"id\": tag.pk})\n\n return format_ajax_response(True, \"Knowledgebase tags retrieved successfully.\", {\"tags\": tags})\n except Exception as ex:\n logger.error(\"Failed to get_tags: %s\" % ex)\n return format_ajax_response(False, \"There was an error retrieving the knowledgebase tags.\")", "def extract_and_tag_test():\n test_untagged_path = os.getcwd() + \"/data/test/test_untagged/\"\n test_untagged_directory = os.fsencode(test_untagged_path)\n\n print(\"Tagging text. Please wait...\")\n for file in os.listdir(test_untagged_directory):\n filename = os.fsdecode(file)\n try:\n if filename.endswith(\".txt\"):\n text = entity_process.read_data(test_untagged_path, file)\n text = text.lower()\n header,body = entity_process.split_text(text)\n header_array = header.splitlines()\n\n\n start_time, end_time = entity_process.extract_time(header)\n location = entity_process.extract_location(header_array, body)\n speaker = entity_process.extract_speaker(header_array, body)\n\n entity_tagger.tag_all(filename, text, start_time, end_time, location, speaker)\n except Exception as e:\n raise e\n return \"No files found here!\"\n print(\"Tagging complete! Text saved to\" + os.getcwd() + \"/out\")", "def clean(self):\n # Calls handle_starttag, handle_endtag, and handle_data\n self.feed()\n\n # Clean up any parent tags left open\n if self.current_parent_element['tag'] != '':\n self.cleaned_html += '</{}>'.format(self.current_parent_element['tag'])\n\n # Remove empty <p> added after lists\n self.cleaned_html = re.sub(r'(</[u|o]l>)<p></p>', r'\\g<1>', self.cleaned_html)\n\n self._remove_pre_formatting()\n\n return self.cleaned_html", "def handle_data(self, data):\n if len(self.current_tags) > 0:\n self.current_tags[-1].add_data(data)", "def parse_response(self):\n pass", "def unknown_starttag(self, tag, attrs):\n if VALID_TAGS.has_key(tag):\n\n self.result = self.result + '<' + tag\n\n for k, v in attrs:\n\n if k.lower().startswith( 'on' ):\n raise IllegalHTML, 'Javascipt event \"%s\" not allowed.' % k\n\n if v.lower().startswith( 'javascript:' ):\n raise IllegalHTML, 'Javascipt URI \"%s\" not allowed.' % v\n\n self.result = '%s %s=\"%s\"' % (self.result, k, v)\n\n endTag = '</%s>' % tag\n if VALID_TAGS.get(tag):\n self.result = self.result + '>'\n else:\n self.result = self.result + ' />'\n\n elif NASTY_TAGS.get( tag ):\n raise IllegalHTML, 'Dynamic tag \"%s\" not allowed.' % tag\n\n else:\n pass # omit tag", "def test_tags_tag_search_no_tag(self):\n\n po = self.catalog.load_pageobject('TagsPage')\n po.goto_page()\n\n # perform the search\n self.browser.proxy_client.new_har(\"page\")\n po.search_for_tags('')\n har_entry = self.browser.page_load_details()\n\n # check for errors\n assert har_entry is not None, \\\n \"failed to load the uri. 
http archive unavailable.\"\n assert self.browser.error_loading_page(har_entry) is False, \\\n \"performing a tag search using an empty string as the tag\" \\\n + \"returned an error response code on the page\" \\\n + \"%s http archive follows:\\n%s\" \\\n % (po.current_url(),pprint.pformat(har_entry))", "def handle_endtag(self, tag):\n if tag in PARENT_ELEMENTS:\n self.current_parent_element['tag'] = ''\n self.current_parent_element['attrs'] = ''\n\n if tag == 'li':\n self.parsing_li = True\n if tag != 'br':\n self.cleaned_html += '</{}>'.format(tag)", "def handle_endtag(self, tag):\n\n if self.intermediate_tags > 0:\n self.intermediate_tags -= 1\n \n # Widont filter needs to be handled here\n if self.filtering:\n content = self.data_buffer[-self.filtered_data_length:]\n content = self.typogrify.widont(tag, content)\n self.data_buffer = self.data_buffer[:-self.filtered_data_length] + content", "def parse(self, response: BeautifulSoup):\n raise NotImplementedError", "def tags(self):\r\n url = '{0}/tags/'.format(self.get_url())\r\n request = http.Request('GET', url)\r\n\r\n return request, parsers.parse_json", "def handle_starttag(self, tag, attrs):\n # Parse the tag attributes\n attrs_dict = dict(t for t in attrs)\n\n # If the tag is a predefined parent element\n if tag in PARENT_ELEMENTS:\n # If parser is parsing another parent element\n if self.current_parent_element['tag'] != '':\n # close the parent element\n self.cleaned_html += '</{}>'.format(self.current_parent_element['tag'])\n\n self.current_parent_element['tag'] = tag\n self.current_parent_element['attrs'] = {}\n\n self.cleaned_html += '<{}>'.format(tag)\n\n # If the tag is a list item\n elif tag == 'li':\n self.parsing_li = True\n\n # Parse the class name & subsequent type\n class_name = attrs_dict['class']\n list_type = class_name[10:]\n\n # Check if parsing a list\n if self.current_parent_element['tag'] == 'ul' or self.current_parent_element['tag'] == 'ol':\n cur_list_type = self.current_parent_element['attrs']['class']\n # Parsing a different list\n if cur_list_type != list_type:\n # Close that list\n self._close_list()\n\n # Open new list\n self._open_list(list_type)\n # Not parsing a list\n else:\n # if parsing some other parent\n if self.current_parent_element['tag'] != '':\n self.cleaned_html += '</{}>'.format(self.current_parent_element['tag'])\n # Open new list\n self._open_list(list_type)\n\n self.cleaned_html += '<{}>'.format(tag)\n\n # If the tag is a line break\n elif tag == 'br':\n # If parsing a paragraph, close it\n if self.current_parent_element['tag'] == 'p':\n self.cleaned_html += '</p>'\n self.current_parent_element['tag'] = ''\n self.current_parent_element['attrs'] = {}\n # If parsing a list, close it\n elif self.current_parent_element['tag'] == 'ul' or self.current_parent_element['tag'] == 'ol':\n self._close_list()\n # If parsing any other parent element, keep it\n elif self.current_parent_element['tag'] in PARENT_ELEMENTS:\n self.cleaned_html += '<br />'\n # If not in any parent element, create an empty paragraph\n else:\n self.cleaned_html += '<p></p>'\n\n # If the tag is something else, like a <b> or <i> tag\n else:\n # If not parsing any parent element\n if self.current_parent_element['tag'] == '':\n self.cleaned_html += '<p>'\n self.current_parent_element['tag'] = 'p'\n self.cleaned_html += '<{}'.format(tag)\n\n for attr in sorted(attrs_dict.keys()):\n self.cleaned_html += ' {k}=\"{v}\"'.format(\n k=attr,\n v=attrs_dict[attr]\n )\n\n self.cleaned_html += '>'", "def proceed(self, source):\n\n for line 
in source:\n line = line.rstrip()\n if istag(line) and isinstance(maketag(line), Time): # here, handle Time tags\n self.time_annotate_date(maketag(line)) # annotate Date tag and queue Time tag\n if not default_options.mdjpp_time_propagate:\n continue\n # not a tag!\n if not istag(line) or isinstance(maketag(line), Time):\n if self.metastable_tag: # and len(line):\n # open metastable tag\n self.open_tag(self.metastable_tag)\n self.metastable_tag = None\n # print line\n if self.printable():\n self.printme_text(line)\n # tag!\n else:\n # make regular tag\n this_tag = maketag(line)\n # no metastable tag\n if not self.metastable_tag:\n # make new metastable tag\n self.metastable_tag = this_tag\n # does new tag close last tag?\n self.close_tag(this_tag)\n # metastable tag present\n else:\n # is metastable tag or this tag of Date type?\n if isinstance(self.metastable_tag, Date) or isinstance(this_tag, Date):\n # open metastable tag\n self.open_tag(self.metastable_tag)\n # recreate metastable tag with new tag\n self.metastable_tag = this_tag\n # does new tag close last tag?\n self.close_tag(this_tag)\n else:\n # add new tag to metastable tag\n self.metastable_tag.append(this_tag)", "def get_tags(self):\n return ''", "def get_tags(self):\n return ''", "def process_response(self, request, response):\n\n if settings.DEBUG:\n return response\n\n if 'text/html' in response['Content-Type'] and settings.COMPRESS_HTML:\n response.content = strip_spaces_between_tags(\n response.content.strip())\n response.content = RE_NEWLINE.sub(\" \", response.content)\n response.content = RE_MULTISPACE.sub(\" \", response.content)\n response.content = RE_SPACETAG1.sub(\">\", response.content)\n response.content = RE_SPACETAG2.sub(\"<\", response.content)\n return response", "def parse(self, response):\n return super().parse(response)", "def parse(self, response):\n return super().parse(response)", "def tagger():", "def processTags(request, media, form, update):\n if update:\n if 'tags' in request.POST:\n tag_names = form.cleaned_data['tags'].split(',')\n media.tag_set.clear()\n for tag_name in tag_names:\n tag, dummy = Tag.objects.get_or_create(name=tag_name.strip())\n media.tag_set.add(tag)\n media.save()\n else:\n if 'tags' in request.POST:\n tag_names = form.cleaned_data['tags'].split(',')\n for tag_name in tag_names:\n tag, dummy = Tag.objects.get_or_create(name=tag_name.strip())\n media.tag_set.add(tag)\n media.save()", "def _verify_tags(self):\n for tag in self.tags:\n if tag.lower() in VASP_TAG_LIST:\n continue\n else:\n print((\"Warning: unknown INCAR tag '\" + tag + \"' with value '\" + str(self.tags[tag]) + \"'\"))", "def handle_data(self, data):\n if data.strip():\n self._content_list.append((self._current_tag, data))\n self._html += f\"{{{'placeholder_'+str(self._index)}}}\"\n self._index += 1", "def test_format_bad_tags(self):\n tags = self.c._format_tags(None)\n self.assertEqual(0, len(tags))", "def process_messages(self):\n pass", "def handle_starttag(self, tag, attrs):\n if verbose(): print(\"TIParser.handle_starttag(self, %s, %s)\"\n % (tag, attrs))\n\n self.standard_tag_checks(tag, attrs)\n if tag not in self.nostack:\n self.stack.append(tag)", "def process_response(self, request, response):\n \n if not getattr(request, 'toolbar', False):\n return response\n if getattr(request, '_cms_toolbar_tag_used', False):\n return response\n if not response['Content-Type'].startswith(HTML_TYPES):\n return response\n response.content = _patch(response.content, request)\n return response", "def handle_endtag(self, 
tag):\n try:\n if tag == \"article\":\n self.article_section = False\n elif tag == \"div\" and self.article_body and self.div_open_in_article_body == 0:\n self.article_body = False\n elif tag == 'div' and self.article_body and self.div_open_in_article_body > 0:\n self.div_open_in_article_body -= 1\n elif tag == 'p' and self.suspend_acquisition == True:\n self.suspend_acquisition == False\n elif tag == 'section' and self.suspend_acquisition == True:\n self.suspend_acquisition == False\n elif tag == 'iframe' and self.suspend_acquisition == True:\n self.suspend_acquisition == False\n\n except:\n pass", "def func(self):\n if not self.args:\n return self.display_tags()\n super(CmdArxTag, self).func()", "def handle_starttag(self, tag, attrs):\n self._current_tag = tag\n if tag == \"a\":\n self._html += self._handle_anchor_tag(attrs)\n elif tag == \"img\":\n self._html += self._handle_img_tag(attrs)\n elif tag == \"iframe\":\n self._html += self._handle_iframe_tag(attrs)\n elif attrs:\n self._html += f\"<{str(tag)}\"\n for key, value in attrs:\n if not value:\n # When value is 'None', python leaves it empty but we sometimes need the value 'None'\n value = \"None\"\n self._html += f' {key}=\"{value}\"'\n self._html += self._close_tag(tag, False)\n else:\n self._html += self._close_tag(tag, True)", "def handle_starttag(self, tag, attrs):\n\n # Medium export files have a footer with junk that's not part of the original post.\n # Stop processing entirely if we hit the document footer.\n if self.seen_footer:\n return\n if tag == \"footer\":\n self.seen_footer = True\n\n # Keep track of where we are in the DOM by putting this tag on a stack\n self.tag_stack.append(tag)\n\n # Convert any html tag attributes to a dictionary just so they are easier to look up.\n attr_dict = self.attrs_to_dict(attrs)\n\n # In Mobiledoc format, the 'parent' elements of a doc can be one of a fixed set of tags:\n # - p\n # - h1, h2, h3, h4, h5, h6\n # - blockquote\n # - ul or ol\n # So when we hit one of these tags in a Medium html, we'll start accumulating child Mobiledoc elements of this element.\n if tag in [\"p\", \"h1\", \"h2\", \"h3\", \"h4\", \"blockquote\", \"ul\", \"ol\", \"div\"]:\n # State variables to accumulate child Mobiledoc elements of this tag.\n # We assume Medium html doc never have more than one of these parent elements at a time, so these\n # variables just keep track of the children of the single current parent element.\n # This won't work for arbitrary html files not produced by Medium.\n self.current_markers = []\n self.current_list_item_markers = []\n\n if tag == \"div\" and \"class\" in attr_dict and \"graf\" in attr_dict[\"class\"]:\n self.inside_link_summary_div = True\n else:\n # Otherwise, we have one of the many possible kinds of child elements. We need to convert each into\n # an equivalent Mobiledoc representation and add it to the current parent element.\n\n # <a href=''> HTML links turn into Mobiledoc 'markup' elements with href data\n if tag == \"a\":\n markup = [\n \"a\",\n [\n \"href\",\n attr_dict[\"href\"]\n ]\n ]\n self.markups.append(markup)\n\n # <img> turn into Mobiledoc 'card' elements with src data. 
They *could* be 'markup' elements but\n # cards are recommended in Ghost with the new editor.\n # We can also find the post's featured image and guess if the image was displayed as 'wide' on medium and\n # replicate that on Ghost.\n elif tag == \"img\":\n image_attributes = {\n \"src\": attr_dict[\"src\"]\n }\n\n # Even though medium doesn't include image display width in the export,\n # you can guess that the image was shown wide based on the size of the CDN image it links to.\n if \"/max/1000/\" in attr_dict[\"src\"]:\n image_attributes[\"cardWidth\"] = \"wide\"\n\n # Check if this was the medium post's featured image.\n # If it was, we definitely want to save that off as document metadata.\n if \"data-is-featured\" in attr_dict and attr_dict[\"data-is-featured\"] == \"true\":\n image_attributes[\"featured_image\"] = True\n\n card = [\n 'image',\n image_attributes\n ]\n self.cards.append(card)\n\n # 10 in Mobiledoc is the magic number for 'Card'\n section = [10, len(self.cards) - 1]\n self.sections.append(section)\n\n # <pre> turn into Mobiledoc code 'card' elements with code content data. They *could* be 'markup' elements but\n # cards are recommended in Ghost with the new editor.\n # We also have to deal with the issue that Medium makes each line of code a new <pre> tag. So if the last\n # element was a <pre>, just keep appending to the last card instead of creating a new one.\n elif tag == \"pre\":\n # If the last tag wasn't a <pre>, create a new code block\n if self.last_section_tag != \"pre\":\n card = [\n 'code',\n {\"code\": \"\"}\n ]\n self.cards.append(card)\n\n # 10 in Mobiledoc is the magic number for 'Card'\n section = [10, len(self.cards) - 1]\n self.sections.append(section)\n else:\n # If the last section was a <pre>, just keep appending.\n # We also need to add a line break between each appended <pre> to maintain formatting..\n self.cards[-1][1][\"code\"] += \"\\n\\n\"\n\n # Some Medium embeds become <iframe> tags in the export file.\n # This includes things like embedded subscription forms or some kinds of external content.\n # <iframe> tags turn into Mobiledoc card elements with an <iframe> tag that links to the same place as before.\n elif tag == \"iframe\":\n # Generate an <iframe> tag in text that replicates the original on in the Medium export\n attr_strings = []\n for k, v in attr_dict.items():\n attr_strings.append(f'{k}=\"{v}\"')\n attr_string = \" \".join(attr_strings)\n html_markup = f\"<iframe {attr_string}></iframe>\"\n\n # Create the Mobiledoc Card\n card = [\n 'html',\n {\"html\": html_markup}\n ]\n self.cards.append(card)\n\n # 10 in Mobiledoc is the magic number for 'Card'\n section = [10, len(self.cards) - 1]\n self.sections.append(section)\n\n # Handle Github gists in the Medium doc. 
They appear in the export as <script> tags.\n # So we'll create a Mobiledoc card element with a <script> tag that links to the same place as before.\n elif tag == \"script\" and \"gist.github.com\" in attr_dict[\"src\"]:\n # Handle embedded gists\n attr_strings = []\n for k, v in attr_dict.items():\n attr_strings.append(f'{k}=\"{v}\"')\n attr_string = \" \".join(attr_strings)\n html_markup = f\"<script {attr_string}></script>\"\n card = [\n 'html',\n {\"html\": html_markup}\n ]\n self.cards.append(card)\n\n # 10 in Mobiledoc is the magic number for 'Card'\n section = [10, len(self.cards) - 1]\n self.sections.append(section)\n\n # <hr> tags become special \"hr\" cards in Mobiledoc.\n # We also need to skip the first <hr> because Medium adds an extra one at the top of every exported doc.\n elif tag == \"hr\":\n if self.seen_first_hr:\n card = [\"hr\", {}]\n self.cards.append(card)\n\n # 10 in Mobiledoc is the magic number for 'Card'\n section = [10, len(self.cards) - 1]\n self.sections.append(section)\n self.seen_first_hr = True\n\n\n # <br> tags translate to different Mobiledoc elements depending on their context\n elif tag == \"br\":\n # We know Medium's <br> tags never have a matching closing tag, so remove this element from the stack.\n self.tag_stack.pop()\n\n if \"pre\" in self.tag_stack:\n # - A <br> in a <pre> just needs to be appeneded to the current code block as a line break\n self.cards[-1][1][\"code\"] += \"\\n\"\n else:\n # - A <br> inside a <p>, <blockquote>, etc needs to be converted to a Mobiledoc \"soft-return\" atom.\n atom = [\"soft-return\", \"\", {}]\n self.atoms.append(atom)\n\n # Add a mobiledoc element to point to that new mobiledoc atom\n marker = [1, [], 0, len(self.atoms) - 1]\n self.current_markers.append(marker)", "def has_tags_in_content(self):\n\t\treturn self.get_content() and re_tag.search(self.get_content())", "def handle_startendtag(self, tag, attrs):\r\n\r\n source = ''\r\n key = ''\r\n\r\n # only parse tags that are conpot template tags ( <condata /> )\r\n if tag == 'condata':\r\n\r\n # initialize original tag (needed for value replacement)\r\n origin = '<' + tag\r\n\r\n for attribute in attrs:\r\n\r\n # extend original tag\r\n origin = origin + ' ' + attribute[0] + '=\"' + attribute[1] + '\"'\r\n\r\n # fill variables with all meta information needed to\r\n # gather actual data from the other engines (databus, modbus, ..)\r\n if attribute[0] == 'source':\r\n source = attribute[1]\r\n elif attribute[0] == 'key':\r\n key = attribute[1]\r\n\r\n # finalize original tag\r\n origin += ' />'\r\n\r\n # we really need a key in order to do our work..\r\n if key:\r\n # deal with databus powered tags:\r\n if source == 'databus':\r\n self.result = self.databus.get_value(key)\r\n self.payload = self.payload.replace(origin, str(self.result))\r\n\r\n # deal with eval powered tags:\r\n elif source == 'eval':\r\n result = ''\r\n # evaluate key\r\n try:\r\n result = eval(key)\r\n except Exception as e:\r\n logger.exception(e)\r\n self.payload = self.payload.replace(origin, result)", "def parse_and_alert(self):\n self.parse_feed()\n self.alert_new_posts()", "def parse(self, response):\n quantity = response.meta.get('quantity', 0)\n javascript = \"\".join(response.xpath('//script[contains(text(), \"sharedData\")]/text()').extract())\n json_data = json.loads(\"\".join(re.findall(r'window._sharedData = (.*);', javascript)))\n # with open('hi.txt', 'w') as f:\n # f.write(json.dumps(json_data, indent=4))\n data_media = 
json_data[\"entry_data\"][\"TagPage\"][0][\"tag\"][\"top_posts\"][\"nodes\"]\n data_media += json_data[\"entry_data\"][\"TagPage\"][0][\"tag\"][\"media\"][\"nodes\"]\n\n for img in data_media:\n if quantity < settings.QUANTITY_IMAGES:\n item = self.add_item(img, response.meta)\n quantity += 1\n yield item\n else:\n Task.objects.filter(keywords=response.meta['keyword']).update(\n instagram_status='done')\n r = redis.StrictRedis(host='localhost', port=6379, db=0)\n r.publish('instagram', response.meta['keyword'])\n return\n\n next_href = json_data[\"entry_data\"][\"TagPage\"][0][\"tag\"][\"media\"][\"page_info\"][\"has_next_page\"]\n if next_href:\n url = response.urljoin(\n '?max_id=' +\n json_data[\"entry_data\"][\"TagPage\"][0][\"tag\"][\"media\"][\"page_info\"][\"end_cursor\"])\n yield scrapy.Request(url, self.parse,\n meta={'keyword': response.meta['keyword'],\n 'quantity': quantity})", "def __process_requests(self):\n\t\tfor received_message in self.receiver:\n\t\t\tif self.registry.ip_known(received_message.sender):\n\t\t\t\tlogger.info(\"Message received from registered client.\")\n\t\t\t\tif received_message.body.startswith(COMMAND_FLAG_CHAR):\n\t\t\t\t\tlogger.debug(\"Message was a command.\")\n\t\t\t\t\tself.parse(received_message.body)\n\t\t\t\telse:\n\t\t\t\t\tlogger.debug(\"Message was generic.\")\n\t\t\t\t\tself.send_to_all(received_message)\n\t\t\telse:\n\t\t\t\tlogger.info(\"Message received from an unregistered client.\")\n\t\t\t\tself.attempt_to_register(received_message)", "def handle_body(self, tag, attrs):\n self.body = 'open'", "def parse(self, data):\n self.links = []\n self.images = []\n self.current_tags = []\n self.reset()\n self.feed(data)", "def process_multi_body_format(commands):", "def check_tags(self):\n if(self.tags is None or not self.tags.get('subscriber', False)):\n self.filters |= Filters.NonSubs\n\n if(self.tags is None or not self.tags.get('user-type', 0) > 0):\n self.filters |= Filters.NonMods", "def identify_and_parse_page(self, response):\n if self.initial_page_filter(response):\n if self.is_index_page(url=response.url, response=response):\n self.process_index_page(response)\n elif self.is_captcha_page(response.url, response):\n self.process_captcha(response)\n elif self.is_results_page(response.url, response):\n items = self.process_question_answer_page(response)\n if self.duplicate_url:\n yield Request(url=self.duplicate_url, callback=self.identify_and_parse_page)\n self.duplicate_url = None\n for item in items:\n yield item\n else:\n self.classification_file.write(\"other, {}\\n\".format(response.url))\n print('other: {}'.format(response.url))\n else:\n self.classification_file.write(\"other, {}\\n\".format(response.url))\n print('other: {}'.format(response.url))", "def handle_data(self, data):\n if self.current_parent_element['tag'] == '':\n self.cleaned_html += '<p>'\n self.current_parent_element['tag'] = 'p'\n\n self.cleaned_html += data", "def handle_startendtag(self, tag, attrs):\n if verbose(): print(\"TIParser.handle_startendtag(self, %s, %s)\"\n % (tag, attrs))\n\n if tag == 'script':\n self.errmsg(\"<script/> does not load javascript effectively.\"\n + \" Please use '<script ... 
> </script>' instead.\")\n self.standard_tag_checks(tag, attrs)", "def post_process(text):\n # XXX update to spit out HTML - no need for requests GDocs can take html\n verbose = False\n request_list = []\n chars = iter(text)\n normal_text = []\n knownsigils = {\"end\":('',\"NONE\"),\n \"^\": (\"0123456789+-\",\"SUPERSCRIPT\"),\n \"_\": (\"0123456789\",\"SUBSCRIPT\")\n }\n c = next(chars, \"end\")\n while (True):\n if (c in knownsigils.keys()):\n if len(normal_text): request_list.append((''.join(normal_text), \"NORMAL\"))\n normal_text.clear()\n (c,token) = _gettoken(c,chars,knownsigils)\n if (token is not None): request_list.append(token)\n if (c==\"end\"):\n break\n else:\n continue\n else:\n normal_text.append(c)\n c = next(chars, \"end\")\n return request_list", "def tagBodyEncode(self):\n self.body=json.dumps(self.params)\n self.taghandler.tagSet(\"domain\", self.domain)\n self.taghandler.tagSet(\"category\", self.category)\n self.taghandler.tagSet(\"methodname\", self.methodname)\n self.taghandler.tagSet(\"login\", self.login) \n self.taghandler.tagSet(\"passwd\", self.passwd)\n #self.body=self.body.strip()", "def process_sentence_pos_tags(input_file, group_tags):\n\n print('Reading file and POS tagging...')\n if input_file is not None:\n f = open(input_file, 'r', encoding='utf-8', errors='ignore')\n sentences = nltk.sent_tokenize(f.read())\n sentence_tag_tokens = [nltk.pos_tag(nltk.word_tokenize(\n sentence, language='english'), lang='eng')\n for sentence in sentences]\n else:\n sentence_tag_tokens = nltk.corpus.treebank.tagged_sents()[0:1000]\n\n sentences = []\n tags = []\n for pos_tags in sentence_tag_tokens:\n sentence_tmp = ''\n pos_tags_tmp = []\n for word, tag in pos_tags:\n sentence_tmp += word + ' '\n # Group tags\n if group_tags:\n # Preprocess tags\n if re.match('VB.*$', tag): # Group all verbs\n tag = 'VB'\n elif re.match('JJ.*$', tag): # Group all adjectives\n tag = 'JJ'\n elif re.match('NN$|NNS$', tag): # Group all nouns\n tag = 'NN'\n elif re.match('NNP$|NNPS$', tag): # Group all proper nouns\n tag = 'NNP'\n elif re.match('RB.*$', tag): # Group all adverbs\n tag = 'RB'\n\n if tag in concepts:\n pass\n else:\n tag = 'OTHER'\n pos_tags_tmp.append((word, tag))\n pos_tags_tmp.append((' ', 'SPACE'))\n sentences.append(sentence_tmp)\n tags.append(pos_tags_tmp)\n print('...completed.')\n return sentences, tags", "def compute(self, batch: Dataset) -> List[TaggingResponse]: # type: ignore\n syntax_options: SyntaxOptions = assert_not_none(self.config.syntax)\n spacy_model = spacy.load(syntax_options.spacy_model)\n\n utterances = batch[self.config.columns.text_input]\n records: List[TaggingResponse] = []\n\n for utterance in utterances:\n tag: Dict[Tag, bool] = {\n smart_tag: False\n for family in [SmartTagFamily.extreme_length, SmartTagFamily.partial_syntax]\n for smart_tag in SMART_TAGS_FAMILY_MAPPING[family]\n }\n\n doc = spacy_model(clean_utterance(utterance))\n # Remove punctuation for word count and smart tags\n tokens = [token.text for token in doc if not token.is_punct]\n\n if len(tokens) >= syntax_options.long_utterance_min_word:\n tag[SmartTag.long] = True\n if len(tokens) <= syntax_options.short_utterance_max_word:\n tag[SmartTag.short] = True\n\n sub_toks = [tok for tok in doc if (tok.dep_ in syntax_options.subj_tags)]\n obj_toks = [tok for tok in doc if (tok.dep_ in syntax_options.obj_tags)]\n vrb_toks = [tok for tok in doc if (tok.pos_ in self.verb_tags)]\n if not sub_toks:\n tag[SmartTag.no_subj] = True\n if not obj_toks:\n tag[SmartTag.no_obj] = True\n if not 
vrb_toks:\n tag[SmartTag.no_verb] = True\n\n # Some issues occur with other languages such as french if using doc.sents directly.\n # Hence, we use an English sentencizer that seems to work better for similar languages.\n doc_sentencizer_en = self.spacy_sentencizer_en(clean_utterance(utterance))\n sentence_count = len(list(doc_sentencizer_en.sents))\n if sentence_count > 1:\n tag[SmartTag.multi_sent] = True\n\n adds = {DatasetColumn.word_count: len(tokens)}\n records.append(TaggingResponse(tags=tag, adds=adds))\n\n return records", "def tag(self, text):\n\t\tpass", "def tags(self):\r\n url = '{0}/{1}'.format(self.get_url(), 'tags')\r\n\r\n return http.Request('GET', url), parsers.parse_json", "def intf_ENTTAG(E):\n if not inc.entid_or_LST_of_entids(E.The,2) or not inc.TXT_or_LST_of_TXTs(E.The,1):\n print(\"Input Error: tag\")\n print(intf_ENTTAG.__doc__)\n return # Without doing much of anything.\n refreshview= False # No need unless view attributes (@) have been set.\n mytags= E.The.StackPop().val\n if type(mytags)==type(list()):\n #mytags= map(lambda x:x.val, mytags) # Should now be a list of TXTs.\n mytags= [x.val for x in mytags] # Should now be a list of TXTs.\n else:\n mytags= [ mytags ] # Also a (1 item) list of ints.\n myeids= E.The.StackPop().val\n if type(myeids)==type(list()):\n #myeids= map(lambda x:x.val, myeids) # Should now be a list of VALs.\n myeids= [x.val for x in myeids] # Should now be a list of VALs.\n else:\n myeids= [ myeids ] # Also a (1 item) list of ints.\n for myeid in myeids:\n if myeid in MMEL.El: # Check if eid exists.\n for mytag in mytags:\n if len(mytag) > 1 and '@' == mytag[1]:\n refreshview= True\n existing_att_tags= MMEL.El[myeid].has_tag_starting_with(mytag[0:2])\n if existing_att_tags:\n for et in existing_att_tags:\n MMEL.El[myeid].del_tag(et)\n print(\"Tagging entity #%d with tag ''%s''\" % (myeid,mytag))\n if not MMEL.El[myeid].has_tag(mytag):\n MMEL.El[myeid].add_tag(mytag)\n else:\n print(\"Warning: No entity #%d. 
Skipping.\" % myeid)\n if refreshview: OUT.default(MMEL,E) # AUTODUMP ", "def processProductsRequest(self):\n\n\t\t# Use the simple page renderer to create the body content\n\t\treturn self.render_simple_page('Products')", "def bdg(self, irc, msg, args):\n joke = self.fetchtags(irc,\n 'http://www.blaguesdegeek.com/aleatoire.html',\n 'p', {'class':'contenu'}, 1)\n for l in joke[0].contents:\n if l.string:\n l = self.sanitize(l)\n if l != '':\n irc.reply(l, prefixNick=False)", "def page_list_tag(self):\n\n\n alt_task_store_name = None\n alt_task_store = None\n try:\n alt_task_store_name = self.last_request_get_dict[\"alt_task_store_name\"][0]\n if alt_task_store_name == \"task_store_trash\":\n alt_task_store = self.task_store_trash\n else:\n alt_task_store_name = None\n alt_task_store = None\n except Exception as exc:\n util.dbgprint(\"expected exception asa {}\".format(str(exc)))\n used_task_store = self.task_store\n if alt_task_store:\n used_task_store = alt_task_store\n\n try:\n tag = self.last_request_get_dict[\"tag\"][0]\n list_taskid_desc = used_task_store.filter_tag(tag)\n except Exception as exc:\n util.dbgprint(\"exception apa, semi-expected {}\".format(str(exc)))\n return self.page_list_notes(no_history=True)\n\n history_id = self.save_history([\"tag\", \"action\"], alt_task_store_name=alt_task_store_name)\n\n title = \"woolnote - notes in \" + tag\n\n page_header_first_text = \"notes in \" + tag\n page_header_link_button_name = \"reset filter\"\n page_header_link_request_dict = {\"action\": \"show_list\"}\n page_header_list_of_warnings = None\n\n if self.error_msg_queue_list:\n page_header_list_of_warnings = self.helper_convert_msg_queue_list_to_list_for_output()\n\n return html_page_templates.page_list_notes_template(list_taskid_desc=list_taskid_desc,\n self_sess_action_auth=self.sess_action_auth, title=title,\n primary_task_store=self.task_store, alt_task_store=alt_task_store,\n alt_task_store_name=alt_task_store_name, history_back_id=history_id,\n virtual_folders=self.woolnote_config.virtual_folders,\n single_task_line_ids=set(self.woolnote_config.single_note_line_id.keys()),\n page_header_first_text=page_header_first_text,\n page_header_optional_link_button_name=page_header_link_button_name,\n page_header_optional_link_button_request_dict=page_header_link_request_dict,\n page_header_optional_list_of_warnings=page_header_list_of_warnings)", "def test_html(self):\n tags = (('<input', 3),\n ('<span', 1),\n ('<button', 1))\n\n for text, count in tags:\n with self.subTest():\n self.assertContains(self.resp, text, count)", "def view_tags():\n tags = dict([ [k[8:],v] for k,v in os.environ.items() if k.startswith(\"HTTPBIN_\") ])\n\n if not tags:\n return Response(response=\"{}\", status=404, mimetype=\"application/json\")\n\n return jsonify(tags)", "def tags(request, error='', message='', tag_id=None):\n tags = Tag.objects.all()\n return render_to_response('feedback/tags.html', {\n 'error': error,\n 'message': message,\n 'tags': tags, 'tag_id': tag_id,\n }, context_instance=RequestContext(request))", "def _parse(self):\n pass", "def render_POST(self, request):\n jsonPayload = request.content.read()\n jsonParsed = json.loads(jsonPayload)\n\n if self.explode:\n request.setResponseCode(500)\n return \"sadness for you, today.\"\n\n if jsonParsed[\"Type\"] == \"pre-hook\" and self.pre:\n return self._renderPreHook(request, jsonParsed)\n elif jsonParsed[\"Type\"] == \"post-hook\" and self.post:\n return self._renderPostHook(request, jsonParsed)", "def preprocess(self, requests):\r\n 
input_batch = None\r\n for idx, data in enumerate(requests):\r\n text = data.get(\"data\")\r\n if text is None:\r\n text = data.get(\"body\")\r\n input_text = text.decode('utf-8')\r\n\r\n ################input处理\r\n question = input_text\r\n entity = self.NER(question)\r\n print('your question:{}\\nentity:{}'.format(question,entity))\r\n ################处理完毕\r\n return [entity]", "def handle(self):\n req_lines = self._read_lines()\n if not req_lines:\n self.cleanup()\n for req in req_lines:\n log.debug('%s => %s', self.client, req)\n req = req.split()\n cmd = req.pop(0)\n try:\n self.get_command(cmd)(req)\n result = [OK]\n except Exception as error:\n result = [ERROR, error.message]\n self.send_line(' '.join(result))\n self.flush()", "def parse_request(self, request):\n request.process_inputs()", "def _parse(self):\n soup = BS(self._current_html, 'lxml')\n for item in soup.select('div.c'):\n temp = {}\n # main content\n ctt = item.select('span.ctt')\n if not ctt:\n continue\n weibo_body = item.select('div')\n if len(weibo_body) > 1:\n temp['content'] = weibo_body[0].text\n btn_group = weibo_body[1].text\n else:\n temp['content'] = weibo_body[0].select('span.ctt')[0].text\n btn_group = weibo_body[0].text\n temp['is_repost'] = True if REPO_TEST_PATTERN.match(\n temp['content']) else False\n try:\n temp['like_num'] = LIKE_NUM_PATTERN.findall(btn_group)[0]\n temp['cmt_num'] = COMMENT_NUM_PATTERN.findall(btn_group)[0]\n temp['repo_num'] = REPO_NUM_PATTERN.findall(btn_group)[0]\n except Exception:\n pass\n cmt = item.select('.cmt')\n # visibility\n if cmt:\n try:\n temp['visibility'] = VISIBILITY_PATTERN.findall(\n cmt[0].text)[0]\n except Exception:\n pass\n\n # img in main content\n img = item.select('div a img')\n img_src = img[0].attrs['src'] if img else None\n temp['img_src'] = img_src\n LOGGER.debug('img_src: {}'.format(img_src))\n # time & source device\n ct = item.select('span.ct')\n if ct:\n ct = ct[0]\n text = ct.text\n reg_result = TIME_PATTERN.findall(text)[0]\n\n temp['time'] = ar(\n '{}年{}'.format(self._current_year, reg_result[0]),\n DATE_FMTS[0]\n ).naive if reg_result[0] else ar(\n reg_result[1], DATE_FMTS[1]\n ).naive\n temp['source'] = SOURCE_DEVICE_PATTERN.findall(text)[0]\n self._post_item = Post(**temp)\n self._attachment_item = Attachment(\n uri=img_src, post=self._post_item)\n self._store()", "def independent_tags(self):\n if not RerankingParser._parser_model_loaded:\n raise ValueError(\"You need to have loaded a parser model in \"\n \"order to calculate most likely tags.\")\n return Tree(self.sentrep.makeFailureTree('X')).tags()", "def test_tags_search_content_blank_1(self):\n\n po = self.catalog.load_pageobject('TagsPage')\n po.goto_page()\n start_url = po.current_url()\n\n # perform the search\n self.browser.proxy_client.new_har(\"page\")\n po.search_for_content([])\n har_entry = self.browser.page_load_details()\n\n end_url = po.current_url()\n\n # check for errors\n assert har_entry is not None, \\\n \"failed to load the uri. 
http archive unavailable.\"\n assert self.browser.error_loading_page(har_entry) is True, \\\n \"while on the tags page %s,\" % (start_url) \\\n + \" searching for content with no tags did not return an error\" \\\n + \" response code on page %s.\" % (end_url) \\\n + \" http archive follows:\\n%s\" % (pprint.pformat(har_entry))", "def render(\n self, context: Context, buffer: TextIO\n ) -> Union[Optional[bool], NoReturn]:\n if context.disabled_tags:\n self.raise_for_disabled(context.disabled_tags)\n return self.render_to_output(context, buffer)", "def done_parsing(self):\n # STUDENT\n return (self.input_buffer_len() == 1 ) and (self.stack_len()==1) \n # END STUDENT", "def handle_data(self, data):\n \n line_num, offset = self.getpos()\n new_pos = self.new_line_pos[line_num] + offset\n self.data_buffer += self.html_doc[self.current_pos:new_pos]\n\n content = data\n if self.filtering:\n content = self.typogrify._apply_filters(content, self.lasttag)\n self.filtered_data_length = len(content)\n\n self.data_buffer += content\n self.current_pos = new_pos + len(data)", "def handleContentComplete():", "def check_for_whitespace(data):\n for keys, value in data.items():\n if keys != 'tags':\n if not value.strip():\n abort(make_response(jsonify({\n 'status': 400,\n 'error':'{} field cannot be left blank'.format(keys)}), 400))\n if keys == 'tags':\n for tags in data['tags']:\n if not tags.strip():\n abort(make_response(jsonify({\n 'status': 400,\n 'error':'tags field cannot be left blank'}), 400))\n return True", "def parseSearchHtml(self):\n pass", "def parseSearchHtml(self):\n pass", "def test_tags_search_content_blank_2(self):\n\n po = self.catalog.load_pageobject('TagsPage')\n po.goto_page()\n start_url = po.current_url()\n\n # perform the search\n self.browser.proxy_client.new_har(\"page\")\n po.search_for_content([])\n har_entry = self.browser.page_load_details()\n\n end_url = po.current_url()\n\n # check for errors\n assert har_entry is not None, \\\n \"failed to load the uri. 
http archive unavailable.\"\n assert self.browser.error_loading_page(har_entry) is False, \\\n \"while on the tags page %s,\" % (start_url) \\\n + \" searching for content with no tags returned an error\" \\\n + \" response code on page %s.\" % (end_url) \\\n + \" http archive follows:\\n%s\" % (pprint.pformat(har_entry))", "def search_by_tags(request):\n resultTopics = []\n resultPosts = []\n if request.method == 'POST':\n data = request.data\n print(data)\n search_query = data['query']\n data_tags = list(set(data['tags']))\n print(data_tags)\n tagObjects = []\n if len(data_tags) > 0:\n tagObjects = Tag.objects.filter(hidden_tags__overlap=data_tags) | Tag.objects.filter(reduce(operator.and_, (Q(wikidataID=tag_id) for tag_id in data_tags)))\n for tagObject in tagObjects:\n print(\"LOL\")\n tag_topics = tagObject.topics.all()\n tag_posts = tagObject.posts.all()\n for topic in tag_topics:\n if topic not in resultTopics:\n resultTopics.append(topic)\n for post in tag_posts:\n if post not in resultPosts:\n resultPosts.append(post)\n # for tag in data[\"tags\"]:\n # try:\n # tagObjects = Tag.objects.filter(wikidataID=tag)\n # except Tag.DoesNotExist:\n # continue;\n # for tagObject in tagObjects:\n # tag_topics = tagObject.topics.all()\n # tag_posts = tagObject.posts.all()\n # for topic in tag_topics:\n # if topic not in resultTopics:\n # resultTopics.append(topic)\n # for post in tag_posts:\n # if post not in resultPosts:\n # resultPosts.append(post)\n print(resultTopics);\n print(resultPosts);\n\n query_topics = Topic.objects.filter(name__icontains=search_query)\n query_posts = Post.objects.filter(content__icontains=search_query)\n for topic in query_topics:\n if topic not in resultTopics:\n resultTopics.append(topic)\n for post in query_posts:\n if post not in resultPosts:\n resultPosts.append(post)\n\n all_relations = Relation.objects.all()\n for topic in resultTopics:\n for relation in all_relations:\n if (topic == relation.topic_from) and (relation.topic_to not in resultTopics):\n resultTopics.append(relation.topic_to)\n if (topic == relation.topic_to) and (relation.topic_from not in resultTopics):\n resultTopics.append(relation.topic_from)\n\n TopicSerializer.Meta.depth = 1\n PostNestedSerializer.Meta.depth = 1\n\n topicSerializer = TopicNestedSerializer(resultTopics, many=True)\n #topicSerializer.Meta.depth = 1\n postSerializer = PostNestedSerializer(resultPosts, many=True)\n #postSerializer.Meta.depth = 1\n\n return Response({'topics':topicSerializer.data, 'posts':postSerializer.data})", "def _process_html(self) -> None:\n opinion_json = self.request[\"response\"].json()\n for case in opinion_json:\n url = self._get_url(case[\"docketNumber\"], case[\"docketEntryId\"])\n status = (\n \"Published\"\n if case[\"documentType\"] == \"T.C. 
Opinion\"\n else \"Unpublished\"\n )\n self.cases.append(\n {\n \"judge\": case[\"judge\"],\n \"date\": case[\"filingDate\"][:10],\n \"docket\": case[\"docketNumber\"],\n \"url\": url,\n \"name\": titlecase(case[\"caseCaption\"]),\n \"status\": status,\n }\n )", "def handle_tag_search(self, tag_text):\n log.debug(\"Handling tag search: %s\", tag_text)\n tags = tag_text.split()\n self.filter_tags = tags\n self.current_selected = 0\n self._refresh()", "def process_doc_html(self, doc_in):\n self.feed(doc_in) #SGMLParser call\n self.close() #SGMLParser call\n self.hand_off_temp_pieces('to_doc_pieces')\n self.all_pieces = self.all_pieces[:-16] # drop </body></html>\n return self.all_pieces", "def tag_sents(self, sents):\n # WORK HERE!!", "def tags(self):\n # See also. Sentence.__repr__().\n ch, I,O,B = self.chunk, INSIDE+\"-\", OUTSIDE, BEGIN+\"-\"\n tags = [OUTSIDE for i in range(len(self.sentence.token))]\n for i, tag in enumerate(self.sentence.token): # Default: [WORD, POS, CHUNK, PNP, RELATION, ANCHOR, LEMMA]\n if tag == WORD:\n tags[i] = encode_entities(self.string)\n elif tag == POS and self.type:\n tags[i] = self.type\n elif tag == CHUNK and ch and ch.type:\n tags[i] = (self == ch[0] and B or I) + ch.type\n elif tag == PNP and self.pnp:\n tags[i] = (self == self.pnp[0] and B or I) + \"PNP\"\n elif tag == REL and ch and len(ch.relations) > 0:\n tags[i] = [\"-\".join([str(x) for x in [ch.type]+list(reversed(r)) if x]) for r in ch.relations]\n tags[i] = \"*\".join(tags[i])\n elif tag == ANCHOR and ch:\n tags[i] = ch.anchor_id or OUTSIDE\n elif tag == LEMMA:\n tags[i] = encode_entities(self.lemma or \"\")\n elif tag in self.custom_tags:\n tags[i] = self.custom_tags.get(tag) or OUTSIDE\n return tags", "def testTagJobs(self):\n self.assertTrue(\"C#\" in self.app._tag_jobs(\"C#\"))\n self.assertTrue(\"C++\" in self.app._tag_jobs(\"c++\"))\n self.assertTrue(\"Objective C\" in self.app._tag_jobs(\"obj-c\"))\n self.assertTrue(\".NET\" in self.app._tag_jobs(\".NET\"))\n self.assertEqual(0, len(self.app._tag_jobs(\"random text to see\")))", "def feed(self):\n HTMLParser.feed(self, self.dirty_html)", "def __list_all_tags(self):\n\n tags_dict = get_data.get_tagnames_dict()\n if len(tags_dict) > 0:\n first_str = 'tag'\n second_str = 'top posts scraped'\n third_str = 'recent posts scraped'\n descriptor = '{:<40} {:<20} {}'\n print('')\n print(descriptor.format(first_str, second_str, third_str))\n print(descriptor.format(len(first_str) * '-', len(second_str) * '-',\n len(third_str) * '-'))\n for number, tag in tags_dict.items():\n space_str = ' ' if len(str(number)) > 1 else ' '\n first = '[' + space_str + str(number) + '] ' + tag\n second = str(get_data.get_top_tag_post_count(tag))\n third = str(get_data.get_recent_tag_post_count(tag))\n print(descriptor.format(first, second, third))\n else:\n print('no tags found in the database')", "def pos_tags(self):\n \n msg(\"Getting POS tag list...\")\n tags = []\n \n # loop through sentences\n for sent in self.tagged_sents:\n \n # loop through tagged words\n for (word, pos) in sent:\n \n # add tag if it's not already in list\n if pos not in tags:\n tags.append(pos)\n\n msg(\"done\\n\")\n \n return tags" ]
[ "0.594429", "0.57760024", "0.57655793", "0.5725321", "0.5628237", "0.56022376", "0.55851656", "0.55792296", "0.5511034", "0.55045795", "0.54749936", "0.545959", "0.545458", "0.5436897", "0.54287684", "0.54231405", "0.5336545", "0.52850664", "0.5254251", "0.5225636", "0.5221031", "0.52201325", "0.5201322", "0.519538", "0.5191731", "0.519101", "0.5189561", "0.51572347", "0.51532453", "0.51466376", "0.5142163", "0.5138853", "0.5135116", "0.5128888", "0.5128888", "0.50979745", "0.5091106", "0.5091106", "0.5081202", "0.5059695", "0.50561327", "0.504759", "0.5043235", "0.5026158", "0.50227773", "0.50147057", "0.49902245", "0.4989843", "0.49831605", "0.49802893", "0.4978882", "0.4977709", "0.49746287", "0.49642235", "0.49571836", "0.49559706", "0.49404946", "0.49377152", "0.49335688", "0.49233258", "0.49112272", "0.49082145", "0.4893103", "0.48821187", "0.48798883", "0.48626783", "0.48452264", "0.48450163", "0.48396045", "0.48383296", "0.48339266", "0.48253933", "0.4819402", "0.48162958", "0.4812857", "0.47983888", "0.4791583", "0.478779", "0.47774592", "0.4776762", "0.47764158", "0.47761747", "0.47736344", "0.47665584", "0.4759279", "0.47582108", "0.47515357", "0.4750534", "0.47490844", "0.47490844", "0.47483057", "0.47478458", "0.4742644", "0.47420695", "0.47362974", "0.47322312", "0.4729567", "0.47246003", "0.47226334", "0.4721276", "0.47202814" ]
0.0
-1
The UI is sending tagged tokens back to the server. Save them to train parserator
def tokens_dump(docid):
    tagged_strings = set()
    labels = get_labels()
    tagged_sequence = labels  # replacing prep_inputs method. still works?
    tagged_strings.add(tuple(tagged_sequence))
    outfile = SETTINGS.XML_LOCATION + "/" + docid + ".xml"
    try:
        os.remove(outfile)
    except OSError:
        pass
    appendListToXMLfile(tagged_strings, MODULE, outfile)
    if len(queue) == 0:
        return "All done!"
    else:
        return queue.pop(0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse(self, tokenizer):\n pass", "def __init__(self):\n self.tokens = []", "def tokenize(self):\n\n x = [] # input documents\n for file_path in glob.glob(self.train_dir + '*.txt'):\n file_as_string = open(file_path).read()\n x.append(file_as_string)\n\n self.tokenizer.fit_on_texts(x)\n print('input vocabulary size:', len(self.tokenizer.word_index))\n pickle_file = open('Model/tokenizer.p', 'wb')\n pickle.dump(self.tokenizer, pickle_file)", "def train(self, corpus):\n self.tokens = []\n self.tags = []\n sentences = corpus.split(NEW_LINE)\n for sentence in sentences:\n start = START_SIGHT + SLASH + START_SIGHT + SPACE + START_SIGHT + SLASH + START_SIGHT + SPACE\n end = SPACE + END + SLASH + END\n sentence = start + sentence + end \n tokens = sentence.split(SPACE)\n for t in tokens:\n token = t.rsplit(SLASH, 1)\n if (len(token) > 1):\n self.tokens.append(token) \n self.tags.append(token[TAG_INDEX])\n \n nonsense_cases = set([(END, START_SIGHT), (START_SIGHT, END),\n (START_SIGHT, START_SIGHT, END),\n (END, START_SIGHT, START_SIGHT)])\n self.bigram_tags = [b for b in zip(self.tags[:-1], self.tags[1:]) if b not in nonsense_cases]\n self.trigram_tags = [t for t in zip(self.tags[:-1], self.tags[1:], self.tags[2:])\\\n if not (t[WORD_INDEX], t[TAG_INDEX]) in nonsense_cases and\\\n not (t[WORD_INDEX], t[TAG_INDEX]) in nonsense_cases]", "def process_new_tokens(tokens,processed_tokens_set, model, dictionary):\n if hasattr(model, 'using_pretrained') and model.using_pretrained is not None:\n processed_tokens_set.update(tokens)\n update_embedding_layer(processed_tokens_set, model, dictionary)", "def tokens():\n pass", "def predict_tokens(self, tokens):\n return", "def create_tokens_li():\n cnt=0\n for file in docs:\n file_name = open(\"./corpus/\"+ str(file) + \".txt\")\n print(cnt)\n cnt+=1\n words = file_name.read()\n tokens_doc = nltk.word_tokenize(words)\n tokens_doc = [w.lower() for w in tokens_doc]\n #tokens_doc = [snowball_stemmer.stem(token) for token in tokens_doc]\n tokens_doc = [token for token in tokens_doc if token not in nltk.corpus.stopwords.words('english')]\n tokens_li.append(tokens_doc)\n\n\n #storing in json file\n with open('savers/tokens.json', 'w') as fp:\n json.dump(tokens_li, fp)", "def tokenize_and_save( inputs ):\n # split into words\n # split on whitespaces and make it all lowercase\n print( 'performing word tokenization' )\n tokenized = []\n file_count = 0\n start = time.time()\n for i, r in enumerate( inputs ):\n tokenized.append( preprocessor( r ) )\n if i % 10000 == 0:\n sys.stdout.write( '\\r tokenized: {}/{} reviews in {}s'.format( i, NO_REVIEWS, time.time() - start ) )\n if i != 0 and i % 100000 == 0:\n sys.stdout.write( '\\n' )\n print( 'saving tokinzed data to file' )\n pickle.dump( tokenized, open( os.path.join( DATA_HOME, TOKENIZED_FILES.format( file_count ) ), 'wb' ) )\n file_count += 1\n tokenized = []\n \n sys.stdout.write( '\\r processed: {}/{} reviews in {}s\\n'.format( i, NO_REVIEWS, time.time() - start ) )\n print( 'saving tokinzed data to file' )\n pickle.dump( tokenized, open( os.path.join( DATA_HOME, TOKENIZED_FILES.format( file_count ) ), 'wb' ) )", "def next_token(self, context, token):", "def _parse_tokens(self, body):\n\n old_token = self.token\n old_json_token = self.json_token\n\n self.token = self._parse_token(body)\n self.json_token = self._parse_json_token(body)\n\n logger.debug('Token set to: %s (Old: %s)', self.token, old_token)\n logger.debug('JSON token set to: %s (Old: %s)', self.json_token,\n old_json_token)", "def token(self, 
value):\r\n self._token = value", "def save(self, *args, **kwargs):\n self._update_search_tokens()\n super().save(*args, **kwargs)", "def _build_token_dict(self, corpus: List[List[str]]):\n self.token2idx = self.load_from_vocab_file(self.vocab_path)\n self.idx2token = dict([(value, key)\n for key, value in self.token2idx.items()])\n logging.debug(f\"build token2idx dict finished, contains {len(self.token2idx)} tokens.\")\n self.dataset_info['token_count'] = len(self.token2idx)", "def train(self):\n self.transitions = {}\n if self.order > len(self.tokens) - 1:\n print(\"Unable to train: Hit upper bound on order, given corpus.\")\n for i in range(0, len(self.tokens) - self.order):\n ngram = tuple(self.tokens[i:i+self.order])\n if ngram in self.transitions:\n self.transitions[ngram].append(self.tokens[i+self.order])\n elif ngram not in self.transitions:\n self.transitions[ngram] = [self.tokens[i+self.order]]", "def save_token(self):\n db.session.add(self)\n db.session.commit()", "def tokenize(self):\n self.__create_tokens()\n copy_obj = rtf2xml.copy.Copy(bug_handler = self.__bug_handler)\n if self.__copy:\n copy_obj.copy_file(self.__write_to, \"tokenize.data\")\n copy_obj.rename(self.__write_to, self.__file)\n os.remove(self.__write_to)", "def __init__(self, token):\n self.token = token", "def __init__(self, token):\n self.token = token", "def __init__(self, token):\n self.token = token", "def tokenizer(self):\n tokenizer = RegexpTokenizer(r'\\w+')\n \n self.tweet_tokenized_train = [tokenizer.tokenize(x.lower()) for x in self.tweet_prepro_train]\n self.tweet_tokenized_test = [tokenizer.tokenize(x.lower()) for x in self.tweet_prepro_test]", "def __get_token_data__(self):\n raise Exception(\"Implement me!\")", "def init_tokens(self):\n raise NotImplementedError('Abstract method.')", "def tag(self, sent):\n # WORK HERE!!", "def tokenize(self):\n\n self.feats = {\n 'features': [], # Lists of the `InputFeatures` objects.\n 'segments': [], # Segments of the phrase. 0: Promoun, 1: A-term, 2: B-term \n 'df_ids': [], # DataFrame index.\n 'target_token_ids': [] # Indexes of the target term in the tokens lists.\n }\n unique_id = 0 # Unique ID of the dataset.\n for _, row in tqdm(self.df.iterrows()):\n segment_tokens = self.tokenize_single_row(row)\n for j, segment in enumerate(segment_tokens):\n if segment['target_token_index'] > 0:\n features = self.tokens_to_features(unique_id, segment['tokens'])\n unique_id += 1\n self.feats['features'].append(features)\n self.feats['segments'].append(j)\n self.feats['target_token_ids'].append(segment['target_token_index'] )\n self.feats['df_ids'].append(row.ID)", "def __encode_token(self, text: list) -> dict:\n encoded_input = self.tokenizer(\n [str(string) for string in text],\n truncation=True,\n padding=True,\n return_tensors=\"pt\",\n )\n\n return encoded_input", "def __init__(self, token):\n\n self.token = token", "def semcor2token(args):\r\n input_files = list_files(*args.input_files)\r\n output_dir = Path(args.output_dir)\r\n if not output_dir.is_dir():\r\n try:\r\n output_dir.mkdir()\r\n except:\r\n print('Invalid output directory name. 
Files will be stored in default directory.', file = stderr)\r\n output_dir = output_default / 'typetoken'\r\n if not output_dir.is_dir():\r\n output_dir.mkdir()\r\n multiword = args.multiword\r\n for input_file in input_files:\r\n corpus_file = CorpusFile(input_file)\r\n filename = corpus_file.shortname + '.txt'\r\n dirname = output_dir / corpus_file.concordance\r\n if not dirname.exists():\r\n dirname.mkdir()\r\n output_file_name = dirname / filename\r\n with output_file_name.open('w') as output_file:\r\n for word in corpus_file.text.find_all(['wf', 'punc']):\r\n if word.name == 'punc':\r\n output_file.write('\\t'.join([word.string, word.string, 'punc\\n']))\r\n elif not multiword:\r\n for token in Token.from_tag(word).get_components():\r\n if args.verbose and type(token.status)==tuple:\r\n token_id = '/'.join([corpus_file.shortname, token.wordform])\r\n report_token_status(token, token_id)\r\n output_file.write('\\t'.join([token.wordform, token.lemma, token.pos]) + '\\n')\r\n else:\r\n token = Token.from_tag(word)\r\n if args.verbose and type(token.status)==tuple:\r\n token_id = '/'.join([corpus_file.shortname, token.wordform])\r\n report_token_status(token, token_id)\r\n output_file.write('\\t'.join([token.wordform, token.lemma, token.pos]) + '\\n')", "def extractTokensToPredict(data_path):\n \n sentences = []\n ids = []\n \n for event, element in etree.iterparse(data_path, tag=\"sentence\"):\n current_sentence = []\n current_ids = []\n if event == 'end':\n #For every child of the sentence tag\n for child in element:\n #Get the lemma of the token\n lemma = child.attrib['lemma']\n if '&apos;' in lemma:\n #If it is present, substitute it\n lemma = re.sub(r'(&apos;)', '\\'', lemma)\n #Check also for &apos;&apos = \"\"\n if '\\'\\'' in word:\n lemma = re.sub(r'(\\'\\')', '\\'', lemma)\n if child.tag == 'instance':\n current_ids.append(child.attrib['id'])\n else:\n current_ids.append('0')\n current_sentence.append(lemma)\n if current_sentence and current_ids:\n sentences.append(current_sentence)\n ids.append(current_ids)\n #Clear to save memory\n element.clear()\n \n print(\"File completely parsed. 
Total number of sentences %i \\n\" %len(sentences))\n print()\n return sentences, ids", "def train(self, corpus):\n\n\n temp = \"\"\n for sentence in corpus.corpus:\n\n i = 0\n for datum in sentence.data:\n # print str(sentence.data)\n self.total=self.total+1\n token = datum.word\n self.unigramCounts[token] = self.unigramCounts[token] + 1\n if (i == 0):\n temp = datum.word\n i = i + 1\n continue\n\n i = i + 1\n\n key = temp + \",\" + token\n self.bigramCounts[key] = self.bigramCounts[key] + 1\n # print token\n temp = token\n\n pass", "def token(self) -> str:", "def train_punkt_sent_tokenizer(train_ref,train_pickle):\r\n\timport nltk.tokenize.punkt\r\n\ttokenizer = nltk.tokenize.punkt.PunktSentenceTokenizer()\r\n\t\r\n\t# read training corpus\r\n\timport codecs\r\n\ttext = codecs.open(train_ref).read()\r\n\ttokenizer.train(text)\r\n\r\n\t# dump pickled tokenizer\r\n\timport pickle\r\n\tout = open(train_pickle,\"wb\")\r\n\tpickle.dump(tokenizer, out)\r\n\tout.close()", "def add_tokens(self, sample):\n # Text\n inputs = self._tokenizer.encode_plus(sample['text'],\n add_special_tokens=True,\n max_length=self._max_text_length,\n padding='max_length', # TODO padding here or in model (together with item_glove)?\n truncation=True, # truncate to 512 (added for MSNBC dataset)\n return_attention_mask=True)\n # TODO warn if text was truncated\n #if len(TODO) > self._max_text_length:\n # self._logger.info(f'Truncate long input sentence ({len(TODO)} tokens) to {self._max_text_length}')\n sample['text_tokenized'] = inputs['input_ids']\n sample['text_attention_mask'] = inputs['attention_mask']\n # Item name (mention/surface form)\n inputs = self._tokenizer.encode(sample['item_name'],\n add_special_tokens=False)\n sample['item_name_tokenized'] = inputs", "def xml2tokens(xml_tagged_sent, tokenized_sent, raw_sent):\n raw, entities = get_entities(xml_tagged_sent)\n if re.search(r\"ENAMEX\", raw):\n print(xml_tagged_sent)\n print(raw)\n # count += 1\n\n tokens, syllables = word_tokenize(tokenized_sent, raw_sent)\n level1_syl_tags = [\"O\" for i in range(len(syllables))]\n level2_syl_tags = [\"O\" for i in range(len(syllables))]\n level3_syl_tags = [\"O\" for i in range(len(syllables))]\n\n level1_token_tags = [\"O\" for i in range(len(tokens))]\n level2_token_tags = [\"O\" for i in range(len(tokens))]\n level3_token_tags = [\"O\" for i in range(len(tokens))]\n\n flag = False\n for entity in entities:\n value = entity[\"value\"]\n start = entity[\"start\"]\n end = entity[\"end\"]\n entity_type = entity[\"type\"]\n start_syl_id, end_syl_id = find_syl_index(start, end, syllables)\n start_tok_id, end_tok_id = find_tok_index(start_syl_id, end_syl_id, tokens)\n\n if start_syl_id != None and end_syl_id != None:\n if entity[\"level\"] == 1:\n level1_syl_tags[start_syl_id] = \"B-\" + entity_type\n for i in range(start_syl_id + 1, end_syl_id):\n level1_syl_tags[i] = \"I-\" + entity_type\n elif entity[\"level\"] == 2:\n level2_syl_tags[start_syl_id] = \"B-\" + entity_type\n for i in range(start_syl_id + 1, end_syl_id):\n level2_syl_tags[i] = \"I-\" + entity_type\n else:\n level3_syl_tags[start_syl_id] = \"B-\" + entity_type\n for i in range(start_syl_id + 1, end_syl_id):\n level3_syl_tags[i] = \"I-\" + entity_type\n else:\n print(\"{},{},\\\"{}\\\" in '{}' ({})\".format(start,end,value,raw,xml_tagged_sent))\n flag = True\n\n if start_tok_id != None and end_tok_id != None:\n if entity[\"level\"] == 1:\n level1_token_tags[start_tok_id] = \"B-\" + entity_type\n for i in range(start_tok_id+1, end_tok_id):\n 
level1_token_tags[i] = \"I-\" + entity_type\n elif entity[\"level\"] == 2:\n level2_token_tags[start_tok_id] = \"B-\" + entity_type\n for i in range(start_tok_id + 1, end_tok_id):\n level2_token_tags[i] = \"I-\" + entity_type\n else:\n level3_token_tags[start_tok_id] = \"B-\" + entity_type\n for i in range(start_tok_id + 1, end_tok_id):\n level3_token_tags[i] = \"I-\" + entity_type\n else:\n pass\n # print(\"{},{},\\\"{}\\\" in '{}' ({})\".format(start_syl_id, end_syl_id, value, raw, xml_tagged_sent))\n\n ret_syllables = list(zip([ s.text for s in syllables], level1_syl_tags, level2_syl_tags, level3_syl_tags))\n ret_tokens = list(zip( [tk.text for tk in tokens], level1_token_tags, level2_token_tags, level3_token_tags))\n return ret_syllables, ret_tokens, raw, flag", "def token(self, token):\n\n self._token = token", "def token(self, token):\n\n self._token = token", "def _process_reviews(self):\n params = {'add_special_tokens': True, 'max_length': self._max_len,\n 'return_token_type_ids': False, 'pad_to_max_length': True,\n 'return_attention_mask': True, 'return_tensors': 'pt'}\n\n self._tokens = self._review.apply(self._tokenizer.encode_plus, **params)", "def __call__(self, token_received: str, **kwargs) -> str:\n pass", "def tokenize(self):\n count = 0\n for entry in self._entries:\n token_pairs = []\n for relation in entry['relations']:\n assert len(relation) == 3\n token_pairs.append((relation[0][0],relation[1][0],relation[2][0]))\n\n num_rels = len(entry['relations'])\n num_random_rels = (self._max_seq_length - 2) // 3 - num_rels\n\n if num_random_rels>0:\n pass\n # gt_pairs = {(rel[0],rel[2]) for rel in entry['relations']}\n # random_pairs = self._get_random_pair(entry['objects'], gt_pairs, num_random_rels)\n # for pair in list(random_pairs):\n # token_pairs.append((pair[0][0],'background', pair[1][0]))\n else:\n for i in range(-num_random_rels):\n token_pairs.pop()\n\n random.shuffle(token_pairs)\n tokens = []\n for pair in token_pairs:\n tokens.extend(pair)\n\n tokens = ['[CLS]'] + tokens + ['[SEP]']\n tokens_char = tokens\n\n target = [self._tokenizer.vocab.get(self._tokenizer.tokenize(x)[0], self._tokenizer.vocab['[UNK]']) if i%3==2 else -1 for i, x in enumerate(tokens)]\n tokens = [self._tokenizer.vocab.get(self._tokenizer.tokenize(x)[0], self._tokenizer.vocab['[UNK]']) if i%3!=2 else self._tokenizer.vocab.get('[MASK]', self._tokenizer.vocab['[UNK]']) for i, x in enumerate(tokens)]\n \n for i in range(len(tokens)):\n if target[i] != -1:\n print(tokens_char[i],tokens[i],target[i])\n\n segment_ids = [0] * len(tokens)\n input_mask = [1] * len(tokens)\n # input_mask = [1 if i%3==2 else 0 for i in range(len(tokens))]\n # co_attention_mask = [-1 if i%3==2 else 1 for i in range(len(tokens))]\n # co_attention_mask = torch.zeros((self._max_region_num, self._max_seq_length))\n # co_attention_mask[0] = -1\n # co_attention_mask[-1] = -1\n \n if len(tokens) < self._max_seq_length:\n padding = [self._padding_index] * (self._max_seq_length - len(tokens))\n tokens = tokens + padding\n input_mask += padding\n segment_ids += padding \n target += [-1] * len(padding) \n\n assert_eq(len(tokens), self._max_seq_length)\n entry['input_ids'] = tokens \n entry[\"input_mask\"] = input_mask\n entry['segment_ids'] = segment_ids\n # entry[\"co_attention_mask\"] = co_attention_mask\n entry['target'] = target\n\n sys.stdout.write('%d/%d\\r' % (count, len(self._entries)))\n sys.stdout.flush()\n count += 1", "def _tokens_to_disk(self, folder=None, filename=None, tokens=None):\n assert folder and filename and 
tokens and isinstance(tokens, list)\n tokens = tokens or self.tokens\n assert tokens, \"No availale tokens to save to disk!\"\n assert isinstance(tokens, list), \"Wrong format of tokens!\"\n filepath = os.path.join(folder, filename)\n with open(filepath, 'wb') as f:\n pickle.dump(tokens, f)\n self.log.debug(\"Written tokens to file {}\".format(filepath))", "def tokens(self):\n return self._sentrep.tokens()", "def features(self, tokens, index, history):\r\n # for more details see: http://nlpforhackers.io/named-entity-extraction/\r\n \r\n # init the stemmer\r\n stemmer = SnowballStemmer('english')\r\n\r\n # Pad the sequence with placeholders\r\n tokens = [('[START2]', '[START2]'), ('[START1]', '[START1]')] + list(tokens) + [('[END1]', '[END1]'), ('[END2]', '[END2]')]\r\n history = ['[START2]', '[START1]'] + list(history)\r\n\r\n # shift the index with 2, to accommodate the padding\r\n index += 2\r\n\r\n word, pos = tokens[index]\r\n prevword, prevpos = tokens[index - 1]\r\n prevprevword, prevprevpos = tokens[index - 2]\r\n nextword, nextpos = tokens[index + 1]\r\n nextnextword, nextnextpos = tokens[index + 2]\r\n\r\n previob = history[index - 1]\r\n \r\n\r\n return {\r\n 'word': word,\r\n 'pos': pos,\r\n 'lemma': stemmer.stem(word),\r\n\r\n 'next-word': nextword,\r\n 'next-pos': nextpos,\r\n 'next-lemma': stemmer.stem(nextword),\r\n\r\n 'next-next-lemma': stemmer.stem(nextnextword),\r\n\r\n 'prev-word': prevword,\r\n 'prev-pos': prevpos,\r\n 'prev-lemma': stemmer.stem(prevword),\r\n\r\n 'prev-iob': previob,\r\n\r\n }", "def __str__(self):\n return self.token", "def assign_token(self, frame):\n \tprint(str(frame))", "def tokenize(self, file_name):\n main_body = self.cast.nodes[0].body[-1]\n token_string = self.visit(main_body)\n\n variable_map = self.dump_var_map()\n value_map = self.dump_val_map()\n\n out_file = open(file_name, \"w\")\n out_file.write(f\"{token_string}\\n\")\n\n for var in variable_map:\n out_file.write(f\"{var}\\n\")\n\n for val in value_map:\n out_file.write(f\"{val}\\n\")", "def after_parsing(self):", "def __call__(self, tokenized_text):\n raise NotImplementedError()", "def exchange_tokens(self):\n raise NotImplementedError()", "def __init__(self, tokens):\n self.mdl = self.train(tokens)", "def __init__(self, tokens):\n self.mdl = self.train(tokens)", "def act(self):\n if not self.label_candidates:\n self.label_candidates = True\n for text in self.observation.get('label_candidates', ()):\n if text:\n tokens = self.tokenize(text)\n self.add_to_dict([self.get_template(tokens)])\n\n return {'id': self.getID()}", "def add_token(self,token):\n\t\tif not token:\n\t\t\tlogging.error(\"Token cannot be empty!\")\n\t\t\texit()\n\n\t\tself.tokens.append(token.lower())\n\t\t#self.user_defined_token = token.lower()", "def data_to_token_ids(self, input_path, target_path, train = True):\n # Set up the path\n path_target = os.path.join(self.data_dir, target_path)\n\n # Initialize list\n tokens_ids = []\n tokens_length = []\n labels = []\n\n # Tokenize\n print(\"Tokenizing data in %s\" % path_target)\n self.initialize_vocabulary()\n counter = 0\n for file in input_path:\n path_input = os.path.join(self.data_dir, file)\n with open(path_input, 'r', newline=\"\\n\", encoding='utf8') as f:\n for line in f:\n counter += 1\n if counter % 100000 == 0:\n print(\"Tokenizing line %d\" % counter)\n if not train:\n line = self.word_sub.sub(r\"\", line)\n tokens_ids.append(self.sentence_to_token_ids(line))\n tokens_length.append(len(tokens_ids[-1]))\n # Insert labels for classification\n if \"pos\" in 
file:\n labels.append(1)\n elif \"neg\" in file:\n labels.append(0)\n\n # Print statistics\n print(\"Maximum length {}\".format(max(tokens_length)))\n print(\"Average length {}\".format(sum(tokens_length)/len(tokens_length)))\n print(\"Number of sentences {}\".format(len(tokens_length)))\n n_unks = sum([tokens.count(3) for tokens in tokens_ids])\n n_words = sum([len(tokens) for tokens in tokens_ids])\n print(\"Number of unks {}\".format(n_unks))\n print(\"Number of words {}\".format(n_words))\n print(\"Ratio unks/words {}%\".format(n_unks/n_words*100))\n\n # Print longest sentences\n np_tokenls_length = np.array(tokens_length)\n idx = np.argsort(np_tokenls_length)[-10:]\n for i in idx:\n print([self.dict_vocab_reverse.get(id) for id in tokens_ids[i]])\n\n return tokens_ids, tokens_length, labels", "def write_tok_to_file(self):\n dir_path = os.path.join(self.output_path, 'tokens')\n if not os.path.exists(dir_path):\n os.makedirs(dir_path)\n for dataset_name, dataset in self.amr_corpus.items():\n f = open(os.path.join(dir_path, dataset_name + '_tok.txt'), 'w')\n for doc_name, doc in dataset.items():\n for amr_id, amr_data in doc.items():\n amr_strings = self.amr_corpus[dataset_name][doc_name][amr_id]['amr_string_triples']\n if not amr_strings:\n continue\n tok = ' '.join(self.amr_corpus[dataset_name][doc_name][amr_id]['tok'])\n f.write(tok + '\\n')\n f.close()", "def build_tokens(self):\n self.advance()\n while self.__token != \"\":\n self.__tokens.append(self.token_type())\n self.advance()", "def multiword_tokens(self, mwts):\n self._mwts = mwts", "def train(self, corpus):\n lastToken = \"#\"\n for sentence in corpus.corpus:\n for datum in sentence.data:\n token = datum.word\n self.reverseBigramCount[token][lastToken] += 1\n self.bigramCount[lastToken][token] += 1\n self.unigramCount[token] += 1\n self.total += 1\n lastToken = token", "def predict(self, sample: InputSample) -> List[str]:\n doc = self.model(sample.full_text)\n tags = self._get_tags_from_doc(doc)\n if len(doc) != len(sample.tokens):\n print(\"mismatch between input tokens and new tokens\")\n\n return tags", "def pos_tagger(self, text):\n\n body = {'text': text}\n body = json.dumps(body)\n url = self.base_url + '/language-service/phoenix-language/nlp/pos'\n headers = {\"ApiKey\": self.api_key, \"Content-type\": \"application/json\"}\n response = requests.post(url=url, data=body, headers=headers).json()\n return response", "def _set_token(self):\n f = open(\".cli_token\")\n data = f.read()\n if data is not None:\n self.token = data\n return self.token", "def setOwnTokens(self):\n\t\tself.removeOwnPunctuation()\n\t\tself.removeOwnStopWords()", "def post(self):\n dic = escape.json_decode(self.request.body)\n saveEditor(dic)\n # useful code goes here\n self.write(json.dumps({'status': 'ok', 'sent': dic}))\n self.finish()", "def parse(token):\n\n pass", "def features(self, tokens, index, history):\r\n # for more details see: http://nlpforhackers.io/named-entity-extraction/\r\n\r\n # init the stemmer\r\n stemmer = SnowballStemmer('english')\r\n\r\n # Pad the sequence with placeholders\r\n tokens = [('[START2]', '[START2]'), ('[START1]', '[START1]')] + list(tokens) + [('[END1]', '[END1]'), ('[END2]', '[END2]')]\r\n history = ['[START2]', '[START1]'] + list(history)\r\n\r\n # shift the index with 2, to accommodate the padding\r\n index += 2\r\n\r\n word, pos = tokens[index]\r\n prevword, prevpos = tokens[index - 1]\r\n prevprevword, prevprevpos = tokens[index - 2]\r\n nextword, nextpos = tokens[index + 1]\r\n nextnextword, nextnextpos 
= tokens[index + 2]\r\n previob = history[index - 1]\r\n contains_dash = '-' in word\r\n contains_dot = '.' in word\r\n allascii = all([True for c in word if c in string.ascii_lowercase])\r\n\r\n allcaps = word == word.capitalize()\r\n capitalized = word[0] in string.ascii_uppercase\r\n\r\n prevallcaps = prevword == prevword.capitalize()\r\n prevcapitalized = prevword[0] in string.ascii_uppercase\r\n\r\n nextallcaps = nextword == nextword.capitalize()\r\n nextcapitalized = nextword[0] in string.ascii_uppercase\r\n\r\n return {\r\n 'word': word,\r\n 'lemma': stemmer.stem(word),\r\n 'pos': pos,\r\n 'all-ascii': allascii,\r\n\r\n 'next-word': nextword,\r\n 'next-lemma': stemmer.stem(nextword),\r\n 'next-pos': nextpos,\r\n\r\n 'next-next-word': nextnextword,\r\n 'next-next-pos': nextnextpos,\r\n\r\n 'prev-word': prevword,\r\n 'prev-lemma': stemmer.stem(prevword),\r\n 'prev-pos': prevpos,\r\n\r\n 'prev-prev-word': prevprevword,\r\n 'prev-prev-pos': prevprevpos,\r\n\r\n 'prev-iob': previob,\r\n\r\n 'contains-dash': contains_dash,\r\n 'contains-dot': contains_dot,\r\n\r\n 'all-caps': allcaps,\r\n 'capitalized': capitalized,\r\n\r\n 'prev-all-caps': prevallcaps,\r\n 'prev-capitalized': prevcapitalized,\r\n\r\n 'next-all-caps': nextallcaps,\r\n 'next-capitalized': nextcapitalized,\r\n }", "def parse_tokens(self, tokens):\n for token in tokens:\n self.parse_token(token)", "def tokenize(essays, essay_len=500):\n # normalise scores into range of [0-1]\n normalised_scores = normaliseScores(essays)\n logging.info(\"Begin tokenization of essays...\")\n # Begin the timer \n t = time()\n # Fit_on_texts method creates the vocabulary index based on word frequency\n nltk_tokenizer.fit_on_texts(essays['essay'])\n logging.info(\"Tokenization Done!\")\n logging.info(\"Time taken to tokenize essays: {} seconds \".format(round(time() - t), 2))\n logging.info(\"Tokenization Summary: \")\n logging.info(\"Number of unique words found: {}\".format(len(nltk_tokenizer.word_index)))\n logging.info(\"Number of essays used for tokenisation: {}\".format(nltk_tokenizer.document_count)) \n # Transform each essay into a sequence of integers by converting each word to their unique integer asasigned in word_index\n sequence_vectors = nltk_tokenizer.texts_to_sequences(essays['essay'])\n print(sequence_vectors[0])\n # Pad the vectors so that all of them is essay_len long\n data = pad_sequences(sequence_vectors, maxlen=essay_len)\n label = normalised_scores\n # Reshape the label tensor to (num_essays, 1)\n label = np.reshape(label, (len(label), 1))\n logging.info('Shape of data tensor: {}'.format(data.shape))\n logging.info('Shape of label tensor: {}'.format(label.shape))\n return data, label", "def attachTokens(tokens, dynamicVocab=False, parse=False):\n tokenTxt, posTxt = '', ''\n global importantFrequentWordDic\n if dynamicVocab:\n for t in tokens:\n if parse:\n if len(tokens) > 1:\n if t.getLemma() in importantFrequentWordDic:\n tokenTxt += t.getTokenOrLemma() + '_'\n else:\n tokenTxt += unk + '_'\n else:\n tokenTxt += t.getTokenOrLemma() + '_'\n else:\n if t.getLemma() in importantFrequentWordDic:\n tokenTxt += t.getTokenOrLemma() + '_'\n else:\n tokenTxt += unk + '_'\n tokenTxt = tokenTxt[:-1]\n else:\n tokenTxt = '_'.join(t.getTokenOrLemma() for t in tokens)\n posTxt = '_'.join(t.posTag for t in tokens)\n return tokenTxt, posTxt.lower()", "def print_token(self):\n\n log.success(\"Your token : [{}]\".format(self.get_token()))", "def parse(self):", "def toLingDataToken(token):\n\n t = Token()\n\n t.set(\n id=token.i,\n 
word=token.orth_,\n lemma=token.lemma_,\n POS=token.tag_,\n SPOS=token.pos_,\n depID=token.dep,\n depStr=token.dep_,\n NE=token.ent_type_,\n foreign=token.is_oov\n )\n\n # setting features\n '''\n t.features = {}\n #print(t.POS)\n featureStr = translate(t.POS)\n # save string form of feature translation\n t.features['str'] = featureStr\n\n featureArr = featureStr.split(\"+\")\n #print(featureArr)\n # find the first feature\n i = 0\n while len(featureArr[i]) < 1:\n i += 1\n\n t.features['type'] = featureArr[i]\n if t.features['type'] in [\"N\"]:\n # look for number\n i += 1\n while i < len(featureArr):\n # this means it's probably a number declaration\n if len(featureArr[i]) < 4:\n t.features['number'] = featureArr[i]\n # and next feature could be type of noun\n if i + 1 < len(featureArr):\n t.features['isProper'] = featureArr[i + 1]\n break\n i += 1\n\n if t.features['type'] in [\"V\"]:\n # look for person and number\n i += 1\n while i < len(featureArr):\n # this means it's probably a person declaration\n if len(featureArr[i]) < 4:\n t.features['person'] = featureArr[i]\n # and next feature could be number\n if i + 1 < len(featureArr):\n t.features['number'] = featureArr[i + 1]\n break\n else:\n # probably a tense\n t.features['tense'] = featureArr[i]\n t.features['isParticiple'] = (\"Part\" in featureArr[i])\n\n i += 1\n #print(t.features)\n '''\n\n # setting wordType\n if token.tag_ == \"BES\": # copula\n t.set(wordType=4)\n elif token.pos_ == \"VERB\":\n t.set(wordType=1)\n elif token.pos_ == \"NOUN\" or token.pos_ == \"PROPN\":\n t.set(wordType=2)\n elif token.pos_ == \"PRON\":\n t.set(wordType=3)\n else:\n t.set(wordType=5)\n\n # spaCy does not have coreferencing...\n\n return t", "def __parse_corpus(self, corpus):\n corpus = self.__handle_corpus_unkwon_words(corpus)\n start_token = ' '.join([NGramModel.START_SENTENCE_TOKEN]*(self.__n-1))\n word_list = corpus.replace(NGramModel.START_SENTENCE_TOKEN, start_token).split()\n \n for n in range(1, self.__n+1): \n self.__ngram_counts[n] = {}\n for ngram, count in Counter(self.__generate_n_grams(word_list, n)).items():\n self.__ngram_counts[n][' '.join(ngram)] = count", "def tagger():", "def generate_text(session, model, config, starting_text='<eos>',\n stop_length=100, stop_tokens=None, temp=1.0):\n state = model.initial_state.eval()\n # Imagine tokens as a batch size of one, length of len(tokens[0])\n tokens = [model.vocab.encode(word) for word in starting_text.split()]\n for i in xrange(stop_length):\n ### YOUR CODE HERE\n #print tokens\n feed = {}\n #x = np.array([tokens[-1]])\n #x.reshape(1,1)\n feed[model.input_placeholder] = [[tokens[-1]]]\n feed[model.dropout_placeholder] = 1\n feed[model.initial_state] = state\n y_pred, state = session.run([model.predictions[-1], model.final_state], feed_dict=feed)\n ### END YOUR CODE\n next_word_idx = sample(y_pred[0], temperature=temp)\n tokens.append(next_word_idx)\n if stop_tokens and model.vocab.decode(tokens[-1]) in stop_tokens:\n break\n output = [model.vocab.decode(word_idx) for word_idx in tokens]\n return output", "def __init__(self, text, no_ne_label = 'O'):\n # Adding re.UNICODE with \\s gets rid of some stupid special unicode whitespaces\n # That's neccessary, because otherwise the stanford POS tagger will split words at\n # these whitespaces and then the POS sequences have different lengths from the\n # token sequences\n text = re.sub(r\"[\\t\\s]+\", \" \", text, flags=re.UNICODE)\n tokens_str = [token_str.strip() for token_str in text.strip().split(\" \")]\n self.tokens = 
[Token(token_str) for token_str in tokens_str if len(token_str) > 0]\n self.no_ne_label = no_ne_label", "def predict(self, tokens: TokenSeq) -> PosSeq:\n _, pos_tags = self.predict_greedy(tokens)\n # _, _, pos_tags = self.predict_viterbi(tokens)\n return pos_tags", "def save(self):\n # EXERCISE:\n # - save self.access_token, self.user_id, self.save_message to access token file AccessData.ACCESS_TOKEN_FILE\n # @see http://stackoverflow.com/questions/12309269/write-json-data-to-file-in-python\n# TODO ==> INSERT CODE HERE <==\n\n logger.debug('saved access token in file %s' % (AccessData.ACCESS_TOKEN_FILE))", "def push_tokens(self, iterable):\n self.tokens = itertools.chain(iter(iterable), iter([self.next]), self.tokens)\n self.gettok()", "def OnTokenVisible(self, event):\n\n for j in range(len(self.canvas_SG)):\n items = self.canvas_SG[j].find_withtag('beattoken') # it's the same for lines and text\n for i in items:\n # Get the state and reverse it\n state = self.canvas_SG[j].itemcget(i,'state')\n if state == 'normal':\n self.canvas_SG[j].itemconfig(i, state='hidden')\n else:\n self.canvas_SG[j].itemconfig(i, state='normal')", "def train(self, train_data):\n with open(train_data, 'r') as train_data:\n while True:\n tokens = train_data.readline().split()\n pos = train_data.readline().split()\n labels = train_data.readline().split()\n if not tokens or not pos or not labels:\n break\n # Generate transition probabilities\n for i in range(0, len(labels) - self.N_VALUE + 1):\n self.add_label_sequence(labels[i:i + self.N_VALUE])\n # Generate lexical generation probabilities\n for i in range(0, len(tokens)):\n token = tokens[i].lower()\n label = labels[i]\n self.add_word_tag(token, label)\n self.handle_unknowns()", "def sent_tokenize_tag_with_unks(self, path, fname):\n assert os.path.exists(path)\n fpath = os.path.join(path, fname)\n with open(fpath, 'r') as f:\n lines = f.read().split('\\n')\n \n # Tokenize file content\n all_tags = []\n for i, line in enumerate(lines):\n if line.strip() != \"\":\n tags = line.strip().split()\n tag_ids = torch.LongTensor(len(tags)+2)\n tag_ids[0] = self.tag2idx['<SOS>']\n for j, tag in enumerate(tags):\n if tag not in self.tag2idx:\n tag_ids[j+1] = self.tag2idx[\"<UNK>\"]\n else:\n tag_ids[j+1] = self.tag2idx[tag]\n tag_ids[j+2] = self.tag2idx['<EOS>']\n all_tags.append(tag_ids)\n return all_tags", "def get_tokens(self):\r\n return self.token_set", "async def _tokenset(self, ctx: commands.Context, token: str):\n self.config[ctx.message.server.id] = token\n dataIO.save_json('data/football/config.json', self.config)\n await self.bot.say('football-data API token set')", "def input_new_phrase(self, text):\n \n x_new_tokens = [word_idx[word] for word in text.split()]\n \n pred = self.model.predict(np.array([x_new_tokens]))\n pred = np.argmax(pred, axis=-1)[0]\n \n return [[word_list[w], tags[pred]] for (w, pred) in zip(range(len(x_new)), pred)]", "def preproc_user_input(txt, model):\r\n txt = pre_process(txt)\r\n txt_tokenized = [word for word in txt.split(\" \") if word in model.wv.vocab]\r\n return \" \".join(txt_tokenized)", "def write_tokens(self, tokenizer):\n output_file = '{}ktT.xml'.format(tokenizer.filename[:-5])\n with open(output_file, 'w') as f:\n print 'writing tokens to {}'.format(output_file)\n f.write(''.join(tokenizer.token_output))", "def get_token_data(self):\n raise NotImplementedError('Method \"get_token_data\" must be implemented in any derived class')", "def tokenize(doc):\n text = doc\n doc = doc.lower()\n doc = re.sub('[,;]', ' ', doc)\n doc = 
re.split('\\s+', doc)\n doc = sorted(list(filter(None, doc)))\n ent = le.stanfordTagger(text)\n print(ent)\n l = []\n for item in ent:\n if ent[item] in ['LOCATION', 'GPE','PERSON']:\n l.append(item)\n ent = l#ent = sorted(list(le.stanfordTagger(text).keys()))\n #print(ent)\n #ent = [e.lower() for e in ent]\n crime_type = fileCrimeClassify.extractCrimeWord(text, returnOnlyLabels=True)\n crime_type = [c.lower() for c in crime_type]\n #print(crime_type + ent)\n #print(doc)\n return doc, ent + crime_type", "def get_tokens(self, document):\n raise NotImplementedError()", "def encode(self, tokens):\n encoded = []\n for sentence in tqdm(tokens):\n tmp = []\n for token in sentence:\n try:\n index = self.vocab.index(token)\n tmp.append(index)\n except:\n tmp.append(self.unk_token_idx)\n encoded.append(tmp)\n return encoded", "def test_TreebankTokenReader():", "def tokenize(self, path):\n assert os.path.exists(path)\n # add the start of sentence token\n sentence_sep = [BOS]\n with open(path, 'r') as f:\n sentences = [BOS]\n for sentence in tqdm(f, desc='Processing file: {}'.format(path)):\n sentences += sentence.split() + sentence_sep\n # split into list of tokens\n self.data = sentences", "def _post_training(self):\n self._write_state_key()", "def train_vectorizer (train_texts):\n\n tokenizer = text.Tokenizer(num_words=TOP_K)\n tokenizer.fit_on_texts(train_texts)\n\n train_texts = tokenizer.texts_to_sequences(train_texts)\n # get and set max sequence length\n max_length = len(max(train_texts, key=len))\n if max_length > MAX_SEQUENCE_LENGTH:\n max_length = MAX_SEQUENCE_LENGTH\n\n # saving\n with open('tokenizer.pickle', 'wb') as handle:\n pickle.dump(tokenizer, handle, protocol=pickle.HIGHEST_PROTOCOL)\n\n return tokenizer, tokenizer.word_index, max_length", "def make_token(self, data: object) -> str:\n return self.serializer.dumps(data)", "def print_token(self):\n\t\tcurrent_time= datetime.now()\n\t\tcurrent_time=current_time.strftime('%Y-%m-%d %I:%M %p')\n\t\tuser_tz = self.user_id.tz or pytz.utc\n\t\tlocal = pytz.timezone(user_tz)\n\t\tcurrent_time = datetime.strftime(pytz.utc.localize(datetime.strptime(str(current_time),\"%Y-%m-%d %I:%M %p\")).astimezone(local),\"%d-%m-%Y %I:%M %p\") \n\t\tself.ensure_one()\n\t\tself.sent = True\n\t\taddress = self.env['ip.address.setting'].search([])\n\t\turl = 'http://'+address.ip_address+':'+address.port_no+'/hw_proxy/print_xml_receipt'\n\t\tdata = {\n\t\t\t\t\"jsonrpc\": \"2.0\",\n\t\t\t\t\"params\": {\"receipt\": u'<receipt align=\"center\" font=\"a\" value-thousands-separator=\",\" width=\"30\"><h3>'+self.user_id.company_id.name+'</h3><div>--------------------------------</div><p align=\"center\">Date:'+current_time+'</p><br/><p>Your Token no is generated successfully!!</p><br/><p>Token No:</p><h1>'+self.pid+'</h1><br/><br/><div>--------------------------------</div><p align=\"left\">Please take your seat,we will attain you soon!!</p><div font=\"a\"><br/>' + \\\n\t\t\t\t\t\t\t\t\t\tu'</div></receipt>'},\n\t\t\t}\n\t\treq = urllib2.Request(url,json.dumps(data), headers={\"Content-Type\":\"application/json\",})\n\t\tresult = urllib2.urlopen(req)\n\t\t# action = self.env.ref('bi_queue_management.action_token_management_tree')\n\t\t# result = action.read()[0]\n\t\t# res = self.env.ref('bi_queue_management.bi_token_management_form', False)\n\t\t# result['views'] = [(res and res.id or False, 'form')]\n\t\t# return result\n\n\t\t# context ={\n\t\t# \t\t 'default_name':'',\n\t\t# \t\t 'default_phone':'',\n\t\t# \t\t}\n\t\t# return {\n\t\t# \t\t'type': 
'ir.actions.act_window',\n\t\t# \t\t'res_model': 'bi.token.management',\n\t\t# \t\t'target' : 'inline',\n\t\t# \t\t'view_mode':'form',\n\t\t# \t\t}", "def request(self, token):\n pass", "def generate_sent(self):\n n = self._n\n\n sent = []\n prev_tokens = ['<s>'] * (n - 1)\n token = self.generate_token(tuple(prev_tokens))\n while token != '</s>':\n # WORK HERE!!\n sent += list((token,))\n prev_tokens += list((token,))\n prev_tokens = prev_tokens[1:]\n token = self.generate_token(tuple(prev_tokens))\n\n return sent", "def _save_train(self, context):\n last_train = context.user_data['last_train']\n saved_trains: dict = self._saved_trains(context)\n if last_train not in saved_trains.values():\n train_label = Train.from_json(last_train).one_line_description()\n saved_trains[train_label] = last_train\n\n # free memory\n context.user_data['last_train'] = {}", "def train(self, text, className):\n self.data.increaseClass(className)\n\n tokens = self.tokenizer.tokenize(text)\n tokens = [token for token in tokens if token not in stop]\n tokens = [x[0] for x in nltk.pos_tag(tokens) if 'VB' not in x[1] or 'DT' not in x[1] or 'IN' not in x[1] or 'W' not in x[1]]\n\n for token in tokens:\n self.data.increaseToken(token, className)", "def act(self):\n text = self.observation.get('text')\n if text:\n self.add_to_dict(filter_service_words(self.tokenize(text)))\n return {'id': self.getID()}" ]
[ "0.61516076", "0.6044435", "0.5898327", "0.5834319", "0.58167106", "0.5813318", "0.5673961", "0.5612146", "0.5571863", "0.55666846", "0.55541897", "0.5546777", "0.553209", "0.5514731", "0.5511445", "0.5496212", "0.5490531", "0.54755586", "0.54755586", "0.54755586", "0.5452871", "0.54495066", "0.54413027", "0.5410169", "0.53935", "0.53911084", "0.5387911", "0.5387818", "0.5386794", "0.5372961", "0.5372287", "0.5370557", "0.5360246", "0.5359226", "0.53518015", "0.53518015", "0.5339754", "0.5332217", "0.53251046", "0.53223735", "0.5318463", "0.5316431", "0.531464", "0.5303479", "0.5297668", "0.5284611", "0.52751404", "0.5259044", "0.5257823", "0.5257823", "0.5238262", "0.5230829", "0.521473", "0.5192911", "0.5179073", "0.5175049", "0.51744664", "0.517363", "0.5170003", "0.51674503", "0.5151675", "0.5151132", "0.51427275", "0.51168084", "0.5115243", "0.5104732", "0.5095577", "0.5092249", "0.5086836", "0.50753593", "0.5073236", "0.5068289", "0.5066406", "0.50638473", "0.50602764", "0.5056829", "0.50491345", "0.5045733", "0.50444376", "0.5044088", "0.5043555", "0.50378615", "0.5025649", "0.50153774", "0.50128865", "0.50112766", "0.50025296", "0.5001658", "0.4999231", "0.49984083", "0.49928147", "0.49894267", "0.49847504", "0.4978289", "0.49766082", "0.49752158", "0.4970773", "0.49700347", "0.49675557", "0.49663138" ]
0.5427992
23
A custom sort. Favors cases where there are already labels for the tokens from parserator. For big corpuses, parsing takes time so you don't want to parse the whole corpus just to see how it is doing
def sort_have_labels(doc_cloud_id):
    filename = SETTINGS.LABELED_LOCATION + "/" + doc_cloud_id
    if os.path.isfile(filename):
        return 0
    return 1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sorting(tokens: list):\n tokens.sort(key=lambda x: (x[0], x[1]))", "def sort_tokens(tokens: Iterable[Cwf]) -> List[Cwf]:\n return sorted(tokens, key=lambda t: (t.get_sent(), int(t.get_offset())))", "def sort_by_tokens(self, token_order):\n\n remaining_tokens = list(set(self.dictionary.tokens) - set(token_order))\n token_order = token_order + remaining_tokens\n idx_order = [self.dictionary.add_token(token) for token in token_order]\n\n self.Nx += [0] * (len(token_order) - len(self.Nx))\n self.sort_by_idxs(idx_order)\n\n # We are no longer sorted according to unigram frequencies.\n self.sorted = False", "def sort_train_labels_knn(Dist, y):\n return y[Dist.argsort(kind='mergesort')]", "def sort_by_parser_scores(self):\n self.parses.sort(key=lambda parse: -parse.parser_score)", "def sort_train_labels_knn(Dist, y):\n order = Dist.argsort(kind='mergesort')\n return y[order]", "def term_sort(want_query,not_query,inv_ind):\n \n# want_toks = tokenize_func(want_query)\n# not_toks = tokenize_func(not_query)\n \n wants = []\n for tok in want_query:\n if tok in inv_ind:\n l = len(inv_ind[tok])\n wants.append((tok,l))\n wants.sort(key = lambda x: x[1]) \n \n nots = []\n for tok in not_query:\n if tok in inv_ind:\n l = len(inv_ind[tok])\n nots.append((tok,l))\n nots.sort(key = lambda x: x[1])\n \n return wants,nots", "def sort_features_human_friendly_order(tokens, features):\n preferred_ordered_features = []\n\n # Short features last\n features = sorted(features, key=len, reverse=True)\n \n for token in tokens:\n # Iterate from last (shortest features first), and remove in-place*\n for feature in reversed(features):\n # Only add those that begins with current token\n if feature.startswith(token):\n preferred_ordered_features.append(feature)\n features.remove(feature)\n return preferred_ordered_features", "def ysort(L):\r\n return sorted(L, key=lambda x: x.freq)", "def humanSort(l): \n convert = lambda text: int(text) if text.isdigit() else text \n alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key) ] \n l.sort( key=alphanum_key )", "def filter_sort_results(scores, labels, multilabel=False, max_class_count=3):\n if multilabel:\n assert len(scores) == len(labels)\n max_class_count = len(labels)\n ids_order = range(max_class_count)\n else:\n max_class_count = min(len(labels), max_class_count)\n ids_order = np.argsort(scores)[::-1][:max_class_count]\n return ids_order", "def sort_terms(naf: KafNafParser, terms: Iterable[Cterm]) -> List[Cterm]:\n def get_offset(term: Cterm):\n tokens = [naf.get_token(tid) for tid in naf.get_dict_tokens_for_termid(term.get_id())]\n token = sort_tokens(tokens).pop()\n return token.get_sent(), int(token.get_offset())\n return sorted(terms, key=get_offset)", "def sort(self):\n\n # momentarily convert into numpy, to take advantage of their easy \n # sorting.\n top_indices = np.argsort([-n for n in self.Nx])\n self.Nx = [self.Nx[i] for i in top_indices]\n self.dictionary = h.dictionary.Dictionary([\n self.dictionary.tokens[i] for i in top_indices])\n\n self.sorted = True\n\n return top_indices", "def sorted_by_count_and_word(word_counts):\n\n return sorted(word_counts.items(), key=reversed_tuple)", "def words_in_sorted_order(self):\n print 'Words in sorted order:'\n self.words_in_sorted_order_utils(self.root)", "def sort_by_idxs(self, idx_order):\n self.Nx = [self.Nx[idx] for idx in idx_order]\n self.dictionary = h.dictionary.Dictionary(\n [self.dictionary.tokens[idx] for idx in idx_order])\n\n # We are no longer sorted according to unigram 
frequencies.\n self.sorted = False", "def sorted_by_count_desc_and_word(word_counts):\n\n return sorted(word_counts.items(), key=reversed_and_negated_tuple)", "def sort_by_reranker_scores(self):\n self.parses.sort(key=lambda parse: (parse.reranker_score,\n parse.parser_score),\n reverse=True)", "def sort_multi_lists(labels):\n unilabels = uniform_list_length(labels)\n intlist = [[i] * 3 for i in range(len(unilabels))]\n # sort_func = itemgetter(*range(len(unilabels[0])))\n sort_func = lambda item: (item[0][0], item[0][1], item[0][2])\n sort_idx = [ii[0] for (i, ii) in sorted(zip(unilabels, intlist), key=sort_func)]\n sort_labels = [unilabels[i] for i in sort_idx]\n return undo_uniform_list_length(sort_labels), sort_idx", "def sortby(self):\n ...", "def sort(self, reverse=True):\n count_word = list()\n indexs = list()\n for w in self.word2index:\n if w in self.special:\n continue\n count_word.append((self.word_count[w], w))\n indexs.append(self.word2index[w])\n\n count_word.sort(reverse=reverse)\n indexs.sort(reverse=reverse)\n\n for index, (_, word) in zip(indexs, count_word):\n self.word2index[word] = index\n self.index2word[index] = word", "def labeler(self, labels, tokens):\n encoded = []\n for idx, document in enumerate(tqdm(tokens)):\n tmp = [0 for char in range(len(document))]\n for name in labels[idx]:\n if re.match(r\"[^a-zA-Z]\", name):\n pattern = list(name)\n else:\n pattern = name\n # for indexes in re.finditer(name, document):\n # tmp[indexes.span()[0]:indexes.span()[1]] = [1 for _ in range(indexes.span()[1] - indexes.span()[0])]\n for i in range(len(document)):\n if document[i] == pattern[0] and document[i:i+len(pattern)] == pattern:\n tmp[i:i+len(pattern)] = [1 for _ in range(len(pattern))]\n encoded.append(tmp)\n\n # # Sanity check\n # for doc, enc in zip(tokens, encoded):\n # print(f\"{len(doc)}, {len(enc)}\")\n\n return encoded", "def sort_output_desc_asc(word_counts):\n\n # sort by item (-item[1] refers to reverse list of second item)\n sorted_items = sorted(word_counts.items(), key=lambda item: (-item[1], item[0]))\n \n for key, value in sorted_items:\n print \"{} {}\".format(value, key)", "def tweet_sort(twitter_data, results, cmp):\r\n \r\n # Insertion sort\r\n for i in range(1, len(results)):\r\n current = results[i]\r\n position = i\r\n while position > 0 and cmp(twitter_data, results[position - 1], current) > 0:\r\n results[position] = results[position - 1]\r\n position = position - 1 \r\n results[position] = current", "def sortNgrams(hashtable):\n\tsorted = map(lambda (x, y): (y,x), hashtable.items())\n\tsorted.sort() # sort on basis of frequency\n\tsorted.reverse() # revert order: most frequent first\n\treturn map(lambda (y, x): (x, y), sorted)", "def human_sort( l ):\n convert = lambda text: int(text) if text.isdigit() else text\n alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key) ]\n alphanum_key = None\n try:\n l.sort( key=alphanum_key )\n except TypeError:\n l.sort()\n return l", "def sort_and_cut(counter: Counter, limit: int):\n # ignoring the alphabetical part, it's fine to do\n # [word_type for (word_type, count) in counter.most_common(limit)]\n tokens_and_frequencies = sorted(counter.items(), key=lambda tup: tup[0])\n tokens_and_frequencies.sort(key=lambda tup: tup[1], reverse=True)\n vocab_tokens = [i[0] for i in tokens_and_frequencies[:limit]]\n return vocab_tokens", "def reduce_sort_counts(self, type, word_counts):\n aux = 0\n for count, word in sorted(word_counts, reverse=True):\n if aux < 50: # Controls that we get only the 50 
most common keywords\n aux = aux+1\n yield type, (int(count), word)", "def reduce_sort_counts(self, type, word_counts):\n aux = 0\n for count, word in sorted(word_counts, reverse=True):\n if aux < 15: # Controls that we get only the 15 most common keywords\n aux = aux+1\n yield type, (int(count), word)", "def benchmark_sort_key(benchmark):\n if not \"label\" in benchmark:\n return \"\"\n return benchmark[\"label\"]", "def human_sort(l):\n l.sort(key=alphanum_key)\n return l", "def tweet_sort(twitter_data, results, cmp):\n\n # Insertion sort\n for i in range(1, len(results)):\n current = results[i]\n position = i\n while position > 0 and cmp(twitter_data, results[position - 1], current) > 0:\n results[position] = results[position - 1]\n position = position - 1\n results[position] = current", "def order_ideal(self, gens):", "def natural_sort( l ): \n convert = lambda text: int(text) if text.isdigit() else text \n alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key) ] \n l.sort( key=alphanum_key )\n return l", "def sort_words(words):\n return sorted(words)", "def NaturalSort(l):\n convert = lambda text: int(text) if text.isdigit() else text\n alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key) ]\n return sorted(l, key = alphanum_key)", "def test_dotted_sorting(self):\n assert natsort(['1.5', '1.0']) == ['1.0', '1.5']", "def process(filename):\r\n x = open(filename, \"r\")\r\n words_from_songs=[]\r\n for line in x:\r\n array =line.split(\":\")\r\n songid= array[0]\r\n lyrics=array[1]\r\n lyrics=lyrics.replace(\"\\n\", \"\")\r\n lyrics=lyrics.split(\" \")\r\n for i in range(len(lyrics)):\r\n words_from_songs.append((lyrics[i],songid))\r\n words_from_songs=radixSortNumbers(words_from_songs)\r\n max1 = longestWord(words_from_songs)\r\n counting = []\r\n for _ in range(max1+1):\r\n counting.append([])\r\n for k in range(len(words_from_songs)-1,0,-1):\r\n counting[len(words_from_songs[k][0])].append(words_from_songs[k])\r\n new_list = []\r\n # for i in range(len(counting)-1,0,-1):\r\n # for k in range(len(counting[i])):\r\n # new_list.insert(0,counting[i][k])\r\n # for i in range(len(counting) - 1, 0, -1):\r\n # new_list = countingSort(new_list, i - 1)\r\n\r\n for i in range(len(counting)-1,0,-1):\r\n for k in range(len(counting[i])):\r\n new_list.insert(0,counting[i][k])\r\n new_list = countingSort(new_list,i-1)\r\n y = open(\"sorted_words.txt\",\"w\")\r\n for i in range(len(new_list)):\r\n y.write(str(new_list[i][0])+\":\"+str(new_list[i][1]+\"\\n\"))", "def get_order_from_categories(otu_table, category_labels):\r\n category_labels = array(category_labels)\r\n sample_order = []\r\n\r\n for label in unique(category_labels):\r\n label_ix = category_labels == label\r\n selected = [s for (i, s) in zip(label_ix, otu_table.SampleIds) if i]\r\n sub_otu_table = filter_samples_from_otu_table(\r\n otu_table,\r\n selected,\r\n 0,\r\n inf)\r\n data = asarray([val for val in sub_otu_table.iterObservationData()])\r\n label_ix_ix = get_clusters(data, axis='column')\r\n\r\n sample_order += list(nonzero(label_ix)[0][array(label_ix_ix)])\r\n return array(sample_order)", "def set_trec_order(self):\n for k in self._run:\n self._run[k].sort(key=lambda x:(x.get_score(),x.get_doc()),reverse=True)\n for r in self._run[k]:\n print r.get_str()", "def sort(self, input):\n regex = re.compile('[%s]' % re.escape(string.punctuation))\n normal_input = regex.sub('', input.lower())\n array = list(normal_input.replace(' ',''))\n array.sort()\n return ''.join(array)", "def 
sort_words(filename = \"English.txt\"):\n word_list = create_word_list(filename)\n s = SortedLinkedList()\n for i in word_list:\n s.add(i)\n return s", "def natsort(lst: List[str]) -> None:\n lst.sort(key=natsort_key)", "def sortTermsAlphabetically(terms):\n # Tutorial for sorting credit:\n # https://www.geeksforgeeks.org/ways-sort-list-dictionaries-values-python-using-lambda-function/\n sorted_list = sorted(terms, key=lambda i: (i[\"term_header\"], i[\"rating\"]))\n return sorted_list", "def main(str_text):\n\n frequencies = count_value(str_text)\n sorted_data = sort_dict(frequencies)\n\n return sorted_data", "def similarity_sort(texts: List[str]) -> List[str]:\n df = cluster_text(texts, n=len(texts) // 2)\n return df[\"text\"].tolist()", "def sort(self):\n self.words = set(sorted(self.words))", "def classify(texts: List[str], params: Any) -> List[str]:\n\n alpha = 1\n token_probs_pos = params[\"token_probs_pos\"]\n token_probs_neg = params[\"token_probs_neg\"]\n all_words = params[\"all_words\"]\n M = len(all_words)\n cnt_pos_docs = params[\"cnt_pos_docs\"]\n cnt_neg_docs = params[\"cnt_neg_docs\"]\n\n sum_len_neg = params[\"sum_len_neg\"]\n sum_len_pos = params[\"sum_len_pos\"]\n pos_dict = params[\"pos_dict\"]\n neg_dict = params[\"neg_dict\"]\n\n\n test_texts = preprocessing(texts)\n test_tokenized_texts = text_to_tokens(test_texts)\n \n res = []\n log_pos_probablity = 0\n log_neg_probablity = 0\n i = 0\n for text in test_tokenized_texts:\n if (i % 5000 == 0):\n print(\"Classified\", i, \"texts\")\n i += 1\n log_pos_probablity = log(cnt_pos_docs)\n log_neg_probablity = log(cnt_neg_docs)\n for token in text:\n if (token_probs_pos[token] == 0):\n token_probs_pos[token] = alpha / (alpha * M + sum_len_pos)\n else:\n log_pos_probablity += log(token_probs_pos[token])\n if (token_probs_neg[token] == 0):\n token_probs_neg[token] = alpha / (alpha * M + sum_len_neg)\n else:\n log_neg_probablity += log(token_probs_neg[token])\n if (log_neg_probablity > log_pos_probablity):\n res.append(\"neg\")\n #for token in text:\n # all_words.add(token)\n # M = len(all_words)\n # neg_dict[token] += text[token]\n # sum_len_neg += text[token]\n # token_probs_neg[token] = (alpha + neg_dict[token]) / (alpha * M + sum_len_neg)\n\n else:\n res.append(\"pos\")\n #for token in text:\n # all_words.add(token)\n # M = len(all_words)\n # pos_dict[token] += text[token]\n # sum_len_pos += text[token]\n # token_probs_pos[token] = (alpha + pos_dict[token]) / (alpha * M + sum_len_pos)\n\n\n \n print('Predicted labels counts:')\n print(count_labels(res))\n return res", "def sort_sentence(sentence):\r\n #Parte la cadena con la primera funcion y la manda como parametro\r\n #A la segunda que devuelve el arreglo de palabras\r\n words = break_words(sentence)\r\n return sort_words(words)", "def word_count_sort(word_count_list):\n\n for index in range(1, len(word_count_list)):\n # initialize pointers\n value = word_count_list[index] # starts at the tuple in index 1\n position = index - 1 # initialize to start at 0\n\n # move items to a higher index position while their value is less than the value at the next index\n # compare values in tuple[1] but swap entire tuple\n while position >= 0 and word_count_list[position][1] < value[1]:\n word_count_list[position + 1] = word_count_list[position] # swap the tuple at position into next index\n position -= 1 # decrement to fill lower index and break loop\n\n word_count_list[position + 1] = value # move higher number left one index\n\n return word_count_list", "def sort_key(self):\n ...", 
"def sort_results(self):\n pass", "def order(self, searcher, docnums, reverse = False):\n raise NotImplementedError", "def order_scores(doctors):\n\n # return doctors.sort(key=operator.methodcaller('get_review_score'))\n # print doctors\n print\n print\n ret_docs = sorted(doctors, key=operator.itemgetter('review_score'), reverse=True)\n # ret_docs = doctors.sort(key=lambda k: k['review_score'])\n # print ret_docs\n return ret_docs", "def fuzzy_token_sort_ratio(thing_1, thing_2):\n return fuzz.token_sort_ratio(thing_1, thing_2)", "def sort_terms(term_postings_list):\n print(\" -- Sorting terms...\")\n sorted_dictionary = OrderedDict() # keep track of insertion order\n sorted_terms = sorted(term_postings_list)\n for term in sorted_terms:\n result = [int(docIds) for docIds in term_postings_list[term]]\n result_tftd = calculate_tftd(result)\n sorted_dictionary[term] = result_tftd\n return sorted_dictionary", "def sort(self,desc):\n\tself.__sort(\"\",\"\",desc)", "def sort_nicely(l): \n import re\n convert = lambda text: int(text) if text.isdigit() else text \n alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key) ] \n return sorted(l, key=alphanum_key)", "def sort_word_analysis(self):\n\n\t\treverse_word_analysis = [(value,key) for key, value in self.word_analysis.items()]\n\t\treverse_word_analysis.sort(reverse=True)\n\n\t\tvocab_list = [\t(reverse_word_analysis[i][1], #[1]: the word; [0]: the attribute values \n\t\t\t\t\t\t(reverse_word_analysis[i][0][3], #stem: 3rd index \n\t\t\t\t\t\treverse_word_analysis[i][0][2], #frequency; 2nd index\n\t\t\t\t\t\tself.sentence_index[reverse_word_analysis[i][0][4]], #the sentence location index; 4th index \n\t\t\t\t\t\treverse_word_analysis[i][0][5], #selection criteria: 5th index \n\t\t\t\t\t\t)) for i in range(10)]\n\t\t\n\t\tself.vocab_list = vocab_list\n\t\t\n\t\treturn vocab_list", "def sort(self):\r\n\t\t\r\n\t\t# get variables, add i\r\n\t\tv = self.scan(p=False)\r\n\t\tv.append('i')\r\n\t\t\r\n\t\t# reverse so least weighted variables come first\r\n\t\tv.reverse()\r\n\t\t\r\n\t\t# assign a weight to each variable, based on position in list\r\n\t\tw = {}\r\n\t\tfor n,i in enumerate(v):\r\n\t\t\tw[i] = 1000 ** (n + 1)\r\n\t\t\t\r\n\t\t# assign score based on weights and exponents\r\n\t\ts = {}\r\n\t\tfor i in self:\r\n\t\t\t\r\n\t\t\t# sum weights\r\n\t\t\tc = 0\r\n\t\t\tfor k,j in i.items():\r\n\t\t\t\t\r\n\t\t\t\t# adjust weights based on exponent\r\n\t\t\t\tif k != 'i':\r\n\t\t\t\t\tc += w.get(k,0) * (100 + j)\r\n\t\t\t\t\t\r\n\t\t\t\t# i is adjusted based on even or odd exponents\r\n\t\t\t\telse:\r\n\t\t\t\t\tc += w.get(k,0) * (100 + j % 2)\r\n\t\t\t\t\t\r\n\t\t\t# use score as key\r\n\t\t\ts[c] = i\r\n\t\t\t\t\r\n\t\t# sort keys largest to smallest\r\n\t\ty = s.keys()\r\n\t\ty.sort()\r\n\t\ty.reverse()\r\n\t\t\r\n\t\t# new term list\r\n\t\tn = [s[k] for k in y]\r\n\t\t\r\n\t\treturn Li(n,c=False)", "def sort_sentence(sentence):\r\n words = break_words(sentence)\r\n return sort_words(words)", "def _sort_majorana_term(term):\n if len(term) < 2:\n return term, 0\n center = len(term) // 2\n left_term, left_parity = _sort_majorana_term(term[:center])\n right_term, right_parity = _sort_majorana_term(term[center:])\n merged_term, merge_parity = _merge_majorana_terms(left_term, right_term)\n return merged_term, (left_parity + right_parity + merge_parity) % 2", "def order(self, searcher, docnums, reverse = False):\n return docnums", "def sort_ft_similar_word_findings(self, ft_found_terms):\n similarity_scores = []\n for entry in 
ft_found_terms:\n corresponding_term = entry[\"corresponding_term\"]\n\n similarity = self.model_request.similarity(self.base_word, corresponding_term)\n similarity_scores.append((entry, similarity))\n\n similarity_scores.sort(reverse=True, key=lambda tup: tup[1])\n\n return similarity_scores", "def reversesort(self):\n ...", "def natural_sort(l):\n convert = lambda text: int(text) if text.isdigit() else text.lower()\n alphanum_key = lambda key: [convert(c) for c in re.split('([0-9]+)', key)]\n return sorted(l, key=alphanum_key)", "def natsort(lst):\n lst.sort(key=natsort_key)", "def _mySort(self, alist):\n return sorted(alist, key=lambda x: (x[0].isdigit(), x.lower()))", "def vertex_sort_(self, vertices, labels):\n if self._method_calling == 3:\n return (sorted(list(vertices),\n key=lambda x: float('inf')\n if labels[x] is None else labels[x]), labels)\n else:\n return (sorted(vertices, key=lambda x: labels[x]), labels)", "def sort_sentence(sentence):\n # whats difference with break_words(words)? keep the unaffected data while the listing undergoes change.\n words = break_words(sentence)\n return sort_words(words)", "def radix_sort_rot(self, labels):\n n = len(labels)\n result = 0\n if n == 0:\n return result\n\n for b in range(self.bits):\n # The output array elements that will have sorted arr\n output = [0]*n\n\n # initialize count array as 0\n count = [0, 0]\n\n # Store count of occurrences in count[]\n for i in range(n):\n count[(labels[i] >> b) % 2] += 1\n\n # Change count[i] so that count[i] now contains actual\n # position of this digit in output array\n count[1] += count[0]\n\n # Build the output array\n for i in range(n-1, -1, -1):\n index = (labels[i] >> b)\n output[count[index % 2] - 1] = labels[i]\n count[index % 2] -= 1\n\n # Copying the output array to arr[],\n # so that arr now contains sorted numbers\n labels = output\n\n previous, occ = labels[0], 1\n for i in range(1, len(labels)):\n label = labels[i]\n if label == previous:\n occ += 1\n else:\n result ^= self.ROT(previous ^ occ, occ)\n occ = 1\n previous = label\n if occ > 0:\n result ^= self.ROT(previous ^ occ, occ)\n return result", "def kwiksort(dict_prefs, list_els, runs=10, random_seed=None):\n best_score=float(\"-infinity\")\n if random_seed is not None:\n np.random.seed(random_seed)\n for run in range(runs):\n ordering=_kwiksort(list_els,dict_prefs)\n score=eval_ordering(ordering,dict_prefs)\n if score>best_score:\n best_score=score\n best_order=ordering\n return best_order", "def _natural_key_sort(string_to_sort):\n return [int(s) if s.isdigit() else s for s in re.split(r'(\\d+)', string_to_sort)]", "def sort(self):\r\n return self.sort_targets([self])", "def sort_sentence(sentence):\n words =break_words(sentence)\n return sort_words(words)", "def sort_1(l):\n pass", "def sort_slide_names(l): \n convert = lambda text: int(text) if text.isdigit() else text \n alphanum_key = lambda key: [convert(c) for c in re.split('([0-9]+)', key) ] \n return sorted(l, key = alphanum_key)", "def reorder_labels(labels, order):\n new_ordered_labels = [0 for i in range(len(labels))]\n for i in range(len(order)):\n new_ordered_labels[i] = labels[order[i]]\n return new_ordered_labels", "def custom_sort(pseudo):\n # Unpack\n pred = pseudo[\"pred_list\"]\n lab = pseudo[\"lab_list\"]\n name = pseudo[\"name_list\"]\n \n # Sort\n sorted_list = list(zip(pred, lab, name))\n sorted_list.sort(key=lambda x: x[0], reverse=True)\n \n pred_sorted = [row[0] for row in sorted_list]\n lab_sorted = [row[1] for row in sorted_list]\n name_sorted = 
[row[2] for row in sorted_list]\n \n # Re-pack\n pseudo = {\n \"pred_list\": pred_sorted,\n \"lab_list\": lab_sorted,\n \"name_list\": name_sorted\n }\n \n return pseudo", "def mapper2(input_file):\n\tstart2 = time.time()\n\tfrequencies = defaultdict(int)\n\twith open(input_file) as f:\n\t\ttextstr = f.read()\n\t\t\n\ttokens = re.findall(token_regex, textstr)\n\t\n\tfor token in tokens:\n\t\tfrequencies[token.lower()] += 1\n\t\t\n\tsorted_results = sorted(frequencies.items(), key=operator.itemgetter(1), reverse=True)\n\tprint 'runtime', time.time() - start2\n\treturn sorted_results[0]", "def featurize(tokens, feature_fns):\n ###TODO\n \n # step 1 -> feats creation\n feats = defaultdict(lambda: 0)\n \n # step 2 -> call particular feature function for each feature\n for feature in feature_fns : \n feature(tokens,feats)\n\n # step 3 -> sort before return\n return(sorted(feats.items(), key=lambda x: x[0]))", "def order(text):\n return ' '.join(sorted(text.split(' '), key=get_number)) if text else ''", "def sort_sentence(sentence):\n\twords = break_words(sentence)\n\treturn sort_words(words)", "def sort_sentence(sentence):\n\twords = break_words(sentence)\n\treturn sort_words(words)", "def sort_string(raw_str):", "def _sort(self, groups):\n return sorted(groups, key=lambda group: (group.name.lower(), group.pubid))", "def sorted_nicely( l ): \n convert = lambda text: int(text) if text.isdigit() else text \n alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key) ] \n return sorted(l, key = alphanum_key)", "def get_top_words(self, label, n):\n score_list = []\n if('sod' in label):\n for term in self.vocab:\n score = self.cond_prob_sod[term] / self.cond_prob_pop[term]\n score_list.append((score,term)) \n else:\n for term in self.vocab:\n score = self.cond_prob_pop[term] / self.cond_prob_sod[term]\n score_list.append((score,term))\n score_list = sorted(score_list, key=lambda x:x[0],reverse=True)[:n]\n return score_list \n pass", "def sort_and_cast_doc_in_posting_list(word_posting_list, itemgetterparam=1):\n temp = {}\n for key, val in word_posting_list.items():\n temp[int(key)] = float(val)\n otemp = sorted(temp.items(), key=operator.itemgetter(itemgetterparam))\n return dict(otemp)", "def sort_nicely(l):\r\n\tl.sort(key=alphanum_key)", "def natsort_icase(lst: List[str]) -> None:\n lst.sort(key=natsort_key_icase)", "def top_coefs(clf, label, n, vocab):\n ###TODO\n \n # step 1 -> get .coef_\n coefficient = clf.coef_[0] #***** \n \n # step 2 -> check label and sort\n if label == 1: # positive class -> descending sorting\n # get indices of sorted list i.e. [2,3,1] -> sorting [1,2,3] -> indices[3,1,2]\n top_coef_ind = np.argsort(coefficient)[::-1][:n] # requires very less time by this methos of sorting and get sorted element's indices \n \n if label == 0: # negative class -> ascending sorting\n top_coef_ind = np.argsort(coefficient)[::1][:n]\n \n \n #step 3 -> get all top coefficient' indices\n #print('top_coef_ind = ',top_coef_ind)\n top_coef = abs(coefficient[top_coef_ind])\n #print('top_coef = ',top_coef)\n \n #step 4 -> get all top coefficient' terms i.e. 
tokens\n rev_Vocab = {}\n \n for term,colId in vocab.items():\n rev_Vocab.setdefault(colId,term)\n #alternatives -> check for fasted \n #vocab.__class__(map(reversed, vocab.items()))\n #rev_Vocab = lambda vocab: {v:k for k, v in vocab.items()}\n #rev_Vocab = lambda vocab: dict( zip(vocab.values(), vocab.keys()) )\n \n \n top_coef_terms = []\n \n for colId in top_coef_ind:\n top_coef_terms.append(rev_Vocab[colId])\n \n #step 5 -> get touple (top_coef_terms, top_coef) and send\n return ([x for x in zip(top_coef_terms, top_coef)])", "def preprocess(docs):\r\n # stop = set(stopwords.words('english'))\r\n tags = {'NN', 'NNS', 'NNP', 'NNP', 'NNPS', 'JJ', 'JJR', 'JJS'}\r\n for i in range(len(docs)):\r\n docs[i] = [(word.lower(), convert(tag)) for (word, tag) in nltk.pos_tag(nltk.word_tokenize(docs[i])) if tag in tags]\r\n return lemmatize_docs(docs)", "def __init__(self, document, k):\n doc = document\n self.n = k\n lst = []\n for l in range(1, self.n+1): #loops through every substring up to n\n for i in range(0, len(doc)-l): #loops through each index up to the length - l because that's where we need to start chopping up\n lst.append(doc[i:i+l]) #adds it to the list\n for i in range(len(doc)-l, len(doc)): #loops through the last indices that need to be chopped\n lst.append(doc[i:]) #adds them to the list as well\n self.ststr = lambda x, y: 0 if x == y else (-1 if x < y else 1) #creates the compare function\n self.sortedList = mysort(lst, self.ststr) #sorts the list and stores it\n '''\n for i in range(0, len(doc) - k):\n lst.append(doc[i:i + k])\n for i in range(len(doc) - k, len(doc)):\n lst.append(doc[i:])\n '''", "def keysort(*args, **kwargs): # real signature unknown\n pass", "def sort_index(self):\n def s(t):\n return tuple(sorted(t, key=self.clade_order))\n self.scf.index = self.scf.index.map(s)\n self.scf.index.names = [s.split('_')[0] for s in self.scf.index[0]]", "def induced_sorting(\n lms, tails, heads, SA, type_suffix, text, n, m, alpha, bucket_sizes, sigma\n):\n for i in range(m - 1, -1, -1): # place LMS suffixes at the end of their buckets\n nfs = tails[text[lms[i]]]\n SA[nfs] = lms[i]\n tails[text[lms[i]]] -= 1\n\n for i in range(n): # place the L-type suffixes at the fronts of their buckets\n if SA[i] > 0 and type_suffix[SA[i] - 1] == L_TYPE:\n nfs = heads[text[SA[i] - 1]]\n SA[nfs] = SA[i] - 1\n heads[text[SA[i] - 1]] += 1\n\n # reset bucket counters\n heads, tails = bucket_intervals(alpha, bucket_sizes, sigma)\n\n for i in range(\n n - 1, -1, -1\n ): # place the S-type suffixes at the ends of their buckets\n if SA[i] > 0 and type_suffix[SA[i] - 1] == S_TYPE:\n nfs = tails[text[SA[i] - 1]]\n SA[nfs] = SA[i] - 1\n tails[text[SA[i] - 1]] -= 1", "def _wiki_sort_key(doc):\n url = doc['url']\n return 1 if url.startswith('https://en.wikipedia') else -1", "def sort_terms(self):\n sorted_index = OrderedDict({})\n for k in sorted(self.inverted_index.keys()):\n sorted_index[k] = self.inverted_index[k]\n self.inverted_index = sorted_index", "def train(self):\n self.transitions = {}\n if self.order > len(self.tokens) - 1:\n print(\"Unable to train: Hit upper bound on order, given corpus.\")\n for i in range(0, len(self.tokens) - self.order):\n ngram = tuple(self.tokens[i:i+self.order])\n if ngram in self.transitions:\n self.transitions[ngram].append(self.tokens[i+self.order])\n elif ngram not in self.transitions:\n self.transitions[ngram] = [self.tokens[i+self.order]]", "def sort_nicely(l):\n l.sort(key=alphanum_key)\n return l" ]
[ "0.6858186", "0.6613914", "0.657223", "0.64046764", "0.640126", "0.63479173", "0.62306327", "0.6093973", "0.6057724", "0.59947205", "0.59803766", "0.5903831", "0.5857513", "0.5817867", "0.5800997", "0.57934964", "0.5732152", "0.57284874", "0.57194114", "0.5718229", "0.57175237", "0.57164854", "0.57054406", "0.5677624", "0.56732047", "0.56700236", "0.5664938", "0.5664004", "0.564787", "0.5644353", "0.5641878", "0.5615334", "0.56115365", "0.56069225", "0.5603279", "0.55950314", "0.55919147", "0.5562565", "0.55560154", "0.55539423", "0.5535073", "0.5532107", "0.5524645", "0.5506075", "0.5499276", "0.54941034", "0.5493136", "0.54822433", "0.54733384", "0.5470306", "0.54555136", "0.54460263", "0.5423545", "0.54101044", "0.5401951", "0.5399394", "0.53843075", "0.5380339", "0.53796774", "0.53723204", "0.5368981", "0.53685874", "0.5364335", "0.536168", "0.5359574", "0.5358621", "0.5350033", "0.5349454", "0.5342486", "0.5339143", "0.5335454", "0.53327644", "0.53313994", "0.5330718", "0.531882", "0.5306755", "0.5305448", "0.5297142", "0.5277266", "0.52736396", "0.52677715", "0.5265983", "0.5265702", "0.5265702", "0.52651304", "0.5263904", "0.5258833", "0.5254443", "0.52498186", "0.5248962", "0.5246825", "0.5237719", "0.5235833", "0.52323854", "0.52275693", "0.52266896", "0.5222754", "0.52151525", "0.5211009", "0.5208432", "0.5208419" ]
0.0
-1
Allows the user to change his/her password. If the existing password matches, and both new password fields match, then the password is changed. A ?next=/... query parameter can be added, so after the password is changed, the user is redirected back to the original referring page.
def password_req(request): next = request.POST.get('next', request.META.get('HTTP_REFERER', DEFAULT_REDIRECT)) args = default_context(request, username=request.user.username, next=next) try: password = request.POST['password'] pw1 = request.POST['pw1'] pw2 = request.POST['pw2'] except KeyError: pass else: if pw1 != pw2: args['mismatch'] = True elif not request.user.check_password(password): args['error'] = True else: request.user.set_password(pw1) request.user.save() return HttpResponseRedirect(next) return render_to_response('registration/password.html', args)
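Editorial note on the row above, kept separate from the dataset fields: the query paragraph describes a Django password-change view whose success path (old password verifies, both new fields match, redirect to the submitted next value) is implemented by the password_req function in the document field. Below is a minimal sketch of exercising that path with Django's test client; the URL /accounts/password/, the user alice, the password strings, and /dashboard/ are all illustrative assumptions, and the sketch presumes the view's helpers (default_context, DEFAULT_REDIRECT) are importable in the host project.

from django.contrib.auth.models import User
from django.test import TestCase

class PasswordReqSketch(TestCase):
    def setUp(self):
        # hypothetical fixture user; 'alice' / 'old-secret' are assumptions
        self.user = User.objects.create_user('alice', password='old-secret')
        self.client.login(username='alice', password='old-secret')

    def test_success_path_changes_password_and_redirects(self):
        # the view reads 'next' from request.POST, so it is sent in the form body
        # (with HTTP_REFERER / DEFAULT_REDIRECT as fallbacks)
        response = self.client.post('/accounts/password/', {
            'password': 'old-secret',  # existing password must verify
            'pw1': 'new-secret',       # both new password fields must match
            'pw2': 'new-secret',
            'next': '/dashboard/',     # redirect target on success
        })
        self.assertEqual(response.status_code, 302)
        self.assertEqual(response['Location'], '/dashboard/')
        self.user.refresh_from_db()
        self.assertTrue(self.user.check_password('new-secret'))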
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def change_password(self, old_password, new_password):\n data = dict(password = new_password)\n data['old-password'] = old_password\n return self.app.post('/_changePassword', data = data, follow_redirects = True)", "def change_password():\n form = PasswordResetForm()\n\n if form.validate_on_submit():\n # Update user\n current_user.password = crypto_manager.hash(form.password.data)\n\n try:\n correct = True\n db.session.commit()\n\n flash(_('Password updated correctly'), 'success')\n\n return redirect(url_for('admin.profile_edit'))\n\n except Exception:\n correct = False\n current_app.logger.exception('Failed to update user password')\n\n flash(_('Error updating password, contact an administrator'), 'error')\n\n return render_template('admin/profile/change_password.html', form=form)\n\n finally:\n if not correct:\n db.session.rollback()\n\n return render_template('admin/profile/change_password.html', form=form)", "def changepassword():\n try:\n if request.method == 'POST':\n # Makes sure the passwords match and that it meets complexity\n validate = check_pass(\n request.form['newpass'], request.form['connewpass'])\n if validate == \"Passed\":\n data = [request.form['newpass'], session[\n 'username'], request.form['oldpass']]\n with Database() as database:\n database.updateUserPassword(data)\n return redirect(url_for('profile', username=session['username']))\n else:\n flash(validate)\n return render_template('changepass.html')\n\n else:\n return render_template('changepass.html')\n\n except Exception as e:\n flash(\"Oops, something went wrong... Try again.\")\n return render_template('changepass.html')", "def change_password():\n\n from .forms import ChangeCredentialsForm\n\n username = current_user.get_id()\n form = ChangeCredentialsForm(request.form)\n\n if form.validate_on_submit():\n logger.info(username + \" wants to change something.\")\n if request.form['username'] != username:\n logger.info(\"User \" + username + \" wants to change the username.\")\n app.rename_user(username, request.form['username'],\n request.form['newPassword1'])\n else:\n logger.info(\"Changing password of user \" + username + \".\")\n app.add_user_and_password(request.form['username'],\n request.form['newPassword1'])\n\n logger.info(\"Successfully changed credentials of \"\n + username + '.')\n return redirect(url_for('home'))\n\n else:\n return render_template('change-credentials.html',\n form=form,\n username=username)", "def change_my_password():\n form = ChangePassword()\n if request.method == 'GET':\n return render_template('changemypassword.html', form=form)\n if request.method == 'POST' and form.validate_on_submit():\n username = form.username.data\n old_password = form.password.data\n new_password_hash = generate_password_hash(form.password1.data)\n account = db.check_item(\"username\", username)\n if account is not None:\n if check_password_hash(str(account['password_hash']), old_password):\n db.update_password_username(username, new_password_hash)\n flash('Your password has been changed')\n return redirect(url_for('login'))\n else:\n flash('Invalid username or password')\n return redirect(url_for('change_my_password'))\n else:\n flash('Invalid username or password')\n return redirect(url_for('change_my_password'))\n else:\n return render_template('changemypassword.html', form=form)", "def change_password():\n\n if request.method == 'POST':\n current_password = request.form['current_password']\n new_password = request.form['new_password']\n\n # If current password is correct, update and store the new 
hash\n if current_user.check_password_hash(current_password):\n current_user.generate_password_hash(new_password)\n else:\n return 'Current password you entered is wrong! Please try again!'\n\n # Commit the changes we made in the object to the database\n success, reason = commit_transaction()\n if not success:\n return f'Error occurred while changing your password - {reason}!'\n\n log(f'<code>{current_user.name}</code> has updated their password!</code>')\n\n # Log the user out, and redirect to login page\n logout_user()\n return redirect(url_for('login'))\n return render_template('change_password.html')", "def change_password(request):\n if request.method == 'POST':\n form = PasswordChangeForm(request.user, request.POST)\n if form.is_valid():\n user = form.save()\n update_session_auth_hash(request, user)\n messages.success(request, 'Updated password!')\n return redirect('profile')\n else:\n form = PasswordChangeForm(request.user)\n return render(request, 'accounts/forms.html', {\n 'form': form\n })", "def change_password():\n\n if request.method == \"POST\":\n\n # Ensure current password is not empty\n if not request.form.get(\"current_password\"):\n return apology(\"must provide current password\", 400)\n\n # Query database for user_id\n rows = db.execute(\"SELECT hash FROM users WHERE id = :user_id\", user_id=session[\"user_id\"])\n\n # Ensure current password is correct\n if len(rows) != 1 or not check_password_hash(rows[0][\"hash\"], request.form.get(\"current_password\")):\n return apology(\"invalid password\", 400)\n\n # Ensure new password is not empty\n if not request.form.get(\"new_password\"):\n return apology(\"must provide new password\", 400)\n\n # Ensure new password confirmation is not empty\n elif not request.form.get(\"new_password_confirmation\"):\n return apology(\"must provide new password confirmation\", 400)\n\n # Ensure new password and confirmation match\n elif request.form.get(\"new_password\") != request.form.get(\"new_password_confirmation\"):\n return apology(\"new password and confirmation must match\", 400)\n\n # Update database\n hash = generate_password_hash(request.form.get(\"new_password\"))\n rows = db.execute(\"UPDATE users SET hash = :hash WHERE id = :user_id\", user_id=session[\"user_id\"], hash=hash)\n\n # Show flash\n flash(\"Password Changed!\")\n return redirect(\"/\")\n\n return render_template(\"change_password.html\")", "def changepassword():\n if request.method == \"POST\":\n\n # Ensure password was submitted\n if not request.form.get(\"newpassword\"):\n return apology(\"must provide password\", 400)\n # Ensure passwords match\n elif request.form.get(\"newpassword\") != request.form.get(\"confirmation\"):\n return apology(\"passwords do not match\", 400)\n elif request.form.get(\"newpassword\").isalpha() == True:\n return apology(\"password must contain at least one numeric symbol\")\n\n # encrypt new password\n hash = generate_password_hash(request.form.get(\"newpassword\"))\n print(hash)\n # update user's password in database\n result = db.execute(\"UPDATE users SET hash = :hash WHERE id = :id\", hash=hash, id = session[\"user_id\"])\n\n if not result:\n return apology(\"password not available\", 400)\n\n # Redirect user to home page\n return redirect(\"/\")\n\n # User reached route via GET (as by clicking a link or via redirect)\n else:\n return render_template(\"changepass.html\")", "def change_password(request):\n\n form = ChangePasswordForm(user=request.user)\n context = {\n 'form': form,\n 'submit_button_text': _('Update password'),\n 
'back_button_text': _('Cancel'),\n 'show_back_button': True,\n }\n # If this is a POST request then process the Form data\n if request.method == 'POST':\n # Create a form instance and populate it with data from the request (binding):\n form = ChangePasswordForm(request.POST, user=request.user)\n context.update({'form': form})\n # Check if the form is valid:\n if form.is_valid():\n user = request.user\n if not user.check_password(form.cleaned_data['old_password']):\n messages.error(request, _('Password was not changed! You typed your old password in incorrectly, please try again.'), extra_tags='alert alert-warning')\n else:\n # process the data in form.cleaned_data as required (here we just write it to the model due_back field)\n user.set_password(form.cleaned_data['new_password'])\n user.save()\n update_session_auth_hash(request, request.user)\n # redirect to a new URL:\n messages.success(request, _('Your password was changed.'), extra_tags='alert alert-success')\n form = ChangePasswordForm(user=request.user)\n context.update({'form': form})\n return render(request, 'change_password_form.html', context)\n\n\n return render(request, 'change_password_form.html', context)", "def edit_password():\n form = EditPasswordForm()\n\n if request.method == 'POST' and form.validate():\n\n user = Users.query.filter_by(id=current_user.id).first()\n\n if not user.check_password(form.old_password.data):\n flash('Incorrect old password', 'warning')\n return redirect(url_for('auth.edit_password'))\n\n user.set_password(form.new_password.data)\n\n try:\n db.session.commit()\n flash('Your password has been changed.', 'success')\n except IntegrityError:\n db.session.rollback()\n flash('ERROR! Unable to change your password, please check your details are correct and try again.',\n 'warning')\n\n return redirect(url_for('auth.account'))\n\n return render_template('auth/edit_account/edit_password.html', form=form)", "def change_password(request):\n\n\temp = models.Teacher.objects.get(user=request.user)\n\tcontext_dict = {}\n\tif request.method == 'POST':\n\t\tform = AdminPasswordChangeForm(user=request.user, data=request.POST)\n\t\tif form.is_valid():\n\t\t\tform.save()\n\t\t\tupdate_session_auth_hash(request, form.user)\n\t\t\tcontext_dict[\"message\"] = \"Password changed successfully\"\n\t\t\thistory = models.History(\n\t\t\t\tuser=emp,\n\t\t\t\tactivity=\"\",\n\t\t\t\tactivity_type=\"Changed password\"\n\t\t\t)\n\t\t\thistory.save()\n\t\telse:\n\t\t\tcontext_dict[\"message\"] = \"Password not changed\"\n\treturn render(request, \"changePassword.html\", context_dict)", "def change_user_password():\n session_id = request.args.get('session-id', None)\n user_id = request.args.get('user-id', None)\n user = get_user_by_id(user_id)\n if request.method == 'POST':\n old_password = request.form['old-password']\n new_password = request.form['new-password']\n confirm_password = request.form['confirm-password']\n today = datetime.date.today()\n reservations_list = get_user_reservations_list(user_id)\n cars_reservations_list = get_cars_user_reservations_list(reservations_list)\n reservations_status_list = get_reservations_status_list(reservations_list)\n if check_authentication(session_id, user_id):\n is_password_updated = update_user_password(user_id, old_password, new_password, confirm_password)\n else:\n return render_template('home.html', cars_list=get_cars_preview(), news_list=get_news_list(), authjs=False,\n preview_length=get_cars_preview().__len__(), del_session_cookie=True)\n if is_password_updated == \"OK\":\n 
return render_template('user_area.html', user=user.id, session_id=session_id, edit_mode=False,\n surname=user.surname, name=user.name, birthdate=user.birthdate,\n feedback_msg=\"Password successfully updated!\", today=today,\n reservations_list=reservations_list, cars_reservations_list=cars_reservations_list,\n reservations_status_list=reservations_status_list)\n else:\n return render_template('user_area.html', user=user.id, session_id=session_id, edit_mode=False,\n surname=user.surname, name=user.name, birthdate=user.birthdate,\n feedback_msg=is_password_updated, today=today,\n reservations_list=reservations_list, cars_reservations_list=cars_reservations_list,\n reservations_status_list=reservations_status_list)", "def password():\n\n if request.method == 'POST':\n print 'Changing password'\n # query for user's hash of password\n pw_hash = datastore.get_user_by_user_id(engine, session['user_id'])['hash']\n\n # check all boxes filled, old password is correct, new and confirmation match\n if not request.form.get('old') or not check_password_hash(pw_hash, request.form.get('old')):\n flash('Incorrect old password!', 'danger')\n return render_template('password.html')\n elif not request.form.get('new') or not request.form.get('confirmation'):\n flash('Must confirm new password!', 'danger')\n return render_template('password.html')\n elif not request.form.get('new') == request.form.get('confirmation'):\n flash('New passwords don\\'t match!', 'danger')\n return render_template('password.html')\n\n # update hash in database\n datastore.update_password_hash(engine, session['user_id'], generate_password_hash(request.form.get('new')))\n\n # redirect to portfolio\n flash('Password changed!', 'info')\n print 'Password changed!'\n return redirect(url_for('index'))\n\n else:\n print 'Loading change password page'\n return render_template('password.html')", "def password_change_done(request):\n messages.success(\n request, _('Your password has been successfully changed.')\n )\n return redirect('common:current_user_details')", "def changePassword():\n\n if request.method == \"GET\":\n\n #Query for the current user that is logged in.\n user = db.execute(\"SELECT username from users WHERE id = :id\", id=session['user_id'])\n\n\n return render_template(\"changePassword.html\", user=user)\n\n if request.method == \"POST\":\n\n #Query for the current user that is logged in and get the hash.\n new_pass = db.execute(\"SELECT username, hash from users WHERE id = :id\", id=session['user_id'])\n\n old_password = request.form.get(\"old_password\")\n password = request.form.get(\"password\")\n confirmation = request.form.get(\"confirmation\")\n\n #Check if the user entered an input\n if not password:\n return apology(\"Please enter a password\", 400)\n if not confirmation:\n return apology(\"Please enter a password confirmation\", 400)\n\n #Check if the password and the confirmation password is the same.\n if password==confirmation:\n hashpw = generate_password_hash(password)\n\n else:\n return apology(\"Password doesn't match\", 400)\n\n #Check if the entered old password is correct.\n if check_password_hash(new_pass[0]['hash'], old_password)==True:\n db.execute(\"UPDATE users SET hash = :hashpw WHERE id = :id\", hashpw=hashpw, id=session['user_id'])\n flash('You successfully changed your password!')\n else:\n return apology (\"Hash doesn't match\", 400)\n\n return redirect (\"/\")", "def post(self):\n DA = DataAccessor()\n session = getSessionByRequest(self)\n user = getSessionUser(session)\n \n old = 
self.request.get('old')\n new = self.request.get('new')\n new2 = self.request.get('new2')\n\n if old != user.password:\n setSessionMessage(session, \"Invalid Password\")\n self.redirect('/admin')\n\n if (new != new2) :\n setSessionMessage(session, \"Your new passwords did not match. Please try again.\", True)\n else:\n setSessionMessage(session, \"You have successfully changed your password.\", False)\n \n #Reset the password\n DA.update(user, password=new)\n\n #Reset the session.\n session.generated = False\n session.put()\n self.redirect('/admin')", "def ChangePassword():\n if self.ChangePassword():\n # Update successful, return to main screen\n self.confirm_pass.set('')\n self.password.set('')\n Return()\n else:\n return", "def change_password(user, old_pw, new_pw, confirm_pw, lang):\n LOG.debug(\"Entering change_password\")\n _t = Translator(lang)\n\n success = False\n\n # is the old password given?\n if not old_pw:\n LOG.debug(\"Old pwd is empty\")\n message = _t.get(Keywords.oldPwdEmpty) # 'The old password field is empty.'\n # is the new password given?\n elif not new_pw:\n LOG.debug(\"New pwd is empty\")\n message = _t.get(Keywords.newPwdEmtpy) # 'The new password field is empty.'\n # is the confirmation password given?\n elif not confirm_pw:\n LOG.debug(\"Confirm pwd is empty\")\n message = _t.get(Keywords.confPwdEmpty) # 'The password confirmation field is empty.'\n # is new password equals the confirmation?\n elif not new_pw == confirm_pw:\n LOG.debug(\"New pwds not equal\")\n message = _t.get(Keywords.newPwdNotEqual) # 'The new passwords are not equal'\n # is new old password equals the new one?\n elif old_pw == new_pw:\n LOG.debug(\"Pwds are the same\")\n message = _t.get(Keywords.pwdsSame) # 'The new and old password are the same'\n else:\n # is the old password valid?\n if not user.validate_password(old_pw):\n LOG.debug(\"Old password is wrong\")\n message = _t.get(Keywords.oldPwdWrong) # 'Your old password is wrong.'\n else:\n user.change_password(new_pw)\n\n LOG.debug(\"Password was changed\")\n message = _t.get(Keywords.pwdChanged) # 'Your password was changed'\n success = True\n\n return message, success", "def password_change(request):\n status = 200\n pform = ChangePasswordForm(request.user, request.POST)\n\n if pform.is_valid():\n status = pform.save(request)\n if status == 200:\n messages.success(request, _('Your password was successfully changed'))\n return redirect('profile')\n\n return render(request, 'gui/profile/profile_password_form.html', {\n 'user': request.user,\n 'pform': pform,\n }, status=status)", "def change_password(request):\n if not request.user.is_authenticated:\n return JsonResponse({}, status=401)\n\n try:\n body = json.loads(request.body)\n except (TypeError, json.decoder.JSONDecodeError):\n return JsonResponse({'error': 'Cannot parse request body'}, status=400)\n\n old_password = body.get('oldPassword')\n new_password = body.get('newPassword')\n\n if not old_password or not new_password:\n return JsonResponse({'error': 'Missing payload'}, status=400)\n\n if not request.user.check_password(old_password):\n return JsonResponse({'error': 'Incorrect old password'}, status=400)\n\n try:\n validate_password(new_password, user)\n except ValidationError as err:\n return JsonResponse({'error': err.messages[0]}, status=400)\n\n request.user.set_password(new_password)\n request.user.save()\n update_session_auth_hash(request, request.user)\n\n return JsonResponse({})", "def update_password(): \n \n form = PasswordForm()\n if request.method == 'POST':\n if 
form.validate_on_submit():\n \n hashed_pw = bcrypt.hashpw(form.new_password.data.encode('utf-8'), bcrypt.gensalt())\n user = mongo.db.user.find_one({'username': session['username']})\n \n if bcrypt.checkpw(request.form['password'].encode('utf-8'), user['hashed_password']):\n mongo.db.user.find_one_and_update({'username': session['username']}, {'$set':{'hashed_password':hashed_pw}})\n \n flash(f'Password reset was successful, please login again.','success')\n return redirect(url_for('login'))\n \n return render_template('pages/settings.html', \n title='Password', \n form=form\n )", "def updatepassword():\n if request.method == \"POST\":\n\n password = request.form.get(\"password\")\n password2 = request.form.get(\"confirmation\")\n\n if not password:\n return apology(\"must provide password\", 400)\n\n elif not (password == password2):\n return apology(\"passwords must match\", 400)\n\n elif not password2:\n return apology(\"must confirm password\", 400)\n\n rows = db.execute(\n \"SELECT password FROM users WHERE id = ?\", (session_get_int(\"user_id\"), )).fetchall()\n\n if (check_password_hash(rows[0][\"password\"], password)):\n return apology(\"password cannot be the same as existing password\", 400)\n\n else:\n db.execute(\"UPDATE users SET password = ? WHERE id = ?\",\n (generate_password_hash(password), session_get_int(\"user_id\")))\n con.commit()\n\n return redirect(\"/profile\")\n else:\n return redirect(\"/profile\")", "def change_password(change_account):\n change_data(change_account, changed_data='password')", "def view_update_user(self, user, new_pw, old_pw):\r\n user.realm._checker.passwd(user.userID, new_pw, old_pw)", "def password_change_view(request):\n extra_context = {'title': _('Current user password change')}\n\n if request.user.user_options.block_password_change:\n messages.error(\n request, _(\n 'Changing the password is not allowed for this account.'\n )\n )\n return HttpResponseRedirect(reverse(settings.HOME_VIEW))\n\n return password_change(\n request, extra_context=extra_context,\n template_name='appearance/generic_form.html',\n post_change_redirect=reverse('authentication:password_change_done'),\n )", "def change_password(request):\n if request.method == 'POST':\n form = PasswordChangeForm(user=request.user, data=request.POST)\n if form.is_valid():\n form.save()\n return HttpResponseRedirect(reverse('view-profile', args=[request.user.id]))\n else:\n print \"form not valid\"\n else:\n form = PasswordChangeForm(user=request.user)\n\n return render(request, 'woofer/show_form.html', {\n 'form' : form,\n 'message' : None,\n 'form_action' : reverse('change-password'),\n 'title' : \"Change Password\"\n })", "def change_password(self, user, current_password, password):\n\n if not password:\n raise DoorstepError('New password can\\'t be blank.')\n\n # Changing user's password if old password verifies\n user = self.get(id=user.id)\n\n if not user.check_password(current_password):\n raise DoorstepError('Your current password is wrong.')\n\n user.set_password(password)\n user.save()", "def change_password(self, new_pass):\n self.manager.change_user_password(self, new_pass)", "def reset_password(self, old_password, new_password):\n verb = \"POST\"\n url = urljoiner(self.baseurl, [self.path, \"$me\", \"password\"])\n headers = {'Content-Type': 'application/x-www-form-urlencoded'}\n data = {\"oldPassword\": old_password,\n \"newPassword\": new_password}\n\n if(self.debug):\n print(verb + \" \" + url)\n r = requests.post(url, data=data, headers=headers)\n 
self.handle_error_message(r)\n print(\"password successfully reset!\")\n self.auth_data['password'] = new_password\n try:\n self.login()\n except Exception as e:\n pass", "def change_password_user():\n\n form = ChangePasswordForm(request.form)\n\n if form.validate_on_submit():\n\n if not request.form['old_password'] or request.form['old_password'] == '' :\n flash(\"No null or empty values are allowed.\",\"warn\")\n return render_template('user/change_password_user.html', title='Change Password', form=form)\n\n if not request.form['password'] or request.form['password'] == '' :\n flash(\"No null or empty values are allowed.\",\"warn\")\n return render_template('user/change_password_user.html', title='Change Password', form=form)\n\n if request.form['password'] != request.form['retype_password']:\n flash(\"Passwords are not the same!\",\"warn\")\n return render_template('user/change_password_user.html', title='Change Password', form=form)\n\n\n hashed_password = user_manager.hash_password(request.form['password'])\n\n # Modificamos el password del usuario\n current_user.password = hashed_password\n\n try:\n correct = True\n db.session.commit()\n except Exception as e:\n # Catch anything unknown\n print(e)\n correct = False\n finally:\n if not correct:\n # Cleanup and show error\n db.session.rollback()\n flash('Error modifying password of user, make sure username and email are unique','error')\n return render_template('user/change_password_user.html', title='Change Password', form=form)\n else:\n flash('Congratulations, update your password!','success')\n return redirect(url_for('user_ksat.show_user'))\n\n\n return render_template('user/change_password_user.html', title='Change Password', form=form)", "def change_password(self, request, **kwargs):\n self.method_check(request, allowed=['post'])\n self.throttle_check(request)\n\n data = json.loads(request.body)\n\n username = None\n old_password = None\n new_password = None\n\n if \"username\" in data:\n username = data[\"username\"]\n print username\n else:\n if \"email\" in data:\n username = data[\"email\"]\n else:\n BadRequest(INVALID_PARAMS)\n\n if \"old_password\" in data:\n old_password = data[\"old_password\"]\n else:\n BadRequest(INVALID_PARAMS)\n\n if \"new_password\" in data:\n new_password = data[\"new_password\"]\n else:\n BadRequest(INVALID_PARAMS)\n\n if (old_password is not None and new_password is not None and\n username is not None):\n member = authenticate(username=username, password=old_password)\n\n if member is not None:\n member.set_password(new_password)\n member.save()\n return self.create_response(request, {})", "def change_password():\n if request.method == \"POST\":\n error = None\n if \"username\" in session:\n username = session[\"username\"]\n old_password = request.form[\"old_password\"]\n new_password1 = request.form[\"new_password1\"]\n new_password2 = request.form[\"new_password2\"]\n\n error = change_password_tests(new_password1, new_password2)\n\n if not is_valid_login(username, old_password):\n error = \"Incorrect old password\"\n\n if error:\n flash(error)\n else:\n with open(PASSFILE, \"r\") as passfile, open(TEMPFILE, \"a\") as tempfile:\n for record in passfile:\n try:\n r_username, r_salt_hash = record.split()\n\n # same_username & same_password exist to\n # avoid the linter's 'Line too long' flag\n same_username = username == r_username\n same_password = sha256_crypt.verify(old_password, r_salt_hash)\n\n if same_username and same_password:\n t_salt_hash = sha256_crypt.hash(new_password1)\n 
tempfile.write(username + \" \" + t_salt_hash + \"\\n\")\n else:\n tempfile.write(r_username + \" \" + r_salt_hash + \"\\n\")\n except ValueError:\n pass\n\n # remove the password backup file that *may* have been previously created\n # fail silently if the file does not exist\n try:\n os.remove(PASSFILE + \".bak\")\n except OSError:\n pass\n\n # this keeps a backup of the previous passfile\n os.rename(PASSFILE, PASSFILE + \".bak\")\n os.rename(TEMPFILE, PASSFILE)\n flash(\"Password changed\")\n return render_template(\"index.html\")\n else:\n flash(\"Must be logged in to change password.\")\n return redirect(url_for(\"login\"))\n\n return render_template(\"changepassword.html\")", "def change_password(username, current_password, new_password):\n\n if current_password == \"\": # nosec (not a hardcoded password)\n current_password = getpass.getpass()\n\n is_password_ok = authenticate_user(username, current_password)\n if not is_password_ok:\n return False\n\n if new_password == \"\": # nosec (not a hardcoded password)\n new_password = getpass.getpass()\n\n global db\n if db is None:\n init_db()\n user_model = Query()\n user = db.search(user_model.username == username)[0]\n\n salt = user['salt']\n password = hash_password(new_password, salt)\n api_key = gen_api_key(username)\n\n user_id = db.update({'password': password, 'api_key': api_key}, doc_ids=[user.doc_id])\n\n return {\n 'result': 'success',\n 'eid': user_id,\n 'user_created': user,\n 'api_key': api_key\n }", "async def user_change_password(\n form: ChangePasswordRequest,\n db: Session = Depends(db_session)):\n token: AccessToken = find_ot_access_token(db, form.token)\n if not token:\n return {\"success\": False, \"msg\": \"Token was not found\"}\n\n token.user.hashed_password = PWD_CONTEXT.hash(form.password)\n db.delete(token)\n db.commit()\n return {\"success\": True}", "def change_password(self, new_password):\n dev = self.nearest_pandevice()\n self.password_hash = dev.request_password_hash(new_password)\n self.update(\"password_hash\")", "def reset_password(request):\r\n params = request.params\r\n\r\n # now also load the password info\r\n current = params.get('current_password', None)\r\n new = params.get('new_password', None)\r\n\r\n # if we don't have any password info, try a json_body in case it's a json\r\n # POST\r\n if current is None and new is None:\r\n params = request.json_body\r\n current = params.get('current_password', None)\r\n new = params.get('new_password', None)\r\n\r\n user_acct = request.user\r\n\r\n if not UserMgr.acceptable_password(new):\r\n request.response.status_int = 406\r\n return _api_response(request, {\r\n 'username': user_acct.username,\r\n 'error': \"Come on, let's try a real password this time\"\r\n })\r\n\r\n # before we change the password, let's verify it\r\n if user_acct.validate_password(current):\r\n # we're good to change it\r\n user_acct.password = new\r\n return _api_response(request, {\r\n 'username': user_acct.username,\r\n 'message': \"Password changed\",\r\n })\r\n else:\r\n request.response.status_int = 403\r\n return _api_response(request, {\r\n 'username': user_acct.username,\r\n 'error': \"There was a typo somewhere. 
Please check your request\"\r\n })", "def change_password(self, password, newpassword):\n cred = {\"newpasswd\": newpassword, \"passwd\": password}\n return self.put(\"passwd\", cred)", "def ChangePassword(self):\n \n username = self.username.get().lstrip().rstrip()\n if not username:\n messagebox.showerror('Error', 'No username entered.')\n return False\n \n if not self.PasswordMatch():\n messagebox.showerror('Error', 'Password fields do not match.')\n return False\n password = self.password.get().lstrip().rstrip()\n \n for user in self.user_db:\n if user['User'] == username:\n if user['Password'] == password:\n messagebox.showerror('Error',\n 'New password unchanged from the ' \\\n 'old password.')\n return False\n user['Password'] = password\n messagebox.showinfo('Success!', 'Password updated!')\n return True\n \n messagebox.showerror('Error', f'{username} not found in database.')\n return False", "def change_password(self, user, current_password, password):\n\n if not password:\n raise Exception('New password can\\'t be blank.')\n\n # Changing user's password if old password verifies\n user = self.get(id=user.id)\n\n if not user.check_password(current_password):\n raise Exception('Your current password is wrong.')\n\n user.set_password(password)\n user.save()", "def _update_password(self, email, new_password):\r\n user = User.objects.get(email=email)\r\n user.set_password(new_password)\r\n user.save()\r\n history = PasswordHistory()\r\n history.create(user)", "def profile():\n\n # User entered new password\n if request.method == \"POST\":\n\n # Ensure current password entered\n if not request.form.get(\"old\"):\n return apology(\"Please enter current password\")\n\n # Query database for current password\n rows = db.execute(\"SELECT * FROM users WHERE id = ?\", session[\"user_id\"])\n\n # Ensure old password matches current password\n if not check_password_hash(rows[0][\"hash\"], request.form.get(\"old\")):\n return apology(\"Invalid password\")\n\n # Ensure user entered a new password\n if not request.form.get(\"new\"):\n return apology(\"Please enter a new password\")\n\n # Ensure old and new passwords are different\n if request.form.get(\"new\") == request.form.get(\"old\"):\n return apology(\"Must enter a new password\")\n\n # Update new password in database\n db.execute(\"UPDATE users SET hash = ? 
WHERE id = ?\", generate_password_hash(\n request.form.get(\"new\"), method='pbkdf2:sha256', salt_length=8), session[\"user_id\"])\n\n # Redirect to homepage\n return redirect(\"/\")\n\n else:\n\n # User reached page via a link\n return render_template(\"profile.html\")", "def pass_change(request):\n if request.method == \"POST\":\n form = PasswordChangeForm(request.user, request.POST)\n if form.is_valid():\n form.save()\n return home(request, \"Password Changed Successfully\")\n \n else:\n form = PasswordChangeForm(instance=request.user)\n \n ctx = _make_context(request, \"pass_form\", form)\n \n return TemplateResponse(request, \"users/index.html\", ctx)", "def change_password(password: str, old_password=None) -> None:\n logging.debug(\"called\")\n if not pwd_gate:\n raise SecretSharingError(\"No password gate given.\")\n if pwd_gate[\"pwd\"]:\n if not bcrypt.checkpw(old_password.encode(ENCODING), pwd_gate.get(\"pwd\").encode(ENCODING)):\n raise PasswordError(\"Old password doesn't match.\", old_password)\n else:\n if not pw_is_viable(password):\n raise PasswordError(\"Password not complex enough.\", password)\n pwd_gate[\"pwd\"] = bcrypt.hashpw(password.encode(ENCODING), bcrypt.gensalt()).decode(ENCODING)\n else:\n if not pw_is_viable(password):\n raise PasswordError(\"Password not complex enough.\", password)\n pwd_gate[\"pwd\"] = bcrypt.hashpw(password.encode(ENCODING), bcrypt.gensalt()).decode(ENCODING)", "def test_user_not_logged_in_redirects_from_change_password(self):\n get_response = self.client.get(self.change_password_url)\n post_response = self.client.post(self.change_password_url, {})\n self.assertRedirects(get_response, self.login_url)\n self.assertRedirects(post_response, self.login_url)", "def changePassword(self, oldPassword, newPassword):\n if self.checkPassword(oldPassword):\n pswFile = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"adminPass.psw\")\n tempFile = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"adminPassTmp.psw\")\n newHash = (hashlib.sha1(str(newPassword).encode('utf-8')).hexdigest())\n\n f = open(tempFile, \"w+\")\n f.write(newHash)\n f.close()\n shutil.copyfile(tempFile, pswFile)\n os.remove(tempFile)\n return True\n else:\n return False", "def change_user_password(self, user, new_pass):\n return self.update(user, password=new_pass)", "def print_password_change_required_and_logout( context, args ):\n\n print( \"Password change required. 
To set a new password, run the following:\" )\n print( \"rf_accounts.py -r {} -u {} -p <old password> --setpassword {} <new password>\".format( args.rhost, args.user, args.user ) )\n logout( context, ignore_error = True ) # Some services do not allow session logout in this condition\n return", "def password():\n\n # User reached route via POST\n if request.method == 'POST':\n\n # Ensure passwords that were submitted\n if not request.form.get('password'):\n return apology('must provide password', 400)\n\n elif not request.form.get('new_password') or not request.form.get('confirmation'):\n return apology('must provide a new password', 400)\n\n elif request.form.get('new_password') != request.form.get('confirmation'):\n return apology(\"passwords doesn't match\")\n\n user = db.execute('SELECT * FROM users WHERE id = :id', id=session['user_id'])\n\n # Ensure username exists and password is correct\n if len(user) != 1 or not check_password_hash(user[0]['hash'], request.form.get('password')):\n return apology('invalid username and/or password', 400)\n\n db.execute('UPDATE users SET hash = :hash WHERE id = :id',\n hash=generate_password_hash(request.form.get('new_password')),\n id=session['user_id']\n )\n\n return redirect('/logout')\n else:\n return render_template('password.html')", "def test_password_change(self):\n newpassword = 'neo.h1m1tsu!'\n pw_change_url = reverse('auth_password_change')\n\n # Step 1 - access the password change URL\n self.client.login(username=self.username, password=self.password)\n response = self.client.get(pw_change_url)\n # WARNING: uses Django's admin template\n self.assertTemplateUsed(\n response, 'registration/password_change_form.html')\n\n # Step 2 - POST existing and new password to change password\n data = {\n 'old_password': self.password,\n 'new_password1': newpassword,\n 'new_password2': newpassword,\n }\n response = self.client.post(pw_change_url, data=data, follow=True)\n self.assertRedirects(response, reverse('auth_password_change_done'))\n self.assertEqual(response.status_code, 200)\n # WARNING: uses Django's admin template\n self.assertTemplateUsed(\n response, 'registration/password_change_done.html')\n\n # Check that new password properly set\n self.client.logout()\n self.assertTrue(\n self.client.login(username=self.username, password=newpassword))", "def change_user_password(self, instance, user, new_pass):\n return instance.change_user_password(user, new_pass)", "def set_password(ctx, new_password, remember):\n ensure_validated(ctx, prompt='Enter your current password')\n if not new_password:\n new_password = click.prompt(\n 'Enter your new password',\n hide_input=True,\n confirmation_prompt=True,\n err=True)\n\n controller = ctx.obj['controller']\n settings = ctx.obj['settings']\n keys = settings.setdefault('keys', {})\n key = controller.set_password(new_password)\n click.echo('Password updated.')\n if remember:\n keys[controller.id] = b2a_hex(key).decode()\n settings.write()\n click.echo('Password remembered')\n elif controller.id in keys:\n del keys[controller.id]\n settings.write()", "def password_change(self, request):\n view_func, args, kwargs = resolve(self.change_password_path)\n\n assert issubclass(kwargs['password_change_form'],\n StrictPasswordChangeForm), (\n \"Use django_auth_policy StrictPasswordChangeForm for password \"\n \"changes.\")\n\n # Provide extra context to be used in the password_change template\n is_exp = request.session.get('password_is_expired', False)\n is_tmp = request.session.get('password_is_temporary', False)\n 
if not 'extra_context' in kwargs:\n kwargs['extra_context'] = {}\n kwargs['extra_context']['is_enforced'] = True\n kwargs['extra_context']['is_temporary'] = is_tmp\n kwargs['extra_context']['is_expired'] = is_exp\n return view_func(request, *args, **kwargs)", "def change_password(self, user, old_password, new_password):\n\n if not user.check_password(old_password):\n raise InvalidPassword('The provided old password is incorrect.')\n\n user.set_password(new_password)\n user.save()\n\n return user", "def change_pwd(self):\r\n if self.field_pwd.text() == \"\":\r\n self.label_chg_pwd.setText(\"Password cannot be empty\")\r\n return None\r\n self.encryptor.set_key_from_password(self.field_pwd.text())\r\n self.label_chg_pwd.setText(\"Password typed\")\r\n self.label_chg_pwd.setStyleSheet(\"color:#01ac2d\")\r\n self.label_chg_key.clear()\r\n self.field_key.clear()\r\n QtWidgets.QMessageBox.information(self, \"Password Change\", \r\n (\"Your password has been successfully changed.\\n\\n\"\r\n \"You can now encrypt / decrypt files.\"))", "def old_password_check(form, field):\n old_password = field.data\n password = current_user.password\n r = pwd_context.verify(old_password, current_user.password)\n if not r:\n raise validators.ValidationError('old password is wrong')", "def put_password():\n # pylint: disable=too-many-branches\n\n # get user\n user = g.user\n\n # prep regex\n re_password = re.compile(AdministratorAdminSchema.re_password)\n\n # validate data\n errors = {}\n if ('previous_password' not in request.json or\n not request.json['previous_password']):\n if 'previous_password' not in errors:\n errors['previous_password'] = []\n errors['previous_password'].append(\"Missing data for required field.\")\n elif ('previous_password' in request.json and\n not user.check_password(request.json['previous_password'])):\n if 'previous_password' not in errors:\n errors['previous_password'] = []\n errors['previous_password'].append(\"Incorrect password.\")\n\n if 'password1' not in request.json or not request.json['password1']:\n if 'password1' not in errors:\n errors['password1'] = []\n errors['password1'].append(\"Missing data for required field.\")\n if ('password1' in request.json and\n not re_password.match(request.json['password1'])):\n if 'password1' not in errors:\n errors['password1'] = []\n errors['password1'].append(\"Please choose a more complex password.\")\n\n if 'password2' not in request.json or not request.json['password2']:\n if 'password2' not in errors:\n errors['password2'] = []\n errors['password2'].append(\"Missing data for required field.\")\n if 'password1' in request.json and 'password2' in request.json:\n if request.json['password1'] != request.json['password2']:\n if 'password2' not in errors:\n errors['password2'] = []\n errors['password2'].append(\"New passwords must match.\")\n\n if errors:\n return jsonify({\"error\": errors}), 400\n\n # check previous passwords\n if user.roles[0].password_policy and user.roles[0].password_reuse_history:\n prev_passwords = AdministratorPasswordHistory.query.\\\n filter(AdministratorPasswordHistory.administrator_id == user.id).\\\n order_by(AdministratorPasswordHistory.set_date.desc()).\\\n limit(user.roles[0].password_reuse_history)\n for record in prev_passwords:\n print(\"TEST \", record.password)\n if bcrypt.checkpw(request.json.get('password1').encode('utf-8'),\n record.password.encode('utf-8')):\n errors['password1'] = [\"This password has recently been used.\"]\n break\n\n if errors:\n return jsonify({\"error\": errors}), 400\n\n # 
save user and password history\n user.password = request.json.get('password1')\n pass_history = AdministratorPasswordHistory(administrator=user,\n password=user.password,\n set_date=datetime.now())\n db.session.add(pass_history)\n db.session.commit()\n\n # response\n return jsonify({'success': 'true'}), 200", "def change_Password(): \r\n try:\r\n\r\n UserName=request.args.get(\"UserName\")\r\n validate_otp=request.args.get(\"OTP\") \r\n NewPassword=request.args.get(\"NewPassword\")\r\n hashed_Password = hashlib.md5(NewPassword.encode()).hexdigest() \r\n user_details=otp_access(UserName)\r\n otp=user_details[0]['otp']\r\n with open('api.key', 'r') as apikey:\r\n key=apikey.read().replace('\\n', '')\r\n if request.headers.get('API_KEY') == key:\r\n if str(otp)==str(validate_otp):\r\n msg=update_Password(UserName,hashed_Password)\r\n #This function calling makes the user use OTP until Password gets changed after that validity of OTP will be expired.\r\n new_otp=randint(10000,100000)\r\n # This will checks the new generated OTP and old OTP\r\n if str(otp)==str(new_otp):\r\n new_otp=randint(10000,100000)\r\n update_otp(UserName,new_otp)\r\n else:\r\n update_otp(UserName,new_otp)\r\n else:\r\n msg=\"Something went wrong check the OTP or UserName!!!!\"\r\n else:\r\n msg=\"Enter correct API KEY for Authentication.\"\r\n except IndexError:\r\n msg=f\"{UserName} does not exist , kindly enter correct UserName.\"\r\n return msg", "def test_user_logged_in_post_changes_password(self):\n form_data = {\n \"old_password\": self.password,\n \"new_password\": \"newtestpassword12\",\n \"new_password2\": \"newtestpassword12\"\n }\n login = self.client.login(username=self.username, password=self.password)\n self.assertTrue(login)\n post_response = self.client.post(self.change_password_url, form_data)\n self.assertEqual(post_response.status_code, 302)\n self.assertRedirects(post_response, reverse('account:overview'), target_status_code=302)\n user = User.objects.get(pk=self.user.id)\n self.assertTrue(user.check_password('newtestpassword12'))\n logout = self.client.logout()\n login = self.client.login(username=self.username, password=\"newtestpassword12\")\n self.assertTrue(login)", "def set_password(self, new_password):\n super(Mafiasi, self).set_password(new_password)\n self.new_password = new_password", "def change_password(token, old_password, new_password):\n session = get_session_by_token(token)\n if not session['success']:\n return session\n\n response = get_user_data_by_email(token, session['data']['user'], include_hash=True)\n if 'data' not in response:\n return response\n\n userdata = response['data']\n if check_password_hash(userdata['password_hash'], old_password):\n query_db('UPDATE Users SET password_hash = ? 
WHERE email = ?',\n [generate_password_hash(new_password), userdata['email']])\n return {'success': True, 'message': 'Password changed.', 'code': 200}\n else:\n return {'success': False, 'message': 'Wrong password.', 'code': 400}", "def user_change_password(request, pk):\n try:\n user = validations_utils.user_validation(pk) # Validates if user exists or not.\n validations_utils.user_token_validation(request.auth.user_id, pk) # Validates user's Token authentication.\n except ValidationException as e: # Generic exception\n return Response(e.errors, status=e.status)\n if request.method == 'PUT':\n try:\n request.data['current_password']\n except KeyError:\n return Response(messages.REQUIRED_CURRENT_PASSWORD,\n status=status.HTTP_400_BAD_REQUEST)\n try:\n new_password = request.data['new_password']\n if new_password is None or not re.match(r'[A-Za-z0-9@#$%^&+=]+', new_password):\n return Response(messages.PASSWORD_NECESSITY, status=status.HTTP_406_NOT_ACCEPTABLE)\n else:\n pass\n except KeyError:\n return Response(messages.REQUIRED_NEW_PASSWORD, status=status.HTTP_400_BAD_REQUEST)\n data_keys = request.data.keys()\n # Change Password will only require current_password and new_password.\n if 'current_password' in data_keys and 'new_password' in data_keys:\n current_password = request.data['current_password']\n new_password = request.data['new_password']\n try:\n password = utils.change_password(current_password, new_password, user) # Changes password.\n return Response(password, status=status.HTTP_200_OK)\n except ValidationException as e:\n return Response(e.errors, status=e.status)", "def set_new_password(self, new_password):\n self.password = new_password", "def changePassword(self, loginName, password, newPassword):\n return self.talk(\n 'purchase',\n data=self.__makeLoginDict(loginName, password,\n {'newPassword': newPassword}))", "def force_password_login(request):\n params = request.GET.copy()\n params[settings.FORCE_OLD_LOGIN_EXPERIENCE_PARAM] = \"1\"\n return redirect(reverse(\"login\") + f\"?{urlencode(params)}\")", "def _change_password(self, user, password):\r\n user.set_password(password)\r\n user.save()\r\n history = PasswordHistory()\r\n history.create(user)", "def test_change_password(self):\n self.test_login_user()\n url = reverse('change_password')\n data = {'oldPassword': \"ctest12345\", 'newPassword': 'test12345'}\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def test_41_password_change(self):\r\n password = \"mehpassword\"\r\n self.register(password=password)\r\n res = self.app.post('/account/johndoe/update',\r\n data={'current_password': password,\r\n 'new_password': \"p4ssw0rd\",\r\n 'confirm': \"p4ssw0rd\",\r\n 'btn': 'Password'},\r\n follow_redirects=True)\r\n assert \"Yay, you changed your password succesfully!\" in res.data, res.data\r\n\r\n password = \"mehpassword\"\r\n self.register(password=password)\r\n res = self.app.post('/account/johndoe/update',\r\n data={'current_password': \"wrongpassword\",\r\n 'new_password': \"p4ssw0rd\",\r\n 'confirm': \"p4ssw0rd\",\r\n 'btn': 'Password'},\r\n follow_redirects=True)\r\n msg = \"Your current password doesn't match the one in our records\"\r\n assert msg in res.data\r\n\r\n self.register(password=password)\r\n res = self.app.post('/account/johndoe/update',\r\n data={'current_password': '',\r\n 'new_password':'',\r\n 'confirm': '',\r\n 'btn': 'Password'},\r\n follow_redirects=True)\r\n msg = \"Please correct the errors\"\r\n assert msg in res.data", 
"def change_password(self, username=None, user_data=None):\n if not username:\n raise ValueError(\"Please provide a username.\")\n\n if not user_data:\n raise ValueError(\"Please provide correct user information.\")\n\n user_data = self._to_string(data=user_data)\n uri = 'json/users/' + username + '?_action=changePassword'\n data = self._post(uri=uri, data=user_data, headers=self.headers)\n if data.status_code == 200:\n return True\n else:\n return False", "def update_password(self, new_password, callback=None):\n op = self._PASSWORD_CHANGE_OPERATION(self, new_password)\n if callback is not None:\n op.done_sig.connect(callback)\n op.go()\n return op", "def passwordCode(code):\n #Check if code exists and for the correct purpose. Else abort\n if (hl.checkCode(code,\"Password\")):\n user = hl.getUserFromCode(code)\n else:\n abort(404)\n\n if request.method == 'POST':\n #Get new password and handle\n passwordform(user)\n #Mark code as used\n hl.flagCode(code)\n #return\n return redirect(url_for('confirm', confirmed = 'Changed Password'))\n\n return render_template('password.html')", "def test_login_after_password_change(self):\n old_password = self.user['password1']\n self.change_password()\n response = self.client.post(\n reverse('users:login'), {'username': self.user['username'], 'password': old_password}\n )\n self.assertEqual(response.status_code, 200)", "def testEditPassword(self):\n self._login_user('eschoppik','secret')\n response = self.client.post('/users/1/edit_password?_method=PATCH',\n data=dict(new_password='newpass', confirm_password='newpass',\n old_password='secret'), follow_redirects=True)\n user = User.query.filter_by(username='eschoppik').first()\n self.assertEqual(response.status_code, 200)\n self.assertEqual(bcrypt.check_password_hash(user.password, 'newpass'),True)", "def start_change_password_process_step_2(self, expected_url_1):\n # click on the button \"Changer de mot de passe\"\n change_password_button = self.driver.find_element_by_id(\n \"change_password_button\"\n )\n change_password_button.click()\n # wait for page loading\n WebDriverWait(\n self.driver,\n timeout=10\n ).until(EC.url_changes(expected_url_1))\n # check the new url\n expected_url_2 = expected_url_1 + \"change_password/?\"\n return expected_url_2", "def update_password_request(params, uid=None, check_current=False):\n user = get_user(uid=uid, include_pw_hash=True)\n\n if check_current and not api.user.confirm_password(\n params[\"current-password\"], user[\"password_hash\"]\n ):\n raise PicoException(\"Your current password is incorrect.\", 422)\n\n if params[\"new-password\"] != params[\"new-password-confirmation\"]:\n raise PicoException(\"Your passwords do not match.\", 422)\n\n db = api.db.get_conn()\n db.users.update(\n {\"uid\": user[\"uid\"]},\n {\"$set\": {\"password_hash\": api.common.hash_password(params[\"new-password\"])}},\n )", "def password_reset(request):\n host = settings.TACC_USER_PORTAL_HOST\n return redirect(f\"{host}/password-reset?{urlencode(request.GET)}\")", "def change_password(self):\n self.test_user.set_password(self.create_user_data()['password1'])\n self.test_user.save()", "def update_password(self, user, old_password, new_password):\n try:\n cur = self.conn.cursor()\n author_dict = self.get_author_by_name(user)\n author_id = author_dict['author_id']\n condition = self.password_check(user, old_password)\n\n if condition is not False:\n hashed_password = hash_string(new_password)\n query = ('UPDATE password '\n 'SET password = ? '\n 'WHERE password.author_id = ? 
')\n cur.execute(query, (hashed_password, author_id))\n self.conn.commit()\n return True\n else:\n return condition\n\n except TypeError:\n return False", "def test_user_changed_password(self):\n\n form_data = {\n 'token': self.token.key,\n 'password': 'aaaaaa',\n 'password_confirmation': 'aaaaaa'\n }\n form = ResetPasswordForm(form_data)\n form.submit()\n self.user.refresh_from_db()\n self.assertTrue(self.user.check_password(form_data['password']))", "def passwordform(name = None):\n if request.method == 'POST':\n if name == None:\n name = session['name']\n\n password = request.form['pass1']\n confirmPassword = request.form['passconfirm']\n if password == confirmPassword:\n hl.changePassword(name,confirmPassword)", "def change(ctx, password, clear, new_password, remember):\n if clear and new_password:\n ctx.fail(\"--clear cannot be combined with --new-password.\")\n\n _init_session(ctx, password, False, prompt=\"Enter the current password\")\n\n session = ctx.obj[\"session\"]\n keys = ctx.obj[\"oath_keys\"]\n device_id = session.device_id\n\n if clear:\n session.unset_key()\n if device_id in keys:\n del keys[device_id]\n keys.write()\n logger.info(\"Deleted remembered access key\")\n\n click.echo(\"Password cleared from YubiKey.\")\n else:\n if remember:\n try:\n keys.ensure_unlocked()\n except ValueError:\n raise CliFail(\n \"Failed to remember password, the keyring is locked or unavailable.\"\n )\n if not new_password:\n new_password = click_prompt(\n \"Enter the new password\", hide_input=True, confirmation_prompt=True\n )\n key = session.derive_key(new_password)\n if remember:\n keys.put_secret(device_id, key.hex())\n keys.write()\n click.echo(\"Password remembered.\")\n elif device_id in keys:\n del keys[device_id]\n keys.write()\n session.set_key(key)\n click.echo(\"Password updated.\")", "def test_old_password_login_check(self):\n old_password = self.user['password1']\n self.change_password()\n response = self.client.post(reverse('users:login'), {'username': self.user['username'], 'password': old_password})\n self.assertEqual(response.status_code, 200)\n self.assertFormError(response, 'form', None, ERROR_MSG)", "def update_password(self, new_password=None):\n\n self.password = generate_password_hash(new_password)\n\n if self.save(verbose=False):\n self.logger.warn('Updated password! 
%s' % self)\n else:\n raise AttributeError('Password update failed!')", "def test_account_password_change(self):\r\n params = {\r\n 'current_password': 'admin',\r\n 'new_password': 'not_testing'\r\n }\r\n\r\n res = self.testapp.post(\r\n \"/api/v1/admin/password?api_key=\" + str(API_KEY),\r\n params=params,\r\n status=200)\r\n\r\n # make sure we can decode the body\r\n user = json.loads(res.body)\r\n\r\n self.assertEqual(\r\n user['username'], 'admin',\r\n \"Should have a username of admin {0}\".format(user))\r\n self.assertTrue(\r\n 'message' in user,\r\n \"Should have a message key in there: {0}\".format(user))\r\n\r\n params = {\r\n 'current_password': 'not_testing',\r\n 'new_password': 'admin'\r\n }\r\n res = self.testapp.post(\r\n \"/api/v1/admin/password?api_key=\" + str(API_KEY),\r\n params=params,\r\n status=200)\r\n\r\n self._check_cors_headers(res)", "def change_password(self, state_token, old_password, new_password, relay_state=None):\n request = {\n 'stateToken': state_token,\n 'oldPassword': old_password,\n 'newPassword': new_password,\n 'relayState': relay_state\n }\n\n response = ApiClient.post_path(self, '/credentials/change_password', request)\n return Utils.deserialize(response.text, AuthResult)", "def post(self):\n # userId is retrieved from jwt identity\n userId = get_jwt_identity()\n data = ChangePasswordInputSchema().load(request.json)\n UserLoginService.change_password(userId,\n existing_password=data[\"existingPassword\"],\n new_password=data[\"newPassword\"])\n return {}, 200", "def set_password(self, request, pk=None):\n user = User.objects.get(id=pk)\n serializer = PasswordSerializer(data=request.data)\n\n if serializer.is_valid():\n if not user.check_password(serializer.data.get('old_password')):\n return Response({'old_password': ['Wrong password.']},\n status=status.HTTP_400_BAD_REQUEST)\n # set_password also hashes the password that the user will get\n user.set_password(serializer.data.get('new_password'))\n user.save()\n return Response({'status': 'password set'}, status=status.HTTP_200_OK)\n\n return Response(serializer.errors,\n status=status.HTTP_400_BAD_REQUEST)", "def change_password(self, user):\n if not self.is_valid():\n return None\n password = self.clean_password2()\n user.set_password(password)\n user.save()\n return user", "def change_user_pwd(uid, pwd, new_pwd):\r\n session = tables.get_session()\r\n if session is None:\r\n return {'oldMatch': False}\r\n response = {}\r\n try:\r\n user_account = UserAccount()\r\n password = user_account.get_field_by_key(UserAccount.password, UserAccount.user_id, uid,\r\n session)\r\n if password is None or password != pwd:\r\n return {'oldMatch': False}\r\n response['oldMatch'] = True\r\n response['newMatch'] = user_account.update_password(uid, new_pwd, session)\r\n session.commit()\r\n except SQLAlchemyError as err:\r\n LOGGER.error('Check user password failed: %s', err)\r\n return response\r\n finally:\r\n session.close()\r\n return response", "def test_mod_password(self, mapp, existing_user_id):\n mapp.logoff()\n mapp.login(user=existing_user_id, password=\"1234\")\n mapp.modify_user(user = existing_user_id, password = id(self))\n # Verify that the password was indeed changed.\n mapp.logoff()\n mapp.login(user=existing_user_id,\n password=\"1234\", code = 401)\n mapp.login(user=existing_user_id, password=id(self))", "def change_password(self, cr, uid, ids, old_passwd, new_passwd, context=None):\n res = {'flag':False,\n 'info':''}\n records = self.browse(cr, uid, ids, context=context)\n if records and 
len(records) == 1:\n mem_obj = records[0]\n if not mem_obj:\n res['info'] = u'获取会员信息失败!'\n if not mem_obj.m_normal:\n res['info'] = u'会员状态不可用!'\n else:\n if self.check(cr, uid, ids,old_passwd):\n self.set_password(cr, uid, ids,new_passwd)\n res['flag'] = True\n else:\n res['info'] = u'原密码输入错误!'\n \n return res", "def update_password(self, username, old_password, new_password):\n\n return self.user_manager.update_password(username, old_password, new_password)", "def doChangeUser(self, login, password, **kwargs):\n IUserChanger(self.context).setPassword(password)", "def update_password(self, user, password):\n user.password = hashers.make_password(password)", "def reset_password():\r\n key = request.args.get('key')\r\n if key is None:\r\n abort(403)\r\n userdict = {}\r\n try:\r\n userdict = signer.signer.loads(key, max_age=3600, salt='password-reset')\r\n except BadData:\r\n abort(403)\r\n username = userdict.get('user')\r\n if not username or not userdict.get('password'):\r\n abort(403)\r\n user = model.user.User.query.filter_by(name=username).first_or_404()\r\n if user.passwd_hash != userdict.get('password'):\r\n abort(403)\r\n form = ChangePasswordForm(request.form)\r\n if form.validate_on_submit():\r\n user.set_password(form.new_password.data)\r\n db.session.add(user)\r\n db.session.commit()\r\n login_user(user)\r\n flash(gettext('You reset your password successfully!'), 'success')\r\n return redirect(url_for('.signin'))\r\n if request.method == 'POST' and not form.validate():\r\n flash(gettext('Please correct the errors'), 'error')\r\n return render_template('/account/password_reset.html', form=form)", "def put(self, request):\n try:\n user = request.user\n if user.check_password(request.data[\"current_password\"]):\n user.set_password(request.data[\"password\"])\n user.save()\n return Response(status=status.HTTP_200_OK)\n else:\n return Response(\"Current password is incorrect\", status=status.HTTP_400_BAD_REQUEST)\n except KeyError:\n return Response(\"The data format is incorrect\", status=status.HTTP_400_BAD_REQUEST)\n except:\n return Response(status=status.HTTP_500_INTERNAL_SERVER_ERROR)", "def edit_user_view(request):\n form = EditUserForm(request.POST or None, instance=request.user)\n\n if form.is_valid():\n\n new_user_data = form.save()\n if form.cleaned_data['new_password_1']:\n new_user_data.set_password(form.cleaned_data['new_password_1'])\n request.session['new_password'] = True\n\n new_user_data.save()\n # enable change of user password without logout\n update_session_auth_hash(request, request.user)\n request.session['edit_succes'] = True\n return HttpResponseRedirect('/user_account/user_edit/')\n\n edit_state = request.session['edit_succes'] \\\n if 'edit_succes' in request.session else None\n\n new_password = request.session['new_password'] \\\n if 'new_password' in request.session else None\n\n request.session.update({'edit_succes': None, 'new_password': None})\n\n return render(\n request,\n \"user_account/user_edit.html\",\n {'form': form, 'edit_succes': edit_state, 'new_password': new_password}\n )", "def start_change_password_process_step_1(self):\n # start from the home page\n start_url = self.home_url\n self.driver.get(start_url)\n # click on the link \"Mon espace\"\n WebDriverWait(\n self.driver,\n timeout=10\n ).until(EC.element_to_be_clickable((\n By.ID,\n \"private_space_link\"\n ))).click()\n # wait for page loading\n WebDriverWait(\n self.driver,\n timeout=10\n ).until(EC.url_changes(start_url))\n # check the new url\n expected_url_1 = start_url + 
\"profile/\"\n return expected_url_1", "def put(self, request):\n try:\n user = request.user\n if user.check_password(request.data[\"current_password\"]):\n user.set_password(request.data[\"password\"])\n user.save()\n return Response(status=status.HTTP_200_OK)\n else:\n return Response(\"Current password is incorrect\", status=status.HTTP_400_BAD_REQUEST)\n except KeyError:\n return Response(\"The data format is incorrect\", status=status.HTTP_400_BAD_REQUEST)\n except Exception as e:\n return Response(e, status=status.HTTP_500_INTERNAL_SERVER_ERROR)", "def reset_pass(key):\n form = NewPasswordForm()\n form.key.data = key\n\n if form.validate_on_submit():\n form.user.set_password(form.password.data)\n db.session.delete(form.pw_reset)\n db.session.commit()\n\n flash('Your password has been successfully reset', 'alert-success')\n login_user(form.user)\n return redirect(url_for('default.home'))\n else:\n flash_form_errors(form)\n form.key.data = key\n # NOTE: This render_template is causing a 404\n return render_template('reset_pass.html', form=form, key=key)" ]
[ "0.7684161", "0.7678293", "0.7636813", "0.7593512", "0.7537619", "0.7510125", "0.75019175", "0.748036", "0.7474108", "0.7473725", "0.74614745", "0.7304231", "0.7270338", "0.72622114", "0.72228485", "0.7220767", "0.72164667", "0.71916544", "0.7117814", "0.71164465", "0.71038467", "0.7096304", "0.706657", "0.7053996", "0.7041359", "0.7030442", "0.7029426", "0.6990347", "0.6989456", "0.6964674", "0.6963241", "0.6946669", "0.6929306", "0.68986386", "0.68860674", "0.6866938", "0.68248004", "0.6813766", "0.6800332", "0.6793857", "0.6790922", "0.67847264", "0.6769705", "0.67396975", "0.6730941", "0.6713236", "0.6684193", "0.6682585", "0.66613525", "0.66550714", "0.6637779", "0.66316384", "0.6629098", "0.66207564", "0.6618647", "0.6608403", "0.65783316", "0.6573546", "0.656423", "0.6557145", "0.6549146", "0.65447056", "0.6539147", "0.6498107", "0.6488221", "0.64713573", "0.64671355", "0.6465939", "0.64636576", "0.6451564", "0.6448431", "0.64247423", "0.64138883", "0.6409311", "0.6408006", "0.64052427", "0.63870215", "0.63801336", "0.6355641", "0.63377345", "0.63325626", "0.63206464", "0.6319594", "0.63119215", "0.6308351", "0.6294429", "0.62916523", "0.62836766", "0.62624055", "0.6250342", "0.62481093", "0.6241757", "0.62399316", "0.62313676", "0.6212804", "0.61953986", "0.6188841", "0.6180623", "0.6173053", "0.6171947" ]
0.7167825
18
Handles the 'forgotten password' form. The user can request a password reset for a specific username, or specify an email address instead of a username. An email containing encrypted and signed recovery links is sent to the matching address. The output is identical whether 0, 1, or many users match the given username/email, which prevents people from guessing valid usernames or emails.
def forgot_req(request):
    server = request.META['SERVER_NAME']
    recover_url = urljoin(full_url(request), 'recover')

    if request.POST and not request.user.is_authenticated():
        try:
            username_or_email = request.POST['username']
        except KeyError:
            pass
        else:
            if '@' in username_or_email:
                qs = User.objects.filter(email = username_or_email)
            else:
                qs = User.objects.filter(username = username_or_email)

            users = []
            user = None
            for user in qs:
                query = 'salt=%s&user=%s' % (urlsafe_b64encode(urandom(8)),
                                             user.username)
                url = add_encrypted_query_string(recover_url, query,
                                                 settings.SECRET_KEY)
                url = sign_query_string(settings.SECRET_KEY + user.password, url)
                users.append(dict(username = user.username, url = url))

            template = get_template('registration/recover-password.txt')
            context = Context(dict(users = users, ApplianceName = server))

            if len(users) == 1:
                plural = ''
            else:
                plural = 's'

            if user:
                user.email_user(subject = "Your %s console account%s" % (server, plural),
                                from_email = FROM_EMAIL,
                                message = template.render(context))

            return HttpResponseRedirect('sent')

    return render_to_response('registration/forgotten.html',
                              dict(username=request.GET.get('username', ''),
                                   META=request.META,
                                   root=settings.ROOT_URL,
                                   media=settings.MEDIA_URL))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def forgot_password():\n if request.method == 'POST':\n if 'username' in request.form:\n username = request.form['username']\n user = Users.query.get(username)\n if user:\n reset_slug = utils.encrypt(username)\n reset_url = request.host_url + 'reset_password' + '/' + reset_slug\n from_email = ('noreply@thescriptgroup.in', 'TSG Bot')\n to_email = [(user.email, user.name)]\n subject = 'Password reset for Hades account'\n content = f\"Hello {user.name}, please click <a href=\\\"{reset_url}\\\">here</a> to reset your password!\"\n utils.send_mail(from_email, to_email, subject, content)\n return redirect(url_for('login'))\n return render_template('forgot_password.html')", "def forgot_password():\n\n if not current_user.is_anonymous():\n return redirect(url_for(\"forum.index\"))\n\n form = ForgotPasswordForm()\n if form.validate_on_submit():\n user = User.query.filter_by(email=form.email.data).first()\n\n if user:\n token = user.make_reset_token()\n send_reset_token(user, token=token)\n\n flash((\"E-Mail sent! Please check your inbox.\"), \"info\")\n return redirect(url_for(\"auth.forgot_password\"))\n else:\n flash((\"You have entered an username or email that is not linked \\\n with your account\"), \"danger\")\n return render_template(\"auth/forgot_password.html\", form=form)", "def forgot_password():\r\n form = ForgotPasswordForm(request.form)\r\n if form.validate_on_submit():\r\n user = model.user.User.query\\\r\n .filter_by(email_addr=form.email_addr.data)\\\r\n .first()\r\n if user and user.email_addr:\r\n msg = Message(subject='Account Recovery',\r\n recipients=[user.email_addr])\r\n if user.twitter_user_id:\r\n msg.body = render_template(\r\n '/account/email/forgot_password_openid.md',\r\n user=user, account_name='Twitter')\r\n elif user.facebook_user_id:\r\n msg.body = render_template(\r\n '/account/email/forgot_password_openid.md',\r\n user=user, account_name='Facebook')\r\n elif user.google_user_id:\r\n msg.body = render_template(\r\n '/account/email/forgot_password_openid.md',\r\n user=user, account_name='Google')\r\n else:\r\n userdict = {'user': user.name, 'password': user.passwd_hash}\r\n key = signer.signer.dumps(userdict, salt='password-reset')\r\n recovery_url = url_for('.reset_password',\r\n key=key, _external=True)\r\n msg.body = render_template(\r\n '/account/email/forgot_password.md',\r\n user=user, recovery_url=recovery_url)\r\n msg.html = markdown(msg.body)\r\n mail.send(msg)\r\n flash(gettext(\"We've send you email with account \"\r\n \"recovery instructions!\"),\r\n 'success')\r\n else:\r\n flash(gettext(\"We don't have this email in our records. 
\"\r\n \"You may have signed up with a different \"\r\n \"email or used Twitter, Facebook, or \"\r\n \"Google to sign-in\"), 'error')\r\n if request.method == 'POST' and not form.validate():\r\n flash(gettext('Something went wrong, please correct the errors on the '\r\n 'form'), 'error')\r\n return render_template('/account/password_forgot.html', form=form)", "def forgot_passwd(request):\n dc_settings = request.dc.settings\n\n return password_reset(\n request,\n template_name='gui/accounts/forgot.html',\n email_template_name='gui/accounts/forgot_email.txt',\n subject_template_name='gui/accounts/forgot_subject.txt',\n password_reset_form=partial(ForgotForm, request),\n post_reset_redirect=reverse('forgot_done'),\n from_email=dc_settings.DEFAULT_FROM_EMAIL,\n current_app='gui',\n extra_context={\n 'e_site_name': dc_settings.SITE_NAME,\n 'e_site_link': dc_settings.SITE_LINK,\n })", "def forgot():\n form = ForgotForm()\n\n if form.validate_on_submit():\n db.session.add(form.pw_reset)\n db.session.commit()\n\n form.pw_reset.send()\n flash('A password reset link has been sent to your email', 'alert-success')\n return redirect(url_for('default.home'))\n else:\n flash_form_errors(form)\n return render_template('forgot.html', form=form)", "def forgot_password():\n \n if 'username' in session: \n flash('You are already logged in, you can reset your password here.', 'info')\n return redirect(url_for('dashboard'))\n \n form = ForgotPasswordForm()\n \n if request.method == 'POST':\n if form.validate_on_submit(): \n user = mongo.db.user.find_one({'email':form.email.data})\n\n if user:\n flash('Please enter your security passphrase and create a new password', 'info')\n return redirect(url_for('reset_password')) \n \n flash('Email address not found!', 'danger')\n return render_template('pages/forgot.html', \n title='Forgot Password', \n form=form\n )\n \n return render_template('pages/forgot.html', title='Forgot Password', form=form)", "def reset_password():\n if current_user.is_authenticated:\n return redirect(url_for('main.home'))\n\n form = RequestResetForm()\n\n if form.validate_on_submit():\n user = User.query.filter_by(email=form.email.data).first()\n send_reset_email(user) # located in utils.py\n flash('An email has been sent with instruction to reset your password', 'info')\n return redirect(url_for('users.login'))\n\n return render_template('reset_password_request.html', form=form)", "def forgot_password():\n url = 'http://localhost:8080/' + 'user/reset/'\n body = request.get_json()\n email = body.get('email')\n if not email:\n return jsonify(msg.MISSING_PARAMETER), 400\n user_email = views.UserManagement().exists(email=email)\n\n if not user_email:\n return jsonify(msg.NO_DATA), 404\n expires = datetime.timedelta(hours=24)\n reset_token = create_access_token(identity=email, expires_delta=expires)\n\n send_email('[Shodita] Reset Your Password', sender='shodita@shodita.com', recipients=[email],\n text_body=render_template('email/reset_password.txt', url=url + reset_token),\n html_body=render_template('email/reset_password.html', url=url + reset_token))\n\n return jsonify(msg.SUCCESS), 200", "def forgotpassword(request):\n if request.method == 'GET':\n return render(request, 'app/other/forgot_password.html', {'title':'Forgot Password?',})\n elif request.method == 'POST':\n username = request.POST['username']\n\n if User.objects.filter(username = username).exists():\n user = User.objects.get(username = username)\n if Referee.objects.filter(user = user).exists():\n referee = Referee.objects.get(user 
= user)\n # generate token\n passwordResetTokenGenerator = PasswordResetTokenGenerator()\n token = PasswordResetTokenGenerator.generate_token(passwordResetTokenGenerator, str(user.id))\n token = str(token.decode('utf-8'))\n # email to referee\n subject = \"[Password Reset Link]\"\n message = 'http:////localhost:8000//reset//token=//' + token\n content = \"<br>Dear sir,</br><br></br><br></br>Link is: \"+message+'. Please click on the link to change the credentials.'+\"<br></br><br></br>Regards,<br></br>PhDPortal.\"\n email = []\n receiver = referee.user\n email.append(receiver.email)\n send_email_task.delay(email, subject, content)\n # redirect to same page with status to check your mail and click on activation link\n \n dict = {'status' : 'Done', 'message' : 'An Activation link has been sent to your mail-id'}\n return HttpResponse(json.dumps(dict), content_type = 'application/json')\n else: # given username is not valid to use this feature\n dict = {'status': 'Error', 'message' : 'You are not Authorized to change password'}\n return HttpResponse(json.dumps(dict), content_type = 'application/json')\n else: # given username is not valid to use this feature\n dict = {'status': 'Error', 'message' : 'Invalid Username, Try Again!'}\n return HttpResponse(json.dumps(dict), content_type = 'application/json')\n else:\n return redirect(reverse(URL_BAD_REQUEST))", "def forgotPassword():\n if request.method == 'POST':\n if emailform():\n email = request.form['email1']\n\n #Confirm the user exist\n if hl.confirmUser(email):\n user = hl.getUser(\"Email\",email)\n refLink = \"http://\"+request.headers['Host']+hl.genUrl(user[\"Name\"],\"Password\")\n #Send email\n msg = \"\"\"\n Dear {},\n\n You are receiving this email because you have requested your password be reset. 
\n Use the following link to reset your password:\n\n {}\n\n If you did not request that your password be changed, please reply to this email immediately.\n\n Regards,\n Onegroup Admin Team\n \"\"\".format(user[\"Name\"],refLink)\n\n emailMessage(\"Password Reset\", [user[\"Email\"]], msg)\n return redirect(url_for('confirm', confirmed = 'Password reset email has been sent.'))\n else:\n flash(\"User doesn't exists\")\n else:\n flash(\"Emails don't match\")\n \n return render_template('emailsend.html')", "def request_password_reset():", "def user_password_reset(self, request):\n reset_password_form = ResetPasswordForm(request.form)\n\n if request.method == \"POST\":\n if reset_password_form.validate_on_submit():\n if check_password_hash(current_user.password, reset_password_form.old_password.data):\n new_hashed_password = generate_password_hash(reset_password_form.password.data)\n\n temp = current_user.get_id()\n (role, email) = temp.split(\":\")\n\n # if first element is `sysadmin` instead of a scheme_id\n # call function to reset `sysadmin` pass\n if role == \"sysadmin\":\n self._scheme_handler.update_hash_password(email, new_hashed_password)\n else:\n # regular user reset\n self._student_handler.update_hash_password(current_user.scheme_id, current_user.k_number, new_hashed_password)\n\n flash(\"Password successfully updated\")\n else:\n flash(\"Old password incorrect\")\n else:\n flash(\"Please double check your new password is valid.\")\n \n return render_template(\"user/reset_password.html\", reset_password_form=reset_password_form)", "def reset_password():\n form = ResetPassword()\n if form.validate_on_submit():\n user_email = form.email.data\n mail_exist = db.check_email(user_email)\n if mail_exist is not None:\n new_password = generate_password()\n new_password_hash = generate_password_hash(new_password)\n username = mail_exist['username']\n db.update_password_username(username, new_password_hash)\n flash('Your new password has been sent to your mailbox')\n redirect('login')\n # send_password_reset_email(user_email, new_password)\n return redirect(url_for('login'))\n else:\n flash('This email address is not registered')\n return redirect('reset_password')\n return render_template('resetpassword.html', form=form)", "def user_forgotpassword(): \n data = user_obj.user_forgotpassword(request.forms) \n return data", "def send_password_reset_email():\n aaa.send_password_reset_email(\n username=post_get('username'),\n email_addr=post_get('email_address')\n )\n return 'Please check your mailbox.'", "def password_reset(request):\n\tif not request.user.is_authenticated():\n\t\treturn django.contrib.auth.views.password_reset(request,\n template_name='usermgr/password_reset_form.html',\n email_template_name= 'usermgr/password_reset_email.html',\n post_reset_redirect='/usermgr/password_reset/done/')\n\telse:\n\t\treturn HttpResponseRedirect(settings.LOGIN_REDIRECT_URL)", "def do_reset(term, handle, email=u''):\n sep_ok = getattr(term, color_secondary)(u'::')\n sep_bad = getattr(term, color_primary)(u'::')\n email = u''\n\n for _ in range(passkey_max_attempts):\n handle = prompt_input(term=term,\n key='Username',\n content=handle or u'',\n width=username_max_length)\n\n if not handle:\n # canceled\n return False\n\n email = prompt_input(term=term,\n key='E-mail',\n content=email or u'',\n width=email_max_length)\n if not email:\n # canceled\n return False\n\n user = matches_email(handle, email)\n if not user:\n echo(fixate_next(term))\n echo(u'{0} Address is incorrect !'.format(sep_bad))\n # try 
e-mail address again\n continue\n\n echo(fixate_next(term))\n passkey = send_passkey(user)\n if not passkey:\n # failed to send e-mail\n term.inkey(1)\n echo(u'\\r\\n\\r\\n')\n return False\n\n echo(u'{0} E-mail successfully delivered !'.format(sep_ok))\n\n for _ in range(passkey_max_attempts):\n try_passkey = prompt_input(term=term,\n key='Passkey',\n width=password_max_length)\n\n if not try_passkey:\n # canceled\n return False\n\n if passkey.strip() != try_passkey.strip():\n # passkey does not match\n echo(fixate_next(term))\n echo(u'{0} Passkey does not verify !'.format(sep_bad))\n # try passkey again\n continue\n\n new_password = prompt_input(term=term,\n key='Password',\n hidden=hidden_char,\n width=password_max_length)\n if not new_password:\n # canceled\n return False\n\n user.password = new_password\n user.save()\n log.debug('password reset successful for user {0!r}.'\n .format(user.handle))\n echo(fixate_next(term))\n echo(u'{0} Password reset successful !'.format(sep_ok))\n return True\n\n echo(fixate_next(term))\n echo(u'{0} Too many authentication attempts.'.format(sep_bad))\n\n echo(fixate_next(term))\n echo(u'{0} Too many authentication attempts.'.format(sep_bad))", "def forgot_passwd_check(request, uidb64=None, token=None):\n assert uidb64 is not None and token is not None\n dc1_settings = DefaultDc().settings\n sms_registration = dc1_settings.SMS_REGISTRATION_ENABLED\n\n if sms_registration:\n set_password_form = SMSSendPasswordResetForm\n else:\n set_password_form = PasswordResetForm\n\n if request.method == 'POST':\n try:\n user = User.objects.get(id=urlsafe_base64_decode(uidb64))\n profile = user.userprofile\n except (ValueError, OverflowError, User.DoesNotExist):\n profile = None\n\n if profile and profile.email_token == token:\n # Email address is verified, we cant compare to token as register token is different to reset one.\n profile.email_token = ''\n profile.email_verified = True\n # This may look strange - setting the phone_verified before the user logs in. 
It is not :) We are sending\n # new password to phone number in profile, after the user logs in we would set phone_verified to True anyway\n if sms_registration:\n profile.phone_verified = True\n profile.save()\n\n return password_reset_confirm(\n request,\n uidb64=uidb64,\n token=token,\n template_name='gui/accounts/forgot_check.html',\n set_password_form=set_password_form,\n post_reset_redirect=reverse('forgot_check_done'),\n current_app='gui',\n extra_context={\n 'sms_registration': sms_registration,\n }\n )", "def reset_password(email):\n user = AuthUser.query.filter_by(email=email).first()\n if user is None:\n return False\n # Generate email with unique link\n msg = Message(\n \"Password Reset Link\",\n recipients=[user.email] \n )\n msg.body = \"Click on this link and following the instructions to reset your \"\n \"password\\n\\n%s%s?uid=%s-%s\" % (\n app.config['SITE_URI'],\n \"/reset/password/\",\n user.id,\n user.get_uid()\n )\n mail.send(msg)\n return True", "def password_reset(request):\n\n\tcontext_dict = {}\n\tif request.method == 'POST':\n\t\temail = request.POST.get('email')\n\t\tif email:\n\t\t\tuser = models.Teacher.objects.get(\n\t\t\t\tsoft_delete=False, user__email=email\n\t\t\t)\n\t\t\tif not user:\n\t\t\t\tcontext_dict[\"message\"] = \"Email ID does'nt exist, Enter Correct details\"\n\t\t\tmail = {\n\t\t\t\t'email': email,\n\t\t\t\t'domain': request.META['HTTP_HOST'],\n\t\t\t\t'site_name': 'Placement Portal',\n\t\t\t\t'uid': urlsafe_base64_encode(force_bytes(user.pk)),\n\t\t\t\t'user': user,\n\t\t\t\t'token': ''.join([random.choice(ascii_letters+digits) for i in range (128)]),\n\t\t\t\t'protocol': 'http',\n\t\t\t}\n\t\t\ttry:\n\t\t\t\treset_token = models.PasswordReset(\n\t\t\t\t\tuser=user,\n\t\t\t\t\ttoken=mail['token'],\n\t\t\t\t\ttoken_consumed=False,\n\t\t\t\t)\n\t\t\t\treset_token.save()\n\t\t\texcept Exception as e:\n\t\t\t\tprint (e)\n\t\t\tsubject_template_name = 'password_reset_email_subject.txt'\n\t\t\temail_template_name = 'password_reset_email.html'\n\t\t\tsubject = loader.render_to_string(subject_template_name, mail)\n\t\t\tsubject = ''.join(subject.splitlines())\n\t\t\temail_data = loader.render_to_string(email_template_name, mail)\n\t\t\tsend_mail(subject, email_data, DEFAULT_FROM_EMAIL, [email], fail_silently=False)\n\t\t\tcontext_dict[\"message\"] = \"Email has been sent to your registered Email ID with instructions.\"\n\treturn render(request, \"password_reset_form.html\", context_dict)", "def reset_password_request():\n form = ResetPasswordRequestForm()\n if form.validate_on_submit():\n try:\n user = User.query.filter_by(email=form.email.data).first_or_404()\n except Exception:\n flash('This Email ID is Not Registered', 'error')\n return render_template('password_reset_request.html',\n form=form), 400\n\n if user:\n send_password_reset_email(user)\n flash('Please check your email for a password reset link.',\n 'success')\n return render_template('post_pass_reset_request.html',\n title=\"Reset Password\")\n else:\n flash(\n 'Your email address must be confirmed \\\n before attempting a password reset.',\n 'error')\n return redirect(url_for('auth.login'))\n\n return render_template('password_reset_request.html', form=form), 400", "def passwordless():\n if current_app.config['DRIBDAT_NOT_REGISTER'] or \\\n not current_app.config['MAIL_SERVER']:\n flash(\"Passwordless login currently not possible.\", 'warning')\n return redirect(url_for(\"auth.login\", local=1))\n form = EmailForm(request.form)\n if not (form.is_submitted() and form.validate()):\n 
flash_errors(form)\n return redirect(url_for('auth.forgot'))\n # Continue with user activation\n flash(\n \"If your account exists, you will shortly receive \"\n + \"an activation mail. Check your Spam folder if you do not. \"\n + \"Then click the link in that e-mail to log into this application.\",\n 'success')\n a_user = User.query.filter_by(email=form.email.data).first()\n if a_user:\n # Continue with reset\n user_activation(a_user)\n else:\n current_app.logger.warn('User not found: %s' % form.email.data)\n # Don't let people spy on your address\n return redirect(url_for(\"auth.login\"))", "def reset(request):\r\n rdict = request.matchdict\r\n params = request.params\r\n\r\n # This is an initial request to show the activation form.\r\n username = rdict.get('username', None)\r\n activation_key = rdict.get('reset_key', None)\r\n user = ActivationMgr.get_user(username, activation_key)\r\n new_username = None\r\n\r\n if user is None:\r\n # just 404 if we don't have an activation code for this user\r\n raise HTTPNotFound()\r\n\r\n if 'code' in params:\r\n # This is a posted form with the activation, attempt to unlock the\r\n # user's account.\r\n username = params.get('username', None)\r\n activation = params.get('code', None)\r\n password = params.get('new_password', None)\r\n new_username = params.get('new_username', None)\r\n error = None\r\n\r\n if new_username:\r\n new_username = new_username.lower()\r\n\r\n # Check whether username exists or not. During signup request , a\r\n # record of current user is created with username as his email id\r\n # which is already checked for uniqueness. So when new_username is\r\n # equal to username ie the email id then no need to check for\r\n # uniqueness , but if new_username is something else it has to be\r\n # verified\r\n\r\n if username != new_username and \\\r\n UserMgr.get(username=new_username) is not None:\r\n # Set an error message to the template.\r\n error = \"Username already exists.\"\r\n elif not UserMgr.acceptable_password(password):\r\n # Set an error message to the template.\r\n error = \"Come on, pick a real password please.\"\r\n else:\r\n res = ActivationMgr.activate_user(username, activation, password)\r\n if res:\r\n # success so respond nicely\r\n AuthLog.reactivate(username, success=True, code=activation)\r\n\r\n # if there's a new username and it's not the same as our\r\n # current username, update it\r\n if new_username and new_username != username:\r\n try:\r\n user = UserMgr.get(username=username)\r\n user.username = new_username\r\n except IntegrityError:\r\n error = 'There was an issue setting your new username'\r\n else:\r\n AuthLog.reactivate(username, success=False, code=activation)\r\n error = ('There was an issue attempting to activate'\r\n 'this account.')\r\n\r\n if error:\r\n return {\r\n 'message': error,\r\n 'user': user\r\n }\r\n else:\r\n # Log the user in and move along.\r\n headers = remember(request, user.id, max_age=60 * 60 * 24 * 30)\r\n user.last_login = datetime.utcnow()\r\n\r\n # log the successful login\r\n AuthLog.login(user.username, True)\r\n\r\n # we're always going to return a user to their own /recent after a\r\n # login\r\n return HTTPFound(\r\n location=request.route_url(\r\n 'user_bmark_recent',\r\n username=user.username),\r\n headers=headers)\r\n\r\n else:\r\n LOG.error(\"CHECKING\")\r\n LOG.error(username)\r\n\r\n if user is None:\r\n # just 404 if we don't have an activation code for this user\r\n raise HTTPNotFound()\r\n\r\n LOG.error(user.username)\r\n LOG.error(user.email)\r\n 
return {\r\n 'user': user,\r\n }", "def reset_password(): \n \n form = ResetPasswordForm()\n if request.method == 'POST':\n if form.validate_on_submit():\n \n hashed_pw = bcrypt.hashpw(form.new_password.data.encode('utf-8'), bcrypt.gensalt())\n user = mongo.db.user.find_one({'username': form.username.data})\n \n if user and bcrypt.checkpw(request.form['passphrase'].encode('utf-8'), user['passphrase']):\n mongo.db.user.find_one_and_update({'username': form.username.data}, {'$set':{'hashed_password':hashed_pw}})\n \n flash(f'Password reset was successful, {form.username.data}, pleaselogin again with your new password.','success'\n )\n return redirect(url_for('login'))\n \n return render_template('pages/reset.html', title='Forgot Password', form=form)", "def post(self):\n data = request.get_json()\n user = actions.get_user_by_email(data['email'])\n html = '<p>To reset your password </p>'\n subject = 'Request for changing password, ' + user['username']\n actions.send_email(data['email'], user['username'], user['password'], subject,\n '/reset_password/', html, False)\n pass", "def reset_password():\n pass", "def user_reset_password(request, token):\n\n if request.user.is_authenticated():\n return redirect(settings.AFTER_LOGIN_REDIRECT_URL)\n\n form = ResetPasswordForm(request.POST or None)\n\n if request.method == \"POST\":\n if form.is_valid():\n user_auth = get_object_or_404(PasswordResetAuth, token=token)\n user = get_object_or_404(User, email=user_auth.email)\n\n if user_auth.choose_me is True:\n new_password = form.cleaned_data[\"new_password\"]\n user.set_password(new_password)\n user.save()\n\n user_auth.choose_me = False\n user_auth.save()\n return redirect(\"/login/\")\n\n error_message = \"* Either you are not an identified user or \"\\\n \"token has been expired. 
So please click on back.\"\n return render_to_response(\"login/reset_password.html\", {\n \"form\": form,\n \"error_message\": error_message\n }, context_instance=RequestContext(request))\n\n return render_to_response(\"login/reset_password.html\", {\n \"form\": form\n }, context_instance=RequestContext(request))", "def reset_password():\r\n key = request.args.get('key')\r\n if key is None:\r\n abort(403)\r\n userdict = {}\r\n try:\r\n userdict = signer.signer.loads(key, max_age=3600, salt='password-reset')\r\n except BadData:\r\n abort(403)\r\n username = userdict.get('user')\r\n if not username or not userdict.get('password'):\r\n abort(403)\r\n user = model.user.User.query.filter_by(name=username).first_or_404()\r\n if user.passwd_hash != userdict.get('password'):\r\n abort(403)\r\n form = ChangePasswordForm(request.form)\r\n if form.validate_on_submit():\r\n user.set_password(form.new_password.data)\r\n db.session.add(user)\r\n db.session.commit()\r\n login_user(user)\r\n flash(gettext('You reset your password successfully!'), 'success')\r\n return redirect(url_for('.signin'))\r\n if request.method == 'POST' and not form.validate():\r\n flash(gettext('Please correct the errors'), 'error')\r\n return render_template('/account/password_reset.html', form=form)", "def forgot_password(self, version):\n form=cgi.FieldStorage(\n fp=self.rfile,\n headers=self.headers,\n environ={'REQUEST_METHOD':'POST','CONTENT_TYPE':self.headers['Content-Type'],}\n )\n version=version.split('/')[0]\n host = self.headers['Host']\n\n data={'email':form['email'].value}\n user = UserServices()\n response_data = user.forgot(data,host,version)\n return response_data", "def reset_password():\n body = request.get_json()\n reset_token = body.get('reset_token')\n password = body.get('password')\n\n if not reset_token or not password:\n return jsonify(msg.MISSING_PARAMETER), 400\n\n user_email = decode_token(reset_token)['identity']\n is_changed = views.UserManagement().change_password(email=user_email, password=password)\n if not is_changed:\n return jsonify(msg.NO_DATA), 404\n\n send_email('[Shodita] Password reset successful', sender='shodita@shodita.com', recipients=[user_email],\n text_body='Password reset was successful', html_body='<p>Password reset was successful</p>')\n\n return jsonify(msg.SUCCESS), 200", "def change_password():\n form = PasswordResetForm()\n\n if form.validate_on_submit():\n # Update user\n current_user.password = crypto_manager.hash(form.password.data)\n\n try:\n correct = True\n db.session.commit()\n\n flash(_('Password updated correctly'), 'success')\n\n return redirect(url_for('admin.profile_edit'))\n\n except Exception:\n correct = False\n current_app.logger.exception('Failed to update user password')\n\n flash(_('Error updating password, contact an administrator'), 'error')\n\n return render_template('admin/profile/change_password.html', form=form)\n\n finally:\n if not correct:\n db.session.rollback()\n\n return render_template('admin/profile/change_password.html', form=form)", "def send_password_reset(user):\n _log('++ sending password reset email for: {} {}'.format(user.first_name, user.last_name))\n secret_string = ''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(20))\n\n # if local set the domain to localhost\n if ENV_DICT['ENVIRON'] == 'LOCAL':\n secret_link = 'http://localhost:8080/reset/{}/'.format(secret_string)\n # otherwise use the subdomain of the tenancy\n else:\n secret_link = 'http://{}.cpisearch.io/reset/{}/'.format(user.tenancy, 
secret_string)\n\n reset_link_object = PasswordResetLink(\n user_id=user.user_id,\n secret_link=secret_string,\n tenancy=user.tenancy,\n )\n db.session.add(reset_link_object)\n db.session.commit()\n send_email(\n to_email=user.email,\n subject='SuccessKit Password Reset',\n template_path='emails/password_reset_email.html',\n template_vars={\n 'user': user,\n 'secret_link': secret_link\n }\n )", "def request_password_reset_token():\n j = request.get_json(force=True)\n user_requested = j['user'].lower()\n\n # Disabled user accounts can not request for a new password.\n target_user = User.query.filter_by(mail=user_requested).first()\n\n if target_user is None:\n return Errors.UNKNOWN_USER.make_json_response(status.HTTP_400_BAD_REQUEST)\n\n if target_user.state == StateType.DEACTIVATED:\n return Errors.DEACTIVATED_USER.make_json_response(status.HTTP_400_BAD_REQUEST)\n\n target_user.generate_password_request_token()\n\n send_mail(target_user.mail, render_template(\"password/reset_password_mail.txt\",\n greeting=get_opening_greeting(target_user),\n wlink=\"{}/password/reset/{}\".format(\n app.config['BUZZN_BASE_URL'],\n target_user.password_reset_token\n )), 'Passwort zurücksetzen für Buzzn-App')\n\n db.session.commit()\n return '', status.HTTP_201_CREATED", "def reset_password(newpass, challenge):", "def GET_resetpassword(self, user, key):\r\n done = False\r\n if not key and request.referer:\r\n referer_path = request.referer.split(g.domain)[-1]\r\n done = referer_path.startswith(request.fullpath)\r\n elif not user:\r\n return self.abort404()\r\n return BoringPage(_(\"Reset password\"),\r\n content=ResetPassword(key=key, done=done)).render()", "def post(self):\n args = password_reset.parse_args()\n email = args.get('email')\n new_password = password_generator()\n\n validation_email = email_validation(email)\n if validation_email:\n return validation_email\n\n user = User.query.filter_by(email=email).first()\n if user:\n user.password = new_password\n user.save()\n response = {\n \"message\": \"Password has been reset\",\n \"status\": \"Reset password succesful!\",\n \"new_password\": new_password\n }\n return response, 200\n else:\n response = {\n 'message': 'User email does not exist, Please try again',\n 'status': 'Reset password failed!'\n }\n return response, 400", "def password_reset_confirm_wrapper(\r\n request,\r\n uidb36=None,\r\n token=None,\r\n):\r\n # cribbed from django.contrib.auth.views.password_reset_confirm\r\n try:\r\n uid_int = base36_to_int(uidb36)\r\n user = User.objects.get(id=uid_int)\r\n user.is_active = True\r\n user.save()\r\n except (ValueError, User.DoesNotExist):\r\n pass\r\n\r\n # tie in password strength enforcement as an optional level of\r\n # security protection\r\n err_msg = None\r\n\r\n if request.method == 'POST':\r\n password = request.POST['new_password1']\r\n if settings.FEATURES.get('ENFORCE_PASSWORD_POLICY', False):\r\n try:\r\n validate_password_length(password)\r\n validate_password_complexity(password)\r\n validate_password_dictionary(password)\r\n except ValidationError, err:\r\n err_msg = _('Password: ') + '; '.join(err.messages)\r\n\r\n # also, check the password reuse policy\r\n if not PasswordHistory.is_allowable_password_reuse(user, password):\r\n if user.is_staff:\r\n num_distinct = settings.ADVANCED_SECURITY_CONFIG['MIN_DIFFERENT_STAFF_PASSWORDS_BEFORE_REUSE']\r\n else:\r\n num_distinct = settings.ADVANCED_SECURITY_CONFIG['MIN_DIFFERENT_STUDENT_PASSWORDS_BEFORE_REUSE']\r\n err_msg = _(\"You are re-using a password that you have used recently. 
You must \"\r\n \"have {0} distinct password(s) before reusing a previous password.\").format(num_distinct)\r\n\r\n # also, check to see if passwords are getting reset too frequent\r\n if PasswordHistory.is_password_reset_too_soon(user):\r\n num_days = settings.ADVANCED_SECURITY_CONFIG['MIN_TIME_IN_DAYS_BETWEEN_ALLOWED_RESETS']\r\n err_msg = _(\"You are resetting passwords too frequently. Due to security policies, \"\r\n \"{0} day(s) must elapse between password resets\").format(num_days)\r\n\r\n if err_msg:\r\n # We have an password reset attempt which violates some security policy, use the\r\n # existing Django template to communicate this back to the user\r\n context = {\r\n 'validlink': True,\r\n 'form': None,\r\n 'title': _('Password reset unsuccessful'),\r\n 'err_msg': err_msg,\r\n }\r\n return TemplateResponse(request, 'registration/password_reset_confirm.html', context)\r\n else:\r\n # we also want to pass settings.PLATFORM_NAME in as extra_context\r\n extra_context = {\"platform_name\": settings.PLATFORM_NAME}\r\n\r\n if request.method == 'POST':\r\n # remember what the old password hash is before we call down\r\n old_password_hash = user.password\r\n\r\n result = password_reset_confirm(\r\n request, uidb36=uidb36, token=token, extra_context=extra_context\r\n )\r\n\r\n # get the updated user\r\n updated_user = User.objects.get(id=uid_int)\r\n\r\n # did the password hash change, if so record it in the PasswordHistory\r\n if updated_user.password != old_password_hash:\r\n entry = PasswordHistory()\r\n entry.create(updated_user)\r\n\r\n return result\r\n else:\r\n return password_reset_confirm(\r\n request, uidb36=uidb36, token=token, extra_context=extra_context\r\n )", "def password_reset(request):\n host = settings.TACC_USER_PORTAL_HOST\n return redirect(f\"{host}/password-reset?{urlencode(request.GET)}\")", "def forgot_passwd_done(request):\n return render(request, 'gui/note.html', {\n 'header': _('Password reset instructions!'),\n 'blocks': (\n _('We\\'ve emailed you instructions for setting your password. 
You should be receiving them shortly.'),\n _('If you don\\'t receive an email, please make sure you\\'ve entered the address you registered with, and '\n 'check your spam folder.'),\n )\n })", "def save(self, domain_override=None,\n subject_template_name='registration/password_reset_subject.txt',\n email_template_name='registration/password_reset_email.html',\n use_https=False, token_generator=default_token_generator,\n from_email=None, request=None,\n html_email_template_name=None):\n email = self.cleaned_data[\"email\"]\n User = get_user_model()\n active_users = User.objects.filter(email__iexact=email, is_active=True)\n for user in active_users:\n subject = _('Flisol - Restore your password')\n # send_email(\n # subject,\n # [user.email],\n # email_template_name,\n # {\n # 'email': user.email,\n # 'uid': urlsafe_base64_encode(force_bytes(user.pk)),\n # 'user': user,\n # 'token': token_generator.make_token(user),\n # 'protocol': settings.PROTOCOL,\n # },\n # )", "def password_reset_confirm(request, uidb64, token):\n uid = force_text(urlsafe_base64_decode(uidb64))\n user = User.objects.get(pk=uid)\n try:\n reset_form = ResetPasswordForm(instance=user)\n # urlsafe_base64_decode() decodes to bytestring on Python 3\n except (TypeError, ValueError, OverflowError, user.DoesNotExist):\n user = None\n if user is not None and default_token_generator.check_token(user, token):\n validlink = True\n title = ('Enter new password')\n if request.method == 'POST':\n if 'password-submit' in (request.POST):\n reset_form = ResetPasswordForm(request.POST,instance=user)\n password = request.POST.get(\"password_reset\", None)\n \n if reset_form.is_valid():\n user=reset_form.save(commit = False)\n user.save()\n return redirect('password_reset_complete')\n else:\n reset_form = ResetPasswordForm(instance=user)\n else:\n validlink = False\n reset_form = ResetPasswordForm(instance=user)\n title = ('Password reset unsuccessful')\n return redirect ('invalid_password_link')\n context = {\n 'reset_form': ResetPasswordForm,\n 'title': title,\n 'validlink': validlink,\n }\n return render(request, 'reset_confirm.html', context, {'reset_form': ResetPasswordForm})", "def send_pw_reset_email(user):\n token = user.get_token()\n message = Message(\n 'Reset Your Password',\n sender='storcwebsite@gmail.com',\n recipients=[user.email])\n message.body = f\"To verify reset your password, click the link \" \\\n f\"below:\\n\\n\" \\\n f\"{url_for('users.reset_password', token=token, _external=True)}\"\n mail.send(message)", "def post(self, request, token):\n form = PasswordResetForm(request.DATA)\n if form.is_valid():\n user_data = get_user_data(\n signing.loads(\n token,\n max_age=self.token_expires,\n salt=self.salt))\n if user_data:\n user_data.set_password(request.DATA['password1'])\n user_data.save()\n return render_to_response(\n 'registration/show_message.html',\n {\n 'title': \"Change successfully\",\n 'message': \"your password has Change successfully\"})\n return render_to_response(\n 'registration/show_message.html',\n {\n 'title': \"Sorry something wrong\",\n 'message': \"sorry try again to set new password\"})\n return render_to_response(\n 'registration/show_message.html',\n {\n 'title': \"Sorry something wrong\",\n 'message': \"sorry try again to set new password\"})", "def post(self):\n try:\n url = request.host_url + 'reset/password/'\n body = request.get_json()\n base_url = request.url_root\n email = body.get('email')\n\n if not email:\n raise SchemaValidationError\n\n user = User.objects.get(email=email)\n if not 
user:\n raise EmailDoesNotExistsError\n\n expires = datetime.timedelta(minutes=60)\n payload = {\"user_id\": str(user.id)}\n reset_token = create_access_token(payload, expires_delta=expires)\n\n return send_email('[Unboxit] Reset Your Password',\n sender='contact@tsantos.dev',\n recipients=[user.email],\n text_body=render_template(\n 'components/reset_password.txt',\n url=url + reset_token),\n html_body=render_template(\n 'components/reset_password.html',\n url=url + reset_token,\n first_name=user.first_name,\n base_url=base_url))\n except SchemaValidationError:\n raise SchemaValidationError\n except DoesNotExist:\n raise EmailDoesNotExistsError\n except Exception as e:\n raise InternalServerError", "def send_password_reset_email(user):\n\n token = user.get_password_token()\n reset_time=datetime.now()\n send_email('[SiteSurveyApp] Account password reset',\n recipients=[user.email],\n sender=app.config['MAIL_DEFAULT_SENDER'],\n text_body=render_template('auth/emails/reset_password.txt',\n user=user, token=token, reset_time=reset_time),\n html_body=render_template('auth/emails/reset_password.html',\n user=user, token=token, reset_time=reset_time))", "def password_req(request):\n next = request.POST.get('next',\n\t\t\t request.META.get('HTTP_REFERER',\n\t\t\t\t\t DEFAULT_REDIRECT))\n args = default_context(request, username=request.user.username, next=next)\n\n try:\n\tpassword = request.POST['password']\n\n\tpw1 = request.POST['pw1']\n\tpw2 = request.POST['pw2']\n except KeyError:\n\tpass\n else:\n\tif pw1 != pw2:\n\t args['mismatch'] = True\n\telif not request.user.check_password(password):\n\t args['error'] = True\n\telse:\n\t request.user.set_password(pw1)\n\t request.user.save()\n\t return HttpResponseRedirect(next)\n\n return render_to_response('registration/password.html', args)", "def password_reset(request):\r\n if request.method != \"POST\":\r\n raise Http404\r\n\r\n # Add some rate limiting here by re-using the RateLimitMixin as a helper class\r\n limiter = BadRequestRateLimiter()\r\n if limiter.is_rate_limit_exceeded(request):\r\n AUDIT_LOG.warning(\"Rate limit exceeded in password_reset\")\r\n return HttpResponseForbidden()\r\n\r\n form = PasswordResetFormNoActive(request.POST)\r\n if form.is_valid():\r\n form.save(use_https=request.is_secure(),\r\n from_email=settings.DEFAULT_FROM_EMAIL,\r\n request=request,\r\n domain_override=request.get_host())\r\n else:\r\n # bad user? 
tick the rate limiter counter\r\n AUDIT_LOG.info(\"Bad password_reset user passed in.\")\r\n limiter.tick_bad_request_counter(request)\r\n\r\n return JsonResponse({\r\n 'success': True,\r\n 'value': render_to_string('registration/password_reset_done.html', {}),\r\n })", "def forgot_password(self, username, relay_state=None):\n request = {\n 'username': username,\n 'relayState': relay_state\n }\n\n response = ApiClient.post_path(self, '/recovery/password', request)\n return Utils.deserialize(response.text, AuthResult)", "def reset_password(self, email,new_password):\n for user in self.users_list:\n if user['email'] == email:\n user['password'] = new_password\n return 'password reset was succesfull'\n continue\n return \"email provided does not match any user\"", "def login_reset():\n # Start with the currently logged in user\n\n if request.method == \"GET\":\n # In browser request that user wants to reset the password\n # Create a token\n # Send out an email\n #\n return flask.render_template('profile.html', name=session[\"user\"][\"label\"], email=session[\"user\"][\"email\"])\n\n if request.method == \"POST\":\n # In browser request that user wants to reset the password\n label = flask.request.form[\"label\"]\n passwd = flask.request.form[\"passwd\"]\n\n # Verify that the user is logged in or return\n if not session.has_key(\"user\"):\n return flask.Response('{\"error\" : \"User not logged in\" }')\n else:\n # Chagne the information in the session\n session[\"user\"][\"label\"] = label\n # Locate the record\n conn.register([model.User])\n dbobj = conn[current_app.config[\"CONFIGDB\"]]\n userdoc = dbobj[\"users\"].User.find_one({'_id' : ObjectId(session[\"user\"][\"id\"])})\n userdoc[\"passwd\"] = passwd\n userdoc[\"password_status\"] = \"ready\"\n userdoc[\"label\"] = label\n userdoc.validate()\n userdoc.save()\n\n return flask.Response('{\"success\" : \"\" }')", "def get(self, request, email=None):\n\n user = User.objects.filter(email=request.GET.get('email'))\n\n if user.count() == 1 and user.first() is not None:\n user = user.first()\n\n random_password = User.objects.make_random_password()\n user.set_password(random_password)\n user.save()\n\n message = \"\"\"Olá,\\nSua senha foi resetada, acesse a plataforma\n no link http://127.0.0.1/user/password e troque a\n senha\\nSua nova senha é:\\n {}\\nAtenciosamente,\n \\nEquipe Dream Rich.\"\"\".format(random_password)\n\n email = EmailMessage('Password reset',\n message, to=[user.email])\n email.send()\n\n return Response(dumps({'detail': 'email sent'}), status=200)\n\n return Response(dumps({'detail': 'user not found'}), status=404)", "def reset_request():\n if current_user.is_authenticated:\n return redirect('/home')\n form = RequestResetForm()\n if form.validate_on_submit():\n staff = Staff.query.filter_by(email=form.email.data).first()\n send_reset_email(staff)\n flash('An email has been sent with instructions to reset your password.', 'info')\n return redirect(url_for('login'))\n return render_template('reset_request.html', title='Reset Password',\n form=form)", "def test_45_password_reset_link(self):\r\n res = self.app.post('/account/forgot-password',\r\n data={'email_addr': self.user.email_addr},\r\n follow_redirects=True)\r\n assert (\"We don't have this email in our records. 
You may have\"\r\n \" signed up with a different email or used Twitter, \"\r\n \"Facebook, or Google to sign-in\") in res.data\r\n\r\n self.register()\r\n self.register(name='janedoe')\r\n self.register(name='google')\r\n self.register(name='facebook')\r\n jane = User.query.get(2)\r\n jane.twitter_user_id = 10\r\n google = User.query.get(3)\r\n google.google_user_id = 103\r\n facebook = User.query.get(4)\r\n facebook.facebook_user_id = 104\r\n db.session.add_all([jane, google, facebook])\r\n db.session.commit()\r\n with mail.record_messages() as outbox:\r\n self.app.post('/account/forgot-password',\r\n data={'email_addr': self.user.email_addr},\r\n follow_redirects=True)\r\n self.app.post('/account/forgot-password',\r\n data={'email_addr': 'janedoe@example.com'},\r\n follow_redirects=True)\r\n self.app.post('/account/forgot-password',\r\n data={'email_addr': 'google@example.com'},\r\n follow_redirects=True)\r\n self.app.post('/account/forgot-password',\r\n data={'email_addr': 'facebook@example.com'},\r\n follow_redirects=True)\r\n\r\n assert 'Click here to recover your account' in outbox[0].body\r\n assert 'your Twitter account to ' in outbox[1].body\r\n assert 'your Google account to ' in outbox[2].body\r\n assert 'your Facebook account to ' in outbox[3].body\r\n\r\n # Test with not valid form\r\n res = self.app.post('/account/forgot-password',\r\n data={'email_addr': ''},\r\n follow_redirects=True)\r\n msg = \"Something went wrong, please correct the errors\"\r\n assert msg in res.data, res.data", "def reset_password():\n json_data = request.get_json()\n user_email = json_data.get('email') or None\n\n if user_email is None:\n raise BadRequest(description=INCORRECT_RESET_PARAMS_MSG)\n\n user_account = db.session.query(UserAccount).filter(\n UserAccount.email == user_email).first()\n if user_account is None:\n raise BadRequest(description=INCORRECT_RESET_PARAMS_MSG)\n\n # Generate password hash\n temp_password = str(random.randint(10000,99999))\n update_user = {'password_hashed': get_hashed_password(temp_password)}\n user_account.update(**update_user)\n user_account.save()\n\n email.send('reset_password', user_email, temp_password)\n\n return {'status_code': 200, 'message': 'Password reset success!'}", "def reset_post():\n if g.session:\n # User is already authenticated\n return jsonify({'redirect': url_for('index.index')})\n\n form = request.values.get('form', default='email')\n token = request.values.get('token', default='')\n email = request.values.get('email', default='')\n password = request.values.get('password', default='')\n\n if form == 'password':\n try:\n user: User = db.session.query(User) \\\n .filter((User.password_token == token) & User.reset_active) \\\n .one()\n if user.is_reset_expired():\n return jsonify({'success': False, 'reason': 'expired'}), 401\n\n if len(password) < 8:\n return jsonify({'success': False, 'reason': 'password'}), 401\n\n user.set_password(password)\n db.session.commit()\n next_url = url_for('auth.reset_status', success=True)\n return jsonify({'success': True, 'redirect': next_url})\n except NoResultFound:\n return jsonify({'success': False, 'reason': 'token not found'}), 401\n else:\n try:\n user: User = db.session.query(User) \\\n .filter(User.email == email).one()\n user.reset_password()\n db.session.commit()\n\n reset_url = urllib.parse.urljoin(\n request.host_url,\n url_for('auth.reset_get', token=user.password_token))\n kwargs = {\n 'subject': gettext('Reset Password'),\n 'body': reset_url,\n 'recipients': [user.email]\n }\n 
mail.send_mail(**kwargs)\n next_url = url_for('auth.reset_status', sent=True)\n return jsonify({'success': True, 'redirect': next_url})\n except NoResultFound:\n return jsonify({'success': False, 'reason': 'email'}), 401", "def password_resetenter(request, uidb64=None, token=None):\n\n\tcontext_dict = {}\n\tif request.method == 'POST':\n\t\tassert uidb64 is not None and token is not None\n\t\tuid = urlsafe_base64_decode(uidb64)\n\t\tuser = models.Teacher.objects.get(\n\t\t\tsoft_delete=False, pk=uid\n\t\t)\n\t\tdb_user = user.user\n\t\treset_token = models.PasswordReset.objects.get(\n\t\t\ttoken=token, user=user\n\t\t)\n\t\ttoken_check = models.PasswordReset.objects.filter(\n\t\t\tuser=user, soft_delete=False, token_consumed=False,\n\t\t).exclude(token=token).first()\n\t\tupdate_fields = []\n\t\ttoken_check.token_consumed = True\n\t\tupdate_fields.append('token_consumed')\n\t\ttoken_check.soft_delete = True\n\t\tupdate_fields.append('soft_delete')\n\t\ttoken_check.save(update_fields=update_fields)\n\t\ttime_threshold = timezone.now() - reset_token.password_request_created_at\n\t\tif time_threshold > timedelta(minutes=30):\n\t\t\ttry:\n\t\t\t\tupdate_fields = []\n\t\t\t\treset_token.token_consumed = True\n\t\t\t\tupdate_fields.append('token_consumed')\n\t\t\t\treset_token.soft_delete = True\n\t\t\t\tupdate_fields.append('soft_delete')\n\t\t\t\treset_token.save(update_fields=update_fields)\n\t\t\texcept Exception as e:\n\t\t\t\tprint (e)\n\t\tif reset_token.user == user and reset_token.token == token:\n\t\t\tif reset_token.token_consumed == False and reset_token.soft_delete == False:\n\t\t\t\ttry:\n\t\t\t\t\tupdate_fields = []\n\t\t\t\t\treset_token.token_consumed = True\n\t\t\t\t\tupdate_fields.append('token_consumed')\n\t\t\t\t\treset_token.soft_delete = True\n\t\t\t\t\tupdate_fields.append('soft_delete')\n\t\t\t\t\treset_token.save(update_fields=update_fields)\n\t\t\t\texcept Exception as e:\n\t\t\t\t\tprint (e)\n\t\t\t\tform = AdminPasswordChangeForm(user=db_user, data=request.POST)\n\t\t\t\tif form.is_valid():\n\t\t\t\t\tform.save()\n\t\t\t\t\thistory = models.History(\n\t\t\t\t\t\tuser=user,\n\t\t\t\t\t\tactivity = \"\",\n\t\t\t\t\t\tactivity_type = \"Reset Password\"\n\t\t\t\t\t)\n\t\t\t\t\thistory.save()\n\t\t\t\t\tcontext_dict[\"message\"] = \"Password changed successfully\"\n\t\t\t\telse:\n\t\t\t\t\tcontext_dict[\"message\"] = \"Password not changed\"\n\t\t\telse:\n\t\t\t\tcontext_dict[\"message\"] = \"Link is no longer valid\"\n\treturn render(request, \"reset.html\", context_dict)", "def POST(self):\n session = web.ctx.session\n nav = get_nav_bar(session)\n data = web.input(reset_token = \"\", new_password=\"\")\n \n reset_password_colum = reset_password_form()\n \n # check each field is endered values.\n if not reset_password_colum.validates():\n return render.reset_password(nav, reset_password_form, \"All fields must be valid.\")\n \n try:\n # log ip information\n ip_addr = web.ctx[\"ip\"]\n accessed_path = web.ctx[\"fullpath\"]\n\n # query user's name (username) and token (extra secruity)\n token = data.reset_token\n username = search_for_user(token, ip_addr, accessed_path)\n #print(\"-\"*16)\n #print(username)\n \n #update token to null database\n result_update_token = update_token_to_null(username, token, ip_addr, accessed_path)\n print(\"-\" * 16 + \"updated!\")\n\n # generate new password\n new_salt = generate_salt()\n hashed_password = hashed_value(data.new_password, new_salt)\n hashed_password = new_salt + hashed_password\n\n # update password \n result_update_password = 
update_user_password(username, hashed_password, ip_addr, accessed_path )\n raise web.seeother(\"/\")\n except Exception as e:\n print(e)\n except:\n print(exit[0])\n return render.login(nav, reset_password_form, \"- Something went wrong!\")", "def login_resetrequest():\n if request.method == \"GET\":\n # In browser request that user wants to reset the password\n return flask.render_template('reset-request.html', message=\"Please reset the password\")\n\n if request.method == \"POST\":\n # Create a token\n email = flask.request.form[\"email\"]\n\n # Find if an account with that name exists\n conn.register([model.User])\n admindb = conn[current_app.config[\"CONFIGDB\"]]\n\n userdoc = admindb[\"users\"].User.find_one({\"name\" : email, \"type\" : \"passwd\"})\n if userdoc == None:\n # user not found\n return flask.Response('{\"error\" : \"User not found\"}')\n\n # First reset the password\n name = userdoc[\"label\"]\n emailto = userdoc[\"name\"]\n\n # Create accout and a random tocken\n userdoc[\"token\"] = bson.ObjectId()\n userdoc[\"password_status\"] = \"reset-request\"\n\n # May only be useful for some\n if \"password_ready\" in userdoc:\n del userdoc[\"password_ready\"]\n\n userdoc.validate()\n userdoc.save()\n\n # Create email\n emailfrom = current_app.config[\"EMAIL_FROM\"] \n\n body = \"Hello \" + name + \",\\n\\n\"\n body = body + \"You recently requested a password reset for your account at https://slide-atlas.org.\"\n body = body + \"\\n To complete the request operation please follow the link below- \\n\"\n body = body + \"\\n \" + url_for('.login_confirm', _external=True) + \"?token=\" + str(userdoc[\"token\"]) + \" \\n\"\n body = body + \"\\nIf clicking on the link doesn't work, try copying and pasting it into your browser.\\n\"\n body = body + \"\\nThis link will work only once, and will let you create a new password. 
\\n\"\n body = body + \"\\nIf you did not request password reset, please disregard this message.\\n\"\n body = body + \"\\nThank you,\\nThe SlideAtlas Administration Team\\n\"\n\n # Create a text/plain message\n msg = MIMEText(body)\n\n # me == the sender's email address\n # you == the recipient's email address\n msg['Subject'] = 'Password reset confirmation for slide-atlas.org'\n msg['From'] = emailfrom\n msg['To'] = emailto\n print msg\n s = smtplib.SMTP(current_app.config[\"SMTP\"])\n try:\n out = s.sendmail(emailfrom, [emailto], msg.as_string())\n except:\n return flask.Response(\"{\\\"error\\\" : \\\"Error sending email\\\"}\")\n\n s.quit()\n return flask.Response(\"{\\\"success\\\" : \\\"\" + str(out) + \"\\\"}\")", "def forgot_passwd_check_done(request):\n dc1_settings = DefaultDc().settings\n\n if dc1_settings.SMS_REGISTRATION_ENABLED:\n text_blocks = (_('Your password has been reset and send to your phone number via text message (SMS).'),)\n else:\n text_blocks = ()\n\n return render(request, 'gui/note.html', {\n 'header': _('Password reset!'),\n 'blocks': text_blocks,\n 'links': ({'label': 'You may go ahead and log in now.', 'url': reverse('login')},),\n })", "def change_password(request):\n\n form = ChangePasswordForm(user=request.user)\n context = {\n 'form': form,\n 'submit_button_text': _('Update password'),\n 'back_button_text': _('Cancel'),\n 'show_back_button': True,\n }\n # If this is a POST request then process the Form data\n if request.method == 'POST':\n # Create a form instance and populate it with data from the request (binding):\n form = ChangePasswordForm(request.POST, user=request.user)\n context.update({'form': form})\n # Check if the form is valid:\n if form.is_valid():\n user = request.user\n if not user.check_password(form.cleaned_data['old_password']):\n messages.error(request, _('Password was not changed! 
You typed your old password in incorrectly, please try again.'), extra_tags='alert alert-warning')\n else:\n # process the data in form.cleaned_data as required (here we just write it to the model due_back field)\n user.set_password(form.cleaned_data['new_password'])\n user.save()\n update_session_auth_hash(request, request.user)\n # redirect to a new URL:\n messages.success(request, _('Your password was changed.'), extra_tags='alert alert-success')\n form = ChangePasswordForm(user=request.user)\n context.update({'form': form})\n return render(request, 'change_password_form.html', context)\n\n\n return render(request, 'change_password_form.html', context)", "def forgot_password(self, password=None, verification_code=None):\n log.info(\"Forgot password request for user : \" + self.__username)\n path = 'forgotpassword'\n forgot_password_info = {\n 'user_name': self.__username,\n \"password\": password,\n \"verification_code\": verification_code\n }\n forgot_password_url = serverconfig.HOST + path\n try:\n log.debug(\"Forgot password request url : \" + forgot_password_url)\n response = requests.put(url=forgot_password_url,\n data=json.dumps(forgot_password_info),\n headers=self.__request_header,\n verify=configmanager.CERT_FILE)\n log.debug(\"Forgot password response : \" + response.text)\n response.raise_for_status()\n except requests.exceptions.SSLError:\n raise SSLError\n except requests.exceptions.ConnectionError:\n raise NetworkError\n except Exception:\n raise Exception(response.text)\n log.info(\"Changed password successfully.\")\n return True", "def ask_password_reset(request):\n output_data = {}\n\n # Here we do not send a JSON answer based on success or failure\n # in order to prevent attackers from knowing if email exists in db or not.\n\n if request.method == 'POST':\n\n email = request.POST.get('email')\n\n if not email:\n output_data['error_code'] = '1'\n output_data['error_details'] = errors_for_dev['1']\n return JsonResponse(\n output_data,\n status=status.HTTP_400_BAD_REQUEST\n )\n\n email = email.lower()\n\n try:\n user = User.objects.get(email=email)\n except exceptions.ObjectDoesNotExist:\n return JsonResponse(output_data)\n\n signer = TimestampSigner()\n timestamped_id = signer.sign(user.id)\n\n password_reset_url = \"%s%s\" % (\n settings.SITE_BASE_URL,\n reverse(set_new_password, args=(timestamped_id,))\n )\n\n send_password_reset_email(email, password_reset_url)\n\n return JsonResponse(output_data)\n\n else:\n\n output_data['error_code'] = '8'\n output_data['error_details'] = errors_for_dev['8']\n return JsonResponse(\n output_data,\n status=status.HTTP_400_BAD_REQUEST\n )", "def password_reset_confirm(request, uidb36=None, token=None,\n template_name='gallery/password_reset_confirm.html',\n token_generator=default_token_generator,\n set_password_form=SetPasswordForm,\n post_reset_redirect=None):\n assert uidb36 is not None and token is not None # checked by URLconf\n if post_reset_redirect is None:\n post_reset_redirect = reverse('django.contrib.auth.views.password_reset_complete')\n try:\n uid_int = base36_to_int(uidb36)\n except ValueError:\n raise HttpResponseNotFound\n\n user = get_object_or_404(authmodels.User, id=uid_int)\n context_instance = RequestContext(request)\n\n if token_generator.check_token(user, token):\n context_instance['validlink'] = True\n if request.method == 'POST':\n form = set_password_form(user, request.POST)\n if form.is_valid():\n # we can't use form.save b/c that will update the p/w on the\n # model object, we need to do it in LDAP\n if 
settings.USE_LDAP:\n ldapper = get_ldap_connection()\n dn = get_user_dn(user.username)\n new_password = request.POST.get('new_password1')\n ldapper.passwd_s(dn, None, new_password)\n ldapper.unbind_s()\n request.notifications.add(_('Password change successful.'))\n else:\n form.save()\n return HttpResponseRedirect(post_reset_redirect)\n else:\n form = set_password_form(None)\n else:\n context_instance['validlink'] = False\n form = None\n context_instance['form'] = form\n return render_to_response(template_name, context_instance=context_instance)", "def password_reset(request):\n try:\n with transaction.atomic():\n try:\n data = request.data\n data = validations_utils.email_validation(data) # Validates email id, it returns lower-cased email in data.\n user = validations_utils.user_validation_with_email(data['email'])\n except ValidationException as e: # Generic exception\n return Response(e.errors, status=e.status)\n current_site = get_current_site(request)\n domain = current_site.domain\n key = utils.create_reset_password_key(user.email)\n utils.send_reset_password_mail(user, key, domain) # Sends an email for resetting the password.\n return Response(messages.PASSWORD_RESET_LINK_SENT, status=status.HTTP_200_OK)\n except IntegrityError:\n return Response(messages.CAN_NOT_RESET_PASSWORD, status=status.HTTP_500_INTERNAL_SERVER_ERROR)", "def passwdForgot_create(self, data):\n\n\t\t# Verify fields\n\t\ttry: DictHelper.eval(data, ['email'])\n\t\texcept ValueError as e: return Services.Effect(error=(1001, [(f, \"missing\") for f in e.args]))\n\n\t\t# Look for the thrower by email\n\t\toThrower = Thrower.get(data['email'], index='email', limit=1)\n\t\tif not oThrower:\n\t\t\treturn Services.Effect(True)\n\n\t\t# Is there already a key in the thrower?\n\t\tif 'forgot' in oThrower and 'regenerate' not in data:\n\n\t\t\t# Is it not expired?\n\t\t\tif oThrower['forgot']['expires'] > int(time()):\n\t\t\t\treturn Services.Effect(True)\n\n\t\t# Update the thrower with a timestamp (for expiry) and the key\n\t\tsKey = StrHelper.random(32, '_0x')\n\t\toThrower['forgot'] = {\n\t\t\t\"expires\": int(time()) + 300,\n\t\t\t\"key\": sKey\n\t\t}\n\t\tif not oThrower.save(changes=False):\n\t\t\treturn Services.Effect(error=1103)\n\n\t\t# Get the domain config\n\t\tdConf = Conf.get(\"domain\")\n\n\t\t# Forgot email template variables\n\t\tdTpl = {\n\t\t\t\"key\": sKey,\n\t\t\t\"url\": \"%s://%s/#forgot=%s\" % (\n\t\t\t\tdConf['protocol'],\n\t\t\t\tdConf['primary'],\n\t\t\t\tsKey\n\t\t\t)\n\t\t}\n\n\t\t# Email the user the key\n\t\toEffect = Services.create('communications', 'email', {\n\t\t\t\"_internal_\": Services.internalKey(),\n\t\t\t\"html_body\": Templates.generate('email/forgot.html', dTpl, oThrower['locale']),\n\t\t\t\"subject\": Templates.generate('email/forgot_subject.txt', {}, oThrower['locale']),\n\t\t\t\"to\": data['email'],\n\t\t})\n\t\tif oEffect.errorExists():\n\t\t\treturn oEffect\n\n\t\t# Return OK\n\t\treturn Services.Effect(True)", "def resetPassword(self, email):\n\t\turl = \"https://habitica.com/api/v3/user/auth/reset-password\"\n\t\tpayload ={\"email\": email}\n\t\treturn(postUrl(url, self.credentials, payload))", "def password_reset_token_created(sender, reset_password_token, *args, **kwargs):\n # send an e-mail to the user\n context = {\n 'current_user': reset_password_token.user,\n 'username': reset_password_token.user.username,\n 'email': reset_password_token.user.email,\n # ToDo: The URL can (and should) be constructed using pythons built-in `reverse` method.\n 'reset_password_url': 
\"http://some_url/reset/?token={token}\".format(token=reset_password_token.key)\n }\n\n # render email text\n email_html_message = render_to_string('email/user_reset_password.html', context)\n email_plaintext_message = render_to_string('email/user_reset_password.txt', context)\n\n msg = EmailMultiAlternatives(\n # title:\n \"Password Reset for {title}\".format(title=\"Some website title\"),\n # message:\n email_plaintext_message,\n # from:\n \"noreply@somehost.local\",\n # to:\n [reset_password_token.user.email]\n )\n msg.attach_alternative(email_html_message, \"text/html\")\n msg.send()", "def reset_password(token):\n if current_user.is_authenticated:\n return redirect(url_for('main.index'))\n user = User.verify_reset_password_token(token)\n if not user:\n return redirect(url_for('main.index'))\n form = ResetPasswordForm()\n if form.validate_on_submit():\n user.set_password(form.password.data)\n user.email_confirmed = True\n db.session.commit()\n return render_template(\n 'successful_pass_reset.html', title=\"Password Reset\")\n return render_template('reset_password.html', title=\"Password Reset\",\n form=form), 417", "def handle_emails():\n email = request.data['email'].strip()\n user = User.query.filter_by(email=email).first()\n option = \\\n request.data['option'].strip() # have a <select> in the frontend\n token = s.dumps(email, salt='email-confirm')\n\n msg = Message('Reset password', sender=app.config['ADMINS'][0],\n recipients=[email])\n link = 'http://localhost:3000/confirm_email/{}/{}'\\\n .format(option, token)\n if user:\n msg.body = 'Your link is {}'.format(link)\n else:\n msg.body = 'You attempted to reset your password but you do not \\\n have an account with us. Please Sign Up and Log in. {}'\\\n .format('http://localhost:3000/register')\n\n mail.send(msg)\n return jsonify({\"message\":\"Please confirm your email.\"}), 201", "def send_reset_email(user, domain_override=None,\n subject_template_name='registration/password_reset_request_subject.txt',\n email_template_name=None, use_https=False,\n token_generator=default_token_generator, from_email=None, request=None,\n html_email_template_name='registration/password_reset_email.html', extra_email_context=None):\n if user.first_name != \"\":\n user_name = user.first_name.title()\n else:\n user_name = user.email\n\n context = {\n 'email': user.email,\n 'user_name': user_name,\n 'domain': settings.BASE_URL,\n 'site_name': \"Clubby\",\n 'uid': urlsafe_base64_encode(force_bytes(user.pk)),\n 'user': user,\n 'token': token_generator.make_token(user),\n 'protocol': 'https' if use_https else 'http',\n }\n send_mail(subject_template_name, email_template_name, context, from_email, user.email,\n html_email_template_name=html_email_template_name)", "def change_my_password():\n form = ChangePassword()\n if request.method == 'GET':\n return render_template('changemypassword.html', form=form)\n if request.method == 'POST' and form.validate_on_submit():\n username = form.username.data\n old_password = form.password.data\n new_password_hash = generate_password_hash(form.password1.data)\n account = db.check_item(\"username\", username)\n if account is not None:\n if check_password_hash(str(account['password_hash']), old_password):\n db.update_password_username(username, new_password_hash)\n flash('Your password has been changed')\n return redirect(url_for('login'))\n else:\n flash('Invalid username or password')\n return redirect(url_for('change_my_password'))\n else:\n flash('Invalid username or password')\n return 
redirect(url_for('change_my_password'))\n else:\n return render_template('changemypassword.html', form=form)", "def change_password_user():\n\n form = ChangePasswordForm(request.form)\n\n if form.validate_on_submit():\n\n if not request.form['old_password'] or request.form['old_password'] == '' :\n flash(\"No null or empty values are allowed.\",\"warn\")\n return render_template('user/change_password_user.html', title='Change Password', form=form)\n\n if not request.form['password'] or request.form['password'] == '' :\n flash(\"No null or empty values are allowed.\",\"warn\")\n return render_template('user/change_password_user.html', title='Change Password', form=form)\n\n if request.form['password'] != request.form['retype_password']:\n flash(\"Passwords are not the same!\",\"warn\")\n return render_template('user/change_password_user.html', title='Change Password', form=form)\n\n\n hashed_password = user_manager.hash_password(request.form['password'])\n\n # Modificamos el password del usuario\n current_user.password = hashed_password\n\n try:\n correct = True\n db.session.commit()\n except Exception as e:\n # Catch anything unknown\n print(e)\n correct = False\n finally:\n if not correct:\n # Cleanup and show error\n db.session.rollback()\n flash('Error modifying password of user, make sure username and email are unique','error')\n return render_template('user/change_password_user.html', title='Change Password', form=form)\n else:\n flash('Congratulations, update your password!','success')\n return redirect(url_for('user_ksat.show_user'))\n\n\n return render_template('user/change_password_user.html', title='Change Password', form=form)", "def reset_password(token):\n\n if not current_user.is_anonymous():\n return redirect(url_for(\"forum.index\"))\n\n form = ResetPasswordForm()\n if form.validate_on_submit():\n user = User.query.filter_by(email=form.email.data).first()\n expired, invalid, data = user.verify_reset_token(form.token.data)\n\n if invalid:\n flash((\"Your password token is invalid.\"), \"danger\")\n return redirect(url_for(\"auth.forgot_password\"))\n\n if expired:\n flash((\"Your password is expired.\"), \"danger\")\n return redirect(url_for(\"auth.forgot_password\"))\n\n if user and data:\n user.password = form.password.data\n user.save()\n flash((\"Your password has been updated.\"), \"success\")\n return redirect(url_for(\"auth.login\"))\n\n form.token.data = token\n return render_template(\"auth/reset_password.html\", form=form)", "def action_reset_password(self):\n # prepare reset password signup\n create_mode = bool(self.env.context.get('create_user'))\n\n # no time limit for initial invitation, only for reset password\n expiration = False if create_mode else now(days=+1)\n\n self.mapped('partner_id').signup_prepare(signup_type=\"reset\", expiration=expiration)\n\n # send email to users with their signup url\n template = False\n if create_mode:\n try:\n template = self.env.ref('loyalty.set_password_email', raise_if_not_found=False)\n except ValueError:\n pass\n if not template:\n template = self.env.ref('loyalty.reset_password_email')\n assert template._name == 'mail.template'\n\n template_values = {\n 'email_to': '${object.email|safe}',\n 'email_cc': False,\n 'auto_delete': True,\n 'partner_to': False,\n 'scheduled_date': False,\n }\n template.write(template_values)\n\n for user in self:\n if not user.email:\n raise UserError(_(\"Cannot send email: user %s has no email address.\") % user.name)\n with self.env.cr.savepoint():\n 
template.with_context(lang=user.lang).send_mail(user.id, force_send=True, raise_exception=True)\n _logger.info(\"Password reset email sent for user <%s> to <%s>\", user.login, user.email)", "def change_password(request):\n\n\temp = models.Teacher.objects.get(user=request.user)\n\tcontext_dict = {}\n\tif request.method == 'POST':\n\t\tform = AdminPasswordChangeForm(user=request.user, data=request.POST)\n\t\tif form.is_valid():\n\t\t\tform.save()\n\t\t\tupdate_session_auth_hash(request, form.user)\n\t\t\tcontext_dict[\"message\"] = \"Password changed successfully\"\n\t\t\thistory = models.History(\n\t\t\t\tuser=emp,\n\t\t\t\tactivity=\"\",\n\t\t\t\tactivity_type=\"Changed password\"\n\t\t\t)\n\t\t\thistory.save()\n\t\telse:\n\t\t\tcontext_dict[\"message\"] = \"Password not changed\"\n\treturn render(request, \"changePassword.html\", context_dict)", "def change_password(request):\n if request.method == 'POST':\n form = PasswordChangeForm(request.user, request.POST)\n if form.is_valid():\n user = form.save()\n update_session_auth_hash(request, user)\n messages.success(request, 'Updated password!')\n return redirect('profile')\n else:\n form = PasswordChangeForm(request.user)\n return render(request, 'accounts/forms.html', {\n 'form': form\n })", "def save(self, domain_override=None,\r\n subject_template_name='registration/password_reset_subject.txt',\r\n email_template_name='registration/password_reset_email.html',\r\n use_https=False, token_generator=default_token_generator,\r\n from_email=None, request=None):\r\n from django.core.mail import send_mail\r\n UserModel = get_user_model()\r\n email = self.cleaned_data[\"email\"]\r\n username = self.cleaned_data[\"username\"]\r\n user = User.objects.get(username__exact=username)\r\n\r\n if user.is_active and user.has_usable_password():\r\n # Make sure that no email is sent to a user that actually has\r\n # a password marked as unusable\r\n if not domain_override:\r\n current_site = get_current_site(request)\r\n site_name = current_site.name\r\n domain = current_site.domain\r\n else:\r\n site_name = domain = domain_override\r\n c = {\r\n 'email': user.email,\r\n 'domain': domain,\r\n 'site_name': site_name,\r\n 'uid': urlsafe_base64_encode(force_bytes(user.pk)),\r\n 'user': user,\r\n 'token': token_generator.make_token(user),\r\n 'protocol': 'https' if use_https else 'http',\r\n }\r\n subject = loader.render_to_string(subject_template_name, c)\r\n # Email subject *must not* contain newlines\r\n subject = ''.join(subject.splitlines())\r\n email = loader.render_to_string(email_template_name, c)\r\n send_mail(subject, email, from_email, [user.email])", "def post(self, request):\n import pdb\n pdb.set_trace()\n form = ConfirmPasswordForm(request.POST)\n token = request.GET.get('token')\n if not token:\n raise Http404('Tocken not found.')\n if not form.is_valid():\n import pdb\n pdb.set_trace()\n return render(request, 'user_registrations/set_password.html', {'form': form, 'token': token, 'errors': form.errors})\n token_obj = PasswordResetTokens.objects.filter(token=token)\n if not token_obj:\n raise Http404('Fake token supplied.')\n password_1 = form.cleaned_data.get('password_1')\n user = token_obj[0].user\n user.set_password(password_1)\n user.save()\n token_obj[0].delete()\n return HttpResponseRedirect(reverse('login'))", "def send_restore_password_email(user_pk):\n user = User.objects.get(pk=user_pk)\n type = 'restore_password'\n token = token_generation(user, type)\n subject = 'Update your password'\n from_email = 'Facebook <Facebook.com>'\n 
content = render_to_string(\n 'users/restore_password.html', {'token': token, 'user': user})\n msg = EmailMultiAlternatives(subject, content, from_email, [user.email])\n msg.attach_alternative(content, 'text/html')\n msg.send()", "def requestPwdReminder(self, email=None, acctName=None):\n assert acctName or email\n assert not (acctName and email)\n data = {}\n if email is not None:\n data['email'] = email\n else:\n data['accountName'] = acctName\n return self.talk('forgotPassword', data)", "def password_change_view(request):\n extra_context = {'title': _('Current user password change')}\n\n if request.user.user_options.block_password_change:\n messages.error(\n request, _(\n 'Changing the password is not allowed for this account.'\n )\n )\n return HttpResponseRedirect(reverse(settings.HOME_VIEW))\n\n return password_change(\n request, extra_context=extra_context,\n template_name='appearance/generic_form.html',\n post_change_redirect=reverse('authentication:password_change_done'),\n )", "def validate_password_reset_link(request, token):\n\n if request.method == \"GET\":\n passwordResetTokenGenerator = PasswordResetTokenGenerator()\n id = PasswordResetTokenGenerator.get_token_value(passwordResetTokenGenerator, token)\n\n if id != None:\n id = int(id)\n\n if User.objects.filter(id = id).exists():\n user = User.objects.get(id = id)\n request.session['user'] = user.username\n\n return render(request, 'app/referee/change_forgot_password.html', {\n 'title':'Change Password',\n 'user': user.username\n })\n else: # the user is invalid\n return redirect(reverse(URL_BAD_REQUEST))\n else: # either the link is expired or invalid\n return redirect(reverse(URL_BAD_REQUEST))\n else:\n return redirect(reverse(URL_BAD_REQUEST))", "def forgot(request):\n assert isinstance(request, HttpRequest) \n return HttpResponse(\"<ul><li><a href='/contact'> you need to contact your librarian to reset your password </a></li><li><a href='/'> Return to home to login or register</a></li></ul>\", content_type=\"text/html\"\n )", "def test_sendPasswordResetEmail(self, testUser):\n with mail.record_messages() as outbox:\n testUser.send_password_reset_email()\n assert len(outbox) == 1\n msg = outbox[0]\n assert \"jjones@yahoo.com\" in msg.recipients\n assert msg.subject == 'Ask Your Peeps: Password Reset'\n assert 'To reset your password, please paste the below link into'\\\n ' your browser' in msg.body", "def reset_password(request):\r\n params = request.params\r\n\r\n # now also load the password info\r\n current = params.get('current_password', None)\r\n new = params.get('new_password', None)\r\n\r\n # if we don't have any password info, try a json_body in case it's a json\r\n # POST\r\n if current is None and new is None:\r\n params = request.json_body\r\n current = params.get('current_password', None)\r\n new = params.get('new_password', None)\r\n\r\n user_acct = request.user\r\n\r\n if not UserMgr.acceptable_password(new):\r\n request.response.status_int = 406\r\n return _api_response(request, {\r\n 'username': user_acct.username,\r\n 'error': \"Come on, let's try a real password this time\"\r\n })\r\n\r\n # before we change the password, let's verify it\r\n if user_acct.validate_password(current):\r\n # we're good to change it\r\n user_acct.password = new\r\n return _api_response(request, {\r\n 'username': user_acct.username,\r\n 'message': \"Password changed\",\r\n })\r\n else:\r\n request.response.status_int = 403\r\n return _api_response(request, {\r\n 'username': user_acct.username,\r\n 'error': \"There was a typo 
somewhere. Please check your request\"\r\n })", "def test_forgot_password_link_in_sign_in_page_redirects_to_reset_password_page(self):\n self.browser.get(self.warno_url)\n self.browser.find_element_by_link_text(\"Sign In\").click()\n contents = self.browser.find_element_by_class_name(\"sub-title\")\n self.assertTrue(\"Sign In\" in contents.text, \"Redirected page's subtitle did not contain 'Sign In'\")\n\n self.browser.find_element_by_link_text(\"Forgot your Password?\").click()\n contents = self.browser.find_element_by_class_name(\"sub-title\")\n self.assertTrue(\"Reset Password\" in contents.text,\n \"Redirected page's subtitle did not contain 'Reset Password'\")", "def send_password_reset_mail(email, token):\n print(\"reset password\")\n url = f\"{settings.SITE_URL}/reset-password?email={email}&token={token}\"\n SUBJECT = \"Reset Password Request\"\n # The HTML body of the email.\n body = \"\"\"\n <html>\n <head></head>\n <body>\n <p>Here is your password reset link:</p>\n <p><a href='{0}'>{1}</a></p>\n </body>\n </html>\n \"\"\".format(url, url)\n send_mail(SUBJECT, body, email)", "def password_reset_confirm(request, uidb64=None, token=None,\n template_name='registration/password_reset_confirm.html',\n token_generator=default_token_generator,\n set_password_form=SetPasswordForm,\n post_reset_redirect=None,\n current_app=None, extra_context=None):\n UserModel = get_user_model()\n assert uidb64 is not None and token is not None # checked by URLconf\n if post_reset_redirect is None:\n post_reset_redirect = reverse('session:password_reset_complete')\n else:\n post_reset_redirect = resolve_url(post_reset_redirect)\n try:\n # urlsafe_base64_decode() decodes to bytestring on Python 3\n uid = force_text(urlsafe_base64_decode(uidb64))\n user = UserModel._default_manager.get(pk=uid)\n except (TypeError, ValueError, OverflowError, UserModel.DoesNotExist):\n user = None\n\n if user is not None and token_generator.check_token(user, token):\n validlink = True\n title = _('Enter new password')\n if request.method == 'POST':\n form = set_password_form(user, request.POST)\n if form.is_valid():\n form.save()\n return HttpResponseRedirect(post_reset_redirect)\n else:\n form = set_password_form(user)\n else:\n validlink = False\n form = None\n title = _('Password reset unsuccessful')\n context = {\n 'form': form,\n 'title': title,\n 'validlink': validlink,\n }\n if extra_context is not None:\n context.update(extra_context)\n\n if current_app is not None:\n request.current_app = current_app\n\n return TemplateResponse(request, template_name, context)", "def test_allow_all_password_reuse(self):\r\n student_email, _ = self._setup_user()\r\n user = User.objects.get(email=student_email)\r\n\r\n err_msg = 'You are re-using a password that you have used recently.'\r\n\r\n token = default_token_generator.make_token(user)\r\n uidb36 = int_to_base36(user.id)\r\n\r\n # try to do a password reset with the same password as before\r\n resp = self.client.post('/password_reset_confirm/{0}-{1}/'.format(uidb36, token), {\r\n 'new_password1': 'foo',\r\n 'new_password2': 'foo'\r\n }, follow=True)\r\n\r\n self.assertNotIn(\r\n err_msg,\r\n resp.content\r\n )", "def reset_pass(key):\n form = NewPasswordForm()\n form.key.data = key\n\n if form.validate_on_submit():\n form.user.set_password(form.password.data)\n db.session.delete(form.pw_reset)\n db.session.commit()\n\n flash('Your password has been successfully reset', 'alert-success')\n login_user(form.user)\n return redirect(url_for('default.home'))\n else:\n 
flash_form_errors(form)\n form.key.data = key\n # NOTE: This render_template is causing a 404\n return render_template('reset_pass.html', form=form, key=key)", "def forgottenInfo():\n\n for accountNo, userDetail in databaseOfAccounts.items():\n try:\n forgottenInfo = int(input('Forgotten account number enter 1.\\nForgotten password enter 2.\\n'))\n except ValueError:\n print('Input was not a number')\n forgottenInfo()\n if (forgottenInfo == 1):\n print('To validate it is you.')\n email= input('Please enter your email?\\n')\n lastName = input('Please enter your last name?\\n')\n password = input('please enter your password?\\n')\n \n if (email == userDetail[0]):\n if (lastName == userDetail[2]):\n if(password == userDetail[3]):\n print('Your Account Number is %d.\\nPlease keep it safe.' %accountNo)\n login()\n\n elif (forgottenInfo == 2):\n print('To validate it is you.')\n accountNumber = int(input('Please enter your acount number?'))\n email= input('Please enter your email?\\n')\n lastName = input('Please enter your last name?\\n')\n if (accountNumber == accountNo):\n if (email == userDetail[0]):\n if (lastName == userDetail[2]):\n print('You are validated.\\nPlease enter your new password')\n newPass = input('Please enter your new password?\\n')\n reNewPass = input('Please repeat your new password?\\n')\n if (newPass == reNewPass):\n userDetail[3] = newPass\n print('Your password has be reset. Please keep it safe.') \n login()\n\n else:\n print('Invalid option')\n forgottenInfo()", "def reset_password():\n if request.method == 'POST':\n email = request.json.get('email')\n new_password = request.json.get('new_password')\n if len(new_password.strip()) < 4:\n return make_response(jsonify(\n {'message': 'password too short'}\n )), 409\n user = User.query.filter_by(email=email).first()\n if user:\n user.password_hash = generate_password_hash(new_password)\n user.save_user()\n return make_response(jsonify(\n {\n 'message': 'password reset successful',\n 'your new password': new_password\n }\n )), 201\n return make_response(jsonify(\n {'message': 'Wrong email, please provide a valid email and try again'}\n )), 401\n return None", "def change_password(request):\n if request.method == 'POST':\n form = PasswordChangeForm(user=request.user, data=request.POST)\n if form.is_valid():\n form.save()\n return HttpResponseRedirect(reverse('view-profile', args=[request.user.id]))\n else:\n print \"form not valid\"\n else:\n form = PasswordChangeForm(user=request.user)\n\n return render(request, 'woofer/show_form.html', {\n 'form' : form,\n 'message' : None,\n 'form_action' : reverse('change-password'),\n 'title' : \"Change Password\"\n })", "def change_password():\n\n from .forms import ChangeCredentialsForm\n\n username = current_user.get_id()\n form = ChangeCredentialsForm(request.form)\n\n if form.validate_on_submit():\n logger.info(username + \" wants to change something.\")\n if request.form['username'] != username:\n logger.info(\"User \" + username + \" wants to change the username.\")\n app.rename_user(username, request.form['username'],\n request.form['newPassword1'])\n else:\n logger.info(\"Changing password of user \" + username + \".\")\n app.add_user_and_password(request.form['username'],\n request.form['newPassword1'])\n\n logger.info(\"Successfully changed credentials of \"\n + username + '.')\n return redirect(url_for('home'))\n\n else:\n return render_template('change-credentials.html',\n form=form,\n username=username)", "def action_wx_user_reset_password(self):\n # prepare reset password 
signup\n create_mode = bool(self.env.context.get('create_user'))\n\n # no time limit for initial invitation, only for reset password\n expiration = False if create_mode else now(days=+1)\n\n self.mapped('partner_id').signup_prepare(signup_type=\"reset\", expiration=expiration)\n\n # send email to users with their signup url\n template = False\n if create_mode:\n try:\n template = self.env.ref('auth_signup.set_password_email', raise_if_not_found=False)\n except ValueError:\n pass\n if not template:\n template = self.env.ref('auth_signup.reset_password_email')\n assert template._name == 'mail.template'\n\n template_values = {\n 'email_to': '${object.email|safe}',\n 'email_cc': False,\n 'auto_delete': True,\n 'partner_to': False,\n 'scheduled_date': False,\n }\n template.write(template_values)\n\n for user in self:\n with self.env.cr.savepoint():\n if not user.wx_user_id:\n raise UserError(\"用户没有绑定微信,不能发送微信重置密码\")\n logging.info(\"密码重置OK.\")\n self.wx_reset_password(user)\n # template.with_context(lang=user.lang).send_mail(user.id, force_send=True, raise_exception=True)\n _logger.info(\"Password reset email sent for user <%s> to <%s>\", user.login, user.email)", "def do_password(token):\n password_reset_token = token\n requested_password = request.form['password']\n requested_password_repeat = request.form['passwordRepeat']\n\n # Only pending states can be used.\n target_user = User.query.filter_by(\n password_reset_token=password_reset_token).first()\n\n if target_user is None:\n return Response(render_template('password/failure.html',\n message=('Unbekannter token. Stellen '\n 'sie sicher, dass Sie nicht mehrfach '\n 'eine Passwortzurücksetzung '\n 'angefordert haben und nehmen sie '\n 'immer die aktuelle.')))\n\n if not target_user.state == StateType.PASSWORT_RESET_PENDING:\n return Response(render_template('password/failure.html',\n message='User has no pending password reset.'))\n\n if not requested_password == requested_password_repeat:\n return Response(render_template('password/request.html',\n passwordResetToken=token,\n message='Passwörter stimmen nicht überein.'))\n\n if not target_user.check_password_length(requested_password):\n return Response(render_template('password/request.html',\n passwordResetToken=token,\n message=('Passwort zu kurz. Das '\n 'Passwort muss mindestens {} '\n 'Zeichen haben').format(PASSWORD_MIN_LENGTH)))\n\n if not target_user.check_password_format(requested_password):\n return Response(render_template('password/request.html',\n passwordResetToken=token,\n message='Falsches Passwort Format. 
Das '\n 'Passwort muss mindestens eine Ziffer enthalten.'))\n\n target_user.set_password(requested_password)\n target_user.state = StateType.ACTIVE\n db.session.commit()\n\n return Response(render_template('password/success.html'))", "def reset_password(self):\n self.password = passwordResetter(self.user_id, self.password)", "def password_reset(*args, **kwargs):\n kwargs['password_reset_form'] = CustomPasswordResetForm\n return django_password_reset(*args, **kwargs)", "def test_user_changed_password(self):\n\n form_data = {\n 'token': self.token.key,\n 'password': 'aaaaaa',\n 'password_confirmation': 'aaaaaa'\n }\n form = ResetPasswordForm(form_data)\n form.submit()\n self.user.refresh_from_db()\n self.assertTrue(self.user.check_password(form_data['password']))", "def send_reset_email(staff):\n token = staff.get_reset_token()\n msg = Message('Password Reset Request', \n sender='NoReplyBloodBank@my.unt.edu', \n recipients=[staff.email])\n msg.body = f\"\"\"To reset your password, visit the following link:\n{url_for('reset_token', token=token, _external=True)}\nIf you did not make this request, then simply record this email and no changes will be made.\"\"\"\n try:\n mail.send(msg)\n except Exception as e:\n print(e)" ]
[ "0.8209941", "0.7987478", "0.7894088", "0.7582648", "0.7481974", "0.744047", "0.7361025", "0.73203427", "0.7314185", "0.7305841", "0.73045814", "0.72920185", "0.72777385", "0.71073365", "0.7094137", "0.703783", "0.7018474", "0.7012398", "0.69860375", "0.6957978", "0.6949704", "0.68838483", "0.6866337", "0.68402207", "0.6823861", "0.68113786", "0.6800302", "0.6790208", "0.67831", "0.67782396", "0.6752748", "0.6740841", "0.6729263", "0.66978884", "0.66755813", "0.6656109", "0.66557646", "0.6632938", "0.6605264", "0.65909576", "0.657914", "0.65781873", "0.6511453", "0.6507156", "0.6506599", "0.649186", "0.6483807", "0.6472642", "0.64686507", "0.64678127", "0.6455622", "0.6450114", "0.64497906", "0.6447819", "0.6439584", "0.64247566", "0.6421619", "0.6414159", "0.6399165", "0.6376913", "0.637331", "0.6371223", "0.63663495", "0.634945", "0.63367534", "0.63325787", "0.63263035", "0.63241345", "0.6311479", "0.6303956", "0.6300649", "0.6291297", "0.62827003", "0.62774587", "0.6262563", "0.62565297", "0.62560415", "0.62552327", "0.62547743", "0.62527925", "0.62488663", "0.62345016", "0.6226009", "0.6224681", "0.62152684", "0.6198875", "0.6198869", "0.6194424", "0.618839", "0.61833256", "0.618278", "0.6175458", "0.6154241", "0.6152636", "0.61461335", "0.61410064", "0.6129786", "0.61250806", "0.6122659", "0.6100668" ]
0.7435538
6
Allows the user to change his/her password.
def recover_req(request):
    query_string = request.META['QUERY_STRING']
    query_args = parse_qs(get_encrypted_query_string(query_string, settings.SECRET_KEY))
    template_map = dict(META = request.META,
                        enc = request.GET['enc'],
                        hmac = request.GET['hmac'],
                        root = settings.ROOT_URL,
                        media = settings.MEDIA_URL)
    if request.POST and 'user' in query_args:
        username = query_args['user'][0]
        user = User.objects.get(username = username)
        pw1 = request.POST['pw1']
        pw2 = request.POST['pw2']
        if pw1 != pw2:
            template_map['mismatch'] = True
        elif not is_signed_query_string_valid(settings.SECRET_KEY + user.password, query_string):
            template_map['error'] = True
        else:
            user.set_password(pw1)
            user.save()
            user = authenticate(username = username, password = pw1)
            login(request, user)
            return HttpResponseRedirect('..')
    return render_to_response('registration/recover.html', template_map)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def change_password(self, new_pass):\n self.manager.change_user_password(self, new_pass)", "def ChangePassword():\n if self.ChangePassword():\n # Update successful, return to main screen\n self.confirm_pass.set('')\n self.password.set('')\n Return()\n else:\n return", "def change_password(change_account):\n change_data(change_account, changed_data='password')", "def view_update_user(self, user, new_pw, old_pw):\r\n user.realm._checker.passwd(user.userID, new_pw, old_pw)", "def change_user_password(self, instance, user, new_pass):\n return instance.change_user_password(user, new_pass)", "def change_password(self):\n self.test_user.set_password(self.create_user_data()['password1'])\n self.test_user.save()", "def change_password(self, new_password):\n dev = self.nearest_pandevice()\n self.password_hash = dev.request_password_hash(new_password)\n self.update(\"password_hash\")", "def change_user_password(self, user, new_pass):\n return self.update(user, password=new_pass)", "def change_password(request):\n\n\temp = models.Teacher.objects.get(user=request.user)\n\tcontext_dict = {}\n\tif request.method == 'POST':\n\t\tform = AdminPasswordChangeForm(user=request.user, data=request.POST)\n\t\tif form.is_valid():\n\t\t\tform.save()\n\t\t\tupdate_session_auth_hash(request, form.user)\n\t\t\tcontext_dict[\"message\"] = \"Password changed successfully\"\n\t\t\thistory = models.History(\n\t\t\t\tuser=emp,\n\t\t\t\tactivity=\"\",\n\t\t\t\tactivity_type=\"Changed password\"\n\t\t\t)\n\t\t\thistory.save()\n\t\telse:\n\t\t\tcontext_dict[\"message\"] = \"Password not changed\"\n\treturn render(request, \"changePassword.html\", context_dict)", "def change_my_password():\n form = ChangePassword()\n if request.method == 'GET':\n return render_template('changemypassword.html', form=form)\n if request.method == 'POST' and form.validate_on_submit():\n username = form.username.data\n old_password = form.password.data\n new_password_hash = generate_password_hash(form.password1.data)\n account = db.check_item(\"username\", username)\n if account is not None:\n if check_password_hash(str(account['password_hash']), old_password):\n db.update_password_username(username, new_password_hash)\n flash('Your password has been changed')\n return redirect(url_for('login'))\n else:\n flash('Invalid username or password')\n return redirect(url_for('change_my_password'))\n else:\n flash('Invalid username or password')\n return redirect(url_for('change_my_password'))\n else:\n return render_template('changemypassword.html', form=form)", "def change_password():\n\n from .forms import ChangeCredentialsForm\n\n username = current_user.get_id()\n form = ChangeCredentialsForm(request.form)\n\n if form.validate_on_submit():\n logger.info(username + \" wants to change something.\")\n if request.form['username'] != username:\n logger.info(\"User \" + username + \" wants to change the username.\")\n app.rename_user(username, request.form['username'],\n request.form['newPassword1'])\n else:\n logger.info(\"Changing password of user \" + username + \".\")\n app.add_user_and_password(request.form['username'],\n request.form['newPassword1'])\n\n logger.info(\"Successfully changed credentials of \"\n + username + '.')\n return redirect(url_for('home'))\n\n else:\n return render_template('change-credentials.html',\n form=form,\n username=username)", "def change_password():\n form = PasswordResetForm()\n\n if form.validate_on_submit():\n # Update user\n current_user.password = crypto_manager.hash(form.password.data)\n\n try:\n correct = 
True\n db.session.commit()\n\n flash(_('Password updated correctly'), 'success')\n\n return redirect(url_for('admin.profile_edit'))\n\n except Exception:\n correct = False\n current_app.logger.exception('Failed to update user password')\n\n flash(_('Error updating password, contact an administrator'), 'error')\n\n return render_template('admin/profile/change_password.html', form=form)\n\n finally:\n if not correct:\n db.session.rollback()\n\n return render_template('admin/profile/change_password.html', form=form)", "def set_password(self, new_password):\n super(Mafiasi, self).set_password(new_password)\n self.new_password = new_password", "def change_password(request):\n if request.method == 'POST':\n form = PasswordChangeForm(request.user, request.POST)\n if form.is_valid():\n user = form.save()\n update_session_auth_hash(request, user)\n messages.success(request, 'Updated password!')\n return redirect('profile')\n else:\n form = PasswordChangeForm(request.user)\n return render(request, 'accounts/forms.html', {\n 'form': form\n })", "def edit_password():\n form = EditPasswordForm()\n\n if request.method == 'POST' and form.validate():\n\n user = Users.query.filter_by(id=current_user.id).first()\n\n if not user.check_password(form.old_password.data):\n flash('Incorrect old password', 'warning')\n return redirect(url_for('auth.edit_password'))\n\n user.set_password(form.new_password.data)\n\n try:\n db.session.commit()\n flash('Your password has been changed.', 'success')\n except IntegrityError:\n db.session.rollback()\n flash('ERROR! Unable to change your password, please check your details are correct and try again.',\n 'warning')\n\n return redirect(url_for('auth.account'))\n\n return render_template('auth/edit_account/edit_password.html', form=form)", "def doChangeUser(self, login, password, **kwargs):\n IUserChanger(self.context).setPassword(password)", "def change_password(self, password, newpassword):\n cred = {\"newpasswd\": newpassword, \"passwd\": password}\n return self.put(\"passwd\", cred)", "def setpassword(self, pwd):\n pass", "def change_pwd(self):\r\n if self.field_pwd.text() == \"\":\r\n self.label_chg_pwd.setText(\"Password cannot be empty\")\r\n return None\r\n self.encryptor.set_key_from_password(self.field_pwd.text())\r\n self.label_chg_pwd.setText(\"Password typed\")\r\n self.label_chg_pwd.setStyleSheet(\"color:#01ac2d\")\r\n self.label_chg_key.clear()\r\n self.field_key.clear()\r\n QtWidgets.QMessageBox.information(self, \"Password Change\", \r\n (\"Your password has been successfully changed.\\n\\n\"\r\n \"You can now encrypt / decrypt files.\"))", "def set_password(ctx, new_password, remember):\n ensure_validated(ctx, prompt='Enter your current password')\n if not new_password:\n new_password = click.prompt(\n 'Enter your new password',\n hide_input=True,\n confirmation_prompt=True,\n err=True)\n\n controller = ctx.obj['controller']\n settings = ctx.obj['settings']\n keys = settings.setdefault('keys', {})\n key = controller.set_password(new_password)\n click.echo('Password updated.')\n if remember:\n keys[controller.id] = b2a_hex(key).decode()\n settings.write()\n click.echo('Password remembered')\n elif controller.id in keys:\n del keys[controller.id]\n settings.write()", "def change_password(self, old_password, new_password):\n data = dict(password = new_password)\n data['old-password'] = old_password\n return self.app.post('/_changePassword', data = data, follow_redirects = True)", "async def user_change_password(\n form: ChangePasswordRequest,\n db: Session = 
Depends(db_session)):\n token: AccessToken = find_ot_access_token(db, form.token)\n if not token:\n return {\"success\": False, \"msg\": \"Token was not found\"}\n\n token.user.hashed_password = PWD_CONTEXT.hash(form.password)\n db.delete(token)\n db.commit()\n return {\"success\": True}", "def password_change_view(request):\n extra_context = {'title': _('Current user password change')}\n\n if request.user.user_options.block_password_change:\n messages.error(\n request, _(\n 'Changing the password is not allowed for this account.'\n )\n )\n return HttpResponseRedirect(reverse(settings.HOME_VIEW))\n\n return password_change(\n request, extra_context=extra_context,\n template_name='appearance/generic_form.html',\n post_change_redirect=reverse('authentication:password_change_done'),\n )", "def change_user_password():\n session_id = request.args.get('session-id', None)\n user_id = request.args.get('user-id', None)\n user = get_user_by_id(user_id)\n if request.method == 'POST':\n old_password = request.form['old-password']\n new_password = request.form['new-password']\n confirm_password = request.form['confirm-password']\n today = datetime.date.today()\n reservations_list = get_user_reservations_list(user_id)\n cars_reservations_list = get_cars_user_reservations_list(reservations_list)\n reservations_status_list = get_reservations_status_list(reservations_list)\n if check_authentication(session_id, user_id):\n is_password_updated = update_user_password(user_id, old_password, new_password, confirm_password)\n else:\n return render_template('home.html', cars_list=get_cars_preview(), news_list=get_news_list(), authjs=False,\n preview_length=get_cars_preview().__len__(), del_session_cookie=True)\n if is_password_updated == \"OK\":\n return render_template('user_area.html', user=user.id, session_id=session_id, edit_mode=False,\n surname=user.surname, name=user.name, birthdate=user.birthdate,\n feedback_msg=\"Password successfully updated!\", today=today,\n reservations_list=reservations_list, cars_reservations_list=cars_reservations_list,\n reservations_status_list=reservations_status_list)\n else:\n return render_template('user_area.html', user=user.id, session_id=session_id, edit_mode=False,\n surname=user.surname, name=user.name, birthdate=user.birthdate,\n feedback_msg=is_password_updated, today=today,\n reservations_list=reservations_list, cars_reservations_list=cars_reservations_list,\n reservations_status_list=reservations_status_list)", "def set_admin_password(self, instance, new_pass):\n pass", "def change_user():\n _ = db.change_password(auth.username(), generate_password_hash(request.json['password']))\n return str(_)", "def password_change(request):\n status = 200\n pform = ChangePasswordForm(request.user, request.POST)\n\n if pform.is_valid():\n status = pform.save(request)\n if status == 200:\n messages.success(request, _('Your password was successfully changed'))\n return redirect('profile')\n\n return render(request, 'gui/profile/profile_password_form.html', {\n 'user': request.user,\n 'pform': pform,\n }, status=status)", "def change_password():\n\n if request.method == 'POST':\n current_password = request.form['current_password']\n new_password = request.form['new_password']\n\n # If current password is correct, update and store the new hash\n if current_user.check_password_hash(current_password):\n current_user.generate_password_hash(new_password)\n else:\n return 'Current password you entered is wrong! 
Please try again!'\n\n # Commit the changes we made in the object to the database\n success, reason = commit_transaction()\n if not success:\n return f'Error occurred while changing your password - {reason}!'\n\n log(f'<code>{current_user.name}</code> has updated their password!</code>')\n\n # Log the user out, and redirect to login page\n logout_user()\n return redirect(url_for('login'))\n return render_template('change_password.html')", "def changepassword():\n if request.method == \"POST\":\n\n # Ensure password was submitted\n if not request.form.get(\"newpassword\"):\n return apology(\"must provide password\", 400)\n # Ensure passwords match\n elif request.form.get(\"newpassword\") != request.form.get(\"confirmation\"):\n return apology(\"passwords do not match\", 400)\n elif request.form.get(\"newpassword\").isalpha() == True:\n return apology(\"password must contain at least one numeric symbol\")\n\n # encrypt new password\n hash = generate_password_hash(request.form.get(\"newpassword\"))\n print(hash)\n # update user's password in database\n result = db.execute(\"UPDATE users SET hash = :hash WHERE id = :id\", hash=hash, id = session[\"user_id\"])\n\n if not result:\n return apology(\"password not available\", 400)\n\n # Redirect user to home page\n return redirect(\"/\")\n\n # User reached route via GET (as by clicking a link or via redirect)\n else:\n return render_template(\"changepass.html\")", "def test_mod_password(self, mapp, existing_user_id):\n mapp.logoff()\n mapp.login(user=existing_user_id, password=\"1234\")\n mapp.modify_user(user = existing_user_id, password = id(self))\n # Verify that the password was indeed changed.\n mapp.logoff()\n mapp.login(user=existing_user_id,\n password=\"1234\", code = 401)\n mapp.login(user=existing_user_id, password=id(self))", "def _change_password(self, user, password):\r\n user.set_password(password)\r\n user.save()\r\n history = PasswordHistory()\r\n history.create(user)", "def set_new_password(self, new_password):\n self.password = new_password", "def change_password(self, user):\n if not self.is_valid():\n return None\n password = self.clean_password2()\n user.set_password(password)\n user.save()\n return user", "def changeUserPassword(self, name, password):\n self._client.changeUserPassword(name, password)", "def update_password(self, user, password):\n user.password = hashers.make_password(password)", "def set_admin_password(self, instance, new_pass):\n raise NotImplementedError()", "def reset_password(self):\n self.password = passwordResetter(self.user_id, self.password)", "def change_password(self, username=None, user_data=None):\n if not username:\n raise ValueError(\"Please provide a username.\")\n\n if not user_data:\n raise ValueError(\"Please provide correct user information.\")\n\n user_data = self._to_string(data=user_data)\n uri = 'json/users/' + username + '?_action=changePassword'\n data = self._post(uri=uri, data=user_data, headers=self.headers)\n if data.status_code == 200:\n return True\n else:\n return False", "def ChangePassword(self):\n \n username = self.username.get().lstrip().rstrip()\n if not username:\n messagebox.showerror('Error', 'No username entered.')\n return False\n \n if not self.PasswordMatch():\n messagebox.showerror('Error', 'Password fields do not match.')\n return False\n password = self.password.get().lstrip().rstrip()\n \n for user in self.user_db:\n if user['User'] == username:\n if user['Password'] == password:\n messagebox.showerror('Error',\n 'New password unchanged from the ' \\\n 'old 
password.')\n return False\n user['Password'] = password\n messagebox.showinfo('Success!', 'Password updated!')\n return True\n \n messagebox.showerror('Error', f'{username} not found in database.')\n return False", "def change_password():\n\n if request.method == \"POST\":\n\n # Ensure current password is not empty\n if not request.form.get(\"current_password\"):\n return apology(\"must provide current password\", 400)\n\n # Query database for user_id\n rows = db.execute(\"SELECT hash FROM users WHERE id = :user_id\", user_id=session[\"user_id\"])\n\n # Ensure current password is correct\n if len(rows) != 1 or not check_password_hash(rows[0][\"hash\"], request.form.get(\"current_password\")):\n return apology(\"invalid password\", 400)\n\n # Ensure new password is not empty\n if not request.form.get(\"new_password\"):\n return apology(\"must provide new password\", 400)\n\n # Ensure new password confirmation is not empty\n elif not request.form.get(\"new_password_confirmation\"):\n return apology(\"must provide new password confirmation\", 400)\n\n # Ensure new password and confirmation match\n elif request.form.get(\"new_password\") != request.form.get(\"new_password_confirmation\"):\n return apology(\"new password and confirmation must match\", 400)\n\n # Update database\n hash = generate_password_hash(request.form.get(\"new_password\"))\n rows = db.execute(\"UPDATE users SET hash = :hash WHERE id = :user_id\", user_id=session[\"user_id\"], hash=hash)\n\n # Show flash\n flash(\"Password Changed!\")\n return redirect(\"/\")\n\n return render_template(\"change_password.html\")", "def update_password(self, username, password):\n self.update(('Password', password), username)", "def changepassword():\n try:\n if request.method == 'POST':\n # Makes sure the passwords match and that it meets complexity\n validate = check_pass(\n request.form['newpass'], request.form['connewpass'])\n if validate == \"Passed\":\n data = [request.form['newpass'], session[\n 'username'], request.form['oldpass']]\n with Database() as database:\n database.updateUserPassword(data)\n return redirect(url_for('profile', username=session['username']))\n else:\n flash(validate)\n return render_template('changepass.html')\n\n else:\n return render_template('changepass.html')\n\n except Exception as e:\n flash(\"Oops, something went wrong... Try again.\")\n return render_template('changepass.html')", "def change_password(request):\n\n form = ChangePasswordForm(user=request.user)\n context = {\n 'form': form,\n 'submit_button_text': _('Update password'),\n 'back_button_text': _('Cancel'),\n 'show_back_button': True,\n }\n # If this is a POST request then process the Form data\n if request.method == 'POST':\n # Create a form instance and populate it with data from the request (binding):\n form = ChangePasswordForm(request.POST, user=request.user)\n context.update({'form': form})\n # Check if the form is valid:\n if form.is_valid():\n user = request.user\n if not user.check_password(form.cleaned_data['old_password']):\n messages.error(request, _('Password was not changed! 
You typed your old password in incorrectly, please try again.'), extra_tags='alert alert-warning')\n else:\n # process the data in form.cleaned_data as required (here we just write it to the model due_back field)\n user.set_password(form.cleaned_data['new_password'])\n user.save()\n update_session_auth_hash(request, request.user)\n # redirect to a new URL:\n messages.success(request, _('Your password was changed.'), extra_tags='alert alert-success')\n form = ChangePasswordForm(user=request.user)\n context.update({'form': form})\n return render(request, 'change_password_form.html', context)\n\n\n return render(request, 'change_password_form.html', context)", "def print_password_change_required_and_logout( context, args ):\n\n print( \"Password change required. To set a new password, run the following:\" )\n print( \"rf_accounts.py -r {} -u {} -p <old password> --setpassword {} <new password>\".format( args.rhost, args.user, args.user ) )\n logout( context, ignore_error = True ) # Some services do not allow session logout in this condition\n return", "def change_password(self, user, current_password, password):\n\n if not password:\n raise DoorstepError('New password can\\'t be blank.')\n\n # Changing user's password if old password verifies\n user = self.get(id=user.id)\n\n if not user.check_password(current_password):\n raise DoorstepError('Your current password is wrong.')\n\n user.set_password(password)\n user.save()", "def test_010_change_user_password(self):\n\n testflow.step(\"Resetting password for user %s\", TEST_USER1)\n assert USER_CLI.run(\n 'password-reset',\n TEST_USER1,\n password='pass:%s' % self.user_password,\n password_valid_to='2100-01-01 11:11:11Z',\n )[0], \"Failed to change user's '%s' password\" % TEST_USER1", "def change_user_password(self, user, new_pass):\n return self._user_manager.change_user_password(user, new_pass)", "def change_password(self, cr, uid, ids, old_passwd, new_passwd, context=None):\n res = {'flag':False,\n 'info':''}\n records = self.browse(cr, uid, ids, context=context)\n if records and len(records) == 1:\n mem_obj = records[0]\n if not mem_obj:\n res['info'] = u'获取会员信息失败!'\n if not mem_obj.m_normal:\n res['info'] = u'会员状态不可用!'\n else:\n if self.check(cr, uid, ids,old_passwd):\n self.set_password(cr, uid, ids,new_passwd)\n res['flag'] = True\n else:\n res['info'] = u'原密码输入错误!'\n \n return res", "def change_password(request):\n if request.method == 'POST':\n form = PasswordChangeForm(user=request.user, data=request.POST)\n if form.is_valid():\n form.save()\n return HttpResponseRedirect(reverse('view-profile', args=[request.user.id]))\n else:\n print \"form not valid\"\n else:\n form = PasswordChangeForm(user=request.user)\n\n return render(request, 'woofer/show_form.html', {\n 'form' : form,\n 'message' : None,\n 'form_action' : reverse('change-password'),\n 'title' : \"Change Password\"\n })", "def _update_password(self, email, new_password):\r\n user = User.objects.get(email=email)\r\n user.set_password(new_password)\r\n user.save()\r\n history = PasswordHistory()\r\n history.create(user)", "def set_password(self, password):\n self.cloudserver.change_password(password)", "def set_password(self, request, pk=None):\n user = User.objects.get(id=pk)\n serializer = PasswordSerializer(data=request.data)\n\n if serializer.is_valid():\n if not user.check_password(serializer.data.get('old_password')):\n return Response({'old_password': ['Wrong password.']},\n status=status.HTTP_400_BAD_REQUEST)\n # set_password also hashes the password that the user will 
get\n user.set_password(serializer.data.get('new_password'))\n user.save()\n return Response({'status': 'password set'}, status=status.HTTP_200_OK)\n\n return Response(serializer.errors,\n status=status.HTTP_400_BAD_REQUEST)", "def update_password(): \n \n form = PasswordForm()\n if request.method == 'POST':\n if form.validate_on_submit():\n \n hashed_pw = bcrypt.hashpw(form.new_password.data.encode('utf-8'), bcrypt.gensalt())\n user = mongo.db.user.find_one({'username': session['username']})\n \n if bcrypt.checkpw(request.form['password'].encode('utf-8'), user['hashed_password']):\n mongo.db.user.find_one_and_update({'username': session['username']}, {'$set':{'hashed_password':hashed_pw}})\n \n flash(f'Password reset was successful, please login again.','success')\n return redirect(url_for('login'))\n \n return render_template('pages/settings.html', \n title='Password', \n form=form\n )", "def change_password(self, user, current_password, password):\n\n if not password:\n raise Exception('New password can\\'t be blank.')\n\n # Changing user's password if old password verifies\n user = self.get(id=user.id)\n\n if not user.check_password(current_password):\n raise Exception('Your current password is wrong.')\n\n user.set_password(password)\n user.save()", "def change(ctx, password, clear, new_password, remember):\n if clear and new_password:\n ctx.fail(\"--clear cannot be combined with --new-password.\")\n\n _init_session(ctx, password, False, prompt=\"Enter the current password\")\n\n session = ctx.obj[\"session\"]\n keys = ctx.obj[\"oath_keys\"]\n device_id = session.device_id\n\n if clear:\n session.unset_key()\n if device_id in keys:\n del keys[device_id]\n keys.write()\n logger.info(\"Deleted remembered access key\")\n\n click.echo(\"Password cleared from YubiKey.\")\n else:\n if remember:\n try:\n keys.ensure_unlocked()\n except ValueError:\n raise CliFail(\n \"Failed to remember password, the keyring is locked or unavailable.\"\n )\n if not new_password:\n new_password = click_prompt(\n \"Enter the new password\", hide_input=True, confirmation_prompt=True\n )\n key = session.derive_key(new_password)\n if remember:\n keys.put_secret(device_id, key.hex())\n keys.write()\n click.echo(\"Password remembered.\")\n elif device_id in keys:\n del keys[device_id]\n keys.write()\n session.set_key(key)\n click.echo(\"Password updated.\")", "def testEditPassword(self):\n self._login_user('eschoppik','secret')\n response = self.client.post('/users/1/edit_password?_method=PATCH',\n data=dict(new_password='newpass', confirm_password='newpass',\n old_password='secret'), follow_redirects=True)\n user = User.query.filter_by(username='eschoppik').first()\n self.assertEqual(response.status_code, 200)\n self.assertEqual(bcrypt.check_password_hash(user.password, 'newpass'),True)", "def password():\n\n if request.method == 'POST':\n print 'Changing password'\n # query for user's hash of password\n pw_hash = datastore.get_user_by_user_id(engine, session['user_id'])['hash']\n\n # check all boxes filled, old password is correct, new and confirmation match\n if not request.form.get('old') or not check_password_hash(pw_hash, request.form.get('old')):\n flash('Incorrect old password!', 'danger')\n return render_template('password.html')\n elif not request.form.get('new') or not request.form.get('confirmation'):\n flash('Must confirm new password!', 'danger')\n return render_template('password.html')\n elif not request.form.get('new') == request.form.get('confirmation'):\n flash('New passwords don\\'t match!', 
'danger')\n return render_template('password.html')\n\n # update hash in database\n datastore.update_password_hash(engine, session['user_id'], generate_password_hash(request.form.get('new')))\n\n # redirect to portfolio\n flash('Password changed!', 'info')\n print 'Password changed!'\n return redirect(url_for('index'))\n\n else:\n print 'Loading change password page'\n return render_template('password.html')", "def view_update_user(self, user, username, password):\r\n user.realm._checker.passwd(username, password, True)", "def changePassword():\n\n if request.method == \"GET\":\n\n #Query for the current user that is logged in.\n user = db.execute(\"SELECT username from users WHERE id = :id\", id=session['user_id'])\n\n\n return render_template(\"changePassword.html\", user=user)\n\n if request.method == \"POST\":\n\n #Query for the current user that is logged in and get the hash.\n new_pass = db.execute(\"SELECT username, hash from users WHERE id = :id\", id=session['user_id'])\n\n old_password = request.form.get(\"old_password\")\n password = request.form.get(\"password\")\n confirmation = request.form.get(\"confirmation\")\n\n #Check if the user entered an input\n if not password:\n return apology(\"Please enter a password\", 400)\n if not confirmation:\n return apology(\"Please enter a password confirmation\", 400)\n\n #Check if the password and the confirmation password is the same.\n if password==confirmation:\n hashpw = generate_password_hash(password)\n\n else:\n return apology(\"Password doesn't match\", 400)\n\n #Check if the entered old password is correct.\n if check_password_hash(new_pass[0]['hash'], old_password)==True:\n db.execute(\"UPDATE users SET hash = :hashpw WHERE id = :id\", hashpw=hashpw, id=session['user_id'])\n flash('You successfully changed your password!')\n else:\n return apology (\"Hash doesn't match\", 400)\n\n return redirect (\"/\")", "def changePassword(self, oldPassword, newPassword):\n if self.checkPassword(oldPassword):\n pswFile = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"adminPass.psw\")\n tempFile = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"adminPassTmp.psw\")\n newHash = (hashlib.sha1(str(newPassword).encode('utf-8')).hexdigest())\n\n f = open(tempFile, \"w+\")\n f.write(newHash)\n f.close()\n shutil.copyfile(tempFile, pswFile)\n os.remove(tempFile)\n return True\n else:\n return False", "async def password(self, ctx):\n pass", "def set_pass(self, pw):\n\t\tself.passhash = generate_password_hash(pw)", "def set_password(self, password):\n self.password = password", "def test_user_changed_password(self):\n\n form_data = {\n 'token': self.token.key,\n 'password': 'aaaaaa',\n 'password_confirmation': 'aaaaaa'\n }\n form = ResetPasswordForm(form_data)\n form.submit()\n self.user.refresh_from_db()\n self.assertTrue(self.user.check_password(form_data['password']))", "def save_model(self, request, obj, form, change):\n if change:\n obj.save()\n else:\n obj.set_password(obj.password)\n obj.save()", "def password_change_done(request):\n messages.success(\n request, _('Your password has been successfully changed.')\n )\n return redirect('common:current_user_details')", "def _request_pwd_change(self, nick):\n user = nick.split('!')[0]\n if ':new_master' in self.users and user == self.users[':master']:\n self.users[user]['authenticated_at'] = datetime.datetime.now()\n self.bot.client.send('PRIVMSG', user, ':Please change your \\\npassword as soon as possible using the change_password command!')", "def userPassword(self, 
password=None):\n\n\t\tdisplay = False\n\n\t\tif password is None:\n\t\t\tdisplay = True\n\t\t\tpassword = hlstr.generate_password(\n\t\t\t\t\t\t\t\tLMC.configuration.users.min_passwd_size)\n\t\telif password == '':\n\t\t\tlogging.warning(_(u'Setting an empty password for user {0}. '\n\t\t\t\t'This is dangerous and totally insecure!').format(\n\t\t\t\t\tstylize(ST_LOGIN, self.__login)))\n\n\t\twith self.lock:\n\t\t\tif self.__already_created:\n\t\t\t\tLicornEvent('user_pre_change_password', user=self.proxy, password=password).emit(synchronous=True)\n\n\t\t\tprefix = '!' if self.__locked else ''\n\n\t\t\tif password == '':\n\t\t\t\tself.__userPassword = prefix\n\t\t\telse:\n\t\t\t\tself.__userPassword = '%s%s' % (prefix,\n\t\t\t\t\t\t\t\t\tself.backend.compute_password(password))\n\n\t\t\t# 3600*24 get us to the number of days since epoch.\n\t\t\tself.__shadowLastChange = int(time.time() / 86400)\n\n\t\t\tif self.__already_created:\n\t\t\t\tself.serialize()\n\t\t\t\tLicornEvent('user_post_change_password', user=self.proxy, password=password).emit(synchronous=True)\n\n\t\t\t\tif self.__already_created:\n\t\t\t\t\t# don't forward this event on user creation, because we\n\t\t\t\t\t# already have the \"user_added\" for this case.\n\t\t\t\t\tLicornEvent('user_userPassword_changed', user=self.proxy).emit(priorities.LOW)\n\n\t\t\tif display:\n\t\t\t\tlogging.notice(_(u'Set password for user {0} to {1}.').format(\n\t\t\t\t\tstylize(ST_NAME, self.__login),\n\t\t\t\t\tstylize(ST_IMPORTANT, password)),\n\t\t\t\t\t# don't display the clear-text password in the daemon's log.\n\t\t\t\t\tto_local=False)\n\t\t\telse:\n\t\t\t\tif self.__already_created:\n\t\t\t\t\tlogging.notice(_(u'Changed password for user {0}.').format(\n\t\t\t\t\t\t\t\t\t\t\tstylize(ST_NAME, self.__login)))", "def update_user_password(context, params):\n\n user = User.objects.filter(id=params.get('id')).first()\n if not user:\n raise ValueError(\"user not found\")\n # user.edited_by = context.user\n if params.get('password'):\n user.set_password(params.get('password'))\n\n user.save()\n return user", "def change_password(self, user, old_password, new_password):\n\n if not user.check_password(old_password):\n raise InvalidPassword('The provided old password is incorrect.')\n\n user.set_password(new_password)\n user.save()\n\n return user", "def set_password(self, raw_password: str):\n self.new_password = raw_password", "def password_change(self, request):\n view_func, args, kwargs = resolve(self.change_password_path)\n\n assert issubclass(kwargs['password_change_form'],\n StrictPasswordChangeForm), (\n \"Use django_auth_policy StrictPasswordChangeForm for password \"\n \"changes.\")\n\n # Provide extra context to be used in the password_change template\n is_exp = request.session.get('password_is_expired', False)\n is_tmp = request.session.get('password_is_temporary', False)\n if not 'extra_context' in kwargs:\n kwargs['extra_context'] = {}\n kwargs['extra_context']['is_enforced'] = True\n kwargs['extra_context']['is_temporary'] = is_tmp\n kwargs['extra_context']['is_expired'] = is_exp\n return view_func(request, *args, **kwargs)", "def enter_password(self):", "def change_password(request):\n if not request.user.is_authenticated:\n return JsonResponse({}, status=401)\n\n try:\n body = json.loads(request.body)\n except (TypeError, json.decoder.JSONDecodeError):\n return JsonResponse({'error': 'Cannot parse request body'}, status=400)\n\n old_password = body.get('oldPassword')\n new_password = body.get('newPassword')\n\n if not old_password 
or not new_password:\n return JsonResponse({'error': 'Missing payload'}, status=400)\n\n if not request.user.check_password(old_password):\n return JsonResponse({'error': 'Incorrect old password'}, status=400)\n\n try:\n validate_password(new_password, user)\n except ValidationError as err:\n return JsonResponse({'error': err.messages[0]}, status=400)\n\n request.user.set_password(new_password)\n request.user.save()\n update_session_auth_hash(request, request.user)\n\n return JsonResponse({})", "def updatepassword():\n if request.method == \"POST\":\n\n password = request.form.get(\"password\")\n password2 = request.form.get(\"confirmation\")\n\n if not password:\n return apology(\"must provide password\", 400)\n\n elif not (password == password2):\n return apology(\"passwords must match\", 400)\n\n elif not password2:\n return apology(\"must confirm password\", 400)\n\n rows = db.execute(\n \"SELECT password FROM users WHERE id = ?\", (session_get_int(\"user_id\"), )).fetchall()\n\n if (check_password_hash(rows[0][\"password\"], password)):\n return apology(\"password cannot be the same as existing password\", 400)\n\n else:\n db.execute(\"UPDATE users SET password = ? WHERE id = ?\",\n (generate_password_hash(password), session_get_int(\"user_id\")))\n con.commit()\n\n return redirect(\"/profile\")\n else:\n return redirect(\"/profile\")", "def change_password(host, username, password):\r\n # type: (Docker, str, str) -> None\r\n host.cmd(\"echo '%s:%s' | chpasswd\" % (username, password))", "def update_password(self, new_password=None):\n\n self.password = generate_password_hash(new_password)\n\n if self.save(verbose=False):\n self.logger.warn('Updated password! %s' % self)\n else:\n raise AttributeError('Password update failed!')", "def change_password_user():\n\n form = ChangePasswordForm(request.form)\n\n if form.validate_on_submit():\n\n if not request.form['old_password'] or request.form['old_password'] == '' :\n flash(\"No null or empty values are allowed.\",\"warn\")\n return render_template('user/change_password_user.html', title='Change Password', form=form)\n\n if not request.form['password'] or request.form['password'] == '' :\n flash(\"No null or empty values are allowed.\",\"warn\")\n return render_template('user/change_password_user.html', title='Change Password', form=form)\n\n if request.form['password'] != request.form['retype_password']:\n flash(\"Passwords are not the same!\",\"warn\")\n return render_template('user/change_password_user.html', title='Change Password', form=form)\n\n\n hashed_password = user_manager.hash_password(request.form['password'])\n\n # Modificamos el password del usuario\n current_user.password = hashed_password\n\n try:\n correct = True\n db.session.commit()\n except Exception as e:\n # Catch anything unknown\n print(e)\n correct = False\n finally:\n if not correct:\n # Cleanup and show error\n db.session.rollback()\n flash('Error modifying password of user, make sure username and email are unique','error')\n return render_template('user/change_password_user.html', title='Change Password', form=form)\n else:\n flash('Congratulations, update your password!','success')\n return redirect(url_for('user_ksat.show_user'))\n\n\n return render_template('user/change_password_user.html', title='Change Password', form=form)", "def setUserPassword(self,value):\n self.PDFreactorConfiguration.in1[\"userPassword\"] = value", "def change_user(self, username, password):\n self.creds['username'] = username\n self.creds['password'] = password", "def 
change_password(self, request, **kwargs):\n self.method_check(request, allowed=['post'])\n self.throttle_check(request)\n\n data = json.loads(request.body)\n\n username = None\n old_password = None\n new_password = None\n\n if \"username\" in data:\n username = data[\"username\"]\n print username\n else:\n if \"email\" in data:\n username = data[\"email\"]\n else:\n BadRequest(INVALID_PARAMS)\n\n if \"old_password\" in data:\n old_password = data[\"old_password\"]\n else:\n BadRequest(INVALID_PARAMS)\n\n if \"new_password\" in data:\n new_password = data[\"new_password\"]\n else:\n BadRequest(INVALID_PARAMS)\n\n if (old_password is not None and new_password is not None and\n username is not None):\n member = authenticate(username=username, password=old_password)\n\n if member is not None:\n member.set_password(new_password)\n member.save()\n return self.create_response(request, {})", "def set_password(self, password):\n self.PASS = password", "def change_password(username, current_password, new_password):\n\n if current_password == \"\": # nosec (not a hardcoded password)\n current_password = getpass.getpass()\n\n is_password_ok = authenticate_user(username, current_password)\n if not is_password_ok:\n return False\n\n if new_password == \"\": # nosec (not a hardcoded password)\n new_password = getpass.getpass()\n\n global db\n if db is None:\n init_db()\n user_model = Query()\n user = db.search(user_model.username == username)[0]\n\n salt = user['salt']\n password = hash_password(new_password, salt)\n api_key = gen_api_key(username)\n\n user_id = db.update({'password': password, 'api_key': api_key}, doc_ids=[user.doc_id])\n\n return {\n 'result': 'success',\n 'eid': user_id,\n 'user_created': user,\n 'api_key': api_key\n }", "def set_password(username, new_password):\n if not validate_password(new_password):\n return \"salasana on väärää muotoa\"\n new_password_hash = generate_password_hash(new_password)\n sql = \"UPDATE users \" \\\n \"SET password=:new_pw \" \\\n \"WHERE username=:username\"\n db.session.execute(sql, {\"new_pw\": new_password_hash, \"username\": username})\n db.session.commit()\n return \"ok\"", "def pass_change(request):\n if request.method == \"POST\":\n form = PasswordChangeForm(request.user, request.POST)\n if form.is_valid():\n form.save()\n return home(request, \"Password Changed Successfully\")\n \n else:\n form = PasswordChangeForm(instance=request.user)\n \n ctx = _make_context(request, \"pass_form\", form)\n \n return TemplateResponse(request, \"users/index.html\", ctx)", "def set_password(self, password):\n self.PASSWORD = password", "def update_password(self, username, password): #WORKS\n password_hash = generate_password_hash(password)\n try:\n self.cur.execute(\"UPDATE users SET password = \\\"{}\\\" WHERE username = \\\"{}\\\"\".format(password_hash, username))\n self.db.commit()\n except:\n self.db.rollback()", "def update_password(self, pwd):\n self.password = bcrypt.generate_password_hash(pwd).decode('utf8')", "def set_password(self, password):\n self.password = self.hash_password(password)", "def reset_password():\n pass", "def password(self, password):\n self.password_hash = generate_password_hash(password)\n self.password_set = True", "def change_db_user_password(self, username, password):\n\n self.sql(\"ALTER USER %s WITH PASSWORD '%s'\" % (username, password))", "def updatePassword(con, options, dbName, userName, userInfo):\n if checkUsername(userName):\n trace(\"For dbName='%s', alter user '%s' password\" % (dbName, userName))\n userPassword = 
userInfo[\"password\"]\n optionalDbExecute(con, options, \"alter user %s with password '%s'\" % (userName, userPassword))", "def _set_password(self, cr, uid, id, password, context=None):\n encrypted = self._crypt_context(\n cr, uid, id, context=context).encrypt(password)\n print(password)\n print(encrypted)\n self._set_encrypted_password(cr, uid, id, encrypted, context=context)\n self._set_password_again(cr, uid, id, password, context=context)", "def post(self):\n DA = DataAccessor()\n session = getSessionByRequest(self)\n user = getSessionUser(session)\n \n old = self.request.get('old')\n new = self.request.get('new')\n new2 = self.request.get('new2')\n\n if old != user.password:\n setSessionMessage(session, \"Invalid Password\")\n self.redirect('/admin')\n\n if (new != new2) :\n setSessionMessage(session, \"Your new passwords did not match. Please try again.\", True)\n else:\n setSessionMessage(session, \"You have successfully changed your password.\", False)\n \n #Reset the password\n DA.update(user, password=new)\n\n #Reset the session.\n session.generated = False\n session.put()\n self.redirect('/admin')", "def set_password(user_id):\n user = _get_user_or_404(user_id)\n\n form = SetPasswordForm(request.form)\n if not form.validate():\n return set_password_form(user.id, form)\n\n new_password = form.password.data\n initiator_id = g.user.id\n\n password_service.update_password_hash(user.id, new_password, initiator_id)\n\n flash_success(\n gettext(\n \"New password has been set for user '%(screen_name)s'.\",\n screen_name=user.screen_name,\n )\n )\n\n return redirect_to('.view', user_id=user.id)", "def change_Password(): \r\n try:\r\n\r\n UserName=request.args.get(\"UserName\")\r\n validate_otp=request.args.get(\"OTP\") \r\n NewPassword=request.args.get(\"NewPassword\")\r\n hashed_Password = hashlib.md5(NewPassword.encode()).hexdigest() \r\n user_details=otp_access(UserName)\r\n otp=user_details[0]['otp']\r\n with open('api.key', 'r') as apikey:\r\n key=apikey.read().replace('\\n', '')\r\n if request.headers.get('API_KEY') == key:\r\n if str(otp)==str(validate_otp):\r\n msg=update_Password(UserName,hashed_Password)\r\n #This function calling makes the user use OTP until Password gets changed after that validity of OTP will be expired.\r\n new_otp=randint(10000,100000)\r\n # This will checks the new generated OTP and old OTP\r\n if str(otp)==str(new_otp):\r\n new_otp=randint(10000,100000)\r\n update_otp(UserName,new_otp)\r\n else:\r\n update_otp(UserName,new_otp)\r\n else:\r\n msg=\"Something went wrong check the OTP or UserName!!!!\"\r\n else:\r\n msg=\"Enter correct API KEY for Authentication.\"\r\n except IndexError:\r\n msg=f\"{UserName} does not exist , kindly enter correct UserName.\"\r\n return msg", "def post(self):\n # userId is retrieved from jwt identity\n userId = get_jwt_identity()\n data = ChangePasswordInputSchema().load(request.json)\n UserLoginService.change_password(userId,\n existing_password=data[\"existingPassword\"],\n new_password=data[\"newPassword\"])\n return {}, 200", "def changePassword(self, loginName, password, newPassword):\n return self.talk(\n 'purchase',\n data=self.__makeLoginDict(loginName, password,\n {'newPassword': newPassword}))", "def change_password(user, old_pw, new_pw, confirm_pw, lang):\n LOG.debug(\"Entering change_password\")\n _t = Translator(lang)\n\n success = False\n\n # is the old password given?\n if not old_pw:\n LOG.debug(\"Old pwd is empty\")\n message = _t.get(Keywords.oldPwdEmpty) # 'The old password field is empty.'\n # is the new 
password given?\n elif not new_pw:\n LOG.debug(\"New pwd is empty\")\n message = _t.get(Keywords.newPwdEmtpy) # 'The new password field is empty.'\n # is the confirmation password given?\n elif not confirm_pw:\n LOG.debug(\"Confirm pwd is empty\")\n message = _t.get(Keywords.confPwdEmpty) # 'The password confirmation field is empty.'\n # is new password equals the confirmation?\n elif not new_pw == confirm_pw:\n LOG.debug(\"New pwds not equal\")\n message = _t.get(Keywords.newPwdNotEqual) # 'The new passwords are not equal'\n # is new old password equals the new one?\n elif old_pw == new_pw:\n LOG.debug(\"Pwds are the same\")\n message = _t.get(Keywords.pwdsSame) # 'The new and old password are the same'\n else:\n # is the old password valid?\n if not user.validate_password(old_pw):\n LOG.debug(\"Old password is wrong\")\n message = _t.get(Keywords.oldPwdWrong) # 'Your old password is wrong.'\n else:\n user.change_password(new_pw)\n\n LOG.debug(\"Password was changed\")\n message = _t.get(Keywords.pwdChanged) # 'Your password was changed'\n success = True\n\n return message, success", "def test_account_password_change(self):\r\n params = {\r\n 'current_password': 'admin',\r\n 'new_password': 'not_testing'\r\n }\r\n\r\n res = self.testapp.post(\r\n \"/api/v1/admin/password?api_key=\" + str(API_KEY),\r\n params=params,\r\n status=200)\r\n\r\n # make sure we can decode the body\r\n user = json.loads(res.body)\r\n\r\n self.assertEqual(\r\n user['username'], 'admin',\r\n \"Should have a username of admin {0}\".format(user))\r\n self.assertTrue(\r\n 'message' in user,\r\n \"Should have a message key in there: {0}\".format(user))\r\n\r\n params = {\r\n 'current_password': 'not_testing',\r\n 'new_password': 'admin'\r\n }\r\n res = self.testapp.post(\r\n \"/api/v1/admin/password?api_key=\" + str(API_KEY),\r\n params=params,\r\n status=200)\r\n\r\n self._check_cors_headers(res)" ]
[ "0.8333886", "0.8186766", "0.8097396", "0.7924953", "0.79134744", "0.79105085", "0.7877887", "0.78761387", "0.7860327", "0.784283", "0.7838352", "0.78312457", "0.78055656", "0.7788097", "0.77424246", "0.77323174", "0.77305025", "0.7709434", "0.7682401", "0.76711226", "0.76426893", "0.7642551", "0.7625603", "0.76037884", "0.7601878", "0.7562087", "0.75544965", "0.75471145", "0.7540491", "0.7533473", "0.751578", "0.7506597", "0.75020945", "0.7444804", "0.74428505", "0.74141294", "0.7412129", "0.7400299", "0.7393486", "0.7379752", "0.7360523", "0.73580545", "0.735026", "0.7345259", "0.7339419", "0.733578", "0.7313091", "0.7300418", "0.728768", "0.7284719", "0.72822994", "0.72760236", "0.727539", "0.7275345", "0.7268362", "0.72243434", "0.72109777", "0.7199011", "0.71864", "0.71674377", "0.71629673", "0.7150777", "0.7137635", "0.7134867", "0.71296895", "0.7105572", "0.70974255", "0.7089729", "0.7089047", "0.70876557", "0.7067477", "0.70528024", "0.7051212", "0.7040666", "0.7037874", "0.7023383", "0.7022915", "0.70166147", "0.70128536", "0.7010075", "0.70006996", "0.7000067", "0.69996023", "0.69854844", "0.69688123", "0.6959166", "0.69566953", "0.69524306", "0.6947866", "0.69423574", "0.6930158", "0.6922139", "0.6914695", "0.69012296", "0.6880839", "0.68803084", "0.6869407", "0.68439394", "0.68427867", "0.6831946", "0.6821339" ]
0.0
-1
Cancels a user session. Puts up the login form.
def logout_req(request):
    if request.user.is_authenticated():
        logout(request)
    return render_to_response('registration/login.html',
                              dict(next = DEFAULT_REDIRECT, META = request.META,
                                   root = settings.ROOT_URL, media = settings.MEDIA_URL))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def signout(self):\n username = cherrypy.session.get('username')\n if username is None:\n pass\n else:\n cherrypy.lib.sessions.expire()\n raise cherrypy.HTTPRedirect('/')", "def logout():\r\n form = LoginForm()\r\n user = current_user\r\n user.authenticated = False\r\n db.session.add(user)\r\n db.session.commit()\r\n logout_user()\r\n return redirect(url_for('hello'))", "def log_out():\n\n del session[\"user_id\"]\n # print session[\"user_id\"]\n flash('You were successfully logged out')\n return render_template('homepage.html')\n\n #Additional reference for log in/log out can be found in project tracker project", "def log_out():\n if 'name' in session:\n PLAN.logout_user(session['name'])\n session.pop('name', None)\n return redirect(url_for('log_in'))\n return redirect(url_for('log_in'))", "def log_out_user(self):\n flask_login.logout_user()", "def logout():\n user = current_user\n user.authenticated = False\n db.session.add(user)\n db.session.commit()\n logout_user()\n return render_template(\"login.html\")", "def logout(self):\n self.change_user(self.username, None, None)", "def dropsession():\n session.pop('user', None)\n return redirect(url_for('login'))", "def logout():\n if \"username\" in session.keys():\n del session[\"username\"]\n if not app.config[\"DISABLE_AUTH\"]:\n return redirect(url_for(\"login\") + \"?slo\")\n else:\n return redirect(url_for(\"index\"))", "def logout(self):\n if 'user' in session:\n del session['user']\n session.save()\n return render('logout.html')", "def OnButtonLoginCancelButton(self, event):\r\n\t\tself.Hide()", "def log_out(self):\n DB.log_out()\n self.customer.log_out()\n self.displayUi = LoginScreen()\n self.hide()\n self.displayUi.show()", "def logout(self):\n\n # remove session\n session.delete()\n\n return render('login.html')", "def user_logout():\n\n session.pop('logged_in', None)\n flash('You are now logged out')\n\n return redirect('/')", "def logout():\n\tsession.pop(\"username\", None)\n\treturn redirect(url_for(\"default\"))", "def logout():\n session.pop('username', None)\n session.pop('user_id', None)\n flash (\"You are logged out\")\n return redirect(url_for('index'))", "def logout():\n if \"username\" in session:\n session.pop(\"username\", None)\n flash(\"You have been logged out.\")\n return redirect(url_for(\"index\"))", "def logout_user():\n\n print \"Logging out.\"\n session.clear()\n flash(\"You are now logged out.\")\n\n return redirect('/')", "def LogOut(self):\n self.loginUi.UserNameLineEdit.clear()\n self.loginUi.IdLineEdit.clear()\n self.loginUi.PassLineEdit.clear()\n self.DoctorUi.ui.stackedWidget.setCurrentIndex(0)\n self.DoctorUi.newpatientflag = 1\n self.DoctorUi.ResetNewPatient()\n self.DoctorUi.close()\n self.loginUi.show()", "def logout():\n session.pop('username', None)\n session.pop('user_id', None)\n session.pop('logged_in', None)\n session.pop('is_admin', None)\n\n flash('Successfully logged out', 'alert-info')\n\n return redirect(url_for('index'))", "def submit(self):\n password = self.form_result['password']\n username = self.form_result['username']\n\n if not loginhelper.validateUsernamePassword(username, password):\n return render('login.html')\n\n # Mark user as logged\n session['user'] = username\n session.save()\n\n # Send user back to where they originally wanted\n if session.get('path_before_login'):\n redirect(session['path_before_login'])\n else:\n return render('loggedin.html')", "def logout_user():\n session.pop('username')\n return redirect('/login')", "def logout():\n login()", "def logout():\n 
flash(\"You have been logged out\")\n session.pop(\"user\")\n return redirect(url_for(\"tips\"))", "def logout(self):\r\n session.clear()\r\n return redirect(\"/user/login\")", "def logout():\n # remove user from session cookies\n flash(\"You have been logged out\")\n session.pop(\"user\")\n return redirect(url_for(\"login\"))", "def logout():\n resp = Response(render_template('admin/login.html',\n message='Your session has been canceled.'))\n unset_jwt_cookies(resp)\n return resp", "def logout():\n resp = Response(render_template('admin/login.html',\n message='Your session has been canceled.'))\n unset_jwt_cookies(resp)\n return resp", "def logout():\n user = current_user\n user.authenticated = False\n db.session.add(user)\n db.session.commit()\n logout_user()\n return redirect(url_for('index'))", "def logout():\n user = current_user\n user.authenticated = False\n db.session.add(user)\n db.session.commit()\n logout_user()\n return redirect(url_for('index'))", "def log_out(self):\n self.__is_logged_in = False", "def logout_user():\n\n session.clear()\n\n return redirect(\"/\")", "def logout_user():\n pass", "def do_logout():\n\n session['authenticated'] = False\n session['username'] = None\n session['name'] = None\n session['cpi'] = None\n session['grp_size'] = None\n\n return home()", "def sign_out():\n session.clear()\n return redirect(url_for('index'))", "def logout():\n # Remove session data, this will log the user out\n session.pop('loggedin', None)\n session.pop('userid', None)\n session.pop('username', None)\n # Redirect to login page\n return redirect(url_for('site.login'))", "def logout_user():\n\n # Delete session data to log out\n del session[\"user_id\"]\n flash(\"Successfully logged out!\")\n\n return redirect(\"/\")", "def logout():\n flash(u'Zostałeś wylogowany')\n session.pop('user_id', None)\n return redirect(url_for('index'))", "def logout():\n flash('You were logged out')\n session.pop('username', None)\n return redirect(url_for('welcome_page'))", "def logout():\n flash(\"You have been logged out\")\n session.pop(\"user\")\n return redirect(url_for(\"login\"))", "def do_logout():\n del session[CURRENT_USER_KEY]", "def logout():\n session.pop('user', None)\n return redirect(url_for('index'))", "def logout():\n session.pop('user', None)\n return redirect(url_for('index'))", "def logout():\n session['user_id'] = None\n session['user_email'] = None\n return redirect(url_for('main'))", "def logout():\n if session:\n session.pop('user')\n flash('You were successfully logged out')\n return redirect('/')\n else:\n return redirect('/')", "def logout():\n session.pop(\"username\")\n\n return redirect(\"/\")", "def logout():", "def logout():\n try:\n if session[\"user\"]:\n flash(\"You have logged out successfully\", category=\"success\")\n session.pop(\"user\")\n except KeyError:\n flash(\"You are not logged in\", category=\"error\")\n try:\n if session[\"admin\"]:\n session.pop(\"admin\")\n except KeyError:\n # user is not an admin\n pass\n finally:\n return redirect(url_for(\"get_terms\"))", "def logOut(self):\n self.client.logout()", "def logout():\n\n session.pop(\"username\")\n return redirect(\"/login\")", "def log_out():\n session.pop('logged_in', None)\n flash('You were logged out.')\n\n return redirect(url_for('blog.show_posts'))", "def logout():\n session.pop('user_id', None)\n flash('Your were logged out')\n return redirect(url_for('login'))", "def logout():\n \n del session[\"logged_in\"]\n flash(\"See you later! 
;)\")\n return redirect('/')", "def logout():\n session.pop(\"user\")\n return redirect(url_for(\"home\"))", "def logout():\n flash(_('You were logged out'))\n session.pop('user_id', None)\n return redirect(url_for('index'))\n #return redirect(url_for('public_timeline'))", "def go_back(self):\n self.displayUi = LoginScreen()\n DB.log_out()\n self.hide()\n self.displayUi.show()", "def logOut(self, e):\n\n\t\tself.unBind()\n\t\tself.menu_manager.runLogin()\n\t\tself.main_menu_window.root.destroy()", "def logout():\n \n session.pop('username', None)\n flash('You have logged out.', 'success')\n return redirect(url_for('home'))", "def logout():\n\n if session.get('user_id'):\n del session['user_id']\n flash('You are now logged out.')\n return redirect('/login')", "def logout():\n session.pop('username', None)\n return redirect('/')", "def logout():\n\n # remove the username from the session if it is there\n out_user = current_user.get_id()\n logout_user()\n logger.info(out_user + ' has been logged out.')\n return redirect(url_for('home'))", "def logout():\n flash('You were logged out')\n session.pop('user_id', None)\n return redirect(url_for('leaderboard'))", "def logout():\n session.pop('logged_in', None)\n session.pop('fname', None)\n session.pop('patron', None)\n flash('You were logged out')\n return redirect('/')", "def logout():\n session.pop('logged_in', None)\n flash('You were logged out', 'success')\n return redirect(url_for('show_entries'))", "def logout():\n u = current_user\n u.authenticated = False\n db.session.add(u)\n db.session.commit()\n logout_user()", "def Return():\n confirm_frame.forget()\n self.LoadLogInWindow()", "def logout():\n # clear user data from session and flag as logged out\n for x in ['provider', 'state', 'user']:\n if x in flask.session:\n del flask.session[x]\n flask.session['logged_in'] = False\n\n flash('logout successful', 'info')\n return redirect(request.referrer or url_for('catalog.index'))", "def close_login(self):\n self.login.destroy()", "def logout():\n logout_user()\n flash('Successfully logged out.')\n return redirect(request.referrer)", "def logout(request):\n if request.session.get('username') is not None:\n call([\"rm\", \"-r\", request.session.get('username')])\n request.session.flush()\n return HttpResponseRedirect(reverse('index'))", "def signout():\r\n logout_user()\r\n flash(gettext('You are now signed out'), 'success')\r\n return redirect(url_for('home.home'))", "def t24_logoff(self):\n if self.home_page:\n self.login_page = self.home_page.sign_off()\n self.home_page = None", "def logout():\n session.pop('user', None)\n# g.user = None\n# g.graph = None\n return redirect(url_for('index'))", "def logout():\r\n logout_user()\r\n flash('You were logged out.')\r\n return redirect(url_for('index'))", "def logout():\n user = g.user\n do_logout(user)\n\n flash(\"You have successfully logged out.\", 'success')\n return redirect(\"/login\")", "def logout():\n\n session.clear()\n flash('See you next time!')\n return redirect('/')", "def sign_out(self):\n self.auth.log_out(self._user)\n self._user = None\n print(\"Signed out successfully\")\n return self.logging_page()", "def logout():\n logout_user()\n flash('You have successfully been logged out.')\n # redirect to the login page\n return redirect(url_for('view.login'))", "def gdisconnect():\n try:\n access_token = login_session['credentials']\n except KeyError:\n flash('Failed to get access token')\n return redirect(url_for('home'))\n print(\"User's name was {}.\".format(login_session['name']))\n if 
access_token is None:\n print('Access Token is None')\n response = make_response(\n json.dumps('Current user not connected.'), 401)\n response.headers['Content-Type'] = 'application/json'\n return response\n del login_session['credentials']\n del login_session['user_id']\n del login_session['name']\n del login_session['email']\n print('Successfully logged out.')\n flash('Successfully logged out.')\n return redirect(url_for('home'))", "def logout_user(session):\n del session['user']", "def logout():\n # Remove credentials key and user id from session\n session_helper = SessionHelper(session)\n session_helper.delete_credentials_from_session()\n session_helper.delete_user_from_session()\n return redirect(url_for('homepage.home_page_route'))", "def sign_out():\n next_url = request.args.get('next')\n session.pop(\"user\")\n flash(\"Sign Out Successful\", \"success\")\n return redirect(next_url or url_for('index'))", "def signout():\n session.pop('oauth2_state', None)\n session.pop('oauth2_token', None)\n session.pop('discord_user', None)\n return redirect('/')", "def login_page():\n form = loginUser()\n\n if \"user\" in session:\n logged_user = session[\"user\"]\n return redirect(f\"users/{logged_user}\")\n\n if form.validate_on_submit():\n username=form.username.data\n password=form.password.data\n\n user = User.authenticate(username=username, password=password)\n\n if user:\n session[\"user\"] = user.username\n\n return redirect(f'/users/{username}')\n else:\n form.password.errors = ['Unable to log in']\n\n return render_template(\"login_form.html\", form=form)", "def process_logout():\n\n print \" LOGGED OUT USER \"\n\n del session[\"user_id\"]\n\n flash(\"You have Successfully Logged Out!\")\n\n return redirect(\"/\")", "def process_logout():\n\n print \" LOGGED OUT USER \"\n\n del session[\"user_id\"]\n\n flash(\"You have Successfully Logged Out!\")\n\n return redirect(\"/\")", "def logout():\n session.clear()\n return redirect(\"/showlog\")", "def logout(self):", "def logout():\n logout_user()\n flash('You have successfully been logged out.')\n\n # redirect to the login page\n return redirect(url_for('auth.login'))", "def logout():\n logout_user()\n flash('You have successfully been logged out.')\n\n # redirect to the login page\n return redirect(url_for('auth.login'))", "def relogin(self):\n spotify.Error.maybe_raise(lib.sp_session_relogin(self._sp_session))", "def logout(self):\n user = self.get_user()\n if user:\n with atomic(self.conf['auth.dbfile']) as cursor:\n logout_user(cursor, user.username)\n request.user = self.tpls['user'] = None\n response.set_cookie(self.conf['auth.cookie_key'], '',\n secret=self.conf['auth.cookie_secret'], path='/')", "def logout(self):\n pass", "def login():\n\n if \"username\" in session:\n return redirect(f\"/users/{session['username']}\")\n\n form = LoginForm()\n\n if form.validate_on_submit():\n user = User.authenticate(form.data[\"username\"], form.data[\"password\"])\n if user is None:\n if User.query.filter_by(username=form.data[\"username\"]).count() == 0:\n form.username.errors.append(\"Invalid username\")\n else:\n form.password.errors.append(\"Invalid credentials\")\n return render_template(\"login.html\", form=form)\n\n session[\"username\"] = user.username\n return redirect(f\"/users/{user.username}\")\n \n return render_template(\"login.html\", form=form)", "def logout():\n session.clear()\n return redirect(url_for('index'))", "def login_form_valid(self, form):\n self.request.session.update({\n 'user_is_none': None,\n 'user_is_active': 
None\n })\n\n email = form.cleaned_data['email']\n password = form.cleaned_data['password']\n user = authenticate(email=email, password=password)\n\n if user is None:\n self.request.session['user_is_none'] = True\n return HttpResponseRedirect('/user_account/')\n elif user.active is False:\n self.request.session['user_is_active'] = False\n return HttpResponseRedirect('/user_account/')\n else:\n self.request.session.update({\n 'user_is_none': False,\n 'user_is_active': True\n })\n login(self.request, user)\n return HttpResponseRedirect('/schedule/')", "def logout():\n logout_user()\n flash('You have successfully been logged out')\n\n # redirect to login page\n return redirect(url_for('auth.login'))", "def logout_redirect():\n login_session.clear()\n flash('You have logged out')\n return redirect(url_for('show_homepage'))", "def logout():\n # Log user out if they are authenticated\n if current_user.is_authenticated:\n logout_user()\n # Redirect to index page\n flash(\"Successfully logged out.\", category=\"success\")\n # Redirect back to index\n return redirect(url_for('main.index'))", "def logout():\n logout_user()\n return redirect(url_for(\".login\"))", "def logout(self):\n self.getLink('Logout').click()\n self.html_redirect()\n assert 'You have been logged out successfully.' in self.message, \\\n 'Not successfully logged out: message={0.message!r}'.format(self)" ]
[ "0.7182822", "0.69931966", "0.69211394", "0.67985123", "0.67923224", "0.6713557", "0.66810346", "0.6674477", "0.6670308", "0.66364765", "0.66233695", "0.6603241", "0.65926504", "0.6584644", "0.65809464", "0.6569001", "0.6568214", "0.65630865", "0.65387446", "0.6528097", "0.65220875", "0.6518022", "0.6512855", "0.6488379", "0.6466621", "0.64635843", "0.64616", "0.64616", "0.64452004", "0.64452004", "0.64322823", "0.64289653", "0.64212817", "0.64178604", "0.64071923", "0.6404839", "0.64038956", "0.64025205", "0.64008445", "0.64006054", "0.6391225", "0.63823825", "0.63823825", "0.6380766", "0.637982", "0.6374042", "0.63736254", "0.6347143", "0.6344922", "0.63438356", "0.6343313", "0.6336613", "0.6332407", "0.63019055", "0.6298339", "0.62982106", "0.629163", "0.6289217", "0.6280548", "0.6279948", "0.62630826", "0.62615496", "0.62595326", "0.6255772", "0.62371355", "0.623636", "0.6235594", "0.6219551", "0.6215875", "0.6210233", "0.6192793", "0.61886215", "0.6174135", "0.616551", "0.61640257", "0.61534035", "0.6149092", "0.6147237", "0.61389935", "0.6136902", "0.613176", "0.6128053", "0.61237705", "0.6118813", "0.61056834", "0.61056834", "0.610111", "0.60981333", "0.6091359", "0.6091359", "0.60877997", "0.60875744", "0.6077598", "0.6074449", "0.60711354", "0.60662174", "0.6065679", "0.60617775", "0.6055525", "0.60554904", "0.6051972" ]
0.0
-1
Create an object from its rdf type
def oid(identifier_or_rdf_type, rdf_type, context, base_type=None):
    identifier = identifier_or_rdf_type
    if rdf_type is None:
        rdf_type = identifier_or_rdf_type
        identifier = None

    cls = None
    if context is not None:
        cls = context.resolve_class(rdf_type)
    if cls is None and context is not None:
        for types in _superclass_iter(context.rdf_graph(), rdf_type):
            for typ in types:
                cls = context.resolve_class(typ)
                if cls is not None:
                    break
            if cls is not None:
                break
    if cls is None:
        cls = base_type

    # if its our class name, then make our own object
    # if there's a part after that, that's the property name
    if context is not None:
        cls = context(cls)
    if identifier is not None:
        o = cls.query(ident=identifier, no_type_decl=True)
    else:
        o = cls.query()
    return o
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __generate_object_term__(self, datatype, value):\n if datatype == NS_MGR.xsd.anyURI.rdflib:\n term = rdflib.URIRef(value)\n elif datatype:\n term = rdflib.Literal(value, datatype=datatype)\n else:\n term = rdflib.Literal(value)\n return term", "def make(self, atype, **kwargs):\n obj = self.api.get_type(f\"VSO:{atype}\")\n return obj(**kwargs)", "def create(spec):\n type = {\n 'polygon': Polygon,\n 'fusionTable': FusionTable,\n }[spec['type']]\n return type(spec)", "def create_wsdl_object_of_type(self, type_name):\r\n return self.client.factory.create(type_name)", "def create_object_type(self, object_type=None):\n # Return Value\n # ------------\n # {object_type_id: ''}\n #\n if not is_basic_identifier(object_type.name):\n raise BadRequest(\"Invalid object_type name: %s\" % object_type.name)\n if not is_yaml_string_valid(object_type.definition):\n raise BadRequest(\"Invalid YAML definition\")\n object_type_id, version = self.clients.resource_registry.create(object_type)\n return object_type_id", "def createInstance():\n\n graphTypeEnvVariable = os.getenv('GRAPH_TYPE')\n graphTypeKey = graphTypeEnvVariable if graphTypeEnvVariable is not None else 'networkx' # Default to networkx\n graphType = GraphFactory.typeMap[str(graphTypeKey)]\n\n return graphType()", "def load(uri: str, type: Optional[str] = None, *args, **kwargs) -> DataObject:\n from . import data # Loads all formats\n from . import core\n\n if type:\n return core.DataObject.registered_types[type].from_uri(uri, *args, **kwargs)\n else:\n return core.DataObject.from_uri(uri, *args, **kwargs)", "def create_new_type():\n string = \"I'm a string\"\n print(type(string))\n \"\"\"When passed 3 arguments, type() acts like a dynamic 'class' statement. It returns a new class object (see\n metaclassses notes)\n \"\"\"\n thing = type(\"SuperCoolClass\", (), {})\n thing.random = 4.556\n print(type(thing))\n print(vars(thing))\n print(thing)", "def _deserialize(self, data):\n uri = data[1:-1]\n # We have to retrieve the type to rebuild the object\n attr = self.__dict__['field']\n # Be careful when orig = None !!!!!\n orig = getattr(attr.model, attr.name)\n if None == orig:\n return rdfSubject(rdflib.term.URIRef(uri))\n elif isinstance(orig, list):\n # rdfalchemy mapper gives me the solution\n rt = attr.model.__class__.__dict__[attr.name].range_type\n from rdfalchemy.orm import mapper\n alch_map = mapper()\n try:\n cls = alch_map[str(rt)]\n return cls(rdflib.term.URIRef(uri))\n except:\n rdfSubject(rdflib.term.URIRef(uri))\n else:\n return type(orig)(rdflib.term.URIRef(uri))", "def _create_type(self, keyword_or_identifier, **kwargs):\n name = keyword_or_identifier\n if isinstance(name, Identifier):\n return self._idl_type_factory.reference_type(name, **kwargs)\n elif isinstance(name, str):\n return self._idl_type_factory.simple_type(name, **kwargs)\n else:\n assert False", "def __init__(self, raw_type: Dict):\n\n self.kind = raw_type.get(\"kind\")\n self.name = raw_type.get(\"name\")\n self.description = raw_type.get(\"description\")\n self.fields: List[SchemaTypeField] = [SchemaTypeField(f) for f in raw_type.get(\"fields\") or [] if f]\n self.input_fields = [SchemaTypeInputField(i) for i in raw_type.get(\"inputFields\") or [] if i]\n self.interfaces = [SchemaTypeInterface(i) for i in raw_type.get(\"interfaces\") or [] if i]\n self.enum_values = [SchemaTypeEnum(e) for e in raw_type.get(\"enumValues\") or [] if e]\n self.possible_types = raw_type.get(\"possibleTypes\")", "def rdf_type(self):\n return self._rdf_type", "def 
oid(identifier_or_rdf_type, rdf_type=None, context=None):\n identifier = identifier_or_rdf_type\n if rdf_type is None:\n rdf_type = identifier_or_rdf_type\n identifier = None\n\n c = None\n try:\n c = PyOpenWorm.CONTEXT.mapper.RDFTypeTable[rdf_type]\n except KeyError:\n c = BaseDataObject\n L.debug(\"oid: making a {} with ident {}\".format(c, identifier))\n\n # if its our class name, then make our own object\n # if there's a part after that, that's the property name\n o = None\n if context is not None:\n c = context(c)\n if identifier is not None:\n o = c(ident=identifier)\n else:\n o = c()\n return o", "def get_obj_class(self, obj_type: str) -> Type[TgnObject]:\n pass", "def __init__(self, *args):\n this = _libsbml.new_SpeciesType(*args)\n try: self.this.append(this)\n except: self.this = this", "def create_object_instance(path):\n path = utils.strip_path_string(path)\n object_config_file = utils.ConfigFile(path+\"/.config.py\")\n object_type = object_config_file.read_variable(\"object_type\")\n vobject_class = {\"algorithm\":VAlgorithm,\n \"task\":VTask,\n \"data\":VData,\n \"directory\":VDirectory,\n \"project\":VProject}\n return vobject_class[object_type](path)", "def create_ontic_type(name: str, schema: (dict, Schema)) -> OnticType:\n if name is None or name == '':\n raise ValueError('The string \"name\" argument is required.')\n if schema is None:\n raise ValueError('The schema dictionary is required.')\n if not isinstance(schema, dict):\n raise ValueError('The schema must be a dict or SchemaType.')\n\n ontic_type = type(name, (OnticType,), dict())\n\n if not isinstance(schema, Schema):\n schema = Schema(schema)\n\n ontic_type.ONTIC_SCHEMA = schema\n\n return ontic_type", "def create_type(name):\n\n new_type = Type(name=name)\n db.session.add(new_type)\n db.session.commit()\n return new_type", "def init_obj(obj_name):\n ret = type(obj_name, (object,), {})\n return ret", "def type(name):", "def from_type_string(cls, type_str):\n type_info = cls.is_my_type(type_str)\n if type_info:\n return cls(type_info)", "def create_node(name, node_type):\n if node_type in NODE_REGISTRY:\n return Node(name, NODE_REGISTRY[node_type])\n raise TypeError('The specified node type \\'%s\\' could not be found within imagegen.' % node_type)", "def __init__(self, type_):\n\n self.type = type_", "def __init__(self, name: str, python_type: type):\n self.name = name\n self.python_type = python_type", "def XtremObjFactory(object_type, object_data, parent_connection):\r\n for cls in XtremObject.__subclasses__():\r\n if cls.is_class_for(object_type):\r\n return cls(object_data, parent_connection)", "def cast(self):\n if self.validate():\n if 'blueprint' in self.data:\n # A single blueprint\n obj = Blueprint.Blueprint()\n obj.versionCode = self.versionCode\n obj.data = self.data\n return obj\n elif 'blueprint-book' in self.data:\n # A book of blueprints\n obj = BlueprintBook.BlueprintBook()\n obj.versionCode = self.versionCode\n obj.data = self.data\n return obj\n else:\n # Unknown datatype. 
Just return the object\n return self\n \n else:\n # Broken validation means just return the object\n return self", "def test_create_obj_by_type_from_primitive_type(self):\n test_obj = \"test_primitive\"\n returned_obj = self.tested_class._create_obj_by_type(test_obj)\n self.assertEqual(returned_obj, test_obj)", "def make_object():\n return object()", "def _ConstructType(self, type_name, type_contents, filepath, require_guid):\n\n description = ''\n parents = None\n local_field_names = None\n opt_local_field_names = None\n is_abstract = False\n allow_undefined_fields = False\n is_canonical = False\n guid = None\n\n expected_keys = set([\n 'description', 'implements', 'uses', 'opt_uses', 'is_abstract', 'guid',\n 'is_canonical', 'allow_undefined_fields'\n ])\n\n if 'description' in type_contents:\n description = type_contents['description']\n if 'implements' in type_contents:\n parents = type_contents['implements']\n if 'uses' in type_contents:\n local_field_names = type_contents['uses']\n if 'opt_uses' in type_contents:\n opt_local_field_names = type_contents['opt_uses']\n if 'is_abstract' in type_contents:\n is_abstract = type_contents['is_abstract']\n if 'allow_undefined_fields' in type_contents:\n allow_undefined_fields = type_contents['allow_undefined_fields']\n if 'is_canonical' in type_contents:\n is_canonical = type_contents['is_canonical']\n if 'guid' in type_contents:\n guid = type_contents['guid']\n\n # Generate tuples to represent each field\n fq_lfn = []\n if local_field_names:\n self._ConstructField(local_field_names, False, fq_lfn)\n if opt_local_field_names:\n self._ConstructField(opt_local_field_names, True, fq_lfn)\n\n entity_type = EntityType(\n filepath=filepath,\n typename=type_name,\n description=description,\n parents=parents,\n local_field_tuples=fq_lfn,\n is_abstract=is_abstract,\n allow_undefined_fields=allow_undefined_fields,\n inherited_fields_expanded=False,\n is_canonical=is_canonical,\n guid=guid,\n require_guid=require_guid,\n namespace=self.local_namespace)\n\n # Add errors to type if there's anything extra in the block. 
We add to the\n # entity type because an extra key here is likely a typo in a real key name\n # that would result in information being lost from the type.\n for key in type_contents:\n if key not in expected_keys:\n entity_type.AddFinding(\n findings_lib.UnrecognizedKeyError(key, entity_type.file_context))\n\n return entity_type", "def create(self, odometryType): # real signature unknown; restored from __doc__\n pass", "def obj_factory (d):\n try:\n kind = d['kind']\n o = self.vtable.get (kind, Obj) (d)\n except KeyError:\n o = obj (d)\n return o", "def __init__(self, raw_type: type):\n self.raw_type = raw_type\n self.name = raw_type.__name__\n self.qualname = raw_type.__qualname__\n self.module = raw_type.__module__\n self.full_name = TypeInfo.to_full_name(raw_type)\n self.hash = hash(self.full_name)\n self.is_abstract = inspect.isabstract(raw_type)\n # TODO(fk) store more information on attributes\n self.instance_attributes: OrderedSet[str] = OrderedSet()\n self.attributes: OrderedSet[str] = OrderedSet()\n\n # TODO(fk) properly implement generics!\n # For now we just store the number of generic parameters for set, dict and list.\n self.num_hardcoded_generic_parameters: int | None = (\n 2 if raw_type is dict else 1 if raw_type in (set, list) else None\n )", "def new(ruletype, **kwargs):\n try:\n ruleclass = TYPE_MAP[ruletype]\n except KeyError:\n raise error.InvalidRule('Unrecognized rule type: %s' % ruletype)\n\n try:\n return ruleclass(**kwargs)\n except TypeError:\n log.error('BADNESS. ruletype: %s, data: %s', ruletype, kwargs)\n raise\n #raise error.InvalidRule(\n # '%s does not work that way.\\nDetails: %s.\\nData: %s' % (\n # ruletype, err, kwargs))", "def GetCreateLink(identifier, namespace=None, klass=BroadcastSegment):\n return pynt.xmlns.GetCreateRDFObject(identifier=identifier, namespace=namespace, klass=BroadcastSegment)", "def create_object(object_name):\n if object_name == 'deathstar':\n return Deathstar()\n elif object_name == 'mercury':\n return Mercury()\n elif object_name == 'venus':\n return Venus()\n elif object_name == 'mars':\n return Mars()\n elif object_name == 'earth':\n return Earth()\n elif object_name == 'moon':\n return Moon()\n elif object_name == 'tatooine':\n return Tatooine()\n elif object_name == 'mordor':\n return Mordor()\n elif object_name == 'xwing':\n return Xwing()", "def dict_to_rdflib(d):\n if d is None:\n return None\n\n t = d[\"type\"]\n v = d[\"value\"]\n\n if t == \"uri\":\n return URIRef(v)\n\n if t == \"bnode\":\n if v not in _bnodes:\n # v is not used as BNode value on purpose (multiple calls should\n # not have the same value)\n _bnodes[v] = BNode()\n return _bnodes[v]\n\n l = d.get(\"xml:lang\", None)\n if t == \"literal\":\n return Literal(v, lang=l)\n\n if t == \"typed-literal\":\n # will raise type error if lang and datatype set\n return Literal(v, lang=l, datatype=d[\"datatype\"])\n\n raise rdflib.exceptions.ParserError(\n \"Invalid sparql json result according to \"\n \"http://www.w3.org/TR/rdf-sparql-json-res/: {0}\".format(d))", "def GmatBase_GetObjectType(typeString):\n return _gmat_py.GmatBase_GetObjectType(typeString)", "def __init__(self,typing,reflection,year):\n self.name = str(self)\n self.typing = typing\n self.reflection = reflection\n self.year = year", "def __init__(self,given_type):\n self.given_type=given_type", "def make_graph_from_spec(graphtype, args):\n parsed = parse_graph_argument(graphtype, args)\n assert parsed['graphtype'] == graphtype\n return obtain_graph(parsed)", "def from_type(cls: type[_ST1], type_obj: 
type) -> _ST1:\n try:\n dct = vars(type_obj)\n except TypeError:\n raise TypeError(f\"Expected a type object, got {type(type_obj).__name__!r}\") from None\n return cls._reconstruct({k: np.dtype(v) for k, v in dct.items() if not k.startswith(\"_\")})", "def get_object(self, pid=None, type=None, create=None):\n objtype = type or self.default_object_type\n\n if pid is None:\n if create is None:\n create = True\n else:\n if create is None:\n create = False\n\n return objtype(self.api, pid, create,\n default_pidspace=self.default_pidspace)", "def _doc_create(type, data):\n doc = dict(data)\n doc.update({'model_type': type})\n return doc", "def Instance(self) -> TypeManager:", "def world_object_factory(self, bwo_type, starting_pos=None, name=None, genome=None):\n\n\t\tif starting_pos is None:\n\t\t\tstarting_pos = self.get_random_location_in_world()\n\n\t\tif name is None:\n\t\t\tname = BWOType.get_name(bwo_type)\n\t\t\t#TODO add unique counter for the bug\n\n\t\tif bwo_type == BWOType.HERB:\n\t\t\treturn Herbivore(self, starting_pos, name, genome)\n\t\telif bwo_type == BWOType.CARN:\n\t\t\treturn Carnivore(self, starting_pos, name, genome)\n\t\telif bwo_type == BWOType.OMN:\n\t\t\treturn Omnivore(self, starting_pos, name, genome)\n\t\telif bwo_type == BWOType.OBST:\n\t\t\tif not genome:\n\t\t\t\tlogging.error(\"shouldn't have a genome for an obstacle\")\n\t\t\treturn Obstacle(self, starting_pos, name)\n\t\telif bwo_type == BWOType.MEAT:\n\t\t\tif not genome:\n\t\t\t\tlogging.error(\"shouldn't have a genome for an meat\")\n\t\t\treturn Meat(self, starting_pos, name)\n\t\telif bwo_type == BWOType.PLANT:\n\t\t\tif not genome:\n\t\t\t\tlogging.error(\"shouldn't have a genome for an plant ( yet :-} )\")\n\t\t\treturn Plant(self, starting_pos, name)\n\t\telse:\n\t\t\tlogging.error(\"invalid Object Type: \" + str(bwo_type))", "def __init__(self, name, types, reflection, year):\r\n self.name = name\r\n self.type = types\r\n self.reflection = reflection\r\n self.year = year", "def test_uri_field_deserialization(value):\n\n class Entity:\n def __init__(self, field):\n self.field = field\n\n schema = fields.Namespace(\"http://schema.org/\")\n\n class OldEntitySchema(JsonLDSchema):\n field = Uri(schema.field, allow_none=True)\n\n class Meta:\n rdf_type = schema.Entity\n model = Entity\n\n data = {\"@type\": [\"http://schema.org/Entity\"], \"http://schema.org/field\": value}\n\n entity = OldEntitySchema().load(data)\n\n assert entity.field == value", "def __init__(self, line, context):\n match = Ftype_type_decl.type_match(line)\n if match is None:\n raise ParseSyntaxError(\"type declaration\", token=line, context=context)\n else:\n self._match_len = len(match.group(0))\n self._class = match.group(1)\n self._typestr = match.group(2)\n self._kind = self.typestr\n # End if", "def create(self, objecttype, under, **kwargs):\n self.LogCommand()\n tclcode = \"stc::create \" + objecttype + \" -under \" + under\n\n for key in kwargs:\n tclcode = tclcode + \" \" + \"-\" + key + \" \" + str(kwargs[key])\n\n objecthandle = self.Exec(tclcode)\n logging.debug(\" - Python result - \" + str(objecthandle))\n return objecthandle", "def to_rdf(self, *, graph: Graph) -> Resource:\n\n resource = graph.resource(self.uri)\n resource.add(RDF.type, MCS[self.__class__.__name__])\n return resource", "def create_object(self, alias: str, *args: Any, **kwargs: Any) -> Any:\n object_type = self._type_aliases.get(alias)\n if object_type is None:\n raise KeyError(f\"There is no type registered for alias {alias}\")\n if not 
callable(object_type):\n raise TypeError(\n f\"Asked to call {alias} with args {args} and kwargs {kwargs} but it is not \"\n f\"callable, its a {type(alias).__name__}.\"\n )\n return object_type(*args, **kwargs)", "def from_string(data, format):\n # Using ConjunctiveGraph instead of Graph for nquads support.\n graph = rdflib.ConjunctiveGraph()\n graph.parse(data=data, format=format)\n return graph", "def new_entity_type(name, client=default):\n data = {\"name\": name}\n return raw.create(\"entity-types\", data, client=client)", "def get_cls_for(obj_type):\n return {\n \"workflow\": Workflow\n }[obj_type]", "def create_ontology(name, content):\n try:\n query_ontology = QueryOntology(\n name=name,\n content=content,\n last_modif=datetime.now()\n )\n\n query_ontology.save()\n return query_ontology.pk\n except Exception as exc:\n return -1", "def __init__(self, str=None, type=None, dna=None, r=None, b=None, g=None):\n # have they passed in a stringified DNA object?\n if (str != None):\n self.makeFromNetString(str)\n # have they specified what type of DNA?\n elif (type != None):\n if (type == 's'): # Suit\n self.newSuit()\n else:\n # Invalid type\n assert 0\n else:\n # mark DNA as undefined\n self.type = 'u'", "def build(self, obj):\n if isinstance(obj, self.art_type):\n return obj\n elif isinstance(obj, (tuple, list, dict, set)):\n if obj.__class__ is tuple:\n return self.build_tuple(obj)\n elif obj.__class__ is dict:\n return self.build_dict(obj)\n elif obj.__class__ is list:\n return self.build_list(obj)\n else:\n return self.build_set(obj)\n elif isinstance(obj, SageObject):\n return self.build_from_magic_method(obj)\n else:\n return self.build_from_string(obj)", "def type_instance(typedef):\n if subclassof(typedef, Type):\n # Type class passed, create no-arg instance\n typedef = typedef()\n return typedef", "def test08_media_to_rdflib_type(self):\n r = LDPRS()\n self.assertEqual(r._media_to_rdflib_type('text/turtle'), 'turtle')\n self.assertRaises(Exception, r._media_to_rdflib_type, 'elephants')", "def term_to_rdflib(term: str) -> Term:\n if term.startswith('?'):\n return Variable(term[1:])\n elif term.startswith(\"\\\"\"):\n return from_n3(term)\n else:\n return URIRef(term)", "def _create_concept(self, name, sco, cgi):\n\n # now define the class\n new_class = type(name, sco, {})\n\n self.name_mapping[name] = new_class\n self.new_classes.append(new_class)\n self.concepts.append(new_class)\n\n if cgi:\n # store that property in the class-object (available for look-up of child classes)\n self.cas_set(key=(new_class, \"X_createGenericIndividual\"), value=True)\n\n # create the generic individual:\n gi_name = f\"i{name}\"\n gi = new_class(name=gi_name)\n self.individuals.append(gi)\n self.name_mapping[gi_name] = gi\n\n return new_class", "def getOntologyItem(self, resource, oType=0):\n\n if isinstance(resource, int):\n resource = 'ontology/{0}/{1}'.format(resource, oType)\n\n res = self.getRequest(resource)\n onto = vsdModels.Ontology(**res)\n\n return onto", "def __init__(self, db):\n self.mime_types = dict(\n [('html', 'text/html'), ('pretty-xml', 'application/rdf+xml'),\n ('turtle', 'text/turtle'), ('nt', 'text/plain'),\n ('json-ld', 'application/ld+json'),\n ('sparql', 'application/sparql-results+xml'),\n ('sparql-json', 'application/sparql-results+json')])\n self.backend = RDFBackend(db)", "def gen_media_type_object(media_type, api_type, path):\n\n ref = f'{OPENAPI_YAML[api_type]}/{path}'\n\n content = {\n media_type: {\n 'schema': {\n '$ref': ref\n }\n }\n }\n\n return content", "def 
__init__(self, node, declare, type, prettyType=\"\"):\n self.name = getTag(node, \"name\")\n self.info = getTag(node, \"info\")\n self.comment = comment(node, declare)\n self.type = type\n self.prettyType = prettyType\n if prettyType == \"\":\n self.prettyType = type\n m = hash()\n m.update(self.name)\n m.update(self.info)\n m.update(self.type)\n self.link = \"a\"+m.hexdigest()", "def create_object(self, name, url):\n r = self.request('post', 'registry/objects/', json.dumps({\n 'description': {\n 'name': name,\n 'url': url\n }\n }))\n return self._extract_id_from_batch_response(r, 'oid')", "def fromgenotype(self):\n\t\tpass", "def _make_constructor(name, type_, attrs, kwargs):\n d = dict(attrs)\n d['_sumtype_attribs'] = [x for x in attrs]\n t = type(name, (type_,), d)\n t = attr.s(t, repr_ns=type_.__name__, **kwargs)\n return t", "def from_dict(cls, d):\n clsname = d['type']\n\n if clsname == cls.__name__:\n raise Exception('Cannot instantiate abstract class \"Node\"')\n\n clstype = getattr(sys.modules[__name__], clsname)\n return clstype.from_dict(d)", "def create(_type, *args, **kwargs):\n # noinspection PyUnresolvedReferences\n return IExplorer.registry[_type.lower()](*args, **kwargs)", "def GetObjectType(typeString):\n return _gmat_py.GmatBase_GetObjectType(typeString)", "def build(uri):\n return uri if type(uri) is TaxonomyURI else TaxonomyURI.from_str(uri)", "def create(self, name: str, meta: Dict = {}, type: str = \"timeseries\", **kwargs):\n return super()._create(\n {\"name\": name, \"type\": type, \"meta\": meta, **kwargs},\n f=lambda x: registry.getObject(x, self.session),\n )", "def _create_and_load_object_class(self, b):\n gen_mon = _MONSTER.MonsterEnd(b)\n b.Finish(gen_mon)\n monster = _MONSTER.Monster.GetRootAs(b.Bytes, b.Head())\n monsterT = _MONSTER.MonsterT()\n monsterT.InitFromObj(monster)\n return monsterT", "def GetCreateInterface(identifier, namespace, klass=Interface, initfunction=None, **parameters):\n assert(issubclass(klass, ConnectionPoint))\n return pynt.xmlns.GetCreateRDFObject(identifier=identifier, namespace=namespace, klass=klass, \n verifyAttributes=False, initfunction=initfunction, **parameters)", "def testInitialization(self):\n\n self.rdfvalue_class(\"C.00aaeccbb45f33a3\")\n\n # Initialize from another instance.\n sample = self.GenerateSample()\n\n self.CheckRDFValue(self.rdfvalue_class(sample), sample)", "def rdf_loader(gdb, rdf):\n owl = Namespace(\"http://www.w3.org/2002/07/owl#\")\n node_dict = {}\n for j in rdf.subjects(predicate=RDF.type, object=owl.Class):\n for i in rdf.subjects(object=j, predicate=RDF.type):\n idata = i.split('#')[-1]\n if idata in node_dict.keys():\n node_dict[idata].add_label(j)\n print(idata)\n else:\n buf_node = NodeContainer(idata)\n buf_node.add_label(j)\n buf_node.set_uri(i)\n node_dict.update({idata: buf_node})\n print(idata)\n for i in node_dict.keys():\n print(\"%s %s\" % (i, node_dict[i],))\n for i in node_dict.keys():\n rdf_update_labels(rdf, node_dict[i])\n for i in node_dict.keys():\n print(\"%s %s\" % (i, node_dict[i],))\n print(\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\")\n for i in node_dict.keys():\n node = node_dict[i]\n gdb_add_node(node, gdb, rdf, owl)\n for i in node_dict.keys():\n node = node_dict[i]\n gdb_add_connection(node, node_dict, rdf, owl)", "def create(self, value):\n\n # Explicitly check with len such that non-sequence types throw.\n if self._optional and (value is None or len(value) == 0):\n return None\n\n if hasattr(self._type, 'resource_type'):\n # Our type is a resource, so 
ensure we have a dict of title to\n # parameters\n if not isinstance(value, dict):\n raise ValueError(\"Resources must be specified as a dict of \"\n \"title to parameters\")\n if not self._many and len(value) > 1:\n raise ValueError(\"Only one resource can be provided for this \"\n \"TroposphereType variable\")\n\n result = [\n self._type.from_dict(title, v) for title, v in value.items()\n ]\n else:\n # Our type is for properties, not a resource, so don't use\n # titles\n if self._many:\n result = [self._type.from_dict(None, v) for v in value]\n elif not isinstance(value, dict):\n raise ValueError(\"TroposphereType for a single non-resource\"\n \"type must be specified as a dict of \"\n \"parameters\")\n else:\n result = [self._type.from_dict(None, value)]\n\n if self._validate:\n for v in result:\n v._validate_props()\n\n return result[0] if not self._many else result", "def create_node_instance(self, node_type=None):\n if node_type in self.aliases:\n node_type = self.aliases[node_type]\n\n _NodeClass = self.__nodes.get(node_type)\n if _NodeClass:\n return _NodeClass()", "def __init__(self, type, path, attributes, objects={}):\n\n self.type = type\n self.path = path\n self.attributes = attributes\n self.objects = objects", "def __init__(self, py_dict=None):\n super(EdgeNATRulesSchema, self).__init__()\n self.set_data_type('xml')\n self.natRule = EdgeNATRuleSchema()\n\n if py_dict is not None:\n self.get_object_from_py_dict(py_dict)", "def __create_type(server,idx):\n\n types = Factory.get_list_types()\n\n # node de tipos no opcua\n uatypes = server.get_base_objectType_node()\n \n try:\n \n for t in types:\n\n # cria os tipos de objeto/variaveis no servidor opcua\n ua_type = Factory.create_type(uatypes,idx,t) \n\n logger.info(\"Criado o tipo: {} {}\".format(t,ua_type))\n\n except :\n logger.error(\"Problema ao cria os tipos {}\".format(types))", "def test_uri_field_serialization(value):\n\n class Entity:\n def __init__(self, field):\n self.field = field\n\n schema = fields.Namespace(\"http://schema.org/\")\n\n class OldEntitySchema(JsonLDSchema):\n field = Uri(schema.field, allow_none=True)\n\n class Meta:\n rdf_type = schema.Entity\n model = Entity\n\n entity = Entity(field=value)\n\n data = OldEntitySchema().dump(entity)\n\n if \"@id\" in data:\n del data[\"@id\"]\n\n assert data == {\"@type\": [\"http://schema.org/Entity\"], \"http://schema.org/field\": value}", "def __get_object__(binding):\n if isinstance(binding, rdflib.term.Node):\n return binding\n elif isinstance(binding, collections.Iterable):\n for key, row in binding.items():\n if isinstance(row, (rdflib.URIRef, rdflib.Literal)):\n return row\n elif isinstance(row, dict):\n if row.get('type').startswith('uri'):\n return rdflib.URIRef(row.get('value'))\n return rdflib.Literal(row.get('value'))\n elif isinstance(row, tuple):\n print(row)\n elif isinstance(row, str):\n if row.startswith(\"literal\") or \"xml:lang\" in key:\n continue\n return rdflib.Literal(row)", "def get_resource_by_type(self, graph_db, service_type):\n node = neo_resource.get_node_by_property(graph_db, self.label, 'name', service_type)\n return node", "async def infer_type_make_record(track, cls, *elems):\n elem_shapes = [await x['shape'] for x in elems]\n cls_v = await cls['value']\n return ClassShape(dict(zip(cls_v.attributes.keys(), elem_shapes)))", "def __init__(self):\n PrimaryObject.__init__(self)\n NoteBase.__init__(self)\n AddressBase.__init__(self)\n UrlBase.__init__(self)\n self.type = RepositoryType()\n self.name = \"\"", "def create(context, 
namespace_name, values, session):\n\n namespace = namespace_api.get(\n context, namespace_name, session)\n\n # if the resource_type does not exist, create it\n resource_type_name = values['name']\n metadef_utils.drop_protected_attrs(\n models.MetadefNamespaceResourceType, values)\n try:\n resource_type = resource_type_api.get(\n context, resource_type_name, session)\n except exc.NotFound:\n resource_type = None\n LOG.debug(\"Creating resource-type %s\", resource_type_name)\n\n if resource_type is None:\n resource_type_dict = {'name': resource_type_name, 'protected': False}\n resource_type = resource_type_api.create(\n context, resource_type_dict, session)\n\n # Create the association record, set the field values\n ns_resource_type_dict = _to_db_dict(\n namespace['id'], resource_type['id'], values)\n new_rec = _create_association(context, namespace_name, resource_type_name,\n ns_resource_type_dict, session)\n\n return _to_model_dict(resource_type_name, new_rec)", "def _new_entity(client, entity_type):\n\n return datastore.Entity(_load_key(client, entity_type))", "def __init__(self, name):\n self.type_cls = None\n\n self.name = name\n self.description = None\n self.updated = None\n self.notes = None\n self.properties = {}", "def object_via_gen_from(self, fit: af.Fit, galaxies: List[Galaxy]) -> object:", "def recordtype_create_values(\n coll_id=\"testcoll\", type_id=\"testtype\", update=\"RecordType\",\n type_uri=None, supertype_uris=None\n ):\n d = (\n { 'annal:type': \"annal:Type\"\n , 'rdfs:label': \"%s %s/%s/%s\"%(update, coll_id, \"_type\", type_id)\n , 'rdfs:comment': '%s coll %s, type %s, entity %s'%(update, coll_id, \"_type\", type_id)\n , 'annal:type_view': \"_view/Default_view\"\n , 'annal:type_list': \"_list/Default_list\"\n })\n if type_uri:\n d['annal:uri'] = type_uri\n if supertype_uris is not None:\n d['annal:supertype_uri'] = (\n [ { '@id': st } for st in supertype_uris ]\n )\n else:\n d['annal:supertype_uri'] = (\n [ { '@id': type_uri+\"/super1\" }\n , { '@id': type_uri+\"/super2\" }\n ])\n return d", "def from_jsonld(data, format):\n # As of this writing, 'application/nquads' is the only RDF format\n # (other than JSON-LD) supported by pyld. 
Convert via that.\n quads = jsonld.to_rdf(data, { 'format': 'application/nquads' })\n # Using ConjunctiveGraph instead of Graph for nquads support.\n graph = rdflib.ConjunctiveGraph()\n graph.parse(data=quads, format='nquads')\n return graph.serialize(format=format)", "def test_create_obj_by_type_from_dict(self):\n test_obj = {}\n returned_obj = self.tested_class._create_obj_by_type(test_obj)\n self.assertIsInstance(returned_obj, self.tested_class)", "def build_from_string(self, obj):\n if self.string_type is unicode and not isinstance(obj, unicode):\n obj = str(obj).decode('utf-8')\n if self.string_type is str and not isinstance(obj, str):\n obj = unicode(obj).encode('utf-8')\n return self.art_type(obj.splitlines())", "def new_object(self):\r\n\t\tpass", "def __init__(self, *args):\n this = _libsbml.new_ListOfSpeciesTypes(*args)\n try: self.this.append(this)\n except: self.this = this", "def fl_make_object(flobjclass, otype, xpos, ypos, width, height, label,\n pyfn_HandlePtr):\n #FL_HANDLEPTR = cty.CFUNCTYPE(cty.c_int, cty.POINTER(xfdata.FL_OBJECT),\n # cty.c_int, xfdata.FL_Coord, xfdata.FL_Coord, cty.c_int, cty.c_void_p)\n _fl_make_object = library.cfuncproto(\n library.load_so_libforms(), \"fl_make_object\",\\\n cty.POINTER(xfdata.FL_OBJECT), [cty.c_int, cty.c_int, xfdata.FL_Coord,\n xfdata.FL_Coord, xfdata.FL_Coord, xfdata.FL_Coord, xfdata.STRING,\n xfdata.FL_HANDLEPTR],\n \"\"\"FL_OBJECT * fl_make_object(int objclass, int type, FL_Coord x,\n FL_Coord y, FL_Coord w, FL_Coord h, const char * label,\n FL_HANDLEPTR handle)\"\"\")\n library.check_if_flinitialized()\n library.checkfatal_allowed_value_in_list(flobjclass, \\\n xfdata.OBJCLASS_list)\n i_flobjclass = library.convert_to_intc(flobjclass)\n i_otype = library.convert_to_intc(otype)\n i_xpos = library.convert_to_FL_Coord(xpos)\n i_ypos = library.convert_to_FL_Coord(ypos)\n i_width = library.convert_to_FL_Coord(width)\n i_height = library.convert_to_FL_Coord(height)\n s_label = library.convert_to_bytestrc(label)\n library.verify_function_type(pyfn_HandlePtr)\n cfn_HandlePtr = xfdata.FL_HANDLEPTR(pyfn_HandlePtr)\n library.keep_cfunc_refs(cfn_HandlePtr, pyfn_HandlePtr)\n library.keep_elem_refs(flobjclass, otype, xpos, ypos, width, \\\n height, label, i_flobjclass, i_otype, i_xpos, i_ypos, \\\n i_width, i_height, s_label)\n retval = _fl_make_object(i_flobjclass, i_otype, i_xpos, i_ypos, \\\n i_width, i_height, s_label, cfn_HandlePtr)\n return retval", "def to_object(self, class_name=None, **qwargs):\n if \"TYPE\" not in self.list_nodes() and class_name is None:\n raise ValueError(\"Objects can be only recovered from hdf5 if TYPE is given\")\n elif class_name is not None and class_name != self.get(\"TYPE\"):\n raise ValueError(\n \"Object type in hdf5-file must be identical to input parameter\"\n )\n class_name = class_name or self.get(\"TYPE\")\n class_path = class_name.split(\"<class '\")[-1].split(\"'>\")[0]\n class_convert_dict = { # Fix backwards compatibility\n \"pyiron_base.generic.datacontainer.DataContainer\": \"pyiron_base.storage.datacontainer.DataContainer\",\n \"pyiron_base.generic.inputlist.InputList\": \"pyiron_base.storage.inputlist.InputList\",\n \"pyiron_base.generic.flattenedstorage.FlattenedStorage\": \"pyiron_base.storage.flattenedstorage.FlattenedStorage\",\n }\n if class_path in class_convert_dict.keys():\n class_name_new = \"<class '\" + class_convert_dict[class_path] + \"'>\"\n class_object = self.import_class(class_name_new)\n elif not class_path.startswith(\"abc.\"):\n class_object = 
self.import_class(class_name)\n else:\n class_object = class_constructor(cp=JOB_DYN_DICT[class_path.split(\".\")[-1]])\n\n # Backwards compatibility since the format of TYPE changed\n if class_name != str(class_object):\n self[\"TYPE\"] = str(class_object)\n\n obj = self.create_instance(class_object, **qwargs)\n obj.from_hdf(hdf=self.open(\"..\"), group_name=self.h5_path.split(\"/\")[-1])\n return obj", "def type(cls):" ]
[ "0.6613706", "0.62533885", "0.5993754", "0.5973116", "0.59641635", "0.59002733", "0.58277136", "0.58260673", "0.5804339", "0.579951", "0.57783437", "0.5778184", "0.5754997", "0.56360924", "0.56325865", "0.56170595", "0.5612792", "0.55933315", "0.55877227", "0.5579284", "0.5577288", "0.55766827", "0.5568407", "0.55432236", "0.553281", "0.5531453", "0.5508754", "0.54830295", "0.5473498", "0.5460577", "0.5457232", "0.5454432", "0.54454714", "0.5431796", "0.5428504", "0.5420636", "0.5387686", "0.53868526", "0.5385183", "0.5385014", "0.53641605", "0.5353077", "0.5348775", "0.53458", "0.5312833", "0.5303609", "0.5272286", "0.5271934", "0.52674115", "0.52659947", "0.5264738", "0.52541894", "0.525056", "0.52354", "0.5231467", "0.52304816", "0.52273905", "0.52261037", "0.52253824", "0.52243567", "0.52069753", "0.52056026", "0.5205103", "0.5197669", "0.51946914", "0.5184753", "0.51707333", "0.5170651", "0.5165532", "0.5158231", "0.5155099", "0.51526654", "0.5145293", "0.5130984", "0.51256037", "0.5116391", "0.5116346", "0.51119745", "0.51109326", "0.51088405", "0.51049423", "0.5090146", "0.50868857", "0.50805783", "0.5076475", "0.50731575", "0.50679314", "0.5056119", "0.50500673", "0.50497556", "0.5043382", "0.50381964", "0.5037215", "0.50337267", "0.5030369", "0.50287765", "0.5020254", "0.501817", "0.50138485", "0.50129986" ]
0.5583555
19
Load dataset from csv file
def load_simulator_data(self, csvfname):
    data = []
    with open(csvfname, 'r') as csvfile:
        data_tmp = list(csv.reader(csvfile, delimiter=','))
        for row in data_tmp:
            x7 = [float(x) for x in row[7].split(':')]
            x8 = [float(x) for x in row[8].split(':')]
            data.append(((row[0], row[1], row[2]),
                         np.array([float(row[3]), float(row[4]), float(row[5]),
                                   float(row[6])] + x7 + x8)))
    return data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_dataset(path):\n training_data = []\n with open(path) as csv_file:\n reader = csv.reader(csv_file, delimiter=\",\")\n next(reader)\n for row in reader:\n training_data.append(row[1])\n return training_data", "def loadCSV(input_file):", "def load():\n filepath = dirname(abspath(__file__))\n##### EDIT THE FOLLOWING TO POINT TO DatasetName.csv #####\n data = recfromtxt(open(filepath + '/spector.csv',\"rb\"), delimiter=\" \",\n names=True, dtype=float, usecols=(1,2,3,4))\n names = list(data.dtype.names)\n endog = array(data[names[3]], dtype=float)\n endog_name = names[3]\n exog = column_stack(data[i] for i in names[:3]).astype(float)\n exog_name = names[:3]\n dataset = Dataset(data=data, names=names, endog=endog, exog=exog,\n endog_name = endog_name, exog_name=exog_name)\n return dataset", "def import_dataset(fpath):\r\n data = read_csv(fpath)\r\n print(data.head())\r\n print(data.shape)\r\n return data", "def data_from_csv(self, filepath):\n self.dataframe = pd.load_csv(filepath, separator='')", "def load_dataset_csv(filename, path=DATASETS):\n fpath = qualify_full_filepath(f\"{filename}.csv\", path)\n with open(fpath, \"r\", newline=\"\\n\") as infile:\n data = [row for row in csv.DictReader(infile)]\n return data", "def load_CSV_data(path):\n return np.genfromtxt(os.path.join('data/traffic_data', path))", "def load_data(csv_path):\n df = pd.read_csv(csv_path)\n return df", "def read_csv():", "def load_dataset(csv_path, label_col='y', add_intercept=False):\n\n def add_intercept_fn(x):\n global add_intercept\n return add_intercept(x)\n\n # Validate label_col argument\n allowed_label_cols = ('y', 't')\n if label_col not in allowed_label_cols:\n raise ValueError('Invalid label_col: {} (expected {})'\n .format(label_col, allowed_label_cols))\n\n # Load headers\n with open(csv_path, 'r') as csv_fh:\n headers = csv_fh.readline().strip().split(',')\n\n # Load features and labels\n x_cols = [i for i in range(len(headers)) if headers[i].startswith('x')]\n l_cols = [i for i in range(len(headers)) if headers[i] == label_col]\n inputs = np.loadtxt(csv_path, delimiter=',', skiprows=1, usecols=x_cols)\n labels = np.loadtxt(csv_path, delimiter=',', skiprows=1, usecols=l_cols)\n\n if inputs.ndim == 1:\n inputs = np.expand_dims(inputs, -1)\n\n if add_intercept:\n inputs = add_intercept_fn(inputs)\n\n return inputs, labels", "def load_dataset(filename):\n pickle_name = filename + \".pickle\"\n try:\n print(\"trying to load \" + filename + \" from pickle\")\n dataset = pickle.load(open(pickle_name, \"rb\"))\n except:\n with open(filename, 'r') as csv_file:\n print(\"no pickle exists. 
parsing file \" + filename)\n dataset = [DataPoint(item[1:], item[0])\n for item\n in csv.reader(csv_file, delimiter=',')]\n pickle.dump(dataset, open(pickle_name, \"wb\"))\n print(\"loaded \" + filename)\n return dataset", "def from_csv(cls, name, csv, **kwargs):\r\n data = pd.read_csv(csv, **kwargs)\r\n return Dataset(name, data, **kwargs)", "def load_data(csv_filename):\n data = np.genfromtxt(csv_filename, delimiter=\";\", skip_header=1, usecols=range(11))\n return data", "def load(self, path):\n self.df = pd.read_csv(path)\n print(\"Loaded data from {}\".format(path))", "def load():\n filepath = dirname(abspath(__file__))\n data = recfromtxt(filepath + '/scotvote.csv', delimiter=\",\",\n names=True, dtype=float, usecols=(1,2,3,4,5,6,7,8))\n names = list(data.dtype.names)\n endog = array(data[names[0]], dtype=float)\n endog_name = names[0]\n exog = column_stack(data[i] for i in names[1:]).astype(float)\n exog_name = names[1:]\n dataset = Dataset(data=data, names=names, endog=endog, exog=exog,\n endog_name = endog_name, exog_name=exog_name)\n return dataset", "def get_data(self, csv_file):\n pass", "def load_dataset_from(csv_file: str) -> pd.DataFrame:\n\n print(\">>> LOADING DATASET FROM FILE {filename}\".format(filename=csv_file))\n if not csv_file.endswith(\".csv\"):\n print(\"File has to be CSV type file!\")\n exit(1)\n\n try:\n data = pd.read_csv(csv_file)\n print(\">>> Finished loading data!\")\n return data\n except FileNotFoundError:\n print(\"File couldn't be found. Verify if '{f_path}' is a correct file path!\".format(f_path=csv_file))\n exit(1)", "def loadCSVSeeds(self, csvFilePath):\n labels = []\n with open(csvFilePath) as csvfile:\n reader = csv.reader(csvfile, delimiter=',', quotechar='|')\n for row in reader:\n labels.append([row[0], row[1], [float(row[2]), float(row[3]), float(row[4]) ]])\n print(csvFilePath + \": labels loaded\")\n return labels", "def _load_csv(self, file_path):\n csv_ds = tf.data.experimental.CsvDataset(\n file_path, self._default_csv_values,\n header=True,\n field_delim=CsvFilesDataset.CSV_SEPARATOR,\n use_quote_delim=False,\n select_cols=self._feature_column_indices\n )\n\n # Map to dictionary with column names\n if self.debug_columns:\n csv_ds = csv_ds.enumerate()\n csv_ds = csv_ds.map(lambda *row: self._map_csv_row_to_dict_with_debug(file_path, row))\n else:\n csv_ds = csv_ds.map(\n lambda *row: { feature_column_name: csv_column_values for feature_column_name, csv_column_values in zip(self._feature_column_names, row) }\n )\n\n # Get CSV file sequences\n csv_ds = self._map_csv_file_to_sequences(csv_ds, file_path)\n\n # Remove train column (avoid keras warning about unused inputs)\n if self._data_definition.trainable_column:\n csv_ds = csv_ds.map(self.remove_trainable_column)\n\n return csv_ds", "def from_csv(self, path):\n for model, table in [(self.Dataset, 'dataset'),\n (self.Datarun, 'datarun'),\n (self.Hyperpartition, 'hyperpartition'),\n (self.Classifier, 'classifier')]:\n df = pd.read_csv(os.path.join(path, '%ss.csv' % table))\n\n # parse datetime columns. 
This is necessary because SQLAlchemy can't\n # interpret strings as datetimes on its own.\n # yes, this is the easiest way to do it\n for c in inspect(model).attrs:\n if type(c) != ColumnProperty:\n continue\n col = c.columns[0]\n if type(col.type) == DateTime:\n df[c.key] = pd.to_datetime(df[c.key],\n infer_datetime_format=True)\n\n for _, r in df.iterrows():\n # replace NaN and NaT with None\n for k, v in list(r.iteritems()):\n if pd.isnull(v):\n r[k] = None\n\n # insert the row into the database\n create_func = getattr(self, 'create_%s' % table)\n create_func(**r)", "def import_data(csv_file):\n # skips bad lines\n data = pd.read_csv(csv_file, error_bad_lines=False)\n return data", "def load_csv(filename):\r\n dataset = list()\r\n with open(filename, 'r') as file:\r\n csv_reader = reader(file, delimiter='\\t')\r\n for row in csv_reader:\r\n if not row:\r\n continue\r\n dataset.append([float(i) for i in row])\r\n return dataset", "def load_data(path):\n train = pd.read_csv(os.path.join(path,'train.csv'))\n test = pd.read_csv(os.path.join(path,'test.csv'))\n \n return train, test", "def from_csv(self, path_to_load):\n import pandas as pd\n\n df = pd.read_csv(path_to_load)\n df = df.loc[:, ~df.columns.str.contains('^Unnamed')] # Remove unnnamed\n\n self.results['cids'] = list()\n self.results['differences'] = list()\n self.results['experimental_values'] = list()\n\n pd_dict = df.to_dict()\n length = len(pd_dict['cids'])\n for cid in [pd_dict['cids'][i] for i in range(0, length)]:\n self._results['cids'].append(cid)\n for cid in [pd_dict['differences'][i] for i in range(0, length)]:\n self._results['differences'].append(cid)\n for cid in [pd_dict['experimental_values'][i]\n for i in range(0, length)]:\n self._results['experimental_values'].append(cid)", "def csv_data_loader(path):\n\n logging.info(\"Loading file using SparkSession\")\n csvload = Spark.instance.spark() \\\n .read \\\n .format(\"csv\") \\\n .options(header=True) \\\n .options(mode=\"DROPMALFORMED\")\n\n return csvload.option(\"inferSchema\", \"true\").load(path)", "def loader(filename,sep=',',rowskip=[], colskip=[], axis=1,names=1,fromstring=0):\n\n #manages excpetions to the csv file incase of missing data\n if (type(filename)==str) and (fromstring==1):\n iterable=filename.strip('\\n').split('\\n')\n content=np.array([i for i in csv.reader(iterable,delimiter=sep)])\n elif type(filename)==np.ndarray:\n content=filename\n else:\n content=np.array([i for i in\\\n csv.reader(open(filename,'r'),delimiter=sep)])\n #content=np.genfromtxt(filename,delimiter=sep,dtype=str)\n\n if rowskip:\n #rowskip.sort(reverse=True)\n content=np.delete(content,rowskip,0)\n #for i in rowskip: content.pop(i)\n\n if colskip:\n #colskip.sort(reverse=True)\n content=np.delete(content,colskip,1)\n #for i in colskip: content.pop(i)\n\n if axis==0: # if the file oriented column-wise\n #content=list(map(list,zip(*content)))\n content=content.T\n\n\n\n if names is 0:\n variables=np.arange(content.shape[1]).tolist()\n offset=0\n else:\n variables=content[0].tolist()\n offset=1\n\n try:\n content=np.array([conv_col(col) for col in\n content[offset:].T],dtype='object')\n arity=np.array([np.unique(i).size for i in content])\n return dataset(variables,content.T,arity)\n except ValueError: \n print( 'Data could not be loaded, failed converting to float.')\n return content", "def load_csv_dataset(path,\n label_col,\n data_dir=None,\n smi_col='smiles',\n explicit_H=False,\n use_chirality=False,\n use_molecular_attributes=False,\n all_pair_features=False,\n 
graph_distance=True):\n df = pd.read_csv(path)\n smiles = np.array(df[smi_col])\n labels = np.array(df[label_col])\n if data_dir is None:\n data_dir = tempfile.mkdtemp()\n dataset = MolDataset(root=data_dir)\n batch_graphs = []\n for i, (smi, l) in enumerate(zip(smiles, labels)):\n if i > 0 and i % 1000 == 0:\n print(\"Featurized %d molecules\" % i)\n dataset.add_graph_batch(batch_graphs)\n batch_graphs = []\n g = mol_to_graph(Chem.MolFromSmiles(smi),\n explicit_H=explicit_H,\n use_chirality=use_chirality,\n use_molecular_attributes=use_molecular_attributes,\n all_pair_features=all_pair_features,\n graph_distance=graph_distance)\n g.smi = smi\n w = (l==l) * 1\n y = copy.deepcopy(l)\n y[np.where(y != y)] = 0.\n g.y = t.from_numpy(y).long()\n g.w = t.from_numpy(w).float()\n batch_graphs.append(g)\n dataset.add_graph_batch(batch_graphs)\n return dataset", "def load_data_csv():\r\n \r\n # Load lookup table\r\n path = 'data/id_lookup.csv'\r\n lookup_table = pd.read_csv(path, index_col=0)\r\n\r\n # Load song data\r\n path2 = 'data/data_lyrics_features.csv'\r\n data = pd.read_csv(path2, index_col=0)\r\n\r\n return data, lookup_table", "def load_from_csv(path, delimiter=','):\n return pd.read_csv(path,encoding = \"ISO-8859-1\",dtype=object)", "def _loadCSVFile(self):\n self._df = pd.read_csv(\n self._pathfile, sep=CSV_SEPARATOR, index_col=CSV_INDEX_COL)", "def load_from_csv(self):\n\n self._logger.info('Reading data coming from CSV files')\n\n sta = self.stations\n\n if sta != None:\n msta = \", \".join(sta)\n self._logger.debug('Using only stations {0}'.format(msta))\n\n # load the data\n v = list(self.variables)\n v.append('metadata')\n for i in v:\n if i in self.dataConfig:\n\n self._logger.debug('Reading %s...' % self.dataConfig[i])\n if i == 'metadata':\n dp_final = pd.read_csv(self.dataConfig[i],\n index_col='primary_id')\n #Ensure all stations are all caps.\n dp_final.index = [s.upper() for s in dp_final.index]\n\n elif self.dataConfig[i]:\n dp_full = pd.read_csv(self.dataConfig[i],\n index_col='date_time',\n parse_dates=[0])\n dp_full.columns = [s.upper() for s in dp_full.columns]\n\n if sta is not None:\n\n data_sta = dp_full.columns.str.upper()\n\n # Grab IDs from user list thats also in Data\n self.stations = [s for s in data_sta if s in sta]\n dp = dp_full[dp_full.columns[(data_sta).isin(sta)]]\n\n else:\n dp = dp_full\n\n # Only get the desired dates\n dp_final = dp[self.start_date:self.end_date]\n\n if dp_final.empty:\n raise Exception(\"No CSV data found for {0}\"\n \"\".format(i))\n\n setattr(self, i, dp_final)", "def importData(filename):\r\n data = pd.read_csv(filename)\r\n return data", "def load_testset(filename):\n pickle_name = filename + \".pickle\"\n try:\n print(\"trying to load \" + filename + \" from pickle\")\n dataset = pickle.load(open(pickle_name, \"rb\"))\n except:\n with open(filename, 'r') as csv_file:\n print(\"no pickle exists. 
parsing file \" + filename)\n dataset = [DataPoint(item[0:], \" \")\n for item\n in csv.reader(csv_file, delimiter=',')]\n pickle.dump(dataset, open(pickle_name, \"wb\"))\n print(\"loaded \" + filename)\n return dataset", "def load(name):\n if name in datasets:\n\n return pd.read_csv(os.path.join(datasets_path, \"%s.csv\" % name))\n else:\n raise ValueError(\"Dataset not found!\")", "def load_csv(self):\n self.database = pd.read_csv(\n self.settings['database_path'],\n encoding='utf-8')", "def read_Dataset(dataset_Path):\n dataset = pd.read_csv(dataset_Path)\n return dataset", "def load_data(file_path):\n data = pandas.read_csv(file_path)\n\n return data", "def read_csv_file(self):\n pass", "def __load_csv_into_mem(label, exp, obj, norms):\n filename = obj.get('file')\n # def csv_loader\n label_pos = obj.get('label', 'first')\n if label_pos == 'first':\n label_first = True\n else:\n label_first = False\n\n labels = []\n\n def get_element_from_csv():\n def callback(dim, lbl):\n CSVDataset.__load_csv_into_mem.dimension = dim - 1\n for l in lbl:\n labels.append(l)\n\n with open(filename, 'r') as f:\n for i in CSVDataset.__get_element_from_file__(csv.reader(f), label_first, norms, callback):\n yield i\n\n input_data = np.fromiter(get_element_from_csv(), dtype=np.float32)\n dimension = CSVDataset.__load_csv_into_mem.dimension\n input_data = input_data.reshape((-1, dimension))\n # print input_data[0]\n labels = np.asarray(labels, 'int32')\n kwargs = {}\n if 'batches' not in kwargs:\n b = getattr(exp.args, '%s_batches' % label, None)\n kwargs['batches'] = b\n if 'size' not in kwargs:\n kwargs['size'] = exp.args.batch_size\n kwargs['label'] = label\n return SequenceDataset(*(input_data, labels), **kwargs)", "def load_csv_model(filename) -> tuple:\n dat_sci = pd.read_csv(resources_folder(filename), index_col=0)\n commenter('data from ' + filename, lambda: print(dat_sci))\n\n ind = dat_sci.index\n # commenter('index', lambda: print(ind))\n col = dat_sci.columns\n # commenter('columns', lambda: print(col))\n # self.data = np.asmatrix(dat_sci.values)\n # commenter('data', lambda: print(self.data))\n # print(type(dat_sci))\n\n return dat_sci, ind, col", "def get_dataset(filepath):\n return pandas.read_csv(filepath, header='infer')", "def data_import(path):\n train_path = os.path.join(path, \"train.csv\")\n test_path = os.path.join(path, \"test.csv\")\n df_train = pd.read_csv(train_path)\n df_test = pd.read_csv(test_path)\n return df_train, df_test", "def read_csv(self) -> None:\n\n self._df = pd.read_csv(self._dataset_file)", "def load_dataset(csv_path: str, image_root: str):\n\n image_bases = get_image_bases(image_root)\n\n logging.info(f'Loading {len(image_bases)} images...')\n X = []\n for image_base in tqdm(image_bases):\n \"\"\" Load the image as a numpy array. Then, reshape it to be a vector\n of dimension equal to the total number of pixels, while converting it\n to grayscale by taking the average the RGB values. Index slicing is\n used to remove the 4th pixel value (alpha) if needed. 
\"\"\"\n im = np.array(Image.open(image_root + image_base))[:,:,:3]\n im = np.average(im.reshape(im.shape[0]**2, 3), axis=1)\n X.append(im)\n\n y = [int(x) for x in np.loadtxt(csv_path, delimiter=',', skiprows=1, usecols=[-1])]\n\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)\n return X_train, X_test, y_train, y_test", "def import_csv_dataset():\n import_fields = pd.read_csv('redacted-2020-june-30-wprdc-.csv', header=None).to_numpy()[0, :]\n import_values = pd.read_csv('redacted-2020-june-30-wprdc-.csv').to_numpy()\n import_values = clean_values(import_values)\n return import_fields, import_values", "def load_csv_file(file_name):\n return pandas.read_csv(path_dataset + file_name)", "def load_data(self, filepath, sep=\",\"):\n if filepath.split('.')[-1] == 'csv':\n self.data = pd.read_csv(filepath, sep=sep)\n elif filepath.split('.')[-1] == 'json':\n self.data = pd.read_json(filepath)\n else:\n print 'Please select a csv or json file'", "def load_dataset(file_name:str, delimiter:str):\n dataset = pd.read_csv(file_name, delimiter)\n\n return dataset", "def load_dataset_test():\n df_test = load_csv_file(\"31_test.csv\")\n return df_test.values", "def dataset_from_csv(self, filename, time_column='point_in_time'):\n return pd.from_csv(filename, parse_dates=[time_column])", "def read_csv_data(csv_path):\n\n return pd.read_csv(csv_path, sep=',', engine='python')", "def from_csv(self, filename):\n\t\tpoints = np.genfromtxt(filename, delimiter=\",\")\n\t\tassert points.shape[1] == 2\n\n\t\tself.N = points.shape[0]\n\t\tself.points = points\n\t\tself.original_points = points", "def __load_csv(filename):\n fp = open(Parser.DATA_FOLDER_PATH + filename + '.csv', 'r')\n records = []\n for line in fp:\n items = line.strip().split(',')\n x, y, z = '0', '0', '0'\n if len(items) > 1:\n x = items[1]\n if len(items) > 2:\n y = items[2]\n if len(items) > 3:\n z = items[3]\n\n values = [x, y, z]\n records.append(values)\n\n # Discard some beginning data which may be noisy\n # del records[:int(len(records) / 30)]\n n = len(records)\n\n for i in range(n):\n rec = []\n # Consider X, Y, Z axes\n for k in range(3):\n # If can convert string to float\n try:\n val = float(records[i][k])\n except ValueError:\n val = 0\n rec.append(val)\n\n # Replace it\n records[i] = rec\n return records", "def loadData(path):\n try:\n return pd.read_csv(path)\n except Exception as e:\n raise Exception(\"Could not read df, possbily incorrect path: {}\".format(e))", "def loadData(path_file):\n data = pd.read_csv(path_file) \n data.head()\n return data", "def _load_csv_data(kingdom_csv_path: str):\n\n file_path = os.getcwd() + \"/\" + RESOURCES_DIR_PATH + \"/\" + kingdom_csv_path\n\n kingdomArr = []\n\n with open(file_path, newline=\"\") as csv_file:\n reader = csv.reader(csv_file, delimiter=\",\")\n for row in reader:\n kingdomArr.append(Kingdom(row[0], row[1]))\n\n return kingdomArr", "def load_csv():\n df = pd.read_csv(datafolder+filename, decimal=decimal).astype(\n {'min': 'float', 'max': 'float'})\n return df", "def load_dataset(file_path):\n return Dataset.load(file_path)", "def load_data(self):\n with open('data/fordTrain.csv') as f:\n data = csv.reader(f, delimiter=',')\n train = [x for i, x in enumerate(data) if i > 0] \n # Extract features and target variable separately\n trainx = [x[3:] for x in train]\n trainy = [x[2] for x in train]\n\n with open('data/fordTest.csv') as f:\n data = csv.reader(f, delimiter=',')\n testx = [x[3:] for i, x in enumerate(data) if i > 0] \n\n with 
open('data/Solution.csv') as f:\n data = csv.reader(f, delimiter=',')\n testy = [x[2] for i, x in enumerate(data) if i > 0] \n\n # Extract features and target variable, convert to numpy array\n trainx = np.asarray(trainx, dtype=np.float32)\n trainy = np.asarray(trainy, dtype=np.int8)\n testx = np.asarray(testx, dtype=np.float32)\n testy = np.asarray(testy, dtype=np.int8)\n\n # Return training and test sets\n trainSet = Dataset(trainx, trainy)\n testSet = Dataset(testx, testy)\n return trainSet, testSet", "def load_data(filepath):\n\n file_path_casted = Path(filepath)\n if not file_path_casted.exists():\n raise FileNotFoundError(\"File does not exist.\")\n\n data = pd.read_csv(filepath, delimiter=\",\")\n\n return data", "def load_csv(csvpath):\n with open(csvpath, \"r\") as csvfile:\n data = []\n csvreader = csv.reader(csvfile, delimiter=\",\")\n\n # Skip the CSV Header\n next(csvreader)\n\n # Read the CSV data\n for row in csvreader:\n data.append(row)\n return data", "def open_convert_and_clean_csv(csv_data_file):\n imported_data = tablib.Dataset().load(open(csv_data_file).read())\n dataset = []\n for row in imported_data:\n if float(row[1]) > 0 and float(row[2]) > 0:\n dataset.append((row[0], float(row[1]), float(row[2])))\n return dataset", "def load_data_from_csv(csv_file, users_to_i = {}, items_to_i = {}):\n raw_data = []\n with open(csv_file) as f:\n csvreader = csv.reader(f)\n # skipping first row (header)\n next(csvreader)\n for user, item in csvreader:\n raw_data.append((user, item))\n return load_data_from_array(raw_data, users_to_i, items_to_i)", "def mock_data_loader(csv_path):\n file_path = KINGDOM_CSV_PATH\n\n kingdomArr = []\n\n with open(file_path, newline=\"\") as csv_file:\n reader = csv.reader(csv_file, delimiter=\",\")\n for row in reader:\n kingdomArr.append(Kingdom(row[0], row[1]))\n\n return kingdomArr", "def load_data(path):\n try:\n data = pd.read_csv(path, sep='\\t')\n except FileNotFoundError:\n logger.exception(\"Traceback of data file '{}' not found.\".format(path))\n else:\n return data", "def load_csv_data(data_path):\n print(\"LOADING CSV FILE FROM {}\".format(data_path))\n y = np.genfromtxt(data_path, delimiter=\",\", skip_header=1, dtype=str, usecols=[1])\n x = np.genfromtxt(data_path, delimiter=\",\", skip_header=1)\n ids = x[:, 0].astype(np.int)\n input_data = x[:, 2:]\n\n # convert class labels from strings to binary (-1,1)\n yb = np.ones(len(y))\n yb[np.where(y == 'b')] = -1\n\n return yb, input_data, ids", "def load_file(csv_file):\n vehicles = []\n # Opens the file and reads it row for row\n with open(csv_file, 'rb') as csv_open_file:\n reader = csv.reader(csv_open_file)\n for row in reader:\n # Reads in vehicles\n if len(row) != 1:\n name, x, y, orientation, length = row\n vehicles.append(Vehicle(name, int(x), int(y), orientation, int(length)))\n # Read size of the grid\n else:\n n = int(row[0])\n return Grid(set(vehicles), n)", "def load(csvfile):\n return PsychoPyCSV(csvfile)", "def read_data(path):\n data = pd.read_csv(path)\n return data", "def read_data(path):\n data = pd.read_csv(path)\n return data", "def load_csv(fname = data_indoor):\n \n reader = csv.reader(open(fname, 'r'))\n \n # Blank list\n data = []\n \n # Don't read the zeroth element of each row (image name), convert to float.\n for row in reader:\n data.append(map(float, row[1:]))\n \n # Convert list to array \n d = np.array(data)\n \n # Seperate labels from features\n Y = d[:,0]\n X = d[:,1:]\n \n return X,Y", "def _load(self):\n op_type_file_path = os.path.join(\n 
self._profiling_dir,\n self._csv_file_to_analyse.format(self._device_id)\n )\n op_type_file_path = validate_and_normalize_path(\n op_type_file_path, raise_key=\"Invalid op_type_file_path\")\n if not os.path.isfile(op_type_file_path):\n log.warning('The file <%s> does not exist.', op_type_file_path)\n return\n\n with open(op_type_file_path, 'r') as file:\n csv_reader = csv.reader(file)\n _ = next(csv_reader)\n for info in csv_reader:\n self._data.append(self._convert_field_type(info))", "def read_from_csv(self, csv_file):\n data = []\n with codecs.open(csv_file, 'r', encoding='utf-8') as csvfile:\n header = None\n for i, line in enumerate(csvfile):\n line_split = [x.strip() for x in line.split(\"|\")]\n line_data = [x for x in line_split if len(x) > 0]\n if i == 0:\n header = line_data\n else:\n entry = {}\n for i,datum in enumerate(line_data):\n entry[header[i]] = datum\n data.append(entry)\n print \"Loaded %d entries from %s\" % (len(data), csv_file)\n return data", "def read_csv(self, file_name, bin_size=1):\r\n # read training data from csv file and store into dataframe in panda\r\n self.classification_training_data = pd.read_csv(file_name, dtype={\r\n \"Age\": float,\r\n \"Ht\": float,\r\n \"TailLn\": float,\r\n \"HairLn\": float,\r\n \"BangLn\": float,\r\n \"Reach\": float,\r\n \"EarLobes\": float,\r\n \"Class\": str\r\n })\r\n self.normalize_data()\r\n return self.classification_training_data", "def _load_data(filename):\n\n def str2date(s):\n \"\"\"Converts a string to a datetime\"\"\"\n return datetime.strptime(s.decode(), \"%Y-%m-%d %H:%M:%S\")\n\n # Load the data\n return np.recfromcsv(filename, converters={0: str2date}, comments=\"#\")", "def load_data(self):\n self.data = pd.read_csv(self.data_path, dtype=self.dtype)\n self.data.columns = self.data_cols\n self.data.topic = self.data.topic.str.lower()\n logging.debug(f'Data Load Complete: {self.data_path}')", "def read_csv(self, csv_input):\n # https://stackoverflow.com/a/45063514\n dtypes = {\n 'lat': 'U',\n 'long': 'U'\n }\n csv_data = pd.read_csv(csv_input, encoding='UTF-8', sep=',', na_values=[''], dtype=dtypes)\n\n self.table = csv_data.fillna('').applymap(lambda x: x.strip() if type(x) == str else x)\n self.log.info('Data read from CSV %s' % csv_input)\n #print('Data read from CSV %s' % csv_input)", "def load(self):\n\t\t# save self.dfAnalysis\n\t\tcsvPath = self._getSavePath()\n\t\tif os.path.isfile(csvPath):\n\t\t\tprint('todo: load from', csvPath)\n\t\t\tself.dfAnalysis = pd.read_csv(csvPath, header=0)\n\t\t\tself.updateAnalysisPlot()\n\t\telse:\n\t\t\tprint('did not find saved file csvPath:', csvPath)", "def _read_data_file(self, path_model_id):\n\n path_dataset_file = path_model_id.joinpath('training_set.csv')\n\n with path_dataset_file.open(mode='r', newline='') as f:\n csv_reader = reader(f, delimiter=',')\n rows = list(csv_reader)\n\n self.example_count = len(rows)\n\n img_files = [path.join(f'label_{row[1]}', row[0]) for row in rows]\n enc_labels = self.class_le.fit_transform([row[1] for row in rows])\n \n self.files_labels = [[img_files[i], enc_labels[i]]\n for i in range(self.example_count)]", "def load_csv(filename):\n # Open csvfile\n with open(filename) as csvfile:\n reader = csv.DictReader(csvfile)\n\n # Put data in gloabal list\n for row in reader:\n # Get data of subject with either or both milk and peanut allergy\n if row[\"MILK_ALG_START\"] != \"NA\" or row[\"PEANUT_ALG_START\"] != \"NA\":\n sub_list = list()\n for key in DATA_KEYS:\n sub_list.append(row[key])\n\n # Add data of subject to all data \n 
data_list.append(sub_list)", "def load_train_data():\n\n # Load X_train\n with open('X_train.csv') as csvfile:\n reader = csv.DictReader(csvfile)\n feature_string_matrix = []\n for row in reader:\n feature_list = []\n for i in range(TRAIN_N):\n x_value = row['x' + str(i)]\n # Hit missing values\n if x_value == '':\n feature_list.append(np.nan)\n else:\n feature_list.append(float(row['x' + str(i)]))\n feature_string_matrix.append(feature_list)\n X_train = np.array(feature_string_matrix)\n # Load Y_train\n with open('y_train.csv') as csvfile:\n reader = csv.DictReader(csvfile)\n y_string = []\n for row in reader:\n y_value = [float(row['y'])]\n y_string.append(y_value)\n y_train = np.array(y_string)\n return X_train, y_train", "def from_csv(self,path):\n self.csv_path = path\n\n try:\n fh = open(self.csv_path, \"r\")\n except IOError:\n print(\"Error: no such file or directory\") \n\n self.csv_dataframe = pd.DataFrame(pd.read_csv(self.csv_path, header=0, keep_default_na=False)).dropna(axis=0, how='any')\n test = pd.DataFrame(pd.read_csv(self.csv_path)).dropna(axis=0, how='any')\n types = [0 for i in range(len(test.dtypes))]\n a = fh.readline()\n a = a[:-1] # remove '\\n'\n x = a.split(',') # x stores the name of each column\n fh.close()\n\n #type transformation\n for i in range(len(test.dtypes)):\n if test.dtypes[i].name[0:3] == 'int' or test.dtypes[i].name[0:5] == 'float':\n if (x[i][0] == \"'\" or x[i][0] == '\"'):\n x[i] = x[i].replace('\\'', '').replace('\"', '')\n for j in test[x[i]]:\n if not (j == 0 or (j > 1000 and j < 2100)):\n types[i] = test.dtypes[i].name[0:5]\n break\n else:\n types[i] = 'year'\n elif test.dtypes[i].name[0:6] == 'object':\n if (x[i][0] == \"'\" or x[i][0] == '\"'):\n x[i] = x[i].replace('\\'', '').replace('\"', '')\n for j in test[x[i]]:\n if j != 0 and not(re.search(r'\\d+[/-]\\d+[/-]\\d+', j)):\n types[i] = 'varchar'\n break\n else:\n types[i] = 'date'\n \n name = path.rsplit('/', 1)[-1][:-4]\n self.table_info(name, x, types)\n self.import_method = methods_of_import[2] # = 'csv'\n\n self.show_csv_info()", "def readData(dataset=\"./p2_v1.csv\"):\n data = pd.read_csv(dataset)\n\n return data", "def _init_train_valid(self, csv_path, csv_sep, csv_names):\n # load groundtruth\n # last element following a dot is file's extension\n print('Loading data...') \n if csv_path.split('.')[-1] == 'cache':\n # load cache\n # Assumes that the cache contains a list of all the identities, a dictionary containing metadata about those identities and the number of samples contained in the cache.\n # The dictionary must have the same format as the 'groundtruth_metadata' dictionary that is built below.\n # dati che mi servono: identities, groundtruth_metadata, num_samples\n with open(csv_path, 'rb') as cache_file:\n cache = pickle.load(cache_file)\n self.identities = cache['identities']\n self.groundtruth_metadata = cache['groundtruth_metadata']\n self.num_samples = cache['num_samples']\n else:\n # Assumes for the provided csv the following structure:\n # Path, ID, Gender, Age, x_min(roi_origin_x), y_min(roi_origin_y), width(roi_width), height(roi_height)\n groundtruth = pd.read_csv(csv_path, sep=csv_sep, names=csv_names)\n # for each groundtruth row\n for gt_sample in groundtruth.iterrows():\n identity = gt_sample[1][\"ID\"]\n # this iteration is over all of the elements in groundtruth, so the same id can be encountered multiple times (same id associated to multiple images)\n if identity not in self.identities:\n self.identities.append(identity)\n # load identity's metadata\n 
id_data = {\n 'age': gt_sample[1][\"Age\"],\n 'roi': {\n 'upper_left_x': gt_sample[1][\"x_min\"],\n 'upper_left_y': gt_sample[1][\"y_min\"],\n 'width': gt_sample[1][\"width\"],\n 'height': gt_sample[1][\"height\"]\n },\n 'path': gt_sample[1][\"Path\"]\n }\n if identity not in self.groundtruth_metadata.keys():\n self.groundtruth_metadata[identity] = {\n 'index': 0,\n 'metadata': []\n }\n # the other elements in the list associated to an identity are metadata \n self.groundtruth_metadata[identity]['metadata'].append(id_data)\n self.num_samples += 1\n # Dump loaded data to cache\n # Split csv path in directory path and filename\n (csv_dir, csv_name) = os.path.split(csv_path)\n # Create a name for cache file with the same name as csv file but different extension\n cache_name = csv_name.split('.')[0]+'.cache'\n # Create a path pointing to the new cache file, locating it in the same directory as the csv file\n cache_path = os.path.join(csv_dir, cache_name)\n # Write relevant data to file\n with open(cache_path, 'wb') as cache_out_file:\n out_dict = {}\n out_dict['identities'] = self.identities\n out_dict['groundtruth_metadata'] = self.groundtruth_metadata\n out_dict['num_samples'] = self.num_samples\n pickle.dump(out_dict, cache_out_file) \n print('Finished loading data!')\n if self.mode == 'training':\n self._shuffle()", "def load_simple_csv(filename, target_col = -1):\n #target_names = []\n #target = []\n #features = []\n n_samples = -1\n with open(filename) as csv_file:\n for line in csv_file:\n n_samples += 1\n\n with open(filename) as csv_file:\n data_file = csv.reader(csv_file)\n data_names = np.array(next(data_file))\n #print target_names.shape\n feature_names = np.delete(data_names,target_col) # 1 target , other cols are all features\n n_features = feature_names.shape[0]\n\n target = np.empty((n_samples,), dtype = np.dtype(float))\n features = np.empty((n_samples, n_features))\n type_list = [ (label, np.dtype(t)) for label,t in dtype_dict.items() ]\n type_list.pop(target_col)\n dt = np.dtype(type_list)\n # print len(dt)\n for i, item in enumerate(data_file):\n # print item,len(item)\n t = item.pop(target_col)\n target[i] = np.asarray(t, dtype = np.float64)\n features[i] = np.asarray(item, dtype = dt)\n\n return Bunch(data=features, target=target,\n target_names=None, # precit problem\n DESCR=None,\n feature_names=feature_names)", "def load_data():\n df = pd.read_csv(\"https://raw.githubusercontent.com/Andrea-Giuliani/Python-Project/master/data/final_dataset.csv\",sep=',') \n return df", "def test_load_with_csv(self):\n\n corpus = Corpus(\n common.TEST_CORPUS_PATH,\n csv_path=common.LARGE_TEST_CORPUS_CSV,\n name='test_corpus',\n )\n assert len(corpus) == 99\n assert isinstance(corpus.documents, list)\n assert corpus.name == 'test_corpus'", "def load(self):\n data = pandas.read_csv(self.path, names=self.names)\n return data", "def load_data(self):\n\n data_pd = pd.read_csv(self.filename)\n return np.array(data_pd)", "def load_from_file_csv(cls):\n try:\n with open(cls.__name__ + \".csv\", \"r\") as f:\n ld = []\n reader = csv.DictReader(f)\n for row in reader:\n for key, val in row.items():\n row[key] = int(val)\n ld.append(row)\n return [cls.create(**item) for item in ld]\n except FileNotFoundError:\n return []", "def read_from_csv(path):\n if not os.path.exists(path):\n return None\n if not path.endswith('.csv'):\n return None\n\n with open(path, 'r') as file:\n data = pd.read_csv(file, header=0)\n\n return data", "def load_data(fl=\"data.csv\"):\n data = np.loadtxt(fl, 
delimiter=\",\")\n y1 = data[:, 0]\n y2 = data[:, 1]\n return y1, y2", "def _read_csv(self):\n self.function_name = '_read_csv'\n with open(os.path.join(self.task.downloads, self.csv_name)) as csv_file:\n reader = csv.reader(csv_file, dialect='excel')\n for row in reader:\n self.input_data.append(row)", "def restore_profile_from_csv(csv_file):\n return np.loadtxt(csv_file, delimiter=\",\", skiprows=1, usecols=range(1, 21))", "def csv_loader(csv_file):\n df = pd.read_csv(csv_file, sep=';', parse_dates=['Data_Alteraçao'])\n pd.set_option('display.float_format', '{:.0f}'.format)\n\n df = df.fillna(0)\n df = df.drop(columns=['Cod. Pareamento', 'Cod. UF', 'Sigla UF', 'Cod. Subarea',\n 'Nome Subarea', 'Cod. Municipio', 'Nome Municipio', 'Codigo Agencia',\n 'Nome Agencia', 'Cod. Setor', 'Cod. Logradouro CNEFE',\n 'Tipo Logradouro CNEFE', 'Titulo Logradouro CNEFE',\n 'Nome Logradouro CNEFE', 'Nome Tratado CNEFE', 'Tipo Logradouro DNE',\n 'Titulo Logradouro DNE', 'Nome Logradouro DNE', 'Nome Tratado DNE',\n 'Logradouro Completo DNE', 'Distancia', 'Cod. Match', 'Motivo Match',\n 'CEPs Face', 'Localidade Face',\n 'Alterar Logradouro para DNE?', 'Observaçao', 'SIAPE Alteração',\n 'Nome Alteraçao', 'Data_Alteraçao', 'Status', 'Unnamed: 33'])\n\n # df.astype({'CEP Logradouro CNEFE': 'int32'}).dtypes\n\n df['CEP'] = df['CEP'].str.replace(' ', '', regex=False)\n\n ceps_dne = []\n for index, row in df.iterrows():\n if type(row.CEP) == str:\n for cep in row.CEP.split(','):\n # print(index, cep)\n ceps_dne.append(int(cep))\n\n ceps_cnefe = df['CEP Logradouro CNEFE'].astype(int).tolist()\n ceps = ceps_dne + ceps_cnefe\n ceps = list(set(ceps))\n return pd.Series(ceps)", "def read_dataset(self, file_path):\n try:\n with open(file_path, encoding=\"utf-8\") as file:\n fieldnames = ['hit_id', 'sentence', 'start_offset', 'end_offset', 'target_word', 'native_annots',\n 'nonnative_annots', 'native_complex', 'nonnative_complex', 'gold_label', 'gold_prob']\n\n dataset = pd.read_csv(file, names=fieldnames, sep=\"\\t\")\n\n except FileNotFoundError:\n print(\"File {} not found.\".format(file_path))\n dataset = None\n\n return dataset", "def dataLoad():\n try:\n try: #Python3\n f = open(__file__ + \".csv\",\"rt\")\n except: #Python2\n f = open(__file__ + \".csv\",\"rb\")\n data = f.read().split(',')\n entryCol.entry0.delete(0,END)\n entryCol.entry0.insert(0,data[0])\n entryCol.entry1.delete(0,END)\n entryCol.entry1.insert(0,data[1])\n entryCol.entry2.delete(0,END)\n entryCol.entry2.insert(0,data[2])\n entryCol.entry3.delete(0,END)\n entryCol.entry3.insert(0,data[3])\n botWind.writeN(\"DataLoad: File\")\n except:\n botWind.writeN(\"DataLoad: Default\")", "def __init__(self, csv_path):\r\n # Transforms\r\n self.to_tensor = transforms.ToTensor()\r\n # Read the csv file\r\n self.data_info = pd.read_csv(csv_path, header=None)\r\n # First column contains the image paths\r\n self.image_arr = np.asarray(self.data_info.iloc[:, 0])\r\n # Second column is the labels\r\n self.label_arr = [np.asarray(self.data_info.iloc[:, 1])]\r\n # Third column is for an operation indicator\r\n #self.operation_arr = np.asarray(self.data_info.iloc[:, 2])\r\n # Calculate len\r\n self.data_len = len(self.data_info.index)", "def get_train_data():\n # train set\n train = pd.read_csv(\"train.csv\")\n\n return train", "def read_data_from_csv(filename):\n df = pd.read_csv(filename)\n return df", "def from_csv(cls, load_folder: Path) -> \"Parameters\":\n serializer = serializer_factory(fmt=SerializerEnum.CSV)\n return serializer.load(class_obj=cls, 
folder_path=load_folder)" ]
[ "0.7754483", "0.77290475", "0.7686873", "0.76696587", "0.75522053", "0.74628526", "0.7399509", "0.7380369", "0.7370351", "0.7362085", "0.72771174", "0.7227899", "0.72193736", "0.7215024", "0.72012967", "0.7180689", "0.71734875", "0.7173242", "0.7164557", "0.7121014", "0.7113908", "0.71125555", "0.709943", "0.70852625", "0.7078197", "0.70772326", "0.7037246", "0.7017191", "0.7009561", "0.700479", "0.70043206", "0.697084", "0.6966462", "0.6961327", "0.6959522", "0.6959106", "0.69482183", "0.6938188", "0.6925245", "0.69117415", "0.6907729", "0.6892198", "0.6852023", "0.6834558", "0.6827612", "0.6820547", "0.68186843", "0.67901844", "0.6785411", "0.6770582", "0.67605954", "0.67597634", "0.6759258", "0.67590374", "0.6748175", "0.67407894", "0.67004067", "0.6697276", "0.6696601", "0.6694789", "0.66864794", "0.6681498", "0.667418", "0.6667408", "0.66667676", "0.6666693", "0.6666091", "0.6661936", "0.6656573", "0.6656573", "0.66505975", "0.6643731", "0.6639993", "0.6635022", "0.66300786", "0.6628477", "0.66273993", "0.66104835", "0.6610288", "0.6609476", "0.66066486", "0.6606623", "0.6605665", "0.6600264", "0.65997744", "0.65934306", "0.65925646", "0.65893203", "0.65879124", "0.65869975", "0.6580394", "0.6580184", "0.6577949", "0.65713865", "0.65707105", "0.65692663", "0.6562418", "0.6560749", "0.6560521", "0.6557371", "0.655625" ]
0.0
-1
Gets info about all datasets. Returns str with MarkDown syntax
def get_info() -> str: req = Request(URL + '/info') context = ssl._create_unverified_context() with urlopen(req, context=context) as response: return response.read().decode('utf-8')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def list_datasets():\n return METADATA.keys()", "def info(self, datasets=(), tasks=()):\n pp = pprint.PrettyPrinter(indent=4)\n print_text_box('Dataset')\n if any(datasets) or any(tasks):\n data = self._get_select_tasks_and_datasets_data(datasets, tasks)\n else:\n data = self._get_dataset_data()\n pp.pprint(data)\n print('')", "def show_available_datasets(params: DownloadCommandParameters):\n print(f\"\\nDatasets available in '{params.metadata_file}':\\n\")\n datasets = pd.read_csv(params.metadata_file)[\"dataset\"]\n items = datasets.value_counts()\n print(pd.DataFrame({\"Datasets\": items.index,\n \"Instances\": items.values}))", "def datasets(self):\n return [Dataset.GWAS_CATALOG, Dataset.CLINVAR, Dataset.EFO]", "def datasets(self):\n pass", "def get_datasets():\n data = request.get_json()\n return jsonify(result=Tree.datasets(data['field']))", "def all_datasets():\n query = db.session.query(Dataset)\n result = [dict(name=q.name, description=q.description, id=q.id) for q in query]\n return result", "def load_datasets():\n idx, data_paths, data_names, desc_paths, descrips, sql_paths, \\\n sql_names, loaded, table_size, \\\n loaded_names = mgr.build_datasets_table()\n return render_template('load_datasets.html',\n zip=zip(idx, data_paths, data_names, desc_paths,\n descrips, sql_paths, sql_names, loaded,\n table_size),\n data_names=loaded_names)", "def list_dataset(self, dataset_id=None):\n url = self.prism_endpoint + \"/datasets\"\n\n if dataset_id is not None:\n url = url + \"/\" + dataset_id\n\n headers = {\"Authorization\": \"Bearer \" + self.bearer_token}\n\n r = requests.get(url, headers=headers)\n\n if r.status_code == 200:\n logging.info(\"Successfully obtained information about your datasets\")\n return r.json()\n else:\n logging.warning(\"HTTP Error {}\".format(r.status_code))", "def get_datasets() -> List[Dataset]:\n\n amzn = Dataset(\n id='amzn', name='Amazon Reviews', language='en',\n description=\"This dataset consists of reviews of fine foods from amazon. The data span a period of more than 10 years, including all ~500,000 reviews up to October 2012. Reviews include product and user information, ratings, and a plain text review. It also includes reviews from all other Amazon categories.\")\n\n cnn = Dataset(\n id='cnn_dailymail', name='CNN/ DailyMail', language='en',\n description='The well-known CNN/ DailyMail data set for text summarization (version 3.0.0). The data has been fetched via HuggingFace Datasets')\n\n swisstext = Dataset(\n id='swisstext', name='SwissText 2019', language='de',\n description='The dataset was published for the SwissText conference 2019. 
')\n\n return [amzn, cnn, swisstext]", "def dataset_list(self):\n\n response = self.send(root_url=self.session.dm_url + self.root_url,\n verb=GenericClient.VERB.GET,\n template=TEMPLATES['get_data_set_list'])\n\n results = []\n\n try:\n # Keep only the necessary fields from the request\n for content in response.json:\n results.append({'name': content['name'], 'description': content['description']})\n except IndexError:\n # Return emtpy results if parsing error\n pass\n return results", "def list_datasets(project=None):\n bigquery_client = bigquery.Client(project=project)\n\n for dataset in bigquery_client.list_datasets():\n print(dataset.name)", "def describe_dataset(self, dataset_id=None):\n url = self.prism_endpoint + \"/datasets\"\n\n if dataset_id is not None:\n url = url + \"/\" + dataset_id + \"/describe\"\n\n headers = {\"Authorization\": \"Bearer \" + self.bearer_token}\n\n r = requests.get(url, headers=headers)\n\n if r.status_code == 200:\n logging.info(\"Successfully obtained information about your datasets\")\n return r.json()\n else:\n logging.warning(\"HTTP Error {}\".format(r.status_code))", "def info(self):\n for key, value in self.dataset['info'].items():\n print('{}: {}'.format(key, value))", "def get_dataset_details(name, analyst):\n\n template = None\n allowed_sources = user_sources(analyst)\n dataset_object = Dataset.objects(name = name,\n source__name__in=allowed_sources).first()\n if not dataset_object:\n error = (\"Either no data exists for this dataset\"\n \" or you do not have permission to view it.\")\n template = \"error.html\"\n args = {'error': error}\n return template, args\n\n dataset_object.sanitize_sources(username=\"%s\" % analyst,\n sources=allowed_sources)\n\n # remove pending notifications for user\n remove_user_from_notification(\"%s\" % analyst, dataset_object.id, 'Dataset')\n\n # subscription\n subscription = {\n 'type': 'Dataset',\n 'id': dataset_object.id,\n 'subscribed': is_user_subscribed(\"%s\" % analyst,\n 'Dataset',\n dataset_object.id),\n }\n\n #objects\n objects = dataset_object.sort_objects()\n\n #relationships\n relationships = dataset_object.sort_relationships(\"%s\" % analyst, meta=True)\n\n # relationship\n relationship = {\n 'type': 'Datset',\n 'value': dataset_object.id\n }\n\n #comments\n comments = {'comments': dataset_object.get_comments(),\n 'url_key':dataset_object.name}\n\n # favorites\n favorite = is_user_favorite(\"%s\" % analyst, 'Dataset', dataset_object.id)\n\n # services\n service_list = get_supported_services('Dataset')\n\n # analysis results\n service_results = dataset_object.get_analysis_results()\n\n args = {'dataset': dataset_object,\n 'objects': objects,\n 'relationships': relationships,\n 'comments': comments,\n 'favorite': favorite,\n 'relationship': relationship,\n 'subscription': subscription,\n 'name': dataset_object.name,\n 'service_list': service_list,\n 'service_results': service_results}\n\n return template, args", "def datasets(self):\n return [Dataset.ENSEMBL]", "def summary(self):\n\t\tprint \"Summary--------------------------------------:\"\n\t\tprint \"Available data sources are:\"\n\t\tfor path in self.available_databases:\n\t\t\tprint path", "def _datasets(self):\n return self._flat_data._datasets", "def list_datasets(self):\n if self.list_type == \"base\":\n ds = Dataset(f\"{self.pool}/iocage/releases\").get_dependents()\n elif self.list_type == \"template\":\n ds = Dataset(\n f\"{self.pool}/iocage/templates\").get_dependents()\n else:\n ds = 
Dataset(f\"{self.pool}/iocage/jails\").get_dependents()\n\n ds = list(ds)\n\n if self.list_type in ('all', 'basejail', 'template'):\n if self.quick:\n _all = self.list_all_quick(ds)\n else:\n _all = self.list_all(ds)\n\n return _all\n elif self.list_type == \"uuid\":\n jails = {}\n\n for jail in ds:\n uuid = jail.name.rsplit(\"/\", 1)[-1]\n try:\n jails[uuid] = jail.properties[\"mountpoint\"]\n except KeyError:\n iocage_lib.ioc_common.logit(\n {\n 'level': 'ERROR',\n 'message': f'{jail.name} mountpoint is '\n 'misconfigured. Please correct this.'\n },\n _callback=self.callback,\n silent=self.silent\n )\n\n template_datasets = Dataset(\n f'{self.pool}/iocage/templates').get_dependents()\n\n for template in template_datasets:\n uuid = template.name.rsplit(\"/\", 1)[-1]\n jails[uuid] = template.properties['mountpoint']\n\n return jails\n elif self.list_type == \"base\":\n bases = self.list_bases(ds)\n\n return bases", "def _info(self) -> tfds.core.DatasetInfo:\n return tfds.core.DatasetInfo(\n builder=self,\n description=_DESCRIPTION,\n features=tfds.features.FeaturesDict({\n 'id': tfds.features.Text(),\n 'program': tfds.features.Text(),\n 'date': tfds.features.Text(),\n 'url': tfds.features.Text(),\n 'summary': tfds.features.Text(),\n 'utt': tfds.features.Sequence(tfds.features.Text()),\n 'speaker': tfds.features.Sequence(tfds.features.Text()),\n }),\n supervised_keys=('utt', 'summary'),\n homepage='https://github.com/zcgzcgzcg1/MediaSum',\n citation=_CITATION,\n )", "def get_available_datasets():\n files = [file for file in glob.glob(os.path.join(MODULE_ROOT, \"datasets/*.json\"))]\n datasets = []\n for file in files:\n with open(file, \"r\") as f:\n dataset_info = json.load(f)\n datasets.append(dataset_info)\n return datasets", "def info(self, name=None):\n if name is not None:\n self._print_info_of_all_tasks_of_single_dataset(name)\n else:\n self._print_info_of_all_tasks_of_all_datasets()", "def list():\n cmd = 'qri list --format json'\n result, err = shell_exec(cmd)\n if err:\n raise RuntimeError(err)\n datasets = dataset.DatasetList([dataset.Dataset(d) for d in json.loads(result)])\n datasets.sort(key=lambda d: d.human_ref())\n return datasets", "def get(log, session, args):\n url = \"{}datasets/{}\".format(\n http.get_api_url(args.url, args.project),\n args.id)\n log.debug('GET: {}'.format(url))\n response_json = http.get(session, url)\n log.print_json(response_json, \"dataset\", \"get\")", "def _show_info(self):\n\n dataframe = self._cache.get_source(config.DATAFRAME_ARTISTS)\n dataframe.printSchema()", "def dataset_statistics(dataset):\n print (dataset.describe())", "def api_index():\n db = get_db()\n datasets = db.execute('SELECT * FROM dataset').fetchall()\n \n responses = {}\n columns = ['name', 'description', 'tags', 'dataset_file_size', 'files']\n\n for dataset in datasets:\n response = dict(zip(columns, [dataset[column] for column in columns]))\n response['link'] = url_for('dataset.api_show', dataset_id=dataset['id'])\n responses[dataset['id']] = response\n\n return jsonify(dict(responses))", "def dataset_statistics(dataset):\n print(dataset.describe())", "def dataset_statistics(dataset):\n print(dataset.describe())", "def test_list_datasets(self, tmp_path):\n assert qml.data.list_datasets() == {\n \"qspin\": {\"Heisenberg\": {\"closed\": {\"chain\": [\"1x4\"]}}},\n \"qchem\": {\"H2\": {\"6-31G\": [\"0.46\", \"1.0\", \"1.16\"]}},\n }", "def api_show(dataset_id):\n db = get_db()\n dataset = db.execute('SELECT * FROM dataset WHERE id = ?', (dataset_id, )).fetchone()\n\n columns = 
['name', 'description', 'tags', 'dataset_file_size', 'files']\n response = dict(zip(columns, [dataset[column] for column in columns]))\n response['graph_path'] = os.path.join('static', 'results', str(dataset_id) + '.png')\n\n return jsonify(response)", "def dataset_names(self) -> List[str]:\n return list(self._datasets.keys())", "def _info(self) -> tfds.core.DatasetInfo:\n features = tfds.features.FeaturesDict({\n \"tokens\":\n tfds.features.Sequence(tfds.features.Text()),\n \"tags\":\n tfds.features.Sequence(\n tfds.features.ClassLabel(names=[\n \"O\",\n \"B-PER\",\n \"I-PER\",\n \"B-ORG\",\n \"I-ORG\",\n \"B-LOC\",\n \"I-LOC\",\n ])),\n \"langs\":\n tfds.features.Sequence(tfds.features.Text()),\n \"spans\":\n tfds.features.Sequence(tfds.features.Text()),\n })\n return tfds.core.DatasetInfo(\n builder=self,\n description=_DESCRIPTION,\n features=features,\n supervised_keys=None,\n homepage=\"https://github.com/afshinrahimi/mmner\",\n citation=_CITATION,\n )", "def _info(self) -> tfds.core.DatasetInfo:\n return tfds.core.DatasetInfo(\n builder=self,\n description=_DESCRIPTION,\n features=tfds.features.FeaturesDict({\n # These are the features of your dataset like images, labels ...\n 'image': tfds.features.Image(shape=(None, None, 1)),\n 'bboxes': tfds.features.Sequence({'bbox': tfds.features.BBoxFeature()}),\n 'image_id': tfds.features.Text(),\n 'series_id': tfds.features.Text(),\n 'study_id': tfds.features.Text(),\n 'category': tfds.features.ClassLabel(names=['negative', 'typical', 'atypical', 'indeterminate'])\n }),\n supervised_keys=('image', 'category'),\n homepage='https://dataset-homepage/',\n citation=_CITATION,\n )", "def index():\n response = \"\"\n for table in config.TABLE_SCHEMA.keys():\n response = response + disco.examples(table)\n return response", "def all_datasets(self) -> set[str]:\n return set(self.frames.idx.get_dataset_ids())", "def list_all_datasets(client=None):\n datasets = []\n try:\n datasets_list = list(client.list_datasets())\n if datasets_list:\n for dataset in datasets_list:\n datasets.append(dataset.dataset_id)\n except Exception as error:\n print(\n \"Exception occurred at function {}: {}\".format(\"list_all_datasets\", error)\n )\n finally:\n return datasets", "def get_datasets(self): # noqa: N805\n return vars(self)", "def get_datasets_summary(rs):\n\n\tif rs == \"rs1\":\n\t\tdataset_list = db.get_engine(current_app, 'methylation_data').execute(\"SELECT * FROM datasets WHERE dataset NOT LIKE 'CEMBA_RS2_%%'\").fetchall()\n\t\tdataset_list += db.get_engine(current_app, 'snATAC_data').execute(\"SELECT * FROM datasets WHERE dataset NOT LIKE 'CEMBA_RS2_%%'\").fetchall()\n\n\t\t# This is a hack to get unique values in a list of dictionaries\n\t\tdataset_list = list({x['dataset']:x for x in dataset_list}.values());\n\t\ttotal_methylation_cell_each_dataset = db.get_engine(current_app, 'methylation_data').execute(\"SELECT dataset, COUNT(*) as `num` FROM cells WHERE dataset NOT LIKE 'CEMBA_RS2_%%' GROUP BY dataset\").fetchall()\n\t\ttotal_snATAC_cell_each_dataset = db.get_engine(current_app, 'snATAC_data').execute(\"SELECT dataset, COUNT(*) as `num` FROM cells WHERE dataset NOT LIKE 'CEMBA_RS2_%%' GROUP BY dataset\").fetchall()\n\telif rs == \"rs2\":\n\t\tdataset_list = db.get_engine(current_app, 'methylation_data').execute(\"SELECT * FROM datasets WHERE dataset LIKE 'CEMBA_RS2_%%'\").fetchall()\n\t\ttotal_methylation_cell_each_dataset = db.get_engine(current_app, 'methylation_data').execute(\"SELECT dataset, COUNT(*) as `num` FROM cells WHERE dataset LIKE 
'CEMBA_RS2_%%' GROUP BY dataset\").fetchall()\n\t\ttotal_snATAC_cell_each_dataset = db.get_engine(current_app, 'snATAC_data').execute(\"SELECT dataset, COUNT(*) as `num` FROM cells WHERE dataset LIKE 'CEMBA_RS2_%%' GROUP BY dataset\").fetchall()\n\telif rs == \"all\":\n\t\tdataset_list = db.get_engine(current_app, 'methylation_data').execute(\"SELECT * FROM datasets\").fetchall()\n\t\tdataset_list += db.get_engine(current_app, 'snATAC_data').execute(\"SELECT * FROM datasets\").fetchall()\n\t\t# This is a hack to get unique values in a list of dictionaries\n\t\tdataset_list = list({x['dataset']:x for x in dataset_list}.values());\n\t\ttotal_methylation_cell_each_dataset = db.get_engine(current_app, 'methylation_data').execute(\"SELECT dataset, COUNT(*) as `num` FROM cells GROUP BY dataset\").fetchall()\n\t\ttotal_snATAC_cell_each_dataset = db.get_engine(current_app, 'snATAC_data').execute(\"SELECT dataset, COUNT(*) as `num` FROM cells GROUP BY dataset\").fetchall()\n\telse:\n\t\treturn\n\n\ttotal_methylation_cell_each_dataset = [ {d['dataset']: d['num']} for d in total_methylation_cell_each_dataset ]\n\ttotal_methylation_cell_each_dataset = { k: v for d in total_methylation_cell_each_dataset for k, v in d.items() }\n\ttotal_snATAC_cell_each_dataset = [ {d['dataset']: d['num']} for d in total_snATAC_cell_each_dataset ]\n\ttotal_snATAC_cell_each_dataset = { k: v for d in total_snATAC_cell_each_dataset for k, v in d.items() }\n\n\tdataset_cell_counts = []\n\tfor dataset in dataset_list:\n\t\ttry:\n\t\t\tnum_snATAC_cells = total_snATAC_cell_each_dataset[dataset['dataset']]\n\t\texcept KeyError as e:\n\t\t\tnum_snATAC_cells = 0\n\n\t\tif \"RS2\" not in dataset['dataset']:\n\t\t\tbrain_region_code = dataset['dataset'].split('_')[1]\n\t\t\tresearch_segment = \"RS1\"\n\t\telse:\n\t\t\tbrain_region_code = dataset['dataset'].split('_')[2]\n\t\t\tbrain_region_code = brain_region_code[-2:]\n\t\t\tresearch_segment = \"RS2\"\n\n\t\tregions_sql = db.get_engine(current_app, 'methylation_data').execute(\"SELECT ABA_description FROM ABA_regions WHERE ABA_acronym='%s'\" % dataset['brain_region']).fetchone()\n\t\tif regions_sql is not None:\n\t\t\tABA_regions_descriptive = regions_sql['ABA_description'].replace('+', ', ')\n\t\telse:\n\t\t\tABA_regions_descriptive = \"\"\n\n\t\tif rs == \"rs1\":\n\t\t\ttry:\n\t\t\t\tdataset_cell_counts.append( {\"dataset_name\": dataset['dataset'],\n\t\t\t\t\t\t\t\t\t\t\t \"sex\": dataset['sex'],\n\t\t\t\t\t\t\t\t\t\t\t \"methylation_cell_count\": total_methylation_cell_each_dataset[dataset['dataset']],\n\t\t\t\t\t\t\t\t\t\t\t \"snATAC_cell_count\": num_snATAC_cells,\n\t\t\t\t\t\t\t\t\t\t\t \"ABA_regions_acronym\": dataset['brain_region'].replace('+', ', '),\n\t\t\t\t\t\t\t\t\t\t\t \"ABA_regions_descriptive\": ABA_regions_descriptive,\n\t\t\t\t\t\t\t\t\t\t\t \"slice\": brain_region_code,\n\t\t\t\t\t\t\t\t\t\t\t \"date_added\": str(dataset['date_online']),\n\t\t\t\t\t\t\t\t\t\t\t \"description\": dataset['description'] })\n\t\t\texcept:\n\t\t\t\tdataset_cell_counts.append( {\"dataset_name\": dataset['dataset'],\n\t\t\t\t\t\t\t\t\t\t\t \"sex\": dataset['sex'],\n\t\t\t\t\t\t\t\t\t\t\t \"methylation_cell_count\": 0,\n\t\t\t\t\t\t\t\t\t\t\t \"snATAC_cell_count\": num_snATAC_cells,\n\t\t\t\t\t\t\t\t\t\t\t \"ABA_regions_acronym\": dataset['brain_region'].replace('+', ', '),\n\t\t\t\t\t\t\t\t\t\t\t \"ABA_regions_descriptive\": ABA_regions_descriptive,\n\t\t\t\t\t\t\t\t\t\t\t \"slice\": brain_region_code,\n\t\t\t\t\t\t\t\t\t\t\t \"date_added\": 
str(dataset['date_online']),\n\t\t\t\t\t\t\t\t\t\t\t \"description\": dataset['description'] })\n\t\telse:\n\t\t\ttarget_region_sql = db.get_engine(current_app, 'methylation_data').execute(\"SELECT ABA_description FROM ABA_regions WHERE ABA_acronym='%s'\" % dataset['target_region']).fetchone()\n\t\t\tif target_region_sql is not None:\n\t\t\t\ttarget_region_descriptive = target_region_sql['ABA_description'].replace('+', ', ')\n\t\t\telse:\n\t\t\t\ttarget_region_descriptive = \"\"\n\n\t\t\ttry:\n\t\t\t\tdataset_cell_counts.append( {\"dataset_name\": dataset['dataset'],\n\t\t\t\t\t\t\t\t\t\t\t \"research_segment\": research_segment,\n\t\t\t\t\t\t\t\t\t\t\t \"sex\": dataset['sex'],\n\t\t\t\t\t\t\t\t\t\t\t \"methylation_cell_count\": total_methylation_cell_each_dataset[dataset['dataset']],\n\t\t\t\t\t\t\t\t\t\t\t \"snATAC_cell_count\": num_snATAC_cells,\n\t\t\t\t\t\t\t\t\t\t\t \"ABA_regions_acronym\": dataset['brain_region'].replace('+', ', '),\n\t\t\t\t\t\t\t\t\t\t\t \"ABA_regions_descriptive\": ABA_regions_descriptive,\n\t\t\t\t\t\t\t\t\t\t\t \"slice\": brain_region_code,\n\t\t\t\t\t\t\t\t\t\t\t \"date_added\": str(dataset['date_online']),\n\t\t\t\t\t\t\t\t\t\t\t \"description\": dataset['description'],\n\t\t\t\t\t\t\t\t\t\t\t \"target_region_acronym\": dataset['target_region'],\n\t\t\t\t\t\t\t\t\t\t\t \"target_region_descriptive\": target_region_descriptive})\n\t\t\texcept:\n\t\t\t\tdataset_cell_counts.append( {\"dataset_name\": dataset['dataset'],\n\t\t\t\t\t\t\t\t\t\t\t \"research_segment\": research_segment,\n\t\t\t\t\t\t\t\t\t\t\t \"sex\": dataset['sex'],\n\t\t\t\t\t\t\t\t\t\t\t \"methylation_cell_count\": 0,\n\t\t\t\t\t\t\t\t\t\t\t \"snATAC_cell_count\": num_snATAC_cells,\n\t\t\t\t\t\t\t\t\t\t\t \"ABA_regions_acronym\": dataset['brain_region'].replace('+', ', '),\n\t\t\t\t\t\t\t\t\t\t\t \"ABA_regions_descriptive\": ABA_regions_descriptive,\n\t\t\t\t\t\t\t\t\t\t\t \"slice\": brain_region_code,\n\t\t\t\t\t\t\t\t\t\t\t \"date_added\": str(dataset['date_online']),\n\t\t\t\t\t\t\t\t\t\t\t \"description\": dataset['description'],\n\t\t\t\t\t\t\t\t\t\t\t \"target_region_acronym\": dataset['target_region'],\n\t\t\t\t\t\t\t\t\t\t\t \"target_region_descriptive\": target_region_descriptive})\n\n\treturn json.dumps(dataset_cell_counts)", "def get_datasets(request):\n from seed.models import obj_to_dict\n org = Organization.objects.get(pk=request.GET.get('organization_id'))\n datasets = []\n for d in ImportRecord.objects.filter(super_organization=org):\n importfiles = [obj_to_dict(f) for f in d.files]\n dataset = obj_to_dict(d)\n dataset['importfiles'] = importfiles\n if d.last_modified_by:\n dataset['last_modified_by'] = d.last_modified_by.email\n dataset['number_of_buildings'] = BuildingSnapshot.objects.filter(\n import_file__in=d.files,\n canonicalbuilding__active=True,\n ).count()\n dataset['updated_at'] = convert_to_js_timestamp(d.updated_at)\n datasets.append(dataset)\n\n return {\n 'status': 'success',\n 'datasets': datasets,\n }", "def get_providers(self):\n datasets = [\n \"Heineken\",\n \"Eisenbahn\",\n \"Corona\",\n \"Brahma\",\n \"Skol\",\n \"Bohemia\"\n ]\n return datasets", "def _info(self) -> tfds.core.DatasetInfo:\n # TODO(mtnt): Specifies the tfds.core.DatasetInfo object\n return tfds.core.DatasetInfo(\n builder=self,\n description=_DESCRIPTION,\n features=tfds.features.FeaturesDict({\n # These are the features of your dataset like images, labels ...\n 'src': tfds.features.Text(),\n 'dst': tfds.features.Text(),\n }),\n # If there's a common (input, target) tuple from the\n # 
features, specify them here. They'll be used if\n # `as_supervised=True` in `builder.as_dataset`.\n supervised_keys=('src', 'dst'), # Set to `None` to disable\n homepage='https://pmichel31415.github.io/mtnt/index.html',\n citation=_CITATION,\n )", "def dataset(options):\n pass", "def list_datasets():\n datasets = {}\n for datafile in HERE.glob(\"*.csv.gz\"):\n index = False\n name = datafile.name[:-7]\n if \".indexed\" in name:\n name = name.replace(\".indexed\", \"\")\n index = True\n datasets[name] = {\"index\": index, \"file\": datafile}\n return datasets", "def _get_dataset_contents(self,\n dataset_root_path: str,\n property_alias: str) -> Tuple[np.ndarray, np.ndarray, List[int], List[str]]:\n cur_dir = os.getcwd()\n os.chdir(dataset_root_path) # the parent folder with sub-folders\n\n list_fams, no_imgs, num_samples = self._get_labels_info(property_alias)\n\n y, indexes = self._fetch_labels(list_fams, no_imgs, num_samples)\n X, samples_names = self._get_X_and_names(list_fams, num_samples, property_alias)\n\n os.chdir(cur_dir)\n\n return X, y, indexes, samples_names", "def dataset(value=None):\n data = getDBData()\n return render_template(\"dataset.html\",\n value=data\n )", "def get_dataset_info(key: str):\n key = key.lower().replace(\"-\", \"_\").split(\".\")[0]\n filename = key + \".json\"\n\n if filename not in os.listdir(os.path.join(MODULE_ROOT, \"datasets\")):\n raise FileNotFoundError\n\n with open(os.path.join(MODULE_ROOT, \"datasets/\", filename), \"r\") as f:\n dataset_info = json.load(f)\n return dataset_info", "def get_datasets(self):\n return self.ds_primary, self.ds_secondary", "def collect_datset(self):\n response = requests.get(self.url)\n lines = response.text.splitlines()\n data = []\n for item in lines:\n item = item.split(\",\")\n data.append(item)\n data.pop(0) # to remove labels from list\n dataset = np.matrix(data)\n return dataset", "def _info(self) -> tfds.core.DatasetInfo:\n # TODO(cms_pf): Specifies the tfds.core.DatasetInfo object\n return tfds.core.DatasetInfo(\n builder=self,\n description=_DESCRIPTION,\n features=tfds.features.FeaturesDict(\n {\n \"X\": tfds.features.Tensor(shape=(None, len(X_FEATURES)), dtype=tf.float32),\n \"ygen\": tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32),\n \"ycand\": tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32),\n }\n ),\n supervised_keys=(\"X\", \"ycand\"),\n homepage=\"\",\n citation=_CITATION,\n metadata=tfds.core.MetadataDict(x_features=X_FEATURES, y_features=Y_FEATURES),\n )", "def download_all_datasets():\n print(\"Downloading all datasets ...\")\n for dataset in get_available_datasets():\n download_dataset(dataset)", "def __repr__(self) -> str:\n output = f\"<Dataset(id={self.id}, name={self.dataset_name}\"\n output += f\"organisation name={self.organization_name},n\"\n output += f\"reference period={self.dataset_date}, update frequency={self.update_frequency}, \"\n output += f\"review_date={str(self.review_date)}, last_modified={str(self.last_modified)},\"\n output += f\"updated_by_script={str(self.updated_by_script)}, metadata_modified={str(self.metadata_modified)})>\"\n return output", "def __repr__(self):\n return \"IAM Dataset\\n\" f\"Num total: {len(self.xml_filenames)}\\nNum test: {len(self.metadata['test_ids'])}\\n\"", "def _info(self) -> tfds.core.DatasetInfo:\n # TODO(kappatng): Specifies the tfds.core.DatasetInfo object\n return tfds.core.DatasetInfo(\n builder=self,\n description=_DESCRIPTION,\n features=tfds.features.FeaturesDict({\n # These are the features of 
your dataset like images, labels ...\n \"image\": tfds.features.Tensor(shape=[41, 41], dtype=tf.float32),\n \"psf\": tfds.features.Tensor(shape=[41, 41], dtype=tf.float32),\n \"variance\": tfds.features.Tensor(shape=[41, 41], dtype=tf.float32),\n \"mask\": tfds.features.Tensor(shape=[41, 41], dtype=tf.int32),\n\t}),\n # If there's a common (input, target) tuple from the\n # features, specify them here. They'll be used if\n # `as_supervised=True` in `builder.as_dataset`.\n supervised_keys=(\"image\", \"image\"),\n homepage='https://dataset-homepage/',\n citation=_CITATION,\n )", "def get_dataset():\n data = request.get_json()\n return jsonify(result=Tree.files(data['label']))", "def test_cli_datasets_default():\n runner = CliRunner()\n result = runner.invoke(cli.main, [\"datasets\"])\n assert result.exit_code == 0\n assert \"Dataset ID\" in result.output\n assert \"ggallus_gene_ensembl\" in result.output\n assert \"ENSEMBL_MART_ENSEMBL\" in result.output", "def get_datasetID(self):\n\t\treturn self.dsDoc['about']['datasetID']", "def get_dataset_name(self):\n raise NotImplementedError", "def __str__(self):\r\n desc = f'{self.name}\\n' + '-' * len(self.name) + '\\n'\r\n desc += f'Contains {len(self)} data entries.\\n'\r\n if self.is_superset:\r\n for ds in self.data:\r\n desc += f'\\tSubset {ds.name}: {len(ds)} entries.\\n'\r\n return desc", "def get_a_list_of_testset_names() -> str:\n message = 'The available test sets are:'\n for testset in sorted(DATASETS.keys(), reverse=True):\n message += '\\n%20s: %s' % (testset, DATASETS[testset].get('description', ''))\n return message", "def datasets_for_task(jeditaskid):\n dsets = []\n dsquery = {\n 'jeditaskid': jeditaskid,\n }\n values = (\n 'jeditaskid', 'datasetid', 'datasetname', 'containername', 'type', 'masterid', 'streamname', 'status',\n 'storagetoken', 'nevents', 'neventsused', 'neventstobeused', 'nfiles', 'nfilesfinished', 'nfilesfailed',\n 'nfilesmissing', 'nfileswaiting'\n )\n values = list(set(values) & set([f.name for f in JediDatasets._meta.get_fields()]))\n dsets.extend(JediDatasets.objects.filter(**dsquery).values(*values))\n\n dsets, dsinfo = calculate_dataset_stats(dsets)\n dsets = sorted(dsets, key=lambda x: x['datasetname'].lower())\n\n return dsets, dsinfo", "def _info(self) -> tfds.core.DatasetInfo:\n # TODO(a2o): Specifies the tfds.core.DatasetInfo object\n return tfds.core.DatasetInfo(\n builder=self,\n description=_DESCRIPTION,\n features=tfds.features.FeaturesDict({\n # These are the features of your dataset like images, labels ...\n 'image': tfds.features.Image(shape=(None, None, 3)),\n 'label': tfds.features.ClassLabel(names=label_name),\n }),\n # If there's a common (input, target) tuple from the\n # features, specify them here. 
They'll be used if\n # `as_supervised=True` in `builder.as_dataset`.\n supervised_keys=('image', 'label'), # Set to `None` to disable\n homepage='https://dataset-homepage/',\n citation=_CITATION,\n )", "def display_all():\n results = artwork_db.get_all_artwork()\n for artist in results:\n print(artist)", "def available_datasets(self) -> List[str]:\n return sorted(self.__by_name.keys())", "def datasets(dtype=\"*\", name=\"*\", rootdir=None, fullpath=False):\n if rootdir is None:\n rootdir = Path(cf.options.rootdir).expanduser() / \"datasets\" / API_VERSION\n else:\n rootdir = Path(rootdir).expanduser()\n if not rootdir.exists():\n raise ValueError(f\"rootdir does not exist: {rootdir}\")\n\n files = sorted(glob(str(rootdir/ FreezableAPI.to_slug(dtype,name))))\n if fullpath:\n files = files\n else:\n files = [os.path.basename(x) for x in files]\n return files", "def get_avail_datasets(self):\n\n return self.datasets.keys()", "def show_data():", "def get_datasets(self,\n params: typing.Optional[typing.Mapping[str, str]] = None) -> typing.List[str]:\n raise NotImplementedError('This data connector does not provide a list of datasets')", "def _info(self) -> tfds.core.DatasetInfo:\n info = tfds.core.DatasetInfo(\n builder=self,\n description='A minimal TFDS DatasetBuilder backed by SQL ClientData',\n features=self._tfds_features,\n homepage='N/A',\n citation='N/A',\n metadata=None)\n df = _load_sql_client_data_metadata(self._sql_database)\n\n split_infos = list()\n\n for client_id in self._cd.client_ids:\n split_infos.append(\n tfds.core.SplitInfo(\n name=client_id,\n shard_lengths=[\n int(df[df['client_id'] == client_id]['num_examples'])\n ],\n num_bytes=0))\n\n split_dict = tfds.core.SplitDict(\n split_infos, dataset_name='tfds_builder_by_sql_client_data')\n info.set_splits(split_dict)\n return info", "def get_datasets(FIELDS='all'):\n dsinfostr = fork_and_get_output(\"zfs list -H -o {0}\".format(FIELDS).split())\n header = get_zfs_ds_header()\n dsinfo = dsinfostr.splitlines()\n dsobjs = []\n for dsstr in dsinfo:\n dsobjs.append(DataZFS(dsstr, header, 'dataset'))\n return dsobjs", "def extra_repr(self) -> List[str]:\n body = [\n f'Root of dataset: \\t{self.data_root}',\n ]\n return body", "async def list(request):\n dict_answer = {'models': [item[1]+' '+item[0]+str(item[2:]) for item in models_db],\n 'datasets': [conv_time(d.stat().st_atime)+' '+str(d.name) for d in Path('data/datasets/').glob('*')],\n }\n return web.json_response(dict_answer)", "async def get_datasets_metadata(location_id: LocationID, user_id: UserID):", "def get_dataset():\n\n return db.store.all()", "def get_all_dataset_summaries() -> DatasetSummariesResponse:\n return DatasetSummariesResponse(\n threat_exchange_datasets=_get_threat_exchange_datasets(\n datastore_table,\n threat_exchange_data_bucket_name,\n threat_exchange_data_folder,\n )\n )", "def get_all_data():\n return jsonify(service.get_all_data())", "def get_info(self):\n return f\"{self.name} data: {len(self.data)}, {self.num_labels} labels, {len(self.targets) if self.targets is not None else None} targets\"", "def _info(self) -> tfds.core.DatasetInfo:\n return tfds.core.DatasetInfo(\n builder=self,\n description=_DESCRIPTION,\n citation=_CITATION,\n features=tfds.features.FeaturesDict({\n 'image': tfds.features.Image(shape=(None, None, 3)),\n 'label': tfds.features.ClassLabel(names=_CLASS_NAMES),\n }),\n homepage=_HOMEPAGE,\n supervised_keys=('image', 'label'),\n )", "def get_dataset_names(self, include = ['*'], exclude = []):\n \n raise 
NotImplementedError('get_dataset_names')", "def dataset_name(self):\n return self.dataset.name", "def get_dataset_info(self, options):\n _line_iter, _field_iter = _get_load_iterators(options)\n vars = []\n \n with codecs.open(options['filename'], 'rb', options['encoding']) as in_stream: \n f = _line_iter(in_stream)\n for line in f:\n line = line.lower() if line else ''\n if line.startswith('@attribute'):\n var = _get_var(re.search('@attribute (.+?) (.+)', line).groups())\n vars.append(var)\n elif line.startswith('@relation'):\n options['dataset'] = line.replace('@relation ', '')\n elif line.startswith('@data'):\n break\n #can add mark to get cases if desired\n options['_variables'] = VariableSpec(vars)\n \n \n options['_cases'] = None\n options['format'] = 'arff'", "def print_data_list(self):\n print('\\n{0}'.format(self.webDataFrame))", "def info(self):\n print 'A= ', self.application\n print 'C= ', self.city\n print 'D= ', self.dataset.shape", "def identify_datasets(self, language_objects, context):\n\n datasets, new_sen = self.extractor.extract_all_templates(\n language_objects, context\n )\n context[\"datasets\"] = datasets\n return {'type': 'result', 'result': (new_sen, context[\"datasets\"])}", "def __str__(self):\n\n styled = partial(prettyformat, indent=4, compact=True)\n text = (\n \"<xbout.BoutDataset>\\n\"\n + \"Contains:\\n{}\\n\".format(str(self.data))\n + \"Metadata:\\n{}\\n\".format(styled(self.metadata))\n )\n if self.options:\n text += \"Options:\\n{}\".format(self.options)\n return text", "def getDatasets(fileinformation):\n filedata = pd.read_csv(fileinformation, sep=\"\\t\", header=None)\n datalabels = list(filedata.iloc[:,0].values)\n datafiles = list(filedata.iloc[:,1].values)\n return datalabels, datafiles", "def __str__(self):\n\n styled = partial(prettyformat, indent=4, compact=True)\n text = \"<xbout.BoutDataset>\\n\" + \\\n \"Contains:\\n{}\\n\".format(str(self.data)) + \\\n \"Metadata:\\n{}\\n\".format(styled(self.metadata))\n if self.options:\n text += \"Options:\\n{}\".format(styled(self.options))\n return text", "def ds_atags(self):\n atags = {\n 'unit': {\n 'atname': 'unit',\n 'data_type': 'text',\n 'description': 'Unit of measure for values in data'},\n 'description': {\n 'atname': 'description',\n 'data_type': 'text',\n 'description': 'Human readable description of data'},\n 'comments': {\n 'atname': 'comments',\n 'data_type': 'text',\n 'description': 'Comments about the data set'},\n 'references': {\n 'atname': 'references',\n 'data_type': 'text',\n 'description': 'path to group, diminsion index or field being referenced'},\n 'semantic_type': {\n 'atname': 'semantic_type',\n 'data_type': 'text',\n 'description': 'Semantic type of data stored'},\n 'scale': {\n 'atname': 'conversion',\n 'data_type': 'float',\n 'description': 'Scale factor to convert stored values to units of measure'},\n }\n return atags", "def url(self) -> str:\n return self.DATASET_URLS[self.name]", "def getInfo():", "def _build_datasets(self):\n self._build_datasets_sis3302()\n self._build_datasets_sis3305()", "def dataset_list_cli(self,\r\n sort_by=None,\r\n size=None,\r\n file_type=None,\r\n license_name=None,\r\n tag_ids=None,\r\n search=None,\r\n user=None,\r\n mine=False,\r\n page=1,\r\n csv_display=False,\r\n max_size=None,\r\n min_size=None):\r\n datasets = self.dataset_list(sort_by, size, file_type, license_name,\r\n tag_ids, search, user, mine, page,\r\n max_size, min_size)\r\n fields = [\r\n 'ref', 'title', 'size', 'lastUpdated', 'downloadCount',\r\n 'voteCount', 
'usabilityRating'\r\n ]\r\n if datasets:\r\n if csv_display:\r\n self.print_csv(datasets, fields)\r\n else:\r\n self.print_table(datasets, fields)\r\n else:\r\n print('No datasets found')", "def get_dataset_name(self):\n return self.dataset_name", "def info(self):\n print(\n \"\"\"\n Factory holds {0} unique plots\n \"\"\".format(\n len(self.plots)\n )\n )\n for i, plot in enumerate(self.plots):\n print(\"\\t\\tPlot {0} holds {1} unique datasets\".format(i, len(plot)))\n for j, dataset in enumerate(plot):\n print(\n \"\\t\\t\\tDataset {0} holds {1} datapoints\".format(\n j, len(dataset[\"x\"])\n )\n )\n\n print()\n return", "def chartdata():\n chart = billboard.ChartData('hot-100')\n chart_data = []\n for song in chart:\n song_data = (song.title, song.artist)\n chart_data.append(song_data)\n \n return chart_data", "def __str__ (self):\n string = \"data loader:\\n\"\n for i in range(len(self.loaded_files)):\n string += \"level: {}, dimension: {}.\\n\" \\\n .format(self.loaded_files[i], str(np.shape(self.loaded_data[i])))\n return string", "def dataAsString(self):\n\n # Force generation of .array\n d = self.asArray()\n slist = []\n for l in self.array:\n s = \"%s %s\" % (self.name, self.rowAsString(l))\n slist.append(s)\n return '\\n'.join(slist)", "def get_all_datasets_conf_ds():\n listOfDatasetDSConfig = []\n sqlObj = _DS_config_DS_SQL()\n results = sqlObj.select_all_DDI_DB()\n for element in results:\n listOfDatasetDSConfig.append(Dataset_conf_ds(element[0], element[1], element[2], element[3]))\n return listOfDatasetDSConfig", "def get_new_datasets(self, output_name):\n return []", "def get_dataset(name):\n if name == 'cityscapes':\n return Cityscapes", "def __get_dataset_name(self):\n d = gdal.Open(self.fname)\n # Get band metadata\n b = d.GetRasterBand(1)\n md = b.GetMetadata()\n\n if 'data_var' in md:\n return md['data_var']\n else:\n fnames = d.GetFileList()\n if len(fnames) > 2:\n d = gdal.Open(fnames[1])\n # Get band metadata\n b = d.GetRasterBand(1)\n md = b.GetMetadata()\n if 'data_var' in md:\n return md['data_var']\n else:\n return 'data'\n else:\n return 'data'" ]
[ "0.7014192", "0.6812912", "0.6607309", "0.65944916", "0.64315784", "0.6382068", "0.636666", "0.63273066", "0.63251984", "0.6180837", "0.6164181", "0.61418563", "0.61337954", "0.61007583", "0.6100278", "0.6081731", "0.60703063", "0.60678405", "0.60557", "0.6037266", "0.6016712", "0.60027087", "0.5998768", "0.59929526", "0.5971946", "0.5958143", "0.59411806", "0.5918224", "0.5918224", "0.59086204", "0.58966047", "0.58812654", "0.5872809", "0.58544165", "0.58429897", "0.5836323", "0.5820082", "0.5806874", "0.57933295", "0.57899916", "0.5782581", "0.5780093", "0.5753569", "0.5745795", "0.57120836", "0.570486", "0.570208", "0.569378", "0.56891817", "0.56792724", "0.56645447", "0.5663036", "0.56521523", "0.5647415", "0.56457055", "0.5639013", "0.5605137", "0.5582895", "0.55691993", "0.55617374", "0.5559673", "0.55548275", "0.5545665", "0.5544273", "0.5535341", "0.5524631", "0.55143493", "0.5491689", "0.548714", "0.5482943", "0.54776305", "0.54734373", "0.54692596", "0.5468464", "0.546583", "0.54642963", "0.5461336", "0.5451091", "0.5444239", "0.54441404", "0.54340255", "0.5419851", "0.5406711", "0.54039013", "0.53881973", "0.5377996", "0.5376702", "0.53720695", "0.5352875", "0.53440213", "0.5340927", "0.5324294", "0.53187364", "0.53101516", "0.53068763", "0.5303276", "0.5298898", "0.5295644", "0.52900124", "0.52868325", "0.52812934" ]
0.0
-1
Load dataset by dataset_name. Run get_info() to get dataset information
def load_dataset(dataset_name: str, internals_folder_path: str = None) -> Dataset: dataset_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), dataset_name) if glob(dataset_path + '*'): return Dataset(glob(dataset_path + '*')[0], internals_folder_path=internals_folder_path) req = Request(URL + '/download') context = ssl._create_unverified_context() values = {'dataset-name': dataset_name} data = urllib.parse.urlencode(values).encode("utf-8") with urlopen(req, data=data, context=context) as answer: total_size = int(answer.headers.get('content-length', 0)) block_size = 1024 save_path = dataset_path + answer.getheader('file-extension') t = tqdm(total=total_size, unit='iB', unit_scale=True) with open(save_path + '.gz', 'wb') as f: while True: chunk = answer.read(block_size) if not chunk: break t.update(len(chunk)) f.write(chunk) t.close() if total_size != 0 and t.n != total_size: print("Failed to download file") return None else: with gzip.open(save_path + '.gz', 'rb') as gz: with open(save_path, 'wb') as f: f.write(gz.read()) os.remove(save_path + '.gz') return Dataset(save_path, internals_folder_path=internals_folder_path)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load(name):\n if name in datasets:\n\n return pd.read_csv(os.path.join(datasets_path, \"%s.csv\" % name))\n else:\n raise ValueError(\"Dataset not found!\")", "def load_dataset(dataset_name):\n url = METADATA[dataset_name][\"url\"]\n f = urlopen(url)\n data = _read_rows(f)\n f.close()\n return data", "def ensure_dataset_loaded(self, name):\n if name not in self.datasets:\n print(f'Loading dataset \"{name}\"')\n pd_data = pd.read_excel(self.datafiles[name])\n data = pd.DataFrame.to_dict(pd_data, 'records')\n self.datasets[name] = data", "def load_data(self, dataset, dataset_name):\n with open(dataset, \"r\", encoding=\"utf-8\") as f:\n self.data = json.load(f)\n self.dataset_name = dataset_name", "def load_examples_data(dataset_name):\n dataset_name = dataset_name.strip().lower()\n if dataset_name.lower() not in ['pokemon', 'hanzi', 'animals', 'nsfw', 'simpsons', 'horse2zebra', 'people',\n 'autodrive', 'superresolution', 'anpr', 'beauty','antisproofing','facelandmarks','dogs-vs-cats','chinese']:\n raise ValueError('Not a valid dataset_name.')\n dataset_name = 'examples_' + dataset_name\n dirname = os.path.join(_trident_dir, dataset_name)\n if not os.path.exists(dirname):\n try:\n os.makedirs(dirname)\n except OSError:\n # Except permission denied and potential race conditions\n # in multi-threaded environments.\n pass\n is_internet_ok = is_connected()\n if dataset_name == 'examples_pokemon':\n is_download=download_file_from_google_drive('1U-xc54fX9j9BcidvRa0ow6qjssMlSF2A', dirname, 'pokemon.tar')\n tar_file_path = os.path.join(dirname, 'pokemon.tar')\n extract_archive(tar_file_path, dirname, archive_format='tar')\n extract_path = os.path.join(dirname, 'pokemon')\n dataset = load_folder_images(dataset_name, extract_path, folder_as_label=False)\n print('get pokemon images :{0}'.format(len(dataset)))\n return dataset\n\n\n elif dataset_name == 'examples_hanzi':\n download_file_from_google_drive('13UEzSG0az113gpRPKPyKrIE2HDaA2P4H', dirname, 'hanzi.tar')\n tar_file_path = os.path.join(dirname, 'hanzi.tar')\n extract_path = os.path.join(dirname, 'hanzi')\n extract_archive(tar_file_path, dirname, archive_format='tar')\n dataset = load_folder_images(dataset_name, os.path.join(dirname, 'train'), folder_as_label=True,\n object_type=ObjectType.gray)\n\n dataset_test = load_folder_images(dataset_name, os.path.join(dirname, 'test'), folder_as_label=True,\n object_type=ObjectType.gray)\n\n dataset.testdata = dataset_test.traindata\n dataset.class_names['zh-cn'] = dataset.class_names['en-us']\n return dataset\n\n elif dataset_name == 'examples_animals':\n download_file_from_google_drive('19Cjq8OO6qd9k9TMZxlPjDpejDOdiHJoW', dirname, 'animals.tar')\n tar_file_path = os.path.join(dirname, 'animals.tar')\n extract_archive(tar_file_path, dirname, archive_format='tar')\n dataset = load_folder_images(dataset_name, dirname, folder_as_label=True)\n return dataset\n elif dataset_name == 'examples_nsfw':\n tar_file_path = os.path.join(dirname, 'nsfw.tar')\n if os.path.exists(tar_file_path) and get_file_create_time(tar_file_path)<datetime.datetime(2021, 2, 20, 0, 0, 0).timestamp():\n os.remove(tar_file_path)\n if os.path.exists(os.path.join(dirname,'porn_detection_data.pkl')):\n os.remove(os.path.join(dirname,'porn_detection_data.pkl'))\n _delete_h(dirname)\n download_file_from_google_drive('1EXpV2QUrSFJ7zJn8NqtqFl1k6HvXsUzp', dirname, 'nsfw.tar')\n\n extract_path = os.path.join(dirname, 'nsfw')\n extract_archive(tar_file_path, dirname, archive_format='tar')\n folders = ['drawings', 'hentai', 'neutral', 
'porn', 'sexy']\n data=unpickle(os.path.join(dirname,'porn_detection_data.pkl'))\n\n trainData = []\n testData = []\n trainLabel = []\n testLabel = []\n for n in range(5):\n folder=folders[n]\n trainData.extend(data[folder]['train'])\n trainLabel.extend([n]*len(data[folder]['train']))\n testData.extend(data[folder]['test'])\n testLabel.extend([n] * len(data[folder]['test']))\n\n trainarray = ImageDataset(trainData,object_type=ObjectType.rgb)\n trainlabel = LabelDataset(trainLabel,object_type=ObjectType.classification_label)\n train_iter = Iterator(data=trainarray, label=trainlabel)\n\n testarray = ImageDataset(testData,object_type=ObjectType.rgb)\n testlabel = LabelDataset(testLabel,object_type=ObjectType.classification_label)\n test_iter = Iterator(data=testarray, label=testlabel)\n print('training images: {0} test images:{1}'.format(len(trainarray), len(testarray)))\n\n dataset = DataProvider(dataset_name, traindata=train_iter, testdata=test_iter)\n dataset.binding_class_names(['drawing', 'hentai', 'neutral', 'porn', 'sexy'], 'en-us')\n dataset.binding_class_names(['绘画', '色情漫画', '中性', '色情', '性感'], 'zh-cn')\n dataset.binding_class_names(['繪畫', '色情漫畫', '中性', '色情', '性感'], 'zh-tw')\n dataset.scenario = 'train'\n return dataset\n elif dataset_name == 'examples_simpsons':\n download_file_from_google_drive('1hGNFbfBv3EZ4nx4Qod6PtSYzO8H4QIxC', dirname, 'simpsons.tar')\n tar_file_path = os.path.join(dirname, 'simpsons.tar')\n extract_path = os.path.join(dirname, 'simpsons')\n extract_archive(tar_file_path, extract_path, archive_format='tar')\n data_provider = load_folder_images(dataset_name, extract_path, folder_as_label=False)\n data_provider.traindata.unpair = RandomNoiseDataset(shape=(100), random_mode='normal')\n print('get simpsons images :{0}'.format(len(data_provider.traindata.data.items)))\n return data_provider\n elif dataset_name == 'examples_horse2zebra':\n download_file_from_google_drive('1pqj-T90Vh4wVNBV09kYZWgVPsZUA2f7U', dirname, 'horse2zebra.tar')\n tar_file_path = os.path.join(dirname, 'horse2zebra.tar')\n extract_path = os.path.join(dirname, 'horse2zebra')\n extract_archive(tar_file_path, dirname, archive_format='tar')\n trainA = ImageDataset(list_images(os.path.join(dirname, 'trainA')), object_type=ObjectType.rgb,\n get_image_mode=GetImageMode.processed)\n trainB = ImageDataset(list_images(os.path.join(dirname, 'trainB')), object_type=ObjectType.rgb,\n get_image_mode=GetImageMode.processed)\n testA = ImageDataset(list_images(os.path.join(dirname, 'testA')), object_type=ObjectType.rgb,\n get_image_mode=GetImageMode.processed)\n testB = ImageDataset(list_images(os.path.join(dirname, 'testB')), object_type=ObjectType.rgb,\n get_image_mode=GetImageMode.processed)\n train_iter = Iterator(data=trainA, unpair=trainB)\n test_iter = Iterator(data=testA, unpair=testB)\n dataset = DataProvider(dataset_name, traindata=train_iter, testdata=test_iter)\n print('get horse2zebra images :{0}'.format(len(dataset)))\n return dataset\n elif dataset_name == 'examples_people':\n download_file_from_google_drive('1H7mJJfWpmXpRxurMZQqY4N_UXWLbQ2pT', dirname, 'people.tar')\n tar_file_path = os.path.join(dirname, 'people.tar')\n extract_archive(tar_file_path, dirname, archive_format='tar')\n imgs = glob.glob(os.path.join(dirname, 'imgs', '*.*g'))\n masks = glob.glob(os.path.join(dirname, 'masks', '*.png'))\n imgs=list(sorted(imgs))\n masks = list(sorted(masks))\n # make_dir_if_need(os.path.join(dirname, 'trimap'))\n # for i in range(len(masks)):\n # mask=mask2array(masks[i])\n # trimap=mask2trimap(mask)\n 
# save_mask(trimap,masks[i].replace('masks','trimap'))\n # print('trimap',len(masks))\n\n imgdata = ImageDataset(images=imgs, object_type=ObjectType.rgb)\n mskdata = MaskDataset(masks=masks, object_type=ObjectType.binary_mask)\n dataset = DataProvider(dataset_name=dataset_name, traindata=Iterator(data=imgdata, label=mskdata))\n print('get people images :{0}'.format(len(dataset)))\n return dataset\n elif dataset_name == 'examples_autodrive':\n download_file_from_google_drive('1JqPPeHqhWLqnI6bD8nuHcVx-Y56oIZMK', dirname, 'autodrive.tar')\n tar_file_path = os.path.join(dirname, 'autodrive.tar')\n extract_path = os.path.join(dirname, 'autodrive')\n extract_archive(tar_file_path, dirname, archive_format='tar')\n imgs = glob.glob(os.path.join(dirname, 'images', '*.*g'))\n masks = glob.glob(os.path.join(dirname, 'masks', '*.png'))\n imgs = list(sorted(imgs))\n masks = list(sorted(masks))\n imgdata = ImageDataset(images=imgs, object_type=ObjectType.rgb,symbol='image')\n mskdata = MaskDataset(masks=masks, object_type=ObjectType.color_mask,symbol='mask')\n\n def parse_code(l):\n if len(l.strip().split(\"\\t\")) == 2:\n a, b = l.replace('\\t\\t', '\\t').strip().split(\"\\t\")\n return tuple(int(i) for i in b.split(' ')), a\n\n label_codes, label_names = zip(\n *[parse_code(l) for l in open(os.path.join(dirname, \"label_colors.txt\")).readlines()])\n for i in range(len(label_codes)):\n mskdata.palette[label_names[i]] = label_codes[i]\n\n dataset = DataProvider(dataset_name=dataset_name, traindata=Iterator(data=imgdata, label=mskdata))\n print('get autodrive images :{0}'.format(len(dataset)))\n return dataset\n elif dataset_name == 'examples_superresolution':\n download_file_from_google_drive('1v1uoymrWI_MLSiGvSGW7tWJYSnzzXpEQ', dirname, 'superresolution.tar')\n tar_file_path = os.path.join(dirname, 'superresolution.tar')\n extract_archive(tar_file_path, dirname, archive_format='tar')\n imgs = glob.glob(os.path.join(dirname, '*.*g'))\n imgs.extend(glob.glob(os.path.join(dirname, '*.bmp')))\n imgs = list(sorted(imgs))\n\n print('get super resolution images :{0}'.format(len(imgs)))\n\n imgdata = ImageDataset(images=imgs * 2, object_type=ObjectType.rgb, symbol='lr')\n labeldata = ImageDataset(images=imgs * 2, object_type=ObjectType.rgb, symbol='hr')\n dataset = DataProvider(dataset_name=dataset_name, traindata=Iterator(data=imgdata, label=labeldata))\n return dataset\n elif dataset_name == 'examples_beauty':\n download_file_from_google_drive('1aJhxN9IqsxuayhRTm-gmxk6PiLe5wm9X', dirname, 'beauty.tar')\n tar_file_path = os.path.join(dirname, 'beauty.tar')\n\n extract_archive(tar_file_path, dirname, archive_format='tar')\n # 讀取圖片數據\n images_dict = {}\n with open(os.path.join(dirname, 'images_dict.pkl'), 'rb') as fp:\n images_dict = pickle.load(fp)\n\n f = open(os.path.join(dirname, 'All_Ratings.txt'), encoding='utf-8-sig').readlines()\n imgs = []\n landmarks = []\n ratings = []\n for row in f:\n data = row.strip().split('\\t')\n if 'images\\\\' + data[0] in images_dict:\n img = images_dict['images\\\\' + data[0]][0]\n img = img.transpose([2, 0, 1])[::-1].transpose([1, 2, 0])\n imgs.append(img)\n landmark = images_dict['images\\\\' + data[0]][1].astype(np.float32)\n landmarks.append(landmark)\n rating = (float(data[1])) / 5.00\n ratings.append(rating)\n print('{0} faces loaded...'.format(len(imgs)))\n imgdata = ImageDataset(images=imgs, object_type=ObjectType.rgb, symbol='faces')\n landmarkdata = LandmarkDataset(landmarks=landmarks, object_type=ObjectType.landmarks, symbol='target_landmarks')\n labeldata = 
LabelDataset(data=ratings,object_type=ObjectType.classification_label, symbol='target_beauty')\n data_provider = DataProvider(dataset_name=dataset_name, traindata=Iterator(data=imgdata, label=Dataset.zip(landmarkdata,labeldata)))\n return data_provider\n\n elif dataset_name == 'examples_facelandmarks':\n download_file_from_google_drive('1GtswQBAHPa_bXaB4tW2uOOQ8Lxfz2L5B', dirname, 'ibug_300W.tar')\n tar_file_path = os.path.join(dirname, 'ibug_300W.tar')\n extract_archive(tar_file_path, dirname, archive_format='tar')\n root_dir=os.path.join(dirname, 'ibug_300W_large_face_landmark_dataset')\n image_paths = {}\n landmarks = {}\n crops = {}\n\n for mode in ['train','test']:\n make_dir_if_need(os.path.join(dirname, 'crops',mode))\n tree = ElementTree.parse(os.path.join(root_dir, 'labels_ibug_300W_{0}.xml'.format(mode)))\n root = tree.getroot()\n image_paths[mode]=[]\n landmarks[mode] = []\n crops[mode] = []\n\n offset=5\n for j in tqdm(range(len(root[2]))):\n try:\n filename=root[2][j]\n landmark = []\n for num in range(68):\n x_coordinate = int(filename[0][num].attrib['x'])\n y_coordinate = int(filename[0][num].attrib['y'])\n landmark.append([x_coordinate, y_coordinate])\n landmark=np.asarray(landmark)\n\n crop = filename[0].attrib\n for k in crop.keys():\n crop[k] = int(crop[k]) if isinstance(crop[k], str) else crop[k]\n for k in crop.keys():\n if k=='top' and int(landmark[:,1].min())<int(crop[k]):\n crop[k] = int( landmark[:,1].min())\n crop[ 'height']+=crop[k]-int(landmark[:,1].min())\n elif k=='left' and int(landmark[:,0].min())<int(crop[k]):\n crop[k] = int( landmark[:,0].min())\n crop['width']+= crop[k] - int(landmark[:, 0].min())\n elif k == 'width' and int(landmark[:, 0].max()-landmark[:, 0].min()) > int(crop[k]):\n crop[k] = int(landmark[:, 0].max()-landmark[:, 0].min())\n elif k == 'height' and int(landmark[:, 1].max()-landmark[:, 1].min()) > int(crop[k]):\n crop[k] = int(landmark[:, 1].max()-landmark[:, 1].min())\n\n crop['left']-=offset\n crop['top'] -= offset\n crop['width'] += 2*offset\n crop['height'] += 2*offset\n\n\n landmark[:,0]-=crop['left']\n landmark[:, 1] -= crop['top']\n\n\n if not os.path.exists(os.path.join(dirname, 'crops', mode, '{0}.png'.format(j))):\n im=image2array(os.path.join(root_dir, filename.attrib['file']))\n if im.ndim==2:\n im=cv2.cvtColor(im,cv2.COLOR_GRAY2RGB)\n im=im[crop['top']:min(crop['top']+crop['height'],im.shape[0]),crop['left']:min(crop['left']+crop['width'],im.shape[1]),:]\n\n if max(im.shape[:2])/max(min(im.shape[:2]),0)<=5:\n\n array2image(im).save(os.path.join(dirname, 'crops',mode,'{0}.png'.format(j)))\n image_paths[mode].append(os.path.join(dirname, 'crops', mode, '{0}.png'.format(j)))\n crops[mode].append(crop)\n landmarks[mode].append(landmark)\n del im\n else:\n #im = image2array(os.path.join(dirname, 'crops',mode,'{0}.png'.format(j)))\n image_paths[mode].append(os.path.join(dirname, 'crops',mode,'{0}.png'.format(j)))\n crops[mode].append(crop)\n landmarks[mode].append(landmark)\n\n if j%100==0:\n gc.collect()\n except Exception as e:\n pass\n\n print('ibug 300w train dataset: images: {0} landmarks:{1} \\n'.format(len(image_paths['train']),len(landmarks['train'])))\n print('ibug 300w test dataset: images: {0} landmarks:{1} \\n'.format(len(image_paths['test']), len(landmarks['test'])))\n imdata=ImageDataset(images=image_paths['train'],symbol='faces',object_type=ObjectType.rgb)\n landmarkdata = LandmarkDataset(landmarks=landmarks['train'], symbol='landmarks',object_type=ObjectType.landmarks)\n imtestdata = 
ImageDataset(images=image_paths['test'], symbol='faces',object_type=ObjectType.rgb)\n landmarktestdata = LandmarkDataset(landmarks=landmarks['test'], symbol='landmarks',object_type=ObjectType.landmarks)\n data_provider=DataProvider(traindata=Iterator(data=imdata,label=landmarkdata),testdata=Iterator(data=imtestdata,label=landmarktestdata))\n return data_provider\n\n elif dataset_name == 'examples_antisproofing':\n download_file_from_google_drive('1e7Zjn2MHNCvA5gXdJUECzY8NjK4KVpa7', dirname, 'antisproofing.tar')\n tar_file_path = os.path.join(dirname, 'antisproofing.tar')\n make_dir_if_need(os.path.join(dirname, 'antisproofing'))\n extract_archive(tar_file_path, dirname, archive_format='tar')\n data_provider = load_folder_images(dataset_name,os.path.join(dirname, 'antisproofing'))\n return data_provider\n elif dataset_name == 'examples_anpr':\n download_file_from_google_drive('1uGBd8tXlP0TZAXNgrR6H0jl5MXj7VPbN', dirname, 'anpr.tar')\n tar_file_path = os.path.join(dirname, 'anpr.tar')\n extract_archive(tar_file_path, dirname, archive_format='tar')\n imgs = glob.glob(os.path.join(dirname, '*.*g'))\n imgs = list(sorted(imgs))\n\n # CCPD (Chinese City Parking Dataset, ECCV) and PDRC (license Plate Detection and Recognition Challenge)\n # https://github.com/detectRecog/CCPD\n provinces = [\"皖\", \"沪\", \"津\", \"渝\", \"冀\", \"晋\", \"蒙\", \"辽\", \"吉\", \"黑\", \"苏\", \"浙\", \"京\", \"闽\", \"赣\", \"鲁\", \"豫\", \"鄂\", \"湘\", \"粤\",\n \"桂\", \"琼\", \"川\", \"贵\", \"云\", \"藏\", \"陕\", \"甘\", \"青\", \"宁\", \"新\", \"警\", \"学\", \"O\"]\n alphabets = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'J', 'K', 'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'U', 'V',\n 'W', 'X', 'Y', 'Z', 'O']\n ads = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'J', 'K', 'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W',\n 'X', 'Y', 'Z', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'O']\n\n def lp2char(lp):\n cols = lp.split('_')\n charstring = ''\n for i in range(len(cols)):\n if i == 0:\n charstring += provinces[int(cols[i])]\n elif i == 1:\n charstring += alphabets[int(cols[i])]\n else:\n charstring += ads[int(cols[i])]\n return charstring\n\n width = 720\n height = 1160\n for im_path in imgs:\n lbl = im_path.split('/')[-1].rsplit('.', 1)[0].split('-')[-3]\n charstring = lp2char(lbl)\n iname = im_path.rsplit('/', 1)[-1].rsplit('.', 1)[0].split('-')\n [leftUp, rightDown] = [[int(eel) for eel in el.split('&')] for el in iname[2].split('_')]\n box = [leftUp[0], leftUp[1], rightDown[0], rightDown[1]]\n ori_w, ori_h = [float(int(el)) for el in [width, height]]\n new_labels = [(leftUp[0] + rightDown[0]) / (2 * ori_w), (leftUp[1] + rightDown[1]) / (2 * ori_h),\n (rightDown[0] - leftUp[0]) / ori_w, (rightDown[1] - leftUp[1]) / ori_h]\n download_file_from_google_drive('1e7Zjn2MHNCvA5gXdJUECzY8NjK4KVpa7', dirname, 'antisproofing.tar')\n tar_file_path = os.path.join(dirname, 'antisproofing.tar')\n make_dir_if_need(os.path.join(dirname, 'antisproofing'))\n extract_archive(tar_file_path, dirname, archive_format='tar')\n data_provider = load_folder_images(dataset_name, os.path.join(dirname, 'antisproofing'))\n return data_provider\n\n\n\n elif dataset_name == 'examples_dogs-vs-cats':\n download_file_from_google_drive('10czW0On7eIXkPP-MuQ-IRxMWdTizWjNC', dirname, 'dogs-vs-cats.tar')\n tar_file_path = os.path.join(dirname, 'dogs-vs-cats.tar')\n extract_archive(tar_file_path, dirname, archive_format='tar')\n data_provider = load_folder_images(dataset_name, dirname)\n return data_provider\n elif dataset_name == 'examples_chinese':\n 
to_half=ToHalfWidth()\n to_sc=ChineseConvert(convert_to='simplified')\n download_file_from_google_drive('1yzRzXpLuhSUxnixqCgpbdTk16ajnTEWF', dirname, 'chinese.tar')\n tar_file_path = os.path.join(dirname, 'chinese.tar')\n extract_archive(tar_file_path, dirname, archive_format='tar')\n\n as_train = remove_nonprintable(to_half(codecs.open(os.path.join(dirname, 'as_training.utf8'), encoding='utf-8-sig').read().strip().replace('\\u3000' ,'|'))).splitlines()\n cityu_train =remove_nonprintable(to_half(codecs.open(os.path.join(dirname, 'cityu_training.utf8'), encoding='utf-8-sig').read().strip().replace(' ','|'))).splitlines()\n\n as_test = remove_nonprintable(to_half(codecs.open(os.path.join(dirname, 'as_testing_gold.utf8'), encoding='utf-8-sig').read().strip().replace('\\u3000', '|'))).splitlines()\n cityu_test = remove_nonprintable(to_half(codecs.open(os.path.join(dirname, 'cityu_test_gold.utf8'), encoding='utf-8-sig').read().strip().replace(' ', '|'))).splitlines()\n\n\n data = as_train + cityu_train # 把兩個語料合併\n test_data=as_test + cityu_test # 把兩個語料合併\n\n\n raw_data_train = [row.strip('\\n').strip('\\r') for row in data] # 移除分行字元\n raw_data_test = [row.strip('\\n').strip('\\r') for row in test_data] # 移除分行字元\n\n process_data_train=[]\n process_seg_label_train = []\n process_simplifided_label_train = []\n process_traditional_label_train = []\n\n tmp_data_train = []\n tmp_seg_label_train = []\n tmp_simplifided_label_train = []\n tmp_pronunce_label_train = []\n for k in tqdm(range(len(raw_data_train))):\n row=raw_data_train[k]\n if row.startswith('∥'):\n row=row[1:]\n words=row.replace('||','|').split('|')\n for k in range(len(words)):\n\n word = words[k]\n\n for i in range(len(word)):\n tmp_data_train.append(word[i])\n #tmp_simplifided_label_train.append(to_half(to_sc(word[i])))\n #轉換為BMES\n\n if i==0 and len(word)>1: #B 是一個詞的開始\n tmp_seg_label_train.append('B')\n elif i==len(word)-1 and len(word)>=2 and tmp_seg_label_train[-1] in ['B','M']: #E 是一個詞的結束\n tmp_seg_label_train.append('E')\n elif len(word)==1 and i==0: #S 自己就是一個單詞\n tmp_seg_label_train.append('S')\n elif len(word)>=3 and tmp_seg_label_train[-1] in ['B','M']: #M 是一個詞的中間\n tmp_seg_label_train.append('M')\n\n if len(tmp_seg_label_train)>0 and tmp_seg_label_train[-1] in ['E','S']:\n if len(word) > 1 and (is_alphabet(word) or is_punctuation(word)) and k+1<len(words):\n if word in [ '。','﹖']:\n pass\n\n elif random.random() < 0.6 or is_alphabet(word):\n tmp_data_train.append(' ')\n tmp_seg_label_train.append('S')\n\n if (k+1<len(raw_data_train) and not raw_data_train[k+1].startswith( '」')) and words[-1] in [ '。','﹖']:\n #process_traditional_label_train.append(tmp_data_train)\n\n tmp_data_train=to_half(''.join(tmp_data_train))\n tmp_seg_label_train = ''.join(tmp_seg_label_train)\n # if len(tmp_data_train)!=len(tmp_seg_label_train):\n # print('')\n tmp_simplifided_label_train =to_sc(tmp_data_train)\n\n process_data_train.append(tmp_data_train)\n process_seg_label_train.append(tmp_seg_label_train)\n process_simplifided_label_train.append(tmp_simplifided_label_train)\n tmp_data_train = []\n tmp_seg_label_train = []\n tmp_simplifided_label_train = []\n tmp_pronunce_label_train = []\n # else:\n # tmp_data_train.append('\\n')\n # tmp_simplifided_label_train.append('\\n')\n # tmp_seg_label_train.append('\\n')\n corpus=process_data_train\n seg_corpus=process_seg_label_train\n simplifided_corpus =process_simplifided_label_train\n\n process_data_test = []\n process_seg_label_test = []\n process_simplifided_label_test = []\n 
process_traditional_label_test = []\n print('generate test labels')\n tmp_data_test = []\n tmp_seg_label_test = []\n tmp_simplifided_label_test = []\n tmp_pronunce_label_test = []\n for k in tqdm(range(len(raw_data_test))):\n row=raw_data_test[k]\n if row.startswith('∥'):\n row=row[1:]\n words = row.replace('||', '|').split('|')\n for k in range(len(words)):\n\n word = words[k]\n\n for i in range(len(word)):\n tmp_data_test.append(word[i])\n # tmp_simplifided_label_test.append(to_half(to_sc(word[i])))\n # 轉換為BMES\n\n if i == 0 and len(word) > 1: # B 是一個詞的開始\n tmp_seg_label_test.append('B')\n elif i == len(word) - 1 and len(word) >= 2 and tmp_seg_label_test[-1] in ['B', 'M']: # E 是一個詞的結束\n tmp_seg_label_test.append('E')\n elif len(word) == 1 and i == 0: # S 自己就是一個單詞\n tmp_seg_label_test.append('S')\n elif len(word) >= 3 and tmp_seg_label_test[-1] in ['B', 'M']: # M 是一個詞的中間\n tmp_seg_label_test.append('M')\n\n if len(tmp_seg_label_test) > 0 and tmp_seg_label_test[-1] in ['E', 'S'] and k+1<len(words):\n if len(word) > 1 and (is_alphabet(word) or is_punctuation(word)):\n if word in ['。', '﹖']:\n pass\n elif random.random() < 0.6 or is_alphabet(word):\n tmp_data_test.append(' ')\n tmp_seg_label_test.append('S')\n\n if (k + 1 < len(raw_data_test) and not raw_data_test[k + 1].startswith('」')) and words[-1] in ['。', '﹖']:\n # process_traditional_label_test.append(tmp_data_test)\n\n tmp_data_test = to_half(''.join(tmp_data_test))\n tmp_seg_label_test = ''.join(tmp_seg_label_test)\n # if len(tmp_data_test)!=len(tmp_seg_label_test):\n # print('')\n tmp_simplifided_label_test = to_sc(tmp_data_test)\n\n process_data_test.append(tmp_data_test)\n process_seg_label_test.append(tmp_seg_label_test)\n process_simplifided_label_test.append(tmp_simplifided_label_test)\n tmp_data_test = []\n tmp_seg_label_test = []\n tmp_simplifided_label_test = []\n tmp_pronunce_label_test = []\n # else:\n # tmp_data_test.append('\\n')\n # tmp_simplifided_label_test.append('\\n')\n # tmp_seg_label_test.append('\\n')\n test_corpus = process_data_test\n test_seg_corpus = process_seg_label_test\n test_simplifided_corpus = process_simplifided_label_test\n\n\n data=TextSequenceDataset(corpus=corpus,sequence_length=64,sequence_start_at='section_start',object_type=ObjectType.corpus,symbol='input')\n seg_label = TextSequenceDataset(corpus=seg_corpus,sequence_length=64, sequence_start_at='section_start', object_type=ObjectType.sequence_label,symbol='seg_label')\n simplifided_label = TextSequenceDataset(corpus=simplifided_corpus,sequence_length=64, sequence_start_at='section_start', object_type=ObjectType.sequence_label,symbol='simplified_label')\n traditional_label = TextSequenceDataset(corpus= copy.deepcopy(corpus), sequence_length=64, sequence_start_at='section_start', object_type=ObjectType.sequence_label,symbol='traditional_label')\n\n data_test=TextSequenceDataset(corpus=test_corpus,sequence_length=64,sequence_start_at='section_start',object_type=ObjectType.corpus,symbol='input')\n seg_test_label = TextSequenceDataset(corpus=test_seg_corpus,sequence_length=64, sequence_start_at='section_start', object_type=ObjectType.sequence_label,symbol='seg_label')\n simplifided_test_label = TextSequenceDataset(corpus=test_simplifided_corpus,sequence_length=64, sequence_start_at='section_start', object_type=ObjectType.sequence_label,symbol='simplified_label')\n traditional_test_label = TextSequenceDataset(corpus= copy.deepcopy(test_corpus), sequence_length=64, sequence_start_at='section_start', 
object_type=ObjectType.sequence_label,symbol='traditional_label')\n\n\n chars = list(sorted(set(list( ''.join(corpus) +bpmf_phonetic+'\\n\\r\\t∥'+ ''.join(simplifided_corpus)+''.join(test_data)))))\n chars.insert(0, '[CLS]')\n chars.insert(1, '[SEP]')\n chars.insert(2, '[UNK]')\n chars.insert(3, '[PAD]')\n chars.insert(4, '[MASK]')\n\n data.vocabs =data_test.vocabs=simplifided_label.vocabs=simplifided_test_label.vocabs = chars\n data.text2index=data_test.text2index =simplifided_label.text2index=simplifided_test_label.text2index = dict((c, i) for i, c in enumerate(chars))\n data.index2text =data_test.index2text =simplifided_label.index2text=simplifided_test_label.index2text= dict((i, c) for i, c in enumerate(chars))\n traditional_label = copy.deepcopy(data)\n traditional_test_label = copy.deepcopy(data_test)\n traditional_label.object_type =traditional_test_label.object_type = ObjectType.sequence_label\n traditional_label.symbol =traditional_test_label.symbol = 'traditional_label'\n\n mask_label = copy.deepcopy(data)\n mask_test_label = copy.deepcopy(data_test)\n #mask_label.object_type =mask_test_label.object_type= ObjectType.corpus\n mask_label.symbol = mask_test_label.symbol = 'mask_label'\n\n\n\n nextword=copy.deepcopy(data)\n nextword_test = copy.deepcopy(data_test)\n nextword.object_type=nextword_test.object_type=ObjectType.sequence_label\n nextword.symbol=nextword_test.symbol='nextword_label'\n nextword.sequence_offset=nextword_test.sequence_offset=1\n\n label=ZipDataset(seg_label,nextword,simplifided_label,traditional_label,mask_label)\n label_test = ZipDataset(seg_test_label, nextword_test, simplifided_test_label, traditional_test_label, mask_test_label)\n provider=TextSequenceDataProvider(\n traindata=Iterator(data=data,label=label),\n testdata=Iterator(data=data_test,label=label_test))\n return provider\n #,sample_filter=lambda x:x[0][-1]==3\n else:\n return None", "def get_dataset(dataset_name):\n if dataset_name == \"Iris\":\n data = datasets.load_iris()\n\n elif dataset_name == \"Breast Cancer\":\n data = datasets.load_breast_cancer()\n\n elif dataset_name == \"Wine Dataset\":\n data = datasets.load_wine()\n\n elif dataset_name == \"MNIST\":\n data = datasets.load_digits()\n\n #elif dataset_name == \"Boston Housing Price\":\n # data = datasets.load_boston()\n\n X = data.data\n y = data.target\n\n return X, y", "def _load(self, dataset):\n raise NotImplementedError('Loader {} does not support loading datasets.'.format(self.type()))", "def load():\n filepath = dirname(abspath(__file__))\n##### EDIT THE FOLLOWING TO POINT TO DatasetName.csv #####\n data = recfromtxt(open(filepath + '/spector.csv',\"rb\"), delimiter=\" \",\n names=True, dtype=float, usecols=(1,2,3,4))\n names = list(data.dtype.names)\n endog = array(data[names[3]], dtype=float)\n endog_name = names[3]\n exog = column_stack(data[i] for i in names[:3]).astype(float)\n exog_name = names[:3]\n dataset = Dataset(data=data, names=names, endog=endog, exog=exog,\n endog_name = endog_name, exog_name=exog_name)\n return dataset", "def load_dataset(self, problem_name=\"\", split=\"train\"):\n\n orm = self.__orm\n username = \"admin\" # should be unused (unless submit new feature to db)\n\n with orm.session_scope() as session:\n if not problem_name:\n problem_name = session.query(Problem.name)\\\n .filter(Problem.name != \"demo\").scalar()\n problem_id = session.query(Problem.id)\\\n .filter(Problem.name == problem_name).scalar()\n\n data_dir = os.path.join(\"/data\", split)\n dataset, entities_featurized, target = 
load_dataset_from_dir(\n session, data_dir, problem_name)\n\n suffix = \"_\" + split\n\n return problem_name, dataset, entities_featurized, target", "def get_dataset(dataset_name):\n if dataset_name == \"Iris\":\n data = datasets.load_iris()\n elif dataset_name == \"Breast Cancer\":\n data = datasets.load_breast_cancer()\n else:\n data = datasets.load_wine()\n\n X = data.data\n y = data.target\n return X, y", "def load_dataset(name, version):\n dataset_dir = os.path.join(DATA_DIR, name)\n dataset_ver_dir = os.path.join(dataset_dir, version)\n\n if not os.path.isdir(dataset_dir):\n raise FileNotFoundError(\"Dataset dir not found\")\n if not os.path.isdir(dataset_ver_dir):\n raise FileNotFoundError(\"Dataset version dir not found\")\n\n train_data = load_kg_file(os.path.join(dataset_ver_dir, \"train.txt.gz\"))\n valid_data = load_kg_file(os.path.join(dataset_ver_dir, \"valid.txt.gz\"))\n test_data = load_kg_file(os.path.join(dataset_ver_dir, \"test.txt.gz\"))\n\n dataset = KgDataset()\n dataset.load_triples(train_data, tag=\"train\")\n dataset.load_triples(valid_data, tag=\"valid\")\n dataset.load_triples(test_data, tag=\"test\")\n return dataset", "def load_dataset(name):\n dataset, info = tfds.load(name=name,\n with_info=True,\n data_dir='data/external')\n train_dataset = dataset['train']\n train_dataset = train_dataset.shuffle(SHUFFLE_BUFFER_SIZE,\n reshuffle_each_iteration=False)\n\n return train_dataset", "def LoadData(DATASET_NAME, config):\n # handling for MNIST or CIFAR Superpixels\n if DATASET_NAME == 'MNIST' or DATASET_NAME == 'CIFAR10':\n return SuperPixDataset(DATASET_NAME)\n \n # handling for (ZINC) molecule dataset\n if DATASET_NAME == 'ZINC':\n return MoleculeDataset(DATASET_NAME)\n\n # handling for the TU Datasets\n TU_DATASETS = ['ENZYMES', 'DD', 'PROTEINS_full']\n if DATASET_NAME in TU_DATASETS: \n return TUsDataset(DATASET_NAME)\n\n # handling for SBM datasets\n SBM_DATASETS = ['SBM_CLUSTER', 'SBM_PATTERN']\n if DATASET_NAME in SBM_DATASETS: \n return SBMsDataset(DATASET_NAME)\n \n # handling for TSP dataset\n if DATASET_NAME == 'TSP':\n return TSPDataset(DATASET_NAME)\n\n # handling for COLLAB dataset\n if DATASET_NAME == 'OGBL-COLLAB':\n return COLLABDataset(DATASET_NAME)\n\n # handling for the CSL (Circular Skip Links) Dataset\n if DATASET_NAME == 'CSL': \n return CSLDataset(DATASET_NAME)\n\n return RNADataset(DATASET_NAME, config)", "def load_processed_dataset(name):\n assert name in VALID_NAMES, 'Invalid data set requested. 
Please make sure name is one of ' + ', '.join(VALID_NAMES) + '.'\n path = os.path.join('downloads', name)\n path_processed = os.path.join(path, 'processed')\n\n if name == 'iris':\n return pd.read_csv(os.path.join(path_processed, 'iris.csv'))\n\n elif name == 'wine':\n return pd.read_csv(os.path.join(path_processed, 'wine.csv'))\n\n elif name == 'titanic':\n return pd.read_csv(os.path.join(path_processed, 'titanic.csv'))\n\n elif name == 'lanl':\n with open(os.path.join(path_processed, 'train_data.pkl'), 'rb') as f:\n x = pkl.load(f)\n with open(os.path.join(path_processed, 'train_targets.pkl'), 'rb') as f:\n y = pkl.load(f)\n return x, y\n\n elif name == 'MNIST' or name == 'FashionMNIST':\n training = torch.load(os.path.join(path_processed, 'training.pt'))\n test = torch.load(os.path.join(path_processed, 'test.pt'))\n return training, test", "def get_dataset(name, split, data_dir=\"~/tensorflow_datasets\"):\n assert split in [\"train\", \"train+validation\", \"validation\", \"test\"]\n dataset, info = tfds.load(name, split=split, data_dir=data_dir, with_info=True)\n return dataset, info", "def __loadDataset(self, parameters):\n # self.localConfigured = Settings.instance().readValue( key = 'Common/local-repo' )\n for pr in parameters:\n if pr['type'] == 'dataset':\n if pr['value'].startswith('undefined:/'):\n fileName = pr['value'].split('undefined:/')[1]\n if not os.path.exists( fileName ):\n raise Exception(\"the following test data file is missing: %s \" % fileName)\n\n doc = FileModelTestData.DataModel()\n res = doc.load( absPath = fileName )\n pr['value'] = \"undefined:/%s\" % doc.getRaw()\n elif pr['value'].startswith('local-tests:/'):\n fileName = pr['value'].split('local-tests:/')[1]\n\n if not os.path.exists( fileName ):\n raise Exception(\"the following test data file is missing: %s \" % fileName)\n \n doc = FileModelTestData.DataModel()\n res = doc.load( absPath = fileName )\n pr['value'] = \"local-tests:/%s\" % doc.getRaw()\n else:\n pass", "def load_raw_dataset(name):\n assert name in VALID_NAMES, 'Invalid data set requested. 
Please make sure name is one of ' + ', '.join(VALID_NAMES) + '.'\n\n os.makedirs('downloads', exist_ok=True)\n path = os.path.join('downloads', name)\n path_raw = os.path.join(path, 'raw')\n\n if name == 'iris':\n prep_path(path)\n safe_dl('https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data', path_raw)\n safe_dl('https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.names', path_raw)\n return pd.read_csv(os.path.join(path_raw, 'iris.data'), names=['sepal_len', 'sepal_wid', 'petal_len', 'petal_wid', 'species'])\n\n elif name == 'wine':\n prep_path(path)\n safe_dl('https://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data', path_raw)\n safe_dl('https://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.names', path_raw)\n return pd.read_csv(os.path.join(path_raw, 'wine.data'), names=['class',\n 'alcohol',\n 'malic_acid',\n 'ash',\n 'alkalinity',\n 'magnesium',\n 'phenols',\n 'flavanoids',\n 'nonflavanoid_phenols',\n 'proanthocyanins',\n 'color_intensity',\n 'hue',\n 'dilution',\n 'proline'])\n\n elif name == 'titanic':\n import kaggle; kaggle.api.authenticate()\n prep_path(path)\n if len(os.listdir(path_raw)) == 0:\n kaggle.api.competition_download_files('titanic', path_raw)\n titanic = pd.read_csv(os.path.join(path_raw, 'train.csv'))\n titanic_test = pd.read_csv(os.path.join(path_raw, 'test.csv'))\n return titanic, titanic_test\n\n elif name == 'lanl':\n import kaggle; kaggle.api.authenticate()\n prep_path(path)\n if len(os.listdir(path)) == 0:\n kaggle.api.competition_download_files('LANL-Earthquake-Prediction', path_raw)\n if not os.path.exists(os.path.join(path_raw, 'test')):\n zip_ref = zipfile.ZipFile(os.path.join(path_raw, 'test.zip'), 'r')\n zip_ref.extractall(os.path.join(path_raw, 'test'))\n zip_ref.close()\n return pd.read_csv(os.path.join(path_raw, 'train.csv.zip'))\n\n elif name == 'MNIST':\n mnist = torchvision.datasets.MNIST('downloads', train=True, download=True)\n mnist_test = torchvision.datasets.MNIST('downloads', train=False, download=True)\n return mnist, mnist_test\n\n elif name == 'FashionMNIST':\n fmnist = torchvision.datasets.FashionMNIST('downloads', train=True, download=True)\n fmnist_test = torchvision.datasets.FashionMNIST('downloads', train=False, download=True)\n return fmnist, fmnist_test", "def load_data(name: str) -> pandas.DataFrame:\n datasets = list_datasets()\n if name not in datasets:\n raise ImportError(\n f\"No such dataset: {name}, \"\n f\"available: {list(list_datasets().keys())}\"\n )\n\n dataset = datasets[name]\n data = pandas.read_csv(\n dataset[\"file\"],\n index_col=0 if dataset[\"index\"] else False,\n )\n data.__dfname__ = name\n return data", "def get_dataset(name, split, data_dir=\"~/tensorflow_datasets\"):\n\n data_dir = os.path.join(os.getcwd(),'data/VOC')\n if not os.path.exists(data_dir):\n os.makedirs(data_dir)\n\n assert split in [\"train\", \"train+validation\", \"validation\", \"test\"]\n\n dataset, info = tfds.load(name, split=split, data_dir=data_dir, with_info=True)\n return dataset, info", "def get_data(self, name):\n assert name, \"Must input a valid dataset name.\"\n try:\n return self.data[\"dataset\"][name]\n except KeyError:\n raise KeyError(\"The dataset \\'{}\\' does not exist in the cache.\".format(name))", "def dataset_by_name(name):\n return _datasets[name.lower()]", "def load_dataset(dataset):\n print('Start loading dataset - ' + dataset + '...\\n')\n # split into training and test sets\n (ds_train, ds_test), ds_info = tfds.load(dataset, 
split=['train','test'], with_info=True, shuffle_files=True)\n\n print('Dataset - ' + dataset + ' loaded into train and test splits successfully.\\n')\n\n print(\"The list of all available labels for this dataset:\")\n print(list(ds_info.features.keys())) # extract available labels from ds_info \n print()\n\n print(\"The input shape of the provided image in the dataset:\")\n print(ds_info.features['image'].shape) # extract image shape from ds_info\n print()\n\n # print the size of training and test sets to console\n print(\"The number of images in the training set: \" + str(ds_info.splits['train'].num_examples))\n print(\"The number of images in the test set: \" + str(ds_info.splits['test'].num_examples))\n print()\n\n return ds_train, ds_test, ds_info", "def load_dataset(file_name, model_ver):\n\n print 'Loading dataset ...'\n\n if model_ver == 'dmspline':\n file_path = join(DATA_DIR, file_name)\n id_field = Field(sequential=False, use_vocab=False, dtype=torch.int)\n text_field = Field(pad_token=None, tokenize=_tokenize_str)\n attr_field = Field(sequential=False, use_vocab=False, dtype=torch.float)\n standardzed_attr_field = Field(sequential=False, use_vocab=False, dtype=torch.float)\n\n dataset = TabularDataset(\n path=file_path,\n format='csv',\n fields=[('attr', attr_field), ('id', id_field), ('standardized_attr', standardzed_attr_field), ('text', text_field)],\n skip_header=True)\n\n else:\n file_path = join(DATA_DIR, file_name)\n id_field = Field(sequential=False, use_vocab=False, dtype=torch.int)\n text_field = Field(pad_token=None, tokenize=_tokenize_str)\n\n dataset = TabularDataset(\n path=file_path,\n format='csv',\n fields=[('id', id_field), ('text', text_field)],\n skip_header=True)\n\n text_field.build_vocab(dataset, min_freq=10)\n return dataset", "def find_dataset_using_name(dataset_name):\n dataset_filename = \"datasets.\" + dataset_name + \"_dataset\"\n datasetlib = importlib.import_module(dataset_filename)\n dataset = None\n target_dataset_name = dataset_name.replace('_', '') + 'dataset'\n for name, cls in datasetlib.__dict__.items():\n if 'dataset' in name.lower():\n if name.lower() == target_dataset_name.lower():\n dataset = cls\n\n if dataset is None:\n raise NotImplementedError(\n \"In %s.py, there should be a subclass of BaseDataset with class tag that matches %s in lowercase.\" % (\n dataset_filename, target_dataset_name))\n\n return dataset", "def get_dataset(name, *, tfds_data_dir=None, seed=547):\n\n kwargs = {}\n kwargs['tfds_data_dir'] = tfds_data_dir\n name_prefix = name\n\n if name_prefix not in ['lsun', *SimpleDataset.DATASET_NAMES]:\n kwargs['seed'] = seed\n\n if name_prefix not in DATASETS:\n raise ValueError(\"Dataset %s is not available.\" % name)\n\n return DATASETS[name_prefix](**kwargs)", "def find_dataset_using_name(dataset_name):\n dataset_filename = \"data.\" + dataset_name + \"_dataset\"\n datasetlib = importlib.import_module(dataset_filename)\n\n dataset = None\n target_dataset_name = dataset_name.replace('_', '') + 'dataset'\n for name, cls in datasetlib.__dict__.items():\n if name.lower() == target_dataset_name.lower() \\\n and issubclass(cls, BaseDataset):\n dataset = cls\n\n if dataset is None:\n raise NotImplementedError(\"In %s.py, there should be a subclass of BaseDataset with class name that matches %s in lowercase.\" % (dataset_filename, target_dataset_name))\n\n return dataset", "def find_dataset_using_name(dataset_name):\n dataset_filename = \"data.\" + dataset_name + \"_dataset\"\n datasetlib = 
importlib.import_module(dataset_filename)\n\n dataset = None\n target_dataset_name = dataset_name.replace('_', '') + 'dataset'\n for name, cls in datasetlib.__dict__.items():\n if name.lower() == target_dataset_name.lower() \\\n and issubclass(cls, BaseDataset):\n dataset = cls\n\n if dataset is None:\n raise NotImplementedError(\"In %s.py, there should be a subclass of BaseDataset with class name that matches %s in lowercase.\" % (dataset_filename, target_dataset_name))\n\n return dataset", "def find_dataset_using_name(dataset_name):\n dataset_filename = \"data.\" + dataset_name + \"_dataset\"\n datasetlib = importlib.import_module(dataset_filename)\n\n dataset = None\n target_dataset_name = dataset_name.replace('_', '') + 'dataset'\n for name, cls in datasetlib.__dict__.items():\n if name.lower() == target_dataset_name.lower() \\\n and issubclass(cls, BaseDataset):\n dataset = cls\n\n if dataset is None:\n raise NotImplementedError(\"In %s.py, there should be a subclass of BaseDataset with class name that matches %s in lowercase.\" % (dataset_filename, target_dataset_name))\n\n return dataset", "def _load_dataset(self, data_path, augmentation, batch_size):\n if path.split(data_path)[1] == \"\":\n # Deal with edge case where there's a \"/\" at the end of the path.\n data_path = path.split(data_path)[0]\n\n if path.split(data_path)[1].endswith(\"training\"):\n dataset_name = \"training dataset\"\n else:\n dataset_name = \"validation dataset\"\n\n start_time = time.time()\n self._update_status(\"Loading {}.\".format(dataset_name))\n\n\n dataset = MapillaryDataset(data_path, augmentation, self.iaa)\n data_loader = DataLoader(dataset,\n batch_size,\n shuffle=True)\n\n self._update_status(\"{} loaded. ({} ms)\".format(\n dataset_name.capitalize(),\n int((time.time() - start_time) * 1000)))\n\n return data_loader", "def find_dataset_using_name(dataset_name):\n\tdataset_filename = \"data.\" + dataset_name + \"_dataset\"\n\tdatasetlib = importlib.import_module(dataset_filename)\n\n\tdataset = None\n\ttarget_dataset_name = dataset_name.replace('_', '') + 'dataset'\n\tfor name, cls in datasetlib.__dict__.items():\n\t\tif name.lower() == target_dataset_name.lower() \\\n\t\t and issubclass(cls, BaseDataset):\n\t\t\tdataset = cls\n\n\tif dataset is None:\n\t\traise NotImplementedError(\"In %s.py, there should be a subclass of BaseDataset with class name that matches %s in lowercase.\" % (dataset_filename, target_dataset_name))\n\n\treturn dataset", "def get_dataset_reference(self, dataset_name):\n\n print_debug(\"Geting dataset :\" + dataset_name)\n dataset = DatasetFactory.get(dataset_file_name=dataset_name)\n return dataset", "def get_dataset(name: str) -> pd.DataFrame:\r\n\r\n datasets = read_json(CONFIG_PATH)\r\n dataset = datasets[name]\r\n\r\n if dataset:\r\n if dataset['type'] == 'xls':\r\n return pd.read_excel(dataset['url'])\r\n elif dataset['type'] == 'csv':\r\n return pd.read_csv(dataset['url'], header=dataset['header'])\r\n else:\r\n return None", "def load_data(name, download=True):\n\n # Get the path from the datasets\n path = datasets[name]\n\n # Check if the data exists, otherwise download or raise\n if not os.path.exists(path):\n if download:\n download_all()\n else:\n raise ValueError((\n \"'{}' dataset has not been downloaded, \"\n \"use the download.py module to fetch datasets\"\n ).format(name))\n\n\n # Return the data frame\n return pd.read_csv(path)", "def load_data(name, download=True):\n\n # Get the path from the datasets\n path = datasets[name]\n\n # Check if the data exists, 
otherwise download or raise\n if not os.path.exists(path):\n if download:\n download_all()\n else:\n raise ValueError((\n \"'{}' dataset has not been downloaded, \"\n \"use the download.py module to fetch datasets\"\n ).format(name))\n\n # Read the directories in the directory as the categories.\n categories = [\n cat for cat in os.listdir(path)\n if os.path.isdir(os.path.join(path, cat))\n ]\n\n files = [] # holds the file names relative to the root\n data = [] # holds the text read from the file\n target = [] # holds the string of the category\n\n # Load the data from the files in the corpus\n for cat in categories:\n for name in os.listdir(os.path.join(path, cat)):\n files.append(os.path.join(path, cat, name))\n target.append(cat)\n\n with open(os.path.join(path, cat, name), 'r') as f:\n data.append(f.read())\n\n\n # Return the data bunch for use similar to the newsgroups example\n return Bunch(\n categories=categories,\n files=files,\n data=data,\n target=target,\n )", "def load_data(label_name='Species'):\n\n # Create a local copy of the training set.\n train_path = tf.keras.utils.get_file(fname=TRAIN_URL.split('/')[-1],\n origin=TRAIN_URL)\n # train_path now holds the pathname: (训练集和测试集路径) ~/.keras/datasets/iris_training.csv\n\n # Parse the local CSV file.(解析)\n train = pd.read_csv(filepath_or_buffer=train_path,\n names=CSV_COLUMN_NAMES, # list of column names\n header=0 # ignore the first row of the CSV file.\n )\n # train now holds a pandas DataFrame, which is data structure\n # analogous to a table.\n\n # 1. Assign the DataFrame's labels (the right-most column) to train_label.\n # 2. Delete (pop) the labels from the DataFrame.\n # 3. Assign the remainder of the DataFrame to train_features\n print(\"-\")\n train_features, train_label = train, train.pop(label_name)\n\n # Apply the preceding logic to the test set.\n test_path = tf.keras.utils.get_file(TEST_URL.split('/')[-1], TEST_URL)\n test = pd.read_csv(test_path, names=CSV_COLUMN_NAMES, header=0)\n test_features, test_label = test, test.pop(label_name)\n\n # Return four DataFrames.\n return (train_features, train_label), (test_features, test_label)", "def get(self, name):\n assert name, \"Must input a valid dataset name.\"\n return self.manager.get_data(name)", "def create_dataset(dataset_name):\n dataset_as_lower = dataset_name.lower()\n if dataset_as_lower in _datasets_from_keras.keys():\n data_details = _datasets_from_keras[dataset_as_lower]\n (x_train, y_train), (x_test, y_test) = data_details['data'].load_data()\n else:\n raise IOError(\"Dataset {0} is NOT supported\".format(dataset_name))\n\n # Performing pre-processing specifically for images datasets.\n if data_details['data type'] == 'image':\n x_train = _pre_process_images(x_train, data_details)\n x_test = _pre_process_images(x_test, data_details)\n\n return x_train, y_train, x_test, y_test", "def get_dataset(name):\n from ._ids import _datasets\n\n if name in _datasets:\n return read_drive_data(*_datasets[name])\n else:\n raise IndexError(\n f\"Uknown company name. {name} was given. Available datasets are {_datasets.keys()}\"\n )", "def load_demo_data(name, header=None):\n params = DEMO_DATASETS.get(name)\n if params:\n url, file_path, continuous_columns = params\n if not os.path.isfile(file_path):\n base_path = os.path.dirname(file_path)\n if not os.path.exists(base_path):\n os.makedirs(base_path)\n\n download_file(url, file_path)\n\n else:\n message = (\n '{} is not a valid dataset name. 
'\n 'Supported values are: {}.'.format(name, list(DEMO_DATASETS.keys()))\n )\n raise ValueError(message)\n\n return pd.read_csv(file_path, header=header), continuous_columns", "def get_dataset(dataset_name, split_name, dataset_dir):\n if dataset_name not in _DATASETS_INFORMATION:\n raise ValueError('The specified dataset is not supported yet.')\n\n splits_to_sizes = _DATASETS_INFORMATION[dataset_name].splits_to_sizes\n\n if split_name not in splits_to_sizes:\n raise ValueError('data split name %s not recognized' % split_name)\n\n # Prepare the variables for different datasets.\n num_classes = _DATASETS_INFORMATION[dataset_name].num_classes\n ignore_label = _DATASETS_INFORMATION[dataset_name].ignore_label\n\n file_pattern = _FILE_PATTERN\n file_pattern = os.path.join(dataset_dir, file_pattern % split_name)\n\n # Specify how the TF-Examples are decoded.\n keys_to_features = {\n 'image/encoded': tf.FixedLenFeature(\n (), tf.string, default_value=''),\n 'image/filename': tf.FixedLenFeature(\n (), tf.string, default_value=''),\n 'image/format': tf.FixedLenFeature(\n (), tf.string, default_value='jpg'),\n 'image/height': tf.FixedLenFeature(\n (), tf.int64, default_value=0),\n 'image/width': tf.FixedLenFeature(\n (), tf.int64, default_value=0),\n 'image/segmentation/class/encoded': tf.FixedLenFeature(\n (), tf.string, default_value=''),\n 'image/segmentation/class/format': tf.FixedLenFeature(\n (), tf.string, default_value='png'),\n 'image/lastmask/encoded': tf.FixedLenFeature(\n (), tf.string, default_value=''),\n 'image/lastmask/format': tf.FixedLenFeature(\n (), tf.string, default_value='png'),\n 'image/firstimage/encoded': tf.FixedLenFeature(\n (), tf.string, default_value=''),\n 'image/firstimage/format': tf.FixedLenFeature(\n (), tf.string, default_value='jpg'),\n 'image/firstmask/encoded': tf.FixedLenFeature(\n (), tf.string, default_value=''),\n 'image/firstmask/format': tf.FixedLenFeature(\n (), tf.string, default_value='png'),\n }\n\n items_to_handlers = {\n 'image': tfexample_decoder.Image(\n image_key='image/encoded',\n format_key='image/format',\n channels=3),\n 'image_name': tfexample_decoder.Tensor('image/filename'),\n 'height': tfexample_decoder.Tensor('image/height'),\n 'width': tfexample_decoder.Tensor('image/width'),\n 'labels_class': tfexample_decoder.Image(\n image_key='image/segmentation/class/encoded',\n format_key='image/segmentation/class/format',\n channels=1),\n 'last_mask': tfexample_decoder.Image(\n image_key='image/lastmask/encoded',\n format_key='image/lastmask/format',\n channels=1),\n 'first_image': tfexample_decoder.Image(\n image_key='image/firstimage/encoded',\n format_key='image/firstimage/format',\n channels=3),\n 'first_mask': tfexample_decoder.Image(\n image_key='image/firstmask/encoded',\n format_key='image/firstmask/format',\n channels=1),\n\n }\n\n decoder = tfexample_decoder.TFExampleDecoder(\n keys_to_features, items_to_handlers)\n\n return dataset.Dataset(\n data_sources=file_pattern,\n reader=tf.TFRecordReader,\n decoder=decoder,\n num_samples=splits_to_sizes[split_name],\n items_to_descriptions=_ITEMS_TO_DESCRIPTIONS,\n ignore_label=ignore_label,\n num_classes=num_classes,\n name=dataset_name,\n multi_label=True)", "def get_dataset(name):\n if name == 'cityscapes':\n return Cityscapes", "def load_dataset(self):\n # Get all the files in the directory\n file_list = self.get_file_list()\n\n # Concatenate the data corresponding to a list of files\n data = self.concatenate_file_data(file_list)\n\n # Shuffle the data and create the training and the 
validation datasets\n data = self.shuffle_data_dictionary(data)\n self.training_dataset, self.validation_dataset = self.split_data_into_training_and_validation(data)", "def get_dataset_info(key: str):\n key = key.lower().replace(\"-\", \"_\").split(\".\")[0]\n filename = key + \".json\"\n\n if filename not in os.listdir(os.path.join(MODULE_ROOT, \"datasets\")):\n raise FileNotFoundError\n\n with open(os.path.join(MODULE_ROOT, \"datasets/\", filename), \"r\") as f:\n dataset_info = json.load(f)\n return dataset_info", "def get_by_name(self, name: str) -> Optional[\"Dataset\"]:\n raise NotImplementedError", "def load_dataset(\n self, datastore, filestore, file_id=None, url=None,\n detect_headers=True, infer_types=None, load_format='csv', options=[],\n username=None, password=None, resources=None, reload=False,\n human_readable_name=None\n ):\n dataset = None\n file_handle = None\n result_resources = dict()\n if url is not None:\n # If the same url has been previously used to generate a dataset\n # we do not need to download the file and re-create the dataset.\n if not reload and resources is not None:\n if resources.get(base.RESOURCE_URL) == url:\n ds_id = resources.get(base.RESOURCE_DATASET)\n if ds_id:\n dataset = datastore.get_dataset(ds_id)\n # If dataset is still None we need to create a new dataset by\n # downloading the given Uri\n if dataset is None:\n file_handle = filestore.download_file(\n self,\n uri=url,\n username=username,\n password=password\n )\n result_resources[base.RESOURCE_URL] = url\n else:\n # If the same file has been previously used to generate a dataset\n # we do not need to re-create it.\n if resources is not None:\n if resources.get(base.RESOURCE_FILEID) == file_id:\n ds_id = resources.get(base.RESOURCE_DATASET)\n if ds_id:\n dataset = datastore.get_dataset(ds_id)\n # If dataset is still None we need to create a new dataset from the\n # specified file\n if dataset is None:\n file_handle = filestore.get_file(file_id)\n result_resources[base.RESOURCE_FILEID] = file_id\n # Ensure that the dataset is not None at this point\n if dataset is None:\n # Create dataset from the file that is represented by the file\n # handle.\n dataset = datastore.load_dataset(\n fh=file_handle,\n profiler=infer_types\n )\n result_resources[base.RESOURCE_DATASET] = dataset.identifier\n return VizualApiResult(\n dataset=dataset,\n resources=result_resources\n )", "def resolve_dataset(self, which_set, dataset_name):\n p = path.join(self.dataset_root, dataset_name + \"/\")\n\n if not(path.isdir(serial.preprocess(p))):\n raise IOError(\"MRI dataset directory %s not found.\"\n % serial.preprocess(p))\n\n if which_set == 'train':\n data_path = p + 'train.npy'\n label_path = p + 'train_labels.npy'\n elif which_set == 'test':\n data_path = p + 'test.npy'\n label_path = p + 'test_labels.npy'\n else:\n if which_set != \"full\":\n raise ValueError(\"dataset \\'%s\\' not supported.\" % which_set)\n data_path = p + \"full_unshuffled.npy\"\n label_path = p + \"full_labels_unshuffled.npy\"\n \n data_path = serial.preprocess(data_path)\n label_path = serial.preprocess(label_path)\n\n if not(path.isfile(data_path)):\n raise ValueError(\"Dataset %s not found in %s\" %(which_set,\n serial.preprocess(p)))\n return data_path, label_path", "def get_dataset(self, dataset_path=None, normalize=True, return_original=False):\n if dataset_path is None:\n dataset_path = self.dir\n \n if \"mocap\" in dataset_path.lower():\n print(\"Loading Mocap dataset.\")\n df = get_mocap()\n df_orig = df\n elif \"profi\" in 
dataset_path.lower():\n print(\"Loading Profiset dataset.\")\n df = get_profiset()\n df_orig = df\n else:\n print(\"Loading CoPhIR dataset.\")\n df_orig, attr_lengths = get_objects_with_indexes(self.labels, f'{dataset_path}/level-{str(self.n_levels)}.txt', f'{dataset_path}/objects.txt')\n if normalize:\n df = scale_per_descriptor(df_orig, self.labels, attr_lengths)\n else:\n df = df_orig\n \n assert df.shape[1] == self.descriptor_values + self.n_levels + len([\"object_id\"])\n logging.info(f\"Loaded dataset of shape: {df.shape}\")\n if return_original:\n return df, df_orig\n else:\n return df", "def get_dataset(self, name, enforce=None, download=True):\n if self._meta is None:\n self._get_meta()\n return self._get_dataset(name, enforce=enforce, download=download)", "def load_dataset(self, split, combine=False, **kwargs):\r\n data_json_path = os.path.join(self.args.data, \"{}.json\".format(split))\r\n self.datasets[split] = get_asr_dataset_from_json(data_json_path, self.tgt_dict)", "def get_datasets(load_key=None, maven=False):\n ds_names = {}\n if load_key == 'R2349': \n ds_names['batsrus_mf_lr'] = model_dir+'R2349/batsrus_3d_multi_fluid_lowres.h5'\n ds_names['batsrus_multi_species'] = model_dir+'R2349/batsrus_3d_multi_species.h5'\n ds_names['batsrus_electron_pressure'] = model_dir+'R2349/batsrus_3d_pe.h5'\n ds_names['heliosares'] ='/Volumes/triton/Data/ModelChallenge/R2349/heliosares_multi.h5'\n #ds_names['rhybrid'] ='/Volumes/triton/Data/ModelChallenge/R2349/rhybrid.h5'\n \n ds_types = {'batsrus1':[key for key in ds_names.keys() if 'multi_fluid' in key],\n 'batsrus2':[key for key in ds_names.keys() if 'multi_species' in key],\n 'batsrus3':[key for key in ds_names.keys() if 'electron_pressure' in key],\n 'batsrus4':[key for key in ds_names.keys() if 'mf_lr' in key],\n 'heliosares':[key for key in ds_names.keys() if 'helio' in key],\n 'rhybrid_helio':[key for key in ds_names.keys() if 'rhybrid' in key ]}\n if maven or True:\n ds_names['maven']=orbit_dir+'orbit_2349.csv'\n #ds_names['maven'] = orbit_dir+'orbit_plume_2349.csv'\n ds_types['maven']=['maven']\n elif load_key == 'batsrus_mf_lowres':\n ds_names['batsrus_mf_lr'] = model_dir+'R2349/batsrus_3d_multi_fluid_lowres.h5'\n ds_types = {'batsrus_mf_lr' : ['batsrus_mf_lr']}\n\n\n elif load_key == 'helio_multi':\n ds_names['t00550'] = model_dir+'R2349/Heliosares_Multi/t00550.h5'\n ds_names['t00560'] = model_dir+'R2349/Heliosares_Multi/t00560.h5'\n ds_names['t00570'] = model_dir+'R2349/Heliosares_Multi/t00570.h5'\n ds_names['t00580'] = model_dir+'R2349/Heliosares_Multi/t00580.h5'\n ds_names['t00590'] = model_dir+'R2349/Heliosares_Multi/t00590.h5'\n ds_names['t00600'] = model_dir+'R2349/Heliosares_Multi/t00600.h5'\n ds_names['t00610'] = model_dir+'R2349/Heliosares_Multi/t00610.h5'\n ds_names['t00620'] = model_dir+'R2349/Heliosares_Multi/t00620.h5'\n ds_names['t00630'] = model_dir+'R2349/Heliosares_Multi/t00630.h5'\n ds_names['t00640'] = model_dir+'R2349/Heliosares_Multi/t00640.h5'\n ds_names['t00650'] = model_dir+'R2349/Heliosares_Multi/t00650.h5'\n\n ds_types = {'heliosares':[key for key in ds_names.keys()]}\n if maven:\n #ds_names['maven'] = orbit_dir+'orbit_2349.csv'\n ds_types['maven']=['maven']\n elif load_key == 'SDC_BATS':\n ds_names['LS180_SSL000_max'] = model_dir+'SDC_Archive/BATSRUS/LS180_SSL000_max.h5'\n ds_names['LS270_SSL000_max'] = model_dir+'SDC_Archive/BATSRUS/LS270_SSL000_max.h5'\n ds_names['LS090_SSL000_max'] = model_dir+'SDC_Archive/BATSRUS/LS090_SSL000_max.h5'\n ds_names['LS180_SSL270_max'] = 
model_dir+'SDC_Archive/BATSRUS/LS180_SSL270_max.h5'\n ds_names['LS270_SSL270_max'] = model_dir+'SDC_Archive/BATSRUS/LS270_SSL270_max.h5'\n ds_names['LS090_SSL270_max'] = model_dir+'SDC_Archive/BATSRUS/LS090_SSL270_max.h5'\n ds_names['LS180_SSL180_max'] = model_dir+'SDC_Archive/BATSRUS/LS180_SSL180_max.h5'\n ds_names['LS270_SSL180_max'] = model_dir+'SDC_Archive/BATSRUS/LS270_SSL180_max.h5'\n ds_names['LS090_SSL180_max'] = model_dir+'SDC_Archive/BATSRUS/LS090_SSL180_max.h5'\n ds_names['LS180_SSL000_min'] = model_dir+'SDC_Archive/BATSRUS/LS180_SSL000_min.h5'\n ds_names['LS270_SSL000_min'] = model_dir+'SDC_Archive/BATSRUS/LS270_SSL000_min.h5'\n ds_names['LS090_SSL000_min'] = model_dir+'SDC_Archive/BATSRUS/LS090_SSL000_min.h5'\n ds_names['LS180_SSL270_min'] = model_dir+'SDC_Archive/BATSRUS/LS180_SSL270_min.h5'\n ds_names['LS270_SSL270_min'] = model_dir+'SDC_Archive/BATSRUS/LS270_SSL270_min.h5'\n ds_names['LS090_SSL270_min'] = model_dir+'SDC_Archive/BATSRUS/LS090_SSL270_min.h5'\n ds_names['LS180_SSL180_min'] = model_dir+'SDC_Archive/BATSRUS/LS180_SSL180_min.h5'\n ds_names['LS270_SSL180_min'] = model_dir+'SDC_Archive/BATSRUS/LS270_SSL180_min.h5'\n ds_names['LS090_SSL180_min'] = model_dir+'SDC_Archive/BATSRUS/LS090_SSL180_min.h5'\n\n ds_types = {'batsrus':[key for key in ds_names.keys()]}\n\n elif load_key == 'SDC_G1':\n #BATSRUS\n ds_names['bats_min_LS270_SSL0'] = \\\n model_dir+'SDC_Archive/BATSRUS/'+'3d__ful_4_n00060000_PERmin-SSLONG0.h5'\n ds_names['bats_min_LS270_SSL180'] = \\\n model_dir+'SDC_Archive/BATSRUS/'+'3d__ful_4_n00060000_PERmin-SSLONG180.h5'\n ds_names['bats_min_LS270_SSL270'] = \\\n model_dir+'SDC_Archive/BATSRUS/'+'3d__ful_4_n00060000_PERmin-SSLONG270.h5' \n \n #HELIOSARES\n #ds_names['helio_1'] = \\\n # model_dir+'SDC_Archive/HELIOSARES/Hybrid/'+'helio_1.h5'\n \n #ds_names['helio_2'] = \\\n # model_dir+'SDC_Archive/HELIOSARES/Hybrid/'+'helio_2.h5'\n \n \n ds_types = {'batsrus1':[key for key in ds_names.keys() if 'bats' in key],\n 'heliosares':[key for key in ds_names.keys() if 'helio' in key]}\n if maven:\n pass\n #ds_names['maven'] = orbit_dir+'orbit_2349.csv'\n #ds_types['maven']=['maven']\n\n elif load_key == 'rhybrid_res':\n ds_names = {'rhybrid240':'/Volumes/triton/Data/ModelChallenge/R2349/rhybrid.h5',\n 'rhybrid120':'/Volumes/triton/Data/ModelChallenge/R2349/HYB/state00030000.h5'}\n ds_types = {'rhybrid1':['rhybrid240'], 'rhybrid2':['rhybrid120']}\n elif load_key == 'batsrus_tseries':\n ds_names = {'batsrus_mf':'/Volumes/triton/Data/ModelChallenge/R2349/BATSRUS/10km_mf/3d__ful_4_n00040000.h5',\n 'batsrus_ms':'/Volumes/triton/Data/ModelChallenge/R2349/BATSRUS/10km_ms/3d__mhd_6_n0050000.h5'}\n ds_types = {'batsrus_mf':['batsrus_mf'], 'batsrus_ms':['batsrus_ms']}\n\n elif load_key == 'maven':\n ds_names, ds_types = {},{}\n ds_names['maven'] = orbit_dir+'orbit_2349.csv'\n ds_types['maven']=['maven']\n elif load_key == 'exo_2349':\n keys = ['2349_1RM_225km','2349_1RM_450km', '2349_2RM_450km',\n '2349_2RM_900km','2349_4RM_900km'] \n ds_names = {k:exo_dir+'/'+k+'/'+k+'.h5' for k in keys}\n ds_types = {k:[k] for k in keys}\n elif load_key == 'exo_comparisonA':\n keys = ['2349_1RM_225km', '2349_2RM_450km',\n '2349_1.5RM_338km'] \n ds_names = {k:exo_dir+'/ComparisonA/'+k+'.h5' for k in keys}\n ds_types = {k:[k] for k in keys}\n elif load_key == 'exo_comparisonB':\n keys = ['2349_1RM_225km', 'T0_1RM_225km', 'T1_1RM_225km', \"T2_1RM_225km\"] \n ds_names = {k:exo_dir+'/ComparisonB/'+k+'.h5' for k in keys}\n ds_types = {k:[k] for k in keys}\n\n elif load_key == 'exo_t1':\n keys = 
['T1_1RM_112km', 'T1_1RM_225km', #'T1_1RM_450km',\n 'T1_2RM_225km', 'T1_2RM_450km', #'T1_2RM_900km',\n 'T1_4RM_900km']\n\n ds_names = {k:exo_dir+'/'+k+'/'+k+'.h5' for k in keys}\n ds_types = {k:[k] for k in keys}\n\n else:\n print('No datasets selected')\n \n\n return (ds_names, ds_types)", "def load(self, dataset, model_dir):\n raise NotImplementedError", "def load_dataset(self, subset):\n assert subset in ('train', 'val')\n\n # Add classes\n for id, name in self.class_mapper.items():\n self.add_class('nodule', id, name)\n\n # Add images\n self.df = self.df_all[self.df_all['subset'] == subset]\n\n image_ids = set()\n for row in self.df.itertuples():\n image_id = (row.seriesuid, row.coordZ)\n path = os.path.join(cur_dir, 'data', 'train', '{}_{}.npy'.format(row.seriesuid, row.coordZ))\n if image_id in image_ids:\n continue\n self.add_image(\"nodule\", image_id=image_id, path=path)\n image_ids.add(image_id)", "def get(dataset_name: str, redownload: bool = False) -> Dataset:\n return Dataset._from_url(dataset_name, force=redownload)", "def load_data(self, name, with_axis=None):\n filename, extension = os.path.splitext(name)\n \n if extension not in [\".dat\",\".txt\",\".npy\",\".npz\", \".mat\"]:\n raise Exception(\"Unknown data format\") \n\n if (extension == \".dat\") or (extension == \".txt\"):\n self._importDataFromText(name, with_axis)\n\n elif extension == \".npy\":\n self._loadBinaryData(name, with_axis)\n\n elif extension == \".npz\":\n self._loadBinaryData_compressed(name, with_axis)\n\n elif extension == \".mat\":\n self._loadMatlab(name, with_axis)", "def _init_dataset(self, data_config, split='train'):\n assert split in {'train', 'valid'}\n\n # load datasets\n print(f'Load {split} dataset')\n if data_config['type'] == 'npy':\n dataset = MSDMelDataset(\n data_config['mel_root'], data_config[f'{split}_tids_fn'],\n data_config['label_fn'], on_mem=data_config['on_mem'],\n ignore_intersection=data_config['ignore_label_intersection'],\n transform=ToVariable())\n\n elif data_config['type'] == 'hdf':\n dataset = MSDMelHDFDataset(\n data_config['hdf_fn'], data_config[f'{split}_tids_fn'],\n data_config['label_fn'],\n ignore_intersection=data_config['ignore_label_intersection'],\n transform=ToVariable())\n\n elif data_config['type'] == 'audio':\n dataset = MSDAudioDataset(\n data_config['audio_root'], data_config[f'{split}_tids_fn'],\n data_config['tid2path_fn'], data_config['label_fn'],\n ignore_intersection=data_config['ignore_label_intersection'],\n device='cpu',\n transform=ToVariable())\n\n return dataset", "def dataset(name):\n t = \"unknown\"\n if name ==\"boston\":\n # regression (506x13feat)\n from sklearn.datasets import load_boston\n X, y = load_boston(return_X_y=True)\n t = \"R\"\n #X,y = shap.datasets.boston()\n #return X,y\n elif name == \"iris\":\n # classification (150x4featx3classes)\n from sklearn.datasets import load_iris\n data = load_iris()\n X = data.data\n y = data.target\n t = \"C\"\n elif name == \"diabetes\":\n # regression (442x10feat)\n from sklearn.datasets import load_diabetes\n X, y = load_diabetes(return_X_y=True)\n t = \"R\"\n elif name == \"digits\":\n # classification (1797x64featx10classes)\n from sklearn.datasets import load_digits\n X, y = load_digits(return_X_y=True)\n t = \"C\"\n elif name == \"wine\":\n # classification (178x13featuresx3classes)\n from sklearn.datasets import load_wine\n X, y = load_wine(return_X_y=True)\n t = \"C\"\n elif name == \"breast_cancer\":\n # classification (569x30featx2classes)\n from sklearn.datasets import 
load_breast_cancer\n X, y = load_breast_cancer(return_X_y=True)\n t = \"C\"\n elif name ==\"nhanesi\":\n X,y = shap.datasets.nhanesi()\n t = \"R\"\n elif name == \"segments\":\n X,y = make_led()\n t = \"C\"\n elif name == \"segments_sampled\":\n X,y = make_led_sample()\n t = \"C\"\n elif name == \"friedman1\":\n from sklearn.datasets import make_friedman1\n X,y= make_friedman1(n_samples=500, random_state=0)\n print('Done')\n X = pd.DataFrame(X, columns=list(range(X.shape[1])))\n t = 'R'\n elif name == \"friedman2\":\n from sklearn.datasets import make_friedman2\n X,y= make_friedman2(random_state=0)\n t = 'R'\n elif name == 'linear':\n X, y, t = draw_linear_function()\n elif name == \"linear2\":\n importlib.reload(lreg)\n X,y,t = lreg.lf_dataset(nsamples=5000, with_vimp=False)\n elif name == 'friendman3':\n X, y, t = friedman_modified()\n else:\n raise ValueError(\"dataset `{}` not implemented\".format(name))\n return X,y,t", "def open_dataset(path: str):\n\n # remove trailing slash:\n if path.endswith(\"/\"):\n path = path[:-1]\n\n if path.endswith(\".zarr.zip\") or path.endswith(\".zarr\"):\n # we can recognise a Zarr dataset by its extension.\n dataset = ZDataset(path)\n elif path.endswith(\"tif\") or path.endswith(\"tiff\"):\n dataset = TIFDataset(path)\n elif exists(join(path, \"stacks\")):\n # we can recognise a ClearControl dataset by the presence of a 'stacks' sub folder.\n dataset = CCDataset(path)\n else:\n raise ValueError(\"Dataset type not recognised, or path incorrect!\")\n\n return dataset", "def _load_dataset(self, split, align, partition):\n\n if partition == 'all':\n self._image_list = self._face.image_list + self._clothes.image_list\n celeba_num = self._face.num_images\n deepfashion_num = self._clothes.num_images\n elif partition == 'face':\n self._image_list = self._face.image_list\n celeba_num = self._face.num_images\n deepfashion_num = 0\n elif partition == 'clothes':\n self._image_list = self._clothes.image_list\n celeba_num = 0\n deepfashion_num = self._clothes.num_images\n\n self._gtdb = {'attr': -1.*np.ones((self.num_images, self.num_classes), dtype=np.float64)}\n\n # load labels for celeba images if they are included. \n if celeba_num > 0:\n self._gtdb['attr'][:celeba_num, self._face_class_idx] = self._face.gtdb['attr']\n # load soft labels for clothes attributes on celeba\n if align:\n fn = osp.join(self.data_path, 'person_'+'face'+'_'+split+'_align.pkl')\n else:\n fn = osp.join(self.data_path, 'person_'+'face'+'_'+split+'.pkl') \n if osp.exists(fn):\n if partition == 'all':\n with open(fn, 'rb') as fid:\n labels = cPickle.load(fid)\n self._gtdb['attr'][:celeba_num, self._clothes_class_idx] = labels\n else:\n 'Dataset {}: Labels for clothes attributes on CelebA are not loaded, the partition is not \"all\"'.format(self.name)\n else:\n print 'Dataset {}: Labels for clothes attributes on CelebA are not available! Missing filename: {}. 
Did you forget to run load_person.py first?'.\\\n format(self.name, fn)\n\n # load labels for deepfashion images if they are included.\n if deepfashion_num > 0:\n self._gtdb['attr'][celeba_num:, self._clothes_class_idx] = self._clothes.gtdb['attr']\n # load soft labels for face attributes on deepfashion\n fn = osp.join(self.data_path, 'person_'+'clothes'+'_'+split+'.pkl')\n if osp.exists(fn):\n if partition == 'all':\n with open(fn, 'rb') as fid:\n labels = cPickle.load(fid)\n self._gtdb['attr'][celeba_num:, self._face_class_idx] = labels\n else:\n 'Dataset {}: Labels for face attributes on Deepfashion are not loaded, the partition is not \"all\"'.format(self.name)\n else:\n print 'Dataset {}: Labels for face attributes on Deepfashion are not available! Missing filename: {}. Did you forget to run load_person.py first?'.\\\n format(self.name, fn)", "def load_datapair(self, ds):\n raise NotImplementedError(\"Define this in your derived checker class\")", "def load_dataset(dataset, subset='train', format='bow', root='./data', verbose=False):\n download_dataset(dataset, subset=subset, format=format, root=root, verbose=verbose)\n dataset_meta = _get_data_meta(dataset, subset=subset, format=format)\n file_format = dataset_meta['file_format']\n data_dir = path.join(root, dataset_meta['dir'])\n file_path = dataset_meta[subset]\n\n if file_format == 'libsvm':\n return load_libsvm_file(path.join(data_dir, file_path))\n elif file_format == 'XY_sparse':\n X, _ = load_libsvm_file(path.join(data_dir, file_path['X']))\n Y, _ = load_libsvm_file(path.join(data_dir, file_path['Y']))\n return X, Y\n elif file_format == 'jsonlines':\n return load_json_lines_file(path.join(data_dir, file_path), features_fields=dataset_meta['features_fields'], labels_field=dataset_meta['labels_field'])\n else:\n raise ValueError(\"File format {} is not supported\".format(file_format))", "def make_dataset(dataset_name):\n return {\n\n 'duc': DUCDataset(),\n\n 'icsi-asr': ICSIASRDataset(),\n 'icsi-ht': ICSIHumanTranscriptDataset(),\n\n 'inspec-train': InspectTrainingDataset(),\n 'inspec-val': InspectValidationDataset(),\n 'inspec-test': InspectTestDataset(),\n\n 'nus': NUSDataset()\n\n }[dataset_name]", "def dataset(options):\n pass", "def get_local_dataset(\n self, \n file_name: str\n ):\n pd.read_csv(file_name)\n #save", "def get_dataset(params):\r\n module_name, class_name = params.dataset.name.rsplit('.', 1)\r\n i = importlib.import_module(module_name)\r\n return getattr(i, class_name)", "def load_dataset(file_path):\n return Dataset.load(file_path)", "def dataset_read(self, name):\n\n # Checks inputs\n check_type(value=name, allowed_types=str, var_name=\"name\", raise_exception=True)\n\n ret = {\n 'ts_list': [],\n 'description': None\n }\n\n response = self.send(root_url=self.session.dm_url + self.root_url,\n verb=GenericClient.VERB.GET,\n template=TEMPLATES['dataset_read'],\n uri_params={\n 'name': name\n })\n\n is_404(response=response, msg=\"Dataset %s not found in database\" % name)\n\n if response.status_code == 200:\n if 'fids' in response.json:\n ret['ts_list'] = response.json['fids']\n\n if 'description' in response.json:\n ret['description'] = response.json['description']\n\n return ret\n raise SystemError(\"Something wrong happened\")", "def _load_data(self):\n\n # This allows a simulated dataset to use the same constructor.\n if self.input_file is None:\n return\n\n logging.info(f\"Loading data from file {self.input_file}\")\n\n # Load the dataset.\n if os.path.isdir(self.input_file):\n self.data = 
get_matrix_from_mtx(self.input_file)\n else:\n self.data = get_matrix_from_h5(self.input_file)", "def getDataset(filename, dsdict):\n\n dataset = \"\"\n for ds in dsdict.keys():\n if filename in dsdict[ds]:\n dataset = ds\n break\n\n if dataset == \"\":\n tolog(\"!!WARNING!!2999!! Dataset not found for file %s\" % (filename))\n else:\n tolog(\"File %s belongs to dataset/container %s\" % (filename, dataset))\n\n return dataset", "def _get_dataset(\n self,\n dataset_path: str,\n data_folder: str = \"data/\",\n ):\n if not os.path.isdir(dataset_path):\n click.secho(f\"{dataset_path} not found!\", fg=\"red\")\n\n dataset_hash = (\n int(hashlib.sha256(dataset_path.encode(\"utf-8\")).hexdigest(), 16) % 10 ** 8\n )\n\n # To avoid using cache for different models\n # split(/) for google/electra-base-discriminator\n pretrained_model = (\n self.hparams.pretrained_model.split(\"/\")[1]\n if \"/\" in self.hparams.pretrained_model\n else self.hparams.pretrained_model\n )\n dataset_cache = data_folder + \".dataset_\" + str(dataset_hash) + pretrained_model\n\n if os.path.isfile(dataset_cache):\n click.secho(f\"Loading tokenized dataset from cache: {dataset_cache}.\")\n return torch.load(dataset_cache)\n\n dataset_path += \"\" if dataset_path.endswith(\"/\") else \"/\"\n dataset = {\n \"train\": pd.read_csv(dataset_path + \"train.tsv\", sep=\"\\t\").to_dict(\n \"records\"\n ),\n \"valid\": pd.read_csv(dataset_path + \"valid.tsv\", sep=\"\\t\").to_dict(\n \"records\"\n ),\n \"test\": pd.read_csv(dataset_path + \"test.tsv\", sep=\"\\t\").to_dict(\"records\"),\n }\n # Read Labels\n with open(dataset_path + \"labels.txt\", \"r\") as fp:\n labels = [line.strip() for line in fp.readlines()]\n label_encoder = {labels[i]: i for i in range(len(labels))}\n\n dataset[\"label_encoder\"] = label_encoder\n # Tokenize\n dataset[\"train\"] = self._tokenize(dataset[\"train\"])\n dataset[\"valid\"] = self._tokenize(dataset[\"valid\"])\n dataset[\"test\"] = self._tokenize(dataset[\"test\"])\n torch.save(dataset, dataset_cache)\n return dataset", "def get_dataset(dataset: str, version: Optional[str] = None, unlabeled: bool = False, **dataset_kwargs):\r\n if version is not None:\r\n version = str(version)\r\n\r\n if dataset not in wilds.supported_datasets:\r\n raise ValueError(f'The dataset {dataset} is not recognized. Must be one of {wilds.supported_datasets}.')\r\n\r\n if unlabeled and dataset not in wilds.unlabeled_datasets:\r\n raise ValueError(f'Unlabeled data is not available for {dataset}. 
Must be one of {wilds.unlabeled_datasets}.')\r\n\r\n if dataset == 'amazon':\r\n if unlabeled:\r\n from wilds.datasets.unlabeled.amazon_unlabeled_dataset import AmazonUnlabeledDataset\r\n return AmazonUnlabeledDataset(version=version, **dataset_kwargs)\r\n else:\r\n from wilds.datasets.amazon_dataset import AmazonDataset\r\n return AmazonDataset(version=version, **dataset_kwargs)\r\n\r\n elif dataset == 'camelyon17':\r\n if unlabeled:\r\n from wilds.datasets.unlabeled.camelyon17_unlabeled_dataset import Camelyon17UnlabeledDataset\r\n return Camelyon17UnlabeledDataset(version=version, **dataset_kwargs)\r\n else:\r\n from wilds.datasets.camelyon17_dataset import Camelyon17Dataset\r\n return Camelyon17Dataset(version=version, **dataset_kwargs)\r\n\r\n elif dataset == 'celebA':\r\n from wilds.datasets.celebA_dataset import CelebADataset\r\n return CelebADataset(version=version, **dataset_kwargs)\r\n\r\n elif dataset == 'civilcomments':\r\n if unlabeled:\r\n from wilds.datasets.unlabeled.civilcomments_unlabeled_dataset import CivilCommentsUnlabeledDataset\r\n return CivilCommentsUnlabeledDataset(version=version, **dataset_kwargs)\r\n else:\r\n from wilds.datasets.civilcomments_dataset import CivilCommentsDataset\r\n return CivilCommentsDataset(version=version, **dataset_kwargs)\r\n\r\n elif dataset == 'domainnet':\r\n if unlabeled:\r\n from wilds.datasets.unlabeled.domainnet_unlabeled_dataset import DomainNetUnlabeledDataset\r\n return DomainNetUnlabeledDataset(version=version, **dataset_kwargs)\r\n else:\r\n from wilds.datasets.domainnet_dataset import DomainNetDataset\r\n return DomainNetDataset(version=version, **dataset_kwargs)\r\n\r\n elif dataset == 'iwildcam':\r\n if unlabeled:\r\n from wilds.datasets.unlabeled.iwildcam_unlabeled_dataset import IWildCamUnlabeledDataset\r\n return IWildCamUnlabeledDataset(version=version, **dataset_kwargs)\r\n else:\r\n if version == '1.0':\r\n from wilds.datasets.archive.iwildcam_v1_0_dataset import IWildCamDataset\r\n else:\r\n from wilds.datasets.iwildcam_dataset import IWildCamDataset # type:ignore\r\n return IWildCamDataset(version=version, **dataset_kwargs)\r\n\r\n elif dataset == 'waterbirds':\r\n from wilds.datasets.waterbirds_dataset import WaterbirdsDataset\r\n return WaterbirdsDataset(version=version, **dataset_kwargs)\r\n\r\n elif dataset == 'yelp':\r\n from wilds.datasets.yelp_dataset import YelpDataset\r\n return YelpDataset(version=version, **dataset_kwargs)\r\n\r\n elif dataset == 'ogb-molpcba':\r\n if unlabeled:\r\n from wilds.datasets.unlabeled.ogbmolpcba_unlabeled_dataset import OGBPCBAUnlabeledDataset\r\n return OGBPCBAUnlabeledDataset(version=version, **dataset_kwargs)\r\n else:\r\n from wilds.datasets.ogbmolpcba_dataset import OGBPCBADataset\r\n return OGBPCBADataset(version=version, **dataset_kwargs)\r\n\r\n elif dataset == 'poverty':\r\n if unlabeled:\r\n from wilds.datasets.unlabeled.poverty_unlabeled_dataset import PovertyMapUnlabeledDataset\r\n return PovertyMapUnlabeledDataset(version=version, **dataset_kwargs)\r\n else:\r\n if version == '1.0':\r\n from wilds.datasets.archive.poverty_v1_0_dataset import PovertyMapDataset\r\n else: \r\n from wilds.datasets.poverty_dataset import PovertyMapDataset # type:ignore\r\n return PovertyMapDataset(version=version, **dataset_kwargs)\r\n\r\n elif dataset == 'fmow':\r\n if unlabeled:\r\n from wilds.datasets.unlabeled.fmow_unlabeled_dataset import FMoWUnlabeledDataset\r\n return FMoWUnlabeledDataset(version=version, **dataset_kwargs)\r\n else:\r\n if version == '1.0':\r\n from 
wilds.datasets.archive.fmow_v1_0_dataset import FMoWDataset\r\n else:\r\n from wilds.datasets.fmow_dataset import FMoWDataset # type:ignore\r\n return FMoWDataset(version=version, **dataset_kwargs)\r\n\r\n elif dataset == 'bdd100k':\r\n from wilds.datasets.bdd100k_dataset import BDD100KDataset\r\n return BDD100KDataset(version=version, **dataset_kwargs)\r\n\r\n elif dataset == 'py150':\r\n from wilds.datasets.py150_dataset import Py150Dataset\r\n return Py150Dataset(version=version, **dataset_kwargs)\r\n\r\n elif dataset == 'sqf':\r\n from wilds.datasets.sqf_dataset import SQFDataset\r\n return SQFDataset(version=version, **dataset_kwargs)\r\n \r\n elif dataset == 'globalwheat':\r\n if unlabeled:\r\n from wilds.datasets.unlabeled.globalwheat_unlabeled_dataset import GlobalWheatUnlabeledDataset\r\n return GlobalWheatUnlabeledDataset(version=version, **dataset_kwargs)\r\n else:\r\n from wilds.datasets.globalwheat_dataset import GlobalWheatDataset # type:ignore\r\n return GlobalWheatDataset(version=version, **dataset_kwargs)\r\n\r\n elif dataset == 'encode':\r\n from wilds.datasets.encode_dataset import EncodeDataset\r\n return EncodeDataset(version=version, **dataset_kwargs)\r\n\r\n elif dataset == 'rxrx1':\r\n from wilds.datasets.rxrx1_dataset import RxRx1Dataset\r\n return RxRx1Dataset(version=version, **dataset_kwargs)\r\n\r\n elif dataset == 'globalwheat':\r\n from wilds.datasets.globalwheat_dataset import GlobalWheatDataset\r\n return GlobalWheatDataset(version=version, **dataset_kwargs)\r\n\r\n elif dataset == 'cmnist':\r\n from wilds.datasets.cmnist_debug_dataset import CMNISTDDataset\r\n return CMNISTDDataset(**dataset_kwargs)\r\n\r\n elif dataset == 'noisy_2feature':\r\n from wilds.datasets.noisy_simple_dataset import NoisySimpleDataset\r\n return NoisySimpleDataset(**dataset_kwargs)\r\n\r\n elif dataset == 'rot_simple':\r\n from wilds.datasets.rot_simple_dataset import RotSimpleDataset\r\n return RotSimpleDataset(**dataset_kwargs)\r\n\r\n elif dataset == 'spu_2feature':\r\n from wilds.datasets.spu_simple_dataset import SpuSimpleDataset\r\n return SpuSimpleDataset(**dataset_kwargs)\r\n\r\n elif dataset == 'multiNLI':\r\n from wilds.datasets.multiNLI_dataset import MultiNLIDataset\r\n return MultiNLIDataset(**dataset_kwargs)\r\n\r\n elif dataset == 'noisy_mnist':\r\n from wilds.datasets.noisy_mnist_dataset import NMNISTDataset\r\n return NMNISTDataset(**dataset_kwargs)\r\n\r\n elif dataset == 'rot_mnist':\r\n from wilds.datasets.rot_mnist_dataset import RMNISTDataset\r\n return RMNISTDataset(**dataset_kwargs)\r\n\r\n elif dataset == 'spu_mnist':\r\n from wilds.datasets.spu_mnist_dataset import SpuMNISTDataset\r\n return SpuMNISTDataset(**dataset_kwargs)", "def load_data(y_name='Species'):\n train_path = tf.keras.utils.get_file(args.TRAIN_URL.split('/')[-1], args.TRAIN_URL)\n test_path = tf.keras.utils.get_file(args.TEST_URL.split('/')[-1], args.TEST_URL)\n\n train = pd.read_csv(train_path, names=args.CSV_COLUMN_NAMES, header=0)\n train_x, train_y = train, train.pop(y_name)\n\n test = pd.read_csv(test_path, names=args.CSV_COLUMN_NAMES, header=0)\n test_x, test_y = test, test.pop(y_name)\n\n return (train_x, train_y), (test_x, test_y)", "def get_dataset(args):\n\n if args['experiment']['dataset'] == Dataset.mindsets:\n xs, ys, cs = make_mindsets(mindset_sizes=args['dataset']['mindset_sizes'],\n nb_questions=args['dataset']['nb_questions'],\n nb_useless=args['dataset']['nb_useless'],\n noise=args['dataset']['noise'],\n seed=args['experiment']['seed'])\n\n return Data(xs=xs, ys=ys, 
cs=cs)\n\n if args['experiment']['dataset'] == Dataset.questionnaire_likert:\n xs, ys, cs = make_likert_questionnaire(nb_samples=args['dataset']['nb_samples'],\n nb_features=args['dataset']['nb_features'],\n nb_mindsets=args['dataset']['nb_mindsets'],\n centers=args['dataset']['centers'],\n range_answers=args['dataset']['range_answers'],\n seed=args['experiment']['seed'])\n\n return Data(xs=xs, ys=ys, cs=cs)\n\n if args['experiment']['dataset'] == Dataset.retinal:\n xs, ys = load_RETINAL(root_path=args['root_dir'],\n nb_bins=args['dataset']['nb_bins'],\n max_idx=args['dataset']['max_idx'])\n\n return Data(xs=xs, ys=ys)\n\n if args['experiment']['dataset'] == Dataset.moons:\n xs, ys = make_moons(n_samples=args['dataset']['n_samples'],\n noise=args['dataset']['noise'],\n random_state=args['experiment']['seed'])\n\n return Data(xs=xs, ys=ys)\n\n if args['experiment']['dataset'] == Dataset.breast_cancer_wisconsin:\n xs, ys = load_CANCER(args['dataset']['nb_bins'])\n\n return Data(xs=xs, ys=ys)\n\n if args['experiment']['dataset'] == Dataset.SBM:\n A, ys, G = load_SBM(block_sizes=args['dataset']['block_sizes'],\n p_in=args['dataset']['p'],\n p_out=args['dataset']['q'],\n seed=args['experiment']['seed'])\n\n return Data(ys=ys, A=A, G=G)\n\n if args['experiment']['dataset'] == Dataset.gaussian_mixture:\n xs, ys = make_blobs(n_samples=args['dataset']['blob_sizes'],\n centers=args['dataset']['blob_centers'],\n n_features=args['dataset']['blob_centers'],\n cluster_std=args['dataset']['blob_variances'],\n random_state=args['experiment']['seed'])\n\n return Data(xs=xs, ys=ys)\n\n if args['experiment']['dataset'] == Dataset.LFR:\n A, ys, G = load_LFR(nb_nodes=args['dataset']['nb_nodes'],\n tau1=args['dataset']['tau1'],\n tau2=args['dataset']['tau2'],\n mu=args['dataset']['mu'],\n average_degree=args['dataset']['average_degree'],\n min_community=args['dataset']['min_community'],\n seed=args['experiment']['seed'])\n\n return Data(ys=ys, A=A, G=G)\n\n if args['experiment']['dataset'] == Dataset.wave:\n df = pd.read_csv('datasets/waveform.csv')\n xs = df[df.columns[:-1]].to_numpy()\n ys = df[df.columns[-1]].to_numpy()\n\n return Data(xs=xs, ys=ys)\n\n raise ValueError('Wrong name for a dataset')", "def load(self, config, dataset_type, *args, **kwargs):\n base_dataset_name = config.get(\"base_dataset_name\", \"vqa2\")\n base_dataset_config = config.get(\"base_dataset\", config)\n # instantiate base dataset\n # instantiate base dataser builder\n base_dataset_builder_class = registry.get_builder_class(base_dataset_name)\n base_dataset_builder_instance = base_dataset_builder_class()\n # build base dataset instance\n base_dataset_builder_instance.build_dataset(base_dataset_config)\n base_dataset = base_dataset_builder_instance.load_dataset(\n base_dataset_config, dataset_type\n )\n if hasattr(base_dataset_builder_instance, \"update_registry_for_model\"):\n base_dataset_builder_instance.update_registry_for_model(base_dataset_config)\n\n # instantiate vinvl dataset\n vinvl_text_processor = config[\"processors\"][\"text_processor\"]\n with open_dict(base_dataset_config):\n base_dataset_config[\"processors\"][\"text_processor\"] = vinvl_text_processor\n base_dataset_config[\"label_map\"] = config[\"label_map\"]\n\n vinvl_dataset = super().load(base_dataset_config, dataset_type, *args, **kwargs)\n vinvl_dataset.set_base_dataset(base_dataset)\n return vinvl_dataset", "def get_inference_dataset(dataset_path,debug=False):\n\n if not os.path.exists(dataset_path):\n assert False, \"Couldn't find path : 
'{}'\".format(dataset_path)\n print(\"\\nprocessing data :'{}'\\n\".format(dataset_path))\n\n path = os.getcwd()\n os.chdir(dataset_path)\n\n dataset = []\n for file in tqdm(os.listdir('.')):\n if not file.endswith('features'):\n continue\n name = file.replace(\".features\", \"\") # removing \"features\"\n x = np.loadtxt(name + '.features')\n np.nan_to_num(x, copy=False)\n #get labels file\n if os.path.exists(name + '.test.labels'):\n labels_file = open(name + '.test.labels').readlines()\n elif os.path.exists(name + '.labels'):\n labels_file = open(name + '.labels').readlines()\n else:\n continue\n file_info = (name , float(labels_file[-2].split(' ')[-1]),\n np.fromstring(labels_file[1].strip(), sep=' ')[:2],\n float(labels_file[2]))#(file name,window_offset,(onset,offset),vot_type)\n\n dataset.append([torch.from_numpy(x).float(), file_info])\n if debug and len(dataset)>100:\n break\n os.chdir(path)\n\n return DataLoader(dataset,shuffle=False)", "def load_dataset():\n\n\n train_dd_loader = DailyDialogLoader(PATH_TO_TRAIN_DATA, load=False)\n train_dataloader = DataLoader(train_dd_loader, batch_size=16, shuffle=True, num_workers=0,\n collate_fn=PadCollate())\n\n test_dd_loader = DailyDialogLoader(PATH_TO_TEST_DATA, load=True)\n test_dataloader = DataLoader(test_dd_loader, batch_size=1, shuffle=False, num_workers=0,\n collate_fn=PadCollate())\n\n assert train_dd_loader.vocabulary.n_words == test_dd_loader.vocabulary.n_words\n\n return train_dd_loader, train_dataloader, test_dataloader", "def from_dataset(cls, name, options=None):\n if options is None:\n options = dict()\n if name == 'lin_aaai12':\n return cls.from_lin_aaai12(**options)\n elif name == 'bragg_hcomp13':\n return cls.from_bragg_hcomp13(**options)\n elif name == 'bragg_teach':\n return cls.from_bragg_teach(**options)\n elif name == 'rajpal_icml15':\n return cls.from_rajpal_icml15(**options)\n else:\n raise NotImplementedError", "def get_dataset(self, cid, type=\"train\"):\n dataset = torch.load(\n os.path.join(self.path, type, \"data{}.pkl\".format(cid)))\n return dataset", "def load_dataset(args, corpus_type, shuffle):\n assert corpus_type in [\"train\", \"valid\", \"test\"]\n\n def _lazy_dataset_loader(pt_file, corpus_type):\n dataset = torch.load(pt_file)\n logger.info('Loading %s dataset from %s, number of examples: %d' %\n (corpus_type, pt_file, len(dataset)))\n return dataset\n\n # Sort the glob output by file name (by increasing indexes).\n pts = sorted(glob.glob(args.data_path + '.' + corpus_type + '.[0-9]*.pt'))\n if pts:\n if (shuffle):\n random.shuffle(pts)\n\n for pt in pts:\n yield _lazy_dataset_loader(pt, corpus_type)\n else:\n # Only one inputters.*Dataset, simple!\n pt = args.data_path + '.' 
+ corpus_type + '.pt'\n yield _lazy_dataset_loader(pt, corpus_type)", "def load_data(self) -> None:", "def load_datasets():\n idx, data_paths, data_names, desc_paths, descrips, sql_paths, \\\n sql_names, loaded, table_size, \\\n loaded_names = mgr.build_datasets_table()\n return render_template('load_datasets.html',\n zip=zip(idx, data_paths, data_names, desc_paths,\n descrips, sql_paths, sql_names, loaded,\n table_size),\n data_names=loaded_names)", "def load_dataset():\n try:\n data_path = ROOT_PATH.joinpath('data', 'Complete_TAVG_Daily_LatLong1_1880.nc')\n ds = xarray.open_dataset(data_path)\n return ds\n except FileNotFoundError:\n raise", "def load_data(self):\n self.tif_file = self._find_tif_file()\n if self.with_labeling is not None:\n self.colabel_file = self._find_colabeled_file()\n self.colabel_stack = self._load_colabeled_img()\n self.dff, self.indices = self._populate_dff_data()\n self.loaded = True", "def load_data(data_config):\n return tfds.load(data_config.path, with_info=data_config.load_with_info)", "def load_data(data_config):\n return tfds.load(data_config.path, with_info=data_config.load_with_info)", "def loadData(self):\n batch_size = 256\n \n #if self.conv_sg == True:\n # batch_size = 1 \n \n download = True\n root = self.root + self.dataset\n if self.dataset == \"MNIST\": \n transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])\n trainset = torchvision.datasets.MNIST(root, train=True, download=download, transform=transform)\n testset = torchvision.datasets.MNIST(root, train=False, download=download, transform=transform)\n \n if self.dataset == \"CIFAR10\":\n transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465,), (0.2023, 0.1994, 0.2010,))])\n trainset = torchvision.datasets.CIFAR10(root, train=True, download=download, transform=transform)\n testset = torchvision.datasets.CIFAR10(root, train=False, download=download, transform=transform)\n \n if self.dataset == \"CIFAR100\":\n transform = transforms.Compose([transforms.ToTensor()])\n trainset = torchvision.datasets.CIFAR100(root, train=True, download=download, transform=transform)\n testset = torchvision.datasets.CIFAR100(root, train=False, download=download, transform=transform)\n \n \n trainloader = torch.utils.data.DataLoader(trainset, batch_size = batch_size,\n shuffle=False, num_workers=0, pin_memory = False)\n \n testloader = torch.utils.data.DataLoader(testset, batch_size= batch_size,\n shuffle=False, num_workers=2, pin_memory = False)\n \n return trainloader, testloader", "def dataloader(self):\n\n # load / split data\n train_data = self.data.get_train_data()\n if self.args.use_dev:\n train_data, dev_data = self.data.split_data(train_data)\n test_data = self.data.get_test_data()\n\n #print(train_data[0])\n #print(dev_data[0])\n #print(test_data[0])\n\n # build dataset\n train_dataset = self.loader.build_dataset(\n train_data, \n self.args.train_max_seq_len)\n train_loader = self.loader.build_dataloader(\n train_dataset, 'train')\n\n test_dataset = self.loader.build_dataset(\n test_data,\n self.args.eval_max_seq_len)\n test_loader = self.loader.build_dataloader(\n test_dataset, 'test')\n\n if self.args.use_dev:\n dev_dataset = self.loader.build_dataset(\n dev_data,\n self.args.eval_max_seq_len)\n dev_loader = self.loader.build_dataloader(\n dev_dataset, 'dev')\n return train_loader, dev_loader, test_loader\n else:\n return train_loader, test_loader", "def test_correct_dataset_found_by_name(self):\n dataset_name = 
'my_unlikely_dataset_name'\n dataset = factories.SourceDatasetFactory.create(\n dataset_name=dataset_name,\n source_study_version=self.source_study_version\n )\n url = self.get_url(self.study.pk)\n response = self.client.get(url, {'q': dataset_name})\n returned_pks = get_autocomplete_view_ids(response)\n self.assertEqual(returned_pks, [dataset.i_id])", "def test_correct_dataset_found_by_name(self):\n dataset_name = 'my_unlikely_dataset_name'\n dataset = factories.SourceDatasetFactory.create(\n dataset_name=dataset_name,\n source_study_version=self.source_study_version\n )\n url = self.get_url(self.study.pk)\n response = self.client.get(url, {'q': dataset_name})\n returned_pks = get_autocomplete_view_ids(response)\n self.assertEqual(returned_pks, [dataset.i_id])", "def get_dataset(self, identifier):\n # Test if a subfolder for the given dataset identifier exists. If not\n # return None.\n dataset_dir = self.get_dataset_dir(identifier)\n if not os.path.isdir(dataset_dir):\n return None\n # Load the dataset handle\n return FileSystemDatasetHandle.from_file(\n descriptor_file=os.path.join(dataset_dir, DESCRIPTOR_FILE),\n data_file=os.path.join(dataset_dir, DATA_FILE),\n annotations=DatasetMetadata.from_file(\n self.get_metadata_filename(identifier)\n )\n )", "def load_data(is_train, num_par=4):\n if is_train:\n src = FLAGS.train_data_path\n else:\n src = FLAGS.dev_data_path\n\n if src is None:\n raise ValueError(\"Missing data path\")\n\n if FLAGS.dataset == \"boolq\":\n return load_boolq_file(src, num_par)\n else:\n return load_nli_file(src, num_par)", "def _read_dataset(self, dataset_path):\n dataset = pd.read_pickle(dataset_path)\n return dataset", "def load_data():\n # Dictionary mapping image names to labels\n image_name_to_label = dict()\n\n # Store labels associated with image names\n notifier.send(\" Reading metadata...\")\n with open(\"data/metadata.csv\") as file: # Original dataset\n # Use images for normal, virus (unknown type), COVID-19, SARS\n metadata_contents = csv.DictReader(file)\n for row in metadata_contents:\n if row[\"Label\"].lower() == \"normal\":\n label = 2\n elif row[\"Label_2_Virus_category\"].lower() == \"covid-19\":\n label = 0\n elif row[\"Label_1_Virus_category\"].lower() == \"virus\":\n label = 1\n else:\n continue\n image_name_to_label[row[\"X_ray_image_name\"]] = label\n with open(\"data/metadata2.csv\") as file: # GitHub dataset\n # Use COVID-19, SARS\n metadata_contents = csv.DictReader(file)\n for row in metadata_contents:\n if row[\"filename\"] in image_name_to_label: # Image already added\n continue\n if \"covid-19\" in row[\"finding\"].lower():\n label = 0\n elif row[\"finding\"].lower() == \"sars\":\n label = 1\n else:\n continue\n image_name_to_label[row[\"filename\"]] = label\n with open(\"data/metadata_COVID-19.csv\") as file: # Additional COVID-19 images\n metadata_contents = csv.DictReader(file)\n for row in metadata_contents:\n name = \"COVID-19/\" + row[\"FILE NAME\"] + \".\" + row[\"FORMAT\"]\n image_name_to_label[name.lower().replace(\" \", \"\")] = 0\n with open(\"data/metadata_ViralPneumonia.csv\") as file: # Additional virus images\n metadata_contents = csv.DictReader(file)\n for row in metadata_contents:\n name = \"ViralPneumonia/\" + row[\"FILE NAME\"].replace(\"-\", \"(\") + \").\" + row[\"FORMAT\"]\n image_name_to_label[name.lower().replace(\" \", \"\")] = 1\n with open(\"data/metadata_Normal.csv\") as file: # Additional normal images\n metadata_contents = csv.DictReader(file)\n for row in metadata_contents:\n name = \"Normal/\" + 
row[\"FILE NAME\"].replace(\"-\", \"(\") + \").\" + row[\"FORMAT\"]\n image_name_to_label[name.lower().replace(\" \", \"\")] = 2\n\n notifier.send(\" Loading images...\")\n images, labels = load_images(image_name_to_label)\n\n notifier.send(\" Splitting data...\")\n return split_data(images, labels)", "def _load_data(dataset, is_training=False):\n import data_augmentation as aug\n import features\n\n features_path = os.path.join(cfg.extraction_path, dataset.name + '.h5')\n x = utils.timeit(lambda: features.load_features(features_path),\n 'Loaded features of %s dataset' % dataset.name)\n\n # Clip dynamic range to 90 dB\n x = np.maximum(x, x.max() - 90.0)\n\n # Load scaler from file if cached, or else compute it.\n scaler_path = cfg.scaler_path\n if os.path.exists(scaler_path) or not is_training:\n with open(scaler_path, 'rb') as f:\n scaler = pickle.load(f)\n else:\n scaler = utils.timeit(lambda: utils.compute_scaler(x),\n 'Computed standard scaler')\n with open(scaler_path, 'wb') as f:\n pickle.dump(scaler, f)\n\n x = utils.timeit(lambda: utils.standardize(x, scaler),\n 'Standardized %s features' % dataset.name)\n\n names, y = utils.timeit(lambda: utils.read_metadata(dataset.metadata_path),\n 'Loaded %s metadata' % dataset.name)\n if dataset == cfg.training_set and cfg.enable_augmentation:\n names, y = aug.expand_metadata((names, y))\n\n return x, y, names", "def loadData(infile,k):\n f = open(infile,'r')\n #f = f.read().split(\"\\n\")\n #raw = json.loads(f[1])\n f = f.read()\n raw = json.loads(f)\n data = np.array(raw)\n dataset = data[k]\n return dataset", "def get_dataloader(data_folder, model_name, data_name, size=\"default\"):\n training_set = None\n validation_set = None\n\n if model_name == \"Howe_Patterson\":\n if data_name == \"combined\":\n partition = []\n for data_fold in data_folder:\n partition.append(load_obj(os.path.join(data_fold, 'data_partition.pkl')))\n\n elif size == \"small\":\n partition = load_obj(os.path.join(data_folder, 'data_partition_small.pkl'))\n elif size == \"tiny\":\n partition = load_obj(os.path.join(data_folder, 'data_partition_tiny.pkl'))\n else:\n partition = load_obj(os.path.join(data_folder, 'data_partition.pkl'))\n\n\n if data_name == \"SHHS\":\n training_set = Dataset_full_SHHS(partition['train'], data_folder)\n validation_set = Dataset_full_SHHS(partition['validation'], data_folder)\n elif data_name == \"snooze\":\n training_set = Dataset_full(partition['train'], data_folder)\n validation_set = Dataset_full(partition['validation'], data_folder)\n elif data_name == \"philips\":\n training_set = Dataset_Philips_full(partition['train'], data_folder)\n validation_set = Dataset_Philips_full(partition['validation'], data_folder)\n elif data_name == \"HMC\":\n print(\"{} not implemented data\".format(data_name))\n exit()\n elif data_name == \"combined\":\n training_set = ConcatDataset(\n Dataset_full(partition[0]['train'], data_folder[0], downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False),\n Dataset_full_SHHS(partition[1]['train'], data_folder[1], downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False))\n validation_set = ConcatDataset(\n Dataset_full(partition[0]['validation'], data_folder[0], downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False),\n Dataset_full_SHHS(partition[1]['validation'], data_folder[1], downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False))\n else:\n print(\"{} wrong data for dataloader\".format(data_name))\n exit()\n elif model_name == 
\"Deep_Sleep\":\n if data_name == \"combined\":\n partition = []\n for data_fold in data_folder:\n partition.append(load_obj(os.path.join(data_fold, 'data_partition.pkl')))\n\n elif size == \"small\":\n partition = load_obj(os.path.join(data_folder, 'data_partition_small.pkl'))\n elif size == \"tiny\":\n partition = load_obj(os.path.join(data_folder, 'data_partition_tiny.pkl'))\n else:\n partition = load_obj(os.path.join(data_folder, 'data_partition.pkl'))\n\n if data_name == \"SHHS\":\n training_set = Dataset_full_SHHS(partition['train'], data_folder, downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False)\n validation_set = Dataset_full_SHHS(partition['validation'], data_folder, downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False)\n elif data_name == \"snooze\":\n training_set = Dataset_full(partition['train'], data_folder, downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False)\n validation_set = Dataset_full(partition['validation'], data_folder, downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False)\n elif data_name == \"philips\":\n training_set = Dataset_Philips_full(partition['train'], data_folder, downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False)\n validation_set = Dataset_Philips_full(partition['validation'], data_folder, downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False)\n elif data_name == \"HMC\":\n print(\"{} not implemented data\".format(data_name))\n exit()\n elif data_name == \"combined\":\n # TODO combined dataset https://discuss.pytorch.org/t/train-simultaneously-on-two-datasets/649/17\n training_set = ConcatDataset(\n Dataset_full(partition[0]['train'], data_folder[0], downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False),\n Dataset_full_SHHS(partition[1]['train'], data_folder[1], downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False))\n validation_set = ConcatDataset(\n Dataset_full(partition[0]['validation'], data_folder[0], downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False),\n Dataset_full_SHHS(partition[1]['validation'], data_folder[1], downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False))\n else:\n print(\"{} wrong data for dataloader\".format(data_name))\n exit()\n elif model_name == \"ConvNet_IID\":\n if data_name == \"combined\":\n partition = []\n for data_fold in data_folder:\n partition.append(load_obj(os.path.join(data_fold, 'data_partition_IID_windows.pkl')))\n else:\n partition = load_obj(os.path.join(data_folder, 'data_partition_IID_windows.pkl'))\n if data_name == \"SHHS\":\n training_set = Dataset_IID_window_SHHS(partition['train'], data_folder)\n validation_set = Dataset_IID_window_SHHS(partition['validation'], data_folder)\n elif data_name == \"snooze\":\n training_set = Dataset_IID_window(partition['train'], data_folder)\n validation_set = Dataset_IID_window(partition['validation'], data_folder)\n elif data_name == \"philips\":\n print(\"{} not implemented data\".format(data_name))\n exit()\n elif data_name == \"HMC\":\n print(\"{} not implemented data\".format(data_name))\n exit()\n elif data_name == \"combined\":\n training_set = ConcatDataset(\n Dataset_IID_window(partition[0]['train'], data_folder[0]),\n Dataset_IID_window_SHHS(partition[1]['train'], data_folder[1]))\n validation_set = ConcatDataset(\n Dataset_IID_window(partition[0]['validation'], data_folder[0]),\n Dataset_IID_window_SHHS(partition[1]['validation'], 
data_folder[1]))\n else:\n print(\"{} wrong data for dataloader\".format(data_name))\n exit()\n\n else:\n print(\"{} wrong model for dataloader\".format(model_name))\n exit()\n\n return training_set, validation_set", "def load_dataset(self, split='train'):\n path = self.args.data\n if not os.path.exists(path):\n raise FileNotFoundError(\n \"Dataset not found: ({})\".format(path)\n )\n\n files = os.listdir(path) if os.path.isdir(path) else [path]\n files = [f for f in files if split in f]\n assert len(files) > 0\n\n self.datasets[split] = CombineBertData(files)\n\n\n \"\"\"\n dataset = data_utils.load_indexed_dataset(\n split_path, self.dictionary, self.args.dataset_impl, combine=combine\n )\n if dataset is None:\n raise FileNotFoundError(\n \"Dataset not found: {} ({})\".format(split, split_path)\n )\n\n dataset = TokenBlockDataset(\n dataset,\n dataset.sizes,\n self.args.tokens_per_sample,\n pad=self.dictionary.pad(),\n eos=self.dictionary.eos(),\n break_mode=self.args.sample_break_mode,\n include_targets=True,\n )\n\n add_eos_for_other_targets = (\n self.args.sample_break_mode is not None\n and self.args.sample_break_mode != \"none\"\n )\n\n self.datasets[split] = MonolingualDataset(\n dataset,\n dataset.sizes,\n self.dictionary,\n self.output_dictionary,\n add_eos_for_other_targets=add_eos_for_other_targets,\n shuffle=True,\n targets=self.targets,\n add_bos_token=self.args.add_bos_token,\n )\n \"\"\"", "def load_dataframe(dataset_name):\n filename = '{}.pickle'.format(dataset_name)\n cache_matrio_data(filename)\n return pd.read_pickle(os.path.join(CAMD_CACHE, filename))", "def get_data(name):\n\n if name == 'train' or name == 'unlabeled':\n return np.expand_dims(np.load(os.path.join(DATADIR, 'source_d10_train_X.npy')), axis=-1), \\\n np.argmax(np.load(os.path.join(DATADIR, 'source_d10_train_y.npy')), axis=1)\n # custom svhn has only training/validation set\n elif name == 'validation' or name == 'val':\n return np.expand_dims(np.load(os.path.join(DATADIR, 'source_d10_val_X.npy')), axis=-1), \\\n np.argmax(np.load(os.path.join(DATADIR, 'source_d10_val_y.npy')), axis=1)", "def test_dataset_details():\n with new_test_dataset(2) as test_ds:\n args = build_register_args(test_ds.copy_to_s3())\n ds_name = args['name']\n URLs.run(url_info=URLs.register_url(), json_body=args)\n\n ds_parts = URLs.run(url_info=URLs.dataset_parts_url(ds_name)).json\n assert ds_parts['filenames'] == test_ds.expected_parts.filenames\n expected_columns = json.loads(datafile_schema().to_json())['columns']\n\n ds_short_schema = URLs.run(url_info=URLs.dataset_schema_url(ds_name, full=False)).json\n assert ds_short_schema['columns'] == expected_columns\n\n ds_full_schema = URLs.run(url_info=URLs.dataset_schema_url(ds_name, full=True)).json\n assert ds_full_schema['columns'][DEFAULT_TIMESTAMP_COLUMN]['colattrs']['numericMin'] == BASE_TIME\n\n URLs.run(url_info=URLs.unregister_url(ds_name))", "def load_dataset(label, max_deltaR=None):\n\n # Files should be located in the datasets directory\n particles = np.load(\"datasets/PARTICLES_\"+label+\".npy\", allow_pickle=True)\n\n with open(\"datasets/DROP_\"+label, 'rb') as pickle_file:\n water = pickle.load(pickle_file)\n\n with open(\"datasets/INTERACTIONS_\"+label, 'rb') as pickle_file:\n interactions = pickle.load(pickle_file)\n\n samples = particles_to_samples(particles)\n\n if max_deltaR is not None:\n whr = water.deltaR.flatten() < max_deltaR\n samples = samples[whr]\n water.deltaR = water.deltaR[whr]\n water.theta = water.theta[whr]\n\n return samples, water, interactions" ]
[ "0.77818465", "0.7501332", "0.7296937", "0.7217461", "0.71556944", "0.70442104", "0.7043356", "0.7002447", "0.69755036", "0.69738203", "0.6970711", "0.6962691", "0.6954843", "0.69172084", "0.69021535", "0.68848944", "0.6871628", "0.6846385", "0.68101037", "0.6778666", "0.67686003", "0.6674326", "0.6635063", "0.65952444", "0.6552296", "0.65509504", "0.65509504", "0.65509504", "0.6550834", "0.6543146", "0.65425146", "0.654166", "0.65366864", "0.65346086", "0.65318716", "0.6530894", "0.65264124", "0.65070885", "0.64958835", "0.6466124", "0.6455837", "0.6445818", "0.64180464", "0.64121526", "0.6405481", "0.6396473", "0.6388251", "0.63846487", "0.6379483", "0.6369258", "0.63568735", "0.63394624", "0.6330125", "0.6318018", "0.62926865", "0.62765074", "0.6263217", "0.6247926", "0.6247768", "0.6232444", "0.6230147", "0.62247866", "0.6209076", "0.6207267", "0.6205337", "0.6191724", "0.618889", "0.6184631", "0.6182315", "0.6173686", "0.61636144", "0.6148539", "0.613282", "0.6129673", "0.61223114", "0.6120499", "0.6103556", "0.6099437", "0.6088738", "0.60861045", "0.60856515", "0.60833675", "0.60717785", "0.60717785", "0.6070236", "0.6061469", "0.6061428", "0.6061428", "0.6042559", "0.6034611", "0.6034488", "0.603344", "0.6028783", "0.6024103", "0.6022084", "0.60130644", "0.60129553", "0.6009686", "0.6005302", "0.599754" ]
0.69610137
12
Creates a GSEA analysis. This GSEA implementation is based on gseaPy.
def __init__(self, data_source, num_resamples=NUM_RESAMPLES, method=GSEA_RANKING_SNR, case=None, control=None, preprocessors=None): logger.debug('GSEA initialised with num_resamples=%d and ranking_method=%s' % (num_resamples, method)) super().__init__(data_source, preprocessors=preprocessors) self.num_resamples = num_resamples self.method = method self.case = case self.control = control
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _run_gsea(self, gct_file, gmt_file, cls_file, gsea_dir):\n r = robjects.r\n r.source(self.gsea_r_location)\n r(\"\"\"GSEA( # Input/Output Files :-------------------------------------------\n input.ds = \"{}\", # Input gene expression Affy dataset file in RES or GCT format\n input.cls = \"{}\", # Input class vector (phenotype) file in CLS format\n gs.db = \"{}\", # Gene set database in GMT format\n output.directory = \"{}/\", # Directory where to store output and results (default: \"\")\n # Program parameters :----------------------------------------------------------------------------------------------------------------------------\n doc.string = \"syngsea\", # Documentation string used as a prefix to name result files (default: \"GSEA.analysis\")\n non.interactive.run = F, # Run in interactive (i.e. R GUI) or batch (R command line) mode (default: F)\n reshuffling.type = \"sample.labels\", # Type of permutation reshuffling: \"sample.labels\" or \"gene.labels\" (default: \"sample.labels\" \n nperm = 1000, # Number of random permutations (default: 1000)\n weighted.score.type = 1, # Enrichment correlation-based weighting: 0=no weight (KS), 1= weigthed, 2 = over-weigthed (default: 1)\n nom.p.val.threshold = -1, # Significance threshold for nominal p-vals for gene sets (default: -1, no thres)\n fwer.p.val.threshold = -1, # Significance threshold for FWER p-vals for gene sets (default: -1, no thres)\n fdr.q.val.threshold = 0.25, # Significance threshold for FDR q-vals for gene sets (default: 0.25)\n topgs = 20, # Besides those passing test, number of top scoring gene sets used for detailed reports (default: 10)\n adjust.FDR.q.val = F, # Adjust the FDR q-vals (default: F)\n gs.size.threshold.min = 10, # Minimum size (in genes) for database gene sets to be considered (default: 25)\n gs.size.threshold.max = 500, # Maximum size (in genes) for database gene sets to be considered (default: 500)\n reverse.sign = F, # Reverse direction of gene list (pos. enrichment becomes negative, etc.) (default: F)\n preproc.type = 0, # Preproc.normalization: 0=none, 1=col(z-score)., 2=col(rank) and row(z-score)., 3=col(rank). (def: 0)\n random.seed = 111, # Random number generator seed. (default: 123456)\n perm.type = 0, # For experts only. Permutation type: 0 = unbalanced, 1 = balanced (default: 0)\n fraction = 1.0, # For experts only. Subsampling fraction. Set to 1.0 (no resampling) (default: 1.0)\n replace = F, # For experts only, Resampling mode (replacement or not replacement) (default: F)\n save.intermediate.results = F, # For experts only, save intermediate results (e.g. matrix of random perm. 
scores) (default: F)\n OLD.GSEA = F, # Use original (old) version of GSEA (default: F)\n use.fast.enrichment.routine = T # Use faster routine to compute enrichment for random permutations (default: T)\n )\"\"\".format(gct_file, cls_file, gmt_file, gsea_dir))\n\n r(\"\"\"GSEA.Analyze.Sets(\n directory = \"{}/\", # Directory where to store output and results (default: \"\")\n topgs = 20, # number of top scoring gene sets used for analysis\n height = 16,\n width = 16\n )\"\"\".format(gsea_dir))", "def get_results(self, preprocess=True):\n logger.debug('Calculating GSEA')\n measurement_df = self._get_measurement_df(preprocess)\n\n annot_df = self.data_source.get_annotations()\n joined = pd.merge(left=measurement_df, right=annot_df, left_index=True, right_index=True)\n joined = joined.set_index('entity_id')\n unique_ids = [self.data_source._get_unique_id(x) for x in joined.index.values]\n joined.index = unique_ids\n joined = joined.drop_duplicates(keep='first').sort_index()\n\n # gene_sets is a dict. key is pw name, values are a list of entries in that pathway\n gene_sets = {}\n assert len(self.data_source.dataset_pathways) > 0, 'No pathways found in the dataset'\n pathways = list(self.data_source.dataset_pathways)\n for pw in pathways:\n pathway_row_ids = self.data_source.dataset_pathways_to_row_ids[pw]\n pw_unique_ids = []\n for row_id in pathway_row_ids:\n pw_unique_ids.extend(self.data_source.dataset_row_id_to_unique_ids[row_id])\n pw_unique_ids = list(set(pw_unique_ids))\n gene_sets[pw] = pw_unique_ids\n\n # run GSEA for all comparisons\n all_dfs = []\n for comp in self.data_source.comparisons:\n if not is_comparison_used(comp, self.case, self.control):\n continue\n case = comp['case']\n control = comp['control']\n logger.debug('Running comparison case=%s control=%s' % (case, control))\n pheno_cols = set(self.data_source.get_experimental_design()['groups'][case])\n df_cols = measurement_df.columns.values\n\n # for each comparison, we need to create C (phenotype labels)\n # Loop over df_cols and store an indicator into C.\n # Entries in C is 1 if that column belongs to the case group, otherwise it's a 0\n C = []\n for col in df_cols:\n if col in pheno_cols:\n C.append(1)\n else:\n C.append(0)\n C = np.array(C)\n\n # actually runs GSEA here\n data = joined\n cls = C.tolist()\n outdir = None\n min_size = 1\n max_size = 1000\n permutation_num = self.num_resamples\n weighted_score_type = 1\n permutation_type = 'phenotype'\n method = self.method\n ascending = True\n processes = 1\n figsize = (6.5, 6)\n format = 'pdf',\n graph_num = 20\n no_plot = True\n seed = None\n verbose = False\n\n msea = MSEA(data, gene_sets, cls, outdir, min_size, max_size, permutation_num,\n weighted_score_type, permutation_type, method, ascending, processes,\n figsize, format, graph_num, no_plot, seed, verbose)\n msea.run()\n\n # convert GSEA results to dataframe\n df = msea.res2d\n df = df.reset_index()\n selected = df[['Term', 'pval', 'fdr', 'es']]\n selected = selected.rename(columns={'Term': 'mapids'}).set_index('mapids')\n\n col_name = comp['name'] + ' p-value'\n es_colname = comp['name'] + ' ES_score'\n if self.data_source.database_name is not None:\n comb_col_name = '%s %s %s' % (self.data_source.database_name, comp['name'], 'comb_p')\n else:\n comb_col_name = '%s %s' % (comp['name'], 'comb_p')\n\n pathway_df = selected.rename(columns={\n 'pval': col_name,\n 'es': es_colname,\n 'fdr': comb_col_name\n })\n all_dfs.append(pathway_df)\n\n # combine all the results across all comparisons\n combined_df = 
pd.concat(all_dfs, axis=1, sort=False)\n combined_df.index.name = 'mapids'\n\n # create a dataframe of pathway mapids and names\n pw_name_df = []\n for map_id in pathways:\n pw_name = self.data_source.pathway_dict[map_id]['display_name']\n pw_name_df.append((map_id, pw_name))\n pw_name_df = pd.DataFrame(pw_name_df, columns=['mapids', 'pw_name']).set_index(['mapids'])\n combined_df = pw_name_df.merge(combined_df, left_index=True, right_index=True)\n\n # add formula coverage information\n mapids = combined_df.index.values.tolist()\n cov_df = self.data_source._calculate_coverage_df(mapids)\n coverage_df = cov_df.reindex(combined_df.index) # make sure dfs are in same order before merging\n\n # Merge the two dfs together\n pathway_df = pd.merge(combined_df, coverage_df, left_index=True, right_index=True, how='outer')\n\n # del pathway_df.index.name\n pathway_df.rename_axis(None, inplace=True)\n\n # post-processing to filter pathway dataframe by the minimum number of hits\n pathway_df = post_filter_df_by_min_hits(pathway_df, self.data_source.min_hits)\n return pathway_df", "def ase(g, dim):\n\n path = os.path.join(os.path.abspath(os.path.dirname(__file__)),\n \"ase.interface.R\")\n cmd = \"\"\"\n source(\"%s\")\n fn <- function(g, dim) {\n ase.interface(g, dim)\n }\n \"\"\" % path\n\n return robjects.r(cmd)(g._object, dim)", "def main():\r\n graphPerformance = False # Built in graphing ability, currently not functional, but mechanism is in place.\r\n trainData = \"2_1000_0_1600_0_0_CV_0_Train.txt\"\r\n testData = \"2_1000_0_1600_0_0_CV_0_Test.txt\"\r\n outProg = \"GH_GALE_ProgressTrack\"\r\n outPop = \"GH_GALE_PopulationOut\"\r\n bitLength = 1 # This implementation is not yet set up to handle other rule representations, or bit encoding lengths.\r\n CVpartitions = 10\r\n trackCycles = 1\r\n \r\n iterInput = '5.10.20' \r\n xdim = 10\r\n ydim = 10\r\n dist = 2\r\n wild = 0.75\r\n prune = 1\r\n \r\n #Figure out the iteration stops for evaluation, and the max iterations.\r\n iterList = iterInput.split('.')\r\n for i in range(len(iterList)):\r\n iterList[i] = int(iterList[i])\r\n lastIter = iterList[len(iterList)-1] \r\n\r\n #Sets up up algorithm to be run.\r\n GALEConstants.setConstants(prune, wild)\r\n e = GALE_Environment(trainData,testData,bitLength)\r\n sampleSize = e.getNrSamples()\r\n gale = GALE(e, outProg, outPop, bitLength, CVpartitions, graphPerformance, xdim, ydim, dist)\r\n \r\n #Set some GALE parameters.\r\n if trackCycles == 'Default':\r\n gale.setTrackingIterations(sampleSize)\r\n else:\r\n gale.setTrackingIterations(trackCycles) \r\n gale.setNumberOfTrials(lastIter, iterList) \r\n \r\n #Run the GALE Algorithm \r\n gale.runGALE()", "def get_ssa(self, name='main'):\n\n ssa = NetworkEnsemble()\n ssa.add_function(name, self.get_function())\n return ssa", "def write_ogse(args):\n if args.ref_diode:\n add_ogse_ref_diode(args.ogse_dir / DB_REF_DIODE, args.l1a_file)\n\n if args.avantes:\n add_ogse_wav_mon(args.ogse_dir / DB_WAV_MON, args.l1a_file)\n\n if args.helios:\n xds = helios_spectrum()\n xds.to_netcdf(args.l1a_file, mode='a',\n group='/gse_data/ReferenceSpectrum')\n\n if args.grande:\n xds = gsfc_polarizer()\n xds.to_netcdf(args.l1a_file, mode='a',\n group='/gse_data/SpectralDolP')\n for n_lamps in (1, 2, 3, 5, 9):\n if args.l1a_file.name.find(f'-L{n_lamps:1d}_') > 0:\n xds = grande_spectrum(n_lamps)\n xds.to_netcdf(args.l1a_file, mode='a',\n group='/gse_data/ReferenceSpectrum')\n break\n\n if args.opo_laser:\n target_cwl = args.l1a_file.stem.split('_')[2].split('-')[-1]\n xds = 
read_gse_excel(args.ogse_dir, target_cwl)\n if xds is not None:\n xds.to_netcdf(args.l1a_file, mode='a',\n group='/gse_data/OPO_laser')", "def run_gsea_experiments(self, perc_redundant):\n print('Perc redundant: {}'.format(perc_redundant))\n\n for i in range(self.iterations):\n print('\\ti = {}'.format(i))\n\n modified_gene_sets = copy.copy(self.gene_sets)\n\n redundant_genes = random.sample(self.uniq_genes, int(perc_redundant * len(self.uniq_genes)))\n\n for gene in redundant_genes:\n including_gsets = [\n gs_name for gs_name, gs_entry in modified_gene_sets.items()\n if gene in gs_entry['genes']\n ]\n new_gene_name = gene + '_REDUNDANT'\n mod_gsets = random.sample(including_gsets, int(0.5 * len(including_gsets)))\n\n for gs in mod_gsets:\n orig_genes = modified_gene_sets[gs]['genes']\n modified_gene_sets[gs]['genes'] = [\n new_gene_name if g == gene else g for g in orig_genes\n ]\n\n # write modified gene sets to disk\n gmt_file = os.path.join(\n self.base_dir,\n 'output',\n 'gsea_{0:.2f}'.format(perc_redundant),\n 'reactome_gene_sets_{0:.2f}.gmt'.format(perc_redundant)\n )\n\n self.write_gmt_file(gmt_file, modified_gene_sets)\n\n # run GSEA\n cls_file = os.path.join(self.base_dir, 'output', 'gsea_exp.cls')\n gct_file = os.path.join(self.base_dir, 'output', 'gsea_exp.gct')\n\n gsea_dir = os.path.join(self.base_dir, 'output', 'gsea_{0:.2f}'.format(perc_redundant), 'gsea_output')\n shutil.rmtree(gsea_dir)\n os.mkdir(gsea_dir)\n\n self._run_gsea(gct_file, gmt_file, cls_file, gsea_dir)\n\n # gsea output files to process\n tumor_all_leading_genes_file = os.path.join(\n gsea_dir,\n 'syngsea.all.leading.genes.TUMOR.gmt'\n )\n\n tumor_leading_genes_file = os.path.join(\n gsea_dir,\n 'syngsea.leading.genes.TUMOR.gct'\n )\n\n tumor_summary_results_file = os.path.join(\n gsea_dir,\n 'syngsea.SUMMARY.RESULTS.REPORT.TUMOR.txt'\n )\n\n tumor_leading_genes = self.process_all_leading_genes(tumor_all_leading_genes_file)\n tumor_leading_gene_occurrences = self.process_leading_genes(tumor_leading_genes_file)\n tumor_summary_results = self.process_results_file(tumor_summary_results_file)\n\n gsea_output_dict = {\n 'leading_genes': tumor_leading_genes,\n 'leading_genes_by_occurrence': tumor_leading_gene_occurrences,\n 'summary': tumor_summary_results,\n 'gene_sets': modified_gene_sets\n }\n\n # save to pickle\n gsea_pickle_file = os.path.join(\n self.base_dir,\n 'output',\n 'gsea_{0:.2f}'.format(perc_redundant),\n 'trial_{}.pkl'.format(i)\n )\n\n pickle.dump(gsea_output_dict, open(gsea_pickle_file, 'wb'))", "def _GloveSim(self,testDf,a):\r\n #Obtain the course description for the given course number.\r\n doc = testDf['description'][a]\r\n #Iterate over each word in the document. For each word in the GloVe vocab, append the word vector to a list\r\n Vectors = []\r\n for word in doc:\r\n if word in self.gloveModel.vocab:\r\n vector = self.gloveModel.get_vector(word)\r\n Vectors.append(vector)\r\n #Turn the list of vectors into an array.\r\n Vectors = np.array(Vectors)\r\n \r\n #Calculate the mean, mean+1stdev, maximum, and minimum of this array (each operation reducing \r\n #the array to eliminate rows). 
Concatenate these 4 measures into one matrix to serve as an index for a \r\n #document.\r\n sd = np.std(Vectors,axis=0)\r\n a0 = np.average(Vectors,axis=0)\r\n asd = a0+sd\r\n amax = np.max(Vectors,axis=0)\r\n amin = np.amin(Vectors,axis=0)\r\n \r\n return np.stack((a0,asd,amax,amin),1)", "def mainGA(NAME, target_output, target_image): \n global toolbox\n\n print(\"Target image: {0} Target output: {1}\".format(target_image, target_output)) \n sys.stdout.flush()\n\n model = load_model(NAME) \n fit = Fitness(NAME, model, target_image, target_output)\n\n #Genetic operators \n toolbox.register(\"evaluate\", fit.evaluate)\n toolbox.register(\"mate\", cxTwoPointCopy) \n #toolbox.register(\"mate\", cxUniform)\n toolbox.register(\"mutate\", tools.mutGaussian, mu=0.0, sigma=0.1, indpb=0.05)\n toolbox.register(\"select\", tools.selTournament, tournsize=3)\n \n\n pop = toolbox.population(n=50)\n hof = tools.HallOfFame(1, similar=np.array_equal)\n \n #stats = tools.Statistics(lambda ind: ind.fitness.values)\n #stats.register(\"avg\", np.mean)\n #stats.register(\"std\", np.std)\n #stats.register(\"min\", np.min)\n #stats.register(\"max\", np.max)\n \n pop, log = algorithms.eaSimple(pop, toolbox, cxpb=CXPB, mutpb=MUTPB, \n ngen=NGEN, halloffame=hof, \n verbose=False)\n\n return hof[0]", "def main():\n # parse command-line parameters\n parser = argparse.ArgumentParser()\n parser.add_argument('--verbose', action='store_true', help='be verbose')\n parser.add_argument('--ogse_dir', default='Logs', type=Path,\n help='directory with OGSE data')\n subparsers = parser.add_subparsers(help='sub-command help')\n parser_db = subparsers.add_parser('create_db',\n help='create new OGSE database')\n parser_db.add_argument('--ref_diode', nargs='*', default=[],\n help='names of reference-diode files')\n parser_db.add_argument('--wav_mon', nargs='*', default=[],\n help='names of Avantes wavelength-monitor files')\n parser_db.set_defaults(func=create_ogse_db)\n\n parser_wr = subparsers.add_parser('add',\n help=('add OGSE information to a'\n ' SPEXone Level-1A product'))\n parser_wr.add_argument('--ref_diode', action='store_true',\n help='add reference-diode data from OGSE database')\n parser_wr.add_argument('--avantes', action='store_true',\n help=('add Avantes wavelength monitoring'\n ' from OGSE database'))\n group_wr = parser_wr.add_mutually_exclusive_group()\n group_wr.add_argument('--helios', action='store_true',\n help='add Helios reference spectrum')\n group_wr.add_argument('--grande', action='store_true',\n help='add Grande reference spectrum')\n parser_wr.add_argument('--opo_laser', action='store_true',\n help='add wavelength of OPO laser')\n parser_wr.add_argument('l1a_file', default=None, type=Path,\n help='SPEXone L1A product')\n parser_wr.set_defaults(func=write_ogse)\n args = parser.parse_args()\n if args.verbose:\n print(args)\n\n # call whatever function was selected\n args.func(args)", "def _create_init_gp(self):\n reg_X = np.concatenate((self.pre_eval_points, self.history.query_points), axis=0)\n reg_Y = np.concatenate((self.pre_eval_vals, self.history.query_vals), axis=0)\n range_Y = reg_Y.max() - reg_Y.min()\n mean_func = lambda x: np.array([np.median(reg_X)] * len(x))\n kernel = SEKernel(self.domain_dim, range_Y/4.0,\n dim_bandwidths=0.05*np.sqrt(self.domain_dim))\n noise_var = (reg_Y.std()**2)/10\n self.gp = GP(reg_X, reg_Y, kernel, mean_func, noise_var)", "def eui_modesign():\n\n # setup design space\n # N ds ws wc lc g\n GAP = Struct()\n GAP.gd_min = np.array([1, 1e-3, 1e-3, 1e-3, 1e-3, 1e-5])\n 
GAP.gd_max = np.array([1e3, 1e-1, 1e-1, 1e-1, 1e-1, 1e-2])\n\n\n # setup genetic algorithm parameters--------------------------------------\n nobj=2 # number of objectives\n ngen=100 # number of generations\n npop = 100 # population size\n \n problem = MyProblem()\n problem.n_var = len(GAP.gd_min)\n problem.n_obj = nobj\n problem.n_constr = 2\n problem.xl = GAP.gd_min\n problem.xu = GAP.gd_max\n problem.elementwise_evaluation = True\n\n algorithm = NSGA2(\n pop_size=npop,\n eliminate_duplicates=True\n )\n \n # conduct the optimization-------------------------------------------------\n res = minimize(problem, algorithm, (\"n_gen\", ngen), verbose=True)\n\n # save results-------------------------------------------------------------\n return res", "def new_sga(self) -> float:\n increase_factor = (\n self.total_demand() / self.operations.productivity.total_m3_collected - 1\n )\n increase_factor *= 0.75\n return self.income_statement.opex.sga * (1 + increase_factor)", "def __init__(self, A, sigma_s_x, sigma_g_x, sigma_s_y, sigma_g_y):\n\n self._mode_x = GaussianSchellModel1D(A**0.5, sigma_s_x, sigma_g_x)\n self._mode_y = GaussianSchellModel1D(A**0.5, sigma_s_y, sigma_g_y)\n\n # For eigenvalue ordering.\n self._sorted_mode_indices = None", "def create_analysis_tools(self):\r\n raise NotImplementedError()", "def _construct_gan(self):\n self.critic.trainable = False\n gan = Model(self.encoder.input, self.critic(self.encoder.output))\n gan.compile(optimizer=self.critic_opt(lr=self.critic_learning_rate),\n loss='binary_crossentropy')\n return gan", "def runGA(dressCode, color, budget, poplength, generations, boost, error, show, best):\n\n print(\"[-] Running genetic algorithm...\", end=\"\\n\\n\")\n ga = GeneticAlgorithm( \n popSize=poplength, \n eliteSize=2,\n crossoverRate=0.9, \n mutationRate=0.2, \n generations=generations, \n dressCode=dressCode, \n color=color, \n budget=budget,\n boost=boost,\n error=error,\n show=show,\n )\n # start the genetic algorithm \n ga.start()\n if (best != -1):\n ga.showBestOutfit(best)\n ga.plotPerformance()", "def _gsea_score(self, gene_list, correl_vector, gene_set, weighted_score_type=1, \n single=False, scale=False):\n N = len(gene_list)\n tag_indicator = np.in1d(gene_list, gene_set, assume_unique=True).astype(int)\n\n if weighted_score_type == 0 :\n correl_vector = np.repeat(1, N)\n else:\n correl_vector = np.abs(correl_vector)**weighted_score_type\n\n # GSEA Enrichment Score\n Nhint = tag_indicator.sum()\n sum_correl_tag = np.sum(correl_vector*tag_indicator)\n\n no_tag_indicator = 1 - tag_indicator\n Nmiss = N - Nhint\n norm_tag = 1.0/sum_correl_tag\n norm_no_tag = 1.0/Nmiss\n RES = np.cumsum(tag_indicator * correl_vector * norm_tag - no_tag_indicator * norm_no_tag)\n\n if scale: RES = RES / N\n if single: # ssGSEA\n es = RES.sum()\n else:\n max_ES, min_ES = RES.max(), RES.min()\n es = max_ES if np.abs(max_ES) > np.abs(min_ES) else min_ES \n # extract values\n return es", "def test_GA():\n\tgenerationSize = 150\n\tmutationProb = 0.01\n\tgenerations = 500\n\tX = []\n\tT = []\n\tY = [] \n\tfitnesses = [0]*generationSize\n\tfor i in range(DATA_POINTS_NUM):\n\t\tX.append((i - DATA_POINTS_NUM/2)*0.1)\n\t\tT.append(polynomi_3N(REFERENCE, X[-1]))\n\t\tY.append(0)\n\t\n\tga = GA.GA(generationSize, 4, mutationProb)\n\tgenomes = ga.seedGenomes()\n\t#plot initial genomes\n\tplt.figure(1)\n\tplt.title('Initial genomes')\n\tfor i in range(len(genomes)):\n\t\tGenome = prescale(genomes[i])\n\t\tprint Genome\n\t\tfor j in range(DATA_POINTS_NUM):\n\t\t\tY[j] = 
(polynomi_3N(Genome, X[j]))\n\t\tfitnesses[i] = calculate_fitness(T, Y)\n\t\tplt.plot(X,Y, 'b-')\n\tplt.plot(X,T, 'r-')\n\t\t\n\t\n\t#live and learn\n\tfor k in range(generations):\n\t\tprint \".\",\n\t\tfor i in range(len(genomes)):\n\t\t\tGenome = prescale(genomes[i])\n\t\t\tfor j in range(DATA_POINTS_NUM):\n\t\t\t\tY[j] = (polynomi_3N(Genome,X[j]))\n\t\t\tfitnesses[i] = calculate_fitness(T, Y)\n\t\tga.fitnessUpdate(fitnesses)\n\t\tgenomes = ga.createNextGeneration()\n\t\t\n\t#plot final genomes\n\tplt.figure(2)\n\tplt.title('Final genomes')\n\tprint \"\\nfinal Genomes\"\n\tfor i in range(len(genomes)):\n\t\tGenome = prescale(genomes[i])\n\t\tfor j in range(DATA_POINTS_NUM):\n\t\t\tY[j] = (polynomi_3N(Genome,X[j]))\n\t\tprint \"fit:%5.1f [%7.4f, %7.4f, %7.4f, %7.4f]\"%\\\n\t\t (calculate_fitness(T, Y), Genome[0],\n\t\t Genome[1], Genome[2], Genome[3])\n\t\tplt.plot(X,Y, 'b-')\n\tplt.plot(X,T, 'r-')\n\t\t\n\t#plot progress\n\tP = []\n\thistory = ga.generations[:]\n\tfor f in history:\n\t\t#f[1].sort()\n\t\tP.append(max(f[1]))\n\tplt.figure(3)\n\tplt.title('progress')\n\tplt.plot(P)\n\tplt.show()\n\t\n\t#print the result:\t\n\tbestGene = fitnesses.index(max(fitnesses))\n\tG = prescale(genomes[bestGene])\n print \"\"\n\tprint \"And the result is:\"\n\tprint \"%.4f => %.4f (%.4f)\"%(A, G[0], abs(A - G[0]))\n\tprint \"%.4f => %.4f (%.4f)\"%(B, G[1], abs(B - G[1]))\n\tprint \"%.4f => %.4f (%.4f)\"%(C, G[2], abs(C - G[2]))\n\tprint \"%.4f => %.4f (%.4f)\"%(D, G[3], abs(D - G[3]))", "def runse(self):\n\n # check for se catalog\n\n \n\n t = self.image.split('.fits')\n froot = t[0]\n # check for se catalog\n secat = froot+'.cat'\n\n os.system('ln -s ' +self.astrodir + '/default.* .') \n if self.instrument == 'h':\n defaultcat = 'default.sex.HDI'\n elif self.instrument == 'i':\n defaultcat = 'default.sex.INT'\n self.keepsection=[1000,5000,0,4000]\n elif self.instrument == 'm':\n defaultcat = 'default.sex.HDI'\n elif self.instrument == 'b':\n print(\"hey Rose - \")\n print(\"using default.sex.BOK!!!\")\n print()\n defaultcat = 'default.sex.BOK.getzp'\n header = fits.getheader(self.image)\n try:\n expt = header['EXPTIME']\n except KeyError:\n expt = 1.\n ADUlimit = 40000.\n if self.instrument == 'i':\n if (self.filter == 'r'):\n ADUlimit = 400000./60#/float(expt)\n elif self.filter == 'ha':\n ADUlimit = 40000./180.\n #print('saturation limit in ADU/s {:.1f}'.format(ADUlimit))\n if self.fwhm is None:\n t = 'sex ' + self.image + ' -c '+defaultcat+' -CATALOG_NAME ' + froot + '.cat -MAG_ZEROPOINT 0 -SATUR_LEVEL '+str(ADUlimit)\n #t = 'sex ' + self.image + ' -c '+defaultcat+' -CATALOG_NAME ' + froot + '.cat -MAG_ZEROPOINT 0 -SATUR_LEVEL '\n if self.verbose:\n print('running SE first time to get estimate of FWHM')\n print(t)\n os.system(t)\n\n # clean up SE files\n # skipping for now in case the following command accidentally deletes user files\n # os.system('rm default.* .')\n\n\n ###################################\n # Read in Source Extractor catalog\n ###################################\n if self.verbose:\n print('reading in SE catalog from first pass')\n secat_filename = froot+'.cat'\n self.secat = fits.getdata(secat_filename,2)\n self.secat0 = self.secat\n # get median fwhm of image\n # for some images, this comes back as zero, and I don't know why\n fwhm = np.median(self.secat['FWHM_IMAGE'])*self.pixelscale\n \n \n t = 'sex ' + self.image + ' -c '+defaultcat+' -CATALOG_NAME ' + froot + '.cat -MAG_ZEROPOINT 0 -SATUR_LEVEL '+str(ADUlimit)+' -SEEING_FWHM '+str(fwhm)\n if float(fwhm) == 0:\n 
print('WARNING: measured FWHM is zero!')\n if self.verbose:\n print('running SE again with new FWHM to get better estimate of CLASS_STAR')\n else:\n t = 'sex ' + self.image + ' -c '+defaultcat+' -CATALOG_NAME ' + froot + '.cat -MAG_ZEROPOINT 0 -SATUR_LEVEL '+str(ADUlimit)+' -SEEING_FWHM '+str(self.fwhm)\n if self.verbose:\n print(t)\n print('running SE w/user input for FWHM to get better estimate of CLASS_STAR') \n #############################################################\n # rerun Source Extractor catalog with updated SEEING_FWHM\n #############################################################\n\n #print(t)\n os.system(t)\n self.read_se_cat()", "def get_gast(self):\n gast = self.Calculations.GAST()\n return gast", "def eta2gg ( self ) :\n ##\n from GaudiConfUtils.ConfigurableGenerators import FilterDesktop\n pre_eta = self.make_selection (\n 'PreEta2gg' ,\n FilterDesktop ,\n [ self.eta_ () ] ,\n Code = \"\"\"\n ( ADMASS ('eta') < 100 * MeV ) &\n ( PT > %s ) \n \"\"\" % self['ETA_PT'] \n )\n ##\n from GaudiConfUtils.ConfigurableGenerators import Pi0Veto__Tagger2g\n return self.make_selection (\n 'Eta2gg' ,\n Pi0Veto__Tagger2g ,\n [ pre_eta ] ,\n MassWindow = 25 * MeV ,\n MassChi2 = -1 ,\n ExtraInfoIndex = 25015 ## unique ! \n )", "def cmd_gaus():\n cmds = []\n cmds.append(\"r_m[0.0,-1,1]\")\n cmds.append(\"r_s[2.5,0,10]\")\n cmds.append('Gaussian::res(x,r_m,r_s)')\n return cmds", "def makeAgnTable(size=100, database=VARIABILITY_DB, **kwargs):\n\n # a haphazard sample of galaxy SEDs\n sedFiles = ['Exp.31E06.0005Z.spec', 'Inst.79E06.1Z.spec', 'Const.50E07.0005Z.spec']\n conn = sqlite3.connect(database)\n c = conn.cursor()\n try:\n c.execute('''CREATE TABLE agn\n (galid int, varsimobjid int,\n internalAvBulge real, internalAvDisk real, redshift real,\n variability text,\n sedFilenameBulge text, sedFilenameDisk text, sedFilenameAgn text)''')\n conn.commit()\n except:\n return\n\n rng = np.random.RandomState(32)\n agn_tau = rng.random_sample(size)*100.0+100.0\n agn_sfu = rng.random_sample(size)*2.0\n agn_sfg = rng.random_sample(size)*2.0\n agn_sfr = rng.random_sample(size)*2.0\n agn_sfi = rng.random_sample(size)*2.0\n agn_sfz = rng.random_sample(size)*2.0\n agn_sfy = rng.random_sample(size)*2.0\n mjDisplacement = rng.random_sample(size)*5.0\n avBulge = rng.random_sample(size)*0.5+2.6\n avDisk = rng.random_sample(size)*0.5+2.6\n redshift = rng.random_sample(size)*0.5\n for i in range(size):\n varParam = {'varMethodName': 'applyAgn',\n 'pars': {'agn_tau': agn_tau[i], 'agn_sfu': agn_sfu[i], 'agn_sfg': agn_sfg[i],\n 'agn_sfr': agn_sfr[i], 'agn_sfi': agn_sfi[i], 'agn_sfz': agn_sfz[i],\n 'agn_sfy': agn_sfy[i], 't0_mjd': 48000.0+mjDisplacement[i],\n 'seed': rng.randint(0, 200000)}}\n\n paramStr = json.dumps(varParam)\n\n qstr = '''INSERT INTO agn VALUES (%i, %i, %f, %f, %f, '%s', '%s', '%s', '%s')''' % \\\n (i, i, avBulge[i], avDisk[i], redshift[i],\n paramStr,\n sedFiles[rng.randint(0, len(sedFiles))],\n sedFiles[rng.randint(0, len(sedFiles))],\n 'agn.spec')\n\n c.execute(qstr)\n conn.commit()\n conn.close()", "def main():\n region = 'Kanto'\n year = 2000\n # callParallelGA(region)\n callParallelReducedGA(region)\n \n\n region = 'EastJapan'\n year = 2000\n callParallelReducedGA(region)\n # callParallelGA(region)\n\n\n region = 'Tohoku'\n year = 2000\n callParallelReducedGA(region)\n # callParallelGA(region)\n\n \n region = 'Kansai'\n year = 2000\n callParallelReducedGA(region)\n # callParallelGA(region)", "def __init__(self, A, sigma_s, sigma_g):\n self._A = A\n self._sigma_s = sigma_s\n 
self._sigma_g = sigma_g", "def create_ogse_db(args):\n if args.ref_diode:\n # read reference-diode data\n xds = read_ref_diode(args.ogse_dir, args.ref_diode, args.verbose)\n\n # create new database for reference-diode data\n xds.to_netcdf(args.ogse_dir / DB_REF_DIODE,\n mode='w', format='NETCDF4',\n group='/gse_data/ReferenceDiode')\n\n if args.wav_mon:\n # read reference-diode data\n xds = read_wav_mon(args.ogse_dir, args.wav_mon, args.verbose)\n # create new database for reference-diode data\n xds.to_netcdf(args.ogse_dir / DB_WAV_MON,\n mode='w', format='NETCDF4',\n group='/gse_data/WaveMonitor')", "def initiateAnalysis(self,):\n\n #\n # Imports\n #\n import os\n import sys\n\n #\n # get optional arguments from commandline\n #\n self.getComandLineOptions()\n \n #\n # for logmessages\n #\n tmpLogMessages = ['----------------\\n']\n tmpLogMessage = self.createLogHeader()\n tmpLogMessages.append(tmpLogMessage)\n #print tmpLogMessage\n \n #\n # check analysis path\n #\n if os.path.isdir(self.analysisPath):\n tmpLogMessage = 'WARNING: the analysis path already exists.\\n'\n print tmpLogMessage\n tmpLogMessages.append(tmpLogMessage)\n else:\n tmpLogMessage = 'Creating directory \"'+self.analysisPath+'\".\\n'\n #print tmpLogMessage\n tmpLogMessages.append(tmpLogMessage)\n os.makedirs(self.analysisPath)\n \n #\n # create the logfile\n #\n tmpLogMessages += self.openLogfileConnection()\n \n #\n # write tmpLogMessages to logfile\n #\n SEAseqPipeLine.logfile.write(''.join(tmpLogMessages))\n \n #\n # create the database\n #\n self.database.create()\n \n #\n # add run to runs table\n #\n self.database.addToRunsTable(self.startTimeStr, self.command, self.commandLine, True, MASTER)\n \n return 0", "def create_gar(self):\n print('Maketh the report!')\n # Date setup\n date = datetime.today().strftime('%Y-%m-%d')\n year = datetime.today().strftime('%Y')\n\n # Page setup\n geometry_options = {\"tmargin\": \"2cm\",\n \"lmargin\": \"1.8cm\",\n \"rmargin\": \"1.8cm\",\n \"headsep\": \"1cm\"}\n\n doc = pylatex.Document(page_numbers=False,\n geometry_options=geometry_options)\n\n header = self.produce_header_footer()\n\n doc.preamble.append(header)\n doc.change_document_style(\"header\")\n\n #\n # DOCUMENT BODY/CREATION\n with doc.create(pylatex.Section('GeneSippr Analysis Report', numbering=False)):\n doc.append('GeneSippr!')\n\n with doc.create(pylatex.Subsection('GeneSeekr Analysis', numbering=False)) as genesippr_section:\n with doc.create(pylatex.Tabular('|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|')) as table:\n # Header\n table.add_hline()\n table.add_row(self.genesippr_table_columns)\n for sample_name in self.samples:\n table_data = [sample_name]\n for data in self.genesippr_headers:\n try:\n print(sample_name, data, self.report_data['genesippr'][sample_name][data])\n table_data.append(self.report_data['genesippr'][sample_name][data])\n except KeyError:\n pass\n table.add_row(table_data)\n self.create_caption(genesippr_section, 'a', \"+ indicates marker presence : \"\n \"- indicates marker was not detected\")\n\n # Create the PDF\n doc.generate_pdf('{}_{}_{}'\n .format(os.path.join('/home/adamkoziol/Bioinformatics/sippr/gui/161104_M02466_0002_000000000-AV4G5'), 'gar', date), clean_tex=False)\n print('{}_{}_{}'.format(os.path.join('/home/adamkoziol/Bioinformatics/sippr/gui/161104_M02466_0002_000000000-AV4G5'), 'gar', date))\n # for report_name in self.report_data:\n # for sample_name in self.samples:\n # for header, value in self.report_data[report_name][sample_name].items():\n # print(report_name, 
sample_name, header, value)", "def gaModel(NGEN,\n CXPB,\n MUTPB,\n modelOmega,\n year,\n region,\n mean,\n tournsize,\n n_aval\n ):\n start = time.clock()\n # Attribute generator\n toolbox.register(\"attr_float\", random.random)\n toolbox.register(\"mate\", tools.cxOnePoint)\n # operator for selecting individuals for breeding the next\n # generation: each individual of the current generation\n # is replaced by the 'fittest' (best) of three individuals\n # drawn randomly from the current generation.\n toolbox.register(\"select\", tools.selTournament, tournsize=tournsize)\n toolbox.register(\"mutate\", tools.mutPolynomialBounded,\n indpb=0.1, eta=1, low=0, up=1)\n\n stats = tools.Statistics(key=lambda ind: ind.fitness.values)\n stats.register(\"avg\", numpy.mean)\n stats.register(\"std\", numpy.std)\n stats.register(\"min\", numpy.min)\n stats.register(\"max\", numpy.max)\n\n # calculating the number of individuals of the\n # populations based on the number of executions\n y = int(n_aval / NGEN)\n x = n_aval - y * NGEN\n n = x + y\n\n toolbox.register(\"evaluate\", evaluationFunction,\n modelOmega=modelOmega, mean=mean)\n toolbox.register(\"individual\",\n tools.initRepeat,\n creator.Individual,\n toolbox.attr_float,\n len(modelOmega[0].bins)\n )\n toolbox.register(\"population\", tools.initRepeat, list, toolbox.individual)\n\n logbook = tools.Logbook()\n logbook.header = \"gen\", \"min\", \"avg\", \"max\", \"std\"\n\n pop = toolbox.population(n)\n # Evaluate the entire population\n # 2 model.bins: real data, generated model\n fitnesses = list(map(toolbox.evaluate, pop))\n for ind, fit in zip(pop, fitnesses):\n ind.fitness.values = fit\n for g in range(NGEN):\n print(g)\n # Select the next generation individuals\n offspring = toolbox.select(pop, len(pop))\n # Clone the selected individuals\n offspring = list(map(toolbox.clone, offspring))\n # Apply crossover and mutation on the offspring\n for child1, child2 in zip(offspring[::2], offspring[1::2]):\n if random.random() < CXPB:\n toolbox.mate(child1, child2)\n del child1.fitness.values\n del child2.fitness.values\n for mutant in offspring:\n if random.random() < MUTPB:\n toolbox.mutate(mutant)\n del mutant.fitness.values\n # Evaluate the individuals with an invalid fitness\n invalid_ind = [ind for ind in offspring if not ind.fitness.valid]\n fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)\n for ind, fit in zip(invalid_ind, fitnesses):\n ind.fitness.values = fit\n\n # The population is entirely replaced by the offspring,\n # but the last pop best_pop\n # Elitism\n best_pop = tools.selBest(pop, 1)[0]\n offspring = sorted(offspring, key=attrgetter(\"fitness\"), reverse=True)\n offspring[len(offspring) - 1] = best_pop\n random.shuffle(offspring)\n pop[:] = offspring\n # logBook\n record = stats.compile(pop)\n logbook.record(gen=g, **record)\n end = time.clock()\n generatedModel = models.model.newModel(modelOmega[0].definitions)\n # conferir se e bins o best_pop\n generatedModel.prob = best_pop\n generatedModel.bins = calcNumberBins(list(best_pop), mean)\n generatedModel.loglikelihood = best_pop.fitness.values\n generatedModel.definitions = modelOmega[0].definitions\n generatedModel.time = start - end\n generatedModel.logbook = logbook\n # output = generatedModel.loglikelihood\n # return((-1)*output[0])\n return generatedModel", "def buildStatGraph(args):\n g = newStatGraph()\n r = g.getroot()\n transcripts = lib_filter.getTranscripts(args.geneCheck, args.geneCheckDetails)\n for t in transcripts:\n if isOk(t.annotations):\n recordOk(r, 
t)\n else:\n recordNotOk(r, t)\n return g", "def get_scatter(ensemble, genes_query, grouping, ptile_start, ptile_end, tsne_outlier_bool, smoothing=False, max_points='10000', modality='mch'):\t\n\tmodalityu = modality.replace('snATAC','ATAC').replace('snRNA','RNA')\n\n\twith open(log_file,'a') as f:\n\t\tprint(' *** Running get_scatter', file=f) \n\t\tprint('modalityu=%s' % modalityu, file=f)\n\n\tgenes = genes_query.split()\n\n\tgene_name_str = \"\"\n\tx, y, text, mch = list(), list(), list(), list()\n\n\tif len(genes) == 1:\n\t\tpoints = get_gene_snATAC(ensemble=ensemble, gene=genes[0], grouping=grouping, outliers=True, smoothing=smoothing, max_points=max_points, modality=modalityu)\n\t\tgene_name = get_gene_by_id([ genes[0] ])[0]['gene_name']\n\t\ttitle = 'Gene body %s normalized counts: %s' % (modality, gene_name)\n\telse:\n\t\tpoints = get_mult_gene_snATAC(ensemble=ensemble, genes=genes, grouping=grouping, max_points=max_points, modality=modalityu)\n\t\tgene_infos = get_gene_by_id(genes)\n\t\tfor i, gene in enumerate(gene_infos):\n\t\t\tif i > 0 and i % 10 == 0:\n\t\t\t\tgene_name_str += \"<br>\"\n\t\t\tgene_name_str += gene['gene_name'] + '+'\n\t\tgene_name_str = gene_name_str[:-1]\n\t\ttitle = 'Avg. Gene body %s normalized counts: <br>%s' + (modality, gene_name_str)\n\n\n\tif points is None:\n\t\traise FailToGraphException\n\n\t### TSNE ###\n\tif grouping != 'dataset' and grouping != 'target_region':\n\t\t# if (modality=='RNA') and (grouping+'_RNA' not in points.columns): # If no cluster annotations available, group by cluster number instead\n\t\t# \tgrouping = \"cluster\"\n\t\tif (grouping+'_'+modalityu not in points.columns): \n\t\t\tgrouping = \"cluster\"\n\t\t\tif len(genes) == 1:\n\t\t\t\tpoints = get_gene_snATAC(ensemble, genes[0], grouping, True, smoothing, max_points, modality=modalityu)\n\t\t\telse:\n\t\t\t\tpoints = get_mult_gene_snATAC(ensemble, genes, grouping, smoothing, max_points, modality=modalityu)\n\t\t\tprint(\"**** Grouping by cluster\")\n\n\t# with open(log_file,'a') as f:\n\t# \tprint(points.head(), file=f) \n\n\tdatasets = points['dataset'].unique().tolist()\n\tannotation_additional_y = 0.00\n\tif grouping == 'dataset':\n\t\tunique_groups = datasets\n\t\tnum_clusters = len(unique_groups)\n\telif grouping == 'target_region':\n\t\tpoints['target_region'].fillna('N/A', inplace=True)\n\t\tunique_groups = points['target_region'].unique().tolist()\n\t\tnum_clusters = len(unique_groups)\n\telse:\n\t\tif grouping == 'cluster':\n\t\t\tannotation_additional_y = 0.025 # Necessary because legend items overlap with legend title (annotation) when there are many legend items\n\t\tnum_clusters = points['cluster_'+modalityu].max()\n\t\tunique_groups = points[grouping+'_'+modalityu].unique().tolist()\n\n\tcolors = generate_cluster_colors(len(unique_groups), grouping)\n\tsymbols = ['circle', 'square', 'cross', 'triangle-up', 'triangle-down', 'octagon', 'star', 'diamond']\n\n\ttraces_tsne = OrderedDict()\n\n\tlegend_x = -.17\n\tlayout_width = 1100;\n\n\tgrouping_clustering = grouping\n\tif grouping != 'dataset' and grouping != 'target_region':\n\t\tlayout_width = 1000;\n\t\tlegend_x = -.14\n\t\tgrouping_clustering = grouping+'_'+modalityu\n\n\tif tsne_outlier_bool:\n\t\ttop_x = points['tsne_x_'+modalityu].quantile(0.999)\n\t\tbottom_x = points['tsne_x_'+modalityu].quantile(0.001)\n\t\ttop_y = points['tsne_y_'+modalityu].quantile(0.999)\n\t\tbottom_y = points['tsne_y_'+modalityu].quantile(0.001)\n\n\telse:\n\t\ttop_x = points['tsne_x_'+modalityu].max()\n\t\tbottom_x = 
points['tsne_x_'+modalityu].min()\n\t\ttop_y = points['tsne_y_'+modalityu].max()\n\t\tbottom_y = points['tsne_y_'+modalityu].min()\n\n\trange_x = top_x - bottom_x\n\ttop_x = top_x + range_x * 0.1\n\tbottom_x = bottom_x - range_x*0.1\n\trange_y = top_y - bottom_y\n\ttop_y = top_y + range_y * 0.1\n\tbottom_y = bottom_y - range_y*0.1\n\n\tif len(points) > 3000:\n\t\tmarker_size = 2\n\telse:\n\t\tmarker_size = 4\n\n\t## 2D tSNE coordinates ##\n\tfor i, group in enumerate(unique_groups):\n\t\n\t\tpoints_group = points[points[grouping_clustering]==group]\n\t\tif grouping_clustering.startswith('cluster'):\n\t\t\tgroup_str = 'cluster_' + str(group)\n\t\telif grouping_clustering== \"dataset\":\n\t\t\tgroup = group.strip('CEMBA_')\n\t\t\tgroup_str = group\n\t\telse:\n\t\t\tgroup_str = group\n\n\t\tcolor_num = i\n\n\t\twith open(log_file,'a') as f:\n\t\t\tprint('%d %s %s' % (i,group,group_str), file=f) \n\t\t\tprint(points_group.columns, file=f)\n\n\t\ttrace2d = traces_tsne.setdefault(color_num, Scatter(\n\t\t\tx=list(),\n\t\t\ty=list(),\n\t\t\ttext=list(),\n\t\t\tmode='markers',\n\t\t\tvisible=True,\n\t\t\tname=group_str,\n\t\t\tlegendgroup=group,\n\t\t\tmarker={\n\t\t\t\t 'color': colors[color_num],\n\t\t\t\t 'size': marker_size,\n\t\t\t\t #'symbol': symbols[datasets.index(dataset)],\n\t\t\t},\n\t\t\thoverinfo='text'))\n\t\ttrace2d['x'] = points_group['tsne_x_'+modalityu].values.tolist()\n\t\ttrace2d['y'] = points_group['tsne_y_'+modalityu].values.tolist()\n\t\t# for point in points_group.itertuples(index=False): # Maybe there's a more elegant way to do this... EAM\n\t\t# \ttext = OrderedDict([('Cluster', point[4]),('Dataset', point[2]),])\n\t\t# \tif point[3]!='Null':\n\t\t# \t\ttext['Annotation'] = point[3]\n\t\t# \tif point[-1]!='None':\n\t\t# \t\ttext['RS2 Target Region'] = point[-1]\n\t\t# \ttrace2d['text'] = [build_hover_text(OrderedDict(text))]\n\t\ttrace2d['text'] = [build_hover_text(OrderedDict([('Annotation', point[3]),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t ('Cluster', point[4]),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t ('RS2 Target Region', point[-1]),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t ('Dataset', point[2]),]))\n\t\t\t\t\t\t for point in points_group.itertuples(index=False)]\n\n\t### snATAC normalized counts scatter plot ###\n\tx = points['tsne_x_'+modalityu].tolist()\n\ty = points['tsne_y_'+modalityu].tolist()\n\tATAC_counts = points['normalized_counts'].copy()\n\ttext_ATAC = [build_hover_text(OrderedDict([('Annotation', point[3]),\n\t\t\t\t\t\t\t\t\t\t\t ('Cluster', point[4]),\n\t\t\t\t\t\t\t\t\t\t\t ('RS2 Target Region', point[-1]),\n\t\t\t\t\t\t\t\t\t\t\t ('Dataset', point[2]),\n\t\t\t\t\t\t\t\t\t\t\t ('<b>Normalized Counts</b>', round(point[-2], 5)),]))\n\t\t\t\t for point in points.itertuples(index=False)]\n\n\n\tATAC_dataframe = pd.DataFrame(ATAC_counts)\n\tstart = ATAC_dataframe.dropna().quantile(ptile_start)[0].tolist()\n\tend = ATAC_dataframe.dropna().quantile(ptile_end).values[0].tolist()\n\tend = max(end,start+0.01)\n\tATAC_colors = [set_color_by_percentile(x, start, end) for x in ATAC_counts]\n\n\tcolorbar_tickval = list(arange(start, end, (end - start) / 4))\n\tcolorbar_tickval[0] = start\n\tcolorbar_tickval.append(end)\n\tcolorbar_ticktext = [\n\t\tstr(round(x, num_sigfigs_ticklabels)) for x in arange(start, end, (end - start) / 4)\n\t]\n\tcolorbar_ticktext[0] = '<' + str(round(start, num_sigfigs_ticklabels))\n\tcolorbar_ticktext.append('>' + str(round(end, num_sigfigs_ticklabels)))\n\n\ttrace_ATAC = 
Scatter(\n\t\tmode='markers',\n\t\tx=x,\n\t\ty=y,\n\t\ttext=text_ATAC,\n\t\tmarker={\n\t\t\t'color': ATAC_colors,\n\t\t\t'colorscale': 'Viridis',\n\t\t\t'size': marker_size,\n\t\t\t'colorbar': {\n\t\t\t\t'x': 1.05,\n\t\t\t\t'len': 0.5,\n\t\t\t\t'thickness': 10,\n\t\t\t\t'title': 'Normalized Counts',\n\t\t\t\t'titleside': 'right',\n\t\t\t\t'tickmode': 'array',\n\t\t\t\t'tickvals': colorbar_tickval,\n\t\t\t\t'ticktext': colorbar_ticktext,\n\t\t\t\t'tickfont': {'size': 10}\n\t\t\t}\n\t\t},\n\t\tshowlegend=False,\n\t\tyaxis='y',\n\t\txaxis='x2',\n\t\thoverinfo='text')\n\n\tlayout = Layout(\n\t\tautosize=True,\n\t\theight=550,\n\t\twidth=layout_width,\n\t\t# title=title,\n\t\t# titlefont={'color': 'rgba(1,2,2,1)',\n\t\t # 'size': 16},\n\t\tlegend={'x':legend_x,\n\t\t\t\t'y':0.95,\n\t\t\t\t'tracegroupgap': 0.5,\n\t\t\t\t'bgcolor': 'rgba(0,0,0,0)'},\n\t\tmargin={'l': 0,\n\t\t\t\t'r': 0,\n\t\t\t\t'b': 30,\n\t\t\t\t't': 50,},\n\t\txaxis={\n\t\t\t'domain': [0, 0.49],\n\t\t\t'type': 'linear',\n\t\t\t'ticks': '',\n\t\t\t'dtick': 10,\n\t\t\t'tickwidth': 0,\n\t\t\t'showticklabels': False,\n\t\t\t'showline': True,\n\t\t\t'showgrid': False,\n\t\t\t'zeroline': False,\n\t\t\t'linecolor': 'black',\n\t\t\t'linewidth': 0.5,\n\t\t\t'mirror': False,\n\t\t\t'scaleanchor': 'x2',\n\t\t\t'range':[bottom_x,top_x]\n\t\t},\n\t\txaxis2={\n\t\t\t'domain': [0.51, 1],\n\t\t\t'type': 'linear',\n\t\t\t'ticks': '',\n\t\t\t'dtick': 10,\n\t\t\t'tickwidth': 0,\n\t\t\t'showticklabels': False,\n\t\t\t'showline': True,\n\t\t\t'showgrid': False,\n\t\t\t'zeroline': False,\n\t\t\t'linecolor': 'black',\n\t\t\t'linewidth': 0.5,\n\t\t\t'mirror': False,\n\t\t\t'scaleanchor': 'y',\n\t\t\t'range':[bottom_x,top_x]\n\t\t},\n\t\tyaxis={\n\t\t\t'domain': [0,1],\n\t\t\t'type': 'linear',\n\t\t\t'ticks': '',\n\t\t\t'dtick': 10,\n\t\t\t'tickwidth': 0,\n\t\t\t'showticklabels': False,\n\t\t\t'showline': True,\n\t\t\t'showgrid': False,\n\t\t\t'side': 'right',\n\t\t\t'zeroline': False,\n\t\t\t'linecolor': 'black',\n\t\t\t'linewidth': 0.5,\n\t\t\t'mirror': False,\n\t\t\t'range':[bottom_y,top_y]\n\t\t},\n\t\thovermode='closest',)\n\n\n\tfig = tools.make_subplots(\n\t\t\trows=1,\n\t\t\tcols=2,\n\t\t\tshared_xaxes=False,\n\t\t\tshared_yaxes=True,\n\t\t\tprint_grid=False,\n\t\t\tsubplot_titles=(\"tSNE colored by \"+grouping, title),\n\t\t\t)\n\n\tfor trace in traces_tsne.items():\n\t\tfig.append_trace(trace[1], 1,1)\n\tfig.append_trace(trace_ATAC, 1,2)\n\n\twith open(log_file,'a') as f:\n\t\tprint(trace_ATAC, file=f)\n\n\tfig['layout'].update(layout)\n\treturn plotly.offline.plot(\n\t\tfigure_or_data=fig,\n\t\toutput_type='div',\n\t\tshow_link=False,\n\t\tinclude_plotlyjs=False)", "def gath_geva(self,vec):\n if self.extended:\n return GathGeva(self.training_data,vec)\n else:\n return GathGeva(self.training_data[:,0:-1],vec)", "def analyze(g):\n record = {}\n logging.debug('Analyzing {} ({}, {})'.format(g.name, g.number_of_nodes(), g.number_of_edges()))\n\n if not nx.is_connected(g):\n raise DisconnectedError('{} is not connected: {}'.format(g, nx.number_connected_components(g)))\n\n record['name'] = g.name\n record['n'] = g.number_of_nodes()\n record['m'] = g.number_of_edges()\n record['density'] = nx.density(g)\n # record['degrees'] = networkx.degree(g)\n # record['diameter'] = networkx.diameter(g)\n\n # Compute modularity\n logging.debug('Compute modularity')\n record['modularity'] = _compute_modularity(g)\n\n empty_result = {'num_orbits': 'NA', 'num_generators': 'NA', 'aut_group_size': 'NA'}\n # Compute the automorphism group with saucy\n 
logging.debug('Convert saucy')\n g_pysaucy = networkx_to_pysaucy(g)\n colors = _get_color_partition(g)\n del g\n\n try:\n logging.debug('Run saucy')\n result = pysaucy.run_saucy(g_pysaucy, colors)\n except Exception as e:\n logging.error('{}: {}'.format(record['name'], e))\n record.update(empty_result)\n else:\n grpsize1, grpsize2, levels, nodes, bads, num_generators, support, orbit_ids = result\n\n orbit_sizes = collections.defaultdict(int)\n for orbit_id in orbit_ids:\n orbit_sizes[orbit_id] += 1\n record['num_orbits'] = len(orbit_sizes)\n record['num_generators'] = num_generators\n record['aut_group_size'] = grpsize1\n record['aut_group_size_exp'] = grpsize2\n\n return record", "def create_agent(self, env):\n\n if isinstance(env.action_space, gym.spaces.Discrete):\n return SarsaAgent(\n obs_dim=env.observation_space.shape[0],\n act_dim=env.action_space.n,\n hidden_sizes=[64],\n )\n\n raise ValueError(\"SARSA can only be used for discrete action spaces.\")", "def __init__(self,\n learning_rate=0.001,\n beta1=0.9,\n use_locking=False,\n name=\"GGT\",\n window=10,\n eps=1e-4,\n svd_eps=1e-6,\n sigma_eps=1e-2):\n super(GGTOptimizer, self).__init__(use_locking, name)\n self._set_hyper(\"lr\", learning_rate)\n self._set_hyper(\"beta1\", beta1)\n self._set_hyper(\"window\", window)\n self._set_hyper(\"eps\", eps)\n self._set_hyper(\"svd_eps\", svd_eps)\n self._set_hyper(\"sigma_eps\", sigma_eps)\n\n self.index_dict = {}\n self.shape_dict = {}", "def execute_expression_analysis(self):\n print (\"Expression analisys start...\")\n n = \"consexpression\"\n out_merge_table = ''\n self.execute_merge_table(self._count_table, out_merge_table)\n # 1 ------------------ edgeR -----------------\n out_edger = self._exp_dao._read_directory + \"/\" + self._exp_dao._name + \"_edger.csv\"\n self._edger = EdgeR(out_merge_table, self._exp_dao._group_name, self._exp_dao._replic, out_edger)\n self._edger.run_edger()\n # 2 ------------- BaySeq --------------------\n out_bayseq = self._exp_dao._read_directory + \"/\" + self._exp_dao._name + \"_baySeq.csv\"\n self._bayseq = BaySeq(out_merge_table, self._exp_dao._group_name, self._exp_dao._replic, out_bayseq)\n self._bayseq.run_bayseq()\n # 3 ------------- DESeq --------------------\n out_deseq = self._exp_dao._read_directory + \"/\" + self._exp_dao._name + \"_DESeq.csv\"\n self._deseq = DESeq(out_merge_table, self._exp_dao._group_name, self._exp_dao._replic, out_deseq)\n self._deseq.run_deseq()\n # 4 ------------- NOISeq --------------------\n out_noiseq = self._exp_dao._read_directory + \"/\" + self._exp_dao._name + \"_NOISeq.csv\"\n self._noiseq = Noiseq(out_merge_table, self._exp_dao._group_name, self._exp_dao._replic, out_noiseq)\n self._noiseq.run_noiseq()\n # 5 ------------- EBSeq --------------------\n out_ebseq = self._exp_dao._read_directory + \"/\" + self._exp_dao._name + \"_EBSeq.csv\"\n self._ebseq = Ebseq(out_merge_table, self._exp_dao._group_name, self._exp_dao._replic, out_ebseq)\n self._ebseq.run_ebseq()\n # 6 ------------- SAMSeq --------------------\n out_samseq = self._exp_dao._read_directory + \"/\" + self._exp_dao._name + \"_SAMSeq.csv\"\n self._samseq = SamSeq(out_merge_table, self._exp_dao._group_name, self._exp_dao._replic, out_samseq)\n self._samseq.run_samseq()\n # 7 ------------- limma-voom --------------------\n out_limmavoom = self._exp_dao._read_directory + \"/\" + self._exp_dao._name + \"_limmavoom.csv\"\n self._limmavoom = LimmaVoom(out_merge_table, self._exp_dao._group_name, self._exp_dao._replic, out_limmavoom)\n 
self._limmavoom.run_limmavoom()", "def export_to_gsas():\n # Get workflow\n work_flow = my_data.get()\n\n output_file_name = '/tmp/acceptance_test.gda'\n\n # Clear the file if it exists.\n if os.path.exists(output_file_name):\n os.remove(output_file_name)\n\n status = work_flow.export_gsas_file(run_number=80231)\n assert status\n assert os.path.exists(output_file_name)", "def makePopulation(self):\n # nHearing = int(round(float(self.nAgents) * (1-self.propDeaf)))\n #nDeaf = int(math.ceil(float(self.nAgents) * self.propDeaf))\n # create population of agents (just an array of agentEpsilon instances)\n self.pop = [agentEpsilon(self.alpha,self.nManualSigns, self.nSpokenSigns , deaf=False, sex=random.choice([0,1]), ID= i) for i in range(self.nAgents)]", "def test_GA_sanity():\n\tga = GA.GA(2,3)\n\tgenomes = ga.seedGenomes()\n\tif len(genomes) != 2:\n\t\tprint \"Wrong number of genomes\"\n\tif len(genomes[0]) != 3:\n\t\tprint \"Wrong size in genomes\"\n\t#print genomes\n\t#live and learn\n\tfitnesses = [23, 45]\n\tga.fitnessUpdate(fitnesses)\n\tgenomes2 = ga.createNextGeneration()\n\tif len(genomes2) != 2:\n\t\tprint \"Wrong number of genomes\"\n\tif len(genomes2[0]) != 3:\n\t\tprint \"Wrong size in genomes\"\n\t#print genomes2", "def create_sts_model(train_x, train_y):\n model = GaussianNB()\n model.fit(train_x, train_y)\n save_model(model, \"simple_time_series\")\n return model", "def gene_heritability(\n input_snp_filename: \"Data Input, use the SNPs file from dataParse\",\n output_genes_filename: 'output file for gene-level results, use .csv',\n output_summary_filename: 'output file for the genomewide results summary, use .csv',\n logger_filename: 'file for the logger, use a txt',\n sweeps: \"number of samples for each chain\" = 1000,\n burnin: \"number of burnin samples\" = 1000,\n n_chains: \"number of chains of the sampler\" = 4,\n n_cores: \"number of parallel cores to use\" = 4,\n N_1kG: \"number of SNPs onwhich the LD-score is calculated\" = 1290028,\n chromosome: \"chromosome on which the analysis is run\" = \"all\",\n snp_thr: \"threshold for the minimum number of SNPs in a gene\" = 10,\n sep: \"separator for the input files, use t for tab separated (not \\t)\" = \",\",\n model: 'specify the model for the regression, one betwenn normal/gamma' = 'normal',\n fix_intercept = False,\n ):\n\n # Initialisation of the logger\n output_logger = log.setup_logger(\"output_logger\", logger_filename)\n log.initialise_log(output_logger,\n 'gene level regression, model: %s' %model,\n [input_snp_filename],\n [output_genes_filename,output_summary_filename],\n sweeps,\n burnin,\n chromosome = str(chromosome),\n other_params_diz = {'chains': n_chains, 'cores': n_cores, 'SNP threshold': snp_thr})\n\n # Initialisation function, it reads the summary stats file, filters the SNPs,\n # creates the output files\n\n logging.info(\"Start Analysis\")\n\n snps = s.Snps()\n # read table\n logging.info(\"Reading SNP file: %s,\\n\\t with %s delimiter\"%(input_snp_filename, sep))\n snps.read_table(input_snp_filename, separator=sep)\n # generate chi squared stats\n snps.generate_stats()\n # update the summary stats\n snps.update_summary()\n output_logger.info(\" Sample size \" + str(snps.n_patients) + \"\\n\")\n\n\n\n snps.apply_filter_table(s.baghera_filter)\n snps.update_summary()\n output_logger.info(\"After baghera init filter.\\n\\t Number of SNPs: %s\\n\\t Number of genes: %s\\n\" \\\n %(str(snps.n_snps), str(snps.n_genes)) )\n\n # Non coding SNPs are assigned to a dummy gene, such that the regression is done on 
the entire SNPs' set\n snps.rename_non_annotated(name='NonCoding')\n\n if chromosome != \"all\":\n snps.apply_filter_table(snps.cut_single_chrom, **{'chromosome': chromosome})\n output_logger.info(\n \"Analysis restricted to chr %s\" %str(chromosome) )\n\n snps.update_summary()\n output_logger.info(\"Analysis. Number of SNPs: %s\\n, Number of genes: %s\\n\" \\\n %(str(snps.n_snps), str(snps.n_genes)) )\n\n # Creates the genes table with the number of SNPs for each gene and the basic stats values\n genes=g.Genes()\n genes.initialise_genes(snps.table.copy(), snps_thr=snp_thr)\n\n output_logger.info(\"Output gene table initialised:\\nNumber of genes: %s\\n\" \\\n %(str(genes.n_genes)) )\n\n snps.set_non_annotated(genes.cut_genes, 'NonCoding')\n\n if model == 'gamma':\n result = gr.analyse_gamma(snps, output_summary_filename, output_logger,\n sweeps, burnin, n_chains, n_cores, N_1kG, fix_intercept,\n )\n else:\n result = gr.analyse_normal(snps, output_summary_filename, output_logger,\n sweeps, burnin, n_chains, n_cores, N_1kG, fix_intercept,\n )\n\n logging.info(\"Saving genes table\")\n genes.table = genes.table.merge(\n result, left_index=False, left_on=\"name\", right_on=\"name\")\n\n k = genes.table.n_snps / float(N_1kG)\n genes.table[\"h2g\"] = genes.table.bg_mean.astype(\"float\") * k\n\n genes.table = genes.table.sort_values(by=[\"P\", \"bg_median\"])\n\n genes.save_table(output_genes_filename)\n\n non_coding = genes.table[genes.table.name == \"NonCoding\"]\n h2g_tot = np.sum(genes.table[\"h2g\"].values) - non_coding[\"h2g\"].values\n\n output_logger.info(\" Non coding heritability : \" +\n str(non_coding[\"h2g\"].values) + \"\\n\")\n output_logger.info(\" Coding heritability : \" + str(h2g_tot) + \"\\n\")", "def GCASFit(self):\r\n\t\t#run the scatteringPeakInfo method to retrieve various peak attributes \r\n\t\tself.scatteringPeakInfo()\r\n\t\t\r\n\t\t#set parameters for fitting\r\n\t\tbaseline = self.scatteringBaseline\r\n\t\tx_vals = np.array(self.getAcqPoints())\r\n\t\ty_vals = np.array(self.getScatteringSignal()\t)\r\n\t\t\r\n\t\t#initial values for amplitude(a) center(u) and gauss width(sig)\r\n\t\tguess_a = self.scatteringMax \r\n\t\tguess_u = self.scatteringMaxPos\r\n\t\tguess_sig = 10\r\n\t\tp_guess = [53000,73,17, -0.031,-0.1]\r\n\t\t\r\n\t\tdef GCAS(x, a, xc, w, a3, a4):\r\n\t\t\treturn baseline + (a /(w*math.sqrt(2*math.pi)))*np.exp(-((x-xc)/w)**2/2.)*(1+np.abs((a3/6)*(((x-xc)/w)**3-3.*((x-xc)/w))+ (a4/24)*( ((x-xc)/w)**4 -6.*((x-xc)/w)**3 + 3 ) ))\r\n\t\t\t\t\t\t\r\n\t\t#run the fitting\r\n\t\t#try:\r\n\t\tpopt, pcov = curve_fit(GCAS, x_vals, y_vals, p0=p_guess)\r\n\t\t#except:\r\n\t\t#\tpopt, pcov = [np.nan, np.nan, np.nan, np.nan, np.nan], [np.nan, np.nan, np.nan, np.nan, np.nan] \r\n\t\t\r\n\t\tself.FF_scattering_amp = popt[0]\r\n\t\tself.FF_peak_pos = popt[1] \r\n\t\tself.FF_width = popt[2]\r\n\t\tfit_result = []\r\n\t\tfor x in x_vals:\r\n\t\t\tfit_result.append(GCAS(x,popt[0],popt[1],popt[2],popt[3],popt[4]))\r\n\t\tself.FF_results = fit_result", "def create(self, the_type):\n\n # Get the list of code producing visitors. The hardwired list is\n # based on the project argument. 
A different set of visitors can\n # be assembled by project.\n\n project_visitor_list = self._buildVisitorList()\n\n # Instance the needed code snippet generator here.\n\n if the_type == \"initFiles\":\n code_section_generator = InitFiles.InitFiles()\n elif the_type == \"startSource\":\n code_section_generator = StartSource.StartSource()\n elif the_type == \"includes1\":\n code_section_generator = Includes1.Includes1()\n elif the_type == \"includes2\":\n code_section_generator = Includes2.Includes2()\n elif the_type == \"namespace\":\n code_section_generator = Namespace.Namespace()\n elif the_type == \"public\":\n code_section_generator = Public.Public()\n elif the_type == \"protected\":\n code_section_generator = Protected.Protected()\n elif the_type == \"private\":\n code_section_generator = Private.Private()\n elif the_type == \"finishSource\":\n code_section_generator = FinishSource.FinishSource()\n\n elif the_type == \"DictStart\":\n code_section_generator = DictStart.DictStart()\n elif the_type == \"DictHeader\":\n code_section_generator = DictHeader.DictHeader()\n elif the_type == \"DictBody\":\n code_section_generator = DictBody.DictBody()\n\n elif the_type == \"InstanceDictStart\":\n code_section_generator = InstanceDictStart.InstanceDictStart()\n elif the_type == \"InstanceDictHeader\":\n code_section_generator = InstanceDictHeader.InstanceDictHeader()\n elif the_type == \"InstanceDictBody\":\n code_section_generator = InstanceDictBody.InstanceDictBody()\n\n elif the_type == \"HtmlStart\":\n code_section_generator = HtmlStartPage.HtmlStartPage()\n elif the_type == \"HtmlDoc\":\n code_section_generator = HtmlDocPage.HtmlDocPage()\n\n elif the_type == \"MdStart\":\n code_section_generator = MdStartPage.MdStartPage()\n elif the_type == \"MdDoc\":\n code_section_generator = MdDocPage.MdDocPage()\n\n else:\n print(f\"GenFactory: unsupported code section ({the_type}).\")\n return None\n\n self._addVisitor(code_section_generator, project_visitor_list)\n\n return code_section_generator", "def create_test_galaxy(galname='G1_test',zred=6,R_gal=5):\n\n plt.close('all')\n\n # Default gas parameters\n Ngas = 3000\n x,y,z,vx,vy,vz = get_rotating_disk(Ngas,R_gal,400)\n SFR = np.zeros(Ngas)+1e-3\n Z = np.zeros(Ngas)+1.\n nH = np.zeros(Ngas)+1e-5\n Tk = np.zeros(Ngas)+1e3\n h = np.zeros(Ngas)+10**(-0.5)\n np.random.seed(Ngas+1)\n f_H21 = np.random.rand(Ngas)*1.\n m = np.zeros(Ngas)+10**(5.45)\n # Solar abundances used by Gizmo (email from RD), must be mass-fractions?:\n solar = [0.28,3.26e-3,1.32e-3,8.65e-3,2.22e-3,9.31e-4,1.08e-3,6.44e-4,1.01e-4,1.73e-3]\n a_He,a_C,a_N,a_O,a_Ne,a_Mg,a_Si,a_S,a_Ca,a_Fe = [solar[i]*Z for i in range(len(solar))]\n gas_params = {'x':x,'y':y,'z':z,'vx':vx,'vy':vy,'vz':vz,'SFR':SFR,'Z':Z,'nH':nH,'Tk':Tk,'h':h,'f_H21':f_H21,\\\n 'm':m,'a_He':a_He,'a_C':a_C,'a_N':a_N,'a_O':a_O,'a_Ne':a_Ne,'a_Mg':a_Mg,'a_Si':a_Si,'a_S':a_S,'a_Ca':a_Ca,'a_Fe':a_Fe}\n simgas = pd.DataFrame()\n for key,val in gas_params.items():\n simgas[key] = val\n simgas.to_pickle(d_temp+'sim/z'+'{:.2f}'.format(float(zred))+'_'+galname+'_sim0.gas')\n\n # Default stellar parameters\n Nstars = 1000\n x,y,z,vx,vy,vz = get_rotating_disk(Nstars,R_gal,400)\n SFR = np.zeros(Nstars)+10**(5.45)\n Z = np.zeros(Nstars)+0.1\n m = np.zeros(Nstars)+10**(6)\n age = np.zeros(Nstars)+50.\n star_params = {'x':x,'y':y,'z':z,'vx':vx,'vy':vy,'vz':vz,'SFR':SFR,'Z':Z,'m':m,'age':age}\n simstar = pd.DataFrame()\n for key,val in star_params.items():\n simstar[key] = val\n 
simstar.to_pickle(d_temp+'sim/z'+'{:.2f}'.format(float(zred))+'_'+galname+'_sim0.star')\n\n # Default dark matter parameters\n Ndm = 30000\n x,y,z,vx,vy,vz = get_rotating_disk(Ndm,R_gal,400)\n m = np.zeros(Ndm)+10**(5.45)\n dm_params = {'x':x,'y':y,'z':z,'vx':vx,'vy':vy,'vz':vz,'m':m}\n simdm = pd.DataFrame()\n for key,val in dm_params.items():\n simdm[key] = val\n simdm.to_pickle(d_temp+'sim/z'+'{:.2f}'.format(float(zred))+'_'+galname+'_sim0.dm')\n\n galnames_unsorted = [galname]\n zreds_unsorted = [zred]\n\n return galnames_unsorted,zreds_unsorted", "def make_asimov_significance_plots(self):\n import matplotlib.pyplot as plt\n plt.rcParams['text.usetex'] = True\n outdir = os.path.join(self.outdir, 'Significances')\n mkdir(outdir)\n maintitle = self.make_main_title(\n end='Asimov Analysis Significances',\n end_center=True\n )\n\n # Want to ensure the resulting y range can show all of the plots\n # Therefore find the max and min sig of the whole set of data_sets\n maxsig = None\n minsig = None\n # xrange is easier\n hrange = self.inj_param_vals[-1]-self.inj_param_vals[0]\n xlims = [self.inj_param_vals[0]-0.1*hrange,\n self.inj_param_vals[-1]+0.1*hrange]\n\n for i in range(len(self.data_sets)):\n\n significances = self.deltachi2_significance(\n wh_to_th_metrics=self.wh_to_th[i]['metrics'],\n th_to_wh_metrics=self.th_to_wh[i]['metrics']\n )\n\n truth = self.labels[i][\n self.labels.keys()[0]].dict['data_name'].split('_')[0]\n plotlabel = 'True %s'%self.tex_axis_label(truth)\n \n self.make_1d_graph(\n xvals=self.inj_param_vals,\n yvals=significances,\n xlabel=self.inj_param_name,\n xunits=self.inj_param_units,\n ylabel=None,\n yunits=None,\n marker=self.marker_style(truth),\n color=self.plot_colour(truth),\n plotlabel=plotlabel,\n xlims=xlims\n )\n\n if maxsig is None:\n maxsig = max(significances)\n else:\n maxsig = max(maxsig, max(significances))\n if minsig is None:\n minsig = min(significances)\n else:\n minsig = min(minsig, min(significances))\n\n # Give a more descriptive y-axis label if only one thing being plotted\n if len(self.data_sets) == 1:\n alt = self.labels[\n self.labels.keys()[0]].dict['%s_name'%(\n self.wh_to_th[0]['params']['altfit'])].split('_')[0]\n plt.ylabel(r'%s from %s Significance $\\left(\\sigma\\right)$'%(\n self.tex_axis_label(truth),\n self.tex_axis_label(alt)\n ))\n else:\n plt.ylabel(r'Significance $\\left(\\sigma\\right)$', fontsize=24)\n\n vrange = maxsig - minsig\n plt.ylim(minsig-0.1*vrange, maxsig+0.2*vrange)\n plt.title(maintitle, fontsize=16)\n plt.legend(loc='best')\n plt.tight_layout()\n save_end = \"%s_asimov_significances\"%(self.inj_param_name)\n self.save_plot(outdir=outdir, end=save_end, truth=truth)\n if self.extra_points is not None:\n yminextra, ymaxextra = self.add_extra_points()\n yminall = min(yminextra, minsig)\n ymaxall = max(ymaxextra, maxsig)\n vrange = ymaxall - yminall\n if yminall == 0:\n plt.ylim(yminall, ymaxall+0.2*vrange)\n else:\n plt.ylim(yminall-0.1*vrange, ymaxall+0.3*vrange)\n plt.legend(loc='upper left')\n save_end = \"%s_asimov_significances_w_extra_points\"%(\n self.inj_param_name)\n self.save_plot(outdir=outdir, end=save_end, truth=truth)\n plt.close()", "def makeGeneratorAnalysisSequence( dataType,\n saveCutBookkeepers=False,\n runNumber=0,\n cutBookkeepersSystematics=False ):\n\n if dataType not in [\"mc\", \"afii\"] :\n raise ValueError (\"invalid data type: \" + dataType)\n\n if saveCutBookkeepers and not runNumber:\n raise ValueError (\"invalid run number: \" + 0)\n\n # Create the analysis algorithm sequence 
object:\n seq = AnaAlgSequence( \"GeneratorAnalysisSequence\" )\n\n # Set up the CutBookkeepers algorithm:\n if saveCutBookkeepers:\n alg = createAlgorithm('CP::AsgCutBookkeeperAlg', 'CutBookkeeperAlg')\n alg.runNumber = runNumber\n alg.enableSystematics = cutBookkeepersSystematics\n addPrivateTool( alg, 'truthWeightTool', 'PMGTools::PMGTruthWeightTool' )\n seq.append( alg, inputPropName = None )\n\n # Set up the weights algorithm:\n alg = createAlgorithm( 'CP::PMGTruthWeightAlg', 'PMGTruthWeightAlg' )\n addPrivateTool( alg, 'truthWeightTool', 'PMGTools::PMGTruthWeightTool' )\n alg.decoration = 'generatorWeight_%SYS%'\n alg.decorationRegex = '(^GEN_.*)'\n\n seq.append( alg, inputPropName = 'eventInfo',\n affectingSystematics = '(^GEN_.*)' )\n\n # Return the sequence:\n return seq", "def eeg_gain(self, eeg_file=None):\n LOG.info(\"Computing GainEEG...\")\n eeg_gain = om.GainEEG(self.om_inverse_head, self.om_source_matrix,\n self.om_head2sensor)\n LOG.info(\"eeg_gain: %d x %d\" % (eeg_gain.nlin(), eeg_gain.ncol()))\n if eeg_file is not None:\n LOG.info(\"Saving eeg_gain as %s...\" % eeg_file)\n eeg_gain.save(os.path.join(OM_STORAGE_DIR,\n eeg_file + OM_SAVE_SUFFIX))\n return om.asarray(eeg_gain)", "def compute_GS(GMtcs):\n\n GS = np.mean(GMtcs,axis=0) #average over voxels\n\n return GS", "def geneticAlgorithm(functionName, encoder: Encoding, maxGenerations = 10000, showLogs = True, trial = None, barData={}, writer=None):\n genetic_algorithm = GeneticAlgorithm(encoder=encoder)\n return genetic_algorithm.run(functionName, maxGenerations, showLogs, trial, barData, writer)", "def main():\n\n config = read_json_file(CONFIG_FILE)\n posititve_path = (\n config[\"main\"][\"DATASET_BASE_PATH_DIR\"]\n + config[\"main\"][\"POSITIVE_FILENAME\"]\n )\n negative_path = (\n config[\"main\"][\"DATASET_BASE_PATH_DIR\"]\n + config[\"main\"][\"NEGATIVE_FILENAME\"]\n )\n complexity_factor = config[\"main\"][\"COMPLEXITY_FACTOR\"]\n max_sequences_to_fit_pos = config[\"main\"][\"MAX_SEQUENCES_TO_FIT_POS\"]\n max_sequences_to_fit_neg = config[\"main\"][\"MAX_SEQUENCES_TO_FIT_NEG\"]\n\n input_organisms_path = config[\"main\"][\"INPUT_FILENAME\"]\n mean_nodes = 3.0\n mean_fitness = 150\n positive_dataset = read_fasta_file(posititve_path)\n positive_dataset.sort()\n negative_dataset = read_fasta_file(negative_path)\n print(\"{} {}\".format(len(positive_dataset), len(negative_dataset)))\n\n organism_factory = OrganismFactory(\n config[\"organism\"],\n config[\"organismFactory\"],\n config[\"connector\"],\n config[\"pssm\"],\n )\n\n a_organisms = organism_factory.import_organisms(input_organisms_path)\n # random.shuffle(negativeDataset)\n\n for org in a_organisms:\n\n # org.print()\n nodes = org.count_nodes()\n\n p_1 = org.get_seq_set_fitness(\n positive_dataset[:max_sequences_to_fit_pos]\n )\n n_1 = org.get_seq_set_fitness(\n negative_dataset[:max_sequences_to_fit_neg]\n )\n # p1 = 20\n # n1 = org.getSeqSetFitness(negativeDataset[31:32])\n c_1 = org.get_complexity(mean_nodes, mean_fitness)\n\n # Score\n fitness = p_1 - n_1\n effective_fitness = fitness - complexity_factor * c_1\n print(\n (\n \"ORG {} N: {:.2f} P: {:.2f} N: {:.2f} C: {:.2f} F: {:.2f}\"\n + \" EF: {:.2f}\\n\"\n ).format(org._id, nodes, p_1, n_1, c_1, fitness, effective_fitness)\n )\n\n export_organism(\n org,\n positive_dataset,\n \"{}positive_{}\".format(\n config[\"main\"][\"RESULT_TEST_BASE_PATH_DIR\"], org._id\n ),\n organism_factory,\n )\n # exportOrganism(\n # org,\n # negativeDataset[31:32],\n # 
\"{}negative_{}\".format(config[\"main\"][\"RESULT_TEST_BASE_PATH_DIR\"], org.ID),\n # organismFactory,\n # )\n\n export_organism(\n org,\n negative_dataset[:50],\n \"{}negative_{}\".format(\n config[\"main\"][\"RESULT_TEST_BASE_PATH_DIR\"], org._id\n ),\n organism_factory,\n )", "def build_gan(self):\n # make weights in the discriminator not trainable\n self.d_model.trainable = False\n # get noise and label inputs from generator model\n gen_noise, gen_label = self.g_model.input\n # get image output from the generator model\n gen_output = self.g_model.output\n # connect image output and label input from generator as inputs to discriminator\n gan_output = self.d_model([gen_output, gen_label])\n # define gan model as taking noise and label and outputting a classification\n self.gan_model = Model([gen_noise, gen_label], gan_output)\n # compile model\n opt = Adam(lr=0.0002, beta_1=0.5)\n self.gan_model.compile(loss='binary_crossentropy', optimizer=opt)", "def get_simulation():\n simulation = ba.GISASSimulation()\n simulation.setBeamParameters(12.8*ba.angstrom, 90.0*ba.deg, 0.0*ba.deg)\n simulation.setDetector(get_kws3_detector())\n simulation.setDetectorResolutionFunction(ba.ResolutionFunction2DGaussian(5.0, 5.0))\n simulation.setBeamIntensity(1.0e-4)\n distr_1 = ba.DistributionGaussian(1.28*ba.nm, 0.1)\n simulation.addParameterDistribution(\"*/Beam/Wavelength\", distr_1, 50, 2.0, ba.RealLimits.positive())\n simulation.getOptions().setIncludeSpecular(True)\n return simulation", "def create_isa_study(brapi_study_id):\n brapi_study = get_brapi_study(brapi_study_id)\n this_study = Study(filename=\"s_\" + str(brapi_study_id) + \".txt\")\n this_study.identifier = brapi_study['studyDbId']\n if 'name' in brapi_study:\n this_study.title = brapi_study['name']\n elif 'studyName' in brapi_study:\n this_study.title = brapi_study['studyName']\n\n this_study.comments.append(Comment(name=\"Study Start Date\", value=brapi_study['startDate']))\n this_study.comments.append(Comment(name=\"Study End Date\", value=brapi_study['endDate']))\n if brapi_study['location'] is not None and brapi_study['location']['name'] is not None :\n this_study.comments.append(Comment(name=\"Study Geographical Location\",\n value=brapi_study['location']['name']))\n else:\n this_study.comments.append(Comment(name=\"Study Geographical Location\",value=\"\"))\n\n study_design = brapi_study['studyType']\n oa_st_design = OntologyAnnotation(term=study_design)\n this_study.design_descriptors = [oa_st_design]\n\n oref_tt = OntologySource(name=\"OBI\", description=\"Ontology for Biomedical Investigation\")\n oa_tt = OntologyAnnotation(term=\"phenotyping\", term_accession=\"\", term_source=oref_tt)\n oref_mt = OntologySource(name=\"OBI\", description=\"Ontology for Biomedical Investigation\")\n oa_mt = OntologyAnnotation(term=\"multi-technology\", term_accession=\"\", term_source=oref_mt)\n isa_assay_file = \"a_\" + str(brapi_study_id) + \".txt\"\n this_assay = Assay(measurement_type=oa_tt, technology_type=oa_mt, filename=isa_assay_file)\n this_study.assays.append(this_assay)\n\n return this_study", "def __gia(self, *args, **kwargs):\n pass", "def testSanity(self):\n\t\tga = GA.GA(2,3)\n\t\tgenomes = ga.seedGenomes()\n\t\tself.assertEqual(len(genomes), 2, \n\t\t \"Wrong number of genomes\")\n\t\tself.assertEqual(len(genomes[0]), 3, \n\t\t \"Wrong size in genomes\")\n\t\t#print genomes\n\t\t#live and learn\n\t\tfitnesses = [23, 45]\n\t\tga.fitnessUpdate(fitnesses)\n\t\tgenomes2 = ga.createNextGeneration()\n\t\tself.assertEqual(len(genomes2), 2, 
\n \"Wrong number of genomes\")\n\t\tself.assertEqual(len(genomes2[0]), 3, \n \"Wrong size in genomes\")", "def create_asg(AvailabilityZone):\n lc_name= lib.get_lc_name(stackname, ELBTargetGroupName, AvailabilityZone)\n\n logger.info('Creating launch-config for a new ASG: ' + lc_name)\n userdata='vmseries-bootstrap-aws-s3bucket=' + s3master\n \n try:\n response=asg.create_launch_configuration(LaunchConfigurationName=lc_name, \n ImageId=imageID, KeyName=keyname, SecurityGroups=[sg_untrust], InstanceType=instanceType,\n AssociatePublicIpAddress=False, EbsOptimized=True,\n IamInstanceProfile=iamprofilebs,\n BlockDeviceMappings=[\n {'DeviceName': \"/dev/xvda\", \n 'Ebs': \n {'DeleteOnTermination': True,\n 'VolumeType': 'gp2'\n }\n }\n ],\n UserData=userdata)\n except Exception as e:\n logger.error(\"[ASG LC error]: {}\".format(e))\n return False\n #Get ELB ARN\n tgtGrp = elbv2.describe_target_groups(Names=[ELBTargetGroupName])\n if tgtGrp == None:\n tgtGrp_arn = None\n logger.info('ELB target group is not found!')\n else:\n tgtGrp_d = tgtGrp['TargetGroups']\n tgtGrp_arn = tgtGrp_d[0].get('TargetGroupArn')\n print(\"targetgroup arn: \" + tgtGrp_arn)\n print( \"ELBTargetGroupName: \" +ELBTargetGroupName)\n \n asg_name = lib.get_asg_name(stackname, ELBTargetGroupName, AvailabilityZone)\n logger.info('Creating Auto-Scaling Group with name: ' + asg_name)\n tags={'ResourceId': asg_name, 'ResourceType': 'auto-scaling-group', 'Key': 'Name', 'Value': asg_name, 'PropagateAtLaunch':True}\n \n subnet=lib.choose_subnet(subnetuntrust, AvailabilityZone)\n try:\n response=asg.create_auto_scaling_group(AutoScalingGroupName=asg_name, LaunchConfigurationName=lc_name,\n MinSize=MinInstancesASG, MaxSize=MaximumInstancesASG, DesiredCapacity=MinInstancesASG,\n DefaultCooldown=ScalingPeriod, TargetGroupARNs=[tgtGrp_arn],\n VPCZoneIdentifier=subnet,\n Tags=[tags],\n HealthCheckGracePeriod=900)\n except Exception as e:\n logger.error(\"[ASG create error]: {}\".format(e))\n return False\n \n if create_asg_life_cycle(asg_name, AvailabilityZone) == False:\n return False\n \n scalein=asg_name + '-scalein'\n try:\n response = asg.put_scaling_policy(AutoScalingGroupName=asg_name, PolicyName=scalein, AdjustmentType='ChangeInCapacity',\n ScalingAdjustment=-1, Cooldown=600)\n arn_scalein=response['PolicyARN']\n except Exception as e:\n logger.error(\"[ASG ScaleIn12 Policy]: {}\".format(e))\n return False\n \n scaleout=asg_name + '-scaleout'\n try:\n response = asg.put_scaling_policy(AutoScalingGroupName=asg_name, PolicyName=scaleout, AdjustmentType='ChangeInCapacity',\n ScalingAdjustment=1, Cooldown=600)\n arn_scaleout=response['PolicyARN']\n except Exception as e:\n logger.info(\"[ASG ScaleOut123]: {}\".format(e))\n return False\n \n logger.info('ARN of Scale In and Scale Out: ' + arn_scalein + ' ' + arn_scaleout)\n logger.info('Adding Cloud Watch Alarm : ' + ScalingParameter + ' for ASG: ' + asg_name)\n if cw_func_add_alarms[ScalingParameter](asg_name, arn_scalein, arn_scaleout) == False:\n return False\n \n return True", "def analysis(self, checkid):\r\n return analysis.Analysis(self, checkid)", "def createGene(self):\n # Beginning and end of the alphabet for random gene generation\n Astart = 97\n Zend = 122\n return \"\".join(map(lambda i: chr(random.randint(Astart, Zend)), range(random.randint(4, 8)))).upper()", "def hpa_menu_create():\n global test_HPA\n test_HPA = HistoricPriceAnalyser.create()\n return test_HPA", "def isGAIA_STD(ra=None, dec=None, galb=None, gaiaaen=None, pmra=None, pmdec=None,\n parallax=None, 
parallaxovererror=None, ebv=None, gaiabprpfactor=None,\n gaiasigma5dmax=None, gaiagmag=None, gaiabmag=None, gaiarmag=None,\n gaiadupsource=None, gaiaparamssolved=None,\n primary=None, test=False, nside=2):\n if primary is None:\n primary = np.ones_like(gaiagmag, dtype='?')\n\n # ADM restrict all classes to dec >= -30.\n primary &= dec >= -30.\n std = primary.copy()\n\n # ADM the regular \"standards\" codes need to know whether something has\n # ADM a Gaia match. Here, everything is a Gaia match.\n gaia = np.ones_like(gaiagmag, dtype='?')\n\n # ADM determine the Gaia-based white dwarf standards.\n std_wd = isMWS_WD(\n primary=primary, gaia=gaia, galb=galb, astrometricexcessnoise=gaiaaen,\n pmra=pmra, pmdec=pmdec, parallax=parallax,\n parallaxovererror=parallaxovererror, photbprpexcessfactor=gaiabprpfactor,\n astrometricsigma5dmax=gaiasigma5dmax, gaiagmag=gaiagmag,\n gaiabmag=gaiabmag, gaiarmag=gaiarmag, paramssolved=gaiaparamssolved\n )\n\n # ADM apply the Gaia quality cuts for standards.\n std &= isSTD_gaia(primary=primary, gaia=gaia, astrometricexcessnoise=gaiaaen,\n pmra=pmra, pmdec=pmdec, parallax=parallax,\n dupsource=gaiadupsource, paramssolved=gaiaparamssolved,\n gaiagmag=gaiagmag, gaiabmag=gaiabmag, gaiarmag=gaiarmag)\n\n # ADM restrict to point sources.\n ispsf = gaia_psflike(gaiaaen, gaiagmag)\n std &= ispsf\n\n # ADM de-extinct the magnitudes before applying color cuts.\n gd, bd, rd = unextinct_gaia_mags(gaiagmag, gaiabmag, gaiarmag, ebv)\n\n # ADM apply the Gaia color cuts for standards.\n bprp = bd - rd\n gbp = gd - bd\n std &= bprp > 0.2\n std &= bprp < 0.9\n std &= gbp > -1.*bprp/2.0\n std &= gbp < 0.3-bprp/2.0\n\n # ADM remove any sources that have neighbors in Gaia within 3.5\"...\n # ADM for speed, run only sources for which std is still True.\n log.info(\"Isolating Gaia-only standards...t={:.1f}s\".format(time()-start))\n ii_true = np.where(std)[0]\n if len(ii_true) > 0:\n # ADM determine the pixels of interest.\n theta, phi = np.radians(90-dec), np.radians(ra)\n pixlist = list(set(hp.ang2pix(nside, theta, phi, nest=True)))\n # ADM read in the necessary Gaia files.\n fns = find_gaia_files_hp(nside, pixlist, neighbors=True)\n gaiaobjs = []\n gaiacols = [\"RA\", \"DEC\", \"PHOT_G_MEAN_MAG\", \"PHOT_RP_MEAN_MAG\"]\n for i, fn in enumerate(fns):\n if i % 25 == 0:\n log.info(\"Read {}/{} files for Gaia-only standards...t={:.1f}s\"\n .format(i, len(fns), time()-start))\n try:\n gaiaobjs.append(fitsio.read(fn, columns=gaiacols))\n except OSError:\n if test:\n pass\n else:\n msg = \"failed to find or open the following file: (ffopen) \"\n msg += fn\n log.critical(msg)\n raise OSError\n gaiaobjs = np.concatenate(gaiaobjs)\n # ADM match the standards to the broader Gaia sources at 3.5\".\n matchrad = 3.5*u.arcsec\n cstd = SkyCoord(ra[ii_true]*u.degree, dec[ii_true]*u.degree)\n cgaia = SkyCoord(gaiaobjs[\"RA\"]*u.degree, gaiaobjs[\"DEC\"]*u.degree)\n idstd, idgaia, d2d, _ = cgaia.search_around_sky(cstd, matchrad)\n # ADM remove source matches with d2d=0 (i.e. 
the source itself!).\n idgaia, idstd = idgaia[d2d > 0], idstd[d2d > 0]\n # ADM remove matches within 5 mags of a Gaia source.\n badmag = (\n (gaiagmag[ii_true][idstd] + 5 > gaiaobjs[\"PHOT_G_MEAN_MAG\"][idgaia]) |\n (gaiarmag[ii_true][idstd] + 5 > gaiaobjs[\"PHOT_RP_MEAN_MAG\"][idgaia]))\n std[ii_true[idstd][badmag]] = False\n\n # ADM add the brightness cuts in Gaia G-band.\n std_bright = std.copy()\n std_bright &= gaiagmag >= 16\n std_bright &= gaiagmag < 18\n\n std_faint = std.copy()\n std_faint &= gaiagmag >= 16\n std_faint &= gaiagmag < 19\n\n return std_faint, std_bright, std_wd", "def write_seisan(filename, args):\n bf = BaikalFile(filename)\n if not bf.valid:\n print(\"Invalid file {}\".format(filename))\n return\n header = bf.MainHeader\n # datetime\n date = datetime.datetime(header[\"year\"], header[\"month\"], header[\"day\"])\n delta = datetime.timedelta(seconds=header[\"to\"])\n dt = date + delta\n _time = dt.time() # time\n # make utc datetime\n utcdatetime = UTCDateTime(date.year, date.month, date.day,\n _time.hour, _time.minute, _time.second, _time.microsecond, precision=3)\n bf.traces = bf.traces.astype(np.int32)\n bf.traces = bf.traces[:3]\n traces = []\n for channel, data in zip(CHANNELS, bf.traces):\n stats = DEFAULT_STATS.copy()\n stats.update({\n \"station\": header['station'].upper()[:3],\n 'channel': channel,\n 'sampling_rate': int( 1./header[\"dt\"] ),\n \"delta\": header[\"dt\"],\n \"npts\": data.size,#shape[0]\n 'starttime': utcdatetime,\n })\n # save coordinates\n stats['gse2'][\"lat\"] = header['latitude']\n stats['gse2'][\"lon\"] = header[\"longitude\"]\n trace = Trace(data=data, header=stats)\n traces.append(trace)\n # create Stream\n stream = Stream(traces)\n #== write seisan\n # date\n name = \"{year:04}-{month:02}-{day:02}\".format(**header)\n # time\n name += \"-{t.hour:02}-{t.minute:02}\".format(t=stats['starttime'])\n # + station name + Day_of_Year\n name += \"{0}__{1:03}\".format(stats[\"station\"], stats['starttime'].timetuple().tm_yday)\n print('Writing GSE2 file %s.' 
% name)\n writeGSE2(stream, os.path.join(args.outdir, name))", "def generate_graph(\n locs_user, sp_user, study, trips_user=None, gap_threshold=None\n):\n # print(sp_user[\"location_id\"])\n # print(locs_user)\n AG = ActivityGraph(\n sp_user,\n locs_user,\n trips=trips_user,\n gap_threshold=gap_threshold,\n )\n # Add purpose feature\n if study == \"geolife\" or study == \"yumuv_graph_rep\":\n AG.add_node_features_from_staypoints(\n sp_user, agg_dict={\"started_at\": list, \"finished_at\": list}\n )\n else:\n AG.add_node_features_from_staypoints(\n sp_user,\n agg_dict={\n \"started_at\": list,\n \"finished_at\": list,\n \"purpose\": list,\n },\n )\n return AG", "def main():\n # Model setup\n source = np.array([1500, 8, 10, 5]) # assume source concentration and 3D coordinates\n u, pg_stability = 2, 'F' # setup environment\n sample_path = r\"data/ObservedData.csv\"\n # Build model object\n func = GaussianPlumeEAAI(lower=(10, -500, -500, 0), upper=(5000, 500, 500, 10), u=u,\n pg_stability=pg_stability, sample_path=sample_path)\n # Generate sample observed data\n func.generate_observed_data(source[0], source[1], source[2], source[3])\n\n # Reverse search source use observed data and PSO (assume unknown the source)\n pso_search_with_recommended_param(func)\n pso_search_with_optimized_param(func)", "def single_analysis(config, name):\n # graphviz = GephiOutput()\n graphviz = GraphvizOutput()\n graphviz.output_file = name\n\n print \"Preparing test case...\"\n radio, lines = _prepare_test_case()\n\n print \"Running test case...\"\n with PyCallGraph(output=graphviz, config=config):\n _run_test_case(radio, lines)", "def create(self):\n # TODO: Properly validate data\n self._proj()\n if self.cfg.align_heading:\n self._align()\n self._griddata()\n if self.cfg.gap_filter[\"algorithm\"] != \"none\":\n self._gap_filter()", "def create_asa(self):\n self.asa_id = blockchain_utils.create_algorand_standard_asset(client=self.client,\n creator_private_key=self.app_creator_pk,\n unit_name=self.asa_unit_name,\n asset_name=self.asa_asset_name,\n total=1,\n decimals=0,\n manager_address=self.app_creator_address,\n reserve_address=self.app_creator_address,\n freeze_address=self.app_creator_address,\n clawback_address=self.app_creator_address,\n default_frozen=True)", "def eta_ ( self ) :\n from GaudiConfUtils.ConfigurableGenerators import FilterDesktop\n from StandardParticles import StdLooseEta2gg as inpts \n ##\n return self.make_selection (\n 'EtaStd' ,\n FilterDesktop ,\n [ inpts ] ,\n Code = self['Eta2ggCut'] ,\n )", "def etap2rhog ( self ) :\n from GaudiConfUtils.ConfigurableGenerators import DaVinci__N3BodyDecays \n pre_etap = self.make_selection (\n ## the unique tag \n 'PreEtapRhoG' ,\n ## algorithm type to be used\n DaVinci__N3BodyDecays ,\n ## input selections \n [ self.pions () , self.gamma () ] ,\n ##\n DecayDescriptor = \" eta_prime -> pi+ pi- gamma\" ,\n ##\n DaughtersCuts = { 'gamma' : self['GammaCut'] } ,\n ## \n Combination12Cut = \"\"\" ( AM < 950 * MeV ) &\n ( ACHI2DOCA(1,2) < 12 ) \n \"\"\" , \n CombinationCut = \"\"\"\n ( APT > %s ) & \n in_range ( 500 * MeV , AM12 , 950 * MeV ) & \n ( ADAMASS ( 'eta_prime' ) < 100 * MeV ) \n \"\"\" % ( 0.9 * self['ETAP_PT'] ),\n ##\n MotherCut = \"\"\"\n ( PT > %s ) &\n ( chi2vx < 9 )\n \"\"\" % self['ETAP_PT'] \n )\n ## \n from GaudiConfUtils.ConfigurableGenerators import Pi0Veto__Tagger\n ## \n return self.make_selection (\n 'Etap2rhogamma' ,\n Pi0Veto__Tagger ,\n [ pre_etap ] ,\n MassWindow = 25 * MeV ,\n MassChi2 = -1 ,\n ExtraInfoIndex = 25017 ## unique 
! \n )", "def __init__(self, input=None, output=None, \\\n Gxx=None, Gyy=None, Gxy=None, ave=False):\n self.input=input\n self.output=output\n self.Gxx=Gxx\n self.Gyy=Gyy\n self.Gxy=Gxy\n self.ave=ave", "def script_generator(self):\n analyze_tool = \"/home/haihuam/Projects/RepPoints/mmdetection/tools/analyze_logs.py\"\n ex_options = self.global_setting.get('analyze_options', str())\n py = self.global_setting.get('python', sys.executable)\n if os.access(py, os.X_OK):\n content = \"set -e \\n\" \n content += \"cd %s \\n\"%(self.run_dir)\n content += \"%s %s plot_curve *.log.json \"%(py, analyze_tool)\n content += \"--keys loss loss_cls loss_pts_init \"\n content += \"loss_pts_refine \"\n content += \"--out losses.pdf %s &> analyze.log \\n\"%(ex_options)\n\n content += \"touch analyze.done \\n\"\n self.script_content = content\n else:\n print(\"Error: %s is not executable.\"%py)\n sys.exit(0)", "def gw_heritability(\n input_snp_filename: \"Data Input, use the SNPs file from dataParse\",\n output_summary_filename: 'output file for the genomewide results summary, use .csv',\n logger_filename: 'file for the logger, use a txt',\n sweeps: \"number of samples for each chain\" = 1000,\n burnin: \"number of burnin samples\" = 1000,\n n_chains: \"number of chains of the sampler\" = 4,\n n_cores: \"number of parallel cores to use\" = 4,\n N_1kG: \"number of SNPs onwhich the LD-score is calculates\" = 1290028,\n chromosome: \"chromosome on which the analysis is run\" = \"all\",\n sep: \"separator for the input files, use t for tab separated (not \\t)\" = \",\",\n model: 'regression model'='normal',\n fix_intercept = False,\n ):\n\n # Initialisation of the logger\n output_logger = log.setup_logger(\"output_logger\", logger_filename)\n log.initialise_log(output_logger,\n 'genome-wide regression, model: %s' %model,\n [input_snp_filename],\n [output_summary_filename],\n sweeps,\n burnin,\n chromosome = str(chromosome),\n other_params_diz = {'chains': n_chains, 'cores': n_cores})\n\n # Initialisation function, it reads the summary stats file, filters the SNPs,\n # creates the output files\n\n logging.info(\"Start Analysis\")\n\n snps = s.Snps()\n # read table\n snps.read_table(input_snp_filename, separator=sep)\n # generate chi squared stats\n snps.generate_stats()\n # update the summary stats\n snps.update_summary()\n output_logger.info(\" Sample size \" + str(snps.n_patients) + \"\\n\")\n\n\n snps.apply_filter_table(s.baghera_filter)\n snps.update_summary()\n output_logger.info(\"After baghera init filter.\\nNumber of SNPs: %s\\nNumber of genes: %s\\n\" \\\n %(str(snps.n_snps), str(snps.n_genes)) )\n\n # Non coding SNPs are assigned to a dummy gene, such that the regression is done on the entire SNPs' set\n snps.rename_non_annotated(name='NonCoding')\n\n if chromosome != \"all\":\n snps.apply_filter_table(snps.cut_single_chrom, **{'chromosome': chromosome})\n output_logger.info(\n \"Analysis restricted to chr %s\" %str(chromosome) )\n\n snps.update_summary()\n output_logger.info(\"Analysis. 
Number of SNPs: %s\\n, Number of genes: %s\\n\" \\\n %(str(snps.n_snps), str(snps.n_genes)) )\n\n\n if model =='normal':\n [intercept, slope] = heritability.gw_normal(snps, output_summary_filename, output_logger,\n sweeps, burnin, n_chains, n_cores, N_1kG, fix_intercept)\n elif model=='gamma':\n [intercept, slope] = heritability.gw_normal(snps, output_summary_filename, output_logger,\n sweeps, burnin, n_chains, n_cores, N_1kG, fix_intercept)\n else:\n logging.info('Normal model by default')\n [intercept, slope] = heritability.gw_normal(snps, output_summary_filename, output_logger,\n sweeps, burnin, n_chains, n_cores, N_1kG, fix_intercept)\n logging.info(\"Analysis complete\")", "def create_parser():\n\n parser = argparse.ArgumentParser(description='Squeesar simulation')\n parser.add_argument('-v', '--version', action='version', version='%(prog)s 0.1')\n parser.add_argument('-l', '--lambda', dest='lamda', type=float, default=56.0, help='Sensor wavelength (mm)')\n parser.add_argument('-ns', '--nshp', dest='n_shp', type=int, default=300,help='Number of neighbouring samples')\n parser.add_argument('-ni', '--nimg', dest='n_img', type=int, default=100, help='Number of images')\n parser.add_argument('-dd', '--decorr_days', dest='decorr_days', type=int, default=50, help='Decorrelatopn days')\n parser.add_argument('-tb', '--tmp_bl', dest='tmp_bl', type=int, default=6, help='Temporal baseline')\n parser.add_argument('-dr', '--def_rate', dest='deformation_rate', type=float, default=1 , help='Linear deformation rate. -- Default : 1 mm/y')\n parser.add_argument('-st', '--signal_type', dest='signal_type', type=str, default='linear',\n help = '(linear or nonlinear) deformation signal')\n parser.add_argument('-nr', '--n_sim', dest='n_sim', type=int, default=1000, help='Number of simulation')\n parser.add_argument('--plot', action='store_true', dest='plot_mat', default=False,\n help='plot and save coherence matrix')\n parser.add_argument('--se', action='store_true', dest='seasonality', default=False,\n help='plot and save coherence matrix')\n\n\n return parser", "def createGeneProduct(self):\n return _libsbml.FbcModelPlugin_createGeneProduct(self)", "def createAnalysis(self, study_id):\n try:\n con = self.getSFFDatabaseConnection()\n analysis_id=0\n db_output=con.cursor().callproc('create_analysis',\n [study_id,analysis_id])\n return db_output[1]\n except Exception, e:\n print 'Exception caught: %s.\\nThe error is: %s' % (type(e), str(e))\n return False", "def run(self) -> SpreadSheetGraph:\n logger.debug(\"Running Genetic Search...\")\n self.initialize()\n\n for generation in range(self.configuration.n_gen):\n logger.debug(f\"Generation {generation}\")\n\n children: List[IndividualType] = []\n for _ in range(self.configuration.n_offspring):\n children.append(self.child_from_population())\n\n self.update_hall_of_fame(children)\n\n # Total generation population\n total_generation_population = self._population + children\n self._population = self.tournament_selection(total_generation_population)\n\n logger.debug(f\"Best individual: {self._hof_individual[0]}\")\n logger.debug(f\"Best rating: {self._hof_individual[1]}\")\n self.graph.edge_toggle_list = self._hof_individual[0]\n return self.graph", "def createGene(self, reference=\"\"):\n return _libsbml.Association_createGene(self, reference)", "def _create_ga_plots(ga_agent, output_directory):\n\n # create trace for plot\n makespans_traces, makespans_layout = _make_ga_traces(ga_agent)\n\n # create plot\n plot(dict(data=makespans_traces, 
layout=makespans_layout),\n filename=str(output_directory / 'ga_makespans.html'),\n auto_open=False)\n\n # create schedule\n ga_agent.best_solution.create_schedule_xlsx_file(str(output_directory / 'ga_schedule'), continuous=True)\n ga_agent.best_solution.create_gantt_chart_html_file(str(output_directory / 'ga_gantt_chart.html'), continuous=True)", "def plot_gene(adata, ax, x, y, type='gene', x_test=None, x_mean=None, x_cov=None, x_grad=None,\n scatter_kwgs=None):\n\n # basic scatter plot\n if scatter_kwgs is not None:\n ax = scv.pl.scatter(adata, x=x, y=y, ax=ax, show=False, **scatter_kwgs)\n else:\n ax = scv.pl.scatter(adata, x=x, y=y, ax=ax, show=False)\n ax.set_title(\" \")\n\n if x_test is not None:\n # add smoothed expression valued\n if x_mean is not None:\n ax.plot(x_test, x_mean, '-', color='orange', lw=3,\n label='Smoothed {} expression values'.format(type))\n # add covariance\n if x_cov is not None:\n ax.fill_between(x_test.flatten(), x_mean - np.sqrt(np.diag(x_cov)),\n x_mean + np.sqrt(np.diag(x_cov)),\n alpha=0.5, color='k')\n # add the derivative\n if x_grad is not None:\n ax.plot(x_test, x_grad, '--', color='orange', lw=3,\n label='Derivative of gene expression')\n\n ax.set_ylabel('{} expression'.format(type), fontsize=10)\n ax.set_xticks([])\n plt.legend(fontsize=10)", "def initialize_analyticsreporting():\n # Parse command-line arguments.\n parser = argparse.ArgumentParser(\n formatter_class=argparse.RawDescriptionHelpFormatter,\n parents=[tools.argparser])\n flags = parser.parse_args([])\n\n # Set up a Flow object to be used if we need to authenticate.\n flow = client.flow_from_clientsecrets(\n CLIENT_SECRETS_PATH, scope=SCOPES,\n message=tools.message_if_missing(CLIENT_SECRETS_PATH))\n\n # Prepare credentials, and authorize HTTP object with them.\n # If the credentials don't exist or are invalid run through the native client\n # flow. The Storage object will ensure that if successful the good\n # credentials will get written back to a file.\n storage = file.Storage('./config/analyticsreporting.dat')\n credentials = storage.get()\n if credentials is None or credentials.invalid:\n credentials = tools.run_flow(flow, storage, flags)\n http = credentials.authorize(http=Http())\n\n # Build the service object.\n analytics = build('analytics', 'v4', http=http, discoveryServiceUrl=DISCOVERY_URI)\n\n return analytics", "def SED(**kwargs):\n\n p = copy.copy(params)\n for key,val in kwargs.items():\n setattr(p,key,val)\n\n GR = glo.global_results()\n gal_num = getattr(GR,'gal_num')[p.gal_index]\n\n # Look for powderday SED\n found = True\n\n # Load MIRI filter curves\n MIRI = pd.read_csv('look-up-tables/observations/ImPCE_TN-00072-ATC-Iss2.txt',sep='\\t',skiprows=2,\\\n names=['Wave','F560W','F770W','F1000W','F1280W','F1130W','F1500W','F1800W','F2100W','F2550W'])\n\n # gal_num=12\n # file_location = p.d_data + 'pd_data/%s/sed_%i' % (p.sim_run,gal_num)\n # pd_data = pickle.load(open(file_location,'rb'), encoding='latin1')\n # wav = np.array(pd_data[0,:])[0]\n # flux = np.array(pd_data[1,:])[0]\n try:\n file_location = p.d_data + 'pd_data/%s/sed_%i' % (p.sim_run,gal_num)\n pd_data = pickle.load(open(file_location,'rb'), encoding='latin1')\n wav = np.array(pd_data[0,:])[0]\n flux = np.array(pd_data[1,:])[0]\n print('Found powderday output found for gal_index = %i (gal_num = %i)!' % (p.gal_index,gal_num))\n except:\n print('No powderday output found for gal_index = %i (gal_num = %i)!' 
% (p.gal_index,gal_num))\n found = False\n\n if p.select == 'AGN': \n try:\n file_location = p.d_data + 'pd_data/%s/sed_%i_agn' % (p.sim_run,gal_num)\n pd_data = pickle.load(open(file_location,'rb'), encoding='latin1')\n wav_agn = np.array(pd_data[0,:])[0]\n flux_agn = np.array(pd_data[1,:])[0]\n except:\n if found: print('no AGN spectra for gal_num %i' % gal_num)\n\n if found == True:\n wav_lines = []\n tot_flux = [] \n if p.select == 'AGN': tot_flux_agn = [] \n for line in p.lines:\n\n L_line = getattr(GR,'L_%s_sun' % line)[p.gal_index]\n D_L = getattr(GR,'lum_dist')[p.gal_index]\n\n L_line_Jy_km_s = aux.Lsun_to_Jy_km_s(L_line,D_L,line)\n\n freq = p.freq[line]\n\n wav_line = c.c.value / (freq*1e9) * 1e6 # microns\n\n if wav_line < np.max(wav):\n flux[np.argmin(np.abs(wav-wav_line))] += L_line_Jy_km_s\n\n if p.select == 'AGN': \n try: \n flux_agn[np.argmin(np.abs(wav-wav_line))] += L_line_Jy_km_s\n except:\n pass\n\n wav_lines += [wav_line]\n tot_flux += [flux[np.argmin(np.abs(wav-wav_line))]]\n\n if p.select == 'AGN': \n try: \n tot_flux_agn += [flux_agn[np.argmin(np.abs(wav-wav_line))]]\n except:\n pass\n\n fig,ax = plt.subplots(figsize=(12,6))\n # Show MIRI band\n ax.fill_between([5,28],[1e10,1e10],color='forestgreen',alpha=0.4)\n ax.loglog(wav,flux,'-',lw=2,label='Modeled spectrum\\nof $z=0$ simulated galaxy')\n try: \n ax.loglog(wav,flux_agn,'-',color='r',lw=2,label='with AGN')\n except:\n pass\n ax.set_xlabel(r'$\\lambda$ [$\\mu$m]')\n ax.set_ylabel('Flux (mJy)')\n ax.set_ylim([np.max(flux)*5/1e5,np.max(flux)*5.5])\n ax.set_xlim(1,10**3.1)\n\n cmap = plt.get_cmap('gist_rainbow_r')\n cmap = plt.get_cmap('brg')\n tot_flux = np.array(tot_flux)[wav_lines < np.max(wav)]\n line_names = np.array(p.lines)[wav_lines < np.max(wav)]\n wav_lines = np.array(wav_lines)[wav_lines < np.max(wav)]\n tot_flux = tot_flux[wav_lines.argsort()]\n line_names = line_names[wav_lines.argsort()]\n wav_lines = wav_lines[wav_lines.argsort()]\n colors = [cmap(i) for i in np.linspace(0, 1, len(wav_lines))]\n for i in range(len(wav_lines)):\n print(line_names[i],wav_lines[i])\n ax.plot(wav_lines[i],tot_flux[i],'x',mew=2,ms=5,color=colors[i])#,label=line_names[i])\n # ax.text(wav_lines[i]*0.8,tot_flux[i],line_names[i],fontsize=10,color=colors[i])\n if line_names[i] in ['H2_S(1)','[NeII]12','[FeII]25','[OI]63','[CII]158','[CI]370','[CI]610','CO(3-2)']:\n ax.text(wav_lines[i]*0.8,tot_flux[i]*3.5,line_names[i],fontsize=10,color=colors[i])\n ax.plot([wav_lines[i],wav_lines[i]],[tot_flux[i],tot_flux[i]*3],'--',lw=1,color=colors[i])\n if line_names[i] in ['H2_S(6)','H2_S(4)','H2_S(6)','[NII]122','[NII]205','[SIII]18']:\n ax.text(wav_lines[i]*0.8,tot_flux[i]*6.5,line_names[i],fontsize=10,color=colors[i])\n ax.plot([wav_lines[i],wav_lines[i]],[tot_flux[i],tot_flux[i]*6],'--',lw=1,color=colors[i])\n if line_names[i] in ['[OIV]25','[OIII]88']:\n ax.text(wav_lines[i]*0.8,tot_flux[i]/4.,line_names[i],fontsize=10,color=colors[i])\n ax.plot([wav_lines[i],wav_lines[i]],[tot_flux[i],tot_flux[i]/3],'--',lw=1,color=colors[i])\n if line_names[i] in ['[NeIII]15']:\n ax.text(wav_lines[i]*0.8,tot_flux[i]/6.5,line_names[i],fontsize=10,color=colors[i])\n ax.plot([wav_lines[i],wav_lines[i]],[tot_flux[i],tot_flux[i]/5],'--',lw=1,color=colors[i])\n if line_names[i] in ['[OI]145','H2_S(5)','H2_S(3)','H2_S(2)','H2_S(7)']:\n ax.text(wav_lines[i]*0.8,tot_flux[i]/9.,line_names[i],fontsize=10,color=colors[i])\n ax.plot([wav_lines[i],wav_lines[i]],[tot_flux[i],tot_flux[i]/7],'--',lw=1,color=colors[i])\n\n ax.legend(fontsize=13,fancybox=True, 
framealpha=0.5)\n\n print(MIRI.head())\n for f in MIRI.keys():\n if f != 'Wave':\n ax.fill_between(MIRI['Wave'].values,MIRI[f].values*1e5,alpha=0.6)\n ax.text(30,1e4,'JWST/MIRI filter curves',fontsize=15,color='steelblue')\n\n if p.savefig:\n if not os.path.isdir(p.d_plot + 'SEDs/'): os.mkdir(p.d_plot + 'SEDs/') \n plt.savefig(p.d_plot + 'SEDs/sed_%s%s_%i.png' % (p.sim_name,p.sim_run,p.gal_index), format='png', dpi=300) \n\n # plt.close('all')", "def modified_eaSimple(population, toolbox, cxpb, mutpb, stats=None,\n halloffame=None, trace_file=None, time_limit=None):\n start_time = time.perf_counter()\n logbook = tools.Logbook()\n logbook.header = ['gen', 'nevals'] + (stats.fields if stats else [])\n\n # Evaluate the individuals with an invalid fitness\n invalid_ind = [ind for ind in population if not ind.fitness.valid]\n fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)\n for ind, fit in zip(invalid_ind, fitnesses):\n ind.fitness.values = fit\n\n if halloffame is not None:\n halloffame.update(population)\n\n record = stats.compile(population) if stats else {}\n logbook.record(gen=0, nevals=len(invalid_ind), **record)\n \n \n population.sort(key=lambda x: x.fitness, reverse=True)\n\n fitness_list = str(population[0].fitness)[1:-1].split(\", \")\n curr_best_fitness = float(fitness_list[0]) + float(fitness_list[1])\n\n output = open(trace_file, 'w')\n curr_time = time.perf_counter()\n running_time = curr_time - start_time\n output.write(str(running_time) + \", \" + str(int(float(fitness_list[1]))) + \"\\n\")\n\n random.shuffle(population)\n # Begin the generational process\n gen = 1\n while running_time < time_limit:\n # Select the next generation individuals\n offspring = toolbox.select(population, len(population))\n\n # Vary the pool of individuals\n offspring = algorithms.varAnd(offspring, toolbox, cxpb, mutpb)\n\n # Evaluate the individuals with an invalid fitness\n invalid_ind = [ind for ind in offspring if not ind.fitness.valid]\n fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)\n for ind, fit in zip(invalid_ind, fitnesses):\n ind.fitness.values = fit\n\n # Update the hall of fame with the generated individuals\n if halloffame is not None:\n halloffame.update(offspring)\n\n # Replace the current population by the offspring\n population[:] = offspring\n\n # Append the current generation statistics to the logbook\n record = stats.compile(population) if stats else {}\n logbook.record(gen=gen, nevals=len(invalid_ind), **record)\n \n curr_time = time.perf_counter()\n running_time = curr_time - start_time\n \n population.sort(key=lambda x: x.fitness, reverse=True)\n fitness_list = str(population[0].fitness)[1:-1].split(\", \")\n gen_best_fitness = float(fitness_list[0]) + float(fitness_list[1])\n if gen_best_fitness < curr_best_fitness:\n curr_best_fitness = gen_best_fitness\n output.write(str(running_time) + \", \" + str(int(float(fitness_list[1]))) + \"\\n\")\n\n random.shuffle(population)\n\n gen += 1\n\n return population, logbook", "def initialize_analyticsreporting():\n # Parse command-line arguments.\n parser = argparse.ArgumentParser(\n formatter_class=argparse.RawDescriptionHelpFormatter,\n parents=[tools.argparser])\n flags = parser.parse_args([])\n\n # Set up a Flow object to be used if we need to authenticate.\n flow = client.flow_from_clientsecrets(\n CLIENT_SECRETS_PATH, scope=SCOPES,\n message=tools.message_if_missing(CLIENT_SECRETS_PATH))\n\n # Prepare credentials, and authorize HTTP object with them.\n # If the credentials don't exist or are invalid run through the 
native client\n # flow. The Storage object will ensure that if successful the good\n # credentials will get written back to a file.\n storage = file.Storage('analyticsreporting.dat')\n credentials = storage.get()\n if credentials is None or credentials.invalid:\n credentials = tools.run_flow(flow, storage, flags)\n http = credentials.authorize(http=httplib2.Http())\n\n # Build the service object.\n analytics = build('analytics', 'v4', http=http, discoveryServiceUrl=DISCOVERY_URI)\n\n return analytics", "def analyse(self):\n pass", "def analysis(self):\r\n return analysis.Analysis(self.parent, self.object_id)", "def get_gene_snATAC(ensemble, gene, grouping, outliers, smoothing=False, max_points='10000', \n\tmodality='snATAC'):\n\tmodalityu = modality.replace('snATAC','ATAC').replace('snRNA','RNA')\n\n\t# Prevent SQL injected since column names cannot be parameterized.\n\tif \";\" in ensemble or \";\" in grouping:\n\t\treturn None\n\n\t# This query is just to fix gene id's missing the ensemble version number.\n\t# Necessary because the table name must match exactly with whats on the MySQL database.\n\t# Ex. ENSMUSG00000026787 is fixed to ENSMUSG00000026787.3 -> gene_ENSMUSG00000026787_3 (table name in MySQL)\n\tresult = db.get_engine(current_app, modality+'_data').execute(\"SELECT gene_id FROM genes WHERE gene_id LIKE %s\", (gene+\"%\",)).fetchone()\n\tgene_table_name = 'gene_' + result['gene_id'].replace('.','_')\n\n\tif smoothing and (modalityu=='ATAC'):\n\t\tcounts_type='smoothed_normalized_counts'\n\telse:\n\t\tcounts_type='normalized_counts'\n\n\tquery = \"SELECT cells.cell_id, cells.cell_name, cells.dataset, \\\n\t\t%(ensemble)s.annotation_%(modality)s, %(ensemble)s.cluster_%(modality)s, \\\n\t\t%(ensemble)s.tsne_x_%(modality)s, %(ensemble)s.tsne_y_%(modality)s, \\\n\t\t%(gene_table_name)s.%(counts_type)s as normalized_counts, \\\n\t\tdatasets.target_region \\\n\t\tFROM cells \\\n\t\tINNER JOIN %(ensemble)s ON cells.cell_id = %(ensemble)s.cell_id \\\n\t\tLEFT JOIN %(gene_table_name)s ON %(ensemble)s.cell_id = %(gene_table_name)s.cell_id \\\n\t\tLEFT JOIN datasets ON cells.dataset = datasets.dataset\" % {'ensemble': ensemble,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t'gene_table_name': gene_table_name,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t'counts_type': counts_type,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t'modality': modalityu}\n\n\tif max_points.isdigit():\n\t\tquery = query+\" ORDER BY RAND() LIMIT %(max_points)s\" % {'max_points': max_points}\n\n\ttry:\n\t\tdf = pd.read_sql(query, db.get_engine(current_app, '%s_data' % modality))\n\texcept exc.ProgrammingError as e:\n\t\tnow = datetime.datetime.now()\n\t\tprint(\"[{}] ERROR in app(get_gene_snATAC): {}\".format(str(now), e))\n\t\tsys.stdout.flush()\n\t\treturn None\n\n\tif df.empty: # If no data in column, return None\n\t\tnow = datetime.datetime.now()\n\t\tprint(\"[{}] ERROR in app(get_gene_snATAC): No snATAC data for {}\".format(str(now), ensemble))\n\t\tsys.stdout.flush()\n\t\treturn None\n\n\tif grouping == 'annotation':\n\t\tdf.fillna({'annotation_%s' % modality: 'None'}, inplace=True)\n\t\tdf['annotation_cat'] = pd.Categorical(df['annotation_%s' % modality], cluster_annotation_order)\n\t\tdf.sort_values(by='annotation_cat', inplace=True)\n\t\tdf.drop('annotation_cat', axis=1, inplace=True)\n\telif grouping == 'cluster':\n\t\tdf.sort_values(by='cluster_%s' % modality, inplace=True)\n\n\tdf['normalized_counts'].fillna(0, inplace=True)\n\n\treturn df", "def gaus(x, A, mu, sigma):\n return A * np.exp(-(x - mu) ** 2 / (2. 
* sigma ** 2))", "def build_ensemble(obs_space: Box, action_space: Box, spec: EnsembleSpec) -> SME:\n models = [build(obs_space, action_space, spec) for _ in range(spec.ensemble_size)]\n cls = ForkedSME if spec.parallelize else SME\n ensemble = cls(models)\n return ensemble", "def __init__(\n self,\n width=20,\n height=20,\n initial_sheep=100,\n initial_wolves=50,\n sheep_reproduce=0.04,\n wolf_reproduce=0.05,\n wolf_gain_from_food=20,\n grass=False,\n grass_regrowth_time=30,\n sheep_gain_from_food=4,\n ):\n super().__init__()\n # Set parameters\n self.width = width\n self.height = height\n self.initial_sheep = initial_sheep\n self.initial_wolves = initial_wolves\n self.sheep_reproduce = sheep_reproduce\n self.wolf_reproduce = wolf_reproduce\n self.wolf_gain_from_food = wolf_gain_from_food\n self.grass = grass\n self.grass_regrowth_time = grass_regrowth_time\n self.sheep_gain_from_food = sheep_gain_from_food\n\n self.schedule = RandomActivationByTypeFiltered(self)\n self.grid = mesa.space.MultiGrid(self.width, self.height, torus=True)\n self.datacollector = mesa.DataCollector(\n {\n \"Wolves\": lambda m: m.schedule.get_type_count(Wolf),\n \"Sheep\": lambda m: m.schedule.get_type_count(Sheep),\n \"Grass\": lambda m: m.schedule.get_type_count(\n GrassPatch, lambda x: x.fully_grown\n ),\n }\n )\n\n # Create sheep:\n for i in range(self.initial_sheep):\n x = self.random.randrange(self.width)\n y = self.random.randrange(self.height)\n energy = self.random.randrange(2 * self.sheep_gain_from_food)\n sheep = Sheep(self.next_id(), (x, y), self, True, energy)\n self.grid.place_agent(sheep, (x, y))\n self.schedule.add(sheep)\n\n # Create wolves\n for i in range(self.initial_wolves):\n x = self.random.randrange(self.width)\n y = self.random.randrange(self.height)\n energy = self.random.randrange(2 * self.wolf_gain_from_food)\n wolf = Wolf(self.next_id(), (x, y), self, True, energy)\n self.grid.place_agent(wolf, (x, y))\n self.schedule.add(wolf)\n\n # Create grass patches\n if self.grass:\n for agent, (x, y) in self.grid.coord_iter():\n fully_grown = self.random.choice([True, False])\n\n if fully_grown:\n countdown = self.grass_regrowth_time\n else:\n countdown = self.random.randrange(self.grass_regrowth_time)\n\n patch = GrassPatch(self.next_id(), (x, y), self, fully_grown, countdown)\n self.grid.place_agent(patch, (x, y))\n self.schedule.add(patch)\n\n self.running = True\n self.datacollector.collect(self)", "def create_ast(client: TypeAny) -> Globals:\n ast = Globals(client=client)\n\n modules: TypeList[TypeTuple[str, TypeAny]] = [\n (\"xgboost\", xgb),\n (\"xgboost.core\", xgb.core),\n (\"xgboost.sklearn\", xgb.sklearn),\n ]\n\n classes: TypeList[TypeTuple[str, str, TypeAny]] = [\n (\"xgboost.DMatrix\", \"xgboost.DMatrix\", xgb.core.DMatrix),\n (\"xgboost.core.DMatrix\", \"xgboost.core.DMatrix\", xgb.core.DMatrix),\n (\"xgboost.core.Booster\", \"xgboost.core.Booster\", xgb.core.Booster),\n (\n \"xgboost.core.XGBoostError\",\n \"xgboost.core.XGBoostError\",\n xgb.core.XGBoostError,\n ),\n # classifiers\n (\"xgboost.XGBClassifier\", \"xgboost.XGBClassifier\", xgb.XGBClassifier),\n (\"xgboost.XGBRFClassifier\", \"xgboost.XGBRFClassifier\", xgb.XGBRFClassifier),\n # (\"xgboost.dask.DaskXGBRFClassifier\"), Currently dask is not supported in syft\n # regreessors\n (\"xgboost.XGBRegressor\", \"xgboost.XGBRegressor\", xgb.XGBRegressor),\n (\"xgboost.XGBRFRegressor\", \"xgboost.XGBRFRegressor\", xgb.XGBRFRegressor),\n # (\"xgboost.dask.DaskXGBRFRegressor\"), Currently dask is not supported in syft\n ]\n\n 
methods = [\n (\"xgboost.train\", \"xgboost.core.Booster\"),\n (\"xgboost.core.Booster.predict\", \"numpy.ndarray\"),\n # classifiers\n (\"xgboost.XGBClassifier.fit\", \"xgboost.XGBClassifier\"),\n (\"xgboost.XGBClassifier.predict\", \"numpy.ndarray\"),\n (\"xgboost.XGBRFClassifier.fit\", \"xgboost.XGBRFClassifier\"),\n (\"xgboost.XGBRFClassifier.predict\", \"numpy.ndarray\"),\n # regressors\n (\"xgboost.XGBRegressor.fit\", \"xgboost.XGBRegressor\"),\n (\"xgboost.XGBRegressor.predict\", \"numpy.ndarray\"),\n (\"xgboost.XGBRFRegressor.fit\", \"xgboost.XGBRFClassifier\"),\n (\"xgboost.XGBRFRegressor.predict\", \"numpy.ndarray\"),\n ]\n\n add_modules(ast, modules)\n add_classes(ast, classes)\n add_methods(ast, methods)\n\n for klass in ast.classes:\n klass.create_pointer_class()\n klass.create_send_method()\n klass.create_storable_object_attr_convenience_methods()\n\n return ast", "def __main__():\n try:\n gff_file = sys.argv[1]\n mat_file = sys.argv[2]\n except:\n print __doc__\n sys.exit(-1)\n\n genes, transcripts, exons, utr3, utr5, cds = GFFParse(gff_file) \n gene_models = CreateGeneModels(genes, transcripts, exons, utr3, utr5, cds)\n # TODO Write to matlab/octave struct instead of cell arrays.\n sio.savemat(mat_file, \n mdict=dict(genes=gene_models), \n format='5', \n oned_as='row')", "def __init__(self, asa_factory: AsaFactory):\n super().__init__(asa_factory) # initialize step_in_progress flag\n self.agent, self.sampler, self.algo = asa_factory()\n self.batch_spec = self.sampler.batch_spec\n self.grad = None\n self.traj_infos = None\n self.opt_info = None", "def _annotate(self, generation: int):\n # Get pareto front\n pareto_front_scores = np.array(\n [individual.fitness.values for individual in self._population.individuals\n if individual.fitness.rank == 0]\n )\n\n # Calculate hypervolume\n self._evolution['hypervolume'][generation + 1] = hypervolume(pareto_front=pareto_front_scores)\n\n # Get number of solutions on the Pareto front\n self._evolution['num_solutions_front'][generation + 1] = len(pareto_front_scores)\n\n # Get best performance achieved for each objective\n self._evolution['best_values'][generation + 1] = np.max(pareto_front_scores, axis=0)", "def plot_asa_with_hippo(self, env):\n print(f'Plotting ASA with HiPPO - {env}')\n plt.figure()\n params = self.env_params[env]\n basic_run_split = max(11, min([itr for _, itr in params['true_asa_runs']]))\n\n # Basic run\n self.draw_reward_range(\n self.data(env, 'asa')\n .filter_basic_runs()\n .filter_itr_to(basic_run_split),\n env, color='darkblue'\n )\n self.draw_reward_range(\n self.data(env, 'asa')\n .filter_basic_runs()\n .filter_itr_from(basic_run_split),\n env, color='black', label='Base run without ASA skill'\n )\n\n # True ASA runs\n self.draw_reward_range(\n self.data(env, 'asa')\n .filter_seed_and_resumed_from(params['true_asa_runs'])\n .filter_itr_from(12)\n .append_prev_itr(),\n env, color='royalblue', label='With ASA skill'\n )\n\n # HiPPO runs\n hippo_colors = ['darkorange', 'orangered']\n hippo_exp_names = {\n 'mb': ['latdim3_period3_5', 'latdim10_period3_5'],\n 'gw': ['latdim14_period2_23', 'latdim50_period2_23']\n }\n hippo_labels = {\n 'mb': ['HiPPO - latent 3', 'HiPPO - latent 10'],\n 'gw': ['HiPPO - latent 18', 'HiPPO - latent 50']\n }\n hippo_unwanted_seeds = {\n 'mb': [],\n 'gw': [10, 40, 50, 100]\n }\n for exp_name, label, color in zip(hippo_exp_names[env], hippo_labels[env], hippo_colors):\n self.draw_reward_range(\n self.data(env, 'hippo', filter_unwanted_seeds=False)\n 
.filter_unwanted_seeds(hippo_unwanted_seeds[env])\n .filter_exp_name(exp_name),\n env, color=color, label=label\n )\n\n # Finalize\n self.tidy_plot(env, w=24, h=12)\n plt.legend()\n self.show_save_plot(f'asa-with-hippo-{env}')", "def CalculateGandAExpenses(self,problemManager):\n \n self.theEconomicDataManager.CalculateGandAExpenses(problemManager,self)\n \n return self.theEconomicDataManager.GandAOpex", "def GEEnasaNEXGDDP(ptsFile,metric,timeStep,startYear,endYear,scenarios,buf,poly,username,folderOut,models = ['ACCESS1-0', 'bcc-csm1-1', 'BNU-ESM',\n 'CanESM2', 'CCSM4', 'CESM1-BGC', 'CNRM-CM5', 'CSIRO-Mk3-6-0',\n 'GFDL-CM3', 'GFDL-ESM2G', 'GFDL-ESM2M', 'inmcm4', 'IPSL-CM5A-LR',\n 'IPSL-CM5A-MR', 'MIROC-ESM', 'MIROC-ESM-CHEM', 'MIROC5', 'MPI-ESM-LR',\n 'MPI-ESM-MR', 'MRI-CGCM3', 'NorESM1-M'], scalePix = 25000):\n \n # load required libraries\n import ee\n\n # Initialize the Earth Engine object, using the authentication credentials.\n ee.Initialize()\n\n ID_field = \"geeID\"\n\n #load pts or poly file\n pts1 = ee.FeatureCollection('users/' + username + '/' + str(ptsFile))\n\n time_d = {}\n time_d['day'] = 'projd'\n time_d['month'] = 'projm'\n time_d['year'] = 'projy'\n \n for met in metric:\n\n for scenario in scenarios:\n\n for model in models:\n\n NEX = (ee.ImageCollection('NASA/NEX-GDDP')\n .select(met)\n .filterMetadata('model', 'equals', model)\n .filterMetadata('scenario', 'equals', scenario))\n\n metL = [met]\n \n years = list(range(startYear, endYear + 1))\n monthsEE = ee.List(list(range(0,(12*len(years)))))\n yearsEE = ee.List(years)\n\n######Turned off unit conversion, because it fails when there are too many pts\n## if (met == 'pr'):\n##\n## def Scale1(img):\n## return (img.float()\n## .multiply(86400)\n## .copyProperties(img,['system:time_start','system:time_end']))\n##\n## NEX = NEX0.map(Scale1)\n## \n## elif any([(met == 'tasmin'),(met == 'tasmax')]):\n##\n## def KtoC(img):\n## return (img.float()\n## .subtract(273.15)\n## .copyProperties(img,['system:time_start','system:time_end']))\n##\n## NEX = NEX0.map(KtoC)\n \n if all([(timeStep == 'year'),any([(met == 'tasmin'),(met == 'tasmax')])]):\n\n def map_m(i):\n i = ee.Number(i).int()\n image2 = (NEX\n .filter(ee.Filter.calendarRange(i, i, 'year'))\n .first())\n filtered = (NEX\n .filter(ee.Filter.calendarRange(i, i, 'year'))\n .mean()\n .copyProperties(image2,['system:time_start','system:time_end']))\n return filtered\n\n img_col = ee.ImageCollection(yearsEE.map(map_m).flatten())\n\n elif all([(timeStep == 'month'),any([(met == 'tasmin'),(met == 'tasmax')])]):\n \n def map_m(i):\n i = ee.Number(i)\n y = i.divide(12).add(years[0]).int()\n m = i.mod(12).add(1)\n image2 = (NEX\n .filter(ee.Filter.calendarRange(m, m, 'month'))\n .filter(ee.Filter.calendarRange(y, y, 'year'))\n .first())\n filtered = (NEX\n .filter(ee.Filter.calendarRange(m, m, 'month'))\n .filter(ee.Filter.calendarRange(y, y, 'year'))\n .mean()\n .copyProperties(image2,['system:time_start','system:time_end']))\n return filtered\n\n img_col = ee.ImageCollection(monthsEE.map(map_m).flatten())\n\n elif all([(timeStep == 'year'),(met == 'pr')]):\n\n def map_m(i):\n i = ee.Number(i).int()\n image2 = (NEX\n .filter(ee.Filter.calendarRange(i, i, 'year'))\n .first())\n filtered = (NEX\n .filter(ee.Filter.calendarRange(i, i, 'year'))\n .sum()\n .copyProperties(image2,['system:time_start','system:time_end']))\n return filtered\n\n img_col = ee.ImageCollection(yearsEE.map(map_m).flatten())\n\n elif all([(timeStep == 'month'),(met == 'pr')]):\n \n def map_m(i):\n i = 
ee.Number(i)\n y = i.divide(12).add(years[0]).int()\n m = i.mod(12).add(1)\n image2 = (NEX\n .filter(ee.Filter.calendarRange(m, m, 'month'))\n .filter(ee.Filter.calendarRange(y, y, 'year'))\n .first())\n filtered = (NEX\n .filter(ee.Filter.calendarRange(m, m, 'month'))\n .filter(ee.Filter.calendarRange(y, y, 'year'))\n .sum()\n .copyProperties(image2,['system:time_start','system:time_end']))\n return filtered\n\n img_col = ee.ImageCollection(monthsEE.map(map_m).flatten())\n\n elif timeStep == 'day':\n\n img_col = NEX.filter(ee.Filter.calendarRange(startYear, endYear, 'year'))\n\n #else:\n #print(\"incorrect time step specified\")\n \n if buf > 0:\n bufL = [buf]\n def bufferPoly(feature):\n return feature.buffer(bufL[0])\n\n ptsB = pts1.map(bufferPoly)\n def table_m(image):\n table = (image\n .select(metL[0])\n .reduceRegions(collection = ptsB.select([ID_field]),\n reducer = ee.Reducer.mean(),\n scale = scalePix))\n \n def table_add_date(f):\n return f.set('startDate', ee.Date(image.get('system:time_start')))\n\n return table.map(table_add_date)\n\n triplets = img_col.map(table_m).flatten()\n\n task_tc = ee.batch.Export.table.toDrive(collection = triplets\n .filter(ee.Filter.neq('mean', None))\n .select(['.*'],None,False),\n description = str(time_d[timeStep])+'_NEX_'+str(met)+'_'+scenario+'_'+model+'_'+str(years[0])+'_'+str(years[len(years)-1])+'_ptsB',\n folder = folderOut,\n fileFormat = 'CSV')\n task_tc.start()\n \n \n #print ('buffered pts by:' + str(buf) + ' for NEX: ' + met + ' ' + scenario + ' ' + model)\n\n elif poly > 0:\n \n def table_m(image):\n table = (image\n .select(metL[0])\n .reduceRegions(collection = pts1.select([ID_field]),\n reducer = ee.Reducer.mean(),\n scale = scalePix))\n \n def table_add_date(f):\n return f.set('startDate', ee.Date(image.get('system:time_start')))\n\n return table.map(table_add_date)\n\n triplets = img_col.map(table_m).flatten()\n\n task_tc = ee.batch.Export.table.toDrive(collection = triplets\n .filter(ee.Filter.neq('mean', None))\n .select(['.*'],None,False),\n description = str(time_d[timeStep])+'_NEX_'+str(met)+'_'+scenario+'_'+model+'_'+str(years[0])+'_'+str(years[len(years)-1])+'_poly1',\n folder = folderOut,\n fileFormat = 'CSV')\n task_tc.start()\n \n \n #print ('spatial mean in poly: no buffer for NEX: ' + met + ' ' + scenario + ' ' + model)\n\n else:\n def table_m(image):\n table = (image\n .select(metL[0])\n .reduceRegions(collection = pts1.select([ID_field]),\n reducer = ee.Reducer.mean(),\n scale = scalePix))\n \n def table_add_date(f):\n return f.set('startDate', ee.Date(image.get('system:time_start')))\n\n return table.map(table_add_date)\n\n triplets = img_col.map(table_m).flatten()\n\n task_tc = ee.batch.Export.table.toDrive(collection = triplets\n .filter(ee.Filter.neq('mean', None))\n .select(['.*'],None,False),\n description = str(time_d[timeStep])+'_NEX_'+str(met)+'_'+scenario+'_'+model+'_'+str(years[0])+'_'+str(years[len(years)-1])+'_pts1',\n folder = folderOut,\n fileFormat = 'CSV')\n task_tc.start()\n \n #print('value at point: no buffer for NEX: ' + met + ' ' + scenario + ' ' + model)", "def create_liver_glucose(target_dir):\n\n # external annotation file\n annotations_path = BASE_PATH / \"models\" / 'liver_glucose_annotations.xlsx'\n\n return creator.create_model(\n modules=['pyexsimo.models.liver_glucose'],\n filename=\"liver_glucose.xml\",\n target_dir=target_dir,\n annotations=str(annotations_path),\n create_report=True\n )", "def _construct_gateset_exp(self, **kwargs) -> dict:\n # Store the experimental accessible 
gate set :math:`\\tilde{\\mathcal{G}}`\n gateset_exp: Dict[str, np.ndarray] = dict()\n\n # Run tomographic quantum circuits to construct the experimental accessible quantum gate G_tilde_k\n pbar = tqdm(total=100, desc=\"GST Step 2/5: Running tomographic quantum circuits for gates, \"\n \"which is time consuming ...\", initial=20)\n for g_k in self._gateset.gates.keys():\n gateset_exp.update({g_k: self._gate_exp(g_k, **kwargs)})\n\n # Run tomographic quantum circuit to construct the experimental accessible rho_tilde\n pbar.desc = \"GST Step 3/5: Running tomographic quantum circuits for rho, which is time consuming ...\"\n pbar.update(20)\n gateset_exp.update({'rho': self._rho_exp(**kwargs)})\n\n # Run tomographic quantum circuit to construct the experimental accessible E_tilde\n pbar.desc = \"GST Step 4/5: Running tomographic quantum circuits for E, which is time consuming ...\"\n pbar.update(20)\n gateset_exp.update({'E': self._meas_exp(**kwargs)})\n\n # Record the results\n self._result.update({'gateset exp': gateset_exp})\n self._result.update({'g': gateset_exp[GateSet.NULL_GATE_NAME]})\n\n pbar.close()\n return gateset_exp", "def main():\n # Load and prepare dataset.\n ts_list = load_energy_weather_data()\n\n generator = gnt_class.FeatureGeneration(transformations='centroids') #gnt_class.Monotone()\n\n # feature selection model can be defined in the same way. If you don't use any, just leave as is\n selector = sel_class.FeatureSelection(on=False) #\n # first argument is your model class, then follow optional parameters as keyword arguments\n frc_model = frc_class.CustomModel(RandomForestRegressor, name=\"RF\")\n #frc_class.CustomModel(Lasso, name=\"Lasso\", alpha=0.001)\n\n # train your model:\n model = demo_train(ts_list, frc_model=frc_model, fg_mdl=generator, fs_mdl=selector, verbose=VERBOSE)\n\n # evaluate errors on the test set\n train_error, train_std = competition_errors(model=model, names=TRAIN_FILE_NAMES, y_idx=TS_IDX)\n test_error, test_std = competition_errors(model=model, names=TEST_FILE_NAMES, y_idx=TS_IDX)\n\n\n print(\"Average MAPE across time series: train = {} with std {}, test = {} with std {}\".\n format(train_error, train_std, test_error, test_std))\n\n return train_error, test_error", "def exp_main(self, use_remote_data=True, test=False):\n test = str_to_bool(test)\n use_remote_data = str_to_bool(use_remote_data)\n root_uri = get_root_uri(use_remote_data)\n root_uri = os.path.join(root_uri, rv_output_dir)\n spacenet_config = VegasBuildings(use_remote_data)\n experiments = []\n runs = [0]\n\n noise_modes = [\n NoiseMode(NoiseMode.SHIFT, 0),\n NoiseMode(NoiseMode.SHIFT, 10),\n NoiseMode(NoiseMode.SHIFT, 20),\n NoiseMode(NoiseMode.SHIFT, 30),\n NoiseMode(NoiseMode.SHIFT, 40),\n NoiseMode(NoiseMode.SHIFT, 50),\n NoiseMode(NoiseMode.DROP, 0.0),\n NoiseMode(NoiseMode.DROP, 0.1),\n NoiseMode(NoiseMode.DROP, 0.2),\n NoiseMode(NoiseMode.DROP, 0.3),\n NoiseMode(NoiseMode.DROP, 0.4),\n NoiseMode(NoiseMode.DROP, 0.5)\n ]\n\n for nm in noise_modes:\n for run in runs:\n exp_id = get_exp_id(nm, run)\n task = build_task(spacenet_config.get_class_map())\n backend = build_fastai_backend(task, test)\n analyzer = rv.AnalyzerConfig.builder(rv.STATS_ANALYZER) \\\n .build()\n dataset = build_dataset(task, spacenet_config, test, nm)\n\n experiment = rv.ExperimentConfig.builder() \\\n .with_id(exp_id) \\\n .with_analyze_key('shift-0-0') \\\n .with_task(task) \\\n .with_backend(backend) \\\n .with_analyzer(analyzer) \\\n .with_dataset(dataset) \\\n .with_root_uri(root_uri) \\\n .build()\n 
experiments.append(experiment)\n\n return experiments" ]
[ "0.6862905", "0.59698844", "0.5741826", "0.5694374", "0.5569399", "0.55237055", "0.5385206", "0.53803694", "0.5294734", "0.5274186", "0.5242113", "0.51879853", "0.5178812", "0.5157552", "0.51452965", "0.51294", "0.5125397", "0.5094075", "0.50884384", "0.5040239", "0.4964143", "0.49224654", "0.49147126", "0.4898529", "0.48972753", "0.48955587", "0.48627296", "0.4849057", "0.48394692", "0.4805479", "0.47981277", "0.47963658", "0.4778211", "0.47748125", "0.47438243", "0.472691", "0.4722863", "0.4715311", "0.4708702", "0.4705305", "0.47021845", "0.46978796", "0.46970347", "0.46790916", "0.46679705", "0.4648615", "0.4643", "0.4627945", "0.46272808", "0.46263468", "0.4626036", "0.46232954", "0.46117568", "0.46108148", "0.46106395", "0.46101114", "0.4607204", "0.46060345", "0.45941648", "0.45933136", "0.45899892", "0.458174", "0.45812598", "0.45731547", "0.456384", "0.45590702", "0.4557441", "0.45548463", "0.45407817", "0.4531677", "0.45238158", "0.45235884", "0.4522792", "0.4515657", "0.45122835", "0.45033652", "0.44987673", "0.44977435", "0.4495692", "0.44877517", "0.4484756", "0.44805905", "0.4478928", "0.4478517", "0.44780722", "0.44763952", "0.447556", "0.4473141", "0.4471731", "0.44704777", "0.44699466", "0.44694462", "0.44685823", "0.44657552", "0.44653848", "0.44650948", "0.44596788", "0.4452578", "0.44468078", "0.44440517" ]
0.49073586
23
Main method to perform GSEA/MSEA analysis
def get_results(self, preprocess=True): logger.debug('Calculating GSEA') measurement_df = self._get_measurement_df(preprocess) annot_df = self.data_source.get_annotations() joined = pd.merge(left=measurement_df, right=annot_df, left_index=True, right_index=True) joined = joined.set_index('entity_id') unique_ids = [self.data_source._get_unique_id(x) for x in joined.index.values] joined.index = unique_ids joined = joined.drop_duplicates(keep='first').sort_index() # gene_sets is a dict. key is pw name, values are a list of entries in that pathway gene_sets = {} assert len(self.data_source.dataset_pathways) > 0, 'No pathways found in the dataset' pathways = list(self.data_source.dataset_pathways) for pw in pathways: pathway_row_ids = self.data_source.dataset_pathways_to_row_ids[pw] pw_unique_ids = [] for row_id in pathway_row_ids: pw_unique_ids.extend(self.data_source.dataset_row_id_to_unique_ids[row_id]) pw_unique_ids = list(set(pw_unique_ids)) gene_sets[pw] = pw_unique_ids # run GSEA for all comparisons all_dfs = [] for comp in self.data_source.comparisons: if not is_comparison_used(comp, self.case, self.control): continue case = comp['case'] control = comp['control'] logger.debug('Running comparison case=%s control=%s' % (case, control)) pheno_cols = set(self.data_source.get_experimental_design()['groups'][case]) df_cols = measurement_df.columns.values # for each comparison, we need to create C (phenotype labels) # Loop over df_cols and store an indicator into C. # Entries in C is 1 if that column belongs to the case group, otherwise it's a 0 C = [] for col in df_cols: if col in pheno_cols: C.append(1) else: C.append(0) C = np.array(C) # actually runs GSEA here data = joined cls = C.tolist() outdir = None min_size = 1 max_size = 1000 permutation_num = self.num_resamples weighted_score_type = 1 permutation_type = 'phenotype' method = self.method ascending = True processes = 1 figsize = (6.5, 6) format = 'pdf', graph_num = 20 no_plot = True seed = None verbose = False msea = MSEA(data, gene_sets, cls, outdir, min_size, max_size, permutation_num, weighted_score_type, permutation_type, method, ascending, processes, figsize, format, graph_num, no_plot, seed, verbose) msea.run() # convert GSEA results to dataframe df = msea.res2d df = df.reset_index() selected = df[['Term', 'pval', 'fdr', 'es']] selected = selected.rename(columns={'Term': 'mapids'}).set_index('mapids') col_name = comp['name'] + ' p-value' es_colname = comp['name'] + ' ES_score' if self.data_source.database_name is not None: comb_col_name = '%s %s %s' % (self.data_source.database_name, comp['name'], 'comb_p') else: comb_col_name = '%s %s' % (comp['name'], 'comb_p') pathway_df = selected.rename(columns={ 'pval': col_name, 'es': es_colname, 'fdr': comb_col_name }) all_dfs.append(pathway_df) # combine all the results across all comparisons combined_df = pd.concat(all_dfs, axis=1, sort=False) combined_df.index.name = 'mapids' # create a dataframe of pathway mapids and names pw_name_df = [] for map_id in pathways: pw_name = self.data_source.pathway_dict[map_id]['display_name'] pw_name_df.append((map_id, pw_name)) pw_name_df = pd.DataFrame(pw_name_df, columns=['mapids', 'pw_name']).set_index(['mapids']) combined_df = pw_name_df.merge(combined_df, left_index=True, right_index=True) # add formula coverage information mapids = combined_df.index.values.tolist() cov_df = self.data_source._calculate_coverage_df(mapids) coverage_df = cov_df.reindex(combined_df.index) # make sure dfs are in same order before merging # Merge the two dfs together 
pathway_df = pd.merge(combined_df, coverage_df, left_index=True, right_index=True, how='outer') # del pathway_df.index.name pathway_df.rename_axis(None, inplace=True) # post-processing to filter pathway dataframe by the minimum number of hits pathway_df = post_filter_df_by_min_hits(pathway_df, self.data_source.min_hits) return pathway_df
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main():\r\n graphPerformance = False # Built in graphing ability, currently not functional, but mechanism is in place.\r\n trainData = \"2_1000_0_1600_0_0_CV_0_Train.txt\"\r\n testData = \"2_1000_0_1600_0_0_CV_0_Test.txt\"\r\n outProg = \"GH_GALE_ProgressTrack\"\r\n outPop = \"GH_GALE_PopulationOut\"\r\n bitLength = 1 # This implementation is not yet set up to handle other rule representations, or bit encoding lengths.\r\n CVpartitions = 10\r\n trackCycles = 1\r\n \r\n iterInput = '5.10.20' \r\n xdim = 10\r\n ydim = 10\r\n dist = 2\r\n wild = 0.75\r\n prune = 1\r\n \r\n #Figure out the iteration stops for evaluation, and the max iterations.\r\n iterList = iterInput.split('.')\r\n for i in range(len(iterList)):\r\n iterList[i] = int(iterList[i])\r\n lastIter = iterList[len(iterList)-1] \r\n\r\n #Sets up up algorithm to be run.\r\n GALEConstants.setConstants(prune, wild)\r\n e = GALE_Environment(trainData,testData,bitLength)\r\n sampleSize = e.getNrSamples()\r\n gale = GALE(e, outProg, outPop, bitLength, CVpartitions, graphPerformance, xdim, ydim, dist)\r\n \r\n #Set some GALE parameters.\r\n if trackCycles == 'Default':\r\n gale.setTrackingIterations(sampleSize)\r\n else:\r\n gale.setTrackingIterations(trackCycles) \r\n gale.setNumberOfTrials(lastIter, iterList) \r\n \r\n #Run the GALE Algorithm \r\n gale.runGALE()", "def main():\n my_emr = EmrProcessing()\n\n if \"-s\" in sys.argv:\n my_emr.verbose_mode = False\n else:\n my_emr.verbose_mode = True\n print \"\\nStarting Titanic Data Analysis\"\n my_emr.parse_user_selections()\n\n # Setup\n my_emr.clear_local_output_directory()\n my_emr.update_mapper_file(\"model2\")\n\n # S3 activities\n my_emr.empty_bucket()\n my_emr.create_and_fill_bucket()\n\n # EMR activities\n my_emr.setup_and_run_job()\n my_emr.wait_until_job_completes()\n\n # Cleanup\n my_emr.download_output_files()\n my_emr.post_process_output_file()\n if my_emr.verbose_mode:\n my_emr.print_local_output_files_stats()", "def main():\n # Load and prepare dataset.\n ts_list = load_energy_weather_data()\n\n generator = gnt_class.FeatureGeneration(transformations='centroids') #gnt_class.Monotone()\n\n # feature selection model can be defined in the same way. 
If you don't use any, just leave as is\n selector = sel_class.FeatureSelection(on=False) #\n # first argument is your model class, then follow optional parameters as keyword arguments\n frc_model = frc_class.CustomModel(RandomForestRegressor, name=\"RF\")\n #frc_class.CustomModel(Lasso, name=\"Lasso\", alpha=0.001)\n\n # train your model:\n model = demo_train(ts_list, frc_model=frc_model, fg_mdl=generator, fs_mdl=selector, verbose=VERBOSE)\n\n # evaluate errors on the test set\n train_error, train_std = competition_errors(model=model, names=TRAIN_FILE_NAMES, y_idx=TS_IDX)\n test_error, test_std = competition_errors(model=model, names=TEST_FILE_NAMES, y_idx=TS_IDX)\n\n\n print(\"Average MAPE across time series: train = {} with std {}, test = {} with std {}\".\n format(train_error, train_std, test_error, test_std))\n\n return train_error, test_error", "def main():\n # parse command-line parameters\n parser = argparse.ArgumentParser()\n parser.add_argument('--verbose', action='store_true', help='be verbose')\n parser.add_argument('--ogse_dir', default='Logs', type=Path,\n help='directory with OGSE data')\n subparsers = parser.add_subparsers(help='sub-command help')\n parser_db = subparsers.add_parser('create_db',\n help='create new OGSE database')\n parser_db.add_argument('--ref_diode', nargs='*', default=[],\n help='names of reference-diode files')\n parser_db.add_argument('--wav_mon', nargs='*', default=[],\n help='names of Avantes wavelength-monitor files')\n parser_db.set_defaults(func=create_ogse_db)\n\n parser_wr = subparsers.add_parser('add',\n help=('add OGSE information to a'\n ' SPEXone Level-1A product'))\n parser_wr.add_argument('--ref_diode', action='store_true',\n help='add reference-diode data from OGSE database')\n parser_wr.add_argument('--avantes', action='store_true',\n help=('add Avantes wavelength monitoring'\n ' from OGSE database'))\n group_wr = parser_wr.add_mutually_exclusive_group()\n group_wr.add_argument('--helios', action='store_true',\n help='add Helios reference spectrum')\n group_wr.add_argument('--grande', action='store_true',\n help='add Grande reference spectrum')\n parser_wr.add_argument('--opo_laser', action='store_true',\n help='add wavelength of OPO laser')\n parser_wr.add_argument('l1a_file', default=None, type=Path,\n help='SPEXone L1A product')\n parser_wr.set_defaults(func=write_ogse)\n args = parser.parse_args()\n if args.verbose:\n print(args)\n\n # call whatever function was selected\n args.func(args)", "def main():\n logging.info(\"Executing data quality module\")\n\n calculate_quality()", "def main():\n\n args = parse_cmd_line_args()\n\n random_state = check_random_state(args.random_seed)\n\n X, mu, A, phases = generate_data(args.n_features, n_samples=args.n_samples,\n period=args.period, order=args.order,\n noise_variance=args.noise_variance,\n random_state=random_state)\n\n if args.plot_data:\n plot_data_timeseries(X)\n\n best_fit, best_weights = fit_fembv_varx(\n X, n_components=args.n_components,\n max_tv_norm=args.max_tv_norm,\n memory=args.memory, n_init=args.n_init,\n tolerance=args.tolerance,\n max_iterations=args.max_iterations,\n verbose=args.verbose, random_state=random_state)\n\n if args.plot_weights:\n plot_weights_timeseries(best_weights, phases)", "def main(self):\n try:\n self.parse_args()\n self.run()\n return 0\n except AnalysisBackendError as e:\n L.error(e)\n return 1", "def _run_gsea(self, gct_file, gmt_file, cls_file, gsea_dir):\n r = robjects.r\n r.source(self.gsea_r_location)\n r(\"\"\"GSEA( # Input/Output Files 
:-------------------------------------------\n input.ds = \"{}\", # Input gene expression Affy dataset file in RES or GCT format\n input.cls = \"{}\", # Input class vector (phenotype) file in CLS format\n gs.db = \"{}\", # Gene set database in GMT format\n output.directory = \"{}/\", # Directory where to store output and results (default: \"\")\n # Program parameters :----------------------------------------------------------------------------------------------------------------------------\n doc.string = \"syngsea\", # Documentation string used as a prefix to name result files (default: \"GSEA.analysis\")\n non.interactive.run = F, # Run in interactive (i.e. R GUI) or batch (R command line) mode (default: F)\n reshuffling.type = \"sample.labels\", # Type of permutation reshuffling: \"sample.labels\" or \"gene.labels\" (default: \"sample.labels\" \n nperm = 1000, # Number of random permutations (default: 1000)\n weighted.score.type = 1, # Enrichment correlation-based weighting: 0=no weight (KS), 1= weigthed, 2 = over-weigthed (default: 1)\n nom.p.val.threshold = -1, # Significance threshold for nominal p-vals for gene sets (default: -1, no thres)\n fwer.p.val.threshold = -1, # Significance threshold for FWER p-vals for gene sets (default: -1, no thres)\n fdr.q.val.threshold = 0.25, # Significance threshold for FDR q-vals for gene sets (default: 0.25)\n topgs = 20, # Besides those passing test, number of top scoring gene sets used for detailed reports (default: 10)\n adjust.FDR.q.val = F, # Adjust the FDR q-vals (default: F)\n gs.size.threshold.min = 10, # Minimum size (in genes) for database gene sets to be considered (default: 25)\n gs.size.threshold.max = 500, # Maximum size (in genes) for database gene sets to be considered (default: 500)\n reverse.sign = F, # Reverse direction of gene list (pos. enrichment becomes negative, etc.) (default: F)\n preproc.type = 0, # Preproc.normalization: 0=none, 1=col(z-score)., 2=col(rank) and row(z-score)., 3=col(rank). (def: 0)\n random.seed = 111, # Random number generator seed. (default: 123456)\n perm.type = 0, # For experts only. Permutation type: 0 = unbalanced, 1 = balanced (default: 0)\n fraction = 1.0, # For experts only. Subsampling fraction. Set to 1.0 (no resampling) (default: 1.0)\n replace = F, # For experts only, Resampling mode (replacement or not replacement) (default: F)\n save.intermediate.results = F, # For experts only, save intermediate results (e.g. matrix of random perm. 
scores) (default: F)\n OLD.GSEA = F, # Use original (old) version of GSEA (default: F)\n use.fast.enrichment.routine = T # Use faster routine to compute enrichment for random permutations (default: T)\n )\"\"\".format(gct_file, cls_file, gmt_file, gsea_dir))\n\n r(\"\"\"GSEA.Analyze.Sets(\n directory = \"{}/\", # Directory where to store output and results (default: \"\")\n topgs = 20, # number of top scoring gene sets used for analysis\n height = 16,\n width = 16\n )\"\"\".format(gsea_dir))", "def __main__():\n try:\n gff_file = sys.argv[1]\n mat_file = sys.argv[2]\n except:\n print __doc__\n sys.exit(-1)\n\n genes, transcripts, exons, utr3, utr5, cds = GFFParse(gff_file) \n gene_models = CreateGeneModels(genes, transcripts, exons, utr3, utr5, cds)\n # TODO Write to matlab/octave struct instead of cell arrays.\n sio.savemat(mat_file, \n mdict=dict(genes=gene_models), \n format='5', \n oned_as='row')", "def main(args):\n try:\n config_path = project_path + \"/\" + args.config\n input_data_path = project_path + \"/\" + args.input\n output_data_path = project_path + \"/\" + args.output\n\n config = load_config(config_path)\n\n # load data\n df = read_csv(input_data_path)\n df.loc[:, 'season'] = df['season'].astype('int')\n\n sys.stdout = open(output_data_path, 'w')\n check_balance(df)\n check_linelen(df, config['eda']['quantile'])\n\n groups = config['eda']['groups']\n\n for i in range(len(groups)):\n df_top_words = most_important_words(df, groups[i], **config['eda']['top_n_words'])\n fig = plot_tfidf_classfeats_h(df_top_words)\n fig.savefig('{}/EDA/top_words_{}.png'.format(project_path, i))\n\n\n except Exception as e:\n logger.error(\"Unexpected error occurred when eda: \" + str(e))", "def main():\n args = parameter_parser()\n tab_printer(args)\n trainer = GPNTrainer(args)\n # trainer.fit()\n \"\"\"\n Scoring on the prediction and learning ability.\n \"\"\"\n trainer.score()\n \"\"\"\n Scoring on the subgraph test set.\n \"\"\"\n # trainer.score2()\n \"\"\"\n Scoring on the generalization ability.\n \"\"\"\n # trainer.score3()\n \"\"\"\n Finetuning for downstream tasks.\n \"\"\"\n # model = finetune_GPN(args, trainer.number_of_labels)\n # model.finetune()", "def main():\n\n parser = argparse.ArgumentParser(description='Calculate Max Avg Degree estimate as max eigenvalue for biggraphs')\n parser.add_argument('G_fn', action='store',help='Full filename sparse graph (.mat)')\n parser.add_argument('lcc_fn', action='store',help='Full filename of largest connected component (.npy)')\n\n result = parser.parse_args()\n loadAdjMat(result.G_fn, result.lcc_fn)", "def main():\n\n ############################ variable settings #################################\n parser = argparse.ArgumentParser(description='Run Subtask C of GermEval 2017 Using Pre-Trained Language Model.')\n parser.add_argument('--seed', type=int, default=42, help='Random seed.')\n parser.add_argument('--lang_model', type=str, default='bert-base-german-dbmdz-uncased', help='The pre-trained language model.')\n parser.add_argument('--epochs', type=int, default=4, help='Number of epochs for training.')\n parser.add_argument('--lr', type=float, default=5e-5, help='The learning rate.')\n parser.add_argument('--max_len', type=int, default=256, help='The maximum sequence length of the input text.')\n parser.add_argument('--batch_size', type=int, default=32, help='Your train set batch size.')\n parser.add_argument('--df_path', type=str, default='./data/', help='The data directory.') \n parser.add_argument('--train_data', type=str, 
default='train_df_cat.tsv', help='The filename of the input train data.')\n parser.add_argument('--dev_data', type=str, default='dev_df_cat.tsv', help='The filename of the input development data.')\n parser.add_argument('--test_data1', type=str, default='test_syn_df_cat.tsv', help='The filename of the first input test data (synchronic).')\n parser.add_argument('--test_data2', type=str, default='test_dia_df_cat.tsv', help='The filename of the second input test data (diachronic).')\n parser.add_argument('--output_path', type=str, default='./output/subtaskC/', help='The output directory of the model and predictions.')\n parser.add_argument(\"--train\", default=True, action=\"store_true\", help=\"Flag for training.\")\n parser.add_argument(\"--save_prediction\", default=False, action=\"store_true\", help=\"Flag for saving predictions.\")\n parser.add_argument(\"--save_cr\", default=False, action=\"store_true\", help=\"Flag for saving confusion matrix.\")\n parser.add_argument(\"--exclude_general\", default=False, action=\"store_true\", help=\"Flag for excluding category Allgemein.\")\n parser.add_argument(\"--exclude_neutral\", default=False, action=\"store_true\", help=\"Flag for excluding neutral polarity.\")\n parser.add_argument(\"--exclude_general_neutral\", default=False, action=\"store_true\", help=\"Flag for excluding category Allgemein:neutral.\")\n args = parser.parse_args()\n ################################################################################\n set_all_seeds(args.seed)\n device, n_gpu = initialize_device_settings(use_cuda=True)\n \n # Load data\n train_df = pd.read_csv(args.df_path + args.train_data, delimiter = '\\t')\n dev_df = pd.read_csv(args.df_path + args.dev_data, delimiter = '\\t')\n test_syn_df = pd.read_csv(args.df_path + args.test_data1, delimiter = '\\t')\n test_dia_df = pd.read_csv(args.df_path + args.test_data2, delimiter = '\\t')\n \n # Create a tokenizer\n lower_case = False\n if args.lang_model[-7:] == \"uncased\":\n lower_case = True\n\n if args.lang_model[:4] == \"bert\":\n model_class = \"BERT\"\n tokenizer = BertTokenizer.from_pretrained(args.lang_model, do_lower_case=lower_case, max_length=args.max_len)\n \n if args.lang_model[:10] == \"distilbert\":\n model_class = \"DistilBERT\"\n tokenizer = DistilBertTokenizer.from_pretrained(args.lang_model, do_lower_case=lower_case, max_length=args.max_len)\n \n\n # get training features\n cats = train_df.columns[5:]\n end = \"full\"\n # exclude categories if required\n if (args.exclude_general):\n cats = [i for i in list(cats) if \"Allgemein\" not in i]\n end = \"excl_gen\"\n if (args.exclude_neutral):\n cats = [i for i in list(cats) if \"neutral\" not in i]\n end = \"excl_neu\"\n if (args.exclude_general_neutral):\n cats = [i for i in list(cats) if \"Allgemein:neutral\" not in i]\n end = \"excl_genneu\"\n \n num_labels = len(list(cats))\n\n # create one hot labels\n train_df['one_hot_labels'] = list(train_df[list(cats)].values)\n dev_df['one_hot_labels'] = list(dev_df[list(cats)].values)\n test_syn_df['one_hot_labels'] = list(test_syn_df[list(cats)].values)\n test_dia_df['one_hot_labels'] = list(test_dia_df[list(cats)].values)\n\n # retrieve sentences and labels\n df = pd.concat([train_df, dev_df])\n sentences = df.text.values\n labels = list(df.one_hot_labels.values) \n\n sentences_syn = test_syn_df.text.values\n labels_syn = list(test_syn_df.one_hot_labels.values)\n\n sentences_dia = test_dia_df.text.values\n labels_dia = list(test_dia_df.one_hot_labels.values)\n \n print(\"number of categories:\", 
len(list(cats)))\n\n # Tokenize all of the sentences and map the tokens to their word IDs. \n input_ids = [tokenizer.encode(sent, add_special_tokens=True, truncation=True, \n max_length=args.max_len) for sent in sentences]\n input_ids = pad_sequences(input_ids, maxlen=args.max_len, dtype=\"long\", \n value=0.0, truncating=\"post\", padding=\"post\")\n # Create attention masks\n attention_masks = [[int(token_id > 0) for token_id in sent] for sent in input_ids]\n \n # synchronic test data\n input_ids_syn = [tokenizer.encode(sent, add_special_tokens=True, truncation=True) for sent in sentences_syn]\n input_ids_syn = pad_sequences(input_ids_syn, maxlen=args.max_len, dtype=\"long\", \n value=0.0, truncating=\"post\", padding=\"post\")\n attention_masks_syn = [[int(token_id > 0) for token_id in sent] for sent in input_ids_syn]\n \n # diachronic test data\n input_ids_dia = [tokenizer.encode(sent, add_special_tokens=True, truncation=True) for sent in sentences_dia]\n input_ids_dia = pad_sequences(input_ids_dia, maxlen=args.max_len, dtype=\"long\", \n value=0.0, truncating=\"post\", padding=\"post\")\n attention_masks_dia = [[int(token_id > 0) for token_id in sent] for sent in input_ids_dia]\n\n # split train, dev\n train_inputs, train_labels, dev_inputs, dev_labels, train_masks, dev_masks = split_train_dev(\n train_df, dev_df, attention_masks, input_ids, labels)\n \n # transform to torch tensor\n train_inputs = torch.tensor(train_inputs)\n dev_inputs = torch.tensor(dev_inputs)\n\n train_labels = torch.tensor(train_labels)\n dev_labels = torch.tensor(dev_labels)\n\n train_masks = torch.tensor(train_masks)\n dev_masks = torch.tensor(dev_masks)\n\n test_syn_inputs = torch.tensor(input_ids_syn)\n test_syn_masks = torch.tensor(attention_masks_syn)\n test_syn_labels = torch.tensor(labels_syn)\n\n test_dia_inputs = torch.tensor(input_ids_dia)\n test_dia_masks = torch.tensor(attention_masks_dia)\n test_dia_labels = torch.tensor(labels_dia)\n\n # Create the DataLoader\n train_dataloader = create_dataloader(train_inputs, train_masks, \n train_labels, args.batch_size, train = True)\n\n dev_dataloader = create_dataloader(dev_inputs, dev_masks, \n dev_labels, args.batch_size, train = False)\n\n test_syn_dataloader = create_dataloader(test_syn_inputs, test_syn_masks, \n test_syn_labels, args.batch_size, \n train = False)\n\n test_dia_dataloader = create_dataloader(test_dia_inputs, test_dia_masks, \n test_dia_labels, args.batch_size, \n train = False)\n\n # Create model\n if args.train:\n if model_class == \"BERT\":\n config = BertConfig.from_pretrained(args.lang_model, num_labels=num_labels) \n config.hidden_dropout_prob = 0.1 \n model = BertForSequenceClassification.from_pretrained(\n args.lang_model,\n num_labels = num_labels,\n output_attentions = False,\n output_hidden_states = False\n )\n\n if model_class == \"DistilBERT\":\n config = DistilBertConfig.from_pretrained(args.lang_model, num_labels=num_labels) \n config.hidden_dropout_prob = 0.1 \n model = DistilBertForSequenceClassification.from_pretrained(\n args.lang_model,\n num_labels = num_labels,\n output_attentions = False,\n output_hidden_states = False\n )\n model.cuda()\n\n\n # Create an optimizer\n param_optimizer = list(model.named_parameters())\n no_decay = ['bias', 'LayerNorm.weight']\n optimizer_grouped_parameters = [\n {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],\n 'weight_decay_rate': 0.01},\n {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],\n 'weight_decay_rate': 0.0}\n ]\n 
optimizer = AdamW(\n optimizer_grouped_parameters,\n lr=args.lr,\n eps = 1e-8\n )\n # Total number of training steps = number of batches * number of epochs\n total_steps = len(train_dataloader) * args.epochs\n # Create the learning rate scheduler\n scheduler = get_linear_schedule_with_warmup(\n optimizer,\n num_warmup_steps=0,\n num_training_steps=total_steps\n )\n \n # train model\n # Main Loop\n print(\"=================== Train ================\")\n print(\"##### Language Model:\", args.lang_model, \",\", \"learning rate:\", args.lr)\n print()\n\n track_time = time.time()\n # trange is a tqdm wrapper around the normal python range\n for epoch in trange(args.epochs, desc=\"Epoch\"):\n print(\"Epoch: %4i\"%epoch, dt.datetime.now())\n\n model, optimizer, scheduler, tr_loss = train_multilabel(\n train_dataloader=train_dataloader, \n model=model, \n device=device, \n optimizer=optimizer, \n scheduler=scheduler, \n num_labels=num_labels\n )\n # EVALUATION: TRAIN SET\n pred_bools_train, true_bools_train, f1_train = eval_multilabel(\n train_dataloader, model=model, device=device)\n print(\"TRAIN: micro F1 %.3f\"%(f1_train))\n \n # EVALUATION: DEV SET\n pred_bools_dev, true_bools_dev, f1_dev = eval_multilabel(\n dev_dataloader, model=model, device=device)\n print(\"EVAL: micro F1 %.3f\"%(f1_dev))\n \n\n print(\" Training and validation took in total: {:}\".format(format_time(time.time()-track_time)))\n\n # EVALUATION: TEST SYN SET\n pred_bools_syn, true_bools_syn, f1_test_syn = eval_multilabel(\n test_syn_dataloader, model=model, device=device)\n print(\"TEST SYN: micro F1 %.4f\"%(f1_test_syn))\n\n # classification report\n clf_report_syn = classification_report(true_bools_syn, pred_bools_syn, target_names=cats, digits=3)\n print(clf_report_syn)\n\n\n # EVALUATION: TEST DIA SET\n pred_bools_dia, true_bools_dia, f1_test_dia = eval_multilabel(\n test_dia_dataloader, model=model, device=device\n )\n print(\"TEST DIA: micro F1 %.4f\"%(f1_test_dia))\n\n # classification report\n clf_report_dia = classification_report(true_bools_dia, pred_bools_dia, target_names=cats, digits=3)\n print(clf_report_dia)\n \n if args.save_cr:\n pickle.dump(clf_report_syn, open(args.output_path+'clf_report_'+args.lang_model+'_test_syn_'+str(num_labels)+end+'.txt','wb'))\n pickle.dump(clf_report_dia, open(args.output_path+'clf_report_'+args.lang_model+'_test_dia_'+str(num_labels)+end+'.txt','wb'))\n\n\n if args.save_prediction:\n test_syn_df[\"category_pred\"] = pred_bools_syn\n test_dia_df[\"category_pred\"] = pred_bools_dia\n test_syn_df.category_pred.to_csv(args.output_path+args.lang_model+'_test_syn_'+str(num_labels)+end+\".tsv\", \n sep=\"\\t\", index = False, header = True, encoding = \"utf-8-sig\")\n test_dia_df.category_pred.to_csv(args.output_path+args.lang_model+'_test_dia_'+str(num_labels)+end+\".tsv\", \n sep=\"\\t\", index = False, header = True, encoding = \"utf-8-sig\")", "def main():\n\n argparser = ArgumentParser()\n argparser.add_argument('--datapath', '-D', type=str, help='Relative path to cwd of a local data file')\n argparser.add_argument('--attack_model', '-AM', type=str, default='ANY', choices=['RandomForest', 'LogReg', 'LinearSVC', 'SVC', 'KNN', 'ANY'])\n argparser.add_argument('--runconfig', '-RC', default='runconfig_mia.json', type=str, help='Path relative to cwd of runconfig file')\n argparser.add_argument('--outdir', '-O', default='outputs/test', type=str, help='Path relative to cwd for storing output files')\n args = argparser.parse_args()\n\n # Load runconfig\n with open(path.join(cwd, 
args.runconfig)) as f:\n runconfig = json.load(f)\n print('Runconfig:')\n print(runconfig)\n\n # Load data\n RawDF, metadata = load_local_data_as_df(path.join(cwd, args.datapath))\n dname = args.datapath.split('/')[-1]\n RawDF['ID'] = [f'ID{i}' for i in arange(len(RawDF))]\n RawDF = RawDF.set_index('ID')\n\n print(f'Loaded data {dname}:')\n print(RawDF.info())\n\n # Randomly select nt target records T = (t_1, ..., t_(nt))\n targetIDs = choice(list(RawDF.index), size=runconfig['nTargets'], replace=False).tolist()\n Targets = RawDF.loc[targetIDs, :]\n\n # Drop targets from sample population\n RawDFdropT = RawDF.drop(targetIDs)\n\n # Add a crafted outlier target to the evaluation set\n targetCraft = craft_outlier(RawDF, runconfig['sizeTargetCraft'])\n targetIDs.extend(list(set(targetCraft.index)))\n Targets = Targets.append(targetCraft)\n\n # Sample adversary's background knowledge RawA\n rawAidx = choice(list(RawDFdropT.index), size=runconfig['sizeRawA'], replace=False).tolist()\n\n # Sample k independent target test sets\n rawTindices = [choice(list(RawDFdropT.index), size=runconfig['sizeRawT'], replace=False).tolist() for nr in range(runconfig['nIter'])]\n\n # List of candidate generative models to evaluate\n gmList = []\n for gm, paramsList in runconfig['generativeModels'].items():\n if gm == 'IndependentHistogram':\n for params in paramsList:\n gmList.append(IndependentHistogram(*params))\n elif gm == 'BayesianNet':\n for params in paramsList:\n gmList.append(BayesianNet(*params))\n elif gm == 'PrivBayes':\n for params in paramsList:\n gmList.append(PrivBayes(*params))\n elif gm == 'CTGAN':\n for params in paramsList:\n gmList.append(CTGAN(metadata, *params))\n elif gm == 'PateGan':\n for params in paramsList:\n gmList.append(PateGan(metadata, *params))\n else:\n raise ValueError(f'Unknown GM {gm}')\n\n for GenModel in gmList:\n print(f'----- {GenModel.__name__} -----')\n\n FeatureList = [NaiveFeatureSet(GenModel.datatype), HistogramFeatureSet(GenModel.datatype, metadata), CorrelationsFeatureSet(GenModel.datatype, metadata), EnsembleFeatureSet(GenModel.datatype, metadata)]\n\n prior = {LABEL_IN: runconfig['prior']['IN'], LABEL_OUT: runconfig['prior']['OUT']}\n\n if args.attack_model == 'RandomForest':\n AttacksList = [MIAttackClassifierRandomForest(metadata, prior, F) for F in FeatureList]\n elif args.attack_model == 'LogReg':\n AttacksList = [MIAttackClassifierLogReg(metadata, prior, F) for F in FeatureList]\n elif args.attack_model == 'LinearSVC':\n AttacksList = [MIAttackClassifierLinearSVC(metadata, prior, F) for F in FeatureList]\n elif args.attack_model == 'SVC':\n AttacksList = [MIAttackClassifierSVC(metadata, prior, F) for F in FeatureList]\n elif args.attack_model == 'KNN':\n AttacksList = [MIAttackClassifierKNN(metadata, prior, F) for F in FeatureList]\n elif args.attack_model == 'ANY':\n AttacksList = []\n for F in FeatureList:\n AttacksList.extend([MIAttackClassifierRandomForest(metadata, prior, F),\n MIAttackClassifierLogReg(metadata, prior, F),\n MIAttackClassifierKNN(metadata, prior, F)])\n else:\n raise ValueError(f'Unknown AM {args.attack_model}')\n\n # Run privacy evaluation under MIA adversary\n results = evaluate_mia(GenModel, AttacksList, RawDFdropT, Targets, targetIDs, rawAidx, rawTindices,\n runconfig['sizeRawT'], runconfig['sizeSynT'], runconfig['nSynT'],\n runconfig['nSynA'], runconfig['nShadows'], metadata)\n\n outfile = f\"{dname}{GenModel.__name__}MIA\"\n\n with open(path.join(f'{args.outdir}', f'{outfile}.json'), 'w') as f:\n json.dump(results, f, indent=2, 
default=json_numpy_serialzer)", "def main():\n # Model setup\n source = np.array([1500, 8, 10, 5]) # assume source concentration and 3D coordinates\n u, pg_stability = 2, 'F' # setup environment\n sample_path = r\"data/ObservedData.csv\"\n # Build model object\n func = GaussianPlumeEAAI(lower=(10, -500, -500, 0), upper=(5000, 500, 500, 10), u=u,\n pg_stability=pg_stability, sample_path=sample_path)\n # Generate sample observed data\n func.generate_observed_data(source[0], source[1], source[2], source[3])\n\n # Reverse search source use observed data and PSO (assume unknown the source)\n pso_search_with_recommended_param(func)\n pso_search_with_optimized_param(func)", "def main():\n\n NUM_TRAIN = noise.init_train_thresh\n NUM_TEST = 20\n XDIM = 1\n\n # Train the emulator\n x_train = np.random.uniform(size=(NUM_TRAIN, XDIM))\n y_train = np.array([noise(x) for x in x_train])\n\n # Output error estimates\n noise.output_err = True\n\n # Get values from the trained emulator\n x_emu = np.random.uniform(size=(NUM_TEST, XDIM))\n\n y_emu = np.zeros_like(x_emu)\n y_err = np.zeros_like(x_emu)\n\n for i, x in enumerate(x_emu):\n val, err = noise(x)\n y_emu[i] = val\n y_err[i] = err\n\n # Plot the results\n fig = plt.figure()\n ax = fig.add_subplot(111)\n\n ax.scatter(x_train[:, 0], y_train, marker=\"+\", label=\"training values\")\n ax.errorbar(\n x_emu,\n y_emu[:, 0],\n yerr=y_err.flatten(),\n linestyle=\"None\",\n marker=\"o\",\n capsize=3,\n label=\"emulator\",\n color=\"red\",\n )\n\n ax.legend()\n\n # `__file__` is undefined when running in sphinx\n try:\n fig.savefig(__file__ + \".png\")\n except NameError:\n pass", "def main():\n run_test_summary1a()\n run_test_summary1c()\n run_test_summary1c()", "def __main__():\n\n args = setup_parser().parse_args()\n args = check_args(args)\n\n exp_times = gfind(band=args.band, detsize=args.detsize,\n exponly=args.exponly, gaper=args.gaper,\n maxgap=args.maxgap, minexp=args.minexp, quiet=args.quiet,\n retries=args.retries, skypos=args.skypos,\n trange=args.trange, verbose=args.verbose,\n skyrange=args.skyrange)", "def main():\n graph_alg_eq()\n graph_points()\n graph_smooth_from_pts()\n\n return GOOD_RET # success", "def main():\n # Load in original data\n origin_data = pd.read_csv('/Users/apple/Desktop/CSE_163/cse163_project/'\n + 'Admission_Predict_Ver1.1.csv',\n sep=r'\\s*,\\s*', header=0, encoding='ascii',\n engine='python')\n\n # Research question 1\n lasso_regression(origin_data)\n\n # Research question 2\n # We drop the 'Serial No.' column because it is unrelated to our analysis.\n df = origin_data.drop(columns=['Serial No.'])\n find_correlation(df)\n boxplots_testscores_vs_admission(df)\n\n # Research question 3\n university_rating_analysis(origin_data)", "def main():\n # Goal is to model the OSSOS resonance detections given a file with parameters for those resonances.\n # e.g. from Crompvoets et al. 
(2021)\n\n # now run a survey simulation.\n params = sys.argv[1]\n H_max = float(sys.argv[2])\n outfile=f\"{os.path.splitext(params)[0]}_Model.dat\"\n print(f\"Saving results to {outfile}\")\n if not os.access(outfile, os.R_OK):\n run(outfile, params, 123456789, H_max=H_max)\n\n # confirm this looks like the OSSOS detections using rose plot.\n face_down_plot(outfile)", "def run():\n # get arguments\n args = parse_args()\n assert args.batch_size % args.gpu_num == 0\n assert args.gru_hidden_size % 2 == 0\n\n # create a logger\n logger = logging.getLogger(\"GACM\")\n logger.setLevel(logging.INFO)\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(message)s')\n check_path(args.save_dir)\n check_path(args.load_dir)\n check_path(args.result_dir)\n check_path(args.summary_dir)\n if args.log_dir:\n check_path(args.log_dir)\n file_handler = logging.FileHandler(args.log_dir + time.strftime('%Y-%m-%d-%H:%M:%S',time.localtime(time.time())) + '.txt')\n file_handler.setLevel(logging.INFO)\n file_handler.setFormatter(formatter)\n logger.addHandler(file_handler)\n else:\n console_handler = logging.StreamHandler()\n console_handler.setLevel(logging.INFO)\n console_handler.setFormatter(formatter)\n logger.addHandler(console_handler)\n logger.info('Running with args : {}'.format(args))\n\n logger.info('Checking the directories...')\n for dir_path in [args.save_dir, args.result_dir, args.summary_dir]:\n if not os.path.exists(dir_path):\n os.makedirs(dir_path)\n \n global Dataset\n global Agent\n logger.info('Agent version: {}.0'.format(args.agent_version))\n logger.info('Dataset version: {}.0'.format(args.dataset_version))\n logger.info('Checking the directories...')\n Dataset = importlib.import_module('dataset{}'.format(args.dataset_version)).Dataset\n Agent = importlib.import_module('Agent{}'.format(args.agent_version)).Agent\n \n if args.pretrain:\n pretrain(args)\n if args.train:\n train(args)\n if args.test:\n test(args)\n if args.rank:\n rank(args)\n if args.generate_synthetic_dataset:\n generate_synthetic_dataset(args)\n logger.info('run done.')", "def main():\n\n # Initial message\n taq_data_tools_responses_physical_short_long.taq_initial_message()\n\n # Tickers and days to analyze\n year = '2008'\n tickers = ['AAPL', 'GOOG']\n taus_p = [x for x in range(10, 101, 10)]\n tau = 1000\n\n # Basic folders\n taq_data_tools_responses_physical_short_long.taq_start_folders(year)\n\n # Run analysis\n taq_data_plot_generator(tickers, year, tau, taus_p)\n\n print('Ay vamos!!!')\n\n return None", "def execute_sesmg_DEMO(self, demo_file, demo_results):\n print(demo_file)\n print(demo_results)\n\n sesmg_main(scenario_file=demo_file,\n result_path=demo_results,\n num_threads=2,\n graph=False,\n results=False,\n plotly=True)", "def main():\n\tparser = argparse.ArgumentParser(\n\t\tusage = '%(prog)s [OPTIONS] [ARGS...]',\n\t\tdescription='Calculate something',\n\t\tepilog='Contact simon.clematide@uzh.ch'\n\t\t)\n\tparser.add_argument('--version', action='version', version='0.99')\n\tparser.add_argument('-l', '--logfile', dest='logfile',\n\t\t\t\t\t\thelp='write log to FILE', metavar='FILE')\n\tparser.add_argument('-q', '--quiet',\n\t\t\t\t\t\taction='store_true', dest='quiet', default=False,\n\t\t\t\t\t\thelp='do not print status messages to stderr')\n\tparser.add_argument('-d', '--debug',\n\t\t\t\t\t\taction='store_true', dest='debug', default=False,\n\t\t\t\t\t\thelp='print debug information')\n\tparser.add_argument('-s', '--lm_dir',\n\t\t\t\t\t\taction='store', dest='lm_dir', 
default='resources.d/taggers/language-model/',\n\t\t\t\t\t\thelp='directory where LMs are stored %(default)')\n\tparser.add_argument('-i', '--iob_dir',\n\t\t\t\t\t\taction='store', dest='iob_dir', default='data.d/quaero/quaero_iob',\n\t\t\t\t\t\thelp='directory where iob training material is located %(default)')\n\tparser.add_argument('-t', '--tagger_dir',\n\t\t\t\t\t\taction='store', dest='tagger_dir', default='resources.d/taggers',\n\t\t\t\t\t\thelp='directory where to store training output %(default)')\n\tparser.add_argument('-n', '--ner_cycle',\n\t\t\t\t\t\taction='store', dest='ner_cycle', default='ner',\n\t\t\t\t\t\thelp='ner experiment cycle %(default)')\n\tparser.add_argument('-c', '--correction_mode',\n\t\t\t\t\t\taction='store', dest='correction_mode', default='raw',\n\t\t\t\t\t\thelp='correction mode of the NEs in training data %(default)')\n\tparser.add_argument('-m', '--lm_domain',\n\t\t\t\t\t\taction='store', dest='lm_domain', default='pressfr',\n\t\t\t\t\t\thelp='character level language model domain %(default)')\n\tparser.add_argument('-p', '--train_patience',\n\t\t\t\t\t\taction='store', dest='train_patience', type=int, default=3,\n\t\t\t\t\t\thelp='training patience %(default)')\n\tparser.add_argument('-W', '--use_wiki_wordemb',\n\t\t\t\t\t\taction='store_true', dest='use_wiki_wordemb', default=False,\n\t\t\t\t\t\thelp='use pre-trained wiki word embeddings')\n\tparser.add_argument('-P', '--use_press_wordemb',\n\t\t\t\t\t\taction='store_true', dest='use_press_wordemb', default=False,\n\t\t\t\t\t\thelp='use indomain press word embeddings')\n\tparser.add_argument('-C', '--use_crf',\n\t\t\t\t\t\taction='store_true', dest='use_crf', default=False,\n\t\t\t\t\t\thelp='use CRF layer')\n\tparser.add_argument('args', nargs='*')\n\toptions = parser.parse_args()\n\tif options.logfile:\n\t\tlogging.basicConfig(filename=logfile)\n\tif options.debug:\n\t\tlogging.basicConfig(level=logging.DEBUG)\n\n\ttrain_tagger(options)", "def main():\n region = 'Kanto'\n year = 2000\n # callParallelGA(region)\n callParallelReducedGA(region)\n \n\n region = 'EastJapan'\n year = 2000\n callParallelReducedGA(region)\n # callParallelGA(region)\n\n\n region = 'Tohoku'\n year = 2000\n callParallelReducedGA(region)\n # callParallelGA(region)\n\n \n region = 'Kansai'\n year = 2000\n callParallelReducedGA(region)\n # callParallelGA(region)", "def main():\n\tdata = load_dataset()\n\tdata = normalize_data(data, cols_to_norm)\n\ttrain, test = generate_train_testset(data)\n\n\tX_train = train.drop(['Time', 'EVENT'], axis=1).dropna(axis=0)\n\ty_train = train.dropna(axis=0)['EVENT']\n\n\tX_test = test.drop(['Time', 'EVENT'], axis=1).dropna(axis=0)\n\ty_test = test.dropna(axis=0)['EVENT']\n\n\tmodel = XGBClassifier(n_estimators=1000, random_state=42)\n\tmodel.fit(X_train, y_train)\n\n\tprint(model)\n\n\ty_pred = model.predict(X_test)\n\tpredictions = [round(value) for value in y_pred]\n\n\taccuracy = accuracy_score(y_test, predictions)\n\tprint(\"Accuracy: %.2f%%\" % (accuracy * 100.0))\n\n\tf1 = f1_score(y_test, y_pred)\n\tprint(\"F1: %.6f%%\" % (f1))", "def main(args):\n\n # Labels to be used in the experiment.\n task_ids = {Labels.hotness.value: LossTypes.mse,\n Labels.tempo.value: LossTypes.mse,\n Labels.loudness.value: LossTypes.mse}\n\n # Get the training, validation and testing set data and ground-truths\n x_train, x_validate, x_test, y_train, y_validate, y_test = fetch_data(task_ids)\n\n exp = Experiment(expt_name=args.experiment_name, task_ids=task_ids, x_train=x_train, x_validate=x_validate,\n 
x_test=x_test, y_train=y_train, y_validate=y_validate, y_test=y_test,\n model_class=LowLevelSharingModel, learning_rate=args.learning_rate,\n batch_size=args.batch_size, num_epochs=args.num_epochs)\n exp.initialize_network()\n exp.train()\n sys.stderr.write(\"------\\n\")\n sys.stderr.write(\"Training complete. Logs, outputs, and model saved in \" + os.getcwd() + \"\\n\")", "def main(args=None):\n opt = _parse_options(args)\n plot_analysis(opt)\n plt.show()", "def main():\n\t#ps = PackageScanner()\n\t#packages = ps.getInstalledPackages()\n\t#print(packages)\n\t#ps.saveScanResults()\n\n\tan = Analyzer()\n\tan.loadFromFile(config.PKG_SCAN_DIR / config.PKG_SCAN_FILE)\n\t#an.loadFromPackageCont(packages)\n\tan.analyze()\n\tan.saveAnalysisResults()", "def main():\n ex = Experiment(SEED)\n ex.main()", "def main():\n\n\t# Parse the file\n\tmem_file = advanced_analysis('../data_1/mempages.dat.out')", "def main(argv=None):\n\n if not argv:\n argv = sys.argv\n\n # setup command line parser\n parser = E.OptionParser(\n version=\"%prog version: $Id$\",\n usage=globals()[\"__doc__\"])\n\n parser.add_option(\n \"-f\",\n \"--filename\",\n dest=\"file_name\",\n type=\"string\",\n help=\"Expression data set(ranked list of genes) [default=%default].\")\n\n parser.add_option(\n \"-g\",\n \"--geneset\",\n dest=\"geneset\",\n type=\"string\",\n help=\"Annotated gene sets database for enrichment analysis\"\n \"[default=%default].\")\n\n parser.add_option(\n \"-m\",\n \"--minimum\",\n dest=\"min_gene\",\n type=\"int\",\n help=\"gene sets smaller than this are excluded from the analysis \"\n \"[default=%default].\")\n\n parser.add_option(\n \"-x\",\n \"--maximum\",\n dest=\"max_gene\",\n type=\"int\",\n help=\"gene sets larger than this are excluded from the analysis \"\n \"[default=%default].\")\n\n parser.add_option(\n \"-s\",\n \"--randomseed\",\n dest=\"seed\",\n type=\"int\",\n help=\"A number use to initialize a pseudorandom number generator\"\n \" [default=%default].\")\n\n parser.add_option(\n \"-n\",\n \"--permutation\",\n dest=\"iteration\",\n type=\"int\",\n help=\"Number of permutations to perform in assessing the statistical significance of the enrichment score [default=%default].\")\n\n parser.add_option(\n \"-d\",\n \"--display\",\n dest=\"plot_no\",\n type=\"int\",\n help=\"Displays enrichment plots for the specified no. of gene sets with the highest absolute normalized enrichment scores (each phenotype) [default=%default].\")\n\n parser.add_option(\n \"-l\",\n \"--num_leading\",\n dest=\"fdr_num\",\n type=\"int\",\n help=\"Number of genesets for leading edge analysis, by default top 11 enriched genesets will be used for this analysis.\"\n \"Minimum number of genesets should be 4. [default=%default].\")\n\n parser.set_defaults(\n file_name=None,\n geneset=None,\n min_gene=25,\n max_gene=500,\n seed=42,\n iteration=1000,\n plot_no=20,\n fdr_num=10,\n )\n (options, args) = E.Start(parser, add_database_options=True)\n # Preprocess expression file.\n id, expression_value = read_expression(options.file_name)\n\n # Preprocess geneset\n ex_list, in_list, geneset_indicator = preprocess_geneset(\n options.geneset, options.min_gene, options.max_gene, id)\n generate_gen_set_report(\n ex_list,\n in_list,\n options.min_gene,\n options.max_gene,\n geneset_indicator)\n\n # Store filtered geneset. 
Because in_list contains annotation and\n # description also,for each geneset.\n GG = [item[4:len(item)] for item in in_list]\n\n # Store index of each id (from the ranked list of expression data as a\n # dicitionary)\n ind_dict = dict((k, i) for i, k in enumerate(id))\n\n # Calculate enrichment score for each geneset.\n # Create boolean array for total number of id in expression data and float\n # array for Enrichment Score of options.iteration permutation.\n temp = np.zeros((len(id),), dtype=np.bool)\n temp_2 = np.zeros((len(id),), dtype=np.float)\n store_permute = np.zeros((options.iteration, len(GG)), dtype=np.float)\n original_es = np.zeros((len(GG),), dtype=np.float)\n original_es_index = np.zeros((len(GG),), dtype=np.int)\n original_nes = np.zeros((len(GG),), dtype=np.float)\n permute_nes = np.zeros((options.iteration, len(GG)), dtype=np.float)\n nominal_p = np.zeros((len(GG),), dtype=np.float)\n nominal_p_nes = np.zeros((len(GG),), dtype=np.float)\n store_enrichment_score = []\n store_gene_leading_info = np.zeros((3, len(GG)), dtype=np.int)\n store_gene_leading_matrix = []\n\n count = 0\n for i in GG:\n inter = intersect(ind_dict, i[0])\n indices = sorted([ind_dict[x] for x in inter])\n S = len(i[0])\n enrich_score = calculate_enrichment_score(\n indices, temp, expression_value, S, temp_2)\n del indices\n del inter\n a1 = [\n np.absolute(\n np.max(enrich_score)), np.absolute(\n np.min(enrich_score))]\n if(np.absolute(np.max(enrich_score)) == np.absolute(np.min(enrich_score))):\n original_es[count] = np.max(enrich_score)\n original_es_index[count] = np.argmax(enrich_score)\n else:\n if(a1.index(max(a1)) == 0):\n original_es[count] = np.max(enrich_score)\n original_es_index[count] = np.argmax(enrich_score)\n else:\n original_es[count] = np.min(enrich_score)\n original_es_index[count] = np.argmin(enrich_score)\n store_enrichment_score.append(list(enrich_score))\n # This section has been added by me for \"Leading Edge Analysis\".\n t0 = original_es_index[count]\n if(original_es[count] < 0):\n se = temp[t0:len(id)]\n else:\n se = temp[0:t0 + 1]\n c0 = np.sum(se == 1)\n c1 = len(se)\n tag = (c0 * 100) / S\n tag2 = (c0 * 100) / len(id)\n gene_l = (c1 * 100) / len(id)\n signal_l = (tag / 100) * (1 - (gene_l / 100)) * \\\n (len(id) / (len(id) - S))\n store_gene_leading_info[0][count] = tag\n store_gene_leading_info[1][count] = gene_l\n store_gene_leading_info[2][count] = signal_l * 100\n temp_2.fill(0)\n temp.fill(0)\n del a1\n del enrich_score\n count = count + 1\n\n print(\"Enrichment score calculation has been successfully completed\")\n\n # Calculate Randon Background by random permutation of gene set.\n # calculate total element in gene set.\n s = 0\n for t in GG:\n s = s + len(t[0])\n size_info = [len(t[0]) for t in GG]\n\n # Random index generation\n np.random.seed(options.seed)\n id_new = np.array(id)\n\n # Calculate enrichment score for permuted genesets\n count = 0\n for per in range(0, options.iteration):\n if((per % 100) == 0):\n print(per)\n for i in range(0, len(size_info)):\n tt = size_info[i]\n tar = id_new[np.random.randint(len(id), size=(1, tt))]\n inter = intersect(ind_dict, tar[0])\n indices = sorted([ind_dict[x] for x in inter])\n S = len(tar[0])\n enrich_score = calculate_enrichment_score(\n indices, temp, expression_value, S, temp_2)\n del indices\n del inter\n a1 = [\n np.absolute(\n np.max(enrich_score)), np.absolute(\n np.min(enrich_score))]\n if(np.absolute(np.max(enrich_score)) == np.absolute(np.min(enrich_score))):\n store_permute[per, i] = 
np.max(enrich_score)\n else:\n if(a1.index(max(a1)) == 0):\n store_permute[per, i] = np.max(enrich_score)\n else:\n store_permute[per, i] = np.min(enrich_score)\n del a1\n temp.fill(0)\n temp_2.fill(0)\n del enrich_score\n\n print(\"Enrichment score calculation for permuted sets has been successfully completed\")\n\n # Calculation of empirical p-value.\n for i in range(0, len(original_es)):\n if(original_es[i] >= 0):\n A1 = store_permute[:, i] >= original_es[i]\n A2 = store_permute[A1, i]\n nominal_p[i] = np.divide(len(A2), options.iteration)\n else:\n A1 = store_permute[:, i] <= original_es[i]\n A2 = store_permute[A1, i]\n nominal_p[i] = np.divide(len(A2), options.iteration)\n\n # Normalization of enrichment score\n\n for i in range(0, len(GG)):\n if(original_es[i] < 0):\n A1 = store_permute[:, i] < 0\n A2 = np.mean(np.absolute(store_permute[A1, i]))\n original_nes[i] = np.divide(original_es[i], A2)\n else:\n A1 = store_permute[:, i] >= 0\n A2 = np.mean(np.absolute(store_permute[A1, i]))\n original_nes[i] = np.divide(original_es[i], A2)\n\n # Normalization of enrichment score for each permutation.\n\n for per in range(0, options.iteration):\n for i in range(0, len(GG)):\n if(store_permute[per, i] < 0):\n A1 = store_permute[:, i] < 0\n A2 = np.mean(np.absolute(store_permute[A1, i]))\n permute_nes[per, i] = np.divide(store_permute[per, i], A2)\n else:\n A1 = store_permute[:, i] >= 0\n A2 = np.mean(np.absolute(store_permute[A1, i]))\n permute_nes[per, i] = np.divide(store_permute[per, i], A2)\n\n print(\"Normalization has been successfully completed\")\n\n # Calculation of empirical p-value for normalized enrichment score.\n\n for i in range(0, len(original_nes)):\n if(original_nes[i] >= 0):\n A1 = permute_nes[:, i] >= original_nes[i]\n A2 = permute_nes[A1, i]\n nominal_p_nes[i] = np.divide(len(A2), options.iteration)\n else:\n A1 = permute_nes[:, i] <= original_nes[i]\n A2 = permute_nes[A1, i]\n nominal_p_nes[i] = np.divide(len(A2), options.iteration)\n\n nes_up_index = np.array([], dtype=np.int)\n nes_down_index = np.array([], dtype=np.int)\n # Extract two set NES>0 and NES<0\n if(sum(original_nes > 0) > 0):\n NN = original_nes[original_nes > 0]\n up_for_plot = np.where(original_nes >= 0)[0]\n nes_up = -np.sort(-NN)\n nes_up_index = np.argsort(-NN)\n del NN\n if(sum(original_nes < 0) > 0):\n NN = original_nes[original_nes < 0]\n down_for_plot = np.where(original_nes < 0)[0]\n nes_down = np.sort(NN)\n nes_down_index = np.argsort(NN)\n del NN\n\n # Calculate FDR\n fdr_upregulated = np.array([], dtype=np.float)\n fdr_downregulated = np.array([], dtype=np.float)\n if(len(nes_up_index) > 0):\n fdr_upregulated = np.zeros((len(nes_up),), dtype=np.float)\n if(len(nes_down_index) > 0):\n fdr_downregulated = np.zeros((len(nes_down),), dtype=np.float)\n\n # For upregulated:\n if(len(nes_up_index) > 0):\n p_value_up = nominal_p_nes[original_nes > 0]\n b, fdr_upregulated, w1, w2 = sm.multipletests(\n p_value_up[nes_up_index], alpha=0.05, method='fdr_bh', is_sorted=False, returnsorted=False)\n\n # For downregulated\n if(len(nes_down_index) > 0):\n p_value_down = nominal_p_nes[original_nes < 0]\n b, fdr_downregulated, w1, w2 = sm.multipletests(\n p_value_down[nes_down_index], alpha=0.05, method='fdr_bh', is_sorted=False, returnsorted=False)\n\n print(\"FDR calculation has been successfully completed\")\n\n # Generate graphical report and detail table report also\n kk = os.path.basename(options.file_name)\n part1, part2 = kk.split('.')\n kk = os.path.basename(options.geneset)\n part3, part4 = 
kk.split('.')\n nam1 = \"CGAT_REPORT_FOR_upregulated\"\n nam2 = \"CGAT_REPORT_FOR_downregulated\"\n nam3 = \"CGAT_REPORT_FOR_fdr_sorted_up_down\"\n nam4 = \"CGAT_Summary_Report\"\n file_to_report_1 = \".\".join([nam1, part1, \"xls\"])\n file_to_report_2 = \".\".join([nam2, part1, \"xls\"])\n file_to_report_3 = \".\".join([nam3, part1, \"xls\"])\n file_to_report_4 = \".\".join([nam4, part1, \"txt\"])\n\n # PREPARE FINAL SUMMARY REPORT.\n f = open(file_to_report_4, \"w\")\n f.write(\"Enrichment in phenotype (upregulated):\" + \"\\n\")\n f.write(\"@ \" + str(len(nes_up_index)) + \"/\" +\n str(len(in_list)) + \" gene sets are upregulated\" + \"\\n\")\n f.write(\"@ \" + str(sum(fdr_upregulated < 0.05)) +\n \" gene sets are significant at FDR < 5%\" + \"\\n\")\n f.write(\"@ \" + str(sum(fdr_upregulated < 0.01)) +\n \" gene sets are significant at FDR < 1%\" + \"\\n\")\n w1 = nominal_p[original_nes >= 0]\n w2 = nominal_p[original_nes < 0]\n f.write(\"@ \" +\n str(sum(w1 < 0.01)) +\n \" gene sets are significantly enriched at nominal pvalue < 1%\" +\n \"\\n\")\n f.write(\"@ \" +\n str(sum(w1 < 0.05)) +\n \" gene sets are significantly enriched at nominal pvalue < 5%\" +\n \"\\n\\n\")\n f.write(\"Enrichment in phenotype (downregulated):\" + \"\\n\")\n f.write(\"@ \" + str(len(nes_down_index)) + \"/\" +\n str(len(in_list)) + \" gene sets are upregulated\" + \"\\n\")\n f.write(\"@ \" + str(sum(fdr_downregulated < 0.05)) +\n \" gene sets are significant at FDR < 5%\" + \"\\n\")\n f.write(\"@ \" + str(sum(fdr_downregulated < 0.01)) +\n \" gene sets are significant at FDR < 1%\" + \"\\n\")\n f.write(\"@ \" +\n str(sum(w2 < 0.01)) +\n \" gene sets are significantly enriched at nominal pvalue < 1%\" +\n \"\\n\")\n f.write(\"@ \" +\n str(sum(w2 < 0.05)) +\n \" gene sets are significantly enriched at nominal pvalue < 5%\" +\n \"\\n\\n\")\n f.close()\n\n # Plot enrichment score of top gene set for each phenotype.\n\n # If specified number of top genesets for plotting enrichemnt score is higher than the total number of genesets.I will\n # plot enrichemnt score for all genesets.\n #if(options.plot_no >= len(GG)):\n #options.plot_no = len(GG)\n if(options.plot_no >= len(nes_up_index)):\n options.plot_no = len(nes_up_index)\n if(options.plot_no >= len(nes_down_index)):\n options.plot_no = len(nes_down_index)\n xcc = int(options.plot_no / 2) + 1\n for i in range(0, options.plot_no):\n if(len(nes_up_index) > 0):\n aw = up_for_plot[nes_up_index[i]]\n plot_enrichment_score(\n store_enrichment_score,\n original_es_index,\n original_es,\n in_list,\n aw,\n len(id))\n plot_random_ES(store_permute, aw, in_list)\n\n # Downregulated\n if(len(nes_down_index) > 0):\n aw = down_for_plot[nes_down_index[i]]\n plot_enrichment_score(\n store_enrichment_score,\n original_es_index,\n original_es,\n in_list,\n aw,\n len(id))\n plot_random_ES(store_permute, aw, in_list)\n for i in range(0, options.plot_no):\n if(len(nes_up_index) > 0):\n aw = up_for_plot[nes_up_index[i]]\n plot_enrichment_score_subplot(\n store_enrichment_score,\n original_es_index,\n original_es,\n in_list,\n aw,\n len(id),\n xcc,\n i + 1,\n \"enplot_upregulated_summary.jpeg\",\n options.plot_no)\n for i in range(0, options.plot_no):\n if(len(nes_down_index) > 0):\n aw = down_for_plot[nes_down_index[i]]\n plot_enrichment_score_subplot(\n store_enrichment_score,\n original_es_index,\n original_es,\n in_list,\n aw,\n len(id),\n xcc,\n i + 1,\n \"enplot_downregulated_summary.jpeg\",\n options.plot_no)\n # Plot summary of top 20 genesets of each phenotype\n 
if(len(nes_up_index) > 0):\n plot_summary_report(nes_up_index, original_nes,\n up_for_plot, in_list, \"upregulated\")\n if(len(nes_down_index) > 0):\n plot_summary_report(nes_down_index, original_nes,\n down_for_plot, in_list, \"downregulated\")\n\n # Generate pvalue vs ES graph.\n if(len(nes_down_index) > 0):\n aw = down_for_plot[nes_down_index]\n if(len(nes_up_index) > 0):\n aw2 = up_for_plot[nes_up_index]\n plt.figure(figsize=(8, 6), dpi=80)\n plt.plot(original_nes, nominal_p, 'ko')\n if(len(nes_up_index) > 0):\n plt.plot(nes_up, nominal_p[aw2], 'k-')\n if(len(nes_down_index) > 0):\n plt.plot(nes_down, nominal_p[aw], 'k-')\n plt.xticks(fontsize=12, weight='bold')\n plt.yticks(fontsize=12, weight='bold')\n plt.xlabel(\"\\nNormalized Enrichment Score(NES)\", **axis_font)\n plt.ylabel('\\np-value', **axis_font, labelpad=28)\n plt.title('NES vs Significance', **title_font)\n plt.grid()\n plt.tight_layout()\n ax2 = plt.twinx()\n if(len(nes_up_index) > 0):\n ax2.plot(\n nes_up,\n fdr_upregulated,\n marker='s',\n color='firebrick',\n linestyle='')\n if(len(nes_down_index) > 0):\n ax2.plot(\n nes_down,\n fdr_downregulated,\n marker='s',\n color='firebrick',\n linestyle='')\n ticks_font = font_manager.FontProperties(\n family='Helvetica',\n style='normal',\n size=12,\n weight='bold',\n stretch='normal')\n for label in ax2.get_yticklabels():\n label.set_fontproperties(ticks_font)\n ax2.tick_params('y', colors='firebrick')\n # ax2.tick_params('y', colors='firebrick',labelsize = 'x-large',width=20)\n ax2.yaxis.set_tick_params(\n 'y',\n colors='firebrick',\n labelsize='x-large',\n width=20)\n ax2.set_ylabel('\\nFDR q-value', **axis_font)\n plt.savefig('Genesets_null_distribution.jpeg', bbox_inches='tight')\n plt.close()\n\n # The histogram of the ES across all genesets\n plt.figure(figsize=(11, 6), dpi=80)\n n, bins, patches = plt.hist(original_nes, 50, normed=False, facecolor='darkgreen',\n alpha=0.9, histtype='step', lw=4, color='darkgreen')\n plt.xticks(fontsize=12, weight='bold')\n plt.yticks(fontsize=12, weight='bold')\n plt.xlabel('\\nNormalized Enrichment scores across genesets', **axis_font)\n plt.ylabel('Frequency', **axis_font, labelpad=28)\n plt.title('Histogram of Normalized Enrichment Score', **title_font)\n plt.grid(True)\n plt.tight_layout()\n plt.savefig('Genesets_NES_Histogram.jpeg', bbox_inches='tight')\n plt.close()\n\n print(\"Graphical reports has been successfully completed\")\n\n # Write detail report for upregulated gene.\n if(len(nes_up_index) > 0):\n with open(file_to_report_1, 'w') as f:\n f.write(\"NAME\\tGENE SET(GS)\\tGS DETAILS\\tSIZE\\tENRICHMENT SCORE(ES)\\tNORMALIZED ENRICHMENT SCORE(NES)\\tEMPERICAL p-value\\tFDR-q value\\tRANK AT MAX\\tLEADING EDGE\\n\")\n for i in range(0, len(nes_up_index)):\n aw = up_for_plot[nes_up_index[i]]\n itt = in_list[aw]\n f.write(itt[0] +\n \"\\t\" +\n itt[0] +\n \"\\t\" +\n itt[1] +\n \"\\t\" +\n str(itt[3]) +\n \"\\t\" +\n str(original_es[aw]) +\n \"\\t\" +\n str(original_nes[aw]) +\n \"\\t\" +\n str(nominal_p[aw]) +\n \"\\t\" +\n str(fdr_upregulated[i]) +\n \"\\t\" +\n str(original_es_index[aw]) +\n \"\\t\" +\n \"tags=\" +\n str(store_gene_leading_info[0][aw]) +\n \"%, list=\" +\n str(store_gene_leading_info[1][aw]) +\n \"%\" +\n \"\\n\")\n f.close()\n\n # Write detail report for downregulated gene.\n if(len(nes_down_index) > 0):\n with open(file_to_report_2, 'w') as f:\n f.write(\"NAME\\tGENE SET(GS)\\tGS DETAILS\\tSIZE\\tENRICHMENT SCORE(ES)\\tNORMALIZED ENRICHMENT SCORE(NES)\\tEMPERICAL p-value\\tFDR-q value\\tRANK AT 
MAX\\tLEADING EDGE\\n\")\n for i in range(0, len(nes_down_index)):\n aw = down_for_plot[nes_down_index[i]]\n itt = in_list[aw]\n f.write(itt[0] +\n \"\\t\" +\n itt[0] +\n \"\\t\" +\n itt[1] +\n \"\\t\" +\n str(itt[3]) +\n \"\\t\" +\n str(original_es[aw]) +\n \"\\t\" +\n str(original_nes[aw]) +\n \"\\t\" +\n str(nominal_p[aw]) +\n \"\\t\" +\n str(fdr_downregulated[i]) +\n \"\\t\" +\n str(original_es_index[aw]) +\n \"\\t\" +\n \"tags=\" +\n str(store_gene_leading_info[0][aw]) +\n \"%, list=\" +\n str(store_gene_leading_info[1][aw]) +\n \"%\" +\n \"\\n\")\n f.close()\n\n # Write detail report of up and down regulated genes in to a single file,\n # sorted by FDR values\n merge_mat = np.append(fdr_upregulated, fdr_downregulated)\n merge_index = np.argsort(np.append(fdr_upregulated, fdr_downregulated))\n merge_x = []\n merge_y = []\n count = 0\n with open(file_to_report_3, 'w') as f:\n f.write(\"NAME\\tGENE SET(GS)\\tGS DETAILS\\tSIZE\\tENRICHMENT SCORE(ES)\\tNORMALIZED ENRICHMENT SCORE(NES)\\tEMPERICAL p-value\\tFDR-q value\\tRANK AT MAX\\tLEADING EDGE\\n\")\n for i in merge_index:\n CW = merge_mat[i]\n if(i >= len(fdr_upregulated)):\n aw = down_for_plot[nes_down_index[i - len(fdr_upregulated)]]\n else:\n aw = up_for_plot[nes_up_index[i]]\n itt = in_list[aw]\n ######################################\n # This section is for summary plot of enriched genesets.\n if(count < 20):\n merge_x.append(itt[0])\n merge_y.append(original_nes[aw])\n count = count + 1\n #####################################\n f.write(itt[0] +\n \"\\t\" +\n itt[0] +\n \"\\t\" +\n itt[1] +\n \"\\t\" +\n str(itt[3]) +\n \"\\t\" +\n str(original_es[aw]) +\n \"\\t\" +\n str(original_nes[aw]) +\n \"\\t\" +\n str(nominal_p[aw]) +\n \"\\t\" +\n str(CW) +\n \"\\t\" +\n str(original_es_index[aw]) +\n \"\\t\" +\n \"tags=\" +\n str(store_gene_leading_info[0][aw]) +\n \"%, list=\" +\n str(store_gene_leading_info[1][aw]) +\n \"%\" +\n \"\\n\")\n f.close()\n # Plot top 20 enriched genset sorted by FDR values.\n plot_summary_report_for_fdr(merge_x, merge_y)\n del merge_mat\n del merge_index\n del merge_x\n del merge_y\n\n print(\"Reports have been successfully generated\")\n\n '''\n ********************************************************************************************************************\n LEADING EDGE ANALYSIS\n *********************************************************************************************************************\n THIS SECTION IS FOR LEADING EDGE ANALYSIS.\n '''\n\n print(\"Leading edge analysis has been started\")\n # Make a directory.\n newpath = r'Leading_Edge_Analysis'\n if not os.path.exists(newpath):\n os.makedirs(newpath)\n\n store_gene_leading_matrix = generate_leading_edge_m(\n nes_up_index,\n nes_down_index,\n fdr_upregulated,\n fdr_downregulated,\n in_list,\n options.fdr_num,\n ind_dict,\n down_for_plot,\n up_for_plot,\n temp,\n original_es,\n original_es_index,\n id_new,\n store_gene_leading_matrix,\n GG,\n store_gene_leading_info)\n\n # PREPARE LEADING EDGE SUMMARY FILE.\n f = open(\"Leading_Edge_Analysis/CGAT_LEADING_EDGE_ANALYSIS_SUMMARY.tsv\", \"w\")\n f.write(\"Details of gene sets and signal used in this analysis\" + \"\\n\")\n f.write(\"There were \" +\n str(options.fdr_num) +\n \" gene sets used in the leading edge analysis (see below for details)\" +\n \"\\n\\n\")\n f.write(\n \"Gene set\" +\n \"\\t\" +\n \"Members\" +\n \"\\t\" +\n \"Members in signal\" +\n \"\\t\" +\n \"Tag%\" +\n \"\\t\" +\n \"List%\" +\n \"\\t\" +\n \"FDR-q value\" +\n \"\\n\")\n for i in store_gene_leading_matrix:\n c = i\n 
f.write(c[0] + \"\\t\" + str(c[1]) + \"\\t\" + str(c[2]) + \"\\t\" +\n str(c[4]) + \"%\\t\" + str(c[5]) + \"%\\t\" + str(c[6]) + \"\\n\")\n f.close()\n\n # PREPARE LEADING EDGE MATRIX FILE.\n with open(\"Leading_Edge_Analysis/CGAT_leading_edge_matrix_for_results.gmx\", 'w') as f:\n for_pd_in = []\n count = 0\n for i in store_gene_leading_matrix:\n c = i\n for_pd_in.append(c[3])\n f.write(c[0] + \"_signal\")\n if(count == (len(store_gene_leading_matrix) - 1)):\n f.write(\"\\n\")\n else:\n f.write(\"\\t\")\n count = count + 1\n\n count = 0\n for i in range(0, len(store_gene_leading_matrix)):\n f.write(\"na\")\n if(count == (len(store_gene_leading_matrix) - 1)):\n f.write(\"\\n\")\n else:\n f.write(\"\\t\")\n count = count + 1\n v = pd.DataFrame(for_pd_in)\n v1 = v.transpose()\n v1.to_csv(f, encoding='utf-8', index=False, sep='\\t', header=False)\n f.close()\n\n # PREPARE PLOT FOR GENE IN SUBSETS\n G1 = {}\n subset_dict = {}\n subset_dict_assign_matrix = {}\n name_l_x = []\n name_l_y = []\n # exp_for_clustering=[]\n\n for i in for_pd_in:\n G1 = i\n c1 = dict.fromkeys(G1, 1)\n subset_dict.update(c1)\n subset_dict_assign_matrix.update(c1)\n\n count = 0\n for j in subset_dict_assign_matrix.keys():\n subset_dict_assign_matrix[j] = count\n count = count + 1\n name_l_x.append(j)\n name_l_y = []\n for i in store_gene_leading_matrix:\n name_l_y.append(i[0] + \"_signal\")\n\n # CREATE ASSIGNMENT MATRIX FOR LEADING EDGE SUBSET\n store_unclustered_boolean = np.zeros(\n (len(store_gene_leading_matrix),\n len(subset_dict_assign_matrix)),\n dtype=np.int)\n store_clustered_boolean = np.zeros(\n (len(store_gene_leading_matrix),\n len(subset_dict_assign_matrix)),\n dtype=np.float)\n count = 0\n\n for i in store_gene_leading_matrix:\n c = i\n for jj in range(0, len(c[3])):\n xx = subset_dict_assign_matrix[c[3][jj]]\n store_unclustered_boolean[count][xx] = 1\n store_clustered_boolean[count][xx] = expression_value[ind_dict[c[3][jj]]]\n count = count + 1\n\n # PREPARE HEAT MAP FOR OVERLAPPING GENE SET UNCLUSTERED.\n jac_l = []\n # intensity_for_cluster = []\n jac_l, subset_dict = heatmap_leading_edge_subset(\n store_gene_leading_matrix, subset_dict, jac_l)\n\n # HEATMAP OF UNCLUSTERED AND CLUSTERED ASSIGNMENT MATRIX\n cMap = ListedColormap(['white', 'red'])\n heatmap_plot_assign(\n store_unclustered_boolean,\n name_l_x,\n name_l_y,\n 'Leading_Edge_Analysis/Leading_Edge_heatmap_unclustered.jpeg',\n 0,\n cMap)\n leading_edge_clustering(\n store_unclustered_boolean,\n store_clustered_boolean,\n name_l_x,\n name_l_y)\n\n # PLOT DISTRIBUTION oF GENES AMONG LEADING SUBSETS\n temp_dict_k = []\n temp_dict_v = []\n for i in subset_dict.keys():\n if(subset_dict[i] > 1):\n temp_dict_k.append(i)\n temp_dict_v.append(subset_dict[i])\n\n fig, ax = plt.subplots()\n ax.grid(zorder=3)\n ax.xaxis.grid()\n ax.yaxis.labelpad = 28\n plt.bar(range(len(temp_dict_k)), temp_dict_v, width=0.3, color='magenta')\n plt.xticks(range(len(temp_dict_k)), temp_dict_k)\n # Format\n fig = plt.gcf()\n fig.set_size_inches(12, 10)\n plt.yticks(fontsize=12, weight='bold')\n # plt.yticks(np.arange(min(temp_dict_v), max(temp_dict_v) + 1, 1.0))\n plt.xlabel('\\nGenes', **axis_font)\n plt.ylabel('\\nNumber Of Gene Sets', **axis_font)\n plt.title('Distribution of genes among leading edge subsets', **title_font)\n labels = ax.get_xticklabels()\n plt.setp(labels, rotation=90, **axis_font_h)\n plt.tight_layout()\n plt.savefig(\n 'Leading_Edge_Analysis/Genes_in_leading_edge_subset.jpeg',\n bbox_inches='tight')\n plt.close()\n del temp_dict_k\n del 
temp_dict_v\n\n # PLOT HISTOGRAM OF JACQUARD\n plt.figure(figsize=(11, 6), dpi=80)\n n, bins, patches = plt.hist(\n jac_l, 70, normed=1, color='darkgreen', alpha=0.9)\n plt.xticks(fontsize=12, weight='bold')\n plt.yticks(fontsize=12, weight='bold')\n plt.xlabel('\\nJacquard', **axis_font)\n plt.ylabel('Number of occurences', **axis_font, labelpad=28)\n plt.title('Overlapping among leading edge subset pairs', **title_font)\n plt.grid(True)\n plt.tight_layout()\n plt.savefig(\n 'Leading_Edge_Analysis/Jacquard_distribution.jpeg',\n bbox_inches='tight')\n plt.close()\n\n print(\"Leading edge analysis has been finished\")\n E.Stop()", "def main( argv = None ):\n\n if not argv: argv = sys.argv\n\n # setup command line parser\n parser = optparse.OptionParser( version = \"%prog version: $Id$\",\n usage = globals()[\"__doc__\"] )\n\n parser.add_option( \"-p\", \"--proc\", dest=\"processors\", type=\"int\",\n help = \"use # processors [%default]\" )\n\n parser.set_defaults(\n processors = 1 )\n\n\n options, args = E.Start( parser, argv = argv )\n\n t1 = Test( RunnerGat, \n small_test_segmented_workspaces(), \n [ ValidatorNumSamples,\n ValidatorSegmentDistribution ] )\n\n t1.run( options.stdout, \n processors = options.processors )\n\n E.Stop()", "def main():\n\n config = None\n\n try:\n args = get_args()\n config = process_config(args.config)\n raise RuntimeError(\"Missing or invalid arguments\")\n except Exception as e:\n logging.error(\"Failed\", exc_info=e)\n\n print(\"Create the data generator.\")\n # data_loader = MnistDataLoader(config=config)\n data_loader = IrisDataLoader(config=config)\n train_data = data_loader.get_train_data()\n test_data = data_loader.get_test_data()\n\n print(\"Build the model\")\n # cnn_model = ConvModel(config=config).build_model()\n cnn_model = ANNModel(config=config).build_model()\n\n print(\"Load the best weights\")\n cnn_model.load_weights(\"experiments/{}/{}/checkpoints/{}-weights.best.hdf5\".format(\n config.evaluation.date, config.exp.name, config.exp.name))\n\n print(\"Evaluate the model\")\n print(\"Training Metrics\")\n evaluate(model=cnn_model, data=train_data)\n print(\"Testing Metrics\")\n evaluate(model=cnn_model, data=test_data)\n\n # print(\"Visualize loss and accuracy for Training and Validation data\")\n # plot_history(config=config)\n\n # print(\"Plotting ROC Curve\")\n # plot_roc(model=cnn_model, data=test_data)\n\n print(\"Classifcation Accuracy Report\")\n classification_accuracy_report(model=cnn_model, data=test_data)", "def main():\n\n # Chdir into script directory so to properly resolve relative paths in configuration\n os.chdir(os.path.dirname(os.path.realpath(__file__)) + \"/\")\n\n # Disable proxy as we access localhost, both to avoid overhead and issues with proxy misconfiguration\n os.environ['NO_PROXY'] = '*'\n\n # Stop any GraphDB server that we previously started and is possibly still around due to script interruption/crash\n shell(f\"{cmd_graphdb} stopall\")\n\n # Generate synthetic traces, both for populating the repositories and for the {sf, sp, pf, pp} tests\n prepare_traces()\n \n # Generate central repositories (if needed)\n for size, approach in itertools.product(sizes, approaches):\n prepare_repository(size, approach)\n \n # Run experiments (if needed)\n for size, approach in itertools.product(sizes, approaches):\n run_experiments(size, approach)", "def main(arguments):\n # create aliases\n SPLIT = arguments.split\n IN_PATH = arguments.inference_path\n DATASET_ROOT = arguments.dataset_root\n DATASET = arguments.dataset\n 
SEG_THRESHOLD = arguments.segmentation_threshold\n IDX = arguments.index\n RANGE = arguments.range\n V2 = arguments.v2\n CKPT = arguments.ckpt_filename\n\n try:\n st, ed, num = map(float, RANGE.split(':'))\n num = int(num)\n except:\n log.error('Invalid range')\n\n # dataset = CoNSeP(download=False, root=DATASET_PATH)\n dataset = getattr(dataset_reader, DATASET)(download=False, root=DATASET_ROOT+DATASET+\"/\")\n\n metrics = VALID_METRICS.keys()\n\n aggregated_metrics = {}\n thresholds = []\n\n for step, k in enumerate(np.linspace(st, ed, num)):\n thresholds.append(k)\n mlflow.log_metric('threshold', k, step=step)\n output_map = get_instance_output(True, IDX, \n root=IN_PATH, split=SPLIT, \n h=SEG_THRESHOLD, k=k, \n ckpt=CKPT, dot_refinement=V2)\n label, _ = dataset.read_labels(IDX, SPLIT)\n s = score(output_map, label, *metrics)\n for metric in metrics:\n value = s[metric]\n if isinstance(value, dict):\n for key, val in value.items():\n if not isinstance(val, list):\n metric_name = metric + '_' + key\n mlflow.log_metric(metric_name, val, step=step)\n if metric_name not in aggregated_metrics:\n aggregated_metrics[metric_name] = []\n aggregated_metrics[metric_name].append(val)\n else:\n mlflow.log_metric(metric, value, step=step)\n if metric not in aggregated_metrics:\n aggregated_metrics[metric] = []\n aggregated_metrics[metric].append(value)\n\n for metric, score_list in aggregated_metrics.items():\n mlflow.log_metric(\"average_\" + metric, sum(score_list) / len(score_list))\n\n mlflow.log_metric(\"best_threshold\", thresholds[np.argmax(aggregated_metrics['DQ_point'])])", "def main(args):\n ## Starting time\n start_time = datetime.now()\n ## Reading all elements and converting to python dictionary\n param_dict = vars(args)\n ## Checking for correct input\n param_vals_test(param_dict)\n #\n # Creating instance of `ReadML` with the input parameters\n param_dict['ml_args'] = ReadML(**param_dict)\n ## Program message\n prog_msg = param_dict['Prog_msg']\n # Adding additional parameters\n param_dict = add_to_dict(param_dict)\n ##\n ## Creating Folder Structure\n # proj_dict = cwpaths.cookiecutter_paths(__file__)\n proj_dict = param_dict['ml_args'].proj_dict\n proj_dict = directory_skeleton(param_dict, proj_dict)\n ##\n ## Printing out project variables\n print('\\n'+50*'='+'\\n')\n for key, key_val in sorted(param_dict.items()):\n if key != 'Prog_msg':\n print('{0} `{1}`: {2}'.format(prog_msg, key, key_val))\n print('\\n'+50*'='+'\\n')\n ##\n ## Feature keys\n param_dict['feat_cols_dict'] = param_dict['ml_args'].feat_cols_names_dict(\n return_all=True)\n ##\n ## Reading in the main catalogue\n catl_pd = catl_file_read_clean(param_dict, proj_dict)\n ###\n ### ------ Figures ------ ###\n ##\n ## Comparison of estimated group masses via HAM and Dynamical Masses\n frac_diff_model(param_dict, proj_dict, plot_opt=param_dict['plot_opt'])\n #\n # Covariance Matrix\n covariance_plot(catl_pd, param_dict, proj_dict)\n #\n # Traditional methods for estimating masses\n # pred_masses_halo_mass(param_dict, proj_dict)\n #\n # Fractional Difference plots vs True mass of galaxy GROUPS\n # frac_diff_groups_model(param_dict, proj_dict,\n # plot_opt=param_dict['plot_opt'])\n ##\n ## End time for running the catalogues\n end_time = datetime.now()\n total_time = end_time - start_time\n print('{0} Total Time taken (Create): {1}'.format(prog_msg, total_time))", "def main():\n parser = argparse.ArgumentParser()\n subparsers = parser.add_subparsers(dest='mode')\n\n # Add sub-parser for feature extraction\n parser_extract 
= subparsers.add_parser('extract')\n parser_extract.add_argument('dataset',\n choices=['training', 'validation', 'test'],\n )\n\n # Add sub-parser for training\n subparsers.add_parser('train')\n\n # Add sub-parser for inference\n parser_predict = subparsers.add_parser('predict')\n parser_predict.add_argument('dataset',\n nargs='?',\n choices=['validation', 'test'],\n default='test',\n )\n\n # Add sub-parser for evaluation\n parser_evaluate = subparsers.add_parser('evaluate')\n parser_evaluate.add_argument('task',\n nargs='?',\n choices=['tagging', 'sed', 'all'],\n default='all',\n )\n parser_evaluate.add_argument('dataset',\n nargs='?',\n choices=['validation', 'test'],\n default='test',\n )\n parser_evaluate.add_argument('--thresholds', action='store_true')\n\n args = parser.parse_args()\n\n if args.mode == 'extract':\n extract(cfg.to_dataset(args.dataset))\n elif args.mode == 'train':\n train()\n elif args.mode == 'predict':\n predict(cfg.to_dataset(args.dataset))\n elif args.mode == 'evaluate':\n eval_all = args.task == 'all'\n dataset = cfg.to_dataset(args.dataset)\n if args.task == 'tagging' or eval_all:\n evaluate_audio_tagging(dataset, args.thresholds)\n if args.task == 'sed' or eval_all:\n evaluate_sed(dataset)", "def runse(self):\n\n # check for se catalog\n\n \n\n t = self.image.split('.fits')\n froot = t[0]\n # check for se catalog\n secat = froot+'.cat'\n\n os.system('ln -s ' +self.astrodir + '/default.* .') \n if self.instrument == 'h':\n defaultcat = 'default.sex.HDI'\n elif self.instrument == 'i':\n defaultcat = 'default.sex.INT'\n self.keepsection=[1000,5000,0,4000]\n elif self.instrument == 'm':\n defaultcat = 'default.sex.HDI'\n elif self.instrument == 'b':\n print(\"hey Rose - \")\n print(\"using default.sex.BOK!!!\")\n print()\n defaultcat = 'default.sex.BOK.getzp'\n header = fits.getheader(self.image)\n try:\n expt = header['EXPTIME']\n except KeyError:\n expt = 1.\n ADUlimit = 40000.\n if self.instrument == 'i':\n if (self.filter == 'r'):\n ADUlimit = 400000./60#/float(expt)\n elif self.filter == 'ha':\n ADUlimit = 40000./180.\n #print('saturation limit in ADU/s {:.1f}'.format(ADUlimit))\n if self.fwhm is None:\n t = 'sex ' + self.image + ' -c '+defaultcat+' -CATALOG_NAME ' + froot + '.cat -MAG_ZEROPOINT 0 -SATUR_LEVEL '+str(ADUlimit)\n #t = 'sex ' + self.image + ' -c '+defaultcat+' -CATALOG_NAME ' + froot + '.cat -MAG_ZEROPOINT 0 -SATUR_LEVEL '\n if self.verbose:\n print('running SE first time to get estimate of FWHM')\n print(t)\n os.system(t)\n\n # clean up SE files\n # skipping for now in case the following command accidentally deletes user files\n # os.system('rm default.* .')\n\n\n ###################################\n # Read in Source Extractor catalog\n ###################################\n if self.verbose:\n print('reading in SE catalog from first pass')\n secat_filename = froot+'.cat'\n self.secat = fits.getdata(secat_filename,2)\n self.secat0 = self.secat\n # get median fwhm of image\n # for some images, this comes back as zero, and I don't know why\n fwhm = np.median(self.secat['FWHM_IMAGE'])*self.pixelscale\n \n \n t = 'sex ' + self.image + ' -c '+defaultcat+' -CATALOG_NAME ' + froot + '.cat -MAG_ZEROPOINT 0 -SATUR_LEVEL '+str(ADUlimit)+' -SEEING_FWHM '+str(fwhm)\n if float(fwhm) == 0:\n print('WARNING: measured FWHM is zero!')\n if self.verbose:\n print('running SE again with new FWHM to get better estimate of CLASS_STAR')\n else:\n t = 'sex ' + self.image + ' -c '+defaultcat+' -CATALOG_NAME ' + froot + '.cat -MAG_ZEROPOINT 0 -SATUR_LEVEL 
'+str(ADUlimit)+' -SEEING_FWHM '+str(self.fwhm)\n if self.verbose:\n print(t)\n print('running SE w/user input for FWHM to get better estimate of CLASS_STAR') \n #############################################################\n # rerun Source Extractor catalog with updated SEEING_FWHM\n #############################################################\n\n #print(t)\n os.system(t)\n self.read_se_cat()", "def main():\n\n config = read_json_file(CONFIG_FILE)\n posititve_path = (\n config[\"main\"][\"DATASET_BASE_PATH_DIR\"]\n + config[\"main\"][\"POSITIVE_FILENAME\"]\n )\n negative_path = (\n config[\"main\"][\"DATASET_BASE_PATH_DIR\"]\n + config[\"main\"][\"NEGATIVE_FILENAME\"]\n )\n complexity_factor = config[\"main\"][\"COMPLEXITY_FACTOR\"]\n max_sequences_to_fit_pos = config[\"main\"][\"MAX_SEQUENCES_TO_FIT_POS\"]\n max_sequences_to_fit_neg = config[\"main\"][\"MAX_SEQUENCES_TO_FIT_NEG\"]\n\n input_organisms_path = config[\"main\"][\"INPUT_FILENAME\"]\n mean_nodes = 3.0\n mean_fitness = 150\n positive_dataset = read_fasta_file(posititve_path)\n positive_dataset.sort()\n negative_dataset = read_fasta_file(negative_path)\n print(\"{} {}\".format(len(positive_dataset), len(negative_dataset)))\n\n organism_factory = OrganismFactory(\n config[\"organism\"],\n config[\"organismFactory\"],\n config[\"connector\"],\n config[\"pssm\"],\n )\n\n a_organisms = organism_factory.import_organisms(input_organisms_path)\n # random.shuffle(negativeDataset)\n\n for org in a_organisms:\n\n # org.print()\n nodes = org.count_nodes()\n\n p_1 = org.get_seq_set_fitness(\n positive_dataset[:max_sequences_to_fit_pos]\n )\n n_1 = org.get_seq_set_fitness(\n negative_dataset[:max_sequences_to_fit_neg]\n )\n # p1 = 20\n # n1 = org.getSeqSetFitness(negativeDataset[31:32])\n c_1 = org.get_complexity(mean_nodes, mean_fitness)\n\n # Score\n fitness = p_1 - n_1\n effective_fitness = fitness - complexity_factor * c_1\n print(\n (\n \"ORG {} N: {:.2f} P: {:.2f} N: {:.2f} C: {:.2f} F: {:.2f}\"\n + \" EF: {:.2f}\\n\"\n ).format(org._id, nodes, p_1, n_1, c_1, fitness, effective_fitness)\n )\n\n export_organism(\n org,\n positive_dataset,\n \"{}positive_{}\".format(\n config[\"main\"][\"RESULT_TEST_BASE_PATH_DIR\"], org._id\n ),\n organism_factory,\n )\n # exportOrganism(\n # org,\n # negativeDataset[31:32],\n # \"{}negative_{}\".format(config[\"main\"][\"RESULT_TEST_BASE_PATH_DIR\"], org.ID),\n # organismFactory,\n # )\n\n export_organism(\n org,\n negative_dataset[:50],\n \"{}negative_{}\".format(\n config[\"main\"][\"RESULT_TEST_BASE_PATH_DIR\"], org._id\n ),\n organism_factory,\n )", "def main():\n tester = Tester()\n # parse args, load configuration and create all required objects.\n tester.setup_experiment()\n # GO!\n tester.run_experiment()", "def main():\n\n print(\"Finding maximum clade credibility tree...\")\n find_mcc_tree()\n print(\"Computing clade credibilities...\")\n compute_clade_probabilities()\n if _CAN_PLOT:\n print(\"Plotting maximum clade credibility tree...\")\n plot_mcc_tree()\n else:\n print(\"Skipping plotting tree due to lack of PyQt4 support. 
:(\")\n print(\"Computing posterior mean paramter estimates...\")\n ranked_means = utils.write_means(\"indoeuropean.log\", \"parameter_means.csv\")\n print(\"Computing ranking correlations...\")\n compute_ranking_correls(ranked_means)\n print(\"Generating LaTeX table...\")\n make_table(ranked_means)\n print(\"Generating rate variation figure...\")\n make_figure(\"category_rates.eps\")", "def main():\n\n # confusion matrix model ensemble\n df = pd.read_csv('pred_test_ensemble.csv')\n print('Real test accuracy:', accuracy_score(df.labels.values, df.class_preds.values))\n conf_matrix = confusion_matrix(df.labels.values, df.class_preds.values, labels=[0, 1, 2, 3])\n\n dct = {'': [0, 90, 180, 270]}\n for i in range(4):\n dct[str(i*90)] = conf_matrix[:, i]\n \n conf_matrix = pd.DataFrame(dct)\n print(conf_matrix)\n conf_matrix.to_csv('confusion_matrix_ensemble.csv', index=False)\n\n\n\n # # Statistical gama\n # df = pd.read_csv('pred_test.csv')\n # print('Statistical... ')\n # statistical = gama_statistic(df)\n # statistical.to_csv('gama_statistic.csv', index=False)\n # print(statistical)", "def main():\r\n # Read dataset.\r\n reader = DatasetReader\r\n train_filename = sys.argv[1]\r\n test_filename = train_filename.replace('_train_', '_dev_')\r\n term_index, tag_index, train_data, test_data = reader.ReadData(train_filename, test_filename)\r\n (train_terms, train_tags, train_lengths) = train_data\r\n (test_terms, test_tags, test_lengths) = test_data\r\n\r\n model = SequenceModel(train_tags.shape[1], len(term_index), len(tag_index))\r\n model.build_inference()\r\n model.build_training()\r\n for j in range(5):\r\n model.train_epoch(train_terms,train_tags, train_lengths)\r\n print('Finished epoch %i. Evaluating ...' % (j+1))\r\n model.evaluate(test_terms, test_tags, test_lengths)", "def main():\r\n # LOading the Test images & labels\r\n params, test_images, test_labels = get_data()\r\n\r\n # Accuracy on Test Data\r\n accuracy = model_score(params, test_images, test_labels, act='sig')\r\n print ('\\nAccuracy : ' + str(accuracy) + ' %\\n')", "def main():\n path_for_data = '/Users/avielshtern/Desktop/semb/iml/IML.HUJI-master/data/kc_house_data (1).csv'\n design_matrix, response_vector = load_data(path_for_data)\n putting_it_all_together_1(design_matrix, response_vector)\n putting_it_all_together_2(design_matrix, response_vector)\n feature_evaluation(design_matrix, response_vector)", "def main():\n filename = \"data/exercise.csv\"\n analyze(filename)", "def main():\n # combineSloEff(makePlots=True, writeDB=False, runMC=False, seedNum=1)\n GPXSloEff(makePlots=True, writeDB=False)", "def main():\n\tparser = argparse.ArgumentParser(description=\"Estimate the efferents modulation induced by EES and afferent input together\")\n\tparser.add_argument(\"eesFrequency\", help=\"ees frequency\", type=float, choices=[gt.Range(0,1000)])\n\tparser.add_argument(\"eesAmplitude\", help=\"ees amplitude (0-600] or %%Ia_II_Mn\")\n\tparser.add_argument(\"species\", help=\"simulated species\", choices=[\"rat\",\"human\"])\n\tparser.add_argument(\"inputFile\", help=\"neural network structure file (e.g. 
fsSFrFfMnArtModHuman.txt)\")\n\tparser.add_argument(\"name\", help=\"name to add at the output files\")\n\tparser.add_argument(\"--mnReal\", help=\"Real Mn flag, IntFire Mn otherwise\",action=\"store_true\")\n\tparser.add_argument(\"--simTime\", help=\"simulation time\", type=int, default=1000)\n\tparser.add_argument(\"--burstingEes\", help=\"flag to use burst stimulation\", action=\"store_true\")\n\tparser.add_argument(\"--nPulsesPerBurst\", help=\"number of pulses per burst\", type=int, default=5)\n\tparser.add_argument(\"--burstsFrequency\", help=\"stimulation frequency within bursts\",type=float, default=600, choices=[gt.Range(0,1000)])\n\tparser.add_argument(\"--seed\", help=\"positive seed used to initialize random number generators (default = time.time())\", type=int, choices=[gt.Range(0,999999)])\n\targs = parser.parse_args()\n\n\tif args.seed is not None: sh.save_seed(args.seed)\n\telse: sh.save_seed(int(time.time()))\n\n\t# Import simulation specific modules\n\tfrom simulations import ForSimSpinalModulation\n\tfrom NeuralNetwork import NeuralNetwork\n\tfrom EES import EES\n\tfrom BurstingEES import BurstingEES\n\tfrom NetworkStimulation import NetworkStimulation\n\n\t# Initialze variables...\n\tif args.eesAmplitude[0]==\"%\": eesAmplitude = [float(x) for x in args.eesAmplitude[1:].split(\"_\")]\n\telse: eesAmplitude = float(args.eesAmplitude)\n\tname = args.name+\"_amp_\"+args.eesAmplitude+\"_freq_\"+str(args.eesFrequency)\n\tpc = h.ParallelContext()\n\tnn=NeuralNetwork(pc,args.inputFile)\n\tif not args.burstingEes: ees = EES(pc,nn,eesAmplitude,args.eesFrequency,pulsesNumber=100000,species=args.species)\n\telse: ees = BurstingEES(pc,nn,eesAmplitude,args.eesFrequency,args.burstsFrequency,args.nPulsesPerBurst,species=args.species)\n\tees.get_amplitude(True)\n\tprint \"The stimulation frequency is: \",args.eesFrequency,\" Hz\"\n\tafferentsInput = None\n\n\tcellsToRecord = {}\n\tcellsToRecord['Iaf'] = nn.cells['SOL']['Iaf']\n\tcellsToRecord['MnS']=nn.cells['SOL']['MnS']\n\t# cellsToRecord['MnFf']=nn.cells['SOL']['MnFf']\n\t# cellsToRecord['MnFr']=nn.cells['SOL']['MnFr']\n\t# modelTypes = {\"MnS\":\"artificial\",\"MnFr\":\"artificial\",\"MnFf\":\"artificial\",\"Iaf\":\"artificial\"}\n\tmodelTypes = {\"MnS\":\"artificial\",\"Iaf\":\"artificial\"}\n\tsimulation = ForSimSpinalModulation(pc,nn,cellsToRecord,modelTypes, afferentsInput, None, None, args.simTime)\n\tsimulation.set_results_folder(\"../../results/AffEffModSweap/\")\n\tsimulation.run()\n\tsimulation.raster_plot(name,False)\n\tcomm.Barrier()\n\n\tsimulation.save_results(name)", "def main():\n parser = make_argument_parser()\n args = parser.parse_args()\n\n input_dirs = args.inputdirs\n tf = args.factor\n valid_chroms = args.validchroms\n valid_input_dirs = args.validinputdirs\n test_chroms = args.testchroms\n epochs = args.epochs\n patience = args.patience\n learningrate = args.learningrate\n seed = args.seed\n utils.set_seed(seed)\n dropout_rate = args.dropout\n L = args.seqlen\n w = args.motifwidth\n utils.L = L\n utils.w = w\n utils.w2 = w/2\n negatives = args.negatives\n assert negatives > 0\n meta = args.meta\n gencode = args.gencode\n motif = args.motif\n\n num_motifs = args.kernels\n num_recurrent = args.recurrent\n num_dense = args.dense\n \n features = ['bigwig'] \n\n if tf:\n print 'Single-task training:', tf\n singleTask = True\n if meta:\n print 'Including metadata features'\n features.append('meta')\n if gencode:\n print 'Including genome annotations'\n features.append('gencode')\n else:\n print 'Multi-task 
training'\n singleTask = False\n #Cannot use any metadata features\n assert not meta\n assert not gencode\n\n if args.outputdir is None:\n clobber = True\n output_dir = args.outputdirc\n else:\n clobber = False\n output_dir = args.outputdir\n\n try: # adapted from dreme.py by T. Bailey\n os.makedirs(output_dir)\n except OSError as exc:\n if exc.errno == errno.EEXIST:\n if not clobber:\n print >> sys.stderr, ('output directory (%s) already exists '\n 'but you specified not to clobber it') % output_dir\n sys.exit(1)\n else:\n print >> sys.stderr, ('output directory (%s) already exists '\n 'so it will be clobbered') % output_dir\n\n print 'Loading genome'\n genome = utils.load_genome()\n if valid_input_dirs:\n print 'You specified at least one validation input directory'\n assert singleTask # This option only works for single-task training\n print 'Loading ChIP labels'\n if singleTask:\n chip_bed_list, nonnegative_regions_bed_list = \\\n utils.load_chip_singleTask(input_dirs, tf)\n if valid_input_dirs:\n valid_chip_bed_list, valid_nonnegative_regions_bed_list = \\\n utils.load_chip_singleTask(valid_input_dirs, tf)\n num_tfs = 1\n else:\n assert len(input_dirs) == 1 # multi-task training only supports one cell line\n input_dir = input_dirs[0]\n tfs, positive_windows, y_positive, nonnegative_regions_bed = \\\n utils.load_chip_multiTask(input_dir)\n num_tfs = len(tfs)\n print 'Loading bigWig data'\n bigwig_names, bigwig_files_list = utils.load_bigwigs(input_dirs)\n num_bigwigs = len(bigwig_names)\n if valid_input_dirs:\n valid_bigwig_names, valid_bigwig_files_list = utils.load_bigwigs(valid_input_dirs)\n assert valid_bigwig_names == bigwig_names\n if not singleTask:\n bigwig_files = bigwig_files_list[0]\n if meta:\n print 'Loading metadata features'\n meta_names, meta_list = utils.load_meta(input_dirs)\n if valid_input_dirs:\n valid_meta_names, valid_meta_list = utils.load_load(valid_input_dirs)\n assert valid_meta_names == meta_names\n else:# meta option was not selected, pass empty metadata features to the functions\n meta_list = [[] for bigwig_files in bigwig_files_list]\n if valid_input_dirs:\n valid_meta_list = [[] for bigwig_files in valid_bigwig_files_list]\n \n print 'Making features'\n if singleTask:\n if not valid_input_dirs: #validation directories not used, must pass placeholder values\n valid_chip_bed_list = None\n valid_nonnegative_regions_bed_list = None\n valid_bigwig_files_list = None\n valid_meta_list = None \n datagen_train, datagen_valid = \\\n utils.make_features_singleTask(chip_bed_list,\n nonnegative_regions_bed_list, bigwig_files_list, bigwig_names,\n meta_list, gencode, genome, epochs, negatives, valid_chroms, test_chroms, \n valid_chip_bed_list, valid_nonnegative_regions_bed_list, \n valid_bigwig_files_list, valid_meta_list)\n else:\n datagen_train, datagen_valid = \\\n utils.make_features_multiTask(positive_windows, y_positive,\n nonnegative_regions_bed, bigwig_files, bigwig_names,\n genome, epochs, valid_chroms, test_chroms)\n print 'Building model'\n if num_recurrent == 0:\n print 'You specified 0 LSTM units. Omitting BLSTM layer'\n if num_recurrent < 0:\n print 'You specified less than 0 LSTM units. 
Replacing BLSTM layer with global max-pooling layer'\n if meta or gencode:\n num_meta = 0\n if meta:\n num_meta = len(meta_names)\n if gencode:\n num_meta += 6\n model = utils.make_meta_model(num_tfs, num_bigwigs, num_meta, num_motifs, num_recurrent, num_dense, dropout_rate)\n else:\n model = utils.make_model(num_tfs, num_bigwigs, num_motifs, num_recurrent, num_dense, dropout_rate)\n\n if motif:\n assert singleTask # This option only works with single-task training\n motifs_db = utils.load_motif_db('resources/HOCOMOCOv9.meme')\n if tf in motifs_db:\n print 'Injecting canonical motif'\n pwm = motifs_db[tf]\n pwm += 0.001\n pwm = pwm / pwm.sum(axis=1)[:, np.newaxis]\n pwm = np.log2(pwm/0.25)\n utils.inject_pwm(model, pwm)\n output_tf_file = open(output_dir + '/chip.txt', 'w')\n if singleTask:\n output_tf_file.write(\"%s\\n\" % tf)\n else:\n for tf in tfs:\n output_tf_file.write(\"%s\\n\" % tf)\n output_tf_file.close()\n output_feature_file = open(output_dir + '/feature.txt', 'w')\n for feature in features:\n output_feature_file.write(\"%s\\n\" % feature)\n output_feature_file.close()\n output_bw_file = open(output_dir + '/bigwig.txt', 'w')\n for bw in bigwig_names:\n output_bw_file.write(\"%s\\n\" % bw)\n output_bw_file.close()\n if meta:\n output_meta_file = open(output_dir + '/meta.txt', 'w')\n for meta_name in meta_names:\n output_meta_file.write(\"%s\\n\" % meta_name)\n output_meta_file.close()\n model_json = model.to_json()\n output_json_file = open(output_dir + '/model.json', 'w')\n output_json_file.write(model_json)\n output_json_file.close()\n train(datagen_train, datagen_valid, model, epochs, patience, learningrate, output_dir)", "def main():\n \n # Load the model\n model = EpamModel()\n model.load(\"bayes_1.zip\")\n \n # Load and clean/prepare test data \n x_test = pd.read_csv('BAZA_VALID_INPUT.csv')\n x_test_clean = cleanup_df(x_test)\n \n # Predict\n # FIXME: This currently does probabilistic prediction only!\n y_pred = model.predict(x_test_clean)\n \n with open('output.txt', 'w+') as f:\n for label in y_pred:\n f.write(f'{label}\\n')", "def main():\n data_visualisation()\n write_hyper_params()\n write_result_tables()\n write_box_plots()", "def main():\n # Path used in assembly and previously discovered min year value.\n split_in_dir_path = \"../../data/split\"\n avg_5_in_dir_path = \"../../data/averaged_5\"\n avg_25_in_dir_path = \"../../data/averaged_25\"\n avg_50_in_dir_path = \"../../data/averaged_50\"\n dates_mat_path = \"../../data/dates_matrix/dates_matrix.npy\"\n min_year = 1962\n data_out_dir_path = \"../../data/rnn_set/data\"\n labels_out_dir_path = \"../../data/rnn_set/labels\"\n assemble_set(\n split_in_dir_path, avg_5_in_dir_path, avg_25_in_dir_path,\n avg_50_in_dir_path, dates_mat_path, min_year,\n data_out_dir_path, labels_out_dir_path\n )", "def mainGA(NAME, target_output, target_image): \n global toolbox\n\n print(\"Target image: {0} Target output: {1}\".format(target_image, target_output)) \n sys.stdout.flush()\n\n model = load_model(NAME) \n fit = Fitness(NAME, model, target_image, target_output)\n\n #Genetic operators \n toolbox.register(\"evaluate\", fit.evaluate)\n toolbox.register(\"mate\", cxTwoPointCopy) \n #toolbox.register(\"mate\", cxUniform)\n toolbox.register(\"mutate\", tools.mutGaussian, mu=0.0, sigma=0.1, indpb=0.05)\n toolbox.register(\"select\", tools.selTournament, tournsize=3)\n \n\n pop = toolbox.population(n=50)\n hof = tools.HallOfFame(1, similar=np.array_equal)\n \n #stats = tools.Statistics(lambda ind: ind.fitness.values)\n 
#stats.register(\"avg\", np.mean)\n #stats.register(\"std\", np.std)\n #stats.register(\"min\", np.min)\n #stats.register(\"max\", np.max)\n \n pop, log = algorithms.eaSimple(pop, toolbox, cxpb=CXPB, mutpb=MUTPB, \n ngen=NGEN, halloffame=hof, \n verbose=False)\n\n return hof[0]", "def output_main(args):\n\t#clean input file (fold and remove escape chars)\n\treference = clean_fasta(args.infile)\n\tfilterthreshold = args.threshold\n\t#look up proper readset using readset module\n\treadset = args.readset\n\t#if readset is in fasta format, inject fake quality scores\n\t\n\t#run bwa\n\tsamfile = run_bwa(reference, readset)\n\t#convert sam to bam file, and sort\n\tsortedbam = sam_to_sorted_bam(reference, samfile)\n\t#run variant caller freebayes\n\tvcffile = run_var_caller(reference, sortedbam)\n\t#run hapcut suite\n\thapoutfile = run_haplotyper(reference, vcffile, sortedbam, filterthreshold)\n\t#convert hapcut output to sequence and gff\n\tcalls_to_gff(reference, hapoutfile)", "def execute_expression_analysis(self):\n print (\"Expression analisys start...\")\n n = \"consexpression\"\n out_merge_table = ''\n self.execute_merge_table(self._count_table, out_merge_table)\n # 1 ------------------ edgeR -----------------\n out_edger = self._exp_dao._read_directory + \"/\" + self._exp_dao._name + \"_edger.csv\"\n self._edger = EdgeR(out_merge_table, self._exp_dao._group_name, self._exp_dao._replic, out_edger)\n self._edger.run_edger()\n # 2 ------------- BaySeq --------------------\n out_bayseq = self._exp_dao._read_directory + \"/\" + self._exp_dao._name + \"_baySeq.csv\"\n self._bayseq = BaySeq(out_merge_table, self._exp_dao._group_name, self._exp_dao._replic, out_bayseq)\n self._bayseq.run_bayseq()\n # 3 ------------- DESeq --------------------\n out_deseq = self._exp_dao._read_directory + \"/\" + self._exp_dao._name + \"_DESeq.csv\"\n self._deseq = DESeq(out_merge_table, self._exp_dao._group_name, self._exp_dao._replic, out_deseq)\n self._deseq.run_deseq()\n # 4 ------------- NOISeq --------------------\n out_noiseq = self._exp_dao._read_directory + \"/\" + self._exp_dao._name + \"_NOISeq.csv\"\n self._noiseq = Noiseq(out_merge_table, self._exp_dao._group_name, self._exp_dao._replic, out_noiseq)\n self._noiseq.run_noiseq()\n # 5 ------------- EBSeq --------------------\n out_ebseq = self._exp_dao._read_directory + \"/\" + self._exp_dao._name + \"_EBSeq.csv\"\n self._ebseq = Ebseq(out_merge_table, self._exp_dao._group_name, self._exp_dao._replic, out_ebseq)\n self._ebseq.run_ebseq()\n # 6 ------------- SAMSeq --------------------\n out_samseq = self._exp_dao._read_directory + \"/\" + self._exp_dao._name + \"_SAMSeq.csv\"\n self._samseq = SamSeq(out_merge_table, self._exp_dao._group_name, self._exp_dao._replic, out_samseq)\n self._samseq.run_samseq()\n # 7 ------------- limma-voom --------------------\n out_limmavoom = self._exp_dao._read_directory + \"/\" + self._exp_dao._name + \"_limmavoom.csv\"\n self._limmavoom = LimmaVoom(out_merge_table, self._exp_dao._group_name, self._exp_dao._replic, out_limmavoom)\n self._limmavoom.run_limmavoom()", "def run_analysis(self, argv):\n self._run_argparser(argv)\n self.run()", "def run():\n\n parser = argparse.ArgumentParser(description='Run the GOComp PSSE based validation tool on a problem instance')\n \n parser.add_argument('raw', help='raw - complete path and file name to a RAW file')\n parser.add_argument('con', help='con - complete path and file name to a CON file')\n parser.add_argument('inl', help='inl - complete path and file name to a INL file')\n 
parser.add_argument('mon', help='mon - complete path and file name to a MON file')\n parser.add_argument('sub', help='sub - complete path and file name to a SUB file')\n \n args = parser.parse_args()\n \n try:\n raw = args.raw\n con = args.con\n inl = args.inl\n mon = args.mon\n sub = args.sub\n except:\n print (\"exception in parsing the validation command\")\n raise\n else:\n run_main(raw, con, inl, mon, sub)", "def main():\n spark = create_spark_session()\n logging.info('Spark Session created')\n input_data = \"s3a://udacity-dend/\"\n output_data = \"s3a://dend-emr-project\"\n #input_data = './data/'\n #output_data = '/Users/daniel/Desktop/output/'\n logging.info(f'Set input path to {input_data}')\n logging.info(f'Set output path to {output_data}')\n \n copy_raw_data(spark, input_data, output_data)\n\n s3_data = restore_data_from_s3(spark, output_data)\n \n sas_desc_string = load_sas_desc_file(input_data)\n \n process_fact_table(spark, s3_data, output_data, sas_desc_string)\n \n process_dim_tables(spark, s3_data, output_data, sas_desc_string)\n\n data_quality_check(spark, output_data)\n \n logging.info('ETL process successfully finished.')", "def main():\n # construct the argument parse and parse the arguments\n args = argparse.ArgumentParser()\n args.add_argument(\"-d\", \"--dataset\", required=True, help=\"path to input dataset\")\n args = vars(args.parse_args())\n\n # grab the list of image paths\n print(\"[INFO] loading images...\")\n image_paths = list(paths.list_images(args[\"dataset\"]))\n\n # initialize the image preprocessor, load the dataset from disk,\n # and reshape the data matrix\n preprocessor = SimplePreprocessor(32, 32)\n loader = SimpleDatasetLoader(preprocessors=[preprocessor])\n (data, labels) = loader.load(image_paths, verbose=500)\n data = data.reshape((data.shape[0], 3072))\n\n # encode the labels as integers\n label_encoder = LabelEncoder()\n labels = label_encoder.fit_transform(labels)\n # partition the data into training and testing splits using 75% of\n # the data for training and the remaining 25% for testing\n (train_x, test_x, train_y, test_y) = train_test_split(data, labels, test_size=0.25, random_state=5)\n\n # loop over our set of regularizers\n for regularizer in (None, \"l1\", \"l2\"):\n # train a SGD classifier using a softmax loss function and the\n # specified regularization function for 10 epochs\n print(\"[INFO] training model with `{}` penalty\".format(regularizer))\n model = SGDClassifier(\n loss=\"log\", penalty=regularizer, max_iter=10, learning_rate=\"constant\", tol=1e-3, eta0=0.01, random_state=42\n )\n model.fit(train_x, train_y)\n # evaluate the classifier\n acc = model.score(test_x, test_y)\n print(\"[INFO] `{}` penalty accuracy: {:.2f}%\".format(regularizer, acc * 100))", "def main():\n\n file_list = []\n # this will analyze all files in the input_files directory\n for folder in [x for x in os.listdir(os.path.join(os.getcwd(), 'test_directory')) if os.path.isdir(os.path.join(os.getcwd(), 'test_directory', x))]:\n try:\n # print(f'Reading files in {folder}')\n file_list = file_list + [os.path.join(os.getcwd(), 'test_directory', folder, f) for f in os.listdir(os.path.join(os.getcwd(), 'test_directory', folder)) if f.endswith('.ged')]\n except ValueError as v:\n print(v)\n except FileNotFoundError as f:\n print(f)\n\n try:\n print(f'Analyzing final cumulative file data.')\n # print(file_list)\n g = GED_Repo(file_list)\n g.check_data()\n g.print_data()\n g.print_individuals()\n g.print_families()\n except ValueError as v:\n print(v)\n 
except FileNotFoundError as f:\n print(f)", "def main(argv):\n\n parser = argparse.ArgumentParser(\n description=__doc__,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter\n )\n parser = parse.parse_agglo_from_labelsets(parser)\n parser = parse.parse_common(parser)\n args = parser.parse_args()\n\n agglo_from_labelsets(\n args.inpufile,\n args.labelset_files,\n args.fwmap,\n args.outputfile,\n args.save_steps,\n args.protective,\n )", "def main(args):\n \n if len(args) == 2 and args[1] == \"--test\":\n # Run the tests\n return doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE)\n \n options = parse_args(args) # This holds the nicely-parsed options object\n \n # Get the halStats output, annotated with coverage\n halstats_list = metric_halstats(options.hal)\n \n # Print the halStats output\n pprint.pprint(halstats_list)\n \n # Get the coverages vs ref for everything but ref\n coverages = [entry[\"Coverage\"] for entry in halstats_list if\n entry[\"GenomeName\"] != \"ref\"]\n \n for coverage in coverages:\n # Save all the coverages\n tsv.TsvWriter(options.coverage_file).list_line(options.tag +\n [coverage])\n \n \n if options.beds is not None or options.truth is not None:\n # We need a MAF for checkGenes and for the precision/recall\n # calculations.\n maf_filename = hal2maf(options.hal)\n \n if options.beds is not None:\n \n # We're going to check the alignment agains the genes\n import checkGenes\n class_counts, gene_sets, gene_pairs = checkGenes.check_genes(\n maf_filename, options.beds)\n \n for classification, count in class_counts.iteritems():\n tsv.TsvWriter(options.gene_category_file).list_line(options.tag +\n [classification, count])\n \n # Print the output\n pprint.pprint(class_counts)\n pprint.pprint(gene_sets)\n pprint.pprint(gene_pairs)\n \n if options.truth is not None:\n \n # We're going to get precision and recall against the truth.\n pr_tuples = metric_mafcomparator(maf_filename, options.truth)\n\n for precision, recall in pr_tuples:\n # Output each PR pair\n\n # TODO: Output better \n print(precision, recall)\n \n # Save them\n tsv.TsvWriter(options.precision_recall_file).list_line(options.tag +\n [precision, recall])\n \n if options.beds is not None or options.truth is not None:\n # Clean up the MAF\n os.unlink(maf_filename)", "def process_args():\n\n parser = argparse.ArgumentParser()\n\n # argument group for parameters related to input/output\n # (e.g. filenames, logging/verbosity options, target genes)\n #\n # these don't affect the model output, and thus don't need to be saved\n # with the results of the experiment\n io = parser.add_argument_group('io',\n 'arguments related to script input/output, '\n 'note these will *not* be saved in metadata ')\n io.add_argument('--custom_genes', nargs='*', default=None,\n help='currently this needs to be a subset of top_50')\n io.add_argument('--gene_set', type=str,\n choices=['top_50', 'vogelstein', 'custom'],\n default='top_50',\n help='choose which gene set to use. top_50 and vogelstein are '\n 'predefined gene sets (see data_utilities), and custom allows '\n 'any gene or set of genes in TCGA, specified in --custom_genes')\n io.add_argument('--log_file', default=None,\n help='name of file to log skipped genes to')\n io.add_argument('--results_dir', default=cfg.results_dirs['multimodal'],\n help='where to write results to')\n io.add_argument('--verbose', action='store_true')\n\n # argument group for parameters related to model training/evaluation\n # (e.g. 
model hyperparameters, preprocessing options)\n #\n # these affect the output of the model, so we want to save them in the\n # same directory as the experiment results\n opts = parser.add_argument_group('model_options',\n 'parameters for training/evaluating model, '\n 'these will affect output and are saved as '\n 'experiment metadata ')\n opts.add_argument('--debug', action='store_true',\n help='use subset of data for fast debugging')\n opts.add_argument('--n_dim', nargs='*', default=None,\n help='list of compressed dimensions to use, defaults to '\n 'uncompressed data for all data types')\n opts.add_argument('--num_folds', type=int, default=4,\n help='number of folds of cross-validation to run')\n opts.add_argument('--overlap_data_types', nargs='*',\n default=['expression'],\n help='data types to define set of samples to use; e.g. '\n 'set of data types for a model comparison, use only '\n 'overlapping samples from these data types')\n opts.add_argument('--seed', type=int, default=cfg.default_seed)\n opts.add_argument('--subset_mad_genes', type=int, default=cfg.num_features_raw,\n help='if included, subset gene features to this number of '\n 'features having highest mean absolute deviation')\n opts.add_argument('--training_data', nargs='*', default=['expression'],\n help='which data types to train model on')\n\n args = parser.parse_args()\n\n args.results_dir = Path(args.results_dir).resolve()\n\n if args.log_file is None:\n args.log_file = Path(args.results_dir, 'log_skipped.tsv').resolve()\n\n if args.gene_set == 'custom':\n if args.custom_genes is None:\n parser.error('must include --custom_genes when --gene_set=\\'custom\\'')\n args.gene_set = args.custom_genes\n del args.custom_genes\n elif (args.gene_set != 'custom' and args.custom_genes is not None):\n parser.error('must use option --gene_set=\\'custom\\' if custom genes are included')\n\n # check that all training data types are defined in config\n if (len(set(args.training_data).intersection(set(cfg.data_types.keys()))) !=\n len(set(args.training_data))):\n parser.error('training_data data types must be in config.data_types')\n\n # check that all data types in overlap_data_types are valid\n #\n # here I'm just checking this argument against the non-compressed data types,\n # downstream code will check if data types we request compressed data for\n # really have compressed data, but don't need to catch that here\n check_all_data_types(parser, args.overlap_data_types, args.debug)\n\n # split args into defined argument groups, since we'll use them differently\n arg_groups = du.split_argument_groups(args, parser)\n io_args, model_options = arg_groups['io'], arg_groups['model_options']\n\n # if no n_dim argument provided, set all to None\n if model_options.n_dim is None:\n model_options.n_dim = [None] * len(model_options.training_data)\n else:\n # convert None strings from argparse to python Nones\n model_options.n_dim = (\n [None if n == 'None' else n for n in model_options.n_dim]\n )\n\n # add some additional hyperparameters/ranges from config file to model options\n # these shouldn't be changed by the user, so they aren't added as arguments\n model_options.alphas = cfg.alphas\n model_options.l1_ratios = cfg.l1_ratios\n\n # for these experiments, we need to standardize all data types that are not\n # already PCA compressed\n model_options.standardize_data_types = (\n [t for ix, t in enumerate(model_options.training_data)\n if model_options.n_dim[ix] == None]\n )\n\n return io_args, model_options", "def main():\n args = 
setup_args()\n header_info = extract_header_info_from_probes(args.probe)\n\n for gene in header_info.keys():\n # check there is a folder for gene, else create it\n gene_out_dir = os.path.join(args.output_path, gene)\n if not os.path.exists(gene_out_dir):\n os.mkdir(gene_out_dir)\n\n gene_msa_fname = os.path.join(gene_out_dir, '{}_msa.fa'.format(gene))\n gene_ref = os.path.join(os.path.abspath(args.gene_refs), gene + '.fa')\n generate_msa_for_gene(gene, header_info[gene], gene_ref, gene_msa_fname)", "def runTests():\r\n\r\n print(\"running a few tests\")\r\n\r\n average = compute .gpsAverage (4, 5)\r\n print(\"average = \", average)\r\n \r\n print (\"hello!\")", "def main(rand,mu,lamb,cxpb,mutpb,ngen,param):\n \n random.seed(rand)\n NGEN = ngen\n MU = mu\n LAMBDA = lamb\n CXPB = cxpb\n MUTPB = mutpb\n \n # Used for printing the results. It is the parameter that is changed one run from another\n if param==\"rand\" or param==\"optimal\":\n list_results=[rand]\n elif param==\"mu\":\n list_results=[mu]\n elif param==\"lamb\":\n list_results=[lamb]\n elif param==\"cross\":\n list_results=[cxpb]\n elif param==\"mutate\":\n list_results=[mutpb]\n elif param==\"ngen\":\n list_results=[ngen]\n elif param==\"original\":\n list_results=[0]\n \n # Initialization of the objects for the GA\n pop = toolbox.population(n=MU)\n hof = tools.ParetoFront()\n stats = tools.Statistics(lambda ind: ind.fitness.values)\n stats.register(\"avg\", np.mean, axis=0)\n stats.register(\"std\", np.std, axis=0)\n stats.register(\"min\", np.min, axis=0)\n stats.register(\"max\", np.max, axis=0)\n\n # Run of the GA\n p,logbook=algorithms.eaMuPlusLambda(pop, toolbox, MU, LAMBDA, CXPB, MUTPB, NGEN, stats,\n halloffame=hof,verbose=0)\n with open(results_path+param+'_logbook.csv', 'a',newline='') as f:\n w = csv.DictWriter(f, logbook[0].keys())\n w.writeheader()\n for el in logbook:\n w.writerow(el)\n w.writerow({})\n \n # Takes the max fitness of the population from all of the runs\n max_fit=0\n max_gen=0\n for elt in logbook:\n if elt['max'][0]>max_fit:\n max_fit=elt['max'][0]\n max_gen=elt['gen']\n list_results.append(max_fit)\n list_results.append(max_gen)\n \n #TODO\n# for ind in hof:\n# dist = numpy.linalg.norm(a-b)\n\n print (\"{0} {1} {2} {3}\".format(round(list_results[1],3),round(list_results[2],3),round(list_results[0],3),hof[0]))\n current_out_writer.writerow([list_results[0],list_results[1],list_results[2],hof[0]])\n \n return pop, stats, hof", "def run(self):\n\n # Peak intensity error analysis.\n self.interpreter.spectrum.error_analysis()\n\n # Calculate the NOEs.\n self.interpreter.minimise.calculate()\n\n # Save the NOEs.\n self.interpreter.value.write(param='noe', file=self.file_root+'.out', dir=self.results_dir, force=True)\n\n # Save the results.\n self.interpreter.results.write(file='results', dir=self.results_dir, force=True)\n\n # Create Grace plots of the data.\n self.interpreter.grace.write(y_data_type='peak_intensity', file='intensities.agr', dir=self.grace_dir, force=True)\n self.interpreter.grace.write(y_data_type='noe', file='noe.agr', dir=self.grace_dir, force=True)\n\n # Save the program state.\n if self.save_state:\n self.interpreter.state.save(state=self.file_root+'.save', dir=self.results_dir, force=True)", "def main():\n # set up the program to take in arguments from the command line\n parser = argparse.ArgumentParser()\n parser.add_argument(\"xTrain\",\n help=\"filename for features of the training data\")\n parser.add_argument(\"yTrain\",\n help=\"filename for labels associated with training 
data\")\n parser.add_argument(\"xTest\",\n help=\"filename for features of the test data\")\n parser.add_argument(\"yTest\",\n help=\"filename for labels associated with the test data\")\n parser.add_argument(\"lr\", type=float, help=\"learning rate\")\n parser.add_argument(\"bs\", type=int, help=\"batch size\")\n parser.add_argument(\"epoch\", type=int, help=\"max number of epochs\")\n parser.add_argument(\"--seed\", default=334, \n type=int, help=\"default seed number\")\n\n args = parser.parse_args()\n # load the train and test data\n xTrain = file_to_numpy(args.xTrain)\n yTrain = file_to_numpy(args.yTrain)\n xTest = file_to_numpy(args.xTest)\n yTest = file_to_numpy(args.yTest)\n\n # setting the seed for deterministic behavior\n np.random.seed(args.seed) \n model = SgdLR(args.lr, args.bs, args.epoch)\n trainStats = model.train_predict(xTrain, yTrain, xTest, yTest)\n print(trainStats)", "def main(args):\n master, result = pathlib.Path(args.master), pathlib.Path(args.result)\n\n # update Exam.autograder_format\n assert args.format in [\"otter\", \"ok\"], f\"Autograder format {args.format} invalid\"\n Exam.autograder_format = args.format\n\n # load notebook and parse\n nb = nbformat.read(master, as_version=NB_VERSION)\n parse_notebook(nb)\n\n # seed np.random in advance of creating student versions\n seed = args.seed or Exam.config.get(\"seed\", 42)\n np.random.seed(seed)\n\n # create autograder notebook\n nb_name = master.name\n create_and_write_autograder_exam(result / \"autograder\", nb_name)\n\n # create exams\n for i in range(Exam.config[\"num_students\"]):\n if (i + 1) % 50 == 0 and not args.quiet:\n print(f\"Generating exam {i + 1}\")\n output_dir = result / f\"exam_{i}\"\n create_and_write_exam_instance(output_dir, nb_name, Exam.config[\"num_questions\"])\n\n # all_tests_path = result / 'tests'\n # os.makedirs(all_tests_path, exist_ok=True)\n # write_all_version_tests(all_tests_path)\n\n # generate Gradescope zip file\n if Exam.config.get(\"generate\", {}):\n if not args.quiet:\n print(\"Generating autograder zip file...\")\n generate(args.result, Exam.config.get(\"generate\"))", "def runGA(dressCode, color, budget, poplength, generations, boost, error, show, best):\n\n print(\"[-] Running genetic algorithm...\", end=\"\\n\\n\")\n ga = GeneticAlgorithm( \n popSize=poplength, \n eliteSize=2,\n crossoverRate=0.9, \n mutationRate=0.2, \n generations=generations, \n dressCode=dressCode, \n color=color, \n budget=budget,\n boost=boost,\n error=error,\n show=show,\n )\n # start the genetic algorithm \n ga.start()\n if (best != -1):\n ga.showBestOutfit(best)\n ga.plotPerformance()", "def main():\r\n\r\n # Command-line arguments\r\n training_data = argv[1]\r\n hypothesis_out = argv[2]\r\n learning_type = argv[3]\r\n test = argv[4]\r\n labels = None\r\n if len(argv) > 5:\r\n labels = argv[5]\r\n\r\n # Parse data and determine features\r\n feat_obj = FeatureParser(training_data)\r\n data = FeatureData(feat_obj.features)\r\n\r\n # Train model using DT or DT + adaboost\r\n train(data, hypothesis_out, learning_type)\r\n\r\n # Predict on test set with trained model\r\n predictions = predict(hypothesis_out, test, learning_type)\r\n\r\n # Evaluate accuracy of test data if provided lables\r\n if labels:\r\n accuracy = evaluate(predictions, labels)\r\n print('Model accuracy on test data:',str(accuracy) + '%')", "def run(self,infilename): \n ### initizlize the analysis\n self.init_analysis(infilename)\n ### run the analysis\n self.run_analysis()\n ### store selected results\n self.store_results()\n 
return", "def main():\n\n # start at loading the dataset\n data = h1bdata_loading()\n merged_data = pd.concat([data[year] for year in range(2010,2017)], ignore_index= True)\n raw_data = h1b_data(data)\n \n \n\n # Then clean the data\n #h1b_data = Clean_df(raw_data)\n #print(\"data cleaned >>>\")\n\n\n while True:\n try:\n print (\"================================ H1b Visa Approve Rate Exploring ================================\")\n print (\"\")\n print (\" How do you want to explore the H1b Data? \")\n print (\" <a> : Overview \t\t \")\n print (\" <b> : Location \")\n print (\" <c> : Industry \")\n print (\" <d> : Company \") \n print (\" You can always input 'quit' to leave the system \")\n print (\"=================================================================================================\")\n\n key = option_input()\n if key == 'a':\n overview(data)\n if key == 'b':\n location(data)\n if key == 'c':\n industry_exploring(merged_data)\n if key == 'd':\n company_exploring(merged_data)\n except wrong_option_exception:\n print (\"Invalid option, please reselect.\")", "def run(sim_attr_generator):\n#TODO: clean\n#TODO: integrate analyses\n def analyze_and_save(simulation,simulation_attributes):\n#? Ugly conf file analyses integration.\n if simulation_attributes.analyses and Args.output_file != None:\n verbose_print(\"Saving analyses for {0}.\".format(simulation_attributes.id_name),2)\n results = analyze_datas(\n simulation.result,\n simulation_attributes.analyses\n )\n plotables = ana_results_to_plotables(\n results,\n simulation_attributes.analyses\n )\n#TODO error handling for save\n analysis_save_dm(\n results,\n plotables,\n simulation_attributes.analyses,\n simulation_attributes.id_name\n )\n\n def save_simulation(simulation,simulation_attributes):\n if not simulation_attributes.analyses and Args.output_file != None:\n verbose_print(\"Saving simulation datas of {0}.\".format(\n simulation_attributes.id_name\n ),2) \n try:\n np.save(\n simulation_attributes.id_name,\n simulation.result\n )\n except:\n raise EnvironmentError(\"Can't save data to {}.\".format(\n simulation_attributes.id_name\n ))\n\n verbose_print(\"Starting simulation run.\",1)\n for i,simulation_attributes in enumerate(sim_attr_generator):\n verbose_print(\"Starting simulation number {0}: {1}\".format(\n i,\n simulation_attributes.id_name\n ),2)\n simulation = Simulation(\n SimulationVariables(simulation_attributes)\n )\n simulation.start()\n save_simulation(simulation,simulation_attributes)\n analyze_and_save(simulation,simulation_attributes)", "def main():\n create_sets()\n optimal_weights = genetic_algorithm()\n obtain_best_model(optimal_weights)", "def run(prog, args):\n\t#parse arguments\n\tparser = ArgumentParser(prog = prog, usage=\"%(prog)s [-options] [input] [output]\\n\", version=\"%(prog)s v0.1.1\")\n\tparser.add_argument(\"--exclude\", type = str, dest=\"exclude\", help=\"exclude mtDNA sites from analysis\")\n\tparser.add_argument(\"--remove\", type = str, dest=\"remove\", help=\"remove families from analysis\")\n\tparser.add_argument(\"--keep\", type = str, dest=\"keep\", help=\"keep only the families for analysis\")\n\tparser.add_argument(\"--depth\", type = float, default = 10, dest=\"depth\", help=\"the minimum read depth of all variants\")\n\tparser.add_argument(\"--depth-min\", type = float, default = 40, dest=\"depth_min\", help=\"the minimum read depth of heteroplasmies\")\n\tparser.add_argument(\"--hq-min\", type = float, default = 0.7, dest=\"hq_min\", help=\"the minimum ratio of high-quality 
reads of heteroplasmies\")\n\tparser.add_argument(\"--llr-min\", type = float, default = 5, dest=\"llr_min\", help=\"the minimum quality score of heteroplasmies\")\n\tparser.add_argument(\"--sbias-min\", type = float, default = 0.001, dest=\"sbias_min\", help=\"the minimum P value for strand bias analysis of heteroplasmies\")\n\tparser.add_argument(\"--frac-min\", type = float, default = 0.01, dest=\"frac_min\", help=\"the minimum minor allele fraction of heteroplasmies\")\n\tparser.add_argument(\"--dev-frac-min\", type = float, default = 0.90, dest=\"dev_frac_min\", help=\"the minimum variant allele fraction of homoplasmies\")\n\tparser.add_argument(\"--annotate\", type = str, dest=\"annotate\", help=\"annotate variants according to the file specified\")\n\tparser.add_argument(\"--output-ped\", default=False, action=\"store_true\", dest=\"output_ped\", help=\"output the variants detected to a ped file\")\n\tparser.add_argument(\"--output-hsd\", default=False, action=\"store_true\", dest=\"output_hsd\", help=\"output major allele to the hsd file\")\n\tparser.add_argument(\"--output-minor-hsd\", default=False, action=\"store_true\", dest=\"output_minor_hsd\", help=\"output minor allele to the hsd file\")\n\tparser.add_argument(\"input\", help=\"the variant file output from scan\")\n\tparser.add_argument(\"output\", help=\"the prefix of output files\")\n\toptions = parser.parse_args(args)\n\t\n\t#initialize globle variables\n\tglobal llr_mim, sbias_min\n\tllr_min = options.llr_min\n\tsbias_min = options.sbias_min\n\t\n\tpos_excl = {}\n\tif (options.exclude):\n\t\twith open(options.exclude) as fh:\n\t\t\tfor line in fh:\n\t\t\t\tline = line.strip()\n\t\t\t\ttry:\n\t\t\t\t\tpos_excl[int(line)] = 1\n\t\t\t\texcept:\n\t\t\t\t\tcontinue\n\tfam_excl = {}\n\tif (options.remove):\n\t\twith open(options.remove) as fh:\n\t\t\tfor line in fh:\n\t\t\t\tline = line.strip()\n\t\t\t\ttry:\n\t\t\t\t\tfam_excl[int(line)] = 1\n\t\t\t\texcept:\n\t\t\t\t\tcontinue\n\thead, data = readMtVariant(options.input, fam_excl, pos_excl)\n\t#head = \"family\\tsample\\tchr\\tpos\\tref\\tdepth\\tdepth_fwd\\tdepth_rev\\tallele\\tA\\tT\\tC\\tG\\ta\\tt\\tc\\tg\\theteroplasmy\\tsubstitution\\thet_allele\\thet_freq\\thet_freq_mle\\thet_freq_llr\\thet_low\\thet_high\\thet_p_fisher\\thet_p_sbias\".split(\"\\t\")\n\thead.append(\"stat\")\n\t\n\tannot = {}\n\tannot_len = 0\n\tif (options.annotate):\n\t\t#read annotation file\n\t\t#build annotation table\n\t\tif (options.annotate.endswith(\".csv\")):\n\t\t\tdelim = \",\"\n\t\telse:\n\t\t\tdelim = \"\\t\"\n\t\twith open(options.annotate, \"r\") as fh:\n\t\t\tline = fh.readline()\n\t\t\tline = line.rstrip(\"\\r\\n\").split(delim)\n\t\t\tn = line.index(\"id\")\n\t\t\thead.extend(line[n+1:])\n\t\t\tannot_len = len(line) - n - 1\n\t\t\tfor line in fh:\n\t\t\t\tline = line.rstrip(\"\\r\\n\").split(delim)\n\t\t\t\tif (not line):\n\t\t\t\t\tcontinue\n\t\t\t\tid = line[n]\n\t\t\t\tannot[id] = line[n+1:]\n\tannot_null = [\"\",]*annot_len\n\t\n\tif (options.output_hsd):\n\t\tout_hsd = open(options.output + \".qc.hsd\", \"wb\")\n\t\tout_hsd.write(\"SampleId\\tRange\\tHaplogroup\\tPolymorphisms (delimited with tabs)\\n\")\n\t\tif (options.output_minor_hsd):\n\t\t\tout_minor_hsd = open(options.output + \"minor.qc.hsd\", \"wb\")\n\t\t\tout_minor_hsd.write(\"SampleId\\tRange\\tHaplogroup\\tPolymorphisms (delimited with tabs)\\n\")\n\t\telse:\n\t\t\tout_minor_hsd = None\n\telse:\n\t\tout_hsd = None\n\t\tout_minor_hsd = None\n\t\n\tvar_all = getMtVariant(data, depth_min = 0, depth_ratio_min = 
0)\n\tsample_all = []\n\tif (options.keep):\n\t\twith open(options.keep, \"rb\") as fh:\n\t\t\tfor line in fh:\n\t\t\t\tline = line.rstrip(\"\\r\\n\")\n\t\t\t\tif (not line):\n\t\t\t\t\tcontinue\n\t\t\t\tfamily, sample = line.split(\"\\t\")\n\t\t\t\tif (family not in fam_excl):\n\t\t\t\t\tsample_all.append([family, sample])\n\telse:\n\t\tfor family in sorted(var_all.keys()):\n\t\t\tfor sample in sorted(var_all[family].keys()):\n\t\t\t\tsample_all.append([family, sample])\n\t\n\t#output sample names\n\t#order corresponds to that of the samples in the ped file and the hsd file\n\twith open(options.output + \".qc.tfam\", \"wb\") as out_fam:\n\t\tfor family, sample in sample_all:\n\t\t\t#use the default phenotype value -9\n\t\t\tout_fam.write(\"\\t\".join([family, sample, \"0\", \"0\", \"-9\", \"-9\"])+\"\\n\")\n\t\n\tsites_all = {}\n\twith open(options.output + \".qc.annot\", \"wb\") as out:\n\t\t#output the head line\n\t\tout.write(\"\\t\".join(head) + \"\\n\")\n\t\tidx = 0 #sample idx\n\t\tfor family, sample in sample_all:\n\t\t\tif (family in var_all and sample in var_all[family]):\n\t\t\t\tvar = var_all[family][sample]\n\t\t\telse:\n\t\t\t\tvar = {}\n\t\t\thomoplasmy = []\n\t\t\theteroplasmy = []\n\t\t\tfor pos in sorted(var.keys()):\n\t\t\t\tv = var[pos]\n\t\t\t\tif (v):\n\t\t\t\t\tif (isHeteroplasmy(v, depth_min = options.depth_min, depth_strand = 0, depth_ratio_min = options.hq_min, freq_min = options.frac_min)):\n\t\t\t\t\t\tstat = \"heteroplasmy\"\n\t\t\t\t\t\theteroplasmy.append(v)\n\t\t\t\t\t\tadd_var = True\n\t\t\t\t\t\ta1 = v.allele\n\t\t\t\t\t\ta2 = v.alt_allele\n\t\t\t\t\telif (v.depth >= options.depth and v.allele != v.ref and v.dev_freq >= options.dev_frac_min):\n\t\t\t\t\t\tstat = \"homoplasmy\"\n\t\t\t\t\t\thomoplasmy.append(v)\n\t\t\t\t\t\tadd_var = True\n\t\t\t\t\t\ta1 = a2 = v.allele\n\t\t\t\t\telif (v.alt_freq_raw > options.frac_min):\n\t\t\t\t\t\t#variant does not pass the filters of variant quality and strand bias (see MTVariant)\n\t\t\t\t\t\tstat = \"heteroplasmy possible\"\n\t\t\t\t\t\tadd_var = False\n\t\t\t\t\t\ta1 = a2 = None\n\t\t\t\t\telse:\n\t\t\t\t\t\tstat = \"unkown\"\n\t\t\t\t\t\tadd_var = False\n\t\t\t\t\t\ta1 = a2 = None\n\t\t\t\t\tout.write(\"\\t\".join(v.line_cache + [stat,] + annot.get(v.id,annot_null))+\"\\n\")\n\t\t\t\telse:\n\t\t\t\t\tstat = \"unkown\"\n\t\t\t\t\tadd_var = True\n\t\t\t\t\ta1 = a2 = \"N\"\n\t\t\t\tif (add_var):\n\t\t\t\t\tif (pos not in sites_all):\n\t\t\t\t\t\tsites_all[pos] = [0, 0, 0, {}] ##homoplamy, #heteroplasmy, #missing, #{sample: allele}\n\t\t\t\t\tsite = sites_all[pos]\n\t\t\t\t\tsite[3][idx]= a1+\"\\t\"+a2\n\t\t\t\t\tif (a1 != a2):\n\t\t\t\t\t\tsite[1] += 1\n\t\t\t\t\telif (a2 == \"N\"):\n\t\t\t\t\t\tsite[2] += 1\n\t\t\t\t\telse:\n\t\t\t\t\t\tsite[0] += 1\n\t\t\tidx += 1\n\t\t\tif (out_hsd):\n\t\t\t\t#use sample index (one-based) instead of the real sample name\n\t\t\t\t#output major alleles\n\t\t\t\tmajor_allele = [[v.pos,str(v.allele)] for v in homoplasmy] + [[v.pos,str(v.allele)] for v in heteroplasmy if v.dev_freq >= options.dev_frac_min]\n\t\t\t\tif (not major_allele):\n\t\t\t\t\tmajor_allele = [\"1G\",]\n\t\t\t\telse:\n\t\t\t\t\tmajor_allele.sort()\n\t\t\t\t\tmajor_allele = [str(p)+str(a) for p, a in major_allele]\n\t\t\t\tout_hsd.write(\"\\t\".join([str(idx),\"1-16569;\",\"?\"] + major_allele)+\"\\n\")\n\t\t\t\tif (out_minor_hsd):\n\t\t\t\t\tminor_allele = [[v.pos,str(v.alt_allele)] for v in heteroplasmy]\n\t\t\t\t\tif (minor_allele):\n\t\t\t\t\t\tminor_allele.sort()\n\t\t\t\t\t\tminor_allele = [str(p)+str(a) for p, 
a in minor_allele]\n\t\t\t\t\tout_minor_hsd.write(\"\\t\".join([str(idx),\"1-16569;\",\"?\"] + minor_allele)+\"\\n\")\n\n\tif (out_hsd):\n\t\tout_hsd.close()\n\t\tif (out_minor_hsd):\n\t\t\tout_minor_hsd.close()\n\t\n\tif (options.output_ped):\n\t\tsites = sorted(sites_all.keys())\n\t\tout_ped = open(options.output + \".qc.tped\", \"wb\")\n\t\tout_map = open(options.output + \".qc.map\", \"wb\")\n\t\tfor i in sites:\n\t\t\tsite = sites_all[i]\n\t\t\tout_ped.write(\"\\t\".join([\"26\",str(i)+reference_seq[i],\"0\",str(i)]))\n\t\t\tout_ped.write(\"\\t\")\n\t\t\tsite_sample = site[3]\n\t\t\tref = reference_seq[i]+\"\\t\"+reference_seq[i]\n\t\t\tout_ped.write(\"\\t\".join([site_sample.get(j,ref) for j in range(len(sample_all))]))\n\t\t\tout_ped.write(\"\\n\")\n\t\t\tout_map.write(\"\\t\".join([\"26\",str(i)+reference_seq[i],\"0\",str(i)]+list(map(str,site[:3])))+\"\\n\")\n\t\"\"\"\n\tas plink does not handle multi-allelic variants\n\tto remove these variants in R\n\tnallele <- apply(tped[,5:ncol(tped)],1,function(x){length(unique(x))})\n\twrite.table(tped[nallele==2,], \"biallele.tped\", sep=\"\\t\", quote = F,col.names = F, row.names = F)\n\t\"\"\"", "def main():\n\n args = parse_arguments()\n\n # check the validity of parameters specifying input/output\n if args.input_file is None and \\\n (args.input_suffix is None or args.directory is None):\n raise RuntimeError('No input data supplied! You have to specify either'\n ' -i or -I and the data directory.')\n if args.output_file is None and \\\n (args.output_suffix is None or args.directory is None):\n raise RuntimeError('No output file speficied! You have to specify '\n 'either -o or -O and the data directory.')\n if args.gt_file is None and \\\n (args.gt_suffix is None or args.directory is None):\n raise RuntimeError('No ground truth file speficied! 
You have to '\n 'specify either -g or -G and the data directory.')\n \n # read the test data\n ocr_dict = dict(load_pairs_from_file(args.input_file)) \\\n if args.input_file is not None \\\n else dict(load_pairs_from_dir(args.directory, args.input_suffix))\n cor_dict = dict(load_pairs_from_file(args.output_file)) \\\n if args.output_file is not None \\\n else dict(load_pairs_from_dir(args.directory, args.output_suffix))\n gt_dict = dict(load_pairs_from_file(args.gt_file)) \\\n if args.gt_file is not None \\\n else dict(load_pairs_from_dir(args.directory, args.gt_suffix))\n line_triplets = \\\n ((ocr_dict[key].strip(), cor_dict[key].strip(), gt_dict[key].strip()) \\\n for key in gt_dict)\n\n if args.metric == 'precision-recall':\n TP, TN, FP, FN = compute_total_precision_recall(\n line_triplets, silent=args.silent)\n precision = 1 if TP+FP==0 else TP/(TP+FP)\n recall = 1 if TP+FN==0 else TP/(TP+FN)\n f1 = 2*TP/(2*TP+FP+FN)\n tpr = recall # \"sensitivity\"\n fpr = 0 if FP+TN==0 else FP/(FP+TN) # \"overcorrection rate\"\n auc = 0.5*tpr*fpr+tpr*(1-fpr)+0.5*(1-tpr)*(1-fpr)\n print('Aggregate precision: %.3f / recall: %.3f / F1: %.3f' %\n (precision, recall, f1))\n print('Aggregate true-positive-rate: %.3f '\n '/ false-positive-rate: %.3f / AUC: %.3f' %\n (tpr, fpr, auc))\n\n elif args.metric == 'Levenshtein':\n edits_ocr, len_ocr, edits_cor, len_cor = \\\n compute_total_edits_levenshtein(line_triplets, silent=args.silent)\n print('Aggregate CER OCR: ', edits_ocr / len_ocr)\n print('Aggregate CER Corrected: ', edits_cor / len_cor)\n\n elif args.metric == 'combining-e-umlauts':\n edits_ocr, len_ocr, edits_cor, len_cor = \\\n compute_total_edits_combining_e_umlauts(\n line_triplets, silent=args.silent)\n print('Aggregate CER OCR: ', edits_ocr / len_ocr)\n print('Aggregate CER Corrected: ', edits_cor / len_cor)", "def main():\n print(\"SEC Parser\")\n DERA()", "def main_ededge(dataset):\n Application.set_input_image_folder('TestData/BSR/BSDS500/data/images/' + dataset)\n\n # Application.delete_folder_appl_out()\n # Benchmarking.delete_folder_benchmark_out()\n\n Application.do_get_image_job(port_output_name='RAW')\n Application.do_grayscale_transform_job(port_input_name='RAW', port_output_name='GRAY_RAW')\n blur = Application.do_gaussian_blur_image_job(port_input_name='GRAY_RAW', sigma=0, kernel_size=9)\n\n list_to_eval_edge = []\n\n first_order_edge = [\n CONFIG.FILTERS.PIXEL_DIFF_3x3, CONFIG.FILTERS.PIXEL_DIFF_SEPARATED_3x3\n , CONFIG.FILTERS.PIXEL_DIFF_SEPARATED_5x5, CONFIG.FILTERS.PIXEL_DIFF_SEPARATED_7x7\n , CONFIG.FILTERS.PIXEL_DIFF_5x5, CONFIG.FILTERS.PIXEL_DIFF_7x7\n\n , CONFIG.FILTERS.SOBEL_3x3, CONFIG.FILTERS.SOBEL_5x5, CONFIG.FILTERS.SOBEL_7x7\n , CONFIG.FILTERS.SOBEL_DILATED_5x5, CONFIG.FILTERS.SOBEL_DILATED_7x7\n\n , CONFIG.FILTERS.PREWITT_3x3, CONFIG.FILTERS.PREWITT_5x5, CONFIG.FILTERS.PREWITT_7x7\n , CONFIG.FILTERS.PREWITT_DILATED_5x5, CONFIG.FILTERS.PREWITT_DILATED_7x7\n\n , CONFIG.FILTERS.KIRSCH_3x3, CONFIG.FILTERS.KIRSCH_5x5\n , CONFIG.FILTERS.KIRSCH_DILATED_5x5, CONFIG.FILTERS.KIRSCH_DILATED_7x7\n\n , CONFIG.FILTERS.KITCHEN_MALIN_3x3\n , CONFIG.FILTERS.KITCHEN_MALIN_DILATED_5x5, CONFIG.FILTERS.KITCHEN_MALIN_DILATED_7x7\n\n , CONFIG.FILTERS.KAYYALI_3x3\n , CONFIG.FILTERS.KAYYALI_DILATED_5x5, CONFIG.FILTERS.KAYYALI_DILATED_7x7\n\n , CONFIG.FILTERS.SCHARR_3x3, CONFIG.FILTERS.SCHARR_5x5\n , CONFIG.FILTERS.SCHARR_DILATED_5x5, CONFIG.FILTERS.SCHARR_DILATED_7x7\n\n , CONFIG.FILTERS.KROON_3x3\n , CONFIG.FILTERS.KROON_DILATED_5x5, CONFIG.FILTERS.KROON_DILATED_7x7\n\n , 
CONFIG.FILTERS.ORHEI_3x3, CONFIG.FILTERS.ORHEI_B_5x5\n , CONFIG.FILTERS.ORHEI_DILATED_5x5, CONFIG.FILTERS.ORHEI_DILATED_7x7\n ]\n\n for edge in first_order_edge:\n for gr_thr in [50]:\n for anc_thr in [10]:\n e1, e2, = Application.do_edge_drawing_mod_job(port_input_name=blur, operator=edge,\n gradient_thr=gr_thr, anchor_thr=anc_thr, scan_interval=1,\n max_edges=100, max_points_edge=100)\n list_to_eval_edge.append(e1 + '_L0')\n\n Application.create_config_file(verbose=False)\n Application.configure_save_pictures(job_name_in_port=False, ports_to_save='ALL')\n # Application.configure_show_pictures(ports_to_show=list_to_save, time_to_show=200)\n\n # Application.run_application()\n\n # Do bsds benchmarking\n # Be ware not to activate job_name_in_port in Application.configure_save_pictures\n # Benchmarking.run_bsds500_boundary_benchmark(input_location='Logs/application_results',\n # gt_location='TestData/BSR/BSDS500/data/groundTruth/' + dataset,\n # raw_image='TestData/BSR/BSDS500/data/images/' + dataset,\n # jobs_set=list_to_eval_edge, do_thinning=False)\n\n Utils.plot_first_cpm_results(prefix='EDGE_DRAWING_MOD_', level='L0', order_by='f1', name='ed_results',\n list_of_data=list_to_eval_edge, number_of_series=50,\n inputs=[''], self_contained_list=True, set_legend_left=False,\n suffix_to_cut_legend='_S_0_GRAY_RAW_L0',\n replace_list=[('EDGE_DRAWING_MOD_THR_50_ANC_THR_10_SCAN_1_', ''),\n ('SEPARATED_PIXEL_DIFFERENCE_', 'Separated Px Dif '),\n ('PIXEL_DIFFERENCE_', 'Pixel Dif '),\n ('PREWITT_', 'Prewitt '), ('KIRSCH_', 'Kirsch '), ('SOBEL_', 'Sobel '),\n ('SCHARR_', 'Scharr '), ('KROON_', 'Kroon '), ('ORHEI_V1_', 'Orhei '),\n ('ORHEI_', 'Orhei '),\n ('KITCHEN_', 'Kitchen '), ('KAYYALI_', 'Kayyali '),\n ('DILATED_', 'dilated '),\n ('_GAUSS_BLUR_K_9', '')],\n save_plot=True, show_plot=False, set_all_to_legend=False)\n\n # Utils.create_latex_cpm_table_list()\n\n Utils.close_files()", "def main():\n args = parse_args()\n logger.debug(\"%s\", \" \".join(sys.argv))\n logger.debug(\"Args: %s\", args)\n\n percentiles = pd.read_csv(args.percentiles, index_col=0)\n percentiles.index = pd.to_datetime(percentiles.index)\n steering_table = pd.read_csv(args.steering_table)\n steering_table['period'] = steering_table['period'].apply(\n lambda x: tuple(map(int, x.strip('()').split(','))))\n\n if args.extra_data:\n paths = list(itertools.chain.from_iterable(atlist(path) for path in args.extra_data))\n dataset = read_data(paths)\n extra_data = normalize_average_dataset(dataset['cube'], relative=args.relative,\n reference_period=args.reference_period)\n\n plot(percentiles, steering_table, args.outfile, xlabel=args.xlabel, ylabel=args.ylabel,\n xrange=args.xrange, yrange=args.yrange, title=args.title, smooth=args.smooth,\n extra_data=extra_data, extra_label=args.extra_label,\n reference_epoch=args.reference_epoch, grid=args.grid, legend=args.legend)\n logger.info(\"Done processing\")", "def Main():\n numberOfPopulation = 350\n numberOfDays = 60\n \n simulation = Simulation(Covid19(), numberOfPopulation, numberOfDays, \"Covid 19 Simulation\")\n simulation.run() \n simulation = Simulation(Ebola(), numberOfPopulation, numberOfDays, \"Ebola Simulation\")\n simulation.run()", "def main():\r\n parametrized_data = loadData('files/parametrized.p')\r\n config = loadConfig('config/gmm.cfg')\r\n\r\n data_ = eachDigitGMM(parametrized_data, config)\r\n\r\n save(data_)", "def main():\n\t#first check args and file paths\n\tcheckArgs(args)\n\t\n\tdata = args.dataset_file\n\tf_name = data.split(\".\")\n\tprint 
\"\\n[AP]\\t\"+\"######## \"+f_name[0] + '.' + f_name[1]+\" ########\"\n\tprint \"\\n[AP]\\tChecked inputs, now acquiring data\"\n\n\thost = \"localhost\"\n\tuser = \"readonly\"\n\tpasswd = \"readonlypswd\"\n\tdb = args.db_schema\n\tdb_table = args.db_table\n\n\tnameFile = data[0:-20]\n\tdataset = queryDataset(host,user,passwd,db,db_table,\"tmpFile.txt\",nameFile)\n\tif dataset is not None:\n\t\tdataset = dataset.rstrip('\\n')\n\t\tdataset = dataset.replace(\"/\",\"-\")\n\n\t\tlocations_list, length_list = generateDataset(data)\n\n\t\tif len(locations_list) < 2:\n\t\t\tprint \"\\n[SKIP]\\t{dataset} has only one unique line! Can't estimate anything.\\n\\tSKIP THIS FILE!\\n\".format(dataset=str(dataset))\n\t\t\treturn 0\n\n\t\t# Alias for estAbund calling\n\t\testAbund = sonicLength.estAbund\n\n\t\t# Call estAbund and store returned object in results\n\t\tresults = estAbund(robjects.StrVector(locations_list), robjects.FloatVector(length_list))\n\n\t\t# Put estimation for theta in estimations_theta and associated locations in locations_theta; then organize data in dic_of_theta\n\t\ttheta = results.rx2(\"theta\")\n\t\testimations_theta = tuple(theta)\n\t\tlocations_theta = tuple(theta.names)\n\t\t# dic_of_theta\n\t\tdic_of_theta = {}\n\t\tfor i in range(len(locations_theta)):\n\t\t\tdic_of_theta.update({locations_theta[i]:estimations_theta[i]})\n\n\t\t# Put different fragment lengths in length_phi and associated frequencies in freq_phi\n\t\tphi = results.rx2(\"phi\")\n\t\tfreq_phi = tuple(phi)\n\t\tlength_phi = tuple(phi.names)\n\n\t\tlength_phi_numbers = fragmentsLengthPlot(length_phi,freq_phi,length_list,nameFile,dataset)\n\n\t\tprintThetaInfo(estimations_theta,locations_theta,nameFile)\n\n\t\t# Retrieving redundant reads data\n\t\tdic_of_redundant_reads_count, sequence_count_list = redundant_reads_count(from_file_to_list(data,'.tsv'))\n\n\t\t# Box Plot\n\t\tsequence_count = []\n\t\tfor v in sequence_count_list:\n\t\t\tsequence_count.append(int(v))\n\t\tbox_plot(sequence_count, estimations_theta, nameFile,dataset)\n\n\t\t# Plot: unique lengths retrieved for a genomic location VS expected number of parent fragment for the same location\n\t\tphi_VS_theta(length_phi, freq_phi, nameFile, dataset)\n\n\n\t\t#######################################################################################################\n\t\t# Produce .tsv output about measured redundant reads count, abundance-corrected redundant reads count # \n\t\t# and some descriptive of unique fragments lengths #\n\t\t#######################################################################################################\n\n\t\t# Retrieving data\n\t\tdic_of_relative_abundance, dic_of_corrected_reads_count, dic_of_percentage_difference = corrected_reads_count(dic_of_redundant_reads_count, dic_of_theta)\n\t\tdic_of_unique_lengths, dic_of_unique_lengths_number, dic_of_median_of_unique_lengths, dic_of_MAD = fragment_lengths_statistics(data)\n\t\tdic_of_lengths = lengths_explicit_list(from_file_to_list(data,'.txt'))\n\n\t\t# Writing File\n\t\tcorrected_file = open(dataset + \".\" + nameFile+\".outcomes\"+\".tsv\", 'w')\n\t\tcorrected_file.write(\"Chromosome\\tIntegration_locus\\tStrand\\tSequence_Count\\tEstimated_Relative_Abundance\\tCorrected_Sequence_Count\\tPercentage_Variation\\tNumber_of_fragments_of_unique_lengths\\tLength_Min\\tLength_Max\\tLenght_Median\\tRounded_Lenght_Median\\tMAD\\tUnique_Lengths_List\\tUnique_Lengths_Amount\\tCEM_region_?\") ## ! NB ! 
## \\tCEM_region_?\" has to remain the last!!!\n\t\tgenome_locations = dic_of_redundant_reads_count.keys()\n\t\tgenome_locations.sort()\n\t\tfor key in genome_locations:\n\t\t\tsplitted_location = key.split(' ')\n\t\t\tcorrected_file.write(\"\\n\" + splitted_location[0] + \"\\t\" + splitted_location[1] + \"\\t\" + splitted_location[2] + \"\\t\" + str(dic_of_redundant_reads_count[key]) + \"\\t\" + str(round(dic_of_relative_abundance[key],5)) + \"\\t\" + str(round(dic_of_corrected_reads_count[key],0)) + \"\\t\" + str(dic_of_percentage_difference[key]) + \"\\t\" + str(dic_of_unique_lengths_number[key]) + \"\\t\" + str(min(dic_of_unique_lengths[key])) + \"\\t\" + str(max(dic_of_unique_lengths[key])) + \"\\t\" + str(dic_of_median_of_unique_lengths[key]) + \"\\t\" + str(math.ceil(dic_of_median_of_unique_lengths[key]))+ \"\\t\" + str(dic_of_MAD[key]) + \"\\t\" + str(dic_of_unique_lengths[key]) + \"\\t\" + str(dic_of_lengths[key]))\n\t\t\tresponse, cem_symbol, cem_coordinates = is_CEM(key)\n\t\t\tif (response == True):\n\t\t\t\tcorrected_file.write(\"\\t\" + cem_symbol)\n\n\t\t# Write database file - Like corrected_file with more field appended in the end\n\t\tdb_file = open(dataset + \".\" + nameFile+\".db_file\"+\".tsv\", 'w')\n\t\tgenome_locations = dic_of_redundant_reads_count.keys()\n\t\tgenome_locations.sort()\n\t\tdataset_split = dataset.split('.')\n\t\tdataset_label = '_'.join(dataset_split)\n\t\tfor key in genome_locations:\n\t\t\tsplitted_location = key.split(' ')\n\t\t\tdb_file.write(splitted_location[0] + \"\\t\" + splitted_location[1] + \"\\t\" + splitted_location[2] + \"\\t\" + str(dic_of_redundant_reads_count[key]) + \"\\t\" + str(round(dic_of_relative_abundance[key],5)) + \"\\t\" + str(round(dic_of_corrected_reads_count[key],0)) + \"\\t\" + str(dic_of_percentage_difference[key]) + \"\\t\" + str(dic_of_unique_lengths_number[key]) + \"\\t\" + str(min(dic_of_unique_lengths[key])) + \"\\t\" + str(max(dic_of_unique_lengths[key])) + \"\\t\" + str(dic_of_median_of_unique_lengths[key]) + \"\\t\" + str(math.ceil(dic_of_median_of_unique_lengths[key]))+ \"\\t\" + str(dic_of_MAD[key]) + \"\\t\" + str(dic_of_unique_lengths[key])[1:-1] + \"\\t\" + str(dic_of_lengths[key])[1:-1] + \"\\t\")\n\t\t\tdb_file.write(\"\\t\".join(dataset_split) + \"\\t\" + dataset_label + \"\\t\")\n\t\t\tresponse, cem_symbol, cem_coordinates = is_CEM(key)\n\t\t\tif (response == True):\n\t\t\t\tdb_file.write(cem_symbol + \"\\t\" + cem_coordinates)\n\t\t\telse:\n\t\t\t\tdb_file.write(\"\\t\")\n\t\t\tdb_file.write(\"\\n\")\n\n\t\tdb_file.close()\n\n\t\t#######################################################################################################\n\n\t\t# Last print for user\n\t\tprint \"\\n[AP]\\tTask Finished, closing.\\n\"\n\telse:\n\t\tprint \"\\n[AP]\\tThe dataset is not in the reference DB. 
Skipped.\\n\"\n\n\treturn 0", "def main():\n parser = argparse.ArgumentParser(\n usage = '%(prog)s [OPTIONS] [ARGS...]',\n description='Calculate something',\n epilog='Contact simon.clematide@uzh.ch'\n )\n parser.add_argument('--version', action='version', version='0.99')\n parser.add_argument('-l', '--logfile', dest='logfile',\n help='write log to FILE', metavar='FILE')\n parser.add_argument('-q', '--quiet',\n action='store_true', dest='quiet', default=False,\n help='do not print status messages to stderr')\n parser.add_argument('-d', '--debug',\n action='store_true', dest='debug', default=False,\n help='print debug information')\n parser.add_argument('-c', '--corpus_dir',\n action='store', dest='corpus_dir', default='corpus',\n help='directory with corpus data %(default)')\n parser.add_argument('-m', '--model_dir',\n action='store', dest='model_dir', default='model',\n help='directory with model data %(default)')\n parser.add_argument('-B', '--is_backward_lm',\n action='store_true', dest='is_backward_lm', default=False,\n help='build backward model')\n parser.add_argument('args', nargs='*')\n options = parser.parse_args()\n if options.logfile:\n logging.basicConfig(filename=logfile)\n if options.debug:\n logging.basicConfig(level=logging.DEBUG)\n\n process(options)", "def main():\n parser = argparse.ArgumentParser(description='MergeGVCFs and genotype them using the GATK')\n parser.add_argument('-g', '--gatk', dest='gatk', help=\"Location of the GATK\", required=True)\n parser.add_argument('-x', '--xmx', dest='xmx', help=\"Memory to use with JAVA\", required=True)\n parser.add_argument('-c', '--cores', dest='cores', help=\"Number of cores to use\")\n parser.add_argument('-o', '--output', dest='output', \n help='Final output from the haplotype caller')\n parser.add_argument('-r', '--reference', dest='reference', \n help='Reference FASTA file')\n parser.add_argument('-b','--bed', dest='bed_file',\n help=\"Bed file for limiting the GATK\")\n parser.add_argument('-p', '--ploidy', dest='ploidy', \n help=\"Sample ploidy\", default=2)\n parser.add_argument('-d', '--out_directory', dest='directory', help='Output director')\n parser.add_argument('bams', nargs=\"*\", help='gVCF variant call files output from the GATK')\n args = parser.parse_args()\n args.cores = int(args.cores)\n args.xmx = args.xmx.strip('\"')\n print args.bams\n genovcfs = haplotype_caller(gatk=args.gatk, xmx=args.xmx, cores=args.cores,\n bams=args.bams, reference=args.reference,\n out_directory=args.directory, ploidy=args.ploidy, bed_file=args.bed_file)\n outputs = merge_gvcfs(gatk=args.gatk, xmx=args.xmx, cores=args.cores,\n gvcfs=genovcfs, reference=args.reference)\n genotype_gvcfs(gatk=args.gatk, xmx=args.xmx, cores=args.cores,\n inputs=outputs, output=args.output, reference=args.reference,bed_file=args.bed_file)\n #haplotype_single(gatk=args.gatk, xmx=args.xmx, cores=args.cores,\n # inputs=args.gvcfs, reference=args.reference)", "def main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--filepath\", default=None, type=str, required=True, help=\"Path to dataset\")\n parser.add_argument(\"--truncate\", action='store_true', help=\"Truncate the data when enabled\")\n parser.add_argument(\"--stats\", action='store_true', help=\"Get stats for the file\")\n parser.add_argument(\"--count_vocab\", action='store_true', help=\"Get vocabulary count and save vocabulary for the file\")\n ##generation\n parser.add_argument('--generate', action='store_true', help=\"Start the generation\")\n 
parser.add_argument(\"--temperature\", type=float, default=1.0, help=\"Softmax temperature setting\")\n parser.add_argument(\"--length\", type=int, default=150, help=\"number of words to be generated\")\n parser.add_argument(\"--top_k\", type=int, default=1, help=\"parameter for Top-k sampling\")\n parser.add_argument('--stop_token', type=str, default=None, help=\"Token at which text generation is stopped\")\n parser.add_argument('--num_samples', type=int, default=500, help=\"Number of samples to be generated and compared with\")\n parser.add_argument('--save_dir', default=\"../save/\", type=str, help=\"Path to save the system outputs\")\n parser.add_argument(\"--no_cuda\", action='store_true', help=\"Avoid using CUDA when available\")\n ##evaluation\n parser.add_argument(\"--evaluate\", action='store_true', help=\"Start the evaluation\")\n parser.add_argument(\"--eval_dir\", default='../save/gpt2/', help=\"The path to evaluate the system outputs\")\n parser.add_argument(\"--eval_model\", default='gpt2', help=\"The model name to evaluate the system outputs\")\n parser.add_argument(\"--reading_scores\", action='store_true', help=\"Get the average reading scores\") #OK\n parser.add_argument(\"--content_words\", action='store_true', help=\"Get the normalized mean of content words and stop words\") #OK\n parser.add_argument(\"--ngram_overlap\", action='store_true', help=\"Get the average N gram overlap percentage with the prompt\") #OK\n parser.add_argument(\"--sw\", action='store_true', help=\"Do stopword elimination\")\n parser.add_argument(\"--stem\", action='store_true', help=\"Do stemming\")\n parser.add_argument(\"--parse_scores\", action='store_true', help=\"Get the average, skewness and kurtosis of the parses of stories\") \n parser.add_argument(\"--sentemb_sim_scores\", action='store_true', help=\"Get the sentence embedding similarity percentage with the prompt\")\n parser.add_argument(\"--sent_length\", action='store_true', help=\"Get the average sentence length\")\n parser.add_argument(\"--pos_tag_fqd\", action='store_true', help=\"Get POS tag frequency distribution as percentages\")\n parser.add_argument(\"--log_unigm_prob\", action='store_true', help=\"Get the average log unigram probability\")\n # parser.add_argument(\"--coherence_scores\", action='store_true', help=\"Get the average coherence scores\") \n args = parser.parse_args()\n\n\n filepath = args.filepath\n truncate_bool = args.truncate\n stats_bool = args.stats \n vocab_bool = args.count_vocab\n #generation\n generate_bool = args.generate\n temperature = args.temperature\n length = args.length\n top_k = args.top_k\n stop_token = args.stop_token\n num_samples = args.num_samples\n save_dir = args.save_dir\n no_cuda_bool = args.no_cuda\n #evaluation\n evaluate_bool = args.evaluate\n eval_direcpath = args.eval_dir #path to the model folder\n eval_modelname = args.eval_model #name of the model evaluating\n eval_RS = args.reading_scores #evaluate reading scores\n eval_CW = args.content_words #evaluate the percentage of content and stop words\n eval_NG = args.ngram_overlap #evaluate story prompt relatedness scores with ngram overlap pc\n eval_PS = args.parse_scores #evaluate the grammaticality\n eval_SE = args.sentemb_sim_scores #evaluate story prompt relatedness scores\n eval_SL = args.sent_length #evaluate the syntactic complexity\n eval_PF = args.pos_tag_fqd #evaluate the pos-tag frequency distribution as percentages\n eval_RW = args.log_unigm_prob #evaluate the rareword usage scores as mean log unigram probability\n sw = 
False\n if args.sw:\n sw = True\n stem = False\n if args.stem:\n stem = True\n\n f_prep = FilePreprocessor(filepath) \n if truncate_bool: #required when you are running the code the first time\n f_prep.truncate_stories(num_words=1000)\n if stats_bool:\n num_stories, num_prompts = f_prep.check_num_stories()\n print (num_prompts, num_stories) \n if vocab_bool:\n vocab_counter_prompt, vocab_counter_story = f_prep.make_vocabulary()\n print (\"The vocabulary for the stories: {}\".format(vocab_counter_story))\n print (\"The vocabulary for the prompts: {}\".format(vocab_counter_prompt))\n ##### get the prompt from the file -- done\n ##### get the model type and model file name and path as a dictionary -- done\n ##### for each model type save the prompt, the original story and the generated story with \"temp val\" and \"top k\" val and \"model name\" and \"index of random story prompt selected\" in a file: \"gentext_\"+model_+\"_\"+temperature+\"_\"+top_k+\"_\"+i -- done\n ##### finish the 4 openai gptx models and then move onto xlnet models --done\n if generate_bool:\n # define the pre-trained models offered by huggingface/transformers github: https://github.com/huggingface/transformers for generation\n # Model classes at https://github.com/huggingface/transformers/blob/master/examples/run_generation.py \n if not os.path.exists(save_dir): os.mkdir(save_dir)\n # PT_model_dict = {\"openai-gpt\": [\"openai-gpt\"], \"gpt2\": [\"gpt2\", \"gpt2-medium\", \"gpt2-large\", \"distilgpt2\"], \"xlnet\": [\"xlnet-base-cased\", \"xlnet-large-cased\"], \"transfo-xl\": [\"transfo-xl-wt103\"], \"xlm\": [\"xlm-mlm-en-2048\", \"xlm-mlm-ende-1024\", \"xlm-mlm-enfr-1024\", \"xlm-mlm-enro-1024\", \"xlm-mlm-tlm-xnli15-1024\", \"xlm-mlm-xnli15-1024\", \"xlm-clm-enfr-1024\", \"xlm-clm-ende-1024\", \"xlm-mlm-17-1280\", \"xlm-mlm-100-1280\"]}\n PT_model_dict = {\"openai-gpt\": [\"openai-gpt\"], \"gpt2\": [\"gpt2\", \"gpt2-medium\", \"gpt2-large\"], \"xlnet\": [\"xlnet-base-cased\", \"xlnet-large-cased\"], \"transfo-xl\": [\"transfo-xl-wt103\"]}\n # #check values for variables exist\n # assert temperature\n # assert length\n # assert top_k\n print (\"Get the prompts from {} samples in the test set...\".format(num_samples))\n story_files_dict = f_prep.get_art_prp_file()\n story_files_test = story_files_dict['test']\n nums_selected = random.sample(range(len(story_files_test)), num_samples)\n for idx, i in enumerate(nums_selected):\n prompt = (story_files_test[i][0]).replace(\"[ wp ]\", \"\") #remove the tag from the prompt and save it\n story = story_files_test[i][1]\n # print (\"Prompt: {}\".format(prompt))\n # print (\"Original Story: {}\".format(story))\n for k,v in PT_model_dict.items():\n model_type = k\n model_names_list = v\n for model_ in model_names_list:\n print (\"Generating story #{} with model {} ...\".format(idx+1, model_))\n print (\"Selected story prompt: {}\".format(i+1))\n start_time = time.time()\n generated_text = text_generator(model_type=model_type, model_name_or_path=model_, prompt=prompt, padding_text=story[:50], xlm_lang=\"\", length=length, temperature=temperature, top_k=top_k, top_p=0.9, no_cuda=no_cuda_bool, seed=42, stop_token=stop_token, verbose=False)\n time_elapsed = time.time() - start_time\n temp_pc = int(temperature*100)\n filename_ = \"gentext_\"+model_+\"_T\"+str(temp_pc)+\"_k\"+str(top_k)+\"_\"+str(i)+\".txt\"\n with open(os.path.join(save_dir, filename_),'w') as w_f:\n w_f.write(\"Prompt: \" + prompt + \"\\n\")\n w_f.write(\"Original: \" + story + \"\\n\")\n w_f.write(\"Generated: \" + 
generated_text + \"\\n\")\n w_f.write(\"Time elapsed: \" + str(time_elapsed) + \"\\n\")\n ##### get the directory of the samples by each model --done\n ##### read the files and get the dataframe from each model \n if evaluate_bool:\n print (\"Evaluation for {} model: \".format(eval_modelname))\n eval_modelObj = EvalDQ(eval_direcpath)\n print (\"Reading the samples ...\") \n \n if eval_modelname == \"fusion\":\n df_modelObj = eval_modelObj.read_fusion_output()\n else:\n df_modelObj = eval_modelObj.read_data_strings()\n # print (df_modelObj[\"temp\"].tolist())\n # exit()\n \n temp = set(df_modelObj[\"temp\"].tolist())\n topK = set(df_modelObj[\"topK\"].tolist())\n print (\"The shape of the Dataframe object for model {} is {}:\".format(eval_modelname, df_modelObj.shape))\n print (\"The temperature and k values are: {} and {}:\".format(temp, topK))\n \n if eval_RS:\n print (\"Calculating the Readability scores ... \")\n print (\"For the original stories ...\")\n df_modelObj_RS_original = eval_modelObj.get_readability_scores(df_modelObj,\"original\")\n print (\"The mean reading score values for the original files ...\")\n print (df_modelObj_RS_original.mean(axis=0))\n print (\"For the generated stories ...\")\n df_modelObj_RS_generated = eval_modelObj.get_readability_scores(df_modelObj,\"generated\")\n print (\"The mean reading score values for the generated files ...\")\n print (df_modelObj_RS_generated.mean(axis=0))\n \n if eval_CW:\n print (\"Calculating the percentage of content words VS stop words ...\")\n print (\"For the original stories ...\")\n cw_ct_ori, sw_ct_ori = eval_modelObj.count_contentwords(df_modelObj, \"original\")\n mean_cw_ct_ori = statistics.mean(cw_ct_ori) #look at the normalized mean \n mean_sw_ct_ori = statistics.mean(sw_ct_ori)\n print (\"The normalized mean for content words is {} and for stop words is {}\".format(mean_cw_ct_ori, mean_sw_ct_ori))\n print (\"For the generated stories ...\")\n cw_ct_gen, sw_ct_gen = eval_modelObj.count_contentwords(df_modelObj, \"generated\")\n mean_cw_ct_gen = statistics.mean(cw_ct_gen) #look at the normalized mean \n mean_sw_ct_gen = statistics.mean(sw_ct_gen)\n print (\"The normalized mean for content words is {} and for stop words is {}\".format(mean_cw_ct_gen, mean_sw_ct_gen))\n\n if eval_NG:\n print (\"Calculating the Story Prompt Relatedness scores ... 
\")\n print (\"Calculating the average n-gram overlap with the prompt...\")\n # avg_ngmoverlap_pc_gen = eval_modelObj.ngram_overlap(df_modelObj, (\"generated\", \"prompt\"), n=3)\n # print (\"The average overlap percentage is {}\".format(avg_ngmoverlap_pc_gen))\n print (\"For the original stories ...\")\n for i in [1,2,3]:\n print (\"Getting the average for n={}\".format(i))\n avg_ngmoverlap_pc_ori = eval_modelObj.ngram_overlap(df_modelObj, (\"original\", \"prompt\"), n=i, sw=sw, stem=stem)\n print (\"The average overlap percentage is {}\".format(avg_ngmoverlap_pc_ori))\n print (\"For the generated stories ...\")\n for i in [1,2,3]:\n print (\"Getting the average for n={}\".format(i))\n avg_ngmoverlap_pc_gen = eval_modelObj.ngram_overlap(df_modelObj, (\"generated\", \"prompt\"), n=i, sw=sw, stem=stem)\n print (\"The average overlap percentage is {}\".format(avg_ngmoverlap_pc_gen))\n\n if eval_PS:\n print (\"Calculating the constituency parsing scores ...\")\n print (\"For the original stories ...\")\n _, skew_scores_ori, kurt_scores_ori = eval_modelObj.parsing_score_calculation(df_modelObj, \"original\")\n mean_skew_scores_ori = statistics.mean(skew_scores_ori) #look at the normalized mean \n mean_kurt_scores_ori = statistics.mean(kurt_scores_ori)\n print (\"The mean skewness is {} and kurtosis is {}\".format(mean_skew_scores_ori, mean_kurt_scores_ori))\n print (\"For the generated stories ...\")\n _, skew_scores_gen, kurt_scores_gen = eval_modelObj.parsing_score_calculation(df_modelObj, \"generated\")\n mean_skew_scores_gen = statistics.mean(skew_scores_gen) #look at the normalized mean \n mean_kurt_scores_gen = statistics.mean(kurt_scores_gen)\n print (\"The mean skewness is {} and kurtosis is {}\".format(mean_skew_scores_gen, mean_kurt_scores_gen))\n \n if eval_SE:\n print (\"Calculating the Story Prompt Relatedness scores ... 
\")\n print (\"Calculating the sentence embedding similarity with the prompt...\")\n print (\"For the original stories ...\")\n avg_sentemb_sim_ori = eval_modelObj.word2vec_sentsim(df_modelObj, (\"original\", \"prompt\"))\n print (\"The average sentence embedding similarity is {}\".format(avg_sentemb_sim_ori))\n print (\"For the generated stories ...\")\n avg_sentemb_sim_gen = eval_modelObj.word2vec_sentsim(df_modelObj, (\"generated\", \"prompt\"))\n print (\"The average sentence embedding similarity is {}\".format(avg_sentemb_sim_gen))\n\n if eval_SL:\n print (\"Calculating the average sentence length ...\")\n print (\"For the orginal stories ...\")\n sentlen_list_ori = eval_modelObj.average_sentence_length(df_modelObj, \"original\")\n mean_sentlen_ori = statistics.mean(sentlen_list_ori)\n print (\"The average sentence length is {}\".format(mean_sentlen_ori))\n print (\"For the generated stories ...\")\n sentlen_list_gen = eval_modelObj.average_sentence_length(df_modelObj, \"generated\")\n mean_sentlen_gen = statistics.mean(sentlen_list_gen)\n print (\"The average sentence length is {}\".format(mean_sentlen_gen))\n \n if eval_PF:\n print (\"Calculating the POS tag frequency tag distribution ...\")\n print (\"For the original stories ...\")\n df_modelObj_POS_ori = eval_modelObj.pos_tag_freqdist(df_modelObj, \"original\")\n print (\"The mean POS tag percentages for the original files ...\")\n POS_dict_ori = (df_modelObj_POS_ori.mean(axis=0)).to_dict()\n print (\"NOUN: {} and VERB: {}\".format(POS_dict_ori['NOUN']*100, POS_dict_ori['VERB']*100))\n print (\"For the generated stories ...\")\n df_modelObj_POS_gen = eval_modelObj.pos_tag_freqdist(df_modelObj, \"generated\")\n print (\"The mean POS tag percentages for the generated files ...\")\n POS_dict_gen = df_modelObj_POS_gen.mean(axis=0)\n print (\"NOUN: {} and VERB: {}\".format(POS_dict_gen['NOUN']*100, POS_dict_gen['VERB']*100))\n\n if eval_RW:\n print (\"Calculating the rare word usage metrics ...\")\n print (\"For the generated stories ...\")\n mean_ug_prblst_ori = eval_modelObj.get_rareword_usage(df_modelObj)\n mean_ug_ori = statistics.mean(mean_ug_prblst_ori)\n print (\"The average unigram probability is {}\".format(mean_ug_ori))", "def main():\n\t# GET THE DIALOG CONTENT\n\tpolicyFlag = int(sys.argv[3])\n\tif policyFlag == 0:\n\t\texistedIterNum = 15000\n\t\texistedThetaFileName = 'algorithms/theta/cycle_tree/of0w1.0g5v0.0625l0.05'\n\telif policyFlag == 1:\n\t\texistedIterNum = 10000\n\t\texistedThetaFileName = 'algorithms/theta/cycle_tree/of0w1.0g5v0.0625l0.05Retroflex'\n\n\tqLearnOpts = {'gamma': 1.0, \n\t\t\t\t 'alpha': 0.0, \n\t\t\t\t 'epsilon': 0.0}\n\tnumOfTurn = util.returnConvertedIndexListCount('b','cycle_tree')\n\tnumofgauss = 5\n\tvar = 0.0625\n\tlamda = 0.05\n\tunitNum = 101\n\ta = qlearningAgents.FittedQLearningAgent(numOfTurn,numofgauss,var,lamda,unitNum, **qLearnOpts)\t\t\n\ta.openThetaFile(existedThetaFileName,existedIterNum)\n\n\tturnNum = int(sys.argv[1])\n\tuserUnitScore = []\n\tuserUnitScoreVector = sys.argv[2].split(',')\n\tfor i in userUnitScoreVector:\n\t\t\tuserUnitScore.append(float(i)/100.0)\n\n\tstate = State.State(turnNum, userUnitScore)\n\tprint a.getAction(state)", "def main():\n\n region = 'Kanto'\n year = 2000\n callParallelReducedGAwithP_AVR(region)\n\n region = 'EastJapan'\n year = 2000\n callParallelReducedGAwithP_AVR(region)\n\n region = 'Tohoku'\n year = 2000\n callParallelReducedGAwithP_AVR(region)\n\n region = 'Kansai'\n year = 2000\n callParallelReducedGAwithP_AVR(region)", "def main(args):\n 
\n ## Load & Preprocess data \n if args.data_name == 'amsterdam': \n file_name = '../data/amsterdam/test_longitudinal_data.csv'\n ori_data = data_preprocess(file_name, args.max_seq_len)\n \n # Divide the data into training and testing\n divided_data, _ = data_division(ori_data, seed = args.seed, divide_rates = [args.train_rate, 1-args.train_rate])\n \n train_data = np.asarray(divided_data[0])\n test_data = np.asarray(divided_data[1])\n\n print('Finish data loading: ' + str(args.data_name)) \n \n ## Run hider algorithm\n if args.hider_model == 'timegan':\n generated_data = timegan.timegan(train_data)\n elif args.hider_model == 'add_noise':\n generated_data = add_noise.add_noise(train_data, args.noise_size) \n print('Finish hider algorithm training') \n \n ## Define enlarge data and its labels\n enlarge_data = np.concatenate((train_data, test_data), axis = 0)\n enlarge_data_label = np.concatenate((np.ones([train_data.shape[0],]), np.zeros([test_data.shape[0],])), axis = 0)\n \n # Mix the order\n idx = np.random.permutation(enlarge_data.shape[0])\n enlarge_data = enlarge_data[idx]\n enlarge_data_label = enlarge_data_label[idx]\n \n ## Run seeker algorithm\n reidentified_data = knn_seeker(generated_data, enlarge_data)\n \n print('Finish seeker algorithm training') \n \n ## Evaluate the performance\n # 1. Feature prediction\n feat_idx = np.random.permutation(train_data.shape[2])[:args.feature_prediction_no]\n ori_feat_pred_perf = feature_prediction(train_data, test_data, feat_idx)\n new_feat_pred_perf = feature_prediction(generated_data, test_data, feat_idx)\n \n feat_pred = [ori_feat_pred_perf, new_feat_pred_perf]\n \n print('Feature prediction results: ' + \n '(1) Ori: ' + str(np.round(ori_feat_pred_perf, 4)) + \n '(2) New: ' + str(np.round(new_feat_pred_perf, 4)))\n \n # 2. One step ahead prediction\n ori_step_ahead_pred_perf = one_step_ahead_prediction(train_data, test_data)\n new_step_ahead_pred_perf = one_step_ahead_prediction(generated_data, test_data)\n \n step_ahead_pred = [ori_step_ahead_pred_perf, new_step_ahead_pred_perf]\n \n print('One step ahead prediction results: ' + \n '(1) Ori: ' + str(np.round(ori_step_ahead_pred_perf, 4)) + \n '(2) New: ' + str(np.round(new_step_ahead_pred_perf, 4)))\n \n # 3. 
Reidentification score\n reidentification_score = reidentify_score(enlarge_data_label, reidentified_data)\n \n print('Reidentification score: ' + str(np.round(reidentification_score, 4)))\n \n shutil.rmtree('tmp')\n \n return feat_pred, step_ahead_pred, reidentification_score", "def main():\n # set up the program to take in arguments from the command line\n parser = argparse.ArgumentParser()\n parser.add_argument(\"md\",\n type=int,\n help=\"maximum depth\")\n parser.add_argument(\"mls\",\n type=int,\n help=\"minimum leaf samples\")\n parser.add_argument(\"--xTrain\",\n default=\"q4xTrain.csv\",\n help=\"filename for features of the training data\")\n parser.add_argument(\"--yTrain\",\n default=\"q4yTrain.csv\",\n help=\"filename for labels associated with training data\")\n parser.add_argument(\"--xTest\",\n default=\"q4xTest.csv\",\n help=\"filename for features of the test data\")\n parser.add_argument(\"--yTest\",\n default=\"q4yTest.csv\",\n help=\"filename for labels associated with the test data\")\n\n args = parser.parse_args()\n # load the train and test data\n xTrain = pd.read_csv(args.xTrain)\n yTrain = pd.read_csv(args.yTrain)\n xTest = pd.read_csv(args.xTest)\n yTest = pd.read_csv(args.yTest)\n # create an instance of the decision tree using gini\n start = time.time()\n dt1 = DecisionTree('gini', args.md, args.mls)\n trainAcc1, testAcc1 = dt_train_test(dt1, xTrain, yTrain, xTest, yTest)\n print(\"GINI Criterion ---------------\")\n print(\"Training Acc:\", trainAcc1)\n print(\"Test Acc:\", testAcc1)\n dt = DecisionTree('entropy', args.md, args.mls)\n trainAcc, testAcc = dt_train_test(dt, xTrain, yTrain, xTest, yTest)\n print(\"Entropy Criterion ---------------\")\n print(\"Training Acc:\", trainAcc)\n print(\"Test Acc:\", testAcc)\n end = time.time()\n print(\"Time taken: \", end-start)", "def main(args):\n try:\n rec_path = project_path + \"/\" + args.rec\n test_data_path = project_path + \"/\" + args.test\n output_data_path = project_path + \"/\" + args.output\n\n rec = read_csv(rec_path)\n test = read_csv(test_data_path)\n\n accuracy = accuracy_calculator(rec, test)\n # Write to output file\n save_csv(accuracy, output_data_path)\n except Exception as e:\n logger.error(\"Unexpected error occurred when evaluation: \" + str(e))", "def main():\n\n # this will analyze all files in the input_files directory\n for folder in [x for x in os.listdir(os.path.join(os.getcwd(), 'test_directory')) if os.path.isdir(os.path.join(os.getcwd(), 'test_directory', x))]:\n try:\n print(f'Creating GED_Repo for files in {folder}')\n g = GED_Repo([os.path.join(os.getcwd(), 'test_directory', folder, f) for f in os.listdir(os.path.join(os.getcwd(), 'test_directory', folder)) if f.endswith('.ged')])\n g.check_data()\n g.print_data()\n g.print_individuals()\n g.print_families()\n except ValueError as v:\n print(v)\n except FileNotFoundError as f:\n print(f)", "def main():\n df = prepro_last()\n X, y = train_build(df)\n fit_store(X, y)", "def main():\n\n # Parse arguments. 
The parser will raise an exception if required arguments are not present.\n parser = argparse.ArgumentParser()\n\n subparsers = parser.add_subparsers(dest='command')\n\n # Arguments for the runtest command.\n cmd_runtest = subparsers.add_parser('runtest')\n runtest_required_named = cmd_runtest.add_argument_group('named arguments')\n runtest_required_named.add_argument('-c', '--corpus',\n help='Corpus root directory containing all speakers.',\n metavar='corpus',\n required=True)\n runtest_required_named.add_argument('-o', '--csvout',\n help='CSV output file.',\n metavar='csvout',\n required=True)\n runtest_required_named.add_argument('-i', '--impl',\n help='Test runner implementation: fast, medium or slow.',\n metavar='impl',\n required=False,\n default='fastest')\n\n # Arguments for the analyse command.\n cmd_analyse = subparsers.add_parser('analyse')\n analyse_required_named = cmd_analyse.add_argument_group('named arguments')\n analyse_required_named.add_argument('-r', '--results',\n help='Input CSV results file.',\n metavar='results',\n required=True)\n analyse_required_named.add_argument('-t', '--th_user',\n help='User-defined threshold.',\n metavar='th_user',\n required=False,\n type=float,\n default=5.79)\n\n # Parse the arguments.\n args = parser.parse_args()\n\n # Dispatch to the correct command.\n if args.command == 'runtest':\n do_runtest(args)\n elif args.command == 'analyse':\n do_analyse(args)\n else:\n raise ValueError('Unknown command {}'.format(args.command))", "def main():\n subcommands = {\n \"train\": train.train,\n \"tune\": train_tune.train,\n \"predict\": predict.cli_predict,\n \"evaluate\": evaluate.cli_evaluate,\n \"version\": version,\n }\n\n try:\n import xarray_behave.gui.app\n\n subcommands[\"gui\"] = xarray_behave.gui.app.main_das\n except (ImportError, ModuleNotFoundError):\n logging.exception(\"No GUI avalaible.\")\n # fall back to function that displays helpful instructions\n subcommands[\"gui\"] = no_xb_gui\n\n logging.basicConfig(level=logging.INFO, force=True)\n defopt.run(subcommands, show_defaults=False)", "def main():\n \n # 1. Learn a decision tree from the data in training.txt\n print \"--Building trees--\"\n train_examples = read_file('training.txt')\n print(train_examples)\n attrs = range(len(train_examples[0])-1)\n rand_tree = decision_tree_learning(train_examples, attrs, use_gain=False)\n gain_tree = decision_tree_learning(train_examples, attrs, use_gain=True)\n print \"--Done building--\"\n print\n\n # 2. Document the tree you got\n print \"--Random tree--\"\n print_tree(rand_tree)\n print\n print \"--Learn tree--\"\n print_tree(gain_tree)\n print\n\n # 3. 
Classify all examples in the test-set\n test_examples = read_file('test.txt')\n print \"--Testing random tree--\"\n test(rand_tree, test_examples, attrs)\n print\n print \"--Testing information gain tree--\"\n test(gain_tree, test_examples, attrs)\n print \"--Done testings--\"", "def main_ed_parsing(dataset):\n Application.set_input_image_folder('TestData/BSR/BSDS500/data/images/' + dataset)\n\n # Application.delete_folder_appl_out()\n # Benchmarking.delete_folder_benchmark_out()\n\n Application.do_get_image_job(port_output_name='RAW')\n Application.do_grayscale_transform_job(port_input_name='RAW', port_output_name='GRAY_RAW')\n\n list = []\n\n first_order_edge = [\n CONFIG.FILTERS.SOBEL_3x3\n ]\n\n for edge in first_order_edge:\n for kernel_gaus in [3, 5, 7, 9]:\n for grad_thr in [10, 30, 40, 50, 60, 70, 90, 110, 130, 150]:\n for anc_thr in [10, 20, 30, 40, 60]:\n for sc_int in [1, 3, 5]:\n blur = Application.do_gaussian_blur_image_job(port_input_name='GRAY_RAW', kernel_size=kernel_gaus, sigma=0)\n e3, e4 = Application.do_edge_drawing_mod_job(port_input_name=blur, operator=edge,\n gradient_thr=grad_thr, anchor_thr=anc_thr, scan_interval=sc_int,\n max_edges=100, max_points_edge=100)\n list.append(e3 + '_L0')\n\n\n Application.create_config_file()\n Application.configure_save_pictures(ports_to_save=list)\n # Application.configure_show_pictures(ports_to_show=list, time_to_show=0)\n\n # Application.run_application()\n\n # Do bsds benchmarking\n # Be ware not to activate job_name_in_port in Application.configure_save_pictures\n # Benchmarking.run_bsds500_boundary_benchmark(input_location='Logs/application_results',\n # gt_location='TestData/BSR/BSDS500/data/groundTruth/test',\n # raw_image='TestData/BSR/BSDS500/data/images/test',\n # jobs_set=list, do_thinning=False)\n\n Utils.plot_first_cpm_results(prefix='EDGE_DRAWING_MOD_', level='L0', order_by='f1', name='ed_finder_thr',\n list_of_data=list, number_of_series=25,\n inputs=[''], self_contained_list=True, set_legend_left=False,\n suffix_to_cut_legend='_S_0_GRAY_RAW_L0',\n replace_list=[('EDGE_DRAWING_MOD_THR_', 'TG='), ('_ANC_THR_', ' TA='), ('_SCAN_', ' SI='), ('_SOBEL_3x3_GAUSS_BLUR_K_', ' GK=')],\n save_plot=True, show_plot=False, set_all_to_legend=False)\n\n Utils.close_files()", "def main():\n # Return needed Data Frames to analyze\n data_frame, seasons, col, labels, stats, kaggle = load_frames()\n\n # Create the maps now\n create_shot_maps(data_frame,seasons)\n create_scenario_map()\n \n # Create the Plots\n plot_season_graphs(stats)\n plot_pie_charts(kaggle)\n plot_shot_timings(kaggle)\n plot_radar(stats, col, labels)" ]
[ "0.71552646", "0.67432594", "0.6589852", "0.65624803", "0.6458963", "0.6455297", "0.64501965", "0.63569146", "0.6244363", "0.62363696", "0.6217135", "0.6186109", "0.6183862", "0.6182799", "0.6166517", "0.61521643", "0.61434954", "0.6135868", "0.6134632", "0.6118668", "0.6110474", "0.61084354", "0.6090268", "0.6074183", "0.6062296", "0.6058603", "0.6057635", "0.6050766", "0.604348", "0.6039466", "0.60288376", "0.60210675", "0.60183495", "0.60182184", "0.6016163", "0.6015683", "0.6013694", "0.5986131", "0.59686583", "0.59658104", "0.59620464", "0.59609765", "0.59584856", "0.5946587", "0.59416395", "0.59301084", "0.59045124", "0.5894169", "0.5883734", "0.5882775", "0.5880912", "0.58808315", "0.5878351", "0.5868745", "0.5862251", "0.58614945", "0.5860652", "0.58461344", "0.5842246", "0.5832017", "0.58242095", "0.58242005", "0.5817993", "0.58146584", "0.58103484", "0.58080447", "0.58037573", "0.57832193", "0.57563406", "0.57556766", "0.57371", "0.5734638", "0.5723602", "0.57234156", "0.5718415", "0.571725", "0.57147884", "0.57105994", "0.57057256", "0.57028323", "0.5700314", "0.5699899", "0.5694214", "0.5687781", "0.56847066", "0.56788135", "0.56638485", "0.5662086", "0.56585544", "0.565698", "0.56556904", "0.5649233", "0.5645831", "0.563585", "0.56355673", "0.5635024", "0.5634326", "0.56337154", "0.56322676", "0.56289494" ]
0.6480827
4
forward procedure. No need for inputs to be sorted
def forward(self, input_seqs, input_lens, hidden=None):
    batch_size = input_seqs.size(1)
    embedded = self.embedding(input_seqs)
    embedded = embedded.transpose(0, 1)  # [B,T,E]
    sort_idx = np.argsort(-input_lens)
    unsort_idx = cuda_(torch.LongTensor(np.argsort(sort_idx)))
    input_lens = input_lens[sort_idx]
    sort_idx = cuda_(torch.LongTensor(sort_idx))
    embedded = embedded[sort_idx].transpose(0, 1)  # [T,B,E]
    packed = torch.nn.utils.rnn.pack_padded_sequence(embedded, input_lens)
    outputs, hidden = self.gru(packed, hidden)
    outputs, _ = torch.nn.utils.rnn.pad_packed_sequence(outputs)
    outputs = outputs[:,:,:self.hidden_size] + outputs[:,:,self.hidden_size:]
    outputs = outputs.transpose(0, 1)[unsort_idx].transpose(0, 1).contiguous()
    hidden = hidden.transpose(0, 1)[unsort_idx].transpose(0, 1).contiguous()
    return outputs, hidden
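A minimal self-contained sketch of the same sort-pack-unsort pattern, assuming a toy bidirectional GRU encoder; ToyEncoder, the layer sizes, and the sample batch below are illustrative stand-ins and do not come from the snippet above:

# Sketch: accept an *unsorted* padded batch, sort by length for
# pack_padded_sequence, run the GRU, then restore the original batch order.
import numpy as np
import torch
import torch.nn as nn

class ToyEncoder(nn.Module):
    def __init__(self, vocab_size=100, emb_size=16, hidden_size=32):
        super().__init__()
        self.hidden_size = hidden_size
        self.embedding = nn.Embedding(vocab_size, emb_size)
        self.gru = nn.GRU(emb_size, hidden_size, bidirectional=True)

    def forward(self, input_seqs, input_lens):          # input_seqs: [T, B]
        embedded = self.embedding(input_seqs).transpose(0, 1)   # [B, T, E]
        sort_idx = np.argsort(-input_lens)                       # longest sequence first
        unsort_idx = torch.LongTensor(np.argsort(sort_idx))      # inverse permutation
        embedded = embedded[torch.LongTensor(sort_idx)].transpose(0, 1)  # [T, B, E]
        packed = nn.utils.rnn.pack_padded_sequence(embedded, input_lens[sort_idx])
        outputs, hidden = self.gru(packed)
        outputs, _ = nn.utils.rnn.pad_packed_sequence(outputs)
        # sum the two directions, then undo the sort on the batch dimension
        outputs = outputs[:, :, :self.hidden_size] + outputs[:, :, self.hidden_size:]
        outputs = outputs.transpose(0, 1)[unsort_idx].transpose(0, 1).contiguous()
        hidden = hidden.transpose(0, 1)[unsort_idx].transpose(0, 1).contiguous()
        return outputs, hidden

# usage: lengths need not be pre-sorted by the caller
enc = ToyEncoder()
seqs = torch.zeros(5, 3, dtype=torch.long)   # [T=5, B=3], padded with token 0
lens = np.array([3, 5, 2])
out, h = enc(seqs, lens)                     # out: [5, 3, 32], h: [2, 3, 32]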
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def forward(self, input):\n raise NotImplementedError", "def forward(self, input):\n raise NotImplementedError", "def forward(self, inputs):\n raise NotImplementedError", "def forward(self, input):\n raise NotImplementedError()", "def forward(self, *inputs):\n raise NotImplementedError", "def forward(self, x):\n pass", "def _forward(self, z):\n raise NotImplementedError(\"Forward shouldn't be called!\")", "def forward(self, inputs):\r\n #print (len(inputs))\r\n out = self.fc1(inputs)\r\n out = self.fc2(out)\r\n self.out = out\r\n return out\r\n #raise NotImplementedError('Implement the forward method of the model')\r", "def forward_pass(self):", "def forward(self, x):\n raise NotImplementedError", "def forward(self, x):\n raise NotImplementedError", "def forward(self)->None:", "def forward(self, s):", "def step_forward(self):", "def forward(self, *args):\n raise NotImplementedError", "def forward(self, *args):\n raise NotImplementedError", "def forward(self, inputs):\n\n down0 = self.layer_0(inputs=inputs)\n down1 = self.layer_1(inputs=down0)\n down2 = self.layer_2(inputs=down1)\n down3 = self.layer_3(inputs=down2)\n down4 = self.layer_4(inputs=down3)\n\n up1 = self.layer_7(down4, down3)\n\n up2 = self.layer_8(up1, down2)\n\n up3 = self.layer_9(up2, down1)\n\n up4 = self.layer_10(up3, down0)\n\n up5 = self.layer_11(up4)\n return up5", "def forward(self, input):\n return input.permute(*self.perm)", "def _forward(self, X, **kwargs):\n raise NotImplementedError()", "def forward(self, states):\n raise NotImplementedError()", "def forward(self, inputs):\n _, state = self.core(inputs)\n return state", "def base_forward(self, x):\r\n pass", "def forward(self):\n pass", "def forward(self):\n pass", "def forward(self):\n raise NotImplemented", "def forward(self):\n raise NotImplemented", "def forward(self):\n raise NotImplemented", "def forward(self, x, **kwargs):\n pass", "def feed_forward(self, inputs):\n raise NotImplementedError()", "def forward(self, batch):\n raise NotImplementedError", "def forward(self, batch):\n raise NotImplementedError", "def forward(self, x_in):\r\n # x_out = torch.zeros_like(x_in)\r\n\r\n for layer in self.layers: #Call forward function of each layer in order\r\n x_out = layer.forward(x_in)\r\n # print(\"Forward pass Seq: \", layer, x_in, x_out)\r\n x_in = x_out # output of the layer is passed as input to the next layer\r\n self.temp = x_in\r\n return x_out", "def forward(self, obs):\n\t\tpass", "def forward_batch(self,batcher, phase=0):\n pass", "def _forward_impl(self, *inputs, **kwargs):\n raise NotImplementedError('Abstract method.')", "def forward(self, input_):\n if isinstance(input_, list) or isinstance(input_, tuple):\n return self.forward_batched_3d(input_)\n else:\n return self.forward_batched_2d(input_)", "def forward(self, *args, **kwargs):\n raise NotImplementedError", "def forward(self, input, context, state):\n raise NotImplementedError", "def forward(self, inputs):\n # inputs -> (B, S)\n x = self.bag(inputs) # x -> (B,)\n out = self.output(x) # out -> (output_size,)\n return out", "def forward(self,input):\n\t\traise RuntimeError(\"All subclasses of Module must implement a forward method\")", "def forward(self, input):\n return mish(input)", "def forward(self, *args, **kwargs):\n pass", "def forward_pass(self, inputs):\n self._rbf_forward(inputs)\n self._slp_forward()\n return self.slp_outputs", "def forward(self, x):\n return x", "def forward(self, x):\n return self.l1(x)", "def forward(self, input):\n\n x = self.conv(input)\n x = self.bn(x)\n out = 
self.act(x)\n return out", "def forward(self, inp, state_0):\n next_inp = inp.view(1, -1)\n next_state = state_0\n outputs = []\n for i in range(self.seq_len):\n out, next_state = self.model(next_inp, next_state)\n outputs.append(out)\n next_inp = torch.argmax(out, dim=-1)\n\n return torch.cat(outputs, dim=0), next_state", "def forward(self, obs):\n raise NotImplementedError", "def forward(self, output, target):\n raise NotImplementedError", "def forward(self, input, target):\n\n #return self.bce(input_, target)\n return self.bce(input, target)", "def reversesort(self):\n ...", "def forward(self, inputs, outputs):\n if len(inputs) > 1:\n np.copyto(outputs[0], np.sum(inputs, 0))\n else:\n np.copyto(outputs[0], inputs[0])", "def forward(ctx, input):\n ctx.save_for_backward(input) # save input for backward pass\n\n # get lists of odd and even indices\n output = torch.ones_like(input)\n \n return output", "def forward(self, x):\n return self.main(x)", "def _forwardImplementation(self, inbuf, outbuf):\n assert self.module\n \n values = self.module.getActionValues(self.state) \n \n actions = []\n if random() <= self.exploration:\n for i in range(self.shield_options):\n new_action = choice(range(len(values))) \n np.delete(values, new_action)\n actions.append(new_action)\n else:\n for i in range(self.shield_options):\n new_action = where(values == max(values))[0]\n new_action = choice(new_action) \n np.delete(values, new_action)\n actions.append(new_action)\n \n while len(actions) < self.outdim:\n actions.append(-1)\n \n outbuf[:] = actions", "def forward(self, input):\n return input.view(input.size(0), -1)", "def forward(self, input_):\n assert input_.size()[0] % self.pac == 0\n return self.seq(input_.view(-1, self.pacdim))", "def _forwardImplementation(self, inbuf, outbuf):\n assert self.module\n \n values = self.module.getActionValues(self.state) \n n_values = self.n_values.getActionValues(self.state)\n values = map(lambda x, y: x + self.exploration * (sqrt(2 * log(self.experiment.stepid, 2) / y) if y > 0 else 1000), values, n_values);\n \n actions = []\n for i in range(self.shield_options):\n new_action = where(values == max(values))[0]\n new_action = choice(new_action) \n values[new_action] = -10000\n actions.append(new_action)\n \n while len(actions) < self.outdim:\n actions.append(-1)\n \n outbuf[:] = actions", "def forward(self, x):\n #delete all cts\n #self.cts = [self.cts[-1]]\n \n #forward\n length, batch, dim = x.shape\n res = []\n res.append(self.one_step(x[0], torch.zeros((batch, self.latent_size), dtype=torch.float)))\n\n for i in range(1,length):\n res.append(self.one_step(x[i], res[i-1]))\n\n return torch.stack(res)", "def forward(self, *args: Any, **kwargs: Any) -> None:\n self._check_for_increment(\"forward\")\n return self[-1](*args, **kwargs)", "def forward(self, input_):\n data = self.seq(input_)\n return data", "def forward(self, x):\n x = self.input(x)\n x = self.in0(x)\n x = self.block0(x) + x\n x = self.block1(x) + x\n x = self.block2(x) + x\n x = self.block3(x) + x\n x = self.block4(x) + x\n x = self.in0(x)\n\n out = self.out(x)\n\n return out", "def forward(self, x):\n x = self.conv(x)\n x = self.bn(x)\n x = self.act(x)\n return x", "def forward(self, x):\n x = self.conv(x)\n x = self.bn(x)\n x = self.act(x)\n return x", "def forward(self, *args, **kwargs):\n\n raise NotImplementedError()", "def _walk_forward(self, step_fn, x, **kwargs):\n for bij in reversed(self._bijectors):\n x = step_fn(bij, x, **kwargs.get(bij.name, {}))\n return x # Now `y`", "def forward(self, 
x):\n x = self.main(x)\n return x", "def forward(self, x):\n x = self.main(x)\n return x", "def forward(self, x):\n residues = []\n # Downward Pass\n x = self.layers[0](x.unsqueeze(1))\n for layer in self.layers[1:self.half]:\n x = layer(x)\n residues.insert(0, x)\n\n # Upward Pass\n for idx, layer in enumerate(self.layers[self.half:(len(self.layers)-1)]):\n x = layer(x, residues[idx])\n x = self.layers[-1](x)\n\n return(x)", "def forward(self, inputs: Union[torch.Tensor, PackedSequence], hx: torch.Tensor = None) -> \\\n Tuple[torch.Tensor, Union[Tuple[torch.Tensor], torch.Tensor]]:\n\n self._apply_gating_logic()\n\n inputs, packed_sequence_info = _get_inputs_and_packed_sequence_info(inputs, self.batch_first)\n\n # if input is set to batch first, reformat to set timestep as to first dim, followed by batch\n if self.batch_first:\n inputs = inputs.permute(1, 0, 2)\n\n steps = inputs.shape[0]\n batches = inputs.shape[1]\n stacked_hx = []\n output = []\n\n with self._quantize_dequantize_params(inputs) as (quantized_params, _inputs):\n for layer in range(self.num_layers):\n # Quantize the inputs\n quantized_input = self._quantize_activation(self._input_quantizers['input_l{}'.format(layer)], _inputs)\n\n output = []\n reverse_pass_output = []\n for direction in range(self.num_directions):\n permutation = None if not packed_sequence_info else packed_sequence_info.unsorted_indices\n update_initial_hx_encoding_stats, initial_hx = \\\n self._intialize_quantize_hidden_state(batches, _inputs, layer, hx, permutation=permutation)\n cell_hx = initial_hx\n\n param = [quantized_params[p] for p in self._get_param_names(direction, layer)]\n weight_ih, weight_hh, *bias = param\n bias_ih, bias_hh = bias if bias else (None, None)\n\n if direction == 1:\n quantized_input = _get_flipped_input_for_reverse_pass(quantized_input, packed_sequence_info, steps)\n\n for iteration in range(steps):\n\n new_cell_hx = self.rnn_impl_map[self.mode](quantized_input[iteration],\n cell_hx,\n weight_ih,\n weight_hh,\n bias_ih,\n bias_hh)\n\n # Replace rows in the hidden state corresponding to valid inputs in the batch\n cell_hx = _replace_appropriate_hidden_state_rows(cell_hx, new_cell_hx, packed_sequence_info,\n iteration, batches)\n # Quantize the outputs\n cell_hx = self._quantize_hidden_cell_state(layer, cell_hx)\n\n if direction == 0:\n output.append(cell_hx[0] if isinstance(cell_hx, tuple) else cell_hx)\n else:\n if not reverse_pass_output:\n reverse_pass_output = [None] * (steps * batches)\n _fill_appropriate_rows_in_reverse_pass_output(reverse_pass_output,\n packed_sequence_info,\n steps,\n batches,\n iteration,\n cell_hx)\n stacked_hx.append(cell_hx)\n if update_initial_hx_encoding_stats:\n self._update_encoding_stats_with_initial_hidden_state(initial_hx, layer)\n\n if reverse_pass_output:\n _concatenate_output_with_reverse_pass_output(output, reverse_pass_output, self.hidden_size, steps,\n batches, _inputs.device)\n\n # convert a list output tensors to a single tensor\n output = torch.stack(output)\n\n # if configured for more than one layer, the quantized output is fed back as input to next layer\n if self.num_layers > 1:\n _inputs = output\n\n # if input is set to batch first, reformat to set batch back to first dim\n if self.batch_first:\n output = output.permute(1, 0, 2)\n\n output, stacked_hx = _reformat_output_and_stacked_hx_for_packed_sequence(output,\n stacked_hx,\n self.batch_first,\n packed_sequence_info)\n hx = QcQuantizeRecurrent._format_hx_output(stacked_hx)\n\n return output, hx", "def forward(self, 
input_ids):\n # pdb.set_trace()\n batch_size, binary_max_size, d_model = input_ids.shape\n\n inserted = torch.clone(self.inserted_vector)\n inserted = inserted.expand(batch_size, 1, -1)\n\n batch = torch.cat((inserted, input_ids), dim=1)\n\n batch = batch.permute(1, 0, 2).contiguous()\n # tmp `(binary_max_size + 1, batch_size, d_model)`\n tmp = self.transformer(batch)\n tmp = tmp.permute(1, 0, 2).contiguous()\n\n return self.top_headlayer(tmp[:, 0, :])", "def forward(self, x1, x2, **params):\n raise NotImplementedError()", "def forward(self, input, h0=None):\n batch_size = input.shape[1]\n if h0 is None:\n num_directions = 2 if self.bidirectional else 1\n h0 = Core.zeros((self.num_layers * num_directions,\n batch_size, self.hidden_size),\n dtype=input.dtype, device=input.device)\n self.check_forward_args(input, h0)\n len_seq = input.shape[0]\n input_size = self.input_size\n outputs = []\n hx = []\n input, h0 = self.preprocess_args(input, h0)\n for t in range(len_seq):\n for i in range(self.num_layers):\n w_ih = self.__getattr__(\"weight_ih_l{}1\".format(i))", "def forward(self, inputs):\n\n down1, indices_1, unpool_shape1 = self.layer_1(inputs=inputs,\n layer_size=2)\n down2, indices_2, unpool_shape2 = self.layer_2(inputs=down1,\n layer_size=2)\n down3, indices_3, unpool_shape3 = self.layer_3(inputs=down2,\n layer_size=3)\n down4, indices_4, unpool_shape4 = self.layer_4(inputs=down3,\n layer_size=3)\n down5, indices_5, unpool_shape5 = self.layer_6(inputs=down4,\n layer_size=3)\n\n inter = self.layer_inter(down5)\n\n up1 = self.layer_7(inputs=inter, indices=indices_5, layer_size=3)\n\n up2 = self.layer_8(inputs=up1, indices=indices_4, layer_size=3)\n\n up3 = self.layer_9(inputs=up2, indices=indices_3, layer_size=3)\n\n up4 = self.layer_10(inputs=up3, indices=indices_2, layer_size=2)\n\n up5 = self.layer_11(inputs=up4, indices=indices_1, layer_size=2)\n return up5", "def forward(self, input):\n\n common = self.common_tower(input)\n wdl = self.wdl_head(common)\n policy = self.policy_head(common)\n\n return wdl, policy", "def pre_forward(self, *args, **kwargs):\n self.allocate_parameter()\n return args, kwargs", "def forward_graph(self):\n raise NotImplementedError", "def forward(self, inputs):\n inputs = inputs.transpose(1, 2).unsqueeze(2).contiguous()\n internal_outputs = self.conv_blocks(inputs)\n outputs = []\n for idx in range(self.joint_count):\n outputs.append(self.fc_layer[idx](internal_outputs[:, :, 0, idx]))\n return torch.cat(outputs, 1), internal_outputs", "def forward(self):\n self.position += 1", "def forward(self, shape, *args):\n #TODO\n return None", "def forward(self, x):\n output1, output2 = self.model(x)\n return output1, output2", "def forward(self, input1, input2):\n output1 = self.forward_once(input1)\n output2 = self.forward_once(input2)\n return output1, output2", "def test_forward(self):\n validate_forward()", "def forward(self, in_tensors: List[Tensor], out_tensors: List[Tensor]):\n pass", "def forward(self, in_tensors: List[Tensor], out_tensors: List[Tensor]):\n pass", "def forward(self, x):\n\n out = self.model(x)\n\n return out", "def _pre_forward(\n self,\n handles: List[FlatParamHandle],\n unshard_fn: Optional[Callable],\n module: nn.Module,\n input: Any,\n ):\n self.training_state = TrainingState_.FORWARD\n self._exec_order_data.record_pre_forward(handles, self.training)\n for handle in handles:\n handle._training_state = HandleTrainingState.FORWARD\n if unshard_fn is not None:\n unshard_fn()\n # Register post-backward hooks to reshard the parameters and\n # 
reduce-scatter their gradients. They must be re-registered every\n # forward pass in case the `grad_fn` is mutated.\n self._register_post_backward_hooks(handles)", "def forward(self, inputs):\n\n down1, indices_1, unpool_shape1 = self.layer_1(inputs=inputs,\n layer_size=2)\n down2, indices_2, unpool_shape2 = self.layer_2(inputs=down1,\n layer_size=2)\n down3, indices_3, unpool_shape3 = self.layer_3(inputs=down2,\n layer_size=3)\n down4, indices_4, unpool_shape4 = self.layer_4(inputs=down3,\n layer_size=3)\n down5, indices_5, unpool_shape5 = self.layer_5(inputs=down4,\n layer_size=3)\n down6, indices_6, unpool_shape6 = self.layer_6(inputs=down5,\n layer_size=3)\n up5 = self.layer_7(inputs=down6, indices=indices_6,\n output_shape=unpool_shape6, layer_size=3)\n up4 = self.layer_8(inputs=up5, indices=indices_5,\n output_shape=unpool_shape5, layer_size=3)\n up3 = self.layer_9(inputs=up4, indices=indices_4,\n output_shape=unpool_shape4, layer_size=3)\n up2 = self.layer_10(inputs=up3, indices=indices_3,\n output_shape=unpool_shape3, layer_size=3)\n up1 = self.layer_11(inputs=up2, indices=indices_2,\n output_shape=unpool_shape2, layer_size=2)\n output = self.layer_12(inputs=up1, indices=indices_1,\n output_shape=unpool_shape1, layer_size=2)\n\n return output", "def forward(self, inputs):\n\n down1, indices_1, unpool_shape1 = self.layer_1(inputs=inputs,\n layer_size=2)\n down2, indices_2, unpool_shape2 = self.layer_2(inputs=down1,\n layer_size=2)\n down3, indices_3, unpool_shape3 = self.layer_3(inputs=down2,\n layer_size=3)\n down4, indices_4, unpool_shape4 = self.layer_4(inputs=down3,\n layer_size=3)\n down5, indices_5, unpool_shape5 = self.layer_5(inputs=down4,\n layer_size=3)\n\n up5 = self.layer_6(inputs=down5, indices=indices_5,\n output_shape=unpool_shape5, layer_size=3)\n up4 = self.layer_7(inputs=up5, indices=indices_4,\n output_shape=unpool_shape4, layer_size=3)\n up3 = self.layer_8(inputs=up4, indices=indices_3,\n output_shape=unpool_shape3, layer_size=3)\n up2 = self.layer_9(inputs=up3, indices=indices_2,\n output_shape=unpool_shape2, layer_size=2)\n output = self.layer_10(inputs=up2, indices=indices_1,\n output_shape=unpool_shape1, layer_size=2)\n\n return output", "def forward_batch(self,batcher,phase=0):\n mapped_results={}\n inputs=batcher.get_batched_input(mapper=self)\n for type_ in inputs.keys():\n mapper = self.mappers[type_]\n\n mapped_results[type_] = mapper.forward_batch(inputs[type_],phase=0)\n return mapped_results", "def forward(self, inputs):\n x = equiangular_calculator(inputs, self.ratio)\n x = x.permute(0, 3, 1, 2)\n\n if self.return_indices:\n x, indices = F.max_pool2d(x, self.kernel_size, return_indices=self.return_indices)\n else:\n x = F.max_pool2d(x, self.kernel_size)\n x = reformat(x)\n\n if self.return_indices:\n output = x, indices\n else:\n output = x\n\n return output", "def forward(self, inp):\n out = self.features(inp)\n out = out.view(out.size(0), -1) # linearized the output of the module 'features'\n out = self.classifier(out)\n return out", "def forward(self, input_xyz, input_dir):\n\n xyz_ = input_xyz\n for i in range(self.D_1):\n if i in self.skips:\n xyz_ = torch.cat([input_xyz, xyz_], -1)\n xyz_ = getattr(self, f\"xyz_encoding_{i + 1}\")(xyz_)\n\n mid_input=torch.cat([xyz_,input_dir],dim=-1)\n\n for i in range(self.D_2):\n if i==0:\n xyz_=getattr(self, f\"direction_encoding_{i + 1}\")(mid_input)\n else:\n xyz_ = getattr(self, f\"direction_encoding_{i + 1}\")(xyz_)\n\n out=self.visibility(xyz_)\n\n return out", "def forward(self, inputs):\n return 
np.maximum(0, inputs)", "def forward(self, x):\n out = self.pre_processing(x)\n out = self.a3(out)\n out = self.b3(out)\n out = self.maxpool(out)\n out = self.a4(out)\n out = self.b4(out)\n out = self.c4(out)\n out = self.d4(out)\n out = self.e4(out)\n out = self.maxpool(out)\n out = self.a5(out)\n out = self.b5(out)\n out = self.avgpool(out)\n out = out.view(out.size(0), -1) # reshape the output tensor\n out = self.linear(out)\n\n return out", "def forward(self, xs, Vs = None, keep = 0):\n \n # save inputs\n xs = copy.deepcopy(xs)\n Vs = copy.deepcopy(Vs)\n self.xs = xs\n self.Vs = Vs\n \n # prepare xs and Vs\n # -----------------\n rx_list = []\n for nx,x in enumerate(xs):\n if numpy.isscalar(x):\n x = numpy.asarray([x])\n numpy.testing.assert_array_almost_equal(self.independentVariableShapeList[nx], numpy.shape(x), err_msg = '\\ntaped xs[%d].shape != forward xs[%d]\\n'%(nx,nx))\n rx = numpy.ravel(x)\n rx_list.append(rx)\n self.x = numpy.concatenate(rx_list)\n \n if Vs is not None:\n rV_list = [] \n for nV,V in enumerate(Vs):\n V_shp = numpy.shape(V)\n try:\n numpy.testing.assert_array_almost_equal(self.independentVariableShapeList[nV], V_shp[:-2])\n except:\n raise ValueError('taped independentVariableShapeList = %s\\n but supplied Vs = %s'%(str(self.independentVariableShapeList), str(list(map(numpy.shape, Vs)))))\n rV_list.append(numpy.reshape(V, (numpy.prod(V_shp[:-2]),) + V_shp[-2:]))\n self.V = numpy.ascontiguousarray(numpy.concatenate(rV_list,axis=0))\n \n # run the ADOL-C functions\n # ------------------------\n if Vs is None:\n self.y = wrapped_functions.zos_forward(self.tape_tag, self.x, keep=keep)\n \n else:\n N,P,D = self.V.shape\n if keep == 0:\n self.y,self.W = wrapped_functions.hov_forward(self.tape_tag, self.x, self.V)\n \n elif P == 1:\n Vtmp = self.V.reshape((N,D))\n self.y,Wtmp = wrapped_functions.hos_forward(self.tape_tag, self.x, Vtmp, keep)\n M = Wtmp.shape[0]\n self.W = Wtmp.reshape((M,P,D))\n \n elif P > 1 and keep > 0:\n raise NotImplementedError('ADOL-C doesn\\'t support higher order vector forward with keep!\\n \\\n workaround: several runs forward with P=1')\n \n # prepare outputs\n # ---------------\n self.ys = []\n count = 0\n for ns, s in enumerate(self.dependentVariableShapeList):\n M_ns = numpy.prod(s)\n self.ys.append(self.y[count:count+M_ns].reshape(s))\n count += M_ns\n \n if Vs is not None:\n self.Ws = []\n count = 0\n for ns, s in enumerate(self.dependentVariableShapeList):\n M_ns = numpy.prod(s)\n self.Ws.append(self.W[count:count+M_ns,:,:].reshape(s+(P,D)))\n count += M_ns\n \n # return outputs\n # --------------\n if Vs is None:\n return self.ys\n else:\n return (self.ys, self.Ws)", "def _post_forward(\n self,\n handles: List[FlatParamHandle],\n reshard_fn: Optional[Callable],\n module: nn.Module,\n input: Any,\n output: Any,\n ) -> Any:\n self._exec_order_data.record_post_forward(handles)\n if reshard_fn is not None:\n reshard_fn()\n # Register pre-backward hooks to unshard the flattened parameters\n # for the gradient computation (if needed)\n output = self._register_pre_backward_hooks(output, handles)\n self.training_state = TrainingState_.IDLE\n for handle in handles:\n handle._training_state = HandleTrainingState.IDLE\n return output", "def on_iter_forward(self, runner):\n # unpack features into features and targets\n *features, target = runner.batch\n # Forward features\n runner.output = runner.model(*features)\n # Ensure `targetL` and `outputL` are always in a list format.\n targetL = [target] if not isinstance(target, (list, tuple)) else 
target\n outputL = [runner.output] if not isinstance(runner.output, (list, tuple)) else runner.output\n # Compute loss\n runner.loss = runner.criterion(*outputL, *targetL)\n runner.target = target", "def forward(cls, linear_out):\n raise Exception(\"Unimplemented\")", "def feedforward(self, input):\n self.ys[0] = input\n for i in range(1, len(self.ws)):\n self.zs[i] = self.ws[i] @ self.ys[i - 1] + self.bs[i]\n self.ys[i] = self.g(self.zs[i])\n return self.ys[-1]", "def forward(self, x):\n\n def run0(x, dummy):\n lout1 = self.lconv1(x)\n out1 = self.conv1(lout1)\n lout2 = self.lconv2(out1 + lout1)\n out2 = self.conv2(lout2)\n lout3 = self.lconv3(out2 + lout2)\n out3 = self.conv3(lout3)\n lout4 = self.lconv4(out3 + lout3)\n out4 = self.conv4(lout4)\n lout5 = self.lconv5(out4 + lout4)\n out5 = self.conv5(lout5)\n lout6 = self.lconv6(out5 + lout5)\n out6 = self.conv6(lout6)\n lout7 = self.lconv7(out6 + lout6)\n out7 = self.conv7(lout7)\n mat = out7[:, :, :, None] + out7[:, :, None, :]\n cur = mat\n if self.num_1d:\n output1d = self.final_1d(out7)\n return cur, output1d\n else:\n return cur\n\n dummy = torch.Tensor(1)\n dummy.requires_grad = True\n if self.num_1d:\n cur, output1d = checkpoint(run0, x, dummy)\n else:\n cur = checkpoint(run0, x, dummy)\n\n def run1(cur):\n first = True\n for lm, m in zip(self.lconvtwos[:7], self.convtwos[:7]):\n if first:\n cur = lm(cur)\n\n first = False\n else:\n cur = lm(cur) + cur\n cur = m(cur) + cur\n return cur\n\n def run2(cur):\n for lm, m in zip(self.lconvtwos[7:13], self.convtwos[7:13]):\n cur = lm(cur) + cur\n cur = m(cur) + cur\n return cur\n\n def run3(cur):\n for lm, m in zip(self.lconvtwos[13:], self.convtwos[13:]):\n cur = lm(cur) + cur\n cur = m(cur) + cur\n\n cur = self.final(cur)\n cur = 0.5 * cur + 0.5 * cur.transpose(2, 3)\n return cur\n\n cur = checkpoint(run1, cur)\n cur = checkpoint(run2, cur)\n cur = checkpoint(run3, cur)\n\n if self.num_1d:\n return cur, output1d\n else:\n return cur" ]
[ "0.7305762", "0.7305762", "0.72925854", "0.71346843", "0.70455575", "0.69174004", "0.6737259", "0.6706367", "0.665543", "0.6645303", "0.6645303", "0.6637352", "0.66037333", "0.6563919", "0.6515652", "0.6515652", "0.6511204", "0.64887595", "0.64721954", "0.6462899", "0.6424968", "0.6403299", "0.6392008", "0.6392008", "0.63863075", "0.63863075", "0.63863075", "0.63668245", "0.6361342", "0.6357749", "0.6357749", "0.62728626", "0.6245518", "0.62423366", "0.6241445", "0.61798096", "0.6158809", "0.61424524", "0.613605", "0.6123887", "0.6114315", "0.6112856", "0.6109077", "0.61043787", "0.6103485", "0.6065727", "0.6012663", "0.60017276", "0.59906065", "0.5970359", "0.59450424", "0.5934527", "0.5922677", "0.59206617", "0.59172654", "0.5911421", "0.5898263", "0.58960253", "0.5891557", "0.58854264", "0.5879639", "0.5875358", "0.58712125", "0.58712125", "0.58320045", "0.5829737", "0.5824499", "0.5824499", "0.5822029", "0.5807168", "0.58051646", "0.57971114", "0.57765454", "0.5775367", "0.5772603", "0.577073", "0.57697576", "0.57574", "0.57559544", "0.5751512", "0.5743881", "0.57432204", "0.57380116", "0.5724735", "0.5724735", "0.5723273", "0.5717771", "0.570694", "0.5706116", "0.56930155", "0.56855255", "0.56830233", "0.56797945", "0.5670657", "0.567015", "0.5663607", "0.56603634", "0.5656362", "0.5655943", "0.56534433", "0.56525254" ]
0.0
-1
Used in ``mezzanine.pages.views.page`` to ensure ``PageMiddleware`` or a subclass has been installed. We cache the result on the ``PageMiddleware._installed`` to only run this once.
def installed(cls):
    try:
        return cls._installed
    except AttributeError:
        name = "mezzanine.pages.middleware.PageMiddleware"
        installed = middlewares_or_subclasses_installed([name])
        setattr(cls, "_installed", installed)
        return installed
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def Install (self):\n if self in sys.meta_path:\n return\n sys.meta_path.insert (0, self)", "def is_installed(self):\n pass", "def pre_installation(self):\n pass", "def autodiscover():\n from django.utils.importlib import import_module\n global LOADED\n if LOADED:\n return\n LOADED = True\n for app in settings.INSTALLED_APPS:\n try:\n import_module(\"%s.page_widgets\" % app)\n except ImportError, e:\n if \"WidgetModel\" in \"%s\" % e:\n traceback.print_exc(file=sys.stdout)\n pass", "def on_load_middleware():\n\n # protect middleware wrapping: only a single thread proceeds\n global load_middleware_lock # lock gets overwritten as None after init\n if not load_middleware_lock: # already initialized? abort\n return\n mwlock = load_middleware_lock\n mwlock.acquire() # acquire global lock\n if not load_middleware_lock: # check again\n mwlock.release() # abort\n return\n load_middleware_lock = None # mark global as \"init done\"\n\n try:\n # middleware hooks\n from django.conf import settings\n for i in settings.MIDDLEWARE_CLASSES:\n if i.startswith('oboe'):\n continue\n dot = i.rfind('.')\n if dot < 0 or dot+1 == len(i):\n continue\n objname = i[dot+1:]\n imports.whenImported(i[:dot],\n functools.partial(middleware_hooks, objname=objname)) # XXX Not Python2.4-friendly\n\n # ORM\n if oboe.config['inst_enabled']['django_orm']:\n from oboeware import inst_django_orm\n imports.whenImported('django.db.backends', inst_django_orm.wrap)\n\n # templates\n if oboe.config['inst_enabled']['django_templates']:\n from oboeware import inst_django_templates\n import django\n if StrictVersion(django.get_version()) >= StrictVersion('1.3'):\n imports.whenImported('django.template.base', inst_django_templates.wrap)\n else:\n imports.whenImported('django.template', inst_django_templates.wrap)\n\n # load pluggaable instrumentation\n from loader import load_inst_modules\n load_inst_modules()\n\n # it's usually a tuple, but sometimes it's a list\n if type(settings.MIDDLEWARE_CLASSES) is tuple:\n settings.MIDDLEWARE_CLASSES = ('oboeware.djangoware.OboeDjangoMiddleware',) + settings.MIDDLEWARE_CLASSES\n elif type(settings.MIDDLEWARE_CLASSES) is list:\n settings.MIDDLEWARE_CLASSES = ['oboeware.djangoware.OboeDjangoMiddleware'] + settings.MIDDLEWARE_CLASSES\n else:\n print >> sys.stderr, \"Oboe error: thought MIDDLEWARE_CLASSES would be either a tuple or a list, got \" + \\\n str(type(settings.MIDDLEWARE_CLASSES))\n\n finally: # release instrumentation lock\n mwlock.release()\n\n try:\n add_rum_template_tags()\n except Exception, e:\n print >> sys.stderr, \"Oboe error: couldn't add RUM template tags: %s\" % (e,)", "def __bool__(self):\n return self.installed", "def page_setup(self):\n return self.container['page_setup']", "def set_installed(self):\n self._installed = True", "def do_post_install(self, context):\n pass", "def is_installed(cls):\n return find_spec_or_loader(cls.module) is not None", "def on_install(self, request, trigger_context):\n raise NotImplementedError", "def setup_page(self):\n raise NotImplementedError", "def __init__(self, get_response):\n if not settings.PRODUCTION_ENVIRONMENT and not settings.TESTING:\n self.get_response = get_response\n else:\n raise MiddlewareNotUsed()", "def installed(self):\n if self._installed is None:\n self._installed = (self.path is not None)\n return self._installed", "def setup_page(self):\r\n raise NotImplementedError", "def process_request(self, request): # pylint: disable=R0201\n\n error = (\"The Django CAS middleware requires authentication \"\n \"middleware to be 
installed. Edit your MIDDLEWARE_CLASSES \"\n \"setting to insert 'django.contrib.auth.middleware.\"\n \"AuthenticationMiddleware'.\")\n assert hasattr(request, 'user'), error", "def load_middleware(*args, **kwargs):\n inject_middleware()\n BaseHandler.load_middleware = original_load_middleware\n return original_load_middleware(*args, **kwargs)", "def _install(self):\n\n pass", "def is_installed(self):\n return not self.dont_install", "def _auto_discover(self):\n if self._initialized:\n return\n\n from django.conf import settings\n from django.utils.importlib import import_module\n from django.utils.module_loading import module_has_submodule\n\n self._initialized = True\n for app in settings.INSTALLED_APPS:\n mod = import_module(app)\n # Attempt to import the app's panels module.\n try:\n import_module('%s.panels' % app)\n except:\n # Decide whether to bubble up this error. If the app just\n # doesn't have an panels module, we can ignore the error\n # attempting to import it, otherwise we want it to bubble up.\n if module_has_submodule(mod, 'panels'):\n raise", "def ready(self):\n from django_sites_extensions import models\n from django_sites_extensions import signals", "def __init__(self):\n if not self.is_installed():\n self.install()\n else:\n self.load_users()", "def inject_middleware():\n if 'appmap.django.Middleware' not in settings.MIDDLEWARE:\n settings.MIDDLEWARE.insert(0, 'appmap.django.Middleware')", "def can_load_page(func):\n\n @wraps(func)\n def wrapper(self, *args, **kwargs):\n expect_loading = False\n if 'expect_loading' in kwargs:\n expect_loading = kwargs['expect_loading']\n del kwargs['expect_loading']\n if expect_loading:\n self._loaded = False\n result = func(self, *args, **kwargs)\n self.wait_for_page_loaded()\n return result\n return func(self, *args, **kwargs)\n\n return wrapper", "def installed(self) -> bool:\n return self._installed", "def middleware(self, *args, **kwargs):\n return super(Blueprint, self).middleware(*args, **kwargs)", "def _install(self):\n # Default implementation\n for pm_name, package in self._provider_package.items():\n if helpers[pm_name]:\n helpers[pm_name].install_package(package)\n return\n raise self.unsure_how_to_install()", "def post_installation(self, exc_value):\n pass", "def page_setup(self, page_setup):\n\n self.container['page_setup'] = page_setup", "def on_registered(self):\r\n super().on_registered()\r\n\r\n # Register type information\r\n cls = self.__class__\r\n\r\n subclass_cache = cls._of_subclass_cache\r\n type_cache = cls._of_type_cache\r\n\r\n # Cache subtypes\r\n for base_cls in cls.__mro__:\r\n try:\r\n instances = subclass_cache[base_cls]\r\n\r\n except KeyError:\r\n instances = subclass_cache[base_cls] = set()\r\n\r\n instances.add(self)\r\n\r\n # Cache the type\r\n try:\r\n instances = type_cache[cls]\r\n\r\n except KeyError:\r\n instances = type_cache[cls] = set()\r\n\r\n instances.add(self)\r\n\r\n ReplicableRegisteredSignal.invoke(target=self)", "def _populate(self):\n if self.loaded:\n return\n # Note that we want to use the import lock here - the app loading is\n # in many cases initiated implicitly by importing, and thus it is\n # possible to end up in deadlock when one thread initiates loading\n # without holding the importer lock and another thread then tries to\n # import something which also launches the app loading. 
For details of\n # this situation see #18251.\n imp.acquire_lock()\n try:\n if self.loaded:\n return\n for app_name in settings.INSTALLED_APPS:\n if app_name in self.handled:\n continue\n self.load_app(app_name, True)\n if not self.nesting_level:\n for app_name in self.postponed:\n self.load_app(app_name)\n self.loaded = True\n finally:\n imp.release_lock()", "def pre_install(self, installable_pkgs):\n pass", "def request_plugins(self):", "def install_render_backend(self, backend, version):\n # FIXME That not only doesn't work well, it doesn't work at all!\n for plugin in RenderManager.__subclasses__(): \n self.render_backends[plugin.__name__] = plugin\n self.logger.debug('Registered backends: %s ' % self.render_backends)\n\n # If reqested backend was found in correct version make it the manager:\n if not backend in self.render_backends.keys():\n return False\n else:\n if version:\n if self.render_backends[backend].version == version:\n self.manager = self.render_backends[backend]()\n else:\n # No backend with requested version found.\n return False\n else:\n self.manager = self.render_backends[backend]()\n return True", "def test_dependencies_installed(self):\n installer = getattr(self.portal, 'portal_quickinstaller')\n self.assertTrue(installer.isProductInstalled('plone.app.dexterity'))", "def install(self):\n if self.installed:\n return\n if not self.installable:\n raise self.unsure_how_to_install()\n logger.notice(\"Installing '%s'...\", self.name)\n # Call the subclass implementation\n self._install()\n # Make sure it actually performed as promised\n if not self.path:\n raise HelperNotFoundError(\n 1,\n \"Installation did not raise an exception, but afterward, \"\n \"unable to locate {0}!\".format(self.name))\n\n logger.notice(\"Successfully installed '%s'\", self.name)", "def activate(self):\n not_in_path = self._path_entry not in sys.path\n if not_in_path:\n sys.path.append(self._path_entry)\n\n try:\n with self.cache:\n yield self.inject(Pipeline)\n finally:\n if not_in_path:\n sys.path.remove(self._path_entry)\n\n imported_modules = [\n name\n for name, module in sys.modules.items()\n if (filename := getattr(module, \"__file__\", None))\n and filename.startswith(self._path_entry)\n ]\n\n for name in imported_modules:\n del sys.modules[name]", "def on_homepage(self):\n # While it's easy enough to just query the default manager\n # to do this, providing this convenience method abstracts\n # away the way homepage items are designated in case\n # we change the way that designation is done.\n return self.filter(on_homepage=True)", "def post_setup(self, context):\n pass", "def DebugMenuProviderMixin_on_setup(self):\n pass", "def load_site_if_needed(self):\n self.site.reload_if_needed()", "def __call__(cls, *args, **kwargs):\n if cls not in cls._instance:\n cls._instance[cls] = super(Metaclass, cls).__call__(*args, **kwargs)\n return cls._instance[cls]", "def pre_install_pkg(self, installable_pkg):\n pass", "def post_install(self, installable_pkgs):\n pass", "def install(self):\n raise NotImplementedError", "def _check_required_if_provider(self):\n return", "def dm_setup(self):\n dispatcher.connect(\n self.dequeue_next_page_requests,\n signal=signals.spider_idle\n )\n self._was_setup_called = True", "def deferred_class_link_app(mounted, model, variables):\n return None", "def before_request():\r\n\r\n\tinit_classes()", "def enable(cls):\r\n cls.disable()\r\n sys.meta_path.insert(0, cls())", "def test_installed(self):\n # OSA script should have been installed in setUp function\n 
self.assertTrue(self.run_function(\"assistive.installed\", [OSA_SCRIPT]))\n # Clean up install\n self.run_function(\"assistive.remove\", [OSA_SCRIPT])\n # Installed should now return False\n self.assertFalse(self.run_function(\"assistive.installed\", [OSA_SCRIPT]))", "def prePresent(self, request):", "def setup(self):\n\n if self.has_setup():\n self.logger.info(\"%s has a pre-flight setup routine. Running now.\" % self.plugin_dict_name)\n self._module.setup(self, self.versions)", "def ensure_autodiscover():\n if not (form_element_plugin_registry._registry\n and form_handler_plugin_registry._registry\n and theme_registry._registry):\n autodiscover()", "def app_cache_ready(self):\n return self.loaded", "def require_add(next=None, internal=None, on_install=None):\n def decorator(view):\n def newview(request, *args, **kwargs):\n next = newview.next\n internal = newview.internal\n\n try:\n fb = request.facebook\n except:\n raise ImproperlyConfigured('Make sure you have the Facebook middleware installed.')\n\n if internal is None:\n internal = request.facebook.internal\n\n if callable(next):\n next = next(request.path)\n elif isinstance(next, int):\n next = '/'.join(request.path.split('/')[next + 1:])\n elif next is None and fb.callback_path and request.path.startswith(fb.callback_path):\n next = request.path[len(fb.callback_path):]\n else:\n next = ''\n\n if not fb.check_session(request):\n if fb.added:\n if request.method == 'GET' and fb.app_name:\n return fb.redirect('%s%s' % (fb.get_app_url(), next))\n return fb.redirect(fb.get_login_url(next=next))\n else:\n return fb.redirect(fb.get_add_url(next=next))\n\n if not fb.added:\n return fb.redirect(fb.get_add_url(next=next))\n\n if 'installed' in request.GET and callable(on_install):\n on_install(request)\n\n if internal and request.method == 'GET' and fb.app_name:\n return fb.redirect('%s%s' % (fb.get_app_url(), next))\n\n return view(request, *args, **kwargs)\n newview.next = next\n newview.internal = internal\n return newview\n return decorator", "def installed(self, value: bool) -> None:\n self._installed = bool(value)", "def resolution_guard(self):\n raise NotImplementedError\n # TODO: implement resolution guard.", "def test_install_plugin_again_is_ok(self):\n raise NotImplementedError()", "def is_installed(self, shutit_module_obj):\n\t\tshutit_global.shutit_global_object.yield_to_draw()\n\t\t# Cache first\n\t\tif shutit_module_obj.module_id in self.get_current_shutit_pexpect_session_environment().modules_installed:\n\t\t\treturn True\n\t\tif shutit_module_obj.module_id in self.get_current_shutit_pexpect_session_environment().modules_not_installed:\n\t\t\treturn False\n\t\t# Is it installed?\n\t\tif shutit_module_obj.is_installed(self):\n\t\t\tself.get_current_shutit_pexpect_session_environment().modules_installed.append(shutit_module_obj.module_id)\n\t\t\treturn True\n\t\t# If not installed, and not in cache, add it.\n\t\telse:\n\t\t\tif shutit_module_obj.module_id not in self.get_current_shutit_pexpect_session_environment().modules_not_installed:\n\t\t\t\tself.get_current_shutit_pexpect_session_environment().modules_not_installed.append(shutit_module_obj.module_id)\n\t\t\treturn False\n\t\treturn False", "def _installed_apps_add(self):\n config.add_plugin(self.module_path)", "def _require_post_load_hook(action, *args, **kwargs):\n if action == 'back': # do nothing on a 'back'\n return\n \n from twill import commands\n OUT=commands.OUT\n\n global ignore_once\n global ignore_always\n \n if ignore_once or ignore_always:\n ignore_once = 
False\n return\n \n for what in _requirements:\n\n ####\n ####\n ####\n \n if what == 'success':\n if DEBUG:\n print('REQUIRING success', file=OUT)\n commands.code(200)\n \n ####\n ####\n ####\n \n elif what == 'links_ok':\n from .check_links import check_links\n \n ignore_always = True\n if DEBUG:\n print('REQUIRING functioning links', file=OUT)\n print('(already visited:)', file=OUT)\n print(\"\\n\\t\".join(list(links_visited.keys())))\n \n try:\n check_links(visited=links_visited)\n finally:\n ignore_always = False", "def published(self, for_user=None, include_login_required=False):\n published = super(PageManager, self).published(for_user=for_user)\n unauthenticated = for_user and not is_authenticated(for_user)\n if (\n unauthenticated\n and not include_login_required\n and not settings.PAGES_PUBLISHED_INCLUDE_LOGIN_REQUIRED\n ):\n published = published.exclude(login_required=True)\n return published", "def __call__(self, environ, start_response):\n middleware = Middleware(environ, start_response)\n middleware.application = self\n return middleware", "def register(self, klass):\n if klass not in self.extensions:\n self.extensions.append(klass)", "def page_load(self):\n return self._page_load", "def force_load(self):\n pass", "def ready(self):\n if settings.PLUGINS_ENABLED:\n if not canAppAccessDatabase(allow_test=True):\n logger.info(\"Skipping plugin loading sequence\") # pragma: no cover\n else:\n logger.info('Loading InvenTree plugins')\n\n if not registry.is_loading:\n # this is the first startup\n try:\n from common.models import InvenTreeSetting\n if InvenTreeSetting.get_setting('PLUGIN_ON_STARTUP', create=False, cache=False):\n # make sure all plugins are installed\n registry.install_plugin_file()\n except Exception: # pragma: no cover\n pass\n\n # get plugins and init them\n registry.plugin_modules = registry.collect_plugins()\n registry.load_plugins()\n\n # drop out of maintenance\n # makes sure we did not have an error in reloading and maintenance is still active\n set_maintenance_mode(False)\n\n # check git version\n registry.git_is_modern = check_git_version()\n if not registry.git_is_modern: # pragma: no cover # simulating old git seems not worth it for coverage\n log_error(_('Your enviroment has an outdated git version. 
This prevents InvenTree from loading plugin details.'), 'load')\n\n else:\n logger.info(\"Plugins not enabled - skipping loading sequence\") # pragma: no cover", "def pre_deploy(self) -> Any:\n raise NotImplementedError", "def _is_installed(self):\n return self._system.is_library(os.path.join(self.get_install_path(), \"lib/libG4event\")) or \\\n self._system.is_library(os.path.join(self.get_install_path(), \"lib64/libG4event\"))", "def middleware(self, environ, start_response):\n app = self.app\n self.register(app.config['CHANNEL_SERVER'], 'reload', app.reload)", "def enable(self):\n self._installed_apps_add()", "def _add_extensions(self):\n ext_cache_down = 'cache_downloading'\n ext_cache_up = 'cache_uploading'\n cmd_args = self.task_data.get('cmd_args', {})\n if not isinstance(cmd_args, dict):\n cmd_args = {}\n if cmd_args.get('save_raw_pages', False):\n self.required_signals[SIGNAL_SPIDER_OPENED]['wait'] += \\\n EXTENSION_SIGNALS[ext_cache_up]\n if cmd_args.get('load_raw_pages'):\n self.required_signals[SIGNAL_SCRIPT_CLOSED]['wait'] += \\\n EXTENSION_SIGNALS[ext_cache_down]", "def test_client_load_pages_request(self):\n is_present = hasattr(self.httpbin_3, 'test_requests_patch_method')\n\n self.assertTrue(is_present)", "def context_processors(self):\n return [\n 'leonardo.module.web.processors.page.add_page_if_missing',\n 'leonardo.module.web.processors.config.leonardo',\n 'leonardo.module.web.processors.font.webfont_cookie',\n ]", "def setup_method(self) -> None:\n super(TestProfiles, self).setup_method()\n if os.getenv('SETUP_METHOD') is None:\n self.custom.setup_method(self)", "def on_load(self):\n pass", "def on_load(self):\n pass", "def _setup(self):\n raise NotImplementedError()", "def addSiteManager(self):\n if zope.component.interfaces.ISite.providedBy(self.context):\n raise UserError(_('This is already a site'))\n\n # We don't want to store security proxies (we can't,\n # actually), so we have to remove proxies here before passing\n # the context to the SiteManager.\n bare = removeSecurityProxy(self.context)\n sm = LocalSiteManager(bare)\n self.context.setSiteManager(sm)\n self.request.response.redirect(\n \"++etc++site/@@SelectedManagementView.html\")", "def Setup(self):\n raise NotImplementedError(\n 'No runtime setup defined for %s' % self.__class__.__name__)", "def _install_linecache_wrapper(self):\n if not hasattr(self, \"_orig_linecache_checkcache\"):\n import linecache\n\n # Save it, although not used really (can be useful for debugging).\n self._orig_linecache_checkcache = linecache.checkcache\n\n def _linecache_checkcache(*args, **kwargs):\n return\n\n linecache.checkcache = _linecache_checkcache", "def dispatch(self, request, *args, **kwargs):\n\n if self.page['page_type'] != self.page_type:\n raise Http404\n return super().dispatch(request, *args, **kwargs)", "def post_install_pkg(self, installable_pkg):\n pass", "def add(self, middleware):\n pass # pragma: no cover", "def install_type(self):\n\n return self._install_type", "def test_middleware_is_included(self):\n self.client.get(reverse('contact:home'))\n last_middleware_obj = self.request_store.objects.last()\n self.assertEqual(last_middleware_obj.method, 'GET')\n self.assertEqual(last_middleware_obj.path, reverse('contact:home'))", "def template_library_installed(self) -> Optional[bool]:\n return pulumi.get(self, \"template_library_installed\")", "def test_page_existence(self):\r\n # Log in\r\n self.auth_page.visit()\r\n\r\n # Verify that each page is available\r\n for page in self.pages:\r\n page.visit()", "def 
not_installed(self) -> bool:\n return pulumi.get(self, \"not_installed\")", "def meta_available(self):\n return self._meta_available", "def prepare(self):\n return HandlerReady()", "def _assert_backend_available(method):\n @functools.wraps(method)\n def wrapper(self, *args, **kwargs):\n if not self.is_available():\n raise RuntimeError(\n \"Backend '{}' is not available\".format(self))\n return method(self, *args, **kwargs)\n return wrapper", "def requires_parent_plugin(cls, slot, page):\n return False", "def on_setup(self, request, trigger_context):\n raise NotImplementedError", "def test_installed_apps(self):\n self.assertIn(__package__, settings.INSTALLED_APPS)", "def post_setup(cls):\n super().post_setup()\n\n # The SENTRY_DSN setting should be available to activate sentry for an environment\n if cls.SENTRY_DSN is not None:\n sentry_sdk.init( # pylint: disable=abstract-class-instantiated\n dsn=cls.SENTRY_DSN,\n environment=cls._get_environment(),\n release=get_release(),\n integrations=[DjangoIntegration()],\n )\n with sentry_sdk.configure_scope() as scope:\n scope.set_extra(\"application\", \"backend\")", "def __virtual__():\n if get_configured_provider() is False:\n return False\n if _get_dependencies() is False:\n return False\n\n global cache # pylint: disable=global-statement,invalid-name\n cache = salt.cache.Cache(__opts__)\n\n return __virtualname__", "def __init__(self):\n self.setup_called = False", "def installation(request):\n return render(request, 'ecosystem/installation.html',\n {'page': 'installation', 'category': 'publish'})" ]
[ "0.5708004", "0.5457926", "0.53875947", "0.5337035", "0.53081757", "0.5224973", "0.52124566", "0.5208954", "0.51006395", "0.5072566", "0.5057796", "0.5034346", "0.5021708", "0.50119513", "0.5004418", "0.5003487", "0.4994785", "0.49845058", "0.49206704", "0.49062628", "0.49048826", "0.48873", "0.4885075", "0.48722622", "0.48634034", "0.4862464", "0.4837229", "0.4826915", "0.478637", "0.47698748", "0.47665992", "0.47642958", "0.4763595", "0.4752189", "0.4750586", "0.47460246", "0.47441274", "0.4712507", "0.4704924", "0.47021627", "0.46923104", "0.4684152", "0.464953", "0.4639022", "0.46323425", "0.4616639", "0.46020898", "0.46006408", "0.45963708", "0.4595766", "0.45952532", "0.45922777", "0.45921388", "0.45798916", "0.45784453", "0.45682645", "0.45623726", "0.45574978", "0.45485458", "0.4548195", "0.45451424", "0.452306", "0.4521929", "0.45129833", "0.45110473", "0.45100793", "0.44970536", "0.44950897", "0.44943717", "0.44908524", "0.4478369", "0.44716036", "0.44623783", "0.44542468", "0.44524848", "0.44424", "0.44333643", "0.44333643", "0.44314736", "0.44302353", "0.44226024", "0.44116807", "0.44062242", "0.43960193", "0.43925005", "0.43865412", "0.43860045", "0.4377376", "0.43752366", "0.43642187", "0.43632916", "0.43495008", "0.43413988", "0.43396047", "0.4335611", "0.43329972", "0.43305004", "0.43196905", "0.4311732", "0.4310956" ]
0.8444131
0
Per-request mechanics for the current page object.
def process_view(self, request, view_func, view_args, view_kwargs):
    # Load the closest matching page by slug, and assign it to the
    # request object. If none found, skip all further processing.
    slug = path_to_slug(request.path_info)
    pages = Page.objects.with_ascendants_for_slug(
        slug, for_user=request.user, include_login_required=True
    )
    if pages:
        page = pages[0]
        setattr(request, "page", page)
        context_processors.page(request)
    else:
        return

    # Handle ``page.login_required``.
    if page.login_required and not is_authenticated(request.user):
        return redirect_to_login(request.get_full_path())

    # If the view isn't Mezzanine's page view, try to return the result
    # immediately. In the case of a 404 with an URL slug that matches a
    # page exactly, swallow the exception and try Mezzanine's page view.
    #
    # This allows us to set up pages with URLs that also match non-page
    # urlpatterns. For example, a page could be created with the URL
    # /blog/about/, which would match the blog urlpattern, and assuming
    # there wasn't a blog post with the slug "about", would raise a 404
    # and subsequently be rendered by Mezzanine's page view.
    if view_func != page_view:
        try:
            return view_func(request, *view_args, **view_kwargs)
        except Http404:
            if page.slug != slug:
                raise

    # Run page processors.
    extra_context = {}
    if request.resolver_match:
        extra_context = request.resolver_match.kwargs.get("extra_context", {})
    model_processors = page_processors.processors[page.content_model]
    slug_processors = page_processors.processors["slug:%s" % page.slug]
    for (processor, exact_page) in slug_processors + model_processors:
        if exact_page and not page.is_current:
            continue
        processor_response = processor(request, page)
        if isinstance(processor_response, HttpResponse):
            return processor_response
        elif processor_response:
            try:
                for k, v in processor_response.items():
                    if k not in extra_context:
                        extra_context[k] = v
            except (TypeError, ValueError):
                name = "%s.%s" % (processor.__module__, processor.__name__)
                error = (
                    "The page processor %s returned %s but must "
                    "return HttpResponse or dict." % (name, type(processor_response))
                )
                raise ValueError(error)

    return page_view(request, slug, extra_context=extra_context)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __call__(self):\r\n self.init_data = td.import_data(self.__module__)\r\n self.page1() # GET navigation (requests 101-153)\r\n\r\n grinder.sleep(20)\r\n self.page2() # GET case (requests 201-252)\r\n\r\n grinder.sleep(20)\r\n self.page3() # GET view (requests 301-365)\r\n\r\n grinder.sleep(20)\r\n self.page4() # POST view (requests 401-452)\r", "def __call__(self):\n self.page1() # GET supercars.do (requests 101-111)\n\n grinder.sleep(2117)\n self.page2() # GET cars.do (requests 201-202)\n\n grinder.sleep(1867)\n self.page3() # GET car.do (request 301)\n\n grinder.sleep(4351)\n self.page4() # GET enquire.do (requests 401-402)\n\n grinder.sleep(16341)\n self.page5() # POST enquire.do (request 501)\n\n grinder.sleep(1309)\n self.page6() # GET supercars.do (request 601)\n\n grinder.sleep(669)\n self.page7() # GET cars.do (requests 701-702)\n\n grinder.sleep(1260)\n self.page8() # GET car.do (request 801)\n\n grinder.sleep(837)\n self.page9() # GET car.do (request 901)\n\n grinder.sleep(1108)\n self.page10() # GET search.do (request 1001)\n\n grinder.sleep(3146)\n self.page11() # POST search.do (requests 1101-1102)\n\n grinder.sleep(2822)\n self.page12() # POST search.do (request 1201)\n\n grinder.sleep(1333)\n self.page13() # GET sell.do (request 1301)\n\n grinder.sleep(17417)\n self.page14() # POST sell.do (request 1401)\n\n grinder.sleep(6680)\n self.page15() # GET insurance.do (request 1501)\n\n grinder.sleep(600)\n self.page16() # GET about.do (requests 1601-1602)\n\n grinder.sleep(584)\n self.page17() # GET supercars.do (request 1701)\n\n grinder.sleep(1049)\n self.page18() # GET cars.do (requests 1801-1802)\n\n grinder.sleep(2901)\n self.page19() # GET car.do (request 1901)\n\n grinder.sleep(1441)\n self.page20() # GET car.do (request 2001)\n\n grinder.sleep(791)\n self.page21() # GET supercars.do (request 2101)\n\n grinder.sleep(1365)\n self.page22() # GET cars.do (request 2201)\n\n grinder.sleep(1067)\n self.page23() # GET supercars.do (request 2301)\n\n grinder.sleep(1284)\n self.page24() # GET cars.do (request 2401)\n\n grinder.sleep(879)\n self.page25() # GET supercars.do (request 2501)\n\n grinder.sleep(1066)\n self.page26() # GET cars.do (request 2601)\n\n grinder.sleep(974)\n self.page27() # GET supercars.do (request 2701)", "def _DoPageProcessing(self, mr, nonce):\n with mr.profiler.Phase('common request data'):\n self._DoCommonRequestProcessing(self.request, mr)\n self._MaybeRedirectToBrandedDomain(self.request, mr.project_name)\n page_data = self.GatherBaseData(mr, nonce)\n\n with mr.profiler.Phase('page processing'):\n page_data.update(self.GatherPageData(mr))\n page_data.update(mr.form_overrides)\n template_helpers.ExpandLabels(page_data)\n self._RecordVisitTime(mr)\n\n return page_data", "def __call__(self):\n self.page1() # GET web (request 101)\n\n grinder.sleep(1000)\n self.page2() # GET web (request 201)\n\n grinder.sleep(1000)\n self.page3() # GET web (request 301)\n\n grinder.sleep(1000)\n self.page4() # GET web (request 401)\n\n grinder.sleep(1000)\n self.page5() # GET web (request 501)\n\n grinder.sleep(1000)\n self.page6() # GET web (request 601)\n\n grinder.sleep(1000)\n self.page7() # GET web (request 701)\n\n grinder.sleep(1000)\n self.page8() # GET web (request 801)\n\n grinder.sleep(1000)\n self.page9() # GET web (request 901)\n\n grinder.sleep(1000)\n self.page10() # GET web (request 1001)\n\n grinder.sleep(1000)\n self.page11() # GET web (request 1101)\n\n grinder.sleep(1000)\n self.page12() # GET web (request 1201)\n\n grinder.sleep(1000)\n 
self.page13() # GET web (request 1301)\n\n grinder.sleep(1000)\n self.page14() # GET web (request 1401)\n\n grinder.sleep(1000)\n self.page15() # GET web (request 1501)\n\n grinder.sleep(1000)\n self.page16() # GET web (request 1601)\n\n grinder.sleep(1000)\n self.page17() # GET web (request 1701)\n\n grinder.sleep(1000)\n self.page18() # GET web (request 1801)\n\n grinder.sleep(1000)\n self.page19() # GET web (request 1901)\n\n grinder.sleep(1000)\n self.page20() # GET web (request 2001)\n\n grinder.sleep(1000)\n self.page21() # GET web (request 2101)\n\n grinder.sleep(1000)\n self.page22() # GET web (request 2201)\n\n grinder.sleep(1000)\n self.page23() # GET web (request 2301)\n\n grinder.sleep(1000)\n self.page24() # GET web (request 2401)\n\n grinder.sleep(1000)\n self.page25() # GET web (request 2501)\n\n grinder.sleep(1000)\n self.page26() # GET web (request 2601)\n\n grinder.sleep(1000)\n self.page27() # GET web (request 2701)\n\n grinder.sleep(1000)\n self.page28() # GET web (request 2801)\n\n grinder.sleep(1000)\n self.page29() # GET web (request 2901)\n\n grinder.sleep(1000)\n self.page30() # GET web (request 3001)\n\n grinder.sleep(1000)\n self.page31() # GET web (request 3101)\n\n# grinder.sleep(1000)\n# self.page32() # POST downloads (request 3201)\n\n# grinder.sleep(1000)\n# self.page33() # GET goog-malware-shavar_s_10501-10520.10501.10502-10520: (request 3301)\n\n grinder.sleep(1000)\n self.page34() # GET web (request 3401)\n\n grinder.sleep(1000)\n self.page35() # GET web (request 3501)\n# self.page36() # GET goog-malware-shavar_a_9606-9610.9606-9609.9610: (request 3601)\n\n# grinder.sleep(1000)\n# self.page37() # GET goog-phish-shavar_s_36981-36985.36981-36985.: (request 3701)\n\n# grinder.sleep(1000)\n# self.page38() # GET goog-phish-shavar_s_36986-36990.36986-36987.36988-36990: (request 3801)\n\n# grinder.sleep(1000)\n# self.page39() # GET goog-phish-shavar_a_46491-46500.46491-46499.46500: (request 3901)\n\n grinder.sleep(1000)\n self.page40() # GET web (request 4001)\n\n grinder.sleep(1000)\n self.page41() # GET web (request 4101)\n\n grinder.sleep(1000)\n self.page42() # GET web (request 4201)\n\n grinder.sleep(1000)\n self.page43() # GET web (request 4301)\n\n grinder.sleep(1000)\n self.page44() # GET web (request 4401)\n\n grinder.sleep(1000)\n self.page45() # GET web (request 4501)\n\n grinder.sleep(1000)\n self.page46() # GET web (request 4601)\n\n grinder.sleep(1000)\n self.page47() # GET web (request 4701)\n\n grinder.sleep(1000)\n self.page48() # GET web (request 4801)\n\n grinder.sleep(1000)\n self.page49() # GET web (request 4901)\n\n grinder.sleep(1000)\n self.page50() # GET web (request 5001)\n\n grinder.sleep(1000)\n self.page51() # GET web (request 5101)\n\n grinder.sleep(1000)\n self.page52() # GET web (request 5201)\n\n grinder.sleep(1000)\n self.page53() # GET web (request 5301)", "def access_page(self, page_number):\n pass", "def request_access(self):\n pass", "def setup_page(self):\r\n raise NotImplementedError", "def process(self, request):\n pass", "async def _add_page_settings(self, page: Page) -> None:\r\n # launch options for this page.\r\n launch_options = self.browsers[page.browser]['launch_options']\r\n # set the default maximum navigation time.\r\n if 'defaultNavigationTimeout' in launch_options:\r\n page.setDefaultNavigationTimeout(\r\n launch_options['defaultNavigationTimeout'])\r\n tasks = [self.set_stealth(page)]\r\n # blocks URLs from loading.\r\n if 'blockedURLs' in launch_options:\r\n tasks.append(self.set_blocked_urls(\r\n page, 
launch_options['blockedURLs']))\r\n # disable cache for each request.\r\n if 'setCacheEnabled' in launch_options:\r\n tasks.append(page.setCacheEnabled(\r\n launch_options['setCacheEnabled']))\r\n # add a JavaScript function(s) that will be invoked whenever the page is navigated.\r\n for script in launch_options.get('evaluateOnNewDocument', []):\r\n tasks.append(page.evaluateOnNewDocument(script))\r\n # intercept all request and only allow requests for types not in request_abort_types.\r\n request_abort_types = launch_options.get('requestAbortTypes')\r\n if request_abort_types:\r\n # enable request interception.\r\n tasks.append(page.setRequestInterception(True))\r\n\r\n async def block_type(request: Request):\r\n # condition(s) where requests should be aborted.\r\n if request.resourceType in request_abort_types:\r\n await request.abort()\r\n elif launch_options.get('blockRedirects', False) and request.isNavigationRequest() and len(request.redirectChain):\r\n await request.abort()\r\n else:\r\n await request.continue_()\r\n\r\n page.on('request',\r\n lambda request: asyncio.create_task(block_type(request)))\r\n await asyncio.gather(*tasks)", "def page_main():\n \n cur_page = request.form['page']\n session['page'] = utils.id_to_page(cur_page,g.graph)\n return render_template('do_action.html')", "def setup_page(self):\n raise NotImplementedError", "def work(self, request):\n raise NotImplementedError", "def teacher_forum_landing_page(request, page):\n # Deleting admin-typed user session\n # Deleting programmer-typed-user session\n\n # Get the current users\n current_basic_user = get_current_user(request, User, ObjectDoesNotExist)\n\n current_basic_user_profile = get_current_user_profile(\n request,\n User,\n BasicUserProfile,\n ObjectDoesNotExist\n )\n\n # Getting the teacher profile\n current_teacher_profile = get_current_teacher_user_profile(\n request,\n User,\n TeacherUserProfile,\n ObjectDoesNotExist\n )\n\n # getting the page posts\n # At every page there will be 45 entries so always multiply it by that and\n # then reduce your objects\n current_page = page\n previous_page = page-1\n next_page = page+1\n\n post_records_starting_point = current_page * 46\n post_records_ending_point = post_records_starting_point + 46\n\n try:\n current_page_posts = TeacherForumPost.objects.all().order_by(\"-id\")[post_records_starting_point:post_records_ending_point]\n except ObjectDoesNotExist:\n current_page_posts = None\n\n # getting posts comment count\n page_comments = {}\n for post in current_page_posts:\n comments = TeacherForumComment.objects.filter(post=post)\n comment_count = 0\n for comment in comments:\n comment_count += 1\n page_comments[post.id] = comment_count\n\n # post upvote form processing\n if request.POST.get(\"teacher_forum_landing_upvote_submit\"):\n hidden_post_id = request.POST.get(\"hidden_post_id\")\n post = TeacherForumPost.objects.get(id=hidden_post_id)\n # upvote the post\n post.karma += 1\n post.save()\n return HttpResponseRedirect(\"/teacher/forum/read/\"+str(post.id)+\"/\")\n\n data = {\n \"current_basic_user\": current_basic_user,\n \"current_basic_user_profile\": current_basic_user_profile,\n \"current_teacher_profile\": current_teacher_profile,\n \"current_page_posts\": current_page_posts,\n \"current_page\": current_page,\n \"previous_page\": previous_page,\n \"next_page\": next_page,\n \"current_page_posts\": current_page_posts,\n \"page_comments\": page_comments,\n }\n\n if \"teacher_user_logged_in\" in request.session:\n return render(request, 
\"teacher_forum/landing_page.html\", data)\n else:\n return HttpResponseRedirect(\"/\")", "def _verify_page(self):", "def permission_request_processor(page, request):\n if not has_permission_to_view(page, request.user):\n raise PermissionDenied", "def supply(request, page_name):\n\n _ = page_name\n return get_quests_from_cache(request.user)", "def __call__(request):", "def index_html(self,REQUEST): \n return self.posting_html(self,REQUEST)", "def identify_and_parse_page(self, response):\n if self.initial_page_filter(response):\n if self.is_index_page(url=response.url, response=response):\n self.process_index_page(response)\n elif self.is_captcha_page(response.url, response):\n self.process_captcha(response)\n elif self.is_results_page(response.url, response):\n items = self.process_question_answer_page(response)\n if self.duplicate_url:\n yield Request(url=self.duplicate_url, callback=self.identify_and_parse_page)\n self.duplicate_url = None\n for item in items:\n yield item\n else:\n self.classification_file.write(\"other, {}\\n\".format(response.url))\n print('other: {}'.format(response.url))\n else:\n self.classification_file.write(\"other, {}\\n\".format(response.url))\n print('other: {}'.format(response.url))", "def process(self):\n data = getattr(self, self.request.method.lower())()\n data, pagination = self.paginate(data)\n return data, pagination", "def perform_scraping(current_session):\r\n\r\n # List Array storing all relevant decision information\r\n final_data_fetch = []\r\n pagination_index = global_constants['NUMBER_PAGE_TO_SCRAPE_FIRST']\r\n while pagination_index < global_constants['NUMBER_PAGE_TO_SCRAPE_LAST']:\r\n print(\"Page:\", pagination_index, \" Collected records:\", len(final_data_fetch))\r\n\r\n # Get relevant admit-reject page based on pagination value\r\n result = current_session.get(global_constants['ALL_RESULTS_URL'] + str(pagination_index),\r\n headers=dict(referer=global_constants['ALL_RESULTS_URL']))\r\n tree = lxml_html.fromstring(result.content)\r\n\r\n # Get Nodes containing individual decisions for each page(approx 20 per page)\r\n decision_buckets = tree.xpath('//*[@class=\"row\"]/div[@class=\"col-sm-6\"]/div[@class=\"panel panel-warning\"]/div[@class=\"panel-body\"]')\r\n\r\n # If decision buckets are empty, captcha page has been encountered\r\n if len(decision_buckets) == 0:\r\n print(\"Captcha Time\")\r\n time.sleep(120)\r\n continue\r\n\r\n for individual_decision_bucket in decision_buckets:\r\n\r\n current_admit_status = ((individual_decision_bucket.xpath('./div[1]/div[2]/label'))[0]).text.strip()\r\n\r\n # Fetch results only if ADMIT or REJECT\r\n if current_admit_status.lower() == 'admit' or current_admit_status.lower() == 'reject':\r\n\r\n # Get relevant information from html page returned in response\r\n current_bucket_university_course = ((individual_decision_bucket.xpath('./div[1]/div[1]/h4/small'))[0]).text.replace(\"\\n\",\"\").strip()\r\n current_gre = get_gre_or_toefl(((((individual_decision_bucket.xpath('./div[2]/div[1]'))[0]).getchildren())[1]).tail)\r\n current_toefl = get_gre_or_toefl(((((individual_decision_bucket.xpath('./div[2]/div[2]'))[0]).getchildren())[1]).tail)\r\n current_gpa = get_gpa(((((individual_decision_bucket.xpath('./div[2]/div[3]'))[0]).getchildren())[1]).tail)\r\n current_workex = get_workex_months(((((individual_decision_bucket.xpath('./div[2]/div[4]'))[0]).getchildren())[1]).tail)\r\n\r\n current_university, current_course = split_bucket_university_course(current_bucket_university_course.lower())\r\n # Append 
decision information to final bucket only if minimum criteria met\r\n if current_university is not None and filter_criteria_met(current_gre, current_gpa, current_toefl):\r\n\r\n # Get UG College from profile of user\r\n profile_page_path = ((individual_decision_bucket.xpath('./div[1]/div[1]/h4/a'))[0]).attrib['href']\r\n profile_result = current_session.get(global_constants['HOME_PAGE'] + profile_page_path,\r\n headers=dict(referer=global_constants['PAST_RESULTS_URL']))\r\n profile_tree = lxml_html.fromstring(profile_result.content)\r\n ug_details_bucket = (profile_tree.xpath('//div[@class=\"col-sm-12 card\"][1]'))\r\n if len(ug_details_bucket) >= 1:\r\n ug_details_bucket = ug_details_bucket[0]\r\n current_ug_course = ((ug_details_bucket.xpath('./div[1]/div[7]/p[1]/b[1]'))[0]).text.replace(\"\\n\", \"\").strip()\r\n current_ug_college = ((ug_details_bucket.xpath('./div[1]/div[7]/p[2]'))[0]).text.replace(\"\\n\", \"\").strip()\r\n\r\n final_data_fetch.append([current_course, current_university, current_gpa, current_gre, current_toefl,\r\n current_workex, current_ug_course, current_ug_college, current_admit_status])\r\n\r\n # Add sleep time to allow for web scraping in undetected manner\r\n sleep_delay = random.choice([0, 1, 2, 3])\r\n time.sleep(sleep_delay)\r\n pagination_index += 1\r\n\r\n # Export final_data to excel sheet\r\n export_to_file(final_data_fetch)", "def crawl(self):\n try:\n self.crawl_pages()\n self.crawl_posts()\n self.crawl_comments()\n except Exception as exception:\n self.handle_request_limit(exception)", "def page1(self):\r\n self.token_cmd = \\\r\n '10RCT000612'\r\n \r\n # Expecting 302 'Moved Temporarily'\r\n result = request101.GET('/sustain/cms/user/navigation' +\r\n '?cmd=' +\r\n self.token_cmd)\r\n tp.process_result()\r\n self.token_caseNumber = \\\r\n httpUtilities.valueFromLocationURI('caseNumber') # '10RCT000612'\r\n\r\n request102.GET('/sustain/cms/case' +\r\n '?caseNumber=' +\r\n self.token_caseNumber)\r\n tp.process_result()\r\n self.token_dispatch = \\\r\n httpUtilities.valueFromBodyURI('dispatch') # 'onForm'\r\n self.token_xrefClass = \\\r\n httpUtilities.valueFromBodyURI('xrefClass') # 'com.sustain.cases.model.Case'\r\n self.token_xrefId = \\\r\n httpUtilities.valueFromBodyURI('xrefId') # '3914617'\r\n # 20 different values for token_formId found in response, using the first one.\r\n self.token_formId = \\\r\n httpUtilities.valueFromBodyURI('formId') # '359'\r\n # 8 different values for token_id found in response, using the first one.\r\n self.token_id = \\\r\n httpUtilities.valueFromBodyURI('id') # '3914617'\r\n self.token_caseId = \\\r\n httpUtilities.valueFromBodyURI('caseId') # '3914617'\r\n self.token_data7588 = \\\r\n httpUtilities.valueFromHiddenInput('data(7588)') # 'Defendant'\r\n self.token_orgapachestrutstaglibhtmlTOKEN = \\\r\n httpUtilities.valueFromHiddenInput('org.apache.struts.taglib.html.TOKEN') # '39fc0124ede005f5995f2f7941d1b1e0'\r\n\r\n grinder.sleep(20)\r\n request103.GET('/sustain/common/images/close.gif', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:40 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"849-1271277700000\\\"'), ))\r\n tp.process_result()\r\n\r\n grinder.sleep(20)\r\n self.token_build = \\\r\n '44855'\r\n request104.GET('/sustain/assets/skins/pacificScene/skin.css' +\r\n '?build=' +\r\n self.token_build, None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:22 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"764-1271277682000\\\"'), ))\r\n tp.process_result()\r\n\r\n 
request105.GET('/sustain/assets/css/base.css' +\r\n '?build=' +\r\n self.token_build, None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:22 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"109340-1271277682000\\\"'), ))\r\n tp.process_result()\r\n\r\n grinder.sleep(20)\r\n request106.GET('/sustain/common/css/tabs/tabbase.css' +\r\n '?build=' +\r\n self.token_build, None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:42 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"729-1271277702000\\\"'), ))\r\n tp.process_result()\r\n\r\n request107.GET('/sustain/assets/img/roundedCorners.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"1092-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n request108.GET('/sustain/assets/img/roundedBorderNS.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"2864-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n request109.GET('/sustain/assets/img/roundedBorderWE.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"2898-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n request110.GET('/sustain/assets/img/shadowCorners.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"3037-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n grinder.sleep(20)\r\n request111.GET('/sustain/assets/img/shadowNS.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"3014-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n request112.GET('/sustain/assets/img/shadowWE.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"3091-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n request113.GET('/sustain/assets/img/shadowC.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"6735-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n request114.GET('/sustain/assets/img/main-background.jpg', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"622-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n request115.GET('/sustain/assets/img/magnifier.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"716-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n request116.GET('/sustain/assets/img/icons/topnav-icons.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:22 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"41101-1271277682000\\\"'), ))\r\n tp.process_result()\r\n\r\n grinder.sleep(20)\r\n request117.GET('/sustain/assets/img/eCourt-logo-small.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"2846-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n request118.GET('/sustain/assets/skins/pacificScene/corners.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:22 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"660-1271277682000\\\"'), ))\r\n tp.process_result()\r\n\r\n grinder.sleep(20)\r\n request119.GET('/sustain/assets/img/arrows-left.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"350-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n 
request120.GET('/sustain/assets/img/eCourt-logo-mini.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"1347-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n request121.GET('/sustain/assets/img/arrows-right.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"348-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n request122.GET('/sustain/assets/img/leftnav_bg.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"163-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n request123.GET('/sustain/assets/img/tn.gif', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"504-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n request124.GET('/sustain/assets/img/ln.gif', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"142-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n grinder.sleep(20)\r\n request125.GET('/sustain/assets/img/case-header-bg.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"21172-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n request126.GET('/sustain/assets/img/icon-note-add.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"488-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n request127.GET('/sustain/assets/img/icon-print.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"687-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n request128.GET('/sustain/assets/img/icon-case-status.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"389-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n grinder.sleep(20)\r\n request129.GET('/sustain/assets/img/transparent-white.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"4646-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n grinder.sleep(20)\r\n request130.GET('/sustain/common/images/x.gif', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:40 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"43-1271277700000\\\"'), ))\r\n tp.process_result()\r\n\r\n request131.GET('/sustain/assets/img/bg_button_a.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"3424-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n request132.GET('/sustain/assets/img/bg_button_span-selected.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"844-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n request133.GET('/sustain/assets/img/bg_button_a-selected.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"663-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n grinder.sleep(20)\r\n request134.GET('/sustain/common/images/ajax-loader.gif', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:40 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"701-1271277700000\\\"'), ))\r\n tp.process_result()\r\n\r\n request135.GET('/sustain/assets/img/caseislandheader-default-bg.png', None,\r\n ( 
NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"357-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n request136.GET('/sustain/assets/img/caseSubislandheader-default-bg.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"4102-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n request137.GET('/sustain/assets/img/island-shadow-top-right-corner.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"2847-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n request138.GET('/sustain/assets/img/island-shadow-bottom-left-corner.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"2847-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n request139.GET('/sustain/common/images/spacer.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:40 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"110-1271277700000\\\"'), ))\r\n tp.process_result()\r\n\r\n grinder.sleep(20)\r\n request140.GET('/sustain/assets/img/case-toolbar-bg-selected.jpg', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"728-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n request141.GET('/sustain/assets/img/island-shadow-right-repeat.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"2858-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n request142.GET('/sustain/assets/img/island-shadow-bottom-repeat.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"2849-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n request143.GET('/sustain/assets/img/island-shadow-bottom-right-corner.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"158-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n grinder.sleep(20)\r\n request144.GET('/sustain/assets/img/casetoolbar-default-bg.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"4510-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n request145.GET('/sustain/assets/img/cti-view-person.gif', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"558-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n grinder.sleep(20)\r\n request146.GET('/sustain/assets/img/icon-insert-or-add.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"399-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n request147.GET('/sustain/assets/img/icon-special.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"761-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n grinder.sleep(20)\r\n request148.GET('/sustain/assets/img/icon-edit.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"664-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n grinder.sleep(20)\r\n request149.GET('/sustain/assets/img/cti-audit-log.gif', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"559-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n 
request150.GET('/sustain/assets/img/bg_button_a.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"3424-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n request151.GET('/sustain/assets/img/bg_button_span.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"3607-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n request152.GET('/sustain/common/images/boxminus-blu.gif', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:40 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"826-1271277700000\\\"'), ))\r\n tp.process_result()\r\n\r\n grinder.sleep(20)\r\n request153.GET('/sustain/assets/img/header_bg_inverse.gif', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"237-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n return result", "def page4(self):\r\n self.token_dispatch = \\\r\n 'onUpdate'\r\n self.token_submitAction = \\\r\n 'saveAndBack'\r\n self.token_data6542 = \\\r\n ''\r\n \r\n # Expecting 302 'Moved Temporarily'\r\n result = request401.POST('/sustain/cms/case/view' +\r\n '?dispatch=' +\r\n self.token_dispatch,\r\n ( NVPair('org.apache.struts.taglib.html.TOKEN', self.token_orgapachestrutstaglibhtmlTOKEN),\r\n NVPair('formId', '37'),\r\n NVPair('id', '2561247'),\r\n NVPair('submitAction', self.token_submitAction),\r\n NVPair('failOnWarnings', self.token_failOnWarnings),\r\n NVPair('navRuleId', self.token_navRuleId),\r\n NVPair('caseId', '3914617'),\r\n NVPair('data(com.sustain.cases.model.Charge:2561247.optlock)', self.token_datacomsustaincasesmodelCharge2561247optlock),\r\n NVPair('data(8608)', '4/14/10'),\r\n NVPair('data(630)', '4/9/10'),\r\n NVPair('data(12790)', '0830'),\r\n NVPair('data(8572)', 'MISD'),\r\n NVPair('data(6213)', '1'),\r\n NVPair('data(6222)', ''),\r\n NVPair('data(6229)', ''),\r\n NVPair('data(638)', ''),\r\n NVPair('lookup.autocomplete.635', ''),\r\n NVPair('data(635)', self.token_data635),\r\n NVPair('data(9804)', '1000.00'),\r\n NVPair('data(7915)', 'false'),\r\n NVPair('data(13354)', '45.00'),\r\n NVPair('data(13352)', '60.00'),\r\n NVPair('data(13353)', '60.00'),\r\n NVPair('data(12789)', '05/20/2010'),\r\n NVPair('data(637)', ''),\r\n NVPair('data(641)', ''),\r\n NVPair('data(642)', ''),\r\n NVPair('data(6542)', self.token_data6542),\r\n NVPair('data(6214.2561247.XRefEntityId)', self.token_data62142561247XRefEntityId),\r\n NVPair('data(6214.2561247.XRefEntityName)', self.token_data62142561247XRefEntityName),\r\n NVPair('data(6214.2561247.XRefId)', self.token_data62142561247XRefId),\r\n NVPair('data(6214.2561247.XRefType)', self.token_data62142561247XRefType),\r\n NVPair('data(8492.4225.XRefEntityId)', self.token_data84924225XRefEntityId),\r\n NVPair('data(8492.4225.XRefEntityName)', self.token_data84924225XRefEntityName),\r\n NVPair('data(8492.4225.XRefId)', self.token_data84924225XRefId),\r\n NVPair('data(8492.4225.XRefType)', self.token_data84924225XRefType), ),\r\n ( NVPair('Content-Type', 'multipart/form-data; boundary=---------------------------7da1e92e300be'), ),\r\n True)\r\n tp.process_result()\r\n self.token_formId = \\\r\n httpUtilities.valueFromLocationURI('formId') # '309'\r\n\r\n request402.GET('/sustain/cms/case' +\r\n '?id=' +\r\n self.token_id +\r\n '&formId=' +\r\n self.token_formId +\r\n '&caseId=' +\r\n self.token_caseId)\r\n tp.process_result()\r\n self.token_dispatch = \\\r\n httpUtilities.valueFromBodyURI('dispatch') # 'onForm'\r\n # 
15 different values for token_formId found in response, using the first one.\r\n self.token_formId = \\\r\n httpUtilities.valueFromBodyURI('formId') # '359'\r\n # 5 different values for token_id found in response; the first matched\r\n # the last known value of token_id - don't update the variable.\r\n self.token_orgapachestrutstaglibhtmlTOKEN = \\\r\n httpUtilities.valueFromHiddenInput('org.apache.struts.taglib.html.TOKEN') # '7b16c809d6cb68aef3f5f2aca7763283'\r\n\r\n grinder.sleep(20)\r\n request403.GET('/sustain/common/images/close.gif', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:40 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"849-1271277700000\\\"'), ))\r\n tp.process_result()\r\n\r\n grinder.sleep(20)\r\n request404.GET('/sustain/assets/skins/pacificScene/skin.css' +\r\n '?build=' +\r\n self.token_build, None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:22 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"764-1271277682000\\\"'), ))\r\n tp.process_result()\r\n\r\n request405.GET('/sustain/assets/css/base.css' +\r\n '?build=' +\r\n self.token_build, None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:22 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"109340-1271277682000\\\"'), ))\r\n tp.process_result()\r\n\r\n grinder.sleep(20)\r\n request406.GET('/sustain/common/css/tabs/tabbase.css' +\r\n '?build=' +\r\n self.token_build, None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:42 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"729-1271277702000\\\"'), ))\r\n tp.process_result()\r\n\r\n request407.GET('/sustain/assets/img/roundedCorners.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"1092-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n request408.GET('/sustain/assets/img/roundedBorderNS.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"2864-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n grinder.sleep(20)\r\n request409.GET('/sustain/assets/img/roundedBorderWE.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"2898-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n request410.GET('/sustain/assets/img/shadowCorners.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"3037-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n request411.GET('/sustain/assets/img/shadowNS.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"3014-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n request412.GET('/sustain/assets/img/shadowWE.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"3091-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n request413.GET('/sustain/assets/img/shadowC.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"6735-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n grinder.sleep(20)\r\n request414.GET('/sustain/assets/img/main-background.jpg', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"622-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n grinder.sleep(20)\r\n request415.GET('/sustain/assets/img/icons/topnav-icons.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:22 GMT'),\r\n 
NVPair('If-None-Match', 'W/\\\"41101-1271277682000\\\"'), ))\r\n tp.process_result()\r\n\r\n grinder.sleep(20)\r\n request416.GET('/sustain/assets/img/magnifier.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"716-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n request417.GET('/sustain/assets/img/eCourt-logo-small.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"2846-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n grinder.sleep(20)\r\n request418.GET('/sustain/assets/img/icon-print.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"687-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n request419.GET('/sustain/assets/img/arrows-left.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"350-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n grinder.sleep(20)\r\n request420.GET('/sustain/assets/img/eCourt-logo-mini.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"1347-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n request421.GET('/sustain/assets/img/arrows-right.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"348-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n grinder.sleep(20)\r\n request422.GET('/sustain/assets/img/leftnav_bg.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"163-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n request423.GET('/sustain/assets/img/tn.gif', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"504-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n request424.GET('/sustain/assets/img/ln.gif', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"142-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n request425.GET('/sustain/assets/img/case-header-bg.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"21172-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n request426.GET('/sustain/assets/img/icon-note-add.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"488-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n grinder.sleep(20)\r\n request427.GET('/sustain/assets/skins/pacificScene/corners.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:22 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"660-1271277682000\\\"'), ))\r\n tp.process_result()\r\n\r\n request428.GET('/sustain/assets/img/icon-case-status.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"389-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n grinder.sleep(20)\r\n request429.GET('/sustain/assets/img/transparent-white.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"4646-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n request430.GET('/sustain/common/images/x.gif', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:40 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"43-1271277700000\\\"'), 
))\r\n tp.process_result()\r\n\r\n grinder.sleep(20)\r\n request431.GET('/sustain/assets/img/bg_button_a-selected.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"663-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n request432.GET('/sustain/assets/img/bg_button_span-selected.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"844-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n request433.GET('/sustain/common/images/ajax-loader.gif', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:40 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"701-1271277700000\\\"'), ))\r\n tp.process_result()\r\n\r\n request434.GET('/sustain/assets/img/caseislandheader-default-bg.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"357-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n request435.GET('/sustain/assets/img/caseSubislandheader-default-bg.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"4102-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n request436.GET('/sustain/assets/img/island-shadow-top-right-corner.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"2847-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n grinder.sleep(20)\r\n request437.GET('/sustain/assets/img/island-shadow-bottom-left-corner.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"2847-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n request438.GET('/sustain/common/images/spacer.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:40 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"110-1271277700000\\\"'), ))\r\n tp.process_result()\r\n\r\n grinder.sleep(20)\r\n request439.GET('/sustain/assets/img/case-toolbar-bg-selected.jpg', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"728-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n request440.GET('/sustain/assets/img/island-shadow-right-repeat.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"2858-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n request441.GET('/sustain/assets/img/island-shadow-bottom-repeat.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"2849-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n request442.GET('/sustain/assets/img/icon-edit.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"664-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n grinder.sleep(20)\r\n request443.GET('/sustain/assets/img/casetoolbar-default-bg.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"4510-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n request444.GET('/sustain/assets/img/cti-view-person.gif', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"558-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n grinder.sleep(20)\r\n request445.GET('/sustain/assets/img/icon-insert-or-add.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 
GMT'),\r\n NVPair('If-None-Match', 'W/\\\"399-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n request446.GET('/sustain/assets/img/icon-special.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"761-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n request447.GET('/sustain/assets/img/island-shadow-bottom-right-corner.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"158-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n grinder.sleep(20)\r\n request448.GET('/sustain/assets/img/cti-audit-log.gif', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"559-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n request449.GET('/sustain/assets/img/bg_button_a.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"3424-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n request450.GET('/sustain/assets/img/bg_button_span.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"3607-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n request451.GET('/sustain/common/images/boxminus-blu.gif', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:40 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"826-1271277700000\\\"'), ))\r\n tp.process_result()\r\n\r\n grinder.sleep(20)\r\n request452.GET('/sustain/assets/img/sortup.gif', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"53-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n return result", "def __call__(self, request):", "def scrape(self):\n pass", "def _page_call(self, url, request) -> Dict:\n response = self._post(url, request)\n raise_on_error(response)\n return response.json()", "def page3(self):\r\n self.token_id = \\\r\n '2561247'\r\n self.token_formId = \\\r\n '37'\r\n result = request301.GET('/sustain/cms/case/view' +\r\n '?id=' +\r\n self.token_id +\r\n '&caseId=' +\r\n self.token_caseId +\r\n '&formId=' +\r\n self.token_formId)\r\n tp.process_result()\r\n # 12 different values for token_formId found in response, using the first one.\r\n self.token_formId = \\\r\n httpUtilities.valueFromBodyURI('formId') # '359'\r\n # 2 different values for token_id found in response, using the first one.\r\n self.token_id = \\\r\n httpUtilities.valueFromBodyURI('id') # '3914617'\r\n self.token_orgapachestrutstaglibhtmlTOKEN = \\\r\n httpUtilities.valueFromHiddenInput('org.apache.struts.taglib.html.TOKEN') # 'fbf66908d72a8e2f019df007c75f6c43'\r\n self.token_submitAction = \\\r\n httpUtilities.valueFromHiddenInput('submitAction') # ''\r\n self.token_failOnWarnings = \\\r\n httpUtilities.valueFromHiddenInput('failOnWarnings') # 'true'\r\n self.token_navRuleId = \\\r\n httpUtilities.valueFromHiddenInput('navRuleId') # '-1'\r\n self.token_datacomsustaincasesmodelCharge2561247optlock = \\\r\n httpUtilities.valueFromHiddenInput('data(com.sustain.cases.model.Charge:2561247.optlock)') # '0'\r\n self.token_data635 = \\\r\n httpUtilities.valueFromHiddenInput('data(635)') # '191827'\r\n self.token_data6542 = \\\r\n httpUtilities.valueFromHiddenInput('data(6542)') # '0'\r\n self.token_data62142561247XRefEntityId = \\\r\n httpUtilities.valueFromHiddenInput('data(6214.2561247.XRefEntityId)') # '2561247'\r\n self.token_data62142561247XRefEntityName = \\\r\n 
httpUtilities.valueFromHiddenInput('data(6214.2561247.XRefEntityName)') # 'com.sustain.cases.model.Charge'\r\n self.token_data62142561247XRefId = \\\r\n httpUtilities.valueFromHiddenInput('data(6214.2561247.XRefId)') # ''\r\n self.token_data62142561247XRefType = \\\r\n httpUtilities.valueFromHiddenInput('data(6214.2561247.XRefType)') # ''\r\n self.token_data84924225XRefEntityId = \\\r\n httpUtilities.valueFromHiddenInput('data(8492.4225.XRefEntityId)') # '4225'\r\n self.token_data84924225XRefEntityName = \\\r\n httpUtilities.valueFromHiddenInput('data(8492.4225.XRefEntityName)') # 'com.sustain.document.model.Document'\r\n self.token_data84924225XRefId = \\\r\n httpUtilities.valueFromHiddenInput('data(8492.4225.XRefId)') # ''\r\n self.token_data84924225XRefType = \\\r\n httpUtilities.valueFromHiddenInput('data(8492.4225.XRefType)') # ''\r\n\r\n grinder.sleep(20)\r\n request302.GET('/sustain/common/images/close.gif', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:40 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"849-1271277700000\\\"'), ))\r\n tp.process_result()\r\n\r\n request303.GET('/sustain/assets/skins/pacificScene/skin.css' +\r\n '?build=' +\r\n self.token_build, None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:22 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"764-1271277682000\\\"'), ))\r\n tp.process_result()\r\n\r\n grinder.sleep(20)\r\n request304.GET('/sustain/assets/css/base.css' +\r\n '?build=' +\r\n self.token_build, None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:22 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"109340-1271277682000\\\"'), ))\r\n tp.process_result()\r\n\r\n grinder.sleep(20)\r\n request305.GET('/sustain/common/css/tabs/tabbase.css' +\r\n '?build=' +\r\n self.token_build, None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:42 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"729-1271277702000\\\"'), ))\r\n tp.process_result()\r\n\r\n request306.GET('/sustain/assets/img/roundedCorners.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"1092-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n grinder.sleep(20)\r\n request307.GET('/sustain/assets/img/roundedBorderNS.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"2864-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n request308.GET('/sustain/assets/img/roundedBorderWE.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"2898-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n request309.GET('/sustain/assets/img/shadowCorners.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"3037-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n request310.GET('/sustain/assets/img/shadowNS.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"3014-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n request311.GET('/sustain/assets/img/shadowWE.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"3091-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n grinder.sleep(20)\r\n request312.GET('/sustain/assets/img/shadowC.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"6735-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n 
grinder.sleep(20)\r\n request313.GET('/sustain/assets/img/main-background.jpg', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"622-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n request314.GET('/sustain/assets/img/icons/topnav-icons.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:22 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"41101-1271277682000\\\"'), ))\r\n tp.process_result()\r\n\r\n request315.GET('/sustain/assets/img/magnifier.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"716-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n grinder.sleep(20)\r\n request316.GET('/sustain/assets/img/eCourt-logo-small.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"2846-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n request317.GET('/sustain/assets/skins/pacificScene/corners.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:22 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"660-1271277682000\\\"'), ))\r\n tp.process_result()\r\n\r\n grinder.sleep(20)\r\n request318.GET('/sustain/assets/img/arrows-left.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"350-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n grinder.sleep(20)\r\n request319.GET('/sustain/assets/img/eCourt-logo-mini.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"1347-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n request320.GET('/sustain/assets/img/arrows-right.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"348-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n request321.GET('/sustain/assets/img/leftnav_bg.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"163-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n grinder.sleep(20)\r\n request322.GET('/sustain/assets/img/tn.gif', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"504-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n request323.GET('/sustain/assets/img/ln.gif', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"142-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n request324.GET('/sustain/assets/img/case-header-bg.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"21172-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n request325.GET('/sustain/assets/img/icon-note-add.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"488-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n request326.GET('/sustain/assets/img/icon-print.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"687-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n grinder.sleep(20)\r\n request327.GET('/sustain/assets/img/icon-case-status.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"389-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n grinder.sleep(20)\r\n 
request328.GET('/sustain/assets/img/transparent-white.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"4646-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n request329.GET('/sustain/common/images/x.gif', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:40 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"43-1271277700000\\\"'), ))\r\n tp.process_result()\r\n\r\n grinder.sleep(20)\r\n request330.GET('/sustain/assets/img/bg_button_a.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"3424-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n request331.GET('/sustain/assets/img/bg_button_a-selected.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"663-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n request332.GET('/sustain/assets/img/bg_button_span-selected.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"844-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n grinder.sleep(20)\r\n request333.GET('/sustain/common/images/ajax-loader.gif', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:40 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"701-1271277700000\\\"'), ))\r\n tp.process_result()\r\n\r\n request334.GET('/sustain/common/images/img.gif', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:40 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"379-1271277700000\\\"'), ))\r\n tp.process_result()\r\n\r\n request335.GET('/sustain/assets/img/caseislandheader-default-bg.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"357-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n request336.GET('/sustain/assets/img/caseSubislandheader-default-bg.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"4102-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n request337.GET('/sustain/assets/img/icon-loading.gif', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"1737-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n request338.GET('/sustain/assets/img/island-shadow-top-right-corner.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"2847-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n grinder.sleep(20)\r\n request339.GET('/sustain/assets/img/island-shadow-bottom-left-corner.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"2847-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n request340.GET('/sustain/common/images/spacer.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:40 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"110-1271277700000\\\"'), ))\r\n tp.process_result()\r\n\r\n grinder.sleep(20)\r\n request341.GET('/sustain/assets/img/island-shadow-right-repeat.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"2858-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n request342.GET('/sustain/assets/img/island-shadow-bottom-repeat.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"2849-1271277684000\\\"'), ))\r\n 
tp.process_result()\r\n\r\n request343.GET('/sustain/assets/img/island-shadow-bottom-right-corner.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"158-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n request344.GET('/sustain/assets/img/casetoolbar-default-bg.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"4510-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n grinder.sleep(20)\r\n request345.GET('/sustain/assets/img/bg_button_a.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"3424-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n request346.GET('/sustain/assets/img/bg_button_span.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"3607-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n grinder.sleep(20)\r\n request347.GET('/sustain/common/images/boxminus-blu.gif', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:40 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"826-1271277700000\\\"'), ))\r\n tp.process_result()\r\n\r\n request348.GET('/sustain/assets/img/icon-lookup.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"701-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n grinder.sleep(20)\r\n request349.GET('/sustain/assets/img/icon-clear.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"664-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n request350.GET('/sustain/assets/img/icon-save-and-go-back.png')\r\n tp.process_result()\r\n\r\n request351.GET('/sustain/assets/img/icon-default-save.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"645-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n grinder.sleep(20)\r\n request352.GET('/sustain/assets/img/icon-default-go-back.png')\r\n tp.process_result()\r\n\r\n request353.GET('/sustain/assets/img/icon-special.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"761-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n request354.GET('/sustain/assets/img/cti-view-person.gif', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"558-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n request355.GET('/sustain/assets/img/cti-audit-log.gif', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"559-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n grinder.sleep(20)\r\n request356.GET('/sustain/assets/img/icon-delete-grey.png')\r\n tp.process_result()\r\n\r\n grinder.sleep(20)\r\n request357.GET('/sustain/assets/img/normal-bg.gif', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"110-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n request358.GET('/sustain/assets/img/title-bg.gif', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"116-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n grinder.sleep(20)\r\n request359.GET('/sustain/assets/img/menuarrow.gif', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n 
NVPair('If-None-Match', 'W/\\\"49-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n request360.GET('/sustain/assets/img/dark-bg.gif', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"85-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n request361.GET('/sustain/assets/img/active-bg.gif', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"89-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n request362.GET('/sustain/assets/img/status-bg.gif', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"116-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n grinder.sleep(20)\r\n request363.GET('/sustain/assets/img/hover-bg.gif', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"89-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n grinder.sleep(20)\r\n request364.GET('/sustain/assets/img/hover-bg.gif', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"89-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n grinder.sleep(20)\r\n request365.GET('/sustain/assets/img/rowhover-bg.gif', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"110-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n return result", "def prePresent(self, request):", "def fifth_page_execution(self):\n self.errors_and_correct_input_values_helper()\n self.sixth_page.wait_for_page()", "def initial(self, request, *args, **kwargs):\n\t\t# Ensure that the incoming request is permitted\n\t\t# self.perform_authentication(request)\n\t\t# self.check_permissions(request)\n\t\t# self.check_throttles(request)\n\t\tpass", "def __init__(self, page, main):\r\n #create variables\r\n self.lang_claims = []\r\n self.merged_claims = []\r\n self.validated_claims = []\r\n self.new_claims = []\r\n self.proceed = True\r\n self.main = main\r\n #init parents\r\n pywikibot.ItemPage.__init__(self, page.site, page.title())\r\n output.Logger.__init__(self)\r\n Rules.__init__(self)\r\n self.main = main\r\n #get the page\r\n if not self.checkNoItem():\r\n self.proceed = False\r\n else:\r\n self.get()\r\n #check if the item is untransferred\r\n if self.checkTransferred():\r\n self.proceed = False", "def prepare_wiki_page(self, req, page, fields):\r\n pass", "def create_page(self):", "def _DoCommonRequestProcessing(self, request, mr):\n with mr.profiler.Phase('basic processing'):\n self._CheckForMovedProject(mr, request)\n self.AssertBasePermission(mr)", "def check(self):\n if not self.logged_in:\n self.threaded_login()\n #QTimer().singleShot(2000,lambda: self.check())\n #return\n #print \"Getting weapon page\"\n self.threaded_get_page(\"weapon\", self.process_weapon_stats)\n if self.check_challenge:\n self.threaded_get_page(\"challenges\", self.process_challenge)", "def by_page(self) -> global___Snippet.PaginatedResponseHandling.ByPage:", "def by_page(self) -> global___Snippet.PaginatedResponseHandling.ByPage:", "def run(self):\n\n try:\n # Get the content from this page\n if self.verbose:\n print \"Getting page content for '%s'\" % self.url.strip()\n \n content = getPageContent(self.url)\n\n # Verify that this is not binary data\n if content is not None and isHTML(content):\n\n\n # Extract basic data about this result\n content = content.lower()\n title, keywords, description = 
parseMetaDataFromContent(content)\n headers = parseHeaderInformationFromContent(content)\n\n # Add this result data\n self.resultDictionary['title'] = title\n self.resultDictionary['keywords'] = keywords\n self.resultDictionary['description'] = description\n self.resultDictionary['content'] = content\n self.resultDictionary['headers'] = headers\n\n # Run the extensions\n for extension in self.extensions:\n extension.run(self.resultDictionary)\n\n\n except URLError:\n\n # Skip this URL, and register it as an error on the cache\n if self.verbose:\n print(\"Error accessing '%s', %s\" % (self.url.strip(), str(sys.exc_info()[1]).strip()))", "def page(self, request):\n draw = request.GET.get('draw', 0)\n length = int(request.GET.get('length', 5))\n start = int(request.GET.get('start', 0))\n order_column = int(request.GET.get('order[0][column]', 0))\n order_direction = request.GET.get('order[0][dir]', 'asc')\n search_keyword = request.GET.get('search[value]', '')\n raise NotImplementedError", "def _scrape(self):", "def do_GET(self):\n\n\t\t# Delegate the request to a specialised method appropriate for each route.\n\t\t# Creating a \"switch\" construct for choosing the right delegate method.\n\t\tdelegates = {'/contact.html' : self.processContactRequest,\n\t\t\t\t\t\t'/products.html' : self.processProductsRequest,\n\t\t\t\t\t\t'/report.html' : self.processReportRequest}\n\t\ttry:\n\t\t\tself.render(delegates[self.path]())\n\t\texcept:\n\t\t\tself.render(self.page_not_found())", "def _handle_first_request(self):\n pass", "def execute(pagename, request):\n PackagePages(pagename, request).render()", "def __setParentPage(self):\n page = {}\n try:\n page['et_thread_hierarchy'] = self.__hierarchy = [x.strip() for x in re.split('>|\\n',stripHtml(self.soup.find('div', 'vt_h2').renderContents())) if x.strip()]\n page['title'] = page['et_thread_hierarchy'][-1]\n except:\n log.info(self.log_msg('hierachies not found in url %s'%self.currenturi))\n return \n try:\n date_str = stripHtml(self.soup.find('span', 'vt_first_timestamp').renderContents())\n date_str = re.sub(\"(\\d+)(st|nd|rd|th)\",r\"\\1\", date_str)\n page['posted_date'] = datetime.strftime(datetime.strptime(date_str\n , 'on %B %d, %Y'),\"%Y-%m-%dT%H:%M:%SZ\")\n except:\n page['posted_date'] = datetime.strftime(datetime.utcnow(), \"%Y-%m-%dT%H:%M:%SZ\")\n log.info(self.log_msg('Date not found in %s'% self.currenturi))\n \n if checkSessionInfo(self.__genre, self.session_info_out, self.task.instance_data['uri'],\\\n self.task.instance_data.get('update')):\n log.info(self.log_msg('Session info return True, Already exists'))\n return\n page_data_keys = ['et_author_name','ei_thread_replies_count','ei_thread_views_count'\\\n ,'edate_last_post_date','et_thread_last_post_author']\n [page.update({each:self.task.pagedata.get(each)}) for each in page_data_keys if self.task.pagedata.get(each) ]\n try:\n result = updateSessionInfo('review', self.session_info_out, self.\\\n task.instance_data['uri'], get_hash( page ), 'forum', self.task.instance_data.get('update'))\n if result['updated']:\n page['path']=[ self.task.instance_data['uri'] ] \n page['parent_path']=[]\n page['uri'] = self.currenturi\n page['uri_domain'] = unicode(urlparse.urlparse(page['uri'])[1])\n page['data'] = ''\n page['entity'] = 'thread'\n page.update(self.__task_elements_dict)\n self.pages.append(page)\n else:\n log.info(self.log_msg('Result[updated] returned True for \\\n uri'%self.currenturi))\n except :\n log.exception(self.log_msg(\"parent post couldn't be parsed\"))", "def page16(self):\n 
result = request1601.GET('/Cars_Sample_App/about.do', None,\n ( NVPair('Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'),\n NVPair('Referer', 'http://supercars-tomcat:8080/Cars_Sample_App/insurance.do'), ))\n\n grinder.sleep(118)\n request1602.GET('/Cars_Sample_App/images/about_car.gif', None,\n ( NVPair('Accept', '*/*'),\n NVPair('Referer', 'http://supercars-tomcat:8080/Cars_Sample_App/about.do'), ))\n\n return result", "def distribute(self, page):\n self.crawl_page(self.keyword, self.since, self.to, page)", "def __init__(self):\n self.tasks = []\n self.page_context = PageContext()", "def init_page_elements(self):\n pass", "def test_page_view_permission(self):\n \n adminonlypage = create_page_in_admin(self.testproject,\"adminonlypage\",\n permission_lvl=Page.ADMIN_ONLY) \n registeredonlypage = create_page_in_admin(self.testproject,\"registeredonlypage\",\n permission_lvl=Page.REGISTERED_ONLY)\n publicpage = create_page_in_admin(self.testproject,\"publicpage\",\n permission_lvl=Page.ALL)\n \n self._test_page_can_be_viewed(self.projectadmin,adminonlypage)\n self._test_page_can_not_be_viewed(self.participant,adminonlypage)\n self._test_page_can_not_be_viewed(self.registered_user,adminonlypage) \n self._test_page_can_not_be_viewed(None,adminonlypage) # None = not logged in\n \n self._test_page_can_be_viewed(self.projectadmin,registeredonlypage)\n self._test_page_can_be_viewed(self.participant,registeredonlypage)\n self._test_page_can_not_be_viewed(self.registered_user,registeredonlypage)\n self._test_page_can_not_be_viewed(None,registeredonlypage) # None = not logged in\n \n self._test_page_can_be_viewed(self.projectadmin,publicpage)\n self._test_page_can_be_viewed(self.participant,publicpage)\n self._test_page_can_be_viewed(self.registered_user,publicpage)\n self._test_page_can_be_viewed(None,publicpage) # None = not logged in", "def dispatch(self, request, *args, **kwargs):\n\n if self.page['page_type'] != self.page_type:\n raise Http404\n return super().dispatch(request, *args, **kwargs)", "def test_page_existence(self):\r\n for page in self.pages:\r\n page.visit()", "def getPage(**kwargs):\n\n if conf.delay is not None and isinstance(conf.delay, (int, float)) and conf.delay > 0:\n time.sleep(conf.delay)\n elif conf.cpuThrottle:\n delay = 0.00001 * (conf.cpuThrottle ** 2)\n time.sleep(delay)\n\n kb.locks.reqLock.acquire()\n\n kb.lastRequestUID += 1\n requestID = kb.lastRequestUID\n\n kb.locks.reqLock.release()\n\n url = kwargs.get('url', conf.url).replace(\" \", \"%20\")\n get = kwargs.get('get', None)\n post = kwargs.get('post', None)\n method = kwargs.get('method', None)\n cookie = kwargs.get('cookie', None)\n ua = kwargs.get('ua', None)\n direct = kwargs.get('direct', False)\n multipart = kwargs.get('multipart', False)\n silent = kwargs.get('silent', False)\n raise404 = kwargs.get('raise404', True)\n auxHeaders = kwargs.get('auxHeaders', None)\n response = kwargs.get('response', False)\n\n page = \"\"\n cookieStr = \"\"\n requestMsg = \"HTTP request [#%d]:\\n%s \" % (requestID, conf.method)\n requestMsg += \"%s\" % urlparse.urlsplit(url)[2] or \"/\"\n responseMsg = \"HTTP response \"\n requestHeaders = \"\"\n responseHeaders = \"\"\n logHeaders = \"\"\n\n try:\n if silent:\n socket.setdefaulttimeout(3)\n\n if direct:\n if \"?\" in url:\n url, params = url.split(\"?\")\n params = urlencode(params)\n url = \"%s?%s\" % (url, params)\n requestMsg += \"?%s\" % params\n\n elif multipart:\n # Needed in this form because of potential circle dependency \n # problem 
(option -> update -> connect -> option)\n from lib.core.option import proxyHandler\n\n multipartOpener = urllib2.build_opener(proxyHandler, multipartpost.MultipartPostHandler)\n conn = multipartOpener.open(url, multipart)\n page = conn.read()\n responseHeaders = conn.info()\n page = decodePage(page, responseHeaders.get(\"Content-Encoding\"), responseHeaders.get(\"Content-Type\"))\n\n return page\n\n else:\n if conf.parameters.has_key(PLACE.GET) and not get:\n get = conf.parameters[PLACE.GET]\n\n if get:\n url = \"%s?%s\" % (url, get)\n requestMsg += \"?%s\" % get\n\n if conf.method == HTTPMETHOD.POST:\n if conf.parameters.has_key(PLACE.POST) and not post:\n post = conf.parameters[PLACE.POST]\n\n requestMsg += \" %s\" % httplib.HTTPConnection._http_vsn_str\n\n # Perform HTTP request\n headers = forgeHeaders(cookie, ua)\n\n if kb.authHeader:\n headers[\"Authorization\"] = kb.authHeader\n\n if kb.proxyAuthHeader:\n headers[\"Proxy-authorization\"] = kb.proxyAuthHeader\n\n if auxHeaders:\n for key, item in auxHeaders.items():\n headers[key] = item\n\n if method:\n req = MethodRequest(url, post, headers)\n req.set_method(method)\n else:\n req = urllib2.Request(url, post, headers)\n\n if not conf.dropSetCookie and conf.cj:\n for _, cookie in enumerate(conf.cj):\n if not cookieStr:\n cookieStr = \"Cookie: \"\n\n cookie = getUnicode(cookie)\n index = cookie.index(\" for \")\n\n cookieStr += \"%s; \" % cookie[8:index]\n\n conn = urllib2.urlopen(req)\n\n if not req.has_header(\"Accept-Encoding\"):\n requestHeaders += \"Accept-Encoding: identity\\n\"\n\n requestHeaders += \"\\n\".join([\"%s: %s\" % (header, value) for header, value in req.header_items()])\n\n if not req.has_header(\"Cookie\") and cookieStr:\n requestHeaders += \"\\n%s\" % cookieStr[:-2]\n\n if not req.has_header(\"Connection\"):\n requestHeaders += \"\\nConnection: close\"\n\n requestMsg += \"\\n%s\" % requestHeaders\n\n if post:\n requestMsg += \"\\n%s\" % post\n\n requestMsg += \"\\n\"\n\n logger.log(8, requestMsg)\n\n if not kb.authHeader and req.has_header(\"Authorization\"):\n kb.authHeader = req.get_header(\"Authorization\")\n\n if not kb.proxyAuthHeader and req.has_header(\"Proxy-authorization\"):\n kb.proxyAuthHeader = req.get_header(\"Proxy-authorization\")\n\n if hasattr(conn, \"redurl\") and hasattr(conn, \"redcode\") and not conf.redirectHandled:\n msg = \"sqlmap got a %d redirect to \" % conn.redcode\n msg += \"%s - What target address do you \" % conn.redurl\n msg += \"want to use from now on? 
%s \" % conf.url\n msg += \"(default) or provide another target address based \"\n msg += \"also on the redirection got from the application\\n\"\n\n while True:\n choice = readInput(msg, default=\"1\")\n\n if not choice or choice == \"1\":\n pass\n else:\n conf.url = choice\n return Connect.__getPageProxy(**kwargs)\n\n break\n\n conf.redirectHandled = True\n\n # Reset the number of connection retries\n conf.retriesCount = 0\n\n # Return response object\n if response:\n return conn, None\n\n # Get HTTP response\n page = conn.read()\n code = conn.code\n status = conn.msg\n responseHeaders = conn.info()\n page = decodePage(page, responseHeaders.get(\"Content-Encoding\"), responseHeaders.get(\"Content-Type\"))\n\n except urllib2.HTTPError, e:\n code = e.code\n status = e.msg\n\n try:\n page = e.read()\n responseHeaders = e.info()\n except socket.timeout:\n warnMsg = \"connection timed out while trying \"\n warnMsg += \"to get error page information (%d)\" % code\n logger.warn(warnMsg)\n return None, None\n except:\n pass\n\n responseMsg = \"\\n%s[#%d] (%d %s):\\n\" % (responseMsg, requestID, code, status)\n\n if responseHeaders:\n logHeaders = \"\\n\".join([\"%s: %s\" % (key.capitalize() if isinstance(key, basestring) else key, value) for (key, value) in responseHeaders.items()])\n\n logHTTPTraffic(requestMsg, \"%s%s\\n\\n%s\" % (responseMsg, logHeaders, page))\n\n if e.code == 401:\n errMsg = \"not authorized, try to provide right HTTP \"\n errMsg += \"authentication type and valid credentials (%d)\" % code\n raise sqlmapConnectionException, errMsg\n elif e.code == 404 and raise404:\n errMsg = \"page not found (%d)\" % code\n raise sqlmapConnectionException, errMsg\n else:\n debugMsg = \"got HTTP error code: %d (%s)\" % (code, status)\n logger.debug(debugMsg)\n\n except (urllib2.URLError, socket.error, socket.timeout, httplib.BadStatusLine, httplib.IncompleteRead), e:\n tbMsg = traceback.format_exc()\n\n if \"URLError\" in tbMsg or \"error\" in tbMsg:\n warnMsg = \"unable to connect to the target url\"\n elif \"timeout\" in tbMsg:\n warnMsg = \"connection timed out to the target url\"\n elif \"BadStatusLine\" in tbMsg:\n warnMsg = \"the target url responded with an unknown HTTP \"\n warnMsg += \"status code, try to force the HTTP User-Agent \"\n warnMsg += \"header with option --user-agent or -a\"\n elif \"IncompleteRead\" in tbMsg:\n warnMsg = \"there was an incomplete read error while retrieving data \"\n warnMsg += \"from the target url\"\n else:\n warnMsg = \"unable to connect to the target url\"\n\n if \"BadStatusLine\" not in tbMsg:\n warnMsg += \" or proxy\"\n\n if silent:\n return None, None\n elif conf.retriesCount < conf.retries:\n conf.retriesCount += 1\n\n warnMsg += \", sqlmap is going to retry the request\"\n logger.critical(warnMsg)\n\n time.sleep(1)\n\n socket.setdefaulttimeout(conf.timeout)\n return Connect.__getPageProxy(**kwargs)\n else:\n socket.setdefaulttimeout(conf.timeout)\n raise sqlmapConnectionException, warnMsg\n\n socket.setdefaulttimeout(conf.timeout)\n\n page = sanitizeAsciiString(page)\n page = getUnicode(page)\n parseResponse(page, responseHeaders)\n\n responseMsg += \"[#%d] (%d %s):\\n\" % (requestID, code, status)\n logHeaders = \"\\n\".join([\"%s: %s\" % (key.capitalize() if isinstance(key, basestring) else key, value) for (key, value) in responseHeaders.items()])\n\n logHTTPTraffic(requestMsg, \"%s%s\\n\\n%s\" % (responseMsg, logHeaders, page))\n\n if conf.verbose <= 5:\n responseMsg += getUnicode(logHeaders)\n elif conf.verbose > 5:\n responseMsg += 
\"%s\\n%s\\n\" % (logHeaders, page)\n\n logger.log(7, responseMsg)\n\n if conf.parseErrors:\n msg = extractErrorMessage(page)\n if msg:\n logger.info(\"parsed error message: '%s'\" % msg)\n\n return page, responseHeaders", "def crawler(self):\n\n\t\tfor page in range(self.first_page, self.last_page+1):\n\t\t\tprint(\"\\nCrawling Page \" + str(page))\n\t\t\tpage_url = self.site_url + \"?page=\" + str(page) +\\\n\t\t\t \"&index=prod_all_products_term_optimization\"\n\t\t\t\n\t\t\tself.scrape_features(page_url)", "def do_POST(self):\r\n self.do_GET()", "def page1(self):\n result = request101.GET('/Cars_Sample_App/supercars.do', None,\n ( NVPair('Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'), ))\n self.token_query = \\\n httpUtilities.valueFromBodyURI('query') # 'manu'\n # 15 different values for token_mid found in response, using the first one.\n self.token_mid = \\\n httpUtilities.valueFromBodyURI('mid') # '3'\n\n grinder.sleep(124)\n request102.GET('/Cars_Sample_App/images/enquire_but.gif')\n\n request103.GET('/Cars_Sample_App/images/line.gif')\n\n request104.GET('/Cars_Sample_App/images/manufacturers/Bmw.gif')\n\n request105.GET('/Cars_Sample_App/images/manufacturers/AstonMartin.gif')\n\n request106.GET('/Cars_Sample_App/images/manufacturers/Ferrari.gif')\n\n request107.GET('/Cars_Sample_App/images/insurance_but.gif')\n\n grinder.sleep(90)\n request108.GET('/Cars_Sample_App/images/manufacturers/Porsche.gif')\n\n request109.GET('/Cars_Sample_App/images/manufacturers/Jaguar.gif')\n\n request110.GET('/Cars_Sample_App/images/pipe.gif')\n\n request111.GET('/Cars_Sample_App/images/manufacturers/Lotus.gif')\n\n return result", "def request(self, flow: mitmproxy.http.HTTPFlow):", "def request(self, flow: mitmproxy.http.HTTPFlow):", "def GatherPageData(self, mr):\n # TODO(jrobbins): Allow deep-linking into this page.\n canned_query_views = []\n if mr.project_id:\n with mr.profiler.Phase('getting canned queries'):\n canned_queries = self.services.features.GetCannedQueriesByProjectID(\n mr.cnxn, mr.project_id)\n canned_query_views = [\n savedqueries_helpers.SavedQueryView(sq, idx + 1, None, None)\n for idx, sq in enumerate(canned_queries)]\n\n saved_query_views = []\n if mr.auth.user_id and self.services.features:\n with mr.profiler.Phase('getting saved queries'):\n saved_queries = self.services.features.GetSavedQueriesByUserID(\n mr.cnxn, mr.me_user_id)\n saved_query_views = [\n savedqueries_helpers.SavedQueryView(sq, idx + 1, None, None)\n for idx, sq in enumerate(saved_queries)\n if (mr.project_id in sq.executes_in_project_ids or\n not mr.project_id)]\n\n return {\n 'issue_tab_mode': 'issueAdvSearch',\n 'page_perms': self.MakePagePerms(mr, None, permissions.CREATE_ISSUE),\n 'canned_queries': canned_query_views,\n 'saved_queries': saved_query_views,\n }", "def fourth_page_execution(self):\n self.errors_and_correct_input_values_helper()\n self.fifth_page.wait_for_page()", "def page47(self):\n self.token_target = \\\n 'POJOCache'\n self.token_mode = \\\n 'Reset'\n result = request4701.GET('/clusterinfo-web/controller' +\n '?target=' +\n self.token_target +\n '&mode=' +\n self.token_mode)\n # 10 different values for token_target found in response, using the first one.\n self.token_target = \\\n httpUtilities.valueFromHiddenInput('target') # 'AllClusterState'\n\n return result", "def loop_pages(self, response):\n\n current_page = response.xpath(\"//a[@class='currentPage ']/text()\")\n print(\"current page: {0}\".format(current_page.extract_first()))\n\n next_page_link = 
response.xpath(\"//a[@class='text' and contains(., 'Next')]\")\n next_page_link = next_page_link.xpath('@href').extract_first()\n\n # urls_stories is a tuple with a url, and a corresponding Story object\n urls_stories = self.get_thread_urls(response)\n\n if self.generate_test is None:\n # generate requests for -- new -- stories\n for (url, story) in urls_stories:\n yield scrapy.Request(url, callback=self.scan_thread, priority=1, meta={\"story_item\": story})\n\n # generate requests for stories that need to be updated.\n for (url, story) in self.update_list:\n yield scrapy.Request(url, callback=self.update_stories, priority=2, meta={\"story_item\": story})\n\n if next_page_link is not None:\n\n # print(\"next page link: {0}\".format(next_page_link))\n next_page_link = response.urljoin(next_page_link)\n yield scrapy.Request(next_page_link, callback=self.loop_pages, priority=0)\n else:\n \"\"\"\n This section activates if self.generate_test is not None.\n A thread url is required to be provided to generate a test scenario out of that\n thread.\n It scans the site looking for this thread, and scrapes it.\n If it doesn't find it, it scans the next page.\n \"\"\"\n print(\"\\n\\tGENERATING TEST SCENARIO\\n\")\n for (url, story) in urls_stories:\n if url == self.test_url:\n yield scrapy.Request(url, callback=self.scan_thread, priority=0, meta={\"story_item\": story})\n return\n\n for (url, story) in self.update_list:\n if url == self.test_url:\n yield scrapy.Request(url, callback=self.scan_thread, priority=0, meta={\"story_item\": story})\n return\n\n next_page_link = response.urljoin(next_page_link)\n yield scrapy.Request(next_page_link, callback=self.loop_pages, priority=0)", "def sixth_page_execution(self):\n self.errors_and_correct_input_values_helper()\n self.utility_page.click_next_button()\n self.seventh_page.wait_for_page()", "def __setParentPage(self):\n page = {}\n try:\n page['et_thread_hierarchy'] = self.__hierarchy = [x.strip() for x \\\n in stripHtml(self.soup.find('ul','lia-list-standard-inline')\\\n .renderContents()).split(':') if x.strip()]\n page['data'] = page['title'] = page['et_thread_hierarchy'][-1]\n except:\n log.info(self.log_msg('Thread hierarchy and Title Not found for uri\\\n %s'%self.currenturi))\n return\n if checkSessionInfo(self.__genre, self.session_info_out, self.task.\\\n instance_data['uri'], self.task.instance_data.get('update')):\n log.info(self.log_msg('Session info return True, Already exists'))\n return\n page_data_keys = ['et_author_name', 'ei_thread_replies_count', \\\n 'edate_last_post_date']\n [page.update({each:self.task.pagedata.get(each)}) for each in page_data_keys if self.task.pagedata.get(each)]\n try:\n date_str = re.sub('\\s+', ' ', stripHtml(self.soup.find('div', \\\n 'lia-panel-message-content').find('span', \\\n 'DateTime lia-message-posted-on lia-component-common-widget-date')\\\n .renderContents())) \n page['posted_date'] = datetime.strftime(datetime.strptime(date_str, \\\n '%m-%d-%Y %I:%M %p'), '%Y-%m-%dT%H:%M:%SZ')\n except:\n log.info(self.log_msg('Posted date not found'))\n page['posted_date'] = datetime.strftime(datetime.utcnow(), \\\n \"%Y-%m-%dT%H:%M:%SZ\")\n try:\n result = updateSessionInfo('review', self.session_info_out, self.\\\n task.instance_data['uri'], get_hash( page ), 'forum', \\\n self.task.instance_data.get('update'))\n if result['updated']:\n page['path'] = [ self.task.instance_data['uri'] ] \n page['parent_path'] = []\n page['uri'] = self.currenturi\n page['uri_domain'] = unicode(urlparse.urlparse(page['uri'])[1]) \n 
page['entity'] = 'thread'\n page.update(self.__task_elements_dict)\n self.pages.append(page)\n else:\n log.info(self.log_msg('Result[updated] returned True for \\\n uri'%self.currenturi))\n except :\n log.exception(self.log_msg(\"parent post couldn't be parsed\"))", "def _process_request(self, request, response):\n ...", "def eighth_page_execution(self):\n self.errors_and_correct_input_values_helper()\n self.ninth_page.wait_for_page()", "def dispatch(self):\n handler_start_time = time.time()\n\n logging.info('\\n\\n\\nRequest handler: %r', self)\n count0, count1, count2 = gc.get_count()\n logging.info('gc counts: %d %d %d', count0, count1, count2)\n GC_COUNT.add(count0, {'generation': 0})\n GC_COUNT.add(count1, {'generation': 1})\n GC_COUNT.add(count2, {'generation': 2})\n\n self.mr = monorailrequest.MonorailRequest(self.services)\n\n self.ratelimiter.CheckStart(self.request)\n self.response.headers.add('Strict-Transport-Security',\n 'max-age=31536000; includeSubDomains')\n\n if 'X-Cloud-Trace-Context' in self.request.headers:\n self.mr.profiler.trace_context = (\n self.request.headers.get('X-Cloud-Trace-Context'))\n if trace_service is not None:\n self.mr.profiler.trace_service = trace_service\n\n if self.services.cache_manager:\n # TODO(jrobbins): don't do this step if invalidation_timestep was\n # passed via the request and matches our last timestep\n try:\n with self.mr.profiler.Phase('distributed invalidation'):\n self.services.cache_manager.DoDistributedInvalidation(self.mr.cnxn)\n\n except MySQLdb.OperationalError as e:\n logging.exception(e)\n page_data = {\n 'http_response_code': httplib.SERVICE_UNAVAILABLE,\n 'requested_url': self.request.url,\n }\n self.template = template_helpers.GetTemplate(\n 'templates/framework/database-maintenance.ezt',\n eliminate_blank_lines=self._ELIMINATE_BLANK_LINES)\n self.template.WriteResponse(\n self.response, page_data, content_type='text/html')\n return\n\n try:\n with self.mr.profiler.Phase('parsing request and doing lookups'):\n self.mr.ParseRequest(self.request, self.services)\n\n self.response.headers['X-Frame-Options'] = 'SAMEORIGIN'\n webapp2.RequestHandler.dispatch(self)\n\n except exceptions.NoSuchUserException as e:\n logging.warning('Trapped NoSuchUserException %s', e)\n self.abort(404, 'user not found')\n\n except exceptions.NoSuchGroupException as e:\n logging.warning('Trapped NoSuchGroupException %s', e)\n self.abort(404, 'user group not found')\n\n except exceptions.InputException as e:\n logging.info('Rejecting invalid input: %r', e)\n self.response.status = httplib.BAD_REQUEST\n\n except exceptions.NoSuchProjectException as e:\n logging.info('Rejecting invalid request: %r', e)\n self.response.status = httplib.NOT_FOUND\n\n except xsrf.TokenIncorrect as e:\n logging.info('Bad XSRF token: %r', e.message)\n self.response.status = httplib.BAD_REQUEST\n\n except permissions.BannedUserException as e:\n logging.warning('The user has been banned')\n url = framework_helpers.FormatAbsoluteURL(\n self.mr, urls.BANNED, include_project=False, copy_params=False)\n self.redirect(url, abort=True)\n\n except ratelimiter.RateLimitExceeded as e:\n logging.info('RateLimitExceeded Exception %s', e)\n self.response.status = httplib.BAD_REQUEST\n self.response.body = 'Slow your roll.'\n\n finally:\n self.mr.CleanUp()\n self.ratelimiter.CheckEnd(self.request, time.time(), handler_start_time)\n\n total_processing_time = time.time() - handler_start_time\n logging.warn('Processed request in %d ms',\n int(total_processing_time * 1000))\n\n end_count0, 
end_count1, end_count2 = gc.get_count()\n logging.info('gc counts: %d %d %d', end_count0, end_count1, end_count2)\n if (end_count0 < count0) or (end_count1 < count1) or (end_count2 < count2):\n GC_EVENT_REQUEST.increment()\n\n if settings.enable_profiler_logging:\n self.mr.profiler.LogStats()\n\n if (self.mr.profiler.trace_context is not None and\n random.random() < settings.trace_fraction):\n self.mr.profiler.ReportTrace()", "def first_page_execution(self):\n self.errors_and_correct_input_values_helper(wrong_pattern_error=True)\n self.utility_page.click_next_button()\n self.utility_page.click_next_button()\n self.second_page.wait_for_page()", "def procesPage(self, page):\n item = pywikibot.ItemPage.fromPage(page)\n pywikibot.output('Processing %s' % page)\n if not item.exists():\n pywikibot.output('%s doesn\\'t have a wikidata item :(' % page)\n #TODO FIXME: We should provide an option to create the page\n else:\n pagetext = page.get()\n templates = pywikibot.extract_templates_and_params(pagetext)\n for (template, fielddict) in templates:\n # Clean up template\n template = pywikibot.Page(page.site, template,\n ns=10).title(withNamespace=False)\n # We found the template we were looking for\n if template in self.templateTitles:\n for field, value in fielddict.items():\n field = field.strip()\n value = value.strip()\n # This field contains something useful for us\n if field in self.fields:\n # Check if the property isn't already set\n claim = pywikibot.Claim(self.repo, self.fields[field])\n if claim.getID() in item.get().get('claims'):\n pywikibot.output(\n u'A claim for %s already exists. Skipping'\n % claim.getID())\n # TODO FIXME: This is a very crude way of dupe\n # checking\n else:\n if claim.getType() == 'wikibase-item':\n # Try to extract a valid page\n match = re.search(pywikibot.link_regex, value)\n if match:\n try:\n link = pywikibot.Link(match.group(1))\n linkedPage = pywikibot.Page(link)\n if linkedPage.isRedirectPage():\n linkedPage = linkedPage.getRedirectTarget()\n linkedItem = pywikibot.ItemPage.fromPage(linkedPage)\n claim.setTarget(linkedItem)\n except pywikibot.exceptions.NoPage:\n pywikibot.output('[[%s]] doesn\\'t exist so I can\\'t link to it' % (linkedItem.title(),))\n continue\n elif claim.getType() == 'string':\n claim.setTarget(value.strip())\n else:\n pywikibot.output(\"%s is not a supported datatype.\" % claim.getType())\n continue\n\n pywikibot.output('Adding %s --> %s' % (claim.getID(), claim.getTarget()))\n item.addClaim(claim)\n # A generator might yield pages from multiple sites\n source = self.getSource(page.site)\n if source:\n claim.addSource(source, bot=True)", "def accessed(self):\r\n\r\n url = '{0}/{1}'.format(self.get_url(), 'accessed')\r\n\r\n request = http.Request('POST', url, None)\r\n return request, parsers.parse_json", "def show_pages():\n page_id=request.args['page_id']\n\n if str(request.args['r_type'])==\"Create\":\n return render_template(\"write.html\",page_id=page_id)\n\n if request.args['referrer']==\"home\":\n graph = facebook.GraphAPI(page_id)\n pages = graph.get_object('me')\n is_published = request.args['r_type'] == \"Show_Published\"\n print_me(is_published)\n res = graph.get_connections(pages['id'], 'promotable_posts', is_published=is_published, limit=8)\n else:\n res = requests.request(\"GET\",request.args['next']).json()\n posts,next_page = get_posts_from_graph_object(res)\n views_dict={}\n thread_list = []\n message_dict={} \n id_list=[]\n for post in posts:\n if \"message\" in post:\n post_id = post['id']\n message_dict[post_id] 
= post['message']\n id_list.append(post_id)\n thread=Thread(target=get_views,args=(page_id, post_id, views_dict) )\n thread.start()\n thread_list.append(thread)\n\n for thread in thread_list:\n thread.join()\n posts_info=[]\n for i in id_list:\n posts_info.append( (message_dict[i], views_dict[i] ) )\n return render_template('show_posts.html', posts_info=(posts_info,next_page,page_id))", "def seventh_page_execution(self):\n self.errors_and_correct_input_values_helper()\n self.eighth_page.wait_for_page()", "def endpoints(self):\n\n # Yields the home page\n gui_uri = self.app.config.GUI_PX\n yield self.page(MiyagiAppHome, gui_uri)\n\n # Yields the process list page\n processes_uri = f'{gui_uri}{self.app.config.PROCESSES_PX}'\n yield self.page(ProcessesPage, processes_uri)\n\n for p_name, process in self.app.processes.items():\n # For every process yields the relative general page\n process_uri = f'{processes_uri}/{p_name}'\n yield self.page(\n ProcessPage,\n process_uri,\n process=process\n )\n for obj in process.objects:\n # For every object in the process yields the relative page\n # List of instances + general object actions\n object_uri = f'{process_uri}{self.app.config.OBJECTS_PX}/{obj.name.lower()}'\n yield self.page(\n ObjectPage,\n object_uri,\n handler='generic_handler',\n methods=['GET', ],\n process=process,\n obj=obj\n )\n\n # For every object in the process yields the object creation form\n yield self.page(\n ObjectEditPage,\n f'{object_uri}/<uid>',\n handler='create_modify_object_handler',\n methods=['GET', 'POST'],\n process=process,\n obj=obj\n )\n # TODO: object remove endpoint\n\n # TODO: object actions endpoints\n # Object class methods\n\n # TODO: process actions endopoints\n\n # TODO: System endpoints and controllers", "def __action_member_top(self, html):\n return self.__action_mypage(html)", "def page48(self):\n self.token_target = \\\n 'POJOCache'\n self.token_mode = \\\n 'Update'\n result = request4801.GET('/clusterinfo-web/controller' +\n '?target=' +\n self.token_target +\n '&mode=' +\n self.token_mode)\n # 10 different values for token_target found in response, using the first one.\n self.token_target = \\\n httpUtilities.valueFromHiddenInput('target') # 'AllClusterState'\n\n return result", "def _post_dispatch(self, request, *args, **kwargs):\n pass", "def test_page_existence(self):\r\n # Log in\r\n self.auth_page.visit()\r\n\r\n # Verify that each page is available\r\n for page in self.pages:\r\n page.visit()", "def _update_pagination_data(self, page_content, current):\n target = self.html.xpath(\"//input[@class='rgPageNext']\")[0].attrib[\n \"name\"\n ]\n if int(current) > 1:\n # After the first page - find view state etc here.\n viewstate = page_content.split(\"__VIEWSTATE|\")[1].split(\"|\")[0]\n valiation = page_content.split(\"__EVENTVALIDATION|\")[1].split(\"|\")[\n 0\n ]\n else:\n viewstate = self.html.xpath(self.vs_xpath)[0].attrib[\"value\"]\n valiation = self.html.xpath(self.ev_xpath)[0].attrib[\"value\"]\n\n self.data[\"__EVENTTARGET\"] = target\n self.data[\"__VIEWSTATE\"] = viewstate\n self.data[\"__EVENTVALIDATION\"] = valiation", "def page2(self):\r\n self.token_formId = \\\r\n '309'\r\n result = request201.GET('/sustain/cms/case' +\r\n '?formId=' +\r\n self.token_formId +\r\n '&id=' +\r\n self.token_id +\r\n '&caseId=' +\r\n self.token_caseId)\r\n tp.process_result()\r\n # 15 different values for token_formId found in response, using the first one.\r\n self.token_formId = \\\r\n httpUtilities.valueFromBodyURI('formId') # '359'\r\n # 5 different 
values for token_id found in response; the first matched\r\n # the last known value of token_id - don't update the variable.\r\n self.token_orgapachestrutstaglibhtmlTOKEN = \\\r\n httpUtilities.valueFromHiddenInput('org.apache.struts.taglib.html.TOKEN') # '8b603b338d6e00e8e6f4c44e3dd063b1'\r\n\r\n grinder.sleep(20)\r\n request202.GET('/sustain/common/images/close.gif', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:40 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"849-1271277700000\\\"'), ))\r\n tp.process_result()\r\n\r\n grinder.sleep(20)\r\n request203.GET('/sustain/assets/skins/pacificScene/skin.css' +\r\n '?build=' +\r\n self.token_build, None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:22 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"764-1271277682000\\\"'), ))\r\n tp.process_result()\r\n\r\n request204.GET('/sustain/assets/css/base.css' +\r\n '?build=' +\r\n self.token_build, None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:22 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"109340-1271277682000\\\"'), ))\r\n tp.process_result()\r\n\r\n grinder.sleep(20)\r\n request205.GET('/sustain/common/css/tabs/tabbase.css' +\r\n '?build=' +\r\n self.token_build, None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:42 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"729-1271277702000\\\"'), ))\r\n tp.process_result()\r\n\r\n request206.GET('/sustain/assets/img/roundedCorners.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"1092-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n request207.GET('/sustain/assets/img/roundedBorderNS.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"2864-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n request208.GET('/sustain/assets/img/roundedBorderWE.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"2898-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n request209.GET('/sustain/assets/img/shadowCorners.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"3037-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n request210.GET('/sustain/assets/img/shadowNS.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"3014-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n request211.GET('/sustain/assets/img/shadowWE.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"3091-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n request212.GET('/sustain/assets/img/shadowC.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"6735-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n grinder.sleep(20)\r\n request213.GET('/sustain/assets/img/main-background.jpg', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"622-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n grinder.sleep(20)\r\n request214.GET('/sustain/assets/img/icons/topnav-icons.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:22 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"41101-1271277682000\\\"'), ))\r\n tp.process_result()\r\n\r\n grinder.sleep(20)\r\n request215.GET('/sustain/assets/img/magnifier.png', None,\r\n ( 
NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"716-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n grinder.sleep(20)\r\n request216.GET('/sustain/assets/img/eCourt-logo-small.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"2846-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n grinder.sleep(20)\r\n request217.GET('/sustain/assets/skins/pacificScene/corners.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:22 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"660-1271277682000\\\"'), ))\r\n tp.process_result()\r\n\r\n request218.GET('/sustain/assets/img/arrows-left.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"350-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n grinder.sleep(20)\r\n request219.GET('/sustain/assets/img/eCourt-logo-mini.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"1347-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n request220.GET('/sustain/assets/img/arrows-right.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"348-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n request221.GET('/sustain/assets/img/leftnav_bg.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"163-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n request222.GET('/sustain/assets/img/tn.gif', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"504-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n request223.GET('/sustain/assets/img/ln.gif', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"142-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n request224.GET('/sustain/assets/img/case-header-bg.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"21172-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n grinder.sleep(20)\r\n request225.GET('/sustain/assets/img/icon-note-add.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"488-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n request226.GET('/sustain/assets/img/icon-print.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"687-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n request227.GET('/sustain/assets/img/icon-case-status.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"389-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n request228.GET('/sustain/assets/img/transparent-white.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"4646-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n grinder.sleep(20)\r\n request229.GET('/sustain/common/images/x.gif', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:40 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"43-1271277700000\\\"'), ))\r\n tp.process_result()\r\n\r\n request230.GET('/sustain/assets/img/bg_button_a.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n 
NVPair('If-None-Match', 'W/\\\"3424-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n grinder.sleep(20)\r\n request231.GET('/sustain/assets/img/bg_button_a-selected.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"663-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n request232.GET('/sustain/assets/img/bg_button_span-selected.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"844-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n grinder.sleep(20)\r\n request233.GET('/sustain/common/images/ajax-loader.gif', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:40 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"701-1271277700000\\\"'), ))\r\n tp.process_result()\r\n\r\n request234.GET('/sustain/assets/img/caseislandheader-default-bg.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"357-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n grinder.sleep(20)\r\n request235.GET('/sustain/assets/img/caseSubislandheader-default-bg.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"4102-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n request236.GET('/sustain/assets/img/island-shadow-top-right-corner.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"2847-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n grinder.sleep(20)\r\n request237.GET('/sustain/assets/img/island-shadow-bottom-left-corner.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"2847-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n request238.GET('/sustain/common/images/spacer.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:40 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"110-1271277700000\\\"'), ))\r\n tp.process_result()\r\n\r\n request239.GET('/sustain/assets/img/case-toolbar-bg-selected.jpg', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"728-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n request240.GET('/sustain/assets/img/island-shadow-right-repeat.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"2858-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n request241.GET('/sustain/assets/img/island-shadow-bottom-repeat.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"2849-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n request242.GET('/sustain/assets/img/island-shadow-bottom-right-corner.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"158-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n request243.GET('/sustain/assets/img/casetoolbar-default-bg.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"4510-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n grinder.sleep(20)\r\n request244.GET('/sustain/assets/img/cti-view-person.gif', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"558-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n 
request245.GET('/sustain/assets/img/icon-insert-or-add.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"399-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n grinder.sleep(20)\r\n request246.GET('/sustain/assets/img/icon-special.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"761-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n request247.GET('/sustain/assets/img/icon-edit.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"664-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n request248.GET('/sustain/assets/img/cti-audit-log.gif', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"559-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n request249.GET('/sustain/assets/img/bg_button_a.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"3424-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n request250.GET('/sustain/assets/img/bg_button_span.png', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:24 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"3607-1271277684000\\\"'), ))\r\n tp.process_result()\r\n\r\n grinder.sleep(20)\r\n request251.GET('/sustain/common/images/boxminus-blu.gif', None,\r\n ( NVPair('If-Modified-Since', 'Wed, 14 Apr 2010 20:41:40 GMT'),\r\n NVPair('If-None-Match', 'W/\\\"826-1271277700000\\\"'), ))\r\n tp.process_result()\r\n\r\n grinder.sleep(20)\r\n request252.GET('/sustain/assets/img/sortup.gif')\r\n tp.process_result()\r\n\r\n return result", "def page15(self):\n result = request1501.GET('/Cars_Sample_App/insurance.do', None,\n ( NVPair('Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'),\n NVPair('Referer', 'http://supercars-tomcat:8080/Cars_Sample_App/sell.do?query=save'), ))\n\n return result", "def __getParentPage(self):\r\n page = {}\r\n try:\r\n self.hierarchy = page['et_thread_hierarchy'] = [stripHtml(x.renderContents()) for x in self.soup.find('p','breadcrumb').findAll('a')][2:]\r\n page['title']= page['et_thread_hierarchy'][-1]\r\n except:\r\n log.info(self.log_msg('Thread hierarchy is not found'))\r\n page['title']=''\r\n try:\r\n self.thread_id = page['et_thread_id'] = self.currenturi.split('/')[-1].replace('.aspx','')\r\n except:\r\n log.info(self.log_msg('Thread id not found'))\r\n if checkSessionInfo(self.genre, self.session_info_out, self.parent_uri,\\\r\n self.task.instance_data.get('update')):\r\n log.info(self.log_msg('Session info return True, Already exists'))\r\n return False\r\n\r\n for each in ['et_author_name','ei_thread_num_replies','ei_thread_num_views','edate_last_post_date','ef_thread_rating']:\r\n try:\r\n page[each] = self.task.pagedata[each]\r\n except:\r\n log.info(self.log_msg('page data cannot be extracted for %s'%each))\r\n try:\r\n date_str = stripHtml(self.soup.find('div','wrapper_comment').find('p','post_date').renderContents()).split('|')[0].strip()\r\n page['posted_date'] = datetime.strftime(datetime.strptime(date_str,'%m-%d-%Y %I:%M %p'),\"%Y-%m-%dT%H:%M:%SZ\")\r\n except:\r\n page['posted_date'] = datetime.strftime(datetime.utcnow(),\"%Y-%m-%dT%H:%M:%SZ\")\r\n log.info(self.log_msg('Posted date not found'))\r\n try:\r\n post_hash = get_hash( page )\r\n id=None\r\n if self.session_info_out=={}:\r\n id=self.task.id\r\n result = updateSessionInfo( 
self.genre, self.session_info_out, self.\\\r\n parent_uri, post_hash,'Forum',self.task.instance_data.get('update'), Id=id)\r\n if not result['updated']:\r\n return False\r\n #page['first_version_id']=result['first_version_id']\r\n #page['id'] = result['id']\r\n page['path']=[self.parent_uri]\r\n page['parent_path']=[]\r\n page['uri'] = self.currenturi\r\n page['uri_domain'] = unicode(urlparse.urlparse(page['uri'])[1])\r\n page['priority']=self.task.priority\r\n page['level']=self.task.level\r\n page['pickup_date'] = datetime.strftime(datetime.utcnow(),\"%Y-%m-%dT%H:%M:%SZ\")\r\n page['connector_instance_log_id'] = self.task.connector_instance_log_id\r\n page['connector_instance_id'] = self.task.connector_instance_id\r\n page['workspace_id'] = self.task.workspace_id\r\n page['client_id'] = self.task.client_id\r\n page['client_name'] = self.task.client_name\r\n page['last_updated_time'] = page['pickup_date']\r\n page['versioned'] = False\r\n page['data'] = ''\r\n page['task_log_id']=self.task.id\r\n page['entity'] = 'Post'\r\n page['category']=self.task.instance_data.get('category','')\r\n self.pages.append(page)\r\n log.info(page)\r\n log.info(self.log_msg('Parent Page added'))\r\n return True\r\n except :\r\n log.exception(self.log_msg(\"parent post couldn't be parsed\"))\r\n return False", "def get_page(self):\n self.browser.get(self.url)", "def get(self):\n self.render_improper_access()", "def treat_page(self):\n # Load the page\n text = self.current_page.text\n\n # Munge!\n for program in self.getOption('filters'):\n text = self.pipe(program, text)\n\n # only save if something was changed\n self.put_current(text)", "def __next_page(self):\n self.current_page = self.current_page + 1\n tree = ET.parse(urlopen(self.url + '&start=' + str(self.current_page)))\n self.iterator = tree.iterfind(self.GLOBAL_NP + 'entry')", "def serve(self, request, *args, **kwargs):\n return Page.serve(self, request, *args, **kwargs)", "def ninth_page_execution(self):\n self.errors_and_correct_input_values_helper(wrong_pattern_error=True)\n self.utility_page.click_next_button()\n self.recorded_response.wait_for_page()", "def on_iteration(self):\n self.send_pending_requests()\n super().on_iteration()", "def main_page(self):\n choice = \"\"\n while choice != \"x\":\n header, main_menu, choices, underline = self.__get_format.main_menu_format()\n choice = self.__main_menu.main_page(header,main_menu,choices,underline)\n if choice == \"1\":\n self.__rent_controller.Rent_page()\n elif choice == \"2\":\n try_again = \"\"\n while try_again != \"n\":\n try_again, valid = self.__salesman_controller.sign_in_page()\n if valid == True:\n self.__salesman_controller.salesman_menu()\n elif choice == \"3\":\n self.__order_controller.find_order_process(page=2)\n elif choice == \"i\":\n self.__information_controller.information_page()", "def get(self, request):\n pass", "def procesPage(self, page):\n item = pywikibot.ItemPage.fromPage(page)\n pywikibot.output('Processing %s' % page)\n if not item.exists():\n pywikibot.output('%s doesn\\'t have a wikidata item :(' % page)\n #TODO FIXME: We should provide an option to create the page\n else:\n pagetext = page.get()\n templates = pywikibot.extract_templates_and_params(pagetext)\n for (template, fielddict) in templates:\n # We found the template we were looking for\n if template.replace(u'_', u' ')==self.templateTitle:\n for field, value in fielddict.items():\n # This field contains something useful for us\n if field in self.fields:\n # Check if the property isn't already set\n claim = 
pywikibot.Claim(self.repo, self.fields[field])\n if claim.getID() in item.get().get('claims'):\n pywikibot.output(u'A claim for %s already exists. Skipping' % (claim.getID(),))\n #TODO FIXME: This is a very crude way of dupe checking\n else:\n # Try to extract a valid page\n match = re.search(pywikibot.link_regex, value)\n if match:\n try:\n link = pywikibot.Link(match.group(1))\n linkedPage = pywikibot.Page(link)\n if linkedPage.isRedirectPage():\n linkedPage = linkedPage.getRedirectTarget()\n linkedItem = pywikibot.ItemPage.fromPage(linkedPage)\n claim.setTarget(linkedItem)\n pywikibot.output('Adding %s --> %s' % (claim.getID(), claim.getTarget().getID()))\n item.addClaim(claim)\n if self.source:\n claim.addSource(self.source, bot=True)\n except pywikibot.exceptions.NoPage:\n pywikibot.output('[[%s]] doesn\\'t exist so I can\\'t link to it' % (linkedItem.title(),))", "def _pre_dispatch(self, request, *args, **kwargs):\n pass", "def thread_refresh(self):\n\n self.pageNumber.set_text(str(self.page))\n self.pagePart.set_text(str(self.part))\n self.pageImage.set_from_file(self.webclient.loading)\n\n file = self.webclient.fetch(self.page, self.part)\n self.map = ()\n if file != self.webclient.default:\n self.map = self.webclient.fetch_map(self.page, self.part)\n self.pageImage.set_from_file(file)\n self.enable_controls(True)", "def request(self, flow: mitmproxy.http.HTTPFlow):\n pass", "def start_requests(self):\n self.spider = Base_Spider(LpCfg)\n self.first_url = 'https://www.liepin.com/zhaopin/' \\\n '?industries=&dqs=010&salary=15%2440' \\\n '&jobKind=2&pubTime=3&compkind=&compscale=' \\\n '&industryType=&searchType=1&clean_condition=' \\\n '&isAnalysis=&init=1&sortFlag=15&flushckid=1' \\\n '&fromSearchBtn=2&headckid=0b5a9690a5cb1d82&key=Python'\n urls = []\n s = self.spider.get_content(self.first_url)\n self.cookies = self.spider.session.cookies.get_dict()\n del s\n self.spider.headers.update({'Cookie': self.cookies})\n for page in range(1,5):\n url = self.first_url + '&curPage=%d'%page\n urls.append(url)\n for url in urls:\n print url\n yield scrapy.Request(url=url,\n callback=self.parse,\n headers=self.spider.headers,\n cookies=self.cookies\n )", "def request_html_page(self):\n try:\n response = requests.get('http://www.indeed.com/jobs?', params=self.payload)\n except:\n print \"got error for \", self.payload\n self.page = response.content", "def page11(self):\n self.token_target = \\\n 'Request'\n self.token_mode = \\\n 'Reset'\n result = request1101.GET('/clusterinfo-web/controller' +\n '?target=' +\n self.token_target +\n '&mode=' +\n self.token_mode)\n # 10 different values for token_target found in response, using the first one.\n self.token_target = \\\n httpUtilities.valueFromHiddenInput('target') # 'AllClusterState'\n\n return result", "def objects(self):\n catalog = getToolByName(self.context, \"portal_catalog\")\n query = {}\n utils = getToolByName(self.context, \"plone_utils\")\n query[\"portal_type\"] = utils.getUserFriendlyTypes()\n registry = getUtility(IRegistry)\n typesUseViewActionInListings = frozenset(\n registry.get(\"plone.types_use_view_action_in_listings\", [])\n )\n\n is_plone_site_root = IPloneSiteRoot.providedBy(self.context)\n if not is_plone_site_root:\n query[\"path\"] = \"/\".join(self.context.getPhysicalPath())\n\n query[\"is_default_page\"] = True\n default_page_modified = OOBTree()\n for item in catalog.searchResults(query):\n key = item.getURL().rsplit(\"/\", 1)[0]\n value = (item.modified.micros(), item.modified.ISO8601())\n default_page_modified[key] 
= value\n\n # The plone site root is not catalogued.\n if is_plone_site_root:\n loc = self.context.absolute_url()\n date = self.context.modified()\n # Comparison must be on GMT value\n modified = (date.micros(), date.ISO8601())\n default_modified = default_page_modified.get(loc, None)\n if default_modified is not None:\n modified = max(modified, default_modified)\n lastmod = modified[1]\n yield {\n \"loc\": loc,\n \"lastmod\": lastmod,\n # 'changefreq': 'always',\n # hourly/daily/weekly/monthly/yearly/never\n # 'prioriy': 0.5, # 0.0 to 1.0\n }\n\n query[\"is_default_page\"] = False\n for item in catalog.searchResults(query):\n loc = item.getURL()\n date = item.modified\n # Comparison must be on GMT value\n modified = (date.micros(), date.ISO8601())\n default_modified = default_page_modified.get(loc, None)\n if default_modified is not None:\n modified = max(modified, default_modified)\n lastmod = modified[1]\n if item.portal_type in typesUseViewActionInListings:\n loc += \"/view\"\n yield {\n \"loc\": loc,\n \"lastmod\": lastmod,\n # 'changefreq': 'always',\n # hourly/daily/weekly/monthly/yearly/never\n # 'prioriy': 0.5, # 0.0 to 1.0\n }", "def __init__(self, session, method):\n self._fetch = functools.partial(session.request, method)", "def process (self, data):\n\n if data['method'] == 'GET':\n code, page = self.get(data['resource'])\n elif data['method'] == 'PUT':\n code, page = self.put(data['resource'], data['body'])\n elif data['method'] == 'POST':\n code, page = self.post(data['resource'], data['body'])\n else:\n code, page = \"405 Method not allowed\",\\\n PAGE_NOT_ALLOWED.format(method=data['method'])\n return (code, page)", "async def _init_page(self, page: Page) -> None:\r\n self.pages[page] = {\r\n 'id': str(uuid4()),\r\n 'is_idle': False\r\n }\r\n # add custom settings to page.\r\n await self._add_page_settings(page)\r\n # add page to idle queue.\r\n await self.set_idle(page)\r\n # start task to periodically check page idle status.\r\n asyncio.create_task(\r\n self._check_idle_status(page))" ]
[ "0.7065218", "0.6655661", "0.6454575", "0.6180861", "0.6127758", "0.59273225", "0.58338803", "0.5830356", "0.579896", "0.57571733", "0.5753007", "0.56619835", "0.5652793", "0.557103", "0.5565266", "0.55646205", "0.5558261", "0.5553076", "0.5551274", "0.5549091", "0.552509", "0.55143917", "0.5492689", "0.5440828", "0.5408295", "0.54057807", "0.536175", "0.53534955", "0.53296214", "0.53133655", "0.53112185", "0.52729195", "0.5267506", "0.5262926", "0.52563494", "0.5251468", "0.5250428", "0.5250428", "0.52225155", "0.5220632", "0.52113837", "0.52098054", "0.520105", "0.5186231", "0.51819235", "0.5180053", "0.51774806", "0.5174585", "0.5159844", "0.51498455", "0.5145435", "0.5140168", "0.51295537", "0.51288337", "0.5123739", "0.51219404", "0.51081806", "0.51081806", "0.5106136", "0.5094427", "0.5079456", "0.507915", "0.507648", "0.5075521", "0.50726765", "0.50680864", "0.50669104", "0.5051236", "0.50498223", "0.5046307", "0.5040944", "0.50357884", "0.50339013", "0.50298667", "0.50294816", "0.50279206", "0.5027103", "0.50258887", "0.50240785", "0.5023798", "0.5015804", "0.5005361", "0.50029707", "0.50014234", "0.5001052", "0.4997782", "0.49930033", "0.49926832", "0.4991125", "0.49888253", "0.4984667", "0.49815384", "0.49760392", "0.4973786", "0.49727753", "0.49698082", "0.49670365", "0.4966283", "0.49658605", "0.49635944", "0.49540496" ]
0.0
-1
Checks if an object is a number. That is, a ``float`` or an ``int``. Where this differs from simply checking if an object is an instance of ``numbers.Number`` is that boolean objects are also a number by that measure (which, technically, they are).
def is_numeric(number):
    if isinstance(number, bool):
        return False
    elif isinstance(number, int) or isinstance(number, float):
        return True
    else:
        return False
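A minimal usage sketch of the distinction the query describes, assuming the ``is_numeric`` definition above — booleans satisfy the ``numbers.Number`` check but are rejected here:

import numbers

print(isinstance(True, numbers.Number))  # True: bool is a subclass of int
print(is_numeric(True))                  # False: booleans are explicitly excluded
print(is_numeric(3), is_numeric(2.5))    # True True
print(is_numeric("3"))                   # False: strings are neither int nor float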
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def isNumeric(obj):\n # type: (Any) -> bool\n return isinstance(obj, numbers.Number)", "def isNumeric(obj):\n return isinstance(obj, (int, float, bool))", "def is_numeric(obj):\n return isinstance(obj, (int, float, complex))", "def is_number(x):\n if isinstance(x, (int, float)):\n return True\n else:\n return False", "def is_number(n):\n return isinstance(n, (int, float))", "def is_number(value):\n\n return isinstance(value, (int, long, float))", "def is_number(self, value):\n if isinstance(value, (int, float, long, complex)): # noqa\n return True\n return False", "def _is_number(value):\n try:\n float(value)\n return True\n except (TypeError, ValueError):\n return False", "def is_number(obj):\n try:\n complex(obj) # for int, long, float and complex\n except ValueError:\n return False\n\n return True", "def isNumber(x):\n return isinstance(x, (int, float))", "def ISNUMBER(value):\n return isinstance(value, numbers.Number)", "def could_be_number(val):\n if val == None:\n return False\n\n if isinstance(val, (float, int, long)):\n return True\n\n # allow coercion from str\n if isinstance(val, (str, unicode)):\n try:\n n = float(val)\n if not isinstance(n, float):\n raise ValueError\n else:\n return True\n except:\n return False\n\n #otherwise\n return False", "def is_number(self,val):\n try:\n float(val)\n return True\n except ValueError:\n return False", "def is_numeric(value):\n return isinstance(value, int) or isinstance(value, float)", "def isNumber(x):\n\treturn type(x) in [int, float]", "def isnum(value):\n\n try:\n return bool(isinstance(value, (float, int)))\n except RuntimeError:\n return False", "def is_number(self) -> bool:\n return False", "def is_number(value):\n try:\n float(value)\n return True\n except ValueError:\n return False", "def is_number(s: Any) -> bool:\n try:\n int(s)\n return True\n except ValueError:\n pass\n\n try:\n float(s)\n return True\n except ValueError:\n pass\n\n return False", "def isnumber(x):\n try:\n float(x)\n return True\n except ValueError:\n return False", "def has_numeric_type(obj: _std_typing.Any) -> bool:\n return (not has_vector_type(obj)) and (not has_string_type(obj))", "def check_value_is_number_type(value):\n if not isinstance(value, Number):\n raise TypeError(\"Value must be a Number type.\")", "def _is_number(s) -> bool:\n try:\n float(s)\n except ValueError:\n return False\n else:\n return True", "def is_number(n):\n\ttry:\n\t\tfloat(n)\n\t\treturn True\n\texcept ValueError:\n\t\treturn False", "def is_number(value, allow_bool=False):\n if isinstance(value, bool):\n return allow_bool\n return isinstance(value, _Number)", "def is_numeric(space, w_obj):\n if w_obj.tp in [space.tp_float, space.tp_int]:\n return space.w_True\n if w_obj.tp == space.tp_str:\n return space.newbool(w_obj.is_really_valid_number(space))\n return space.w_False", "def is_number(num):\n try:\n float(num)\n return True\n except ValueError:\n return False", "def is_number(value):\n try:\n int(value)\n return True\n except (ValueError, TypeError):\n return False", "def is_number(s: Union[str, int, float]):\n if isinstance(s, str) and s.lower() == \"nan\":\n return True\n try:\n float(s)\n return True\n except ValueError:\n return False", "def is_number(number):\n if type(number) == type(1) or type(number) == type(0.1) or type(number) == type('') or type(u''):\n try:\n float(number)\n return True\n except ValueError:\n return False\n except TypeError:\n return False\n else:\n return False", "def _is_number(data):\n return len(data) and 
np.issubdtype(_to_ndarray(data).dtype, np.number)", "def is_float_or_int(value):\n if type(value) is float:\n return True\n elif type(value) is int:\n return True\n else:\n return False", "def is_number(number):\n try:\n float(number)\n return True\n except ValueError:\n return False", "def is_number(s):\r\n try:\r\n float(s)\r\n return True\r\n except ValueError:\r\n return False", "def is_number(s):\n try:\n float(s)\n return True\n except ValueError:\n return False", "def is_number(s):\n try:\n float(s)\n return True\n except ValueError:\n return False", "def is_number(s):\n try:\n float(s)\n return True\n except ValueError:\n return False", "def IsNumber(s):\n try:\n v = float(s)\n return True\n except ValueError:\n return False", "def isnumeric(number):\n try:\n float(number)\n return True\n except (TypeError, ValueError):\n return False", "def is_number(s):\n try:\n float(s)\n return True\n except ValueError:\n return False", "def is_numeric(val):\n if \\\n isinstance(val, int) or \\\n isinstance(val, float):\n return True\n elif \\\n isinstance(val, str) and \\\n val.isdigit():\n return True\n else:\n return False", "def test_is_number(self):\n \n self.assertEqual(self.var.is_number(None), False)\n self.assertEqual(self.var.is_number(\"5\"), True)\n self.assertEqual(self.var.is_number(\"a\"), False)", "def is_numeric(x):\n if isinstance(x, NUMBER_TYPES):\n return True\n elif isinstance(x, np.ndarray):\n return x.dtype.type not in NUMPY_NON_TYPES\n return False", "def isnumeric(self):\n return isnumeric(self)", "def is_number_type(self):\n raise exceptions.NotImplementedError()", "def is_numerable(self):\n return (self.is_unknown or self.is_byte or self.is_word\n or self.is_dword or self.is_qword)", "def _isnumber(string):\n if not _isconvertible(float, string):\n return False\n elif isinstance(string, (str, bytes)) and (\n math.isinf(float(string)) or math.isnan(float(string))\n ):\n return string.lower() in [\"inf\", \"-inf\", \"nan\"]\n return True", "def is_integer(number: float):\n\tif number.is_integer():\n\t\treturn int(number)\n\treturn number", "def is_numeric(value):\n return any([\n type(value) is str and value.isnumeric(),\n hasattr(value, 'is_integer') and value.is_integer(),\n type(value) is int,\n ])", "def isgoodnum(n):\n return (not isinstance(n,bool)) and isinstance(n,(int,float))", "def is_number(G):\n return True", "def _check_message_is_number(message):\n try:\n float(message)\n return True\n except ValueError:\n return False", "def is_number(symbol):\n return isa(symbol, complex) or is_rational(symbol)", "def isNumber(self, s):\n try:\n tmp = float(s)\n return True\n except:\n return False", "def is_num(var):\n try:\n int(var)\n return True\n except ValueError:\n return False", "def is_number(str):\n try:\n float(str)\n return True\n except ValueError as e:\n print(e)\n try:\n unicodedata.numeric(str)\n return True\n except (TypeError, ValueError) as e:\n print(e)\n return False", "def is_number(entity: Any) -> Tuple[bool, Optional[float]]:\n try:\n number: float = float(entity)\n return True, number\n except ValueError:\n return False, None", "def isNumber(s):\n\ttry:\n\t\tfloat(s)\n\t\treturn True\n\texcept ValueError:\n\t\treturn False", "def is_numeric(self) -> bool:\n return False", "def check_for_float_and_int(check):", "def is_number(string):\r\n try:\r\n float(string)\r\n return True\r\n except ValueError: return False", "def is_number(string):\n try:\n float(string)\n return True\n except ValueError:\n return False", "def 
value_is_float_not_int(value):\n # this is klugy and only needed to display deprecation warnings\n try:\n int(value)\n return False\n except ValueError:\n try:\n float(value)\n return True\n except ValueError:\n return False\n except TypeError:\n return False", "def is_number(str):\n\n # Local constants\n\n # Local variabes\n\n #****** start is_number() ******#\n\n try:\n float(str)\n return True\n except ValueError:\n return False", "def is_number_tryexcept(s):\n try:\n float(s)\n return True\n except ValueError:\n return False", "def is_numeric (self) :\n\n return self.__isnumeric__", "def isNumber(s):\n try:\n float(s)\n return True\n except ValueError:\n return False", "def isNumber(val):\n try:\n _ = complex(val)\n return True\n except TypeError:\n return False", "def is_number(self,s):\n try:\n float(s.replace(\" \", \"\"))\n return True\n except ValueError:\n return False", "def is_num(n):\n return '{} is a number'.format(n)", "def _is_number(self, symbol):\n if symbol.type == self.scanner.NUMBER:\n return True\n else:\n return False", "def _usable_number(self, num):\n real = isinstance(num, numbers.Real)\n non_nan = not numpy.isnan(num)\n non_bool = not (num is True or num is False)\n return real and non_nan and non_bool", "def is_number(s):\r\n try:\r\n int(s)\r\n return True\r\n except ValueError:\r\n return False", "def is_natural_number(x):\n if not is_number(x):\n return False\n else:\n if isinstance(x, int):\n return True\n elif isinstance(x, float):\n return x.is_integer()\n else:\n raise ValueError(\"Error. Not covered all number types\")", "def not_a_num(val):\n if math.isnan(val):\n return False\n else:\n return True", "def is_float(space, w_obj):\n return space.wrap(w_obj.tp == space.tp_float)", "def is_number(s):\n try:\n int(s)\n return True\n except ValueError:\n return False", "def isNumber(self):\n return _libsbml.ASTNode_isNumber(self)", "def is_decimal(obj):\n\n return isinstance(obj, Decimal)", "def _is_non_negative_float(item):\n if not isinstance(item, (int, float)):\n return False\n return item >= 0", "def is_number_tryexcept(s):\n try:\n float(s)\n return True\n except ValueError:\n return False", "def isNumber(self, *args):\n return _libsbml.ASTBasePlugin_isNumber(self, *args)", "def is_number(value):\n try:\n float(value.replace(',', ''))\n except ValueError:\n return False\n return True", "def _is_positive_float(item):\n if not isinstance(item, (int, float)):\n return False\n return item > 0", "def is_numberish(G):\n return True", "def is_valid_real_number(string_object: str):\n try:\n return float(string_object)\n except ValueError:\n return False", "def isNumber(number):\n try:\n # Try to cast the string\n int(number)\n # The cast was successful\n return True\n # The cast was unsuccessful, the string is not a number\n except ValueError as err:\n # Write the exception in logging\n logging.exception(str(err))\n return False", "def validate_number(value_if_allowed):\n if value_if_allowed == '':\n return True\n try:\n float(value_if_allowed)\n return True\n except ValueError:\n return False", "def is_integer(x):\r\n if isinstance(x, float):\r\n return x == int(x)\r\n else:\r\n raise TypeError, \"Input float\"", "def is_float(x):\r\n try:\r\n float(x)\r\n except ValueError:\r\n return False\r\n return True", "def is_floatable(value):\n\n try:\n float(value)\n return True\n except:\n return False", "def is_float(self, input):\n try:\n float(input)\n return True\n except ValueError:\n return False", "def is_float(self, value):\n try:\n float(value)\n return 
True\n except ValueError:\n return False", "def IsNumeric(text):\n try:\n _ = float(text)\n except ValueError:\n return 0\n else:\n return 1", "def is_numeric_type(self):\n row_type = self.get_type()\n is_numeric = row_type in (\n 'hidden decimal',\n 'decimal',\n 'hidden integer',\n 'integer',\n 'int',\n 'range',\n )\n return is_numeric", "def is_valid_numeric(inString):\r\n return is_int(inString) or is_float(inString)", "def isrealnum(variable):\n return bool(math.isfinite(variable))", "def is_float(possible_number):\r\n try:\r\n float(possible_number)\r\n return True\r\n except ValueError:\r\n return False", "def is_float(self, val):\n try:\n float(val)\n return True\n except ValueError:\n return False", "def isScalar(obj):\n # type: (Any) -> bool\n return isinstance(obj, numbers.Number) and not isinstance(obj, complex)" ]
[ "0.84229404", "0.8354426", "0.80287915", "0.8011571", "0.79731923", "0.7880432", "0.78278196", "0.78255373", "0.7822248", "0.7775982", "0.7771816", "0.76862437", "0.7623797", "0.76032704", "0.7492383", "0.74228925", "0.7375729", "0.7342082", "0.73297185", "0.7325915", "0.730338", "0.7261604", "0.72465223", "0.7229434", "0.72052276", "0.7185906", "0.71531904", "0.7136376", "0.707353", "0.7044746", "0.7038204", "0.70338625", "0.7012538", "0.70034873", "0.6987274", "0.6987274", "0.6987274", "0.6971061", "0.6943244", "0.69374263", "0.68962115", "0.6894735", "0.6879336", "0.6861191", "0.6855105", "0.6750014", "0.67107505", "0.6707727", "0.6705374", "0.667567", "0.6670345", "0.66379744", "0.6627456", "0.66231877", "0.6621681", "0.6618168", "0.6614887", "0.6612275", "0.65996283", "0.6592815", "0.65897673", "0.6584801", "0.6583291", "0.6553234", "0.6536214", "0.6535775", "0.6533468", "0.6521088", "0.6485139", "0.6477163", "0.6466072", "0.64205253", "0.6405785", "0.6402701", "0.6390124", "0.6383202", "0.6381811", "0.6361944", "0.63615745", "0.63396454", "0.63317597", "0.6277482", "0.62549484", "0.62332344", "0.62287813", "0.62151086", "0.62136334", "0.6212376", "0.62123126", "0.62118125", "0.6204139", "0.6194049", "0.61832845", "0.61705345", "0.61361986", "0.6136192", "0.6096367", "0.6094028", "0.6086511", "0.6079501" ]
0.75077426
14
Checks if all the arguments it receives are numeric (according to ``is_numeric()``).
def are_numeric(*values):
    for value in values:
        if not is_numeric(value):
            return False
    return True
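A short illustrative sketch, assuming the ``is_numeric`` helper defined earlier — every argument must be numeric for the check to pass:

print(are_numeric(1, 2.5, -7))    # True
print(are_numeric(1, 2.5, True))  # False: the boolean fails is_numeric
print(are_numeric())              # True: with no arguments, nothing can fail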
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_arguments(arguments):\n quit = False\n for argument, value in vars(arguments).items():\n try:\n float(value)\n except:\n print(\"{} must be numeric\".format(argument))\n quit = True\n if quit:\n exit(1)", "def check_for_float_and_int(check):", "def number_check(*args, func=None):\n func = func or inspect.stack()[2][3]\n for var in args:\n if not isinstance(var, numbers.Number):\n name = type(var).__name__\n raise DigitError(\n 'Function {} expected number, {} got instead.'.format(func, name))", "def numeric_check(param, name):\n\tif not isinstance(param, numbers.Number):\n\t\traise TypeError(\"Keyword arg '%s' must be a real number. Got: %s\" % (\n\t\t\tname, type(param)))\n\telse:\n\t\tpass", "def is_float(*args): \n try:\n for i in args:\n float(i)\n return True\n except Exception:\n return False", "def isNumeric(obj):\n return isinstance(obj, (int, float, bool))", "def _check_args(self):\n if not isinstance(self.digits, str):\n raise TypeError('digits must be of type string.')\n if isinstance(self.n_points, float):\n self.n_points = int(self.n_points)\n if not isinstance(self.n_points, int):\n raise TypeError('n_points must be of type integer.')\n if self.n_points < 0:\n raise ValueError('n_points must be positive.')", "def hasCorrectNumberArguments(self, *args):\n return _libsbml.ASTBasePlugin_hasCorrectNumberArguments(self, *args)", "def is_numeric(self) -> bool:\n return False", "def args_is_good(arg_list: list) -> bool:\n usage_msg = (\n \"Usage: python operations.py <number1> <number2>\\n\"\n \"Example:\\n\"\n \" python operations.py 10 3\\n\"\n )\n too_many_msg = \"InputError: too many arguments\\n\"\n only_numbers_msg = \"InputError: only numbers\\n\"\n if len(arg_list) == 1:\n print(usage_msg)\n return False\n if len(arg_list) > 3:\n print(too_many_msg, usage_msg)\n return False\n try:\n a, b = int(arg_list[1]), int(arg_list[2])\n # discarding floats here, even those like 5.0\n # use float.is_integer() if need to keep those\n # keeping only 42 or \"42\" (ints with or without quotes)\n if arg_list[1] == str(a) and arg_list[2] == str(b):\n return True\n except TypeError:\n print(only_numbers_msg, usage_msg)\n return False", "def isnumeric(self):\n return isnumeric(self)", "def is_numeric(value):\n return isinstance(value, int) or isinstance(value, float)", "def numeric(*args):", "def is_numeric(x):\n if isinstance(x, NUMBER_TYPES):\n return True\n elif isinstance(x, np.ndarray):\n return x.dtype.type not in NUMPY_NON_TYPES\n return False", "def _has_numeric_strict(self) -> bool:\n return bool({'i', 'f'} & self._data.keys())", "def __check_args(self):\n self.__check_args_type()\n self.__check_args_val()", "def is_numeric(obj):\n return isinstance(obj, (int, float, complex))", "def __verify_numeric(self, action, value):\n if action != \"1\": # if the action is anything other than inserting:\n return True\n try:\n return value.isnumeric()\n except ValueError:\n return False", "def isNumber(x):\n\treturn type(x) in [int, float]", "def check_for_float(check):", "def isNumber(x):\n return isinstance(x, (int, float))", "def is_valid_numeric(inString):\r\n return is_int(inString) or is_float(inString)", "def isNumeric(self,chain):\n res = True\n try:\n int(chain)\n except:\n res = False\n return res", "def validate_numeric_annots(self):\n valid = True\n for annot_header in self.file.columns[1:]:\n annot_name = annot_header[0]\n annot_type = annot_header[1]\n column_dtype = self.file.dtypes[annot_header]\n if annot_type == \"numeric\" and column_dtype == \"object\":\n valid = 
False\n msg = f\"Numeric annotation, {annot_name}, contains non-numeric data (or unidentified NA values)\"\n self.store_validation_issue(\n \"error\", msg, \"content:invalid-type:not-numeric\"\n )\n return valid", "def is_int(*args): \n try:\n for i in args:\n int(i)\n return True\n except Exception:\n return False", "def is_numeric(number):\n\n if isinstance(number, bool):\n return False\n elif isinstance(number, int) or isinstance(number, float):\n return True\n else:\n return False", "def is_numeric(val):\n if \\\n isinstance(val, int) or \\\n isinstance(val, float):\n return True\n elif \\\n isinstance(val, str) and \\\n val.isdigit():\n return True\n else:\n return False", "def is_numeric(value):\n return any([\n type(value) is str and value.isnumeric(),\n hasattr(value, 'is_integer') and value.is_integer(),\n type(value) is int,\n ])", "def _is_number(data):\n return len(data) and np.issubdtype(_to_ndarray(data).dtype, np.number)", "def test_not_int(self):\n invalid_args = [\"random string\", \"123\", 123.5]\n for arg in invalid_args:\n assert meters_to_km(arg) is arg", "def is_numeric (self) :\n\n return self.__isnumeric__", "def test_arguments(self):\n\n h.test_function_arguments(\n func=ScalingTransformer.check_numeric_columns,\n expected_arguments=[\"self\", \"X\"],\n expected_default_values=None,\n )", "def isnumeric(number):\n try:\n float(number)\n return True\n except (TypeError, ValueError):\n return False", "def is_numeric(space, w_obj):\n if w_obj.tp in [space.tp_float, space.tp_int]:\n return space.w_True\n if w_obj.tp == space.tp_str:\n return space.newbool(w_obj.is_really_valid_number(space))\n return space.w_False", "def isNumeric(obj):\n # type: (Any) -> bool\n return isinstance(obj, numbers.Number)", "def are_all_numbers(values: List[Union[str, int, float]]):\n for value in values:\n if not is_number(value):\n return False\n return True", "def _has_numeric_or_bool(self) -> bool:\n dtypes: Set[str] = set(self._data.keys())\n return 'i' in dtypes or 'f' in dtypes or 'b' in dtypes", "def isnum(value):\n\n try:\n return bool(isinstance(value, (float, int)))\n except RuntimeError:\n return False", "def isgoodnum(n):\n return (not isinstance(n,bool)) and isinstance(n,(int,float))", "def hasCorrectNumberArguments(self):\n return _libsbml.ASTNode_hasCorrectNumberArguments(self)", "def isNumeric(data):\n\tif type(data) == list or type(data) == np.ndarray:\n\t\tcol = pd.Series(data)\n\telse:\n\t\tcol = data\n\treturn col.dtype == np.int32 or col.dtype == np.int64 or col.dtype == np.float32 or col.dtype == np.float64", "def is_numeric(s):\n \n if s == False or s == None or s == \"\" or s == True:\n return False\n \n try:\n float(s)\n return True\n except (ValueError, TypeError):\n return False", "def check_if_number(list):\n for item in list:\n try:\n float(item)\n except ValueError as e:\n print WrongTypePointError(item)\n sys.exit()", "def has_numeric_type(obj: _std_typing.Any) -> bool:\n return (not has_vector_type(obj)) and (not has_string_type(obj))", "def test_is_number(self):\n \n self.assertEqual(self.var.is_number(None), False)\n self.assertEqual(self.var.is_number(\"5\"), True)\n self.assertEqual(self.var.is_number(\"a\"), False)", "def is_number(x):\n if isinstance(x, (int, float)):\n return True\n else:\n return False", "def check_number_operands(operator: loxtoken.Token, op1: Any, op2: Any = None) -> bool:\n if op2 is None:\n if isinstance(op1, float):\n return True\n raise_error(LoxRuntimeError, operator, \"Operand must be a number.\")\n else:\n if isinstance(op1, 
float) and isinstance(op2, float):\n return True\n raise_error(LoxRuntimeError, operator, \"Both operands must be a number.\")\n return False", "def _valid_types(arguments, types):\n for arg in arguments:\n if type(arg) not in types:\n return False\n return True", "def _valid_types(arguments, types):\n for arg in arguments:\n if type(arg) not in types:\n return False\n return True", "def int_check(*args, func=None):\n func = func or inspect.stack()[2][3]\n for var in args:\n if not isinstance(var, numbers.Integral):\n name = type(var).__name__\n raise ComplexError(\n 'Function {} expected integral number, {} got instead.'.format(func, name))", "def IsNumeric(text):\n try:\n _ = float(text)\n except ValueError:\n return 0\n else:\n return 1", "def test_star_args():\n\n @type_checked\n def _run_test(wat:int, *args:float, **kwargs:str):\n assert wat == 0\n for arg in args:\n assert isinstance(arg, float)\n assert len(args) == 4\n for _, value in kwargs.items():\n assert isinstance(value, str)\n\n _run_test(False, False, True, 14, \"10.2\", foo=False, bar=17, ok=None)", "def isNumber(txt):\r\n if not isinstance(txt, str) or len(txt)==0:\r\n return \"error: isNumber\"\r\n # --- YOU CODE STARTS HERE\r\n else: \r\n try: \r\n m = float(txt)\r\n return True\r\n except ValueError: \r\n return False", "def check_for_int(check):", "def isNumber(num):\n try:\n abs(num)\n return True\n except:\n return False", "def is_number(n):\n return isinstance(n, (int, float))", "def isNumber(self, *args):\n return _libsbml.ASTBasePlugin_isNumber(self, *args)", "def isnumeric(a):\n if not _is_unicode(a):\n raise TypeError(\"isnumeric is only available for Unicode strings and arrays\")\n return _vec_string(a, bool_, 'isnumeric')", "def isnumber(x):\n try:\n float(x)\n return True\n except ValueError:\n return False", "def could_be_number(val):\n if val == None:\n return False\n\n if isinstance(val, (float, int, long)):\n return True\n\n # allow coercion from str\n if isinstance(val, (str, unicode)):\n try:\n n = float(val)\n if not isinstance(n, float):\n raise ValueError\n else:\n return True\n except:\n return False\n\n #otherwise\n return False", "def is_number(s: Any) -> bool:\n try:\n int(s)\n return True\n except ValueError:\n pass\n\n try:\n float(s)\n return True\n except ValueError:\n pass\n\n return False", "def _check_args(self, args):\n if len(args) == 0:\n print(\"No parameters provided.\")\n return False\n else:\n return True", "def is_number(self) -> bool:\n return False", "def is_number(G):\n return True", "def test_non_numberic_validation(self):", "def test_non_numberic_validation(self):", "def _check_value(self,val):\n if self.allow_None and val is None:\n return\n\n if not _is_number(val):\n raise ValueError(\"Parameter '%s' only takes numeric values\"%(self._attrib_name))\n \n self._checkBounds(val)", "def __check_args_type(self):\n if not isinstance(self.__num_prev_scans, int):\n error_msg = \"num_prev_scans must of type 'int', but given '\"\n error_msg += str(type(self.__num_prev_scans))+ \"'\"\n raise TypeError(error_msg)\n\n if isinstance(self.__num_prev_scans, bool):\n error_msg = \"num_prev_scans must of type 'int', but given '\"\n error_msg += str(type(self.__num_prev_scans))+ \"'\"\n raise TypeError(error_msg)", "def isNumber(s):\n try:\n float(s)\n return True\n except ValueError:\n return False", "async def process_args(self, context: MessageContext, *args: str) -> bool:\n\n output = []\n\n for datum in args:\n datum = datum.strip().strip(\",\")\n\n if datum.isnumeric():\n 
output.append(math.floor(int(datum) / 8))\n\n if not output:\n return False\n\n await context.reply_all(\n f\"Nether Location: {', '.join([str(x) for x in output])}\"\n )\n\n return True", "def _check_args(self, args_):\n\n pass", "def _check_support(X: np.ndarray, **kwargs) -> None:\n\n assert (X > 0).all() & isinteger(X), \"x should be greater then 0 and integer.\"", "def _check_support(X: np.ndarray, **kwargs) -> None:\n\n assert (X >= 0).all() & isinteger(\n X\n ), \"x should be greater or equal to 0 and integer.\"", "def _is_number(s) -> bool:\n try:\n float(s)\n except ValueError:\n return False\n else:\n return True", "def validate_args(*args: Any) -> bool:\n\n return len(args) == 4 and Item.validate_price(args[2]) and Entity.validate_discount(args[3])", "def test_check_X_not_int_not_float():\n with pytest.raises(ValueError):\n check_X(['hi'], verbose=False)", "def _verify_arguments(self, kwargs: dict[str, Any]):\n geom_stat_args = kwargs.keys() | self._stat._kwargs.keys()\n unknown = (\n geom_stat_args\n - self.aesthetics()\n - self.DEFAULT_PARAMS.keys() # geom aesthetics\n - self._stat.aesthetics() # geom parameters\n - self._stat.DEFAULT_PARAMS.keys() # stat aesthetics\n - { # stat parameters\n \"data\",\n \"mapping\",\n \"show_legend\", # layer parameters\n \"inherit_aes\",\n \"raster\",\n }\n ) # layer parameters\n if unknown:\n msg = (\n \"Parameters {}, are not understood by \"\n \"either the geom, stat or layer.\"\n )\n raise PlotnineError(msg.format(unknown))", "def is_number(value):\n\n return isinstance(value, (int, long, float))", "def parameters_are_valid():\n # The only accepted number of command line arguments is 3: they are\n # aggregator.py, the filename, and the topic\n if len(sys.argv) != 3:\n # Issue error message if invalid number of command line arguments\n print(\"Error: invalid number of arguments\")\n print(\"Usage: aggregator.py filename topic\")\n return False\n else:\n return True", "def isNumber(s):\n\ttry:\n\t\tfloat(s)\n\t\treturn True\n\texcept ValueError:\n\t\treturn False", "def test_cast_non_numeric_true():\n assert _currency_column_to_numeric(\"foo\", {\"foo\": 42}) == 42", "def is_numberish(G):\n return True", "def valid_args(args):\n return args is not None and len(args) > 0", "def is_numerable(self):\n return (self.is_unknown or self.is_byte or self.is_word\n or self.is_dword or self.is_qword)", "def is_number(self,val):\n try:\n float(val)\n return True\n except ValueError:\n return False", "def is_numeric(rows, col):\n return rows.dtypes.values[col] in numerics", "def must_be_numeric(cell):\n # Check if it's nan first\n if check_empty(cell):\n return True\n # If it's not nan, check it's a number\n return pd.isna(pd.to_numeric(str(cell), errors=\"coerce\"))", "def validateNumericInput(input_param):\n if input_param.isnumeric():\n return input_param\n else:\n raise ValueError(\"One or more input parameter(s) are non numeric\")", "def _is_number(value):\n try:\n float(value)\n return True\n except (TypeError, ValueError):\n return False", "def __check_args_type(self):\n if not isinstance(self.__min_range, (float, int)):\n error_msg = \"min_range must of type int or float, but given: \"\n error_msg += str(type(self.__min_range))\n raise TypeError(error_msg)\n elif not isinstance(self.__max_range, (float, int)):\n error_msg = \"max_range must of type int or float, but given: \"\n error_msg += str(type(self.__max_range))\n raise TypeError(error_msg)\n\n if isinstance(self.__min_range, bool):\n error_msg = \"min_range must of type int or float, but 
given: \"\n error_msg += str(type(self.__min_range))\n raise TypeError(error_msg)\n elif isinstance(self.__max_range, bool):\n error_msg = \"max_range must of type int or float, but given: \"\n error_msg += str(type(self.__max_range))\n raise TypeError(error_msg)", "def is_number(str):\n try:\n float(str)\n return True\n except ValueError as e:\n print(e)\n try:\n unicodedata.numeric(str)\n return True\n except (TypeError, ValueError) as e:\n print(e)\n return False", "def validate_numeric(column_name, value, column_data_type=\"numeric\"):\n valid = value.isnumeric()\n if not valid:\n return \"{0} : '{1}' is not a valid {2}\".format(column_name, value, column_data_type)\n return None", "def checknum(val):\n\n if len(val) == 0:\n return False\n\n for i in range(len(val)):\n if not val[i].isdigit():\n return False\n\n return True", "def _check_message_is_number(message):\n try:\n float(message)\n return True\n except ValueError:\n return False", "def CheckNumber(userInput):\n try:\n float(userInput)\n return True\n except(ValueError):\n return False", "def check_numeric(data, col):\n from pandas.api.types import is_numeric_dtype\n try:\n if is_numeric_dtype(data[col]):\n logging.info(f' {col} is numeric.')\n return data\n else:\n numdata = (data\n .drop([col], axis=1)\n .join(data[col].apply(pandas.to_numeric, errors='coerce'))\n )\n numcol = numdata[col].isnull().values().sum()\n logging.warning(f' %s rows in %s are non-numeric' % (numcol, col,))\n logging.warning(f' {col} is tested by coercing into numeric values.')\n return numdata\n except:\n logging.error(f' the format of %s is not testable.' % (col,))\n print(data.head(n=2))\n sys.exit(1)", "def _is_arithmetic(self, words):\n if words[0] in ['add', 'sub', 'neg', 'eq', 'gt', 'lt', 'and', 'or', 'not']:\n if len(words) != 1:\n raise SyntaxError(\"File line {}: Invalid number of arguments for C_ARITHMETIC command.\".format(self._file_line))\n return True\n else:\n return False", "def is_number(self, value):\n if isinstance(value, (int, float, long, complex)): # noqa\n return True\n return False", "def test_badargs(self):\n self.assertRaises(TypeError, isint, [])\n self.assertRaises(TypeError, isint, {})\n self.assertRaises(TypeError, isint, None)\n return", "def test_string_or_number():\n assert is_string_or_number(None) is None\n assert is_string_or_number(1) is None\n assert is_string_or_number(1.1) is None\n assert is_string_or_number('1.1') is None\n assert is_string_or_number([])" ]
[ "0.79766375", "0.72621953", "0.7188788", "0.7124984", "0.7036451", "0.7026137", "0.6995436", "0.69920516", "0.6922569", "0.6913737", "0.68856776", "0.68640095", "0.68215996", "0.67944366", "0.6732328", "0.6717718", "0.6717057", "0.6711378", "0.664611", "0.6630572", "0.6597417", "0.6569322", "0.6494181", "0.64835775", "0.6482341", "0.6480279", "0.64764017", "0.64741606", "0.645952", "0.6457773", "0.64540595", "0.6414762", "0.63902265", "0.63599515", "0.63557607", "0.63356745", "0.63314563", "0.63281894", "0.6314842", "0.630463", "0.63037133", "0.6278389", "0.6262018", "0.6261348", "0.62367886", "0.621473", "0.62120235", "0.62117743", "0.62117743", "0.61904895", "0.6178708", "0.61642635", "0.6149765", "0.6146786", "0.6144032", "0.6120624", "0.6091479", "0.60846597", "0.60844463", "0.60833186", "0.6078929", "0.6078425", "0.606093", "0.60596895", "0.6054126", "0.6054126", "0.6022069", "0.6018216", "0.6015258", "0.6013543", "0.60047245", "0.59999806", "0.5996427", "0.5982263", "0.59818715", "0.59798265", "0.5955163", "0.59523714", "0.59512925", "0.5950196", "0.5948221", "0.594005", "0.59399927", "0.5939797", "0.59285253", "0.59242547", "0.59182894", "0.5918189", "0.59166294", "0.5910508", "0.5888219", "0.58756626", "0.58621037", "0.58598024", "0.58570176", "0.5845672", "0.58343863", "0.5833386", "0.5828186", "0.5826427" ]
0.7544063
1
Drop the unit definition silently
def _drop_units(q): try: return q.magnitude except: return q
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def removeUnitDefinition(self, *args):\n return _libsbml.Model_removeUnitDefinition(self, *args)", "def unsetName(self):\n return _libsbml.UnitDefinition_unsetName(self)", "def unsetKind(self):\n return _libsbml.Unit_unsetKind(self)", "def removeUnit(self, *args):\n return _libsbml.UnitDefinition_removeUnit(self, *args)", "def _remove_unit(unit_name):\n zaza.model.destroy_unit(\"ovn-central\", unit_name)\n zaza.model.block_until_all_units_idle()\n zaza.model.wait_for_application_states()", "def _drop_units(q):\n try:\n return q.value\n except AttributeError:\n try:\n return q.value\n except AttributeError:\n return q", "def drop(self):\n pass", "def drop(self):\n pass", "def unsetUnits(self):\n return _libsbml.Species_unsetUnits(self)", "def unsetUnits(self):\n return _libsbml.Rule_unsetUnits(self)", "def unsetUnits(self):\n return _libsbml.ASTNode_unsetUnits(self)", "def unsetUnits(self):\n return _libsbml.Compartment_unsetUnits(self)", "def remove(self, *args):\n return _libsbml.ListOfUnitDefinitions_remove(self, *args)", "def unsetUnits(self):\n return _libsbml.Parameter_unsetUnits(self)", "def _strip_optunit(thing, unit):\n if u.is_quantity(thing):\n return thing.value_in_unit(unit)\n return thing", "async def on_unit_destroyed(self, unit_tag):", "def test_undefine(self):\n self.assertEqual(['undef', 'test'],\n grammar._UNDEFINE.parseString(\"#undef test\").asList())", "def unsetUnitRef(self):\n return _libsbml.SBaseRef_unsetUnitRef(self)", "def untargeted(self):\n\t\tpass", "def test_component_specification_lifetime_non_declaration(self):\r\n\t\tself.assertTrue(self._configuration_[\"RemoveWordDefinitionTask\"].lifetime() == \"\")", "def UnitDefinition_simplify(*args):\n return _libsbml.UnitDefinition_simplify(*args)", "def _remove_definition(self, definition):\n global_callback._call_library_remove_definition(self, definition)\n definition._library = None", "def test_incomplete():\n test_file = os.path.join(DATA_DIR, 'incomplete.out')\n parser = CRYSTOUT(test_file)\n info = parser.info\n assert info['finished'] == 1 # not finished\n assert info['energy'] is None # energy in eV\n assert info['k'] == '12x12x12' # Monkhorst-Pack net\n assert not info['ncycles']", "def createUnitDefinition(self):\n return _libsbml.Model_createUnitDefinition(self)", "def remove_definition(self, definition):\n assert definition.library == self, \"definition is not included in library\"\n self._remove_definition(definition)\n self._definitions.remove(definition)", "def test_noQuantity(self):\n # result = self.parser.parse(\"d6\")\n\n # TODO\n # self.assertIsNone(result)", "def test_remove_workflow_definition(self):\n pass", "def _fix_units(cube, definition):\n\n if cube.var_name != 'pr':\n cube.convert_units(definition.units)", "def tearDown(self):\n super(LabForUser, self).tearDown()\n if self.compound is not None:\n self.compound.delete()", "def _removeSpecs(self):\n self.specGenerator.removeSpecs()", "def consume_units_unconditionally(self, units):\n pass", "def suppressMessages():\n dislin.unit(0)", "def createUnit(self):\n return _libsbml.UnitDefinition_createUnit(self)", "def close_unit(self):\n self.disconnect()", "def getUnitDefinition(self, *args):\n return _libsbml.Model_getUnitDefinition(self, *args)", "def clone(self):\n return _libsbml.UnitDefinition_clone(self)", "def unsetTimeUnits(self):\n return _libsbml.Model_unsetTimeUnits(self)", "def drop(self):\n self.id = None", "def drop(self,title):\n\n if self.enabled:\n del self.timers[title]", "def unsetAreaUnits(self):\n return 
_libsbml.Model_unsetAreaUnits(self)", "def eliminate_unit_productions(self) -> \"CFG\":\n unit_pairs = self.get_unit_pairs()\n productions = [x\n for x in self._productions\n if len(x.body) != 1\n or not isinstance(x.body[0], Variable)]\n productions_d = get_productions_d(productions)\n for var_a, var_b in unit_pairs:\n for production in productions_d.get(var_b, []):\n productions.append(Production(var_a, production.body,\n filtering=False))\n return CFG(self._variables,\n self._terminals,\n self._start_symbol,\n productions)", "def unsetOffset(self):\n return _libsbml.Unit_unsetOffset(self)", "def none(self):", "def test_drop(self):\n state = generate_valid_state(np.array([\n [1, 1, 0, 1, 1, 0, 1, 1, 0, 0],\n [1, 1, 1, 1, 1, 1, 1, 1, 1, 0],\n ], dtype=np.uint8))\n field = Field.create(state)\n self.assertIsNotNone(field)\n\n lines_cleared = field.drop(Tetromino.JTetromino(), 0)\n self.assertEqual(lines_cleared, 0)\n expected_field = Field.create(generate_valid_state(np.array([\n [6, 6, 6, 0, 0, 0, 0, 0, 0, 0],\n [1, 1, 6, 1, 1, 0, 1, 1, 0, 0],\n [1, 1, 1, 1, 1, 1, 1, 1, 1, 0],\n ])))\n self.assertFieldsEqual(field, expected_field)\n\n lines_cleared = field.drop(\n Tetromino.TTetromino().rotate_right(), 8)\n self.assertEqual(lines_cleared, 1)\n expected_field = Field.create(generate_valid_state(np.array([\n [6, 6, 6, 0, 0, 0, 0, 0, 0, 3],\n [1, 1, 6, 1, 1, 0, 1, 1, 3, 3],\n ])))\n self.assertFieldsEqual(field, expected_field)\n\n field.drop(Tetromino.OTetromino(), 3)\n field.drop(Tetromino.ZTetromino(), 6)\n field.drop(Tetromino.JTetromino().flip(), 0)\n field.drop(Tetromino.OTetromino(), 8)\n expected_field = Field.create(generate_valid_state(np.array([\n [6, 0, 0, 0, 0, 0, 0, 0, 2, 2],\n [6, 6, 6, 2, 2, 0, 5, 5, 2, 2],\n [6, 6, 6, 2, 2, 0, 0, 5, 5, 3],\n [1, 1, 6, 1, 1, 0, 1, 1, 3, 3],\n ])))\n self.assertFieldsEqual(field, expected_field)\n lines_cleared = field.drop(Tetromino.ITetromino().rotate_right(), 5)\n self.assertEqual(lines_cleared, 2)\n expected_field = Field.create(generate_valid_state(np.array([\n [6, 0, 0, 0, 0, 1, 0, 0, 2, 2],\n [6, 6, 6, 2, 2, 1, 0, 5, 5, 3],\n ])))", "def test_remove_workflow_definitions_in_job(self):\n pass", "def undef(self):\n initial_pos = self.pos\n try:\n self.match_value(Identifier, \"undef\")\n identifier = self.match_type(Identifier)\n return UndefNode(identifier)\n except ParseError:\n self.pos = initial_pos\n raise ParseError(\"Invalid undef directive.\")", "def reset_units(shared, *args):\n shared.config.remove_section('units')\n shared.config.add_section('units')\n \n return", "def unsetTimeUnits(self):\n return _libsbml.KineticLaw_unsetTimeUnits(self)", "def test_unit_definitions(self):\n st = State(\"water\")\n props = st._all_props.union(st._read_only_props) - {\"phase\"} # type: ignore\n assert all([a in st._SI_units.keys() for a in props]) # type: ignore", "def unreturnbank(self):\n pass", "def delete(self, unit_type, unit_name, variation_name=None):\n if unit_type == pu.UnitType.alias:\n relevant_dict = self.alias_definitions\n stat_key = \"#aliases\"\n elif unit_type == pu.UnitType.slot:\n relevant_dict = self.slot_definitions\n stat_key = \"#slots\"\n elif unit_type == pu.UnitType.intent:\n relevant_dict = self.intent_definitions\n stat_key = \"#intents\"\n else:\n raise ValueError(\"Tried to delete a definition with wrong type \"+\n \"(expected alias, slot or intent)\")\n\n if unit_name not in relevant_dict:\n raise KeyError(\"Couldn't find a definition for \" + unit_type.name +\n \" '\" + unit_name + \"'.\")\n\n nb_rules = 
relevant_dict[unit_name].get_nb_rules(variation_name)\n if variation_name is None:\n del relevant_dict[unit_name]\n self.stats[stat_key] -= 1\n self.stats[\"#declarations\"] -= 1\n self.stats[\"#rules\"] -= nb_rules\n else:\n relevant_dict[unit_name].delete_variation(variation_name)\n self.stats[\"#rules\"] -= nb_rules", "def stop_fixture(self):\n pass", "def drop_table(self):\n for ss in self.spectrae:\n ss.tau[('H',1,1215)] = np.array([0])", "def drop_units(cfg, units, name, mode, drop_cb=None, prof=None, logger=None):\n\n # blowup is only enabled on profiling\n if 'RADICAL_PILOT_PROFILE' not in os.environ:\n if logger:\n logger.debug('no profiling - no dropping')\n return units\n\n if not units:\n # if logger:\n # logger.debug('no units - no dropping')\n return units\n\n drop = cfg.get('drop', {}).get(name, {}).get(mode, 1)\n\n if drop == 0:\n # if logger:\n # logger.debug('dropped nothing')\n return units\n\n return_list = True\n if not isinstance(units, list):\n return_list = False\n units = [units]\n\n if drop == 2:\n if drop_cb:\n for unit in units:\n drop_cb(unit=unit, name=name, mode=mode, prof=prof, logger=logger)\n if logger:\n logger.debug('dropped all')\n for unit in units:\n logger.debug('dropped %s', unit['_id'])\n if return_list: return []\n else : return None\n\n if drop != 1:\n raise ValueError('drop[%s][%s] not in [0, 1, 2], but is %s' \\\n % (name, mode, drop))\n\n ret = list()\n for unit in units :\n if '.clone_' not in unit['_id']:\n ret.append(unit)\n # if logger:\n # logger.debug('dropped not %s', unit['_id'])\n else:\n if drop_cb:\n drop_cb(unit=unit, name=name, mode=mode, prof=prof, logger=logger)\n if logger:\n logger.debug('dropped %s', unit['_id'])\n\n if return_list: \n return ret\n else: \n if ret: return ret[0]\n else : return None", "def test_deblaze():\n\n spec = IGRINSSpectrum(file=file)\n\n new_spec = spec.remove_nans().deblaze()\n\n assert new_spec is not None\n assert isinstance(new_spec, Spectrum1D)", "def simplify(*args):\n return _libsbml.UnitDefinition_simplify(*args)", "def drop(self):\n\t\tdrop_model(self.name, self.cursor, print_info = False)", "def drop(self):\n for step in self.steps:\n step[1].drop()", "def unsetTimeUnits(self):\n return _libsbml.Event_unsetTimeUnits(self)", "def test_remove_a_single_attribute(self):\n pass", "def drop_non_wind(ds):\n return ds.drop(['fdir', 't2m', 'ssrd'])", "def unload_fmu(self):\n del self.fmu", "def test_unsupported_units(self):\n with pytest.raises(TypeError):\n set_default_units(\"bad\")\n with pytest.raises(TypeError):\n State(\"water\", T=Q_(100, \"degC\"), p=Q_(1.0, \"atm\"), units=\"bad\")", "def test_untar(self):", "def unit_of_measurement(self):\n return None", "def untuck(self):\n self.move_to_neutral()", "def teardown_class(self):\n\n file_list = \\\n glob.glob(os.path.join(self.datadir,\n '*monol_test*') + HEN_FILE_EXTENSION) + \\\n glob.glob(os.path.join(self.datadir,\n '*lcurve*') + HEN_FILE_EXTENSION) + \\\n glob.glob(os.path.join(self.datadir,\n '*lcurve*.txt')) + \\\n glob.glob(os.path.join(self.datadir,\n '*.log')) + \\\n glob.glob(os.path.join(self.datadir,\n '*monol_test*.dat')) + \\\n glob.glob(os.path.join(self.datadir,\n '*monol_test*.png')) + \\\n glob.glob(os.path.join(self.datadir,\n '*monol_test*.txt')) + \\\n glob.glob(os.path.join(self.datadir,\n 'monol_test_fake*.evt')) + \\\n glob.glob(os.path.join(self.datadir,\n 'bubu*'))\n for f in file_list:\n print(\"Removing \" + f)\n os.remove(f)", "def unsetLengthUnits(self):\n return _libsbml.Model_unsetLengthUnits(self)", "def 
tearDownClass(cls):\n del cls.my_model.name\n del cls.my_model.my_number\n del cls.my_model_json\n del cls.my_new_model\n del cls.my_model", "def test_standard_unit_class_not_set(self) -> None:\n # Arrange.\n MyType.clear_interning_cache()\n # Make it look like we have no standard unit for this type.\n MyType._STANDARD_UNIT_CLASS = None\n\n # Create a new instance.\n my_type = MyType.decorate(MyUnit)\n\n # Act and assert.\n with pytest.raises(UnitError, match=\"no standard\"):\n my_type.standard_unit_class()", "def tearDown(self): # pylint: disable=invalid-name\n self.hass.stop()\n entity.Entity.overwrite_attribute(self.entity.entity_id,\n [ATTR_HIDDEN], [None])", "def tearDownClass(self):\n remove('temp_mol_file.csv')", "def suppress(self):\n pass", "def remove_assumption(g, l = 0):\n global simulator\n if simulator is None:\n print \"program is not running\"\n return\n try:\n simulator.remove_assumption(g,l)\n except:\n simulation_error()", "def undef(self, varName:str, varType:str) -> None:\n undef_str = 'undef(\"{varName}\", {varType})'\n fmtDict = dict()\n fmtDict['varName'] = varName\n fmtDict['varType'] = varType\n self.run(undef_str.format(**fmtDict).strip())", "def clear_specific(self):\n self.specific_file = None\n self.specific_parser = None\n\n self.specific_box.delete(0, END)", "def unpossessed(self):\r\n self.owner = None", "def getUnstructured042_special_Units(filename):\n doc = Document(filename)\n units = list()\n # all interviews start with a header\n isHeader = True\n # iterate over all paragraphs to get text units\n for para in doc.paragraphs:\n paragraph = para.text\n \n # ensure paragraph is not just empty line\n hasText = paragraph.lstrip()\n # ensure it is not an empty line\n if hasText:\n\n \n units.append({'unit':paragraph})\n return units", "def test_drop_table(self):\n schema: t.List[DiffableTable] = []\n schema_snapshot: t.List[DiffableTable] = [\n DiffableTable(class_name=\"Band\", tablename=\"band\", columns=[])\n ]\n schema_differ = SchemaDiffer(\n schema=schema, schema_snapshot=schema_snapshot, auto_input=\"y\"\n )\n\n self.assertTrue(len(schema_differ.drop_tables.statements) == 1)\n self.assertEqual(\n schema_differ.drop_tables.statements[0],\n \"manager.drop_table(class_name='Band', tablename='band')\",\n )", "def tearDownClass(cls):\n cls.runModule(\"g.remove\", flags=\"rf\", type=\"vector\",\n name=\"gbif_poa3\")\n cls.del_temp_region()", "def unsetSubstanceUnits(self):\n return _libsbml.Model_unsetSubstanceUnits(self)", "def unsetSubstanceUnits(self):\n return _libsbml.Species_unsetSubstanceUnits(self)", "def disarm(self):\n pass", "def test_undelete_derived_metric(self):\n pass", "def undefAll(self) -> None:\n self.run(\"undef all\")", "def createUnit(self):\n return _libsbml.Model_createUnit(self)", "def no_ast(f):\n delattr(f, 'ast')\n return f", "def __delitem__(name):", "def unset_current_units(self, utype):\n try:\n cunits = self._saved_units[utype]\n except KeyError:\n raise Exception(\"Units to restore not found\")\n \n if utype in self.allowed_utypes:\n if cunits in self.units[utype]:\n self.current_units[utype] = cunits\n else:\n raise Exception(\"Unknown units of %s\" % utype)\n else:\n raise Exception(\"Unknown type of units\")", "def remove_units(namespace):\n res = copy(namespace)\n for label, value in res.__dict__.items():\n if isinstance(value, pd.Series):\n value = remove_units_series(value)\n res.__dict__[label] = magnitude(value)\n return res", "def test_load_do_not_convert_non_quantity_strings(self):\n sage = 
ForceField(\"openff-2.0.0.offxml\")\n\n for parameter_handler_name in sage.registered_parameter_handlers:\n parameter_handler = sage.get_parameter_handler(parameter_handler_name)\n\n for parameter in parameter_handler.parameters:\n assert isinstance(parameter.smirks, str)\n assert not isinstance(parameter.smirks, unit.Quantity)\n\n # Ensure that, for example, F isn't converted to Farad\n if (\n parameter_handler_name == \"LibraryCharges\"\n and parameter.name is not None\n ):\n assert isinstance(parameter.name, str)\n assert not isinstance(parameter.name, unit.Quantity)", "def help_drop(self):\n print(DROP)", "def die(self):\n self.pjs.bombermen.remove(self)\n for block in self.physics.blocks[self.stype]:\n if block == self.rects[0]:\n self.physics.blocks[self.stype].remove(block)", "def del_fits_key_unit(self, key):\n if self.get_fits_key_unit(key) is None:\n raise ValueError(f'No unit for key \"{key}\" to delete')\n kcomment = self.comments[key]\n print(kcomment)\n print(self._unit_regexp)\n kcomment = re.sub(self._unit_regexp, '', kcomment)\n self.comments[key] = kcomment\n self._cards[self._cardindex(key)].comment = kcomment", "def test_remove_taxation_strategy_from_rate_plan(self):\n pass", "def uom(self, value):\n raise TypeError(\"Cannot delete {class-name} uom property.\")", "def drop(duration=None):\n if duration is not None:\n if not isinstance(duration, tuple):\n duration = (duration, duration)\n msg = of.ofp_flow_mod()\n msg.match = of.ofp_match.from_packet(packet)\n msg.idle_timeout = duration[0]\n msg.hard_timeout = duration[1]\n msg.buffer_id = event.ofp.buffer_id\n event.connection.send(msg)\n elif event.ofp.buffer_id is not None:\n msg = of.ofp_packet_out()\n msg.buffer_id = event.ofp.buffer_id\n msg.in_port = event.port\n event.connection.send(msg)", "def undefined(self, ident, args):\n return \"\"", "def test_remove_one(self):\n pass", "def tearDown(self):\n del self.a" ]
[ "0.7016923", "0.671953", "0.65005463", "0.6406769", "0.6294587", "0.6132818", "0.61080724", "0.61080724", "0.6019801", "0.6007988", "0.5972027", "0.58067775", "0.5797811", "0.5779521", "0.5779516", "0.5762775", "0.574033", "0.57371294", "0.57115144", "0.5674466", "0.5610218", "0.5575008", "0.55471087", "0.5544347", "0.55429226", "0.5532614", "0.5529788", "0.552874", "0.5513573", "0.5482675", "0.5473916", "0.5468075", "0.54455686", "0.54249305", "0.5421762", "0.5409915", "0.5402766", "0.53976876", "0.5391365", "0.5390969", "0.5369016", "0.53594905", "0.53072464", "0.5307189", "0.53053427", "0.5293425", "0.52932674", "0.5293237", "0.5274813", "0.52674484", "0.52524775", "0.5250377", "0.52491546", "0.52489036", "0.5247225", "0.52466697", "0.524584", "0.5245727", "0.5240668", "0.5223976", "0.52214414", "0.51992744", "0.51875997", "0.5182057", "0.5181505", "0.51777726", "0.51763237", "0.51747817", "0.5171436", "0.51711327", "0.514937", "0.5147609", "0.51455295", "0.5136966", "0.51326317", "0.5128937", "0.5120498", "0.5116213", "0.51160616", "0.5113067", "0.5096348", "0.5093187", "0.50916725", "0.5091027", "0.50787896", "0.5077313", "0.50750965", "0.50686955", "0.50684917", "0.50631887", "0.50622445", "0.50587857", "0.5057697", "0.5055566", "0.5054346", "0.5050186", "0.5038189", "0.5035449", "0.50341773", "0.50322825" ]
0.61350024
5
Adapt the resolution of the spectra to match the Lick definitions. Lick definitions have different resolution elements as a function of wavelength. These definitions are hardcoded in this function
def reduce_resolution(wi, fi, fwhm0=0.55, sigma_floor=0.2): # all in AA w_lick_res = (4000., 4400., 4900., 5400., 6000.) lick_res = (11.5, 9.2, 8.4, 8.4, 9.8) # FWHM in AA w = np.asarray(wi) flux = np.atleast_2d(fi) # Linear interpolation of lick_res over w # numpy interp does constant instead of extrapolation # res = np.interp(w, w_lick_res, lick_res) # spline order: 1 linear, 2 quadratic, 3 cubic ... from scipy.interpolate import InterpolatedUnivariateSpline res = InterpolatedUnivariateSpline(w_lick_res, lick_res, k=1)(w) # Compute width from fwhm const = 2. * np.sqrt(2. * np.log(2)) # conversion fwhm --> sigma lick_sigma = np.sqrt((res ** 2 - fwhm0 ** 2)) / const # Convolution by g=1/sqrt(2*pi*sigma^2) * exp(-r^2/(2*sigma^2)) flux_red = np.zeros(flux.shape, dtype=flux.dtype) for i, sigma in enumerate(lick_sigma): maxsigma = 3. * sigma # sampling floor: min (0.2, sigma * 0.1) delta = min(sigma_floor, sigma * 0.1) delta_wj = np.arange(-maxsigma, + maxsigma, delta) wj = delta_wj + w[i] for k, fk in enumerate(flux): fluxj = np.interp(wj, w, fk, left=0., right=0.) flux_red[k, i] = np.sum(fluxj * delta * np.exp(-0.5 * (delta_wj / sigma) ** 2)) flux_red /= lick_sigma * const return flux_red.reshape(np.shape(fi))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _set_resolution( self ):\r\n offset = 0\r\n # if current and skinned resolutions differ and skinned resolution is not\r\n # 1080i or 720p (they have no 4:3), calculate widescreen offset\r\n if ( ( not ( self.currentResolution == self.resolution ) ) and self.resolution > 1 ):\r\n # check if current resolution is 16x9\r\n if ( self.currentResolution == 0 or self.currentResolution % 2 ): iCur16x9 = 1\r\n else: iCur16x9 = 0\r\n # check if skinned resolution is 16x9\r\n if ( self.resolution % 2 ): i16x9 = 1\r\n else: i16x9 = 0\r\n # calculate widescreen offset\r\n offset = iCur16x9 - i16x9\r\n self.win.setCoordinateResolution( self.resolution + offset )", "def change_resolution(self):", "def scale_sky_spectrum(wlm, sky_spectrum, spectra, cut_sky=4., fmax=10, fmin=1, valid_wave_min=0, valid_wave_max=0, \n fibre_list=[100,200,300,400,500,600,700,800,900], plot=True, verbose=True, warnings=True): \n \n# # Read sky lines provided by 2dFdr\n# sky_line_,flux_sky_line_ = read_table(\"sky_lines_2dfdr.dat\", [\"f\", \"f\"] )\n# # Choose those lines in the range\n# sky_line=[]\n# flux_sky_line=[]\n# valid_wave_min = 6240\n# valid_wave_max = 7355\n# for i in range(len(sky_line_)):\n# if valid_wave_min < sky_line_[i] < valid_wave_max:\n# sky_line.append(sky_line_[i])\n# flux_sky_line.append(flux_sky_line_[i])\n \n \n if valid_wave_min == 0: valid_wave_min = wlm[0]\n if valid_wave_max == 0: valid_wave_max = wlm[-1]\n \n if verbose: print(\"\\n> Identifying sky lines using cut_sky =\",cut_sky,\", allowed SKY/OBJ values = [\",fmin,\",\",fmax,\"]\")\n if verbose: print(\" Using fibres = \",fibre_list)\n\n peaks,peaks_name,peaks_rest,continuum_limits=search_peaks(wlm,sky_spectrum, plot=plot, cut=cut_sky, fmax=fmax, only_id_lines=False, verbose=False) \n\n ratio_list=[]\n valid_peaks=[]\n \n if verbose: print(\"\\n Sky line Gaussian ratio Flux ratio\")\n n_sky_lines_found=0\n for i in range(len(peaks)):\n sky_spectrum_data=fluxes(wlm,sky_spectrum, peaks[i], fcal=False, lowlow=50,highhigh=50, plot=False, verbose=False, warnings=False)\n \n sky_median_continuum = np.nanmedian(sky_spectrum_data[11])\n \n object_spectrum_data_gauss=[]\n object_spectrum_data_integrated=[] \n median_list=[]\n for fibre in fibre_list: \n object_spectrum_flux=fluxes(wlm, spectra[fibre], peaks[i], fcal=False, lowlow=50,highhigh=50, plot=False, verbose=False, warnings=False)\n object_spectrum_data_gauss.append(object_spectrum_flux[3]) # Gaussian flux is 3\n object_spectrum_data_integrated.append(object_spectrum_flux[7]) # integrated flux is 7\n median_list.append(np.nanmedian(object_spectrum_flux[11]))\n object_spectrum_data=np.nanmedian(object_spectrum_data_gauss)\n object_spectrum_data_i=np.nanmedian(object_spectrum_data_integrated)\n \n object_median_continuum=np.nanmin(median_list) \n \n if fmin < object_spectrum_data/sky_spectrum_data[3] * sky_median_continuum/object_median_continuum < fmax :\n n_sky_lines_found = n_sky_lines_found + 1\n valid_peaks.append(peaks[i])\n ratio_list.append(object_spectrum_data/sky_spectrum_data[3])\n if verbose: print(\"{:3.0f} {:5.3f} {:2.3f} {:2.3f}\".format(n_sky_lines_found,peaks[i],object_spectrum_data/sky_spectrum_data[3], object_spectrum_data_i/sky_spectrum_data[7])) \n\n\n #print \"ratio_list =\", ratio_list\n #fit = np.polyfit(valid_peaks, ratio_list, 0) # This is the same that doing an average/mean\n #fit_line = fit[0]+0*wlm\n fit_line =np.nanmedian(ratio_list) # We just do a median\n #fit_line = fit[1]+fit[0]*wlm\n #fit_line = fit[2]+fit[1]*wlm+fit[0]*wlm**2\n #fit_line = 
fit[3]+fit[2]*wlm+fit[1]*wlm**2+fit[0]*wlm**3\n \n \n if plot:\n plt.plot(valid_peaks,ratio_list,\"+\")\n #plt.plot(wlm,fit_line)\n plt.axhline(y=fit_line, color='k', linestyle='--')\n plt.xlim(valid_wave_min-10, valid_wave_max+10) \n #if len(ratio_list) > 0:\n plt.ylim(np.nanmin(ratio_list)-0.2,np.nanmax(ratio_list)+0.2)\n plt.title(\"Scaling sky spectrum to object spectra\")\n plt.xlabel(\"Wavelength [$\\mathrm{\\AA}$]\")\n plt.ylabel(\"OBJECT / SKY\")\n plt.minorticks_on()\n plt.show()\n plt.close()\n \n if verbose: print(\" Using this fit to scale sky spectrum to object, the median value is \",np.round(fit_line,3),\"...\") \n \n sky_corrected = sky_spectrum * fit_line\n\n# plt.plot(wlm,sky_spectrum, \"r\", alpha=0.3)\n# plt.plot(wlm,sky_corrected, \"g\", alpha=0.3)\n# plt.show()\n# plt.close()\n \n return sky_corrected, np.round(fit_line,3)", "def __init__(self, start_w: float = 280.0, stop_w: float = 4000.0, spectra: str = \"AM1.5G\"):\n super(PowerSpectrum).__init__(start_w, stop_w, spectra)\n self.spectrum[:, 1] = self.spectrum[:, 1] * (self.spectrum[:, 0] * 1e-9 / (constants.c * constants.h))\n self.interp = interpolate.interp1d(self.spectrum[:, 0], self.spectrum[:, 1])", "def resample(wavelength, spectra, resampling_ratio):\n\n new_length = int(np.round(wavelength.size * resampling_ratio))\n spectra_, wavelength_ = scipy.signal.resample(spectra, new_length, wavelength)\n return wavelength_, spectra_", "def _default_specs(self):\n # Spectrometer specs\n self.model = \"Flame-S\" # Spectrometer model\n self.fov = None # Field of view fo spectrometer\n self.ILS = None # Number array holding instrument line shape (possibly don't hold this here?)\n self.pix_num = 2048 # Number of pixels\n self.bit_depth = 16 # Bit depth of spectrometer detector\n\n # File information\n self.file_ext = '.npy' # Spectra saved as numpy array\n self.file_ss = '{}ss' # Shutter speed format spec\n self.file_spec_type = {'meas': 'Plume', 'dark': 'Dark', 'cal': 'ppmm', 'clear': 'Clear'}\n self.file_datestr = \"%Y-%m-%dT%H%M%S\" # Date/time format spec in filename\n\n\n\n # Acquisition settings\n self.start_int_time = 100 # Starting integration time\n self.start_coadd = 5 # Number of spectra to coadd\n self.framerate = 1 # Framerate of acquisitions (Hz)\n self.wavelengths = None # Wavelengths (nm)\n self.spectrum = None # Spectrum\n self.spectrum_filename = None # Filename for spectrum\n\n self.auto_int = True # Bool for requesting automated integration time adjustment\n self.min_saturation = 0.5 # Minimum saturation accepted before adjusting shutter speed (if auto_ss is True)\n self.max_saturation = 0.9 # Maximum saturation accepted before adjusting shutter speed (if auto_ss is True)\n self.saturation_range = [320, 330] # Range of wavelengths used in checking integration time\n\n # Predefined list of integration times for automatic exposure adjustment\n self.int_list = np.concatenate((np.arange(1, 10, 1),\n np.arange(10, 50, 5),\n np.arange(50, 100, 10),\n np.arange(100, 500, 50),\n np.arange(500, 1000, 100),\n np.arange(10 ** 3, 10 ** 4, 500),\n np.array([10 ** 4])))", "def coadd_spectra(spec_list_fits, out_name, scale_spectra=True,\r\n use_ratios=False, ratio_range=[4200, 4300], \r\n one_side=True):\r\n\r\n spec_list_txt = [f.replace('fits', 'txt') for f in spec_list_fits]\r\n\r\n # first spectrum in the list is always the reference spectrum\r\n hdr = pyfits.getheader(spec_list_fits[0])\r\n #mjd = hdr['MJD']\r\n #date_obs = hdr['DATE-OBS']\r\n #epoch = hdr['EPOCH']\r\n #observat = hdr['OBSERVAT']\r\n exptime 
= hdr['EXPTIME']\r\n seeing = hdr['FWHM']\r\n # save some keywords\r\n keys = ['OBJECT', 'OBSERVER', 'DICHROIC', 'APERTURE', 'LAMPS', 'UTSHUT', 'OBSLST', 'RA', 'DEC', 'HOURANG', 'HA', 'TELFOCUS', 'CASSPA', 'PARALLAC', 'CCDTEMP', 'ANGLE', 'GRATING', 'AIRMASS']\r\n #mjd_blue = hdr['MJD']\r\n exptime_blue = hdr['EXPTIME']\r\n hdr_save = {}\r\n for key in keys:\r\n hdr_save[key] = hdr[key]\r\n verr = np.float(hdr['VERR'])**2\r\n spec_ref = np.genfromtxt(spec_list_txt[0], names='wave, flux', \r\n dtype='f4, f4')\r\n err_ref = np.genfromtxt(spec_list_txt[0].replace('spec', 'err'), \r\n names='wave, flux', dtype='f4, f4')\r\n wave = spec_ref['wave']\r\n spec_ref = spec_ref['flux'].view(np.ma.masked_array)\r\n err_ref = err_ref['flux'].view(np.ma.masked_array)\r\n\r\n\r\n # err_ref['flux'] = np.where(err_ref['flux'] <= 0, 1, err_ref['flux']) # reset bad error values to 1\r\n # boolean array: mask out invalid regions so average excludes zeros\r\n bad_err = err_ref <= 0\r\n spec_ref[bad_err] = np.ma.masked\r\n err_ref[bad_err] = np.ma.masked\r\n\r\n\r\n # spectra and their errors will be stored here\r\n spectra = np.ma.zeros((spec_ref.size, len(spec_list_fits)), dtype='f4')\r\n spectra_err = np.ma.zeros((spec_ref.size, len(spec_list_fits)), dtype='f4')\r\n\r\n spectra[:, 0] = spec_ref\r\n spectra_err[:, 0] = err_ref\r\n\r\n ratio = [1]\r\n\r\n for i, fname in enumerate(spec_list_fits[1:]):\r\n fname_txt = spec_list_txt[i+1]\r\n hdr = pyfits.getheader(fname)\r\n exptime += hdr['EXPTIME']\r\n seeing += hdr['FWHM']\r\n verr += np.float(hdr['VERR'])**2\r\n spec = np.genfromtxt(fname_txt, names='wave, flux', dtype='f4, f4')\r\n err = np.genfromtxt(fname_txt.replace('spec', 'err'), \r\n names='wave, flux', dtype='f4, f4')\r\n spec = spec['flux'].view(np.ma.masked_array)\r\n err = err['flux'].view(np.ma.masked_array)\r\n # reset bad error values to 1\r\n # err['flux'] = np.where(err['flux'] <= 0, 1, err['flux']) \r\n bad_err = err <= 0\r\n spec[bad_err] = np.ma.masked\r\n err[bad_err] = np.ma.masked\r\n\r\n spectra[:, i+1] = spec\r\n spectra_err[:, i+1] = err\r\n if scale_spectra:\r\n if use_ratios:\r\n # use the specified region to determine te ratio of spectra\r\n good = np.where((spec > ratio_range[0]) & \r\n (spec < ratio_range[1]))\r\n ratio.append(np.median(spec_ref[good]/spec[good]))\r\n else:\r\n spec_good_err = err > 0\r\n # identify overlap between sides\r\n wgd = (err_ref > 0) & (err > 0)\r\n\r\n ratio.append(match_spectra_leastsq(spec[wgd], \r\n spec_ref[wgd], err[wgd], \r\n err_ref[wgd]))\r\n\r\n \r\n\r\n spec_avg, sum_weights = np.average(spectra*ratio, weights=1./(spectra_err*ratio)**2, axis=1, returned=True)\r\n spec_err = 1./np.sqrt(sum_weights)\r\n # output coadded spectra and uncertainties\r\n f = open('%s.spec.txt' % out_name, 'w')\r\n g = open('%s.err.txt' % out_name, 'w')\r\n h = open('%s.snr.txt' % out_name, 'w')\r\n # add some header keywords\r\n for key in hdr_save.keys():\r\n f.write('# %s = %s\\n' % (key, hdr_save[key]))\r\n if one_side:\r\n # exposure time and velocity error are only well-defined for\r\n # data combined from a single side\r\n f.write('# FWHM = %.2f\\n' % float(seeing/len(spec_list_fits)))\r\n f.write('# VERR = %.2f\\n' % np.sqrt(verr))\r\n #f.write('# MJD = %.6f\\n' % (mjd + exptime/(2.*60.*60.*24.)))\r\n else:\r\n # when combining sides, use the MJD and EXPTIME from the combined blue side\r\n f.write('# EXPTIME = %.0f\\n' % exptime_blue)\r\n #f.write('# MJD = %.6f\\n' % mjd_blue)\r\n\r\n for x, y, z in zip(wave, spec_avg, spec_err):\r\n f.write('%.3f %.5g\\n' % 
(x, y))\r\n g.write('%.3f %.5g\\n' % (x, z))\r\n h.write('%.3f %.5g\\n' % (x, y/z))\r\n f.close()\r\n g.close()\r\n h.close()\r\n # save as 1D IRAF FITS files\r\n iraf.delete('%s.spec.fits' % out_name, verify=\"no\")\r\n iraf.delete('%s.err.fits' % out_name, verify=\"no\")\r\n iraf.delete('%s.snr.fits' % out_name, verify=\"no\")\r\n iraf.rspectext('%s.spec.txt' % out_name, '%s.spec.fits' % out_name, \r\n crval1 = hdr['CRVAL1'], cdelt1 = hdr['CDELT1'])\r\n iraf.rspectext('%s.err.txt' % out_name, '%s.err.fits' % out_name, \r\n crval1 = hdr['CRVAL1'], cdelt1 = hdr['CDELT1'])\r\n iraf.rspectext('%s.snr.txt' % out_name, '%s.snr.fits' % out_name, \r\n crval1 = hdr['CRVAL1'], cdelt1 = hdr['CDELT1'])\r\n # add keywords\r\n f = pyfits.open('%s.spec.fits' % out_name)\r\n for key in hdr_save.keys():\r\n #f[0].header.update(key, hdr_save[key])\r\n f[0].header[key]= hdr_save[key]\r\n #f[0].header.update('DATE-OBS', date_obs)\r\n #f[0].header.update('OBSERVAT', observat)\r\n #f[0].header.update('EPOCH', epoch)\r\n #f[0].header['DATE-OBS']= date_obs\r\n #f[0].header['OBSERVAT']= observat\r\n #f[0].header['EPOCH']= epoch\r\n if one_side:\r\n # exposure time and velocity error are only well-defined for\r\n # data combined from a single side\r\n #f[0].header.update('EXPTIME', exptime)\r\n #f[0].header.update('FWHM', seeing/len(spec_list_fits))\r\n #f[0].header.update('VERR', '%.2f' % np.sqrt(verr), 'Uncertainty in km/s')\r\n f[0].header['EXPTIME']= exptime\r\n f[0].header['FWHM']= seeing/len(spec_list_fits)\r\n f[0].header['VERR']= '%.2f' %np.sqrt(verr)\r\n #mjd += exptime/(2.*60.*60.*24.)\r\n else:\r\n # when combining sides, use the EXPTIME from the combined blue side\r\n #f[0].header.update('EXPTIME', exptime_blue)\r\n f[0].header['EXPTIME']= exptime_blue\r\n #del f[0].header['VERR'] #DaveC\r\n #f[0].header.update('MJD', np.round(mjd, decimals=6))\r\n #f[0].header['MJD']= np.round(mjd, decimals=6)\r\n\r\n f.writeto('%s.spec.fits' % out_name, clobber=True)\r\n f.close()", "def change_resolution(self, L):\n if L != self.L:\n self.L = L\n self._modes = self._get_modes(self.L)\n self._set_up()", "def setResolution(self, resolution):\n self._lowLevelSetDeviceResolution(self.ADC_RESOLUTIONS[resolution])", "def plot_spectrum(inp='jw02767005001-02-clear-prism-nrs2-2767_11027.spec.fits', z=9.505, vel_width=100, bkg=None, scale_disp=1.3, nspline=27, show_cont=True, draws=100, figsize=(16, 8), ranges=[(3650, 4980)], Rline=1000, full_log=False, write=False, eazy_templates=None, use_full_dispersion=True, get_spl_templates=False, scale_uncertainty_kwargs=None, plot_unit=None, spline_single=True, sys_err=0.02, return_fit_results=False, use_aper_columns=False, label=None, **kwargs):\n global SCALE_UNCERTAINTY\n \n lw, lr = utils.get_line_wavelengths()\n \n if isinstance(inp, str):\n sampler = SpectrumSampler(inp, **kwargs)\n file = inp\n elif isinstance(inp, pyfits.HDUList):\n sampler = SpectrumSampler(inp, **kwargs)\n file = None\n else:\n file = None\n sampler = inp\n \n if (label is None) & (file is not None):\n label = os.path.basename(file)\n \n spec = sampler.spec\n \n if (use_aper_columns > 0) & ('aper_flux' in spec.colnames):\n if ('aper_corr' in spec.colnames) & (use_aper_columns > 1):\n ap_corr = spec['aper_corr']*1\n else:\n ap_corr = 1\n \n flam = spec['aper_flux']*spec['to_flam']*ap_corr\n eflam = spec['aper_full_err']*spec['to_flam']*ap_corr\n else:\n flam = spec['flux']*spec['to_flam']\n eflam = spec['full_err']*spec['to_flam']\n \n wrest = spec['wave']/(1+z)*1.e4\n wobs = spec['wave']\n mask = 
spec['valid']\n \n flam[~mask] = np.nan\n eflam[~mask] = np.nan\n \n bspl = sampler.bspline_array(nspline=nspline, get_matrix=True)\n\n # bspl = utils.bspline_templates(wave=spec['wave']*1.e4,\n # degree=3,\n # df=nspline)\n \n w0 = utils.log_zgrid([spec['wave'].min()*1.e4,\n spec['wave'].max()*1.e4], 1./Rline)\n \n templates, tline, _A = make_templates(sampler, z,\n bspl=bspl,\n eazy_templates=eazy_templates,\n vel_width=vel_width,\n scale_disp=scale_disp,\n use_full_dispersion=use_full_dispersion,\n disp=spec.disp,\n grating=spec.grating,\n **kwargs,\n )\n \n if scale_uncertainty_kwargs is not None:\n _, escl, _ = calc_uncertainty_scale(file=None,\n data=(spec, _A),\n **scale_uncertainty_kwargs)\n eflam *= escl\n spec['escale'] *= escl\n \n okt = _A[:,mask].sum(axis=1) > 0\n \n _Ax = _A[okt,:]/eflam\n _yx = flam/eflam\n \n if eazy_templates is None:\n _x = np.linalg.lstsq(_Ax[:,mask].T, \n _yx[mask], rcond=None)\n else:\n _x = nnls(_Ax[:,mask].T, _yx[mask])\n \n coeffs = np.zeros(_A.shape[0])\n coeffs[okt] = _x[0]\n \n _model = _A.T.dot(coeffs)\n _mline = _A.T.dot(coeffs*tline)\n _mcont = _model - _mline\n \n full_chi2 = ((flam - _model)**2/eflam**2)[mask].sum()\n cont_chi2 = ((flam - _mcont)**2/eflam**2)[mask].sum()\n \n if return_fit_results:\n return templates, coeffs, flam, eflam, _model, mask, full_chi2\n \n try:\n oktemp = okt & (coeffs != 0)\n \n AxT = (_A[oktemp,:]/eflam)[:,mask].T\n \n covar_i = utils.safe_invert(np.dot(AxT.T, AxT))\n covar = utils.fill_masked_covar(covar_i, oktemp)\n covard = np.sqrt(covar.diagonal())\n \n has_covar = True\n except:\n has_covar = False\n covard = coeffs*0.\n N = len(templates)\n covar = np.eye(N, N)\n \n print(f'\\n# line flux err\\n# flux x 10^-20 erg/s/cm2')\n if label is not None:\n print(f'# {label}')\n \n print(f'# z = {z:.5f}\\n# {time.ctime()}')\n \n cdict = {}\n eqwidth = {}\n \n for i, t in enumerate(templates):\n cdict[t] = [float(coeffs[i]), float(covard[i])]\n if t.startswith('line '):\n lk = t.split()[-1]\n \n # Equivalent width:\n # coeffs, line fluxes are in units of 1e-20 erg/s/cm2\n # _mcont, continuum model is in units of 1-e20 erg/s/cm2/A\n # so observed-frame equivalent width is roughly\n # eqwi = coeffs[i] / _mcont[ wave_obs[i] ]\n \n if lk in lw:\n lwi = lw[lk][0]*(1+z)/1.e4\n continuum_i = np.interp(lwi, spec['wave'], _mcont)\n eqwi = coeffs[i]/continuum_i\n else:\n eqwi = np.nan\n \n eqwidth[t] = eqwi\n \n print(f'{t:>20} {coeffs[i]:8.1f} ± {covard[i]:8.1f} (EW={eqwi:9.1f})')\n \n \n if 'srcra' not in spec.meta:\n spec.meta['srcra'] = 0.0\n spec.meta['srcdec'] = 0.0\n spec.meta['srcname'] = 'unknown'\n \n spec['model'] = _model/spec['to_flam']\n spec['mline'] = _mline/spec['to_flam']\n \n data = {'z': float(z),\n 'file':file,\n 'label':label,\n 'ra': float(spec.meta['srcra']),\n 'dec': float(spec.meta['srcdec']),\n 'name': str(spec.meta['srcname']),\n 'wmin':float(spec['wave'][mask].min()),\n 'wmax':float(spec['wave'][mask].max()),\n 'coeffs':cdict,\n 'covar':covar.tolist(),\n 'wave': [float(m) for m in spec['wave']],\n 'flux': [float(m) for m in spec['flux']],\n 'err': [float(m) for m in spec['err']],\n 'escale': [float(m) for m in spec['escale']],\n 'model': [float(m) for m in _model/spec['to_flam']],\n 'mline':[float(m) for m in _mline/spec['to_flam']],\n 'templates':templates, \n 'dof': int(mask.sum()), \n 'fullchi2': float(full_chi2), \n 'contchi2': float(cont_chi2),\n 'eqwidth': eqwidth,\n }\n \n for k in ['z','wmin','wmax','dof','fullchi2','contchi2']:\n spec.meta[k] = data[k]\n \n #fig, axes = 
plt.subplots(len(ranges)+1,1,figsize=figsize)\n if len(ranges) > 0:\n fig = plt.figure(figsize=figsize, constrained_layout=True)\n gs = GridSpec(2, len(ranges), figure=fig)\n axes = []\n for i, _ra in enumerate(ranges):\n axes.append(fig.add_subplot(gs[0,i]))\n \n axes.append(fig.add_subplot(gs[1,:]))\n \n else:\n fig, ax = plt.subplots(1,1,figsize=figsize)\n axes = [ax]\n \n _Acont = (_A.T*coeffs)[mask,:][:,:nspline]\n _Acont[_Acont < 0.001*_Acont.max()] = np.nan\n \n if (draws is not None) & has_covar:\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n mu = np.random.multivariate_normal(coeffs[oktemp], covar_i, size=draws)\n \n #print('draws', draws, mu.shape, _A.shape)\n mdraws = _A[oktemp,:].T.dot(mu.T)\n else:\n mdraws = None\n \n if plot_unit is not None:\n unit_conv = (1*spec.meta['flamunit']).to(plot_unit,\n equivalencies=spec.equiv).value\n else:\n unit_conv = np.ones(len(wobs))\n \n for ax in axes:\n if 1:\n ax.errorbar(wobs, flam*unit_conv, eflam*unit_conv,\n marker='None', linestyle='None',\n alpha=0.5, color='k', ecolor='k', zorder=100)\n\n ax.step(wobs, flam*unit_conv, color='k', where='mid', lw=1, alpha=0.8)\n # ax.set_xlim(3500, 5100)\n\n #ax.plot(_[1]['templz']/(1+z), _[1]['templf'])\n \n ax.step(wobs[mask], (_mcont*unit_conv)[mask],\n color='pink', alpha=0.8, where='mid')\n ax.step(wobs[mask], (_model*unit_conv)[mask],\n color='r', alpha=0.8, where='mid')\n \n cc = utils.MPL_COLORS\n for w, c in zip([3727, 4980, 6565, 9070, 9530, 1.094e4, 1.282e4, \n 1.875e4], \n [cc['purple'], cc['b'], cc['g'], 'darkred', 'darkred', \n cc['pink'], cc['pink'], cc['pink']]):\n wz = w*(1+z)/1.e4\n dw = 70*(1+z)/1.e4\n ax.fill_between([wz-dw, wz+dw], [0,0], [100,100], \n color=c, alpha=0.07, zorder=-100)\n \n \n if mdraws is not None:\n ax.step(wobs[mask], (mdraws.T*unit_conv).T[mask,:],\n color='r', alpha=np.maximum(1./draws, 0.02), zorder=-100, where='mid')\n\n if show_cont:\n ax.plot(wobs[mask], (_Acont.T*unit_conv[mask]).T,\n color='olive', alpha=0.3)\n \n ax.fill_between(ax.get_xlim(), [-100, -100], [0, 0], color='0.8', \n alpha=0.5, zorder=-1)\n\n ax.fill_betweenx([0, 100], [0,0], [1215.67*(1+z)/1.e4]*2, \n color=utils.MPL_COLORS['orange'], alpha=0.2,\n zorder=-1)\n \n ax.grid()\n\n # axes[0].set_xlim(1000, 2500)\n # ym = 0.15; axes[0].set_ylim(-0.1*ym, ym)\n \n for i, r in enumerate(ranges):\n axes[i].set_xlim(*[ri*(1+z)/1.e4 for ri in r])\n # print('xxx', r)\n \n if spec.filter == 'clear':\n axes[-1].set_xlim(0.6, 5.29)\n axes[-1].xaxis.set_minor_locator(MultipleLocator(0.1))\n axes[-1].xaxis.set_major_locator(MultipleLocator(0.5))\n elif spec.filter == 'f070lp':\n axes[-1].set_xlim(0.69, 1.31)\n axes[-1].xaxis.set_minor_locator(MultipleLocator(0.02))\n elif spec.filter == 'f100lp':\n axes[-1].set_xlim(0.99, 1.91)\n axes[-1].xaxis.set_minor_locator(MultipleLocator(0.02))\n axes[-1].xaxis.set_major_locator(MultipleLocator(0.1))\n elif spec.filter == 'f170lp':\n axes[-1].set_xlim(1.69, 3.21)\n elif spec.filter == 'f290lp':\n axes[-1].set_xlim(2.89, 5.31)\n else:\n axes[-1].set_xlim(wrest[mask].min(), wrest[mask].max())\n \n axes[-1].set_xlabel(f'obs wavelenth, z = {z:.5f}')\n \n #axes[0].set_title(os.path.basename(file))\n \n for ax in axes:\n xl = ax.get_xlim()\n ok = wobs > xl[0]\n ok &= wobs < xl[1]\n ok &= np.abs(wrest-5008) > 100\n ok &= np.abs(wrest-6564) > 100\n ok &= mask\n if ok.sum() == 0:\n ax.set_visible(False)\n continue\n \n ymax = np.maximum((_model*unit_conv)[ok].max(), 10*np.median((eflam*unit_conv)[ok]))\n \n ymin = np.minimum(-0.1*ymax, 
-3*np.median((eflam*unit_conv)[ok]))\n ax.set_ylim(ymin, ymax*1.3)\n # print(xl, ymax)\n \n if ok.sum() > 0:\n if (np.nanmax((flam/eflam)[ok]) > 20) & (full_log):\n ax.set_ylim(0.005*ymax, ymax*5)\n ax.semilogy()\n \n if len(axes) > 0:\n gs.tight_layout(fig, pad=0.8)\n else:\n fig.tight_layout(pad=0.8)\n \n if label is not None:\n fig.text(0.015*12./12, 0.005, f'{label}',\n ha='left', va='bottom',\n transform=fig.transFigure, fontsize=8)\n \n fig.text(1-0.015*12./12, 0.005, time.ctime(),\n ha='right', va='bottom',\n transform=fig.transFigure, fontsize=6)\n \n \n return fig, spec, data", "def get_experimental_spectra(mol):\n\n data = pd.read_csv(mol, sep=',')\n wavelength = data.values[:, 0]\n\n absorption = data.values[:, 1]\n\n func = interp1d(wavelength, absorption, kind='quadratic')\n wavelength_new = 1. / np.linspace(1. / wavelength.max(), 1. / wavelength.min(), 100)\n absorption_new = func(wavelength_new)\n absorption_new *= 100. / absorption_new.max()\n\n return wavelength_new, absorption_new", "def zoom(self):\n res = np.max(self.metadata[\"resolution\"])\n\n if self.atlas_name == \"allen_human_500um\":\n logger.debug(\n \"ATLAS: setting zoom manually for human atlas, atlas needs fixing\"\n )\n return 350\n else:\n return 40 / res", "def change_resolution(self, L, M, N, NFP=None, sym=None):\n self._NFP = NFP if NFP is not None else self.NFP\n if L != self.L or M != self.M or N != self.N or sym != self.sym:\n self.L = L\n self.M = M\n self.N = N\n self._sym = sym if sym is not None else self.sym\n self._modes = self._get_modes(\n self.L, self.M, self.N, spectral_indexing=self.spectral_indexing\n )\n self._set_up()", "def resolution(self):\n return Prism.resolution(self,self.beam,self.wavelength)", "def __init__(self, start_w: float = 280.0, stop_w: float = 4000.0, spectra: str = \"AM1.5G\"):\n super(PhotocurrentSpectrum).__init__(start_w, stop_w, spectra)\n self.spectrum[:, 1] *= constants.e\n self.interp = interpolate.interp1d(self.spectrum[:, 0], self.spectrum[:, 1])", "def update_spectrum(self, spectrum):\n self.spectrum = spectrum\n return", "def change_resolution(self, L, M, sym=None):\n if L != self.L or M != self.M or sym != self.sym:\n self.L = L\n self.M = M\n self._sym = sym if sym is not None else self.sym\n self._modes = self._get_modes(\n self.L, self.M, spectral_indexing=self.spectral_indexing\n )\n self._set_up()", "def changeRes(width, height):\n\n\tlive_capture.set(3, width)\n\tlive_capture.set(4, height)", "def renormalize_by_spectrum(self, spectrum, dispersion='match', trim_mode='wav', inplace=False):\n\n spec = self.copy()\n spec2 = spectrum.copy()\n\n\n\n if dispersion == \"match\":\n spec.match_dispersions(spec2, match_secondary=False,\n force=True, interp_kind='linear')\n elif isinstance(dispersion,(list,)):\n spec.trim_dispersion(dispersion, mode=trim_mode,inplace=True)\n spec2.trim_dispersion(dispersion, mode=trim_mode,inplace=True)\n else:\n print(\"Spectra will be normalized but dispersion ranges do not necessarily match!\")\n\n\n\n average_self_flux = np.trapz(spec.flux, spec.dispersion)\n\n average_spec_flux = np.trapz(spec2.flux, spec2.dispersion)\n\n self.scale = (average_spec_flux/average_self_flux)\n\n if inplace:\n self.flux_scale_factor = average_spec_flux/average_self_flux\n self.flux = self.flux * (average_spec_flux/average_self_flux)\n else:\n spec = self.copy()\n spec.flux_scale_factor = average_spec_flux/average_self_flux\n flux = self.flux * (average_spec_flux/average_self_flux)\n spec.scale = self.scale\n spec.flux = flux\n\n return spec", 
"def guess_scaling(name, spectrum):\n spectra = '%s/disp/%s.1d.fits' % (name, zerocount(spectrum))\n skyname = '%s/sky.1d.fits' % name\n spectrafits = pyfits.open(spectra)\n skyfits = pyfits.open(skyname)\n scalings = []\n for line in LINES:\n spec_peak, spec_cont = get_peak_cont(spectrafits, line, 5)\n sky_peak, sky_cont = get_peak_cont(skyfits, line, 5)\n scale = ((spec_peak - spec_cont) / (sky_peak - sky_cont))\n scalings.append(scale)\n return avg(*scalings)", "def comp_output_spectra(self):\n assert(hasattr(self,'r'))\n \n self.nx=int(self.nx)\n \n r_mat=self.r.T.reshape(self.nx,self.nx,self.N)\n\n in_allfreqs = np.fft.fftshift(np.fft.fftfreq(self.nx,d=self.L/self.nx))\n \n self.freqs=in_allfreqs[self.nx/2:]\n \n r_dft_flat=np.fft.fftshift(np.fft.fft2(r_mat,axes=[0,1]),axes=[0,1])*(self.L/self.nx)**2\n\n r_pw=abs(r_dft_flat)**2 \n r_pw_profiles=gl.dft2d_profiles(r_pw)\n \n self.re_pw_profile=np.mean(r_pw_profiles,axis=0)\n self.he_pw_profile=self.inputs.in_mean_pw_profile", "def setResolution(self, resolution):\n assert(resolution > 1 and resolution <= 8192)\n self.resolution = resolution", "def set_wavelength(self, wavelength):\n assert 0 <= wavelength <= 5000\n # Note: When connected via the IC bus of the camera, it is not\n # possible to change the wavelength (or the grating) while the CCD\n # is acquiring. So this will fail with an exception, and that's\n # probably the best we can do (unless we want to synchronize with the\n # CCD and ask to temporarily stop the acquisition).\n\n # Currently the SDK sometimes fail with 20201: SHAMROCK_COMMUNICATION_ERROR\n # when changing wavelength by a few additional nm. It _seems_ that it\n # works anyway (but not sure).\n # It seems that retrying a couple of times just works\n\n retry = 0\n while True:\n # set in nm\n err = self._dll.ShamrockSetWavelength(self._device, c_float(wavelength))\n if err != 20202 and retry <= 5: # as long as no success and lower than 5 retries\n # just try again\n retry += 1\n print(\"Failed to set wavelength, will try again\")\n time.sleep(0.1)\n else:\n self._grating_center = wavelength\n self.status(\"Wavelength change\", err)\n break", "def wavelength_solution(file_name):\n file_data = read_file(file_name)\n header_data = file_data[0]\n image_data = file_data[1]\n\n range_begin = header_data['CRVAL3']\n pixel_begin = header_data['CRPIX3']\n step_size = header_data['CD3_3']\n steps = len(image_data)\n range_end = range_begin + steps * step_size\n return {'begin': range_begin, 'end': range_end, 'steps': steps}", "def set_resolution(self):\n file_name = os.path.basename(self.in_file)\n if '1KM' in file_name:\n self.resolution = 1000\n else:\n raise ValueError(\n 'Cant read this data, please check its resolution: {}'.format(self.in_file))", "def reScaleLandsat(self,img):\n \n\t\tthermalBand = ee.List(['thermal'])\n\t\tthermal = ee.Image(img).select(thermalBand).multiply(10)\n \n\t\totherBands = ee.Image(img).bandNames().removeAll(thermalBand)\n\t\tscaled = ee.Image(img).select(otherBands).divide(0.0001)\n \n\t\timage = ee.Image(scaled.addBands(thermal)).int16()\n \n\t\treturn image.copyProperties(img)", "def Schlafly16(self, wavelength, AKs):\n # If input entry is a single float, turn it into an array\n try:\n len(wavelength)\n except:\n wavelength = [wavelength]\n\n # Return error if any wavelength is beyond interpolation range of\n # extinction law\n if ((min(wavelength) < (self.low_lim*10**-4)) | (max(wavelength) > (self.high_lim*10**-4))):\n return ValueError('{0}: wavelength values beyond interpolation 
range'.format(self))\n \n # Extract wave and A/AKs from law, turning wave into micron units\n wave = self.wave * (10**-4)\n law = self.obscuration\n\n # Find the value of the law at the closest points\n # to wavelength\n A_AKs_at_wave = []\n for ii in wavelength:\n idx = np.where( abs(wave - ii) == min(abs(wave - ii)) )\n A_AKs_at_wave.append(law[idx][0])\n\n # Now multiply by AKs (since law assumes AKs = 1)\n A_at_wave = np.array(A_AKs_at_wave) * AKs\n\n return A_at_wave", "def adapt_length_scale(self):\n Ne = max(1,self.Ne)\n Nc = max(1,self.Nc)\n ratio = Ne/(Ne+Nc)\n self.mu *= 2*ratio", "def set_resolution(self, res, video_file = \"output.avi\"):\n\t\tprint(\"Setting resolution to %s.\"%res)\n\t\thigh_res = (1088,720)\n\t\tmed_res = (640,480)\n\t\tlow_res = (320,240)\t\n\t\tif self.vs is not None:\t\n\t\t\tself.teardown()\n\t\t\tsleep(1)\n\n\t\tif res == \"high\": #high\n\t\t\tself.camera_matrix = np.array([[8.4360221747968785e+02, 0., 544.], [0., 8.4385823040303683e+02, 360.],[0., 0.,\n \t\t1.]])\n\t\t\tself.dist_coeff = np.array([1.7626446405747770e-01, -3.4120481004692560e-01,\n\t\t \t-2.1890672094602151e-03, -3.6706857342688248e-05,\n\t\t \t8.1488779271148601e-02])\n\t\t\tres = high_res\n\t\t\tself.cop.horizontal_fov = 76.884\n\t\t\tself.cop.vertical_fov = 51.714\n\n\t\telif res == \"med\": #med\n\t\t\tself.camera_matrix = np.array([[4.9855533317091482e+02, 0., 320.], [0., 4.9967286973785622e+02, 240.],[0., 0.,\n\t\t1.]])\n\t\t\tself.dist_coeff = np.array([1.9695524980263868e-01, -4.7266256496392656e-01,\n \t\t\t-2.8509501186610737e-03, -6.6742476969470338e-04,\n \t\t\t2.9734384543609033e-01])\n\t\t\tres = med_res\n\t\t\tself.cop.horizontal_fov = 85.521\n\t\t\tself.cop.vertical_fov = 69.626\n\n\t\telif res == \"low\": #low\n\t\t\tself.camera_matrix = np.array([[2.4848460687057266e+02, 0., 160.], [0., 2.4930955561049109e+02, 120.], [0., 0.,\n \t\t1.]])\n\t\t\tself.dist_coeff = np.array([2.1646548043084851e-01, -6.2149098910402545e-01,\n \t\t\t-1.9510859152085493e-03, -1.6281010642558004e-03,\n \t\t\t\t5.5614584686671453e-01])\n\t\t\tres = low_res\n\t\t\tself.cop.horizontal_fov = 83.237\n\t\t\tself.cop.vertical_fov = 68.536\n\t\t\n\t\tself.vs = VideoStream(usePiCamera=True, resolution=res).start()\n\t\tsleep(.5)\n\n\t\t#set copter properties\n\t\tself.cop.horizontal_resolution = res[0]\n\t\tself.cop.vertical_resolution = res[1]\n\t\t#full camera fov\n\t\tself.cop.horizontal_fov_rad = self.cop.horizontal_fov * math.pi / 180\n\t\tself.cop.vertical_fov_rad = self.cop.vertical_fov * math.pi / 180\n\n\t\tframe = self.vs.read()\n\t\tself.horizontal_resolution = frame.shape[1]\n\t\tself.vertical_resolution = frame.shape[0]\n\t\tself.c_x_image = self.horizontal_resolution / 2\n\t\tself.c_y_image = self.vertical_resolution / 2\n\t\tself.frame = frame\n\n\t\t#for color target stuff\n\t\tf_x = self.camera_matrix[0][0]\n\t\tf_y = self.camera_matrix[1][1]\n\t\tself.m = (f_x + f_y) / (2 * self.foc)\n\n\t\t#file writing\n\t\tfpsL = 60\n\t\tfpsM = 125\n\t\tfpsH = 40\n\t\t\n\t\tfourcc = cv2.VideoWriter_fourcc(*'MJPG')\n\t\tif res == high_res:\n\t\t\tself.out = cv2.VideoWriter(video_file, fourcc,fpsH,(self.horizontal_resolution,self.vertical_resolution),True)\n\t\telif res == med_res:\n\t\t\tself.out = cv2.VideoWriter(video_file, fourcc,fpsM,(self.horizontal_resolution,self.vertical_resolution),True)\n\t\telif res == low_res:\n\t\t\tself.out = cv2.VideoWriter(video_file, fourcc,fpsL,(self.horizontal_resolution,self.vertical_resolution),True)\n\n\t\tself.fps = FPS().start()", "def _set_pixel_size(self) 
-> None:\n # Not Pansharpened images\n if self.band_combi == Sv1BandCombination.PMS:\n # TODO: manage default resolution for PAN band ?\n self.pixel_size = self._ms_res\n # Pansharpened images\n else:\n self.pixel_size = self._pan_res", "def airy_and_slicer(surface, wavelength, scale_mas, PSF_window, N_window):\n\n # Print message to know we are updating the cache\n print('Recalculating Airy Pattern for %.3f microns' % wavelength)\n\n # Plate scales [Px, Py] for each spaxel scale in mm / arcsec,\n # depending on the surface [IS: Image Slicer, DET: Detector]\n plate_scales = {'IS': {4.0: [125, 250], 60.0: [16.67, 16.67]},\n 'DET': {4.0: [3.75, 7.5], 60.0: [0.5, 0.5]}}\n plate_x = plate_scales[surface][scale_mas][0]\n plate_y = plate_scales[surface][scale_mas][1]\n\n # We know how many Microns the pixels of the Geometric PSF span [PSF_window / N_window]\n pix_sampling = PSF_window / N_window # micron at the detector plane\n # Using the plate scale we calculate how many m.a.s each of those pixels have to span\n pix_scale_x = pix_sampling / plate_x # milliarcsec / pixel\n pix_scale_y = pix_sampling / plate_y # milliarcsec / pixel\n\n # Calculate the relative size of the pupil aperture needed to ensure the PSF is\n # sampled with the given pix_scale at the focal plane\n ELT_DIAM = 39\n MILIARCSECS_IN_A_RAD = 206265000\n pix_rad_x = pix_scale_x / MILIARCSECS_IN_A_RAD # radians / pixel\n pix_rad_y = pix_scale_y / MILIARCSECS_IN_A_RAD\n RHO_APER_x = pix_rad_x * ELT_DIAM / (wavelength * 1e-6)\n RHO_APER_y = pix_rad_y * ELT_DIAM / (wavelength * 1e-6)\n RHO_OBSC_x = 0.30 * RHO_APER_x # ELT central obscuration\n RHO_OBSC_y = 0.30 * RHO_APER_y # ELT central obscuration\n\n # Sanity check\n PIX_RAD_x = RHO_APER_x * wavelength / ELT_DIAM * 1e-6\n PIX_RAD_y = RHO_APER_y * wavelength / ELT_DIAM * 1e-6\n PIX_MAS_x = PIX_RAD_x * MILIARCSECS_IN_A_RAD\n PIX_MAS_y = PIX_RAD_y * MILIARCSECS_IN_A_RAD\n\n # Define the ELT pupil mask. 
Note that we use a central obscuration too\n N = 2048\n x = np.linspace(-1, 1, N)\n xx, yy = np.meshgrid(x, x)\n\n # To get the anamorphic scaling we define the equation for an ellipse\n rho = np.sqrt((xx / RHO_APER_x) ** 2 + (yy / RHO_APER_y) ** 2)\n\n # (1) Propagate to the Image Slicer Focal plane\n elt_mask = (RHO_OBSC_x / RHO_APER_x < rho) & (rho < 1.0)\n pupil = elt_mask * np.exp(1j * elt_mask)\n image_electric = fftshift(fft2(pupil))\n\n if surface == 'IS':\n # print(\"IS\")\n # We are already at the Image Slicer, don't do anything else\n min_pix, max_pix = N // 2 - N_window // 2, N // 2 + N_window // 2\n final_psf = (np.abs(image_electric))**2\n final_psf /= np.max(final_psf)\n crop_psf = final_psf[min_pix:max_pix, min_pix:max_pix]\n\n elif surface == 'DET':\n # print(\"DET\")\n # (1.1) Add slicer effect by masking\n # We mask the PSF covering a band of size 1x SPAXEL, depending on the scale\n # If we have 4x4 mas, then we cover a band of 4 mas over the PSF\n x_min, x_max = -N/2 * PIX_MAS_x, N/2 * PIX_MAS_x\n y_min, y_max = -N/2 * PIX_MAS_y, N/2 * PIX_MAS_y\n x_slice = np.linspace(x_min, x_max, N, endpoint=True)\n y_slice = np.linspace(y_min, y_max, N, endpoint=True)\n x_grid, y_grid = np.meshgrid(x_slice, y_slice)\n slicer_mask = np.abs(y_grid) < scale_mas / 2\n\n # ## Show the PSF both in [mas] space where it should be circular and in [pixel] space where it should be anamorphic\n # fig, ax = plt.subplots(1, 1)\n # img1 = ax.imshow((np.abs(image_electric))**2, extent=[x_min, x_max, y_min, y_max], cmap='bwr')\n # # plt.colorbar(img1, ax=ax)\n # ax.set_title(r'Airy Pattern | %.1f mas scale | Wavelength: %.3f $\\mu$m' % (scale_mas, wavelength))\n # ax.set_xlabel(r'X [mas]')\n # ax.set_ylabel(r'Y [mas]')\n # ax.set_xlim([-10, 10])\n # ax.set_ylim([-10, 10])\n #\n # fig, ax = plt.subplots(1, 1)\n # img1 = ax.imshow((np.abs(image_electric))**2, extent=[-N/2, N/2, -N/2, N/2], cmap='bwr')\n # ax.set_title(r'Airy Pattern | %.1f mas scale | Wavelength: %.3f $\\mu$m' % (scale_mas, wavelength))\n # ax.set_xlabel(r'Pixels [ ]')\n # ax.set_ylabel(r'Pixels [ ]')\n # ax.set_xlim([-100, 100])\n # ax.set_ylim([-100, 100])\n\n # plt.show()\n\n # (2) Propagate the masked electric field to Pupil Plane\n pup_grating = ifft2(fftshift(slicer_mask * image_electric))\n # (2.1) Add pupil mask, this time without the central obscuration\n aperture_mask = rho < 1.0\n\n # (3) Propagate back to Focal Plane\n final_focal = fftshift(fft2(aperture_mask * pup_grating))\n final_psf = (np.abs(final_focal))**2\n final_psf /= np.max(final_psf)\n\n # (4) Crop the PSF to fit to the necessary window to ease the convolutions\n min_pix, max_pix = N//2 - N_window//2, N//2 + N_window//2\n crop_psf = final_psf[min_pix:max_pix, min_pix:max_pix]\n\n # If we want to show the plots for Documentation\n\n # fig, (ax1, ax2, ax3) = plt.subplots(1, 3)\n # psf_airy = (np.abs(image_electric))**2\n # img1 = ax1.imshow(psf_airy, extent=[x_min, x_max, y_min, y_max], cmap='bwr')\n # ax1.axhline(y=scale_mas/2, linestyle='--', color='black')\n # ax1.axhline(y=-scale_mas/2, linestyle='--', color='black')\n # ax1.set_xlabel(r'X [mas]')\n # ax1.set_ylabel(r'Y [mas]')\n # ax1.set_xlim([-15, 15])\n # ax1.set_ylim([-15, 15])\n # ax1.set_title(r'Airy Pattern | Slicer Mask %.1f mas' % scale_mas)\n #\n # img2 = ax2.imshow(aperture_mask * (np.abs(pup_grating)**2), extent=[-1, 1, -1, 1], cmap='bwr')\n # ax2.set_title(r'Pupil Plane | Aperture Mask')\n # ax2.set_xlim([-0.25, 0.25])\n # ax2.set_ylim([-0.25, 0.25])\n #\n # img3 = ax3.imshow(final_psf, 
extent=[x_min, x_max, y_min, y_max], cmap='bwr')\n # ax3.set_xlabel(r'X [mas]')\n # ax3.set_ylabel(r'Y [mas]')\n # ax3.set_xlim([-15, 15])\n # ax3.set_ylim([-15, 15])\n # ax3.set_title(r'Diffraction Effects')\n # plt.show()\n\n return crop_psf", "def setColorResolution(width=1,height=1):\n dislin.setres(width,height)", "def retarder(self):\n self.spectrum = self.spectrum", "def dichroic(self):\n self.spectrum = self.spectrum", "def spectrl2_data():\n # reference spectra generated with solar_utils==0.3\n kwargs = {\n 'surface_tilt': 0,\n 'relative_airmass': 1.4899535986910446,\n 'apparent_zenith': 47.912086486816406,\n 'aoi': 47.91208648681641,\n 'ground_albedo': 0.2,\n 'surface_pressure': 101300,\n 'ozone': 0.344,\n 'precipitable_water': 1.42,\n 'aerosol_turbidity_500nm': 0.1,\n 'dayofyear': 75\n }\n df = pd.read_csv(SPECTRL2_TEST_DATA)\n # convert um to nm\n df['wavelength'] *= 1000\n df[['specdif', 'specdir', 'specetr', 'specglo']] /= 1000\n return kwargs, df", "def resample(self, new_dispersion, inplace=False, force=False):\n\n # Mapping of the SpecOneD object variables to the function\n # variables\n\n old_spec_wavs = self.dispersion\n spec_fluxes = self.flux\n if self.flux_err is not None:\n spec_errs = self.flux_err\n else:\n spec_errs = None\n\n new_spec_wavs = new_dispersion\n\n if force:\n indices = np.where((new_spec_wavs < old_spec_wavs.max()) &\n (new_spec_wavs > old_spec_wavs.min()))\n new_spec_wavs = new_spec_wavs[indices]\n\n # Arrays of left-hand sides and widths for the old and new bins\n spec_widths = np.zeros(old_spec_wavs.shape[0])\n spec_lhs = np.zeros(old_spec_wavs.shape[0])\n spec_lhs[0] = old_spec_wavs[0]\n spec_lhs[0] -= (old_spec_wavs[1] - old_spec_wavs[0]) / 2\n spec_widths[-1] = (old_spec_wavs[-1] - old_spec_wavs[-2])\n spec_lhs[1:] = (old_spec_wavs[1:] + old_spec_wavs[:-1]) / 2\n spec_widths[:-1] = spec_lhs[1:] - spec_lhs[:-1]\n\n filter_lhs = np.zeros(new_spec_wavs.shape[0] + 1)\n filter_widths = np.zeros(new_spec_wavs.shape[0])\n filter_lhs[0] = new_spec_wavs[0]\n filter_lhs[0] -= (new_spec_wavs[1] - new_spec_wavs[0]) / 2\n filter_widths[-1] = (new_spec_wavs[-1] - new_spec_wavs[-2])\n filter_lhs[-1] = new_spec_wavs[-1]\n filter_lhs[-1] += (new_spec_wavs[-1] - new_spec_wavs[-2]) / 2\n filter_lhs[1:-1] = (new_spec_wavs[1:] + new_spec_wavs[:-1]) / 2\n filter_widths[:-1] = filter_lhs[1:-1] - filter_lhs[:-2]\n\n if filter_lhs[0] < spec_lhs[0] or filter_lhs[-1] > spec_lhs[-1]:\n\n raise ValueError(\"spectres: The new wavelengths specified must fall\"\n \"within the range of the old wavelength values:\",\n filter_lhs[0], spec_lhs[0], filter_lhs[-1],\n spec_lhs[-1], \"\\n Consider setting force=True\")\n\n # Generate output arrays to be populated\n res_fluxes = np.zeros(spec_fluxes[..., 0].shape + new_spec_wavs.shape)\n\n if spec_errs is not None:\n if spec_errs.shape != spec_fluxes.shape:\n raise ValueError(\n \"If specified, spec_errs must be the same shape\"\n \"as spec_fluxes.\")\n else:\n res_fluxerrs = np.copy(res_fluxes)\n\n start = 0\n stop = 0\n\n # Calculate new flux and uncertainty values, loop over new bins\n for j in range(new_spec_wavs.shape[0]):\n\n # Find first old bin which is partially covered by the new bin\n while spec_lhs[start + 1] <= filter_lhs[j]:\n start += 1\n\n # Find last old bin which is partially covered by the new bin\n while spec_lhs[stop + 1] < filter_lhs[j + 1]:\n stop += 1\n\n # If new bin is fully within one old bin these are the same\n if stop == start:\n\n res_fluxes[..., j] = spec_fluxes[..., start]\n if spec_errs is not None:\n 
res_fluxerrs[..., j] = spec_errs[..., start]\n\n # Otherwise multiply the first and last old bin widths by P_ij\n else:\n\n start_factor = ((spec_lhs[start + 1] - filter_lhs[j])\n / (spec_lhs[start + 1] - spec_lhs[start]))\n\n end_factor = ((filter_lhs[j + 1] - spec_lhs[stop])\n / (spec_lhs[stop + 1] - spec_lhs[stop]))\n\n spec_widths[start] *= start_factor\n spec_widths[stop] *= end_factor\n\n # Populate res_fluxes spectrum and uncertainty arrays\n f_widths = spec_widths[start:stop + 1] * spec_fluxes[...,\n start:stop + 1]\n res_fluxes[..., j] = np.sum(f_widths, axis=-1)\n res_fluxes[..., j] /= np.sum(spec_widths[start:stop + 1])\n\n if spec_errs is not None:\n e_wid = spec_widths[start:stop + 1] * spec_errs[...,\n start:stop + 1]\n\n res_fluxerrs[..., j] = np.sqrt(np.sum(e_wid ** 2, axis=-1))\n res_fluxerrs[..., j] /= np.sum(spec_widths[start:stop + 1])\n\n # Put back the old bin widths to their initial values for\n # later use\n spec_widths[start] /= start_factor\n spec_widths[stop] /= end_factor\n\n if inplace:\n\n self.dispersion = new_dispersion\n self.flux = res_fluxes\n if spec_errs is not None:\n self.flux_err = res_fluxerrs\n\n self.reset_mask()\n\n else:\n\n spec = self.copy()\n\n spec.dispersion = new_dispersion\n spec.flux = res_fluxes\n if spec_errs is not None:\n spec.flux_err = res_fluxerrs\n\n spec.reset_mask()\n\n return spec", "def homogeneize_spectra_resolution(res=2.95, dataset=\"MUSE\"):\n for field in context.fields:\n print(field)\n input_dir = os.path.join(context.data_dir, dataset, \"combined\", field,\n \"spec1d_ellipv0\")\n if not os.path.exists(input_dir):\n continue\n output_dir = os.path.join(context.data_dir, dataset, \"combined\", field,\n \"spec1d_ellipv0_fwhm{}\".format(res))\n if not(os.path.exists(output_dir)):\n os.mkdir(output_dir)\n specs = sorted([_ for _ in os.listdir(input_dir) if _.endswith(\n \".fits\")])\n for i, filename in enumerate(specs):\n print(\"Convolving file {} ({} / {})\".format(filename, i+1,\n len(specs)))\n filepath = os.path.join(input_dir, filename)\n output = os.path.join(output_dir, filename)\n data = Table.read(filepath, format=\"fits\")\n wave = data[\"wave\"]\n flux = data[\"flux\"]\n fluxerr = data[\"fluxerr\"]\n muse_fwhm = get_muse_fwhm()\n obsres = muse_fwhm(wave)\n newflux, newfluxerr = broad2res(wave, flux, obsres,\n res, fluxerr=fluxerr)\n newtable = Table([wave, newflux, newfluxerr],\n names=[\"wave\", \"flux\", \"fluxerr\"])\n newtable.write(output, overwrite=True)", "def AdjustDispMag(self, n_subiter):\n if n_subiter == 1:\n self.disp_mag *= 1.0 / const.OPTSTEPADJUSTOR\n else:\n self.disp_mag *= const.OPTSTEPADJUSTOR", "def ratio_4_doc(shot, dir, num_probes = 16):\n # data = [[0] *3 for i in range(num_probes)]\n # magdata = hdr.getMagData(shot)\n probe_locs = get_probeLocs_calib_setup(shot)\n data=hdr.getquikData(shot)\n time,eastcurrent,westcurrent = loadcurrent(shot)#using eastcurrent\n ratios = [[0]*3 for i in range(num_probes)]\n for probe in range(num_probes):\n ratio =1\n inverted = False\n # fig, (ax1, ax2, ax3) = plt.subplots(3, 1, sharex=True)\n B=sp.signal.detrend(cumtrapz(data.unCalibData[dir,probe,:], data.time))\n plot_time = data.time[:-1]\n if(np.max(B[2000:6000]) < abs(np.average(B[2000:6000]))):\n # print(\"\\ninverted!\")\n inverted = True\n # B = B* -1\n # ratio = -1\n\n r = probe_locs[probe]\n max_current = polyPeak_noPlot(time,eastcurrent)\n # if(np.max(eastcurrent) < -1*(np.min(eastcurrent))):\n # max_current = -1*np.min(eastcurrent)\n helmB = helmholtz2(r,max_current)\n\n # THis is 
intentional! I am only using shots where the cmponent is lined\n # up with the z-direction of the helmholz field\n # helmB[2] = helmB[2]*-1\n max_theoretical = np.max(helmB[2])\n max_measured = polyPeak_noPlot(plot_time, B)\n\n\n ratio = ratio * max_theoretical/max_measured\n if ratio > 30000 or ratio < -30000:\n ratio = 0\n\n\n ratios[probe][dir] = ratio\n # print(\"\\tRatio is: %f\" %(ratio))\n # if(inverted and ratio <0):\n # print(\"Inverted and ratio reflects that\")\n # elif(not inverted and ratio <0):\n if probe ==1:\n print(\"\\n Ratio: %5f \\n\\t max_measured: %3f, \\n\\t max_theoretical: %5f\"%(ratio,max_measured,max_theoretical ) )\n\n # Compute the median of the non-zero elements\n # m = np.median(foo[foo > 0])\n # Assign the median to the zero elements\n # foo[foo == 0] = m\n return ratios", "def resample(self, octave_bands):\n self.energy_absorption = {\n \"coeffs\": octave_bands(**self.energy_absorption),\n \"center_freqs\": octave_bands.centers,\n }\n self.scattering = {\n \"coeffs\": octave_bands(**self.scattering),\n \"center_freqs\": octave_bands.centers,\n }", "def set_render_resolution(self, width, height, pixel_aspect=1.0):\n raise NotImplementedError(\"set_render_resolution is not implemented\")", "def spaxel_scale(scale=4, wave=1.0):\n\n scale_rad = scale / MILIARCSECS_IN_A_RAD\n rho = scale_rad * ELT_DIAM / (wave * 1e-6)\n print(rho)", "def convertToSpectroGram(self):", "def extract_specs(self):\n vDeflection_unit = \"lcd-info.{}.conversion-set.conversion.force.scaling.unit.unit\".format(\n self.channel_numbers[\"vDeflection\"])\n self.units[\"vDeflection\"] = self.general[vDeflection_unit]\n\n height_unit = \"lcd-info.{}.conversion-set.conversion.nominal.scaling.unit.unit\".format(\n self.channel_numbers[\"height\"])\n self.units[\"height\"] = self.general[height_unit]", "def camera(self):\n self.spectrum = self.spectrum", "def _default_specs(self):\n # Spectrometer specs\n self.model = \"Flame-S\" # Spectrometer model\n self.fov = 1 # Field of view fo spectrometer (radius of FOV)\n self.ILS = None # Number array holding instrument line shape (possibly don't hold this here?)\n self.fiber_diameter = 1e-3 # Diameter of optical fiber\n self.pix_num = 2048 # Number of pixels\n self.bit_depth = 16 # Bit depth of spectrometer detector\n\n # File information\n self.file_ext = '.npy' # Spectra saved as numpy array\n self.file_ss = '{}ss' # Shutter speed format spec\n self.file_ss_loc = 1 # Shutter speed location in filename\n self.file_spec_type = {'meas': 'Plume', 'dark': 'Dark', 'cal': 'ppmm', 'clear': 'Clear'}\n self.file_datestr = \"%Y-%m-%dT%H%M%S\" # Date/time format spec in filename\n self.file_datestr_loc = 0\n self.plume_params_file = 'plume_params.txt'\n self.plume_speed_id = 'plume_speed='\n self.plume_dist_id = 'plume_distance='\n\n # File which flags that a scan is complete. 
The file will be empty, just its presence is required\n self.scan_complete = 'complete.txt'\n\n # Acquisition settings\n self.start_int_time = 100 # Starting integration time\n self.start_coadd = 1 # Number of spectra to coadd\n self.framerate = 1 # Framerate of acquisitions (Hz)\n self.wavelengths = None # Wavelengths (nm)\n self.spectrum = None # Spectrum\n self.spectrum_filename = None # Filename for spectrum\n\n self.auto_int = True # Bool for requesting automated integration time adjustment\n self.min_saturation = 0.6 # Minimum saturation accepted before adjusting shutter speed (if auto_ss is True)\n self.max_saturation = 0.9 # Maximum saturation accepted before adjusting shutter speed (if auto_ss is True)\n self.saturation_range = [300, 335] # Range of wavelengths used in checking integration time\n self.saturation_pixels = 2 # Number of pixels to check\n\n # Predefined list of integration times for automatic exposure adjustment\n self.int_list = np.concatenate((np.arange(0.1, 0.5, 0.05),\n np.arange(0.5, 1, 0.1),\n np.arange(1, 5, 0.5),\n np.arange(5, 10, 1),\n np.arange(10, 50, 5),\n np.arange(50, 100, 10),\n np.arange(100, 500, 50),\n np.arange(500, 1000, 100),\n np.arange(10 ** 3, 10 ** 4, 500),\n np.array([10 ** 4])))", "def raw_resolution(resolution, splitter=False):\n width, height = resolution\n if splitter:\n fwidth = (width + 15) & ~15\n else:\n fwidth = (width + 31) & ~31\n fheight = (height + 15) & ~15\n return fwidth, fheight", "def __init__(self, start_w: float = 280.0, stop_w: float = 4000.0, spectra: str = \"AM1.5G\"):\n # the first column should be the wavelength in nanometers, the second is the tilt power density/nm in\n # W/(m**2 nm) = J s^-1 m^-2 nm^-1 = C V m^-2 nm^-1\n spectras = {\"AM0Etr\": 1, \"AM1.5G\": 2, \"AM1.5D\": 3}\n self.spectrum = np.genfromtxt(path.join(path.dirname(__file__), './ASTMG173.csv'), delimiter=\",\",\n skip_header=2)[:, [0, spectras[spectra]]]\n self.start_w = start_w\n self.stop_w = stop_w\n # build custom spectrum if necessary\n if start_w != 280.0 or stop_w != 4000.0:\n self.spectrum = self.sub_spectrum(start_w, stop_w)\n\n # create the PowerSpectrum interpolator\n self.interp = interpolate.interp1d(self.spectrum[:, 0], self.spectrum[:, 1])", "def calibration_wheel(self):\n self.spectrum = self.spectrum", "def analyseData800nm():\n #800 nm\n RunData(getFiles(mintime=(15, 40, 07), maxtime=(15, 45, 14), folder='data/29Jul/'), out='I800nm') #0.31, 0.3\n forwardModelJointFit(getFiles(mintime=(15, 40, 07), maxtime=(15, 45, 14), folder='data/29Jul/'),\n out='J800nm', wavelength='800nm') #0.31, 0.3", "def change_resolution(self, L, M, N, NFP=None, sym=None):\n self._NFP = NFP if NFP is not None else self.NFP\n if L != self.L or M != self.M or N != self.N or sym != self.sym:\n self._L = L\n self._M = M\n self._N = N\n self._sym = sym if sym is not None else self.sym\n self._modes = self._get_modes(self.L, self.M, self.N)\n self._set_up()", "def spectral_model(self):\n spec_type = self.data['SpectrumType'].strip()\n pars, errs = {}, {}\n pars['amplitude'] = self.data['Flux_Density']\n errs['amplitude'] = self.data['Unc_Flux_Density']\n pars['reference'] = self.data['Pivot_Energy']\n\n if spec_type == 'PowerLaw':\n pars['index'] = self.data['Spectral_Index'] * u.dimensionless_unscaled\n errs['index'] = self.data['Unc_Spectral_Index'] * u.dimensionless_unscaled\n model = PowerLaw(**pars)\n elif spec_type == 'PLExpCutoff':\n pars['index'] = self.data['Spectral_Index'] * u.dimensionless_unscaled\n pars['ecut'] = self.data['Cutoff']\n errs['index'] 
= self.data['Unc_Spectral_Index'] * u.dimensionless_unscaled\n errs['ecut'] = self.data['Unc_Cutoff']\n model = ExponentialCutoffPowerLaw3FGL(**pars)\n elif spec_type == 'LogParabola':\n pars['alpha'] = self.data['Spectral_Index'] * u.dimensionless_unscaled\n pars['beta'] = self.data['beta'] * u.dimensionless_unscaled\n errs['alpha'] = self.data['Unc_Spectral_Index'] * u.dimensionless_unscaled\n errs['beta'] = self.data['Unc_beta'] * u.dimensionless_unscaled\n model = LogParabola(**pars)\n elif spec_type == \"PLSuperExpCutoff\":\n # TODO: why convert to GeV here? Remove?\n pars['reference'] = pars['reference'].to('GeV')\n pars['index_1'] = self.data['Spectral_Index'] * u.dimensionless_unscaled\n pars['index_2'] = self.data['Exp_Index'] * u.dimensionless_unscaled\n pars['ecut'] = self.data['Cutoff'].to('GeV')\n errs['index_1'] = self.data['Unc_Spectral_Index'] * u.dimensionless_unscaled\n errs['index_2'] = self.data['Unc_Exp_Index'] * u.dimensionless_unscaled\n errs['ecut'] = self.data['Unc_Cutoff'].to('GeV')\n model = PLSuperExpCutoff3FGL(**pars)\n else:\n raise ValueError('Spectral model {} not available'.format(spec_type))\n\n model.parameters.set_parameter_errors(errs)\n return model", "def resolution(self, radius, wave = None):\n dev = Prism.minDeviation(self,wave)\n alpha = dev/2 + self.angle/2\n\n # Form path difference between top and bottom of the beam\n d = 4*radius*math.sin(self.angle/2)/math.cos(alpha)\n dmax = 2.0*self.height*math.tan(self.angle/2) # Length of bottom of prism\n if d > dmax:\n d = dmax\n print(\"Resolution limited by size of prism\")\n\n\n dn = self.n.getDerivative(wave) # dn/d lambda\n return 1000*d*dn # scale to microms", "def hz2mel(hz):\n return 2595 * pylab.log10(1+hz/700.0)", "def get_spectra_pixell(alm1, alm2=None, spectra=None):\n \n if spectra is None:\n if alm2 is None:\n cls = curvedsky.alm2cl(alm1)\n else:\n cls = curvedsky.alm2cl(alm1, alm2)\n l = np.arange(len(cls))\n return l, cls\n \n \n cls = curvedsky.alm2cl(alm1[:,None], alm2[None,:])\n l = np.arange(len(cls[0,0]))\n cl_dict = {}\n for i, l1 in enumerate([\"T\",\"E\",\"B\"]):\n for j, l2 in enumerate([\"T\",\"E\",\"B\"]):\n cl_dict[l1+l2] = cls[i,j]\n \n return(l, cl_dict)", "def resolution_init():\n\n defaultResolution = pm.PyNode(\"defaultResolution\")\n task = ftrack.Task(os.environ[\"FTRACK_TASKID\"])\n\n # Adding/Checking ftrack resolution attribute\n resolution_set = False\n if hasattr(defaultResolution, \"ftrackResolutionSet\"):\n attr = pm.Attribute(\"defaultResolution.ftrackResolutionSet\")\n resolution_set = attr.get()\n else:\n pm.addAttr(\n defaultResolution,\n longName=\"ftrackResolutionSet\",\n defaultValue=True,\n attributeType=\"bool\"\n )\n\n if not resolution_set:\n width = task.getParent().get(\"width\")\n defaultResolution.width.set(width)\n pm.warning(\"Changed resolution width to: {0}\".format(width))\n height = task.getParent().get(\"height\")\n defaultResolution.height.set(height)\n pm.warning(\"Changed resolution height to: {0}\".format(height))\n\n # Vray specific resolution\n if pm.objExists(\"vraySettings\"):\n vray_settings = pm.PyNode(\"vraySettings\")\n vray_settings.width.set(width)\n pm.warning(\"Changed vray resolution width to: {0}\".format(width))\n vray_settings.height.set(height)\n pm.warning(\"Changed vray resolution height to: {0}\".format(height))", "def registration_resolution_changed(self):\n self._write_image('res' + str(self.resolution))\n self.resolution = self.resolution + 1", "def test1():\n\t\n\td = np.arange(-10000,10000,10) # 
MHz\n\t#Voigt\n\tp_dict = {'Bfield':300,'rb85frac':1,'Btheta':0,'lcell':75e-3,'T':58,'Dline':'D2','Elem':'Cs'}\n\t\n\t#timing:\n\tst = time.clock()\n\tTF = get_spectra(d,[1,0,0],p_dict,outputs=['Iy'])\n\tet = time.clock() - st\n\tprint(('E-field - Elapsed time (s):', et))\n\n\t'''\n\t#check vs old elecsus\n\tfrom elecsus_v2.libs import spectra as old_spec\n\tst = time.clock()\n\tTF_old = old_spec.get_spectra(d,p_dict,outputs=['Iy'])\n\tet = time.clock() - st\n\tprint 'Old elecsus - Elapsed time (s):', et\n\t'''\n\t\n\tfig = plt.figure(\"Faraday comparison\")\n\tax1 = fig.add_subplot(111)\n\tax1.plot(d,TF[0],'r',lw=2,label='Faraday')\n\t#ax1.plot(d,TF_old[0],'k--',lw=2,label='Vanilla ElecSus')\n\t\n\t#ax1.legend(loc=0)\n\t\n\tax1.set_xlabel('Detuning (MHz)')\n\tax1.set_ylabel('Transmission')\n\t\n\tplt.show()", "def __init__(self, wavelength):\r\n self.dividerString_ = \"\\nPixel\"\r\n \"\"\"\r\n Their relative wavenumber ranges are not.\r\n \"\"\"\r\n if wavelength == 785:\r\n self.START = 182\r\n self.END = 1986\r\n if wavelength == 1064:\r\n self.START = 58\r\n self.END = 486 \r\n \"\"\"\r\n Indices of ramanshift, dark and raw data are invariant. The indices\r\n refer to the column position of the data vector\r\n \"\"\"\r\n self.RAMANSHIFT = 3\r\n self.DARK = 4\r\n self.RAWDATA = 6\r\n \"\"\"\r\n Locations and files start as empty strings \r\n \"\"\"\r\n self.location = \"\"\r\n self.files = \"\"", "def autoscales(N, dt, dj, wf, w0):\n \n if wf == 'morlet':\n s0 = (dt * (w0 + np.sqrt(2 + w0**2))) / (PI2)\n else:\n raise ValueError('wavelet function not available')\n\n J = np.floor(dj**-1 * np.log2((N * dt) / s0))\n s = np.empty(int(J + 1))\n\n for i in range(s.shape[0]):\n s[i] = s0 * 2**(i * dj)\n\n return s", "def updateFreqAxis(self, ax, n_ticks=5, delay=False):\n rf = self.uv.h_common['REF_FREQ'] /1e6\n chw = self.uv.d_frequency['CH_WIDTH'] / 1e6\n bw = self.uv.d_frequency['TOTAL_BANDWIDTH'] / 1e6\n rp = self.uv.h_common['REF_PIXL']\n nchan = self.uv.h_common['NO_CHAN']\n\n\n #print rf, chw, bw\n\n ticks = ax.get_xticks()\n #print ticks\n tmin, tmax = np.min(ticks), np.max(ticks)\n if tmin < 0: tmin = 0\n #print tmin, tmax\n tlocs = map(int, np.linspace(tmin, tmax, n_ticks))\n\n if rp == 1:\n tlabs = np.linspace(rf, rf+bw, n_ticks)\n else:\n rf_low = rf - chw * rp\n tlabs = np.linspace(rf_low, rf_low+bw, n_ticks)\n #print tlocs\n #print tlabs\n ax.set_xticks(tlocs)\n if not delay:\n ax.set_xticklabels([\"%2.2f\"%tt for tt in tlabs])\n else:\n tlabs = np.linspace(-1.0/chw/1e3 * nchan/2, 1.0/chw/1e3 * nchan/2, n_ticks)\n ax.set_xticklabels([\"%2.2f\"%tt for tt in tlabs])", "def update_dimensions(self, temp):\n # get number of bands\n if hasattr(temp,'t') and hasattr(temp.t, 'size') and temp.t.size > 0:\n self.dimensions[2]=temp.t.size\n self.t=temp.t.copy()\n else:\n self.dimensions[2] = 1\n # calculate y dimensions with new extents\n self.dimensions[0] = np.int64((self.extent[3] - self.extent[2])/self.spacing[1]) + 1\n # calculate x dimensions with new extents\n self.dimensions[1] = np.int64((self.extent[1] - self.extent[0])/self.spacing[0]) + 1\n # calculate x and y arrays\n self.x = self.extent[0] + self.spacing[0]*np.arange(self.dimensions[1])\n self.y = self.extent[2] + self.spacing[1]*np.arange(self.dimensions[0])\n return self", "def _configure_frequencies(self) -> None:\n i = 3\n while i < len(self._lora_frequencies):\n self.set_ch_parameters(i, self._lora_frequencies[i], 0, 5, True)\n i += 1\n self.set_ch_parameters(i, 868800000, 7, 7, True)", "def __init__(self, resolution, 
normalize=True, eps=1e-6):\n super().__init__()\n self.r = int(resolution)\n self.normalize = normalize\n self.eps = eps", "def reference_wl(infilename, outfilename, regfilename, frameid, calib_lst):\n data, head = fits.getdata(infilename, header=True)\n\n npoints = data['points'].max()\n\n newdescr = [descr for descr in data.dtype.descr]\n # add new columns\n newdescr.append(('order',np.int16))\n newdescr.append(('wavelength','>f8',(npoints,)))\n\n newspec = []\n\n # prepare for self reference. means one channel is ThAr\n file_identlist = []\n\n # find unique channels in the input spectra\n channel_lst = np.unique(data['channel'])\n\n # open region file and write headers\n regfile = open(regfilename, 'w')\n regfile.write('# Region file format: DS9 version 4.1'+os.linesep)\n regfile.write('global dashlist=8 3 width=1 font=\"helvetica 10 normal roman\" ')\n regfile.write('select=1 highlite=1 dash=0 fixed=1 edit=0 move=0 delete=0 include=1 source=1'+os.linesep)\n\n # find aperture locations\n aperture_coeffs = get_aperture_coeffs_in_header(head)\n\n # loop all channels\n for channel in sorted(channel_lst):\n\n # filter the spectra in current channel\n mask = (data['channel'] == channel)\n if mask.sum() == 0:\n continue\n spec = data[mask]\n\n # check if the current frameid & channel are in calib_lst\n if frameid in calib_lst and channel in calib_lst[frameid]:\n self_reference = True\n calib = calib_lst[frameid][channel]\n else:\n self_reference = False\n # find the closet ThAr\n refcalib_lst = []\n if frameid <= min(calib_lst):\n calib = calib_lst[min(calib_lst)][channel]\n refcalib_lst.append(calib)\n elif frameid >= max(calib_lst):\n calib = calib_lst[max(calib_lst)][channel]\n refcalib_lst.append(calib)\n else:\n for direction in [-1, +1]:\n _frameid = frameid\n while(True):\n _frameid += direction\n if _frameid in calib_lst and channel in calib_lst[_frameid]:\n calib = calib_lst[_frameid][channel]\n refcalib_lst.append(calib)\n #print(item.frameid, 'append',channel, frameid)\n break\n elif _frameid <= min(calib_lst) or _frameid >= max(calib_lst):\n break\n else:\n continue\n\n # get variable shortcuts.\n # in principle, these parameters in refcalib_lst should have the same\n # values. so just use the last calib solution\n k = calib['k']\n offset = calib['offset']\n xorder = calib['xorder']\n yorder = calib['yorder']\n\n if self_reference:\n coeff = calib['coeff']\n else:\n # calculate the average coefficients\n coeff_lst = np.array([_calib['coeff'] for _calib in refcalib_lst])\n coeff = coeff_lst.mean(axis=0, dtype=np.float64)\n\n # write important parameters into the FITS header\n leading_str = 'HIERARCH GAMSE WLCALIB CHANNEL %s'%channel\n head[leading_str+' K'] = k\n head[leading_str+' OFFSET'] = offset\n head[leading_str+' XORDER'] = xorder\n head[leading_str+' YORDER'] = yorder\n\n # write the coefficients\n for j, i in itertools.product(range(yorder+1), range(xorder+1)):\n head[leading_str+' COEFF %d %d'%(j, i)] = coeff[j,i]\n\n # if the input spectra is a wavelength standard frame (e.g. 
ThAr), write\n # calibration solutions into FITS header\n if self_reference:\n head[leading_str+' MAXITER'] = calib['maxiter']\n head[leading_str+' STDDEV'] = calib['std']\n head[leading_str+' WINDOWSIZE'] = calib['window_size']\n head[leading_str+' NTOT'] = calib['ntot']\n head[leading_str+' NUSE'] = calib['nuse']\n head[leading_str+' NPIXEL'] = calib['npixel']\n\n # pack the identfied line list\n for aperture, list1 in calib['identlist'].items():\n for row in list1:\n file_identlist.append(row)\n\n for row in spec:\n aperture = row['aperture']\n npixel = len(row['wavelength'])\n order = aperture*k + offset\n wl = get_wavelength(coeff, npixel, np.arange(npixel), np.repeat(order, npixel))\n\n # add wavelength into FITS table\n item = list(row)\n item.append(order)\n item.append(wl)\n newspec.append(tuple(item))\n\n # write wavlength information into regfile\n if (channel, aperture) in aperture_coeffs:\n coeffs = aperture_coeffs[(channel, aperture)]\n position = poly.Chebyshev(coef=coeffs, domain=[0, npixel-1])\n color = {'A': 'red', 'B': 'green'}[channel]\n\n # write text in the left edge\n x = -6\n y = position(x)\n string = '# text(%7.2f, %7.2f) text={A%d, O%d} color=%s'\n text = string%(x+1, y+1, aperture, order, color)\n regfile.write(text+os.linesep)\n print('-------'+text)\n\n # write text in the right edge\n x = npixel-1+6\n y = position(x)\n string = '# text(%7.2f, %7.2f) text={A%d, O%d} color=%s'\n text = string%(x+1, y+1, aperture, order, color)\n regfile.write(text+os.linesep)\n\n # write text in the center\n x = npixel/2.\n y = position(x)\n string = '# text(%7.2f, %7.2f) text={Channel %s, Aperture %3d, Order %3d} color=%s'\n text = string%(x+1, y+1+5, channel, aperture, order, color)\n regfile.write(text+os.linesep)\n\n # draw lines\n x = np.linspace(0, npixel-1, 50)\n y = position(x)\n for (x1,x2), (y1, y2) in zip(pairwise(x), pairwise(y)):\n string = 'line(%7.2f,%7.2f,%7.2f,%7.2f) # color=%s'\n text = string%(x1+1, y1+1, x2+1, y2+1, color)\n regfile.write(text+os.linesep)\n\n # draw ticks at integer wavelengths\n pix = np.arange(npixel)\n if wl[0] > wl[-1]:\n wl = wl[::-1]\n pix = pix[::-1]\n f = intp.InterpolatedUnivariateSpline(wl, pix, k=3)\n w1 = wl.min()\n w2 = wl.max()\n for w in np.arange(int(math.ceil(w1)), int(math.floor(w2))+1):\n x = f(w)\n y = position(x)\n if w%10==0:\n ticklen = 3\n string = '# text(%7.2f, %7.2f) text={%4d} color=%s'\n text = string%(x+1+20, y+1+5, w, color)\n regfile.write(text+os.linesep)\n else:\n ticklen = 1\n string = 'line(%7.2f, %7.2f, %7.2f, %7.2f) # color=%s wl=%d'\n text = string%(x+1+20, y+1, x+1+20, y+1+ticklen, color, w)\n regfile.write(text+os.linesep)\n\n # draw identified lines in region file\n if self_reference and aperture in calib['identlist']:\n list1 = calib['identlist'][aperture]\n for row in list1:\n x = row['pixel']\n y = position(x)\n ps = ('x', 'circle')[row['mask']]\n string = 'point(%7.2f, %7.2f) # point=%s color=%s wl=%9.4f'\n text = string%(x+1, y+1, ps, color, row['wavelength'])\n regfile.write(text+os.linesep)\n\n newspec = np.array(newspec, dtype=newdescr)\n\n regfile.close()\n\n pri_hdu = fits.PrimaryHDU(header=head)\n tbl_hdu1 = fits.BinTableHDU(newspec)\n lst = [pri_hdu, tbl_hdu1]\n\n if len(file_identlist)>0:\n #file_identlist = np.array(file_identlist, dtype=identlinetype)\n file_identlist = np.array(file_identlist, dtype=list1.dtype)\n tbl_hdu2 = fits.BinTableHDU(file_identlist)\n lst.append(tbl_hdu2)\n hdu_lst = fits.HDUList(lst)\n\n if os.path.exists(outfilename):\n os.remove(outfilename)\n 
hdu_lst.writeto(outfilename)", "def plot_solution(self, identlist, aperture_lst, plot_ax1=False, **kwargs):\n coeff = kwargs.pop('coeff')\n k = kwargs.pop('k')\n offset = kwargs.pop('offset')\n npixel = kwargs.pop('npixel')\n std = kwargs.pop('std')\n nuse = kwargs.pop('nuse')\n ntot = kwargs.pop('ntot')\n xorder = kwargs.pop('xorder')\n yorder = kwargs.pop('yorder')\n clipping = kwargs.pop('clipping')\n maxiter = kwargs.pop('maxiter')\n\n label_size = 13 # fontsize for x, y labels\n tick_size = 12 # fontsize for x, y ticks\n\n #wave_scale = 'linear'\n wave_scale = 'reciprocal'\n\n #colors = 'rgbcmyk'\n\n self._ax2.cla()\n self._ax3.cla()\n\n if plot_ax1:\n self._ax1.cla()\n x = np.linspace(0, npixel-1, 100, dtype=np.float64)\n\n # find the maximum and minimum wavelength\n wl_min, wl_max = 1e9,0\n allwave_lst = {}\n for aperture in aperture_lst:\n order = k*aperture + offset\n wave = get_wavelength(coeff, npixel, x, np.repeat(order, x.size))\n allwave_lst[aperture] = wave\n wl_max = max(wl_max, wave.max())\n wl_min = min(wl_min, wave.min())\n # plot maximum and minimum wavelength, to determine the display\n # range of this axes, and the tick positions\n self._ax1.plot([0, 0],[wl_min, wl_max], color='none')\n yticks = self._ax1.get_yticks()\n self._ax1.cla()\n\n\n for aperture in aperture_lst:\n order = k*aperture + offset\n color = 'C{}'.format(order%10)\n\n # plot pixel vs. wavelength\n if plot_ax1:\n wave = allwave_lst[aperture]\n if wave_scale=='reciprocal':\n self._ax1.plot(x, 1/wave,\n color=color, ls='-', alpha=0.8, lw=0.8)\n else:\n self._ax1.plot(x, wave,\n color=color, ls='-', alpha=0.8, lw=0.8)\n\n # plot identified lines\n if aperture in identlist:\n list1 = identlist[aperture]\n pix_lst = list1['pixel']\n wav_lst = list1['wavelength']\n mask = list1['mask'].astype(bool)\n res_lst = list1['residual']\n\n if plot_ax1:\n if wave_scale=='reciprocal':\n self._ax1.scatter(pix_lst[mask], 1/wav_lst[mask],\n c=color, s=20, lw=0, alpha=0.8)\n self._ax1.scatter(pix_lst[~mask], 1/wav_lst[~mask],\n c='w', s=16, lw=0.7, alpha=0.8,\n edgecolor=color)\n else:\n self._ax1.scatter(pix_lst[mask], wav_lst[mask],\n c=color, s=20, lw=0, alpha=0.8)\n self._ax1.scatter(pix_lst[~mask], wav_lst[~mask],\n c='w', s=16, lw=0.7, alpha=0.8,\n edgecolor=color)\n\n repeat_aper_lst = np.repeat(aperture, pix_lst.size)\n self._ax2.scatter(repeat_aper_lst[mask], res_lst[mask],\n c=color, s=20, lw=0, alpha=0.8)\n self._ax2.scatter(repeat_aper_lst[~mask], res_lst[~mask],\n c='w', s=16, lw=0.7, alpha=0.8, ec=color)\n self._ax3.scatter(pix_lst[mask], res_lst[mask],\n c=color, s=20, lw=0, alpha=0.8)\n self._ax3.scatter(pix_lst[~mask], res_lst[~mask],\n c='w', s=16, lw=0.7, alpha=0.8, ec=color)\n\n # refresh texts in the residual panels\n text = 'R.M.S. 
= {:.5f}, N = {}/{}'.format(std, nuse, ntot)\n self._ax3._residual_text.set_text(text)\n text = u'Xorder = {}, Yorder = {}, clipping = \\xb1{:g}, Niter = {}'.format(\n xorder, yorder, clipping, maxiter)\n self._ax2._fitpar_text.set_text(text)\n\n # adjust layout for ax1\n if plot_ax1:\n self._ax1.set_xlim(0, npixel-1)\n if wave_scale == 'reciprocal':\n _y11, _y22 = self._ax1.get_ylim()\n newtick_lst, newticklabel_lst = [], []\n for tick in yticks:\n if _y11 < 1/tick < _y22:\n newtick_lst.append(1/tick)\n newticklabel_lst.append(tick)\n self._ax1.set_yticks(newtick_lst)\n self._ax1.set_yticklabels(newticklabel_lst)\n self._ax1.set_ylim(_y22, _y11)\n self._ax1.set_xlabel('Pixel', fontsize=label_size)\n self._ax1.set_ylabel(u'\\u03bb (\\xc5)', fontsize=label_size)\n self._ax1.grid(True, ls=':', color='gray', alpha=1, lw=0.5)\n self._ax1.set_axisbelow(True)\n self._ax1._aperture_text.set_text('')\n for tick in self._ax1.xaxis.get_major_ticks():\n tick.label1.set_fontsize(tick_size)\n for tick in self._ax1.yaxis.get_major_ticks():\n tick.label1.set_fontsize(tick_size)\n\n # adjust axis layout for ax2 (residual on aperture space)\n self._ax2.axhline(y=0, color='k', ls='--', lw=0.5)\n for i in np.arange(-3,3+0.1):\n self._ax2.axhline(y=i*std, color='k', ls=':', lw=0.5)\n x1, x2 = self._ax2.get_xlim()\n x1 = max(x1,aperture_lst.min())\n x2 = min(x2,aperture_lst.max())\n self._ax2.set_xlim(x1, x2)\n self._ax2.set_ylim(-6*std, 6*std)\n self._ax2.set_xlabel('Aperture', fontsize=label_size)\n self._ax2.set_ylabel(u'Residual on \\u03bb (\\xc5)', fontsize=label_size)\n for tick in self._ax2.xaxis.get_major_ticks():\n tick.label1.set_fontsize(tick_size)\n for tick in self._ax2.yaxis.get_major_ticks():\n tick.label1.set_fontsize(tick_size)\n\n ## adjust axis layout for ax3 (residual on pixel space)\n self._ax3.axhline(y=0, color='k', ls='--', lw=0.5)\n for i in np.arange(-3,3+0.1):\n self._ax3.axhline(y=i*std, color='k', ls=':', lw=0.5)\n self._ax3.set_xlim(0, npixel-1)\n self._ax3.set_ylim(-6*std, 6*std)\n self._ax3.set_xlabel('Pixel', fontsize=label_size)\n self._ax3.set_ylabel(u'Residual on \\u03bb (\\xc5)', fontsize=label_size)\n for tick in self._ax3.xaxis.get_major_ticks():\n tick.label1.set_fontsize(tick_size)\n for tick in self._ax3.yaxis.get_major_ticks():\n tick.label1.set_fontsize(tick_size)", "def set_wavelength(self, wavelength):\n print('Setting Santec wavelength to %.4f nm' % wavelength)\n\n # We need to select which of the 4 lasers to select depending on\n # the desired wavelength\n\n if 1530.0 < wavelength < 1630.000001:\n self.santec1.write(\"SW 4\")\n self.santec4.write(\"WA %.4f\" % wavelength)\n if self.active_module != 4:\n self.active_module = 4\n time.sleep(5.00)\n else:\n time.sleep(0.01)\n\n elif 1440.0 < wavelength < 1530.1:\n self.santec1.write(\"SW 3\")\n self.santec3.write(\"WA %.4f\" % wavelength)\n if self.active_module != 3:\n self.active_module = 3\n time.sleep(5.00)\n else:\n time.sleep(0.01)\n\n elif 1355 < wavelength < 1440.1:\n self.santec1.write(\"SW 2\")\n self.santec2.write(\"WA %.4f\" % wavelength)\n if self.active_module != 2:\n self.active_module = 2\n time.sleep(5.00)\n else:\n time.sleep(0.01)\n\n elif 1259.999999 < wavelength < 1355.1:\n self.santec1.write(\"SW 1\")\n self.santec1.write(\"WA %.4f\" % wavelength)\n if self.active_module != 1:\n self.active_module = 1\n time.sleep(5.00)\n else:\n time.sleep(0.01)\n\n else:\n print(\"Wavelength out of range. 
No change will be made\")", "def scale(self):", "def reference_spec_wavelength(spec, calib_lst, weight_lst):\n combined_calib = combine_calib(calib_lst, weight_lst)\n\n k = combined_calib['k']\n offset = combined_calib['offset']\n xorder = combined_calib['xorder']\n yorder = combined_calib['yorder']\n npixel = combined_calib['npixel']\n coeff = combined_calib['coeff']\n\n # calculate the wavelength for each aperture\n for row in spec:\n aperture = row['aperture']\n npoints = len(row['wavelength'])\n order = aperture*k + offset\n wavelength = get_wavelength(coeff, npixel,\n np.arange(npoints), np.repeat(order, npoints))\n row['order'] = order\n row['wavelength'] = wavelength\n\n card_lst = []\n #prefix = 'HIERARCH GAMSE WLCALIB'\n #if fiber is not None:\n # prefix = prefix + ' FIBER {}'.format(fiber)\n card_lst.append(('K', k))\n card_lst.append(('OFFSET', offset))\n card_lst.append(('XORDER', xorder))\n card_lst.append(('YORDER', yorder))\n card_lst.append(('NPIXEL', npixel))\n\n # write the coefficients to fits header\n for j, i in itertools.product(range(yorder+1), range(xorder+1)):\n key = 'COEFF {:d} {:d}'.format(j, i)\n value = coeff[j,i]\n card_lst.append((key, value))\n\n # write information for every reference\n for icalib, (calib, weight) in enumerate(zip(calib_lst, weight_lst)):\n prefix = 'REFERENCE {:d}'.format(icalib+1)\n card_lst.append((prefix+' FILEID', calib['fileid']))\n card_lst.append((prefix+' DATE-OBS', calib['date-obs']))\n card_lst.append((prefix+' EXPTIME', calib['exptime']))\n card_lst.append((prefix+' WEIGHT', weight))\n card_lst.append((prefix+' NTOT', calib['ntot']))\n card_lst.append((prefix+' NUSE', calib['nuse']))\n card_lst.append((prefix+' STDDEV', calib['std']))\n\n return spec, card_lst", "def set_samplerate(self, samplerate):\n\t\tnew_samplerate = _PM_UPDATE_RATE/min(max(1,samplerate),200)\n\t\tshift = min(math.ceil(math.log(new_samplerate,2)),16)\n\t\tself.output_decimation = 2**shift\n\t\tself.output_shift = shift\n\n\t\tprint \"Output decimation: %f, Shift: %f, Samplerate: %f\" % (self.output_decimation, shift, _PM_UPDATE_RATE/self.output_decimation)", "def switch_resolution(self, sync_frame):\n w, h = sync_frame.split(':')[1].split('x')\n if int(h) != self.height and int(w) != self.width:\n self.width = int(w)\n self.height = int(h)\n return True", "def adjust_octave(midi_hz, measured_hz):\n # find non-silent frames\n singing_region = np.where((measured_hz > 1.0) & (midi_hz > 1.0))[0]\n cent_differences = np.log2((midi_hz + 1e-10) / (measured_hz + 1e-10)) * 1200\n octaves = np.arange(-3, 4) * 1200\n octave_error = octaves[np.argmin(np.abs(octaves - np.median(cent_differences[singing_region])))]\n midi_hz = np.power(2, (cent_differences - octave_error) / 1200) * measured_hz\n return midi_hz", "def Fitzpactrick09(self, wavelength, AKs):\n # If input entry is a single float, turn it into an array\n try:\n len(wavelength)\n except:\n wavelength = [wavelength]\n\n # Return error if any wavelength is beyond interpolation range of\n # extinction law\n if ((min(wavelength) < (self.low_lim*10**-4)) | (max(wavelength) > (self.high_lim*10**-4))):\n return ValueError('{0}: wavelength values beyond interpolation range'.format(self))\n \n # Extract wave and A/AKs from law, turning wave into micron units\n wave = self.wave * (10**-4)\n law = self.obscuration\n\n # Find the value of the law at the closest points\n # to wavelength\n A_AKs_at_wave = []\n for ii in wavelength:\n idx = np.where( abs(wave - ii) == min(abs(wave - ii)) )\n 
A_AKs_at_wave.append(law[idx][0])\n\n # Now multiply by AKs (since law assumes AKs = 1)\n A_at_wave = np.array(A_AKs_at_wave) * AKs\n\n return A_at_wave", "def plotSpect(spec, sr):\r\n fig, ax = plt.subplots()\r\n img = librosa.display.specshow(spec, x_axis='time', y_axis='mel', sr=sr, fmax=8000, ax=ax) \r\n fig.colorbar(img, ax=ax, format='%+2.0f dB') \r\n ax.set(title='Mel-frequency spectrogram')", "def determine_exposure_time(cn, bandlims, wantSNR = 10.0, wantetime = 5.0, ref_lam = 0.550,\n plot_snr_curves = False, plot_spectrum = False,\n title = \"\"):\n\n # Specify Kat's fiducial S/N\n iref = np.argmin(np.fabs(cn.lam - ref_lam))\n\n if bandlims is not None:\n\n # Specify band via wavelength\n icont = np.array([np.argmin(np.fabs(cn.lam - bandlims[0])), np.argmin(np.fabs(cn.lam - bandlims[1]))])\n iband = np.arange(icont[0]+1, icont[1])\n ibottom = np.argmin(np.fabs(cn.Cratio - np.min(cn.Cratio[iband])))\n\n # Calculate the continuum planet photon counts and contrast ratio\n ccont = cg.observe.interp_cont_over_band(cn.lam, cn.cp, icont, iband)\n ccrat = cg.observe.interp_cont_over_band(cn.lam, cn.Cratio, icont, iband)\n\n # Calculate varies SNRs as a function of exposure time\n Nt = 1000\n times = np.linspace(1.0, 100.0, Nt)\n band_snrs = np.zeros(len(times))\n bot_snrs = np.zeros(len(times))\n cont_snrs = np.zeros(len(times))\n fid_snrs = np.zeros(len(times))\n for i, time in enumerate(times):\n cn.make_fake_data(texp = times[i])\n fid_snrs[i] = cn.SNRt[iref]\n if bandlims is not None:\n band_snrs[i] = cg.observe.SNR_band(cn.cp, ccont, cn.cb, iband, itime=times[i])\n bot_snrs[i] = cn.SNRt[ibottom]\n cont_snrs[i] = np.mean(cn.SNRt[icont])\n\n # Fit for time to desired snr value\n etime_fid = find_time_from_snr(times, fid_snrs, wantSNR) #times[np.argmin(np.fabs(fid_snrs - wantSNR))]\n if bandlims is not None:\n etime_band = find_time_from_snr(times, band_snrs, wantSNR) #times[np.argmin(np.fabs(band_snrs - wantSNR))]\n etime_bot = find_time_from_snr(times, bot_snrs, wantSNR) #times[np.argmin(np.fabs(bot_snrs - wantSNR))]\n etime_cont = find_time_from_snr(times, cont_snrs, wantSNR) #times[np.argmin(np.fabs(cont_snrs - wantSNR))]\n\n # Check for incomplete bands which can cause anomalously low exposure times\n if bandlims is None:\n etime_band = np.nan\n etime_bot = np.nan\n etime_cont = np.nan\n else:\n if (False in np.isfinite(cn.Cobs[iband])):\n etime_band = np.nan\n\n # Make plot of SNR vs exposure time\n if plot_snr_curves:\n\n fig, ax = plt.subplots(figsize = (8,6))\n ax.set_xlabel(\"Exposure Time [hrs]\")\n ax.set_ylabel(\"S/N\")\n if bandlims is not None:\n ax.plot(times, band_snrs, label = \"detect band rel. to cont.\")\n ax.plot(times, bot_snrs, label = \"bottom of band\")\n ax.plot(times, cont_snrs, label = \"avg. 
continuum\")\n ax.plot(times, fid_snrs, label = \"at %.2f $\\mu$m\" %cn.lam[iref])\n if bandlims is not None:\n ax.scatter(etime_band, wantSNR, c=\"C0\")\n ax.scatter(etime_bot, wantSNR, c=\"C1\")\n ax.scatter(etime_cont, wantSNR, c=\"C2\")\n ax.scatter(etime_fid, wantSNR, c=\"C3\")\n ax.axhline(wantSNR, ls = \"--\", c = \"grey\")\n if bandlims is not None:\n ax.axvline(etime_band, ls = \"--\", c = \"C0\")\n ax.axvline(etime_bot, ls = \"--\", c = \"C1\")\n ax.axvline(etime_cont, ls = \"--\", c = \"C2\")\n ax.axvline(etime_fid, ls = \"--\", c = \"C3\")\n ylims = ax.get_ylim()\n if bandlims is not None:\n ax.text(etime_band, ylims[1]-.5*ylims[1], \"%.2f\" %etime_band, ha = \"center\", va = \"top\", fontsize = 12, bbox=dict(facecolor='w', alpha=1.0, ec = \"w\"), color = \"C0\")\n ax.text(etime_bot, ylims[1]-.1*ylims[1], \"%.2f\" %etime_bot, ha = \"center\", va = \"top\", fontsize = 12, bbox=dict(facecolor='w', alpha=1.0, ec = \"w\"), color = \"C1\")\n ax.text(etime_cont, ylims[1]-.15*ylims[1], \"%.2f\" %etime_cont, ha = \"center\", va = \"top\", fontsize = 12, bbox=dict(facecolor='w', alpha=1.0, ec = \"w\"), color = \"C2\")\n ax.text(etime_fid, ylims[1]-.20*ylims[1], \"%.2f\" %etime_fid, ha = \"center\", va = \"top\", fontsize = 12, bbox=dict(facecolor='w', alpha=1.0, ec = \"w\"), color = \"C3\")\n ax.legend(framealpha = 0.75, fontsize = 14)\n\n if plot_spectrum:\n\n # Construct noised spectrum plot\n if bandlims is not None:\n cn.make_fake_data(texp = etime_band)\n else:\n cn.make_fake_data(texp = etime_fid)\n\n fig, ax = plt.subplots(figsize = (8,6))\n ax.plot(cn.lam, cn.Cratio, ls = \"steps-mid\", color = \"grey\")\n ax.errorbar(cn.lam, cn.Cobs, yerr=cn.Csig, fmt = \"o\", ms = 2.0, alpha = 0.7, color = \"k\")\n ax.set_xlabel(\"Wavelength [$\\mu$m]\")\n ax.set_ylabel(\"Fp/Fs\")\n ax.set_title(title)\n\n if bandlims is not None:\n # Identify specific points in band\n for i in icont:\n ax.scatter(cn.lam[i], cn.Cratio[i], s = 20.0, c = \"C8\", marker = \"o\", zorder = 100)\n for i in iband:\n ax.scatter(cn.lam[i], cn.Cratio[i], s = 20.0, c = \"C1\", marker = \"o\", zorder = 100)\n ax.scatter(cn.lam[ibottom], cn.Cratio[ibottom], s = 20.0, c = \"C8\", marker = \"o\", zorder = 100)\n # Identify specific continuum points in band\n for i, ic in enumerate(iband):\n ax.scatter(cn.lam[ic], ccrat[i], s = 20.0, c = \"C9\", marker = \"o\", zorder = 100)\n\n # Return exposure times\n return etime_band, etime_bot, etime_cont, etime_fid", "def setMicrostepResolution(self, res, motor=0): \n\t\tcmd = 'SAP'\t # Get axis parameter\n\t\ttype = 140\t\t # Microstep resolution\n\t\tvalue = int(log2(res))\t\t # Microstep resolution \n\t\tself.sendCommand(cmd, type, motor, value)\n\t\tdata = self.receiveData()\n\t\tif data.status != 100:\n\t\t\tif self.errorDict.has_key(data.status):\n\t\t\t\traise MotorError(self.errorDict[data.status])\n\t\t\telif data.status == None:\n\t\t\t\traise MotorError('Incorrect controller response, trying to reconnect')\n\t\t\telse:\n\t\t\t\traise MotorError(''.join(('Unknown error, ', str(data.status))))", "def spectra_analysis(file_name, sky_file_name): \n\n # read file name and select out the id that we are dealing with\n curr_file_name = file_name.split('.')\n curr_file_name = curr_file_name[0].split('/')\n stk_f_n = curr_file_name[-1]\n cube_id = int(re.search(r'\\d+', stk_f_n).group())\n\n # read catalogue and obtain the HST redshift estimate\n #catalogue = np.load(\"data/matched_catalogue.npy\")\n catalogue = np.load(\"data/low_redshift_catalogue.npy\")\n cat_loc = 
np.where(catalogue[:,0] == cube_id)[0]\n cube_info = catalogue[cat_loc][0]\n \n hst_redshift = cube_info[7]\n\n # spectra and sky noise data\n spectra_data = spectrum_creator(file_name)\n wl_soln = wavelength_solution(file_name)\n sn_data = sky_noise(sky_file_name)\n\n galaxy_data = spectra_data['galaxy']\n\n # removing baseline from data\n base = peakutils.baseline(galaxy_data, 3)\n gd_mc = galaxy_data - base\n\n # scaling sky-noise to be similar to spectra data\n gd_max = np.amax(galaxy_data)\n sn_data_max = np.amax(sn_data)\n sn_scale = gd_max / sn_data_max\n\n sn_data = sn_data * sn_scale\n\n # spectra lines\n sl = {\n 'emis': {\n '[OII]': '3727',\n 'CaK': '3933',\n 'CaH': '3968',\n 'Hdelta': '4101', \n }, \n 'abs': {'K': '3934.777',\n }\n } \n\n # we can use the redshift from the HST catalogue to define the region to search for\n # the doublet in\n\n # lower and upper bound on wavelength range\n lower_lambda = (1+hst_redshift)*3600\n upper_lambda = (1+hst_redshift)*3850\n\n # x-axis data\n data_h_range = np.linspace(wl_soln['begin'], wl_soln['end'], wl_soln['steps'])\n mask = (lower_lambda < data_h_range) & (data_h_range < upper_lambda) \n\n lambda_data = data_h_range[mask]\n flux_data = gd_mc[mask] \n \n # Finding peaks with PeakUtils\n pu_peaks = peakutils.indexes(flux_data, thres=600, thres_abs=True)\n pu_peaks_x = peakutils.interpolate(lambda_data, flux_data, pu_peaks)\n\n pu_peaks_x = np.sort(pu_peaks_x)\n pu_peaks_x = pu_peaks_x[lower_lambda < pu_peaks_x]\n pu_peaks_x = pu_peaks_x[pu_peaks_x < upper_lambda]\n \n data_dir = 'cube_results/' + stk_f_n\n if not os.path.exists(data_dir):\n os.mkdir(data_dir)\n\n peaks_file = open(data_dir + '/' + stk_f_n + '_peaks.txt', 'w')\n peaks_file.write(\"Peaks found on \" + str(datetime.datetime.now()) + \"\\n\\n\")\n\n peaks_file.write(\"Number Wavelength \\n\")\n for i_peak in range(len(pu_peaks_x)):\n curr_peak = pu_peaks_x[i_peak]\n peaks_file.write(str(i_peak) + \" \" + str(curr_peak) + \"\\n\")\n\n # manually selecting which peak is the [OII] peak - given in wavelength\n if (pu_peaks_x.size != 0):\n otwo_wav = float(pu_peaks_x[0]) \n otwo_acc = float(sl['emis']['[OII]'])\n\n redshift = (otwo_wav / otwo_acc) - 1\n else:\n # accepting HST redshift if cannot find peak\n redshift = hst_redshift\n\n return {'gd_shifted': gd_mc, 'sky_noise': sn_data, 'spectra': sl, 'redshift': \n redshift, 'pu_peaks': pu_peaks_x}", "def widen(self):\n t, h = self.time, self.half_duration\n h *= self.scaling_coeff_x\n self.set_interval((t - h, t + h))", "def rescale(self):\n # forecast on real data, don't need this anymore\n pass", "def TwoDynSpectraPlot(Data_Ch_A, Data_Ch_B, VminA, VmaxA, VminB, VmaxB, Suptitle,\n CBarLabelA, CBarLabelB, no_of_spectra,\n TimeFigureScale, TimeScale, frequency,\n FreqPointsNum, colormap, TitleA, TitleB, fig_file_name,\n currentDate, currentTime, Software_version, customDPI):\n fig, axarr = plt.subplots(2, 1, figsize=(16.0, 9.0))\n fig.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=None, hspace=0.08)\n im0 = axarr[0].imshow(np.flipud(Data_Ch_A), aspect='auto', vmin=VminA, vmax=VmaxA,\n extent=[0, no_of_spectra, frequency[0], frequency[FreqPointsNum-1]], cmap=colormap)\n rc('font', size=8, weight='bold')\n\n ticks_loc = axarr[0].get_yticks().tolist() # <---- Added to suppress warning\n axarr[0].yaxis.set_major_locator(mticker.FixedLocator(ticks_loc)) # <---- Added to suppress warning\n\n axarr[0].set_ylabel('Frequency, MHz', fontweight='bold', fontsize=10)\n axarr[0].set_yticklabels(axarr[0].get_yticks(), 
fontsize=8, fontweight='bold')\n cbar = fig.colorbar(im0, ax=axarr[0], pad=0.005)\n cbar.ax.tick_params(labelsize=8)\n cbar.set_label(CBarLabelA, fontsize=9, fontweight='bold')\n text = axarr[0].get_xticks().tolist()\n for i in range(len(text)-1):\n k = int(text[i])\n text[i] = ' '\n\n ticks_loc = axarr[0].get_xticks().tolist() # <---- Added to suppress warning\n axarr[0].xaxis.set_major_locator(mticker.FixedLocator(ticks_loc)) # <---- Added to suppress warning\n\n axarr[0].set_xticklabels(text)\n axis_copy = axarr[0].twiny()\n axis_copy.set_xlim(0, no_of_spectra)\n text = axis_copy.get_xticks().tolist()\n for i in range(len(text)-1):\n k = int(text[i])\n text[i] = TimeFigureScale[k]\n\n ticks_loc = axis_copy.get_xticks().tolist() # <---- Added to suppress warning\n axis_copy.xaxis.set_major_locator(mticker.FixedLocator(ticks_loc)) # <---- Added to suppress warning\n\n axis_copy.set_xticklabels(text, fontsize=8, fontweight='bold')\n axarr[0].set_title(TitleA, fontsize=10, fontweight='bold', style='italic', y=1.05)\n im1 = axarr[1].imshow(np.flipud(Data_Ch_B), aspect='auto', vmin=VminB, vmax=VmaxB,\n extent=[0, no_of_spectra, frequency[0], frequency[-1]], cmap=colormap)\n # frequency[FreqPointsNum-1]\n\n ticks_loc = axarr[1].get_xticks().tolist() # <---- Added to suppress warning\n axarr[1].xaxis.set_major_locator(mticker.FixedLocator(ticks_loc)) # <---- Added to suppress warning\n\n ticks_loc = axarr[1].get_yticks().tolist() # <---- Added to suppress warning\n axarr[1].yaxis.set_major_locator(mticker.FixedLocator(ticks_loc)) # <---- Added to suppress warning\n\n axarr[1].set_xlabel('UTC Time, HH:MM:SS.msec', fontsize=10, fontweight='bold')\n axarr[1].set_ylabel('Frequency, MHz', fontsize=10, fontweight='bold')\n cbar = fig.colorbar(im1, ax=axarr[1], pad=0.005)\n cbar.set_label(CBarLabelB, fontsize=9, fontweight='bold')\n cbar.ax.tick_params(labelsize=8)\n text = axarr[1].get_xticks().tolist()\n for i in range(len(text)-1):\n k = int(text[i])\n text[i] = TimeScale[k]\n axarr[1].set_xticklabels(text, fontsize=8, fontweight='bold')\n axarr[1].set_yticklabels(axarr[1].get_yticks(), fontsize=8, fontweight='bold')\n axarr[1].set_title(TitleB, fontsize=10, fontweight='bold', style='italic', y=1.00)\n fig.suptitle(Suptitle, fontsize=10, fontweight='bold', x=0.46, y=1.01)\n fig.subplots_adjust(top=0.91)\n fig.text(0.72, 0.065, 'Processed ' + currentDate + ' at ' + currentTime,\n fontsize=6, transform=plt.gcf().transFigure)\n fig.text(0.1, 0.065, 'Software version: ' + Software_version + ', yerin.serge@gmail.com, IRA NASU',\n fontsize=6, transform=plt.gcf().transFigure)\n pylab.savefig(fig_file_name, bbox_inches='tight', dpi=customDPI)\n plt.close('all')\n return 0", "def determine_exposure_time(self, bandlims, wantSNR = 10.0, wantetime = 5.0, ref_lam = 0.550,\n plot_snr_curves = False, plot_spectrum = False,\n title = \"\"):\n\n # Specify Kat's fiducial S/N\n iref = np.argmin(np.fabs(cn.lam - ref_lam))\n\n if bandlims is not None:\n\n # Specify band via wavelength\n icont = np.array([np.argmin(np.fabs(cn.lam - bandlims[0])), np.argmin(np.fabs(cn.lam - bandlims[1]))])\n iband = np.arange(icont[0]+1, icont[1])\n ibottom = np.argmin(np.fabs(cn.Cratio - np.min(cn.Cratio[iband])))\n\n # Calculate the continuum planet photon counts and contrast ratio\n ccont = cg.observe.interp_cont_over_band(cn.lam, cn.cp, icont, iband)\n ccrat = cg.observe.interp_cont_over_band(cn.lam, cn.Cratio, icont, iband)\n\n # Calculate various SNRs as a function of exposure time\n Nt = 1000\n times = np.linspace(1.0, 100.0, 
Nt)\n band_snrs = np.zeros(len(times))\n bot_snrs = np.zeros(len(times))\n cont_snrs = np.zeros(len(times))\n fid_snrs = np.zeros(len(times))\n for i, time in enumerate(times):\n cn.make_fake_data(texp = times[i])\n fid_snrs[i] = cn.SNRt[iref]\n if bandlims is not None:\n band_snrs[i] = cg.observe.SNR_band(cn.cp, ccont, cn.cb, iband, itime=times[i])\n bot_snrs[i] = cn.SNRt[ibottom]\n cont_snrs[i] = np.mean(cn.SNRt[icont])\n\n # Fit for time to desired snr value\n etime_fid = find_time_from_snr(times, fid_snrs, wantSNR) #times[np.argmin(np.fabs(fid_snrs - wantSNR))]\n if bandlims is not None:\n etime_band = find_time_from_snr(times, band_snrs, wantSNR) #times[np.argmin(np.fabs(band_snrs - wantSNR))]\n etime_bot = find_time_from_snr(times, bot_snrs, wantSNR) #times[np.argmin(np.fabs(bot_snrs - wantSNR))]\n etime_cont = find_time_from_snr(times, cont_snrs, wantSNR) #times[np.argmin(np.fabs(cont_snrs - wantSNR))]\n\n # Check for incomplete bands which can cause anomalously low exposure times\n if bandlims is None:\n etime_band = np.nan\n etime_bot = np.nan\n etime_cont = np.nan\n else:\n if (False in np.isfinite(cn.Cobs[iband])):\n etime_band = np.nan\n\n # Make plot of SNR vs exposure time\n if plot_snr_curves:\n\n fig, ax = plt.subplots(figsize = (8,6))\n ax.set_xlabel(\"Exposure Time [hrs]\")\n ax.set_ylabel(\"S/N\")\n if bandlims is not None:\n ax.plot(times, band_snrs, label = \"detect band rel. to cont.\")\n ax.plot(times, bot_snrs, label = \"bottom of band\")\n ax.plot(times, cont_snrs, label = \"avg. continuum\")\n ax.plot(times, fid_snrs, label = \"at %.2f $\\mu$m\" %cn.lam[iref])\n if bandlims is not None:\n ax.scatter(etime_band, wantSNR, c=\"C0\")\n ax.scatter(etime_bot, wantSNR, c=\"C1\")\n ax.scatter(etime_cont, wantSNR, c=\"C2\")\n ax.scatter(etime_fid, wantSNR, c=\"C3\")\n ax.axhline(wantSNR, ls = \"--\", c = \"grey\")\n if bandlims is not None:\n ax.axvline(etime_band, ls = \"--\", c = \"C0\")\n ax.axvline(etime_bot, ls = \"--\", c = \"C1\")\n ax.axvline(etime_cont, ls = \"--\", c = \"C2\")\n ax.axvline(etime_fid, ls = \"--\", c = \"C3\")\n ylims = ax.get_ylim()\n if bandlims is not None:\n ax.text(etime_band, ylims[1]-.5*ylims[1], \"%.2f\" %etime_band, ha = \"center\", va = \"top\", fontsize = 12, bbox=dict(facecolor='w', alpha=1.0, ec = \"w\"), color = \"C0\")\n ax.text(etime_bot, ylims[1]-.1*ylims[1], \"%.2f\" %etime_bot, ha = \"center\", va = \"top\", fontsize = 12, bbox=dict(facecolor='w', alpha=1.0, ec = \"w\"), color = \"C1\")\n ax.text(etime_cont, ylims[1]-.15*ylims[1], \"%.2f\" %etime_cont, ha = \"center\", va = \"top\", fontsize = 12, bbox=dict(facecolor='w', alpha=1.0, ec = \"w\"), color = \"C2\")\n ax.text(etime_fid, ylims[1]-.20*ylims[1], \"%.2f\" %etime_fid, ha = \"center\", va = \"top\", fontsize = 12, bbox=dict(facecolor='w', alpha=1.0, ec = \"w\"), color = \"C3\")\n ax.legend(framealpha = 0.75, fontsize = 14)\n\n if plot_spectrum:\n\n # Construct noised spectrum plot\n if bandlims is not None:\n cn.make_fake_data(texp = etime_band)\n else:\n cn.make_fake_data(texp = etime_fid)\n\n fig, ax = plt.subplots(figsize = (8,6))\n ax.plot(cn.lam, cn.Cratio, ls = \"steps-mid\", color = \"grey\")\n ax.errorbar(cn.lam, cn.Cobs, yerr=cn.Csig, fmt = \"o\", ms = 2.0, alpha = 0.7, color = \"k\")\n ax.set_xlabel(\"Wavelength [$\\mu$m]\")\n ax.set_ylabel(\"Fp/Fs\")\n ax.set_title(title)\n\n if bandlims is not None:\n # Identify specific points in band\n for i in icont:\n ax.scatter(cn.lam[i], cn.Cratio[i], s = 20.0, c = \"C8\", marker = \"o\", zorder = 100)\n for i in iband:\n 
ax.scatter(cn.lam[i], cn.Cratio[i], s = 20.0, c = \"C1\", marker = \"o\", zorder = 100)\n ax.scatter(cn.lam[ibottom], cn.Cratio[ibottom], s = 20.0, c = \"C8\", marker = \"o\", zorder = 100)\n # Identify specific continuum points in band\n for i, ic in enumerate(iband):\n ax.scatter(cn.lam[ic], ccrat[i], s = 20.0, c = \"C9\", marker = \"o\", zorder = 100)\n\n # Return exposure times\n return etime_band, etime_bot, etime_cont, etime_fid", "def _get_all_spectra(self):\n pass", "def rdspecfits(self, ext='SCI', verbose=False ):\n # TODO : read in flux uncertainty array when available\n\n hdulist = pyfits.open(self.filename)\n\n try :\n # reading a DEEP2/DEEP3 spectrum\n extroot='BXSPF'\n wb,fb,eb = hdulist['%s-B'%extroot].data[0][1], hdulist['%s-B'%extroot].data[0][0], hdulist['%s-B'%extroot].data[0][2]\n wr,fr,er = hdulist['%s-R'%extroot].data[0][1], hdulist['%s-R'%extroot].data[0][0], hdulist['%s-R'%extroot].data[0][2]\n return( np.append( wb, wr ), np.append( fb,fr ), np.append( eb,er) )\n except :\n pass\n\n # determine the wavelength range\n # covered by this spectrum\n if len(hdulist) == 1 : ext = 0\n refwave = hdulist[ext].header['CRVAL1']\n refpix = hdulist[ext].header['CRPIX1']\n if 'CD1_1' in hdulist[ext].header.keys() :\n dwave = hdulist[ext].header['CD1_1']\n elif 'CDELT1' in hdulist[ext].header.keys() :\n dwave = hdulist[ext].header['CDELT1']\n else :\n raise exceptions.RuntimeError(\n \"wavelength step keyword not found\")\n\n nwave = hdulist[ext].header['NAXIS1']\n nap = hdulist[ext].header['NAXIS']\n widx = np.arange( nwave )\n wave = (widx - (refpix-1))*dwave + refwave\n flux = []\n if nap>1:\n for i in range( nap ):\n flux.append( hdulist[ext].data[i] )\n else :\n flux = hdulist[ext].data\n self.wave = wave\n self.flux = flux\n\n # TODO : check for flux uncertainty array\n self.fluxerror = np.zeros(len(self.flux))\n\n return", "def reference_self_wavelength(spec, calib):\n\n # calculate the wavelength for each aperture\n for row in spec:\n aperture = row['aperture']\n npoints = len(row['wavelength'])\n order = aperture*calib['k'] + calib['offset']\n wavelength = get_wavelength(calib['coeff'], calib['npixel'],\n np.arange(npoints), np.repeat(order, npoints))\n row['order'] = order\n row['wavelength'] = wavelength\n\n card_lst = []\n card_lst.append(('K', calib['k']))\n card_lst.append(('OFFSET', calib['offset']))\n card_lst.append(('XORDER', calib['xorder']))\n card_lst.append(('YORDER', calib['yorder']))\n card_lst.append(('NPIXEL', calib['npixel']))\n\n # write the coefficients to fits header\n for j, i in itertools.product(range(calib['yorder']+1),\n range(calib['xorder']+1)):\n key = 'COEFF {:d} {:d}'.format(j, i)\n value = calib['coeff'][j,i]\n card_lst.append((key, value))\n\n # write other information to fits header\n card_lst.append(('WINDOW_SIZE', calib['window_size']))\n card_lst.append(('MAXITER', calib['maxiter']))\n card_lst.append(('CLIPPING', calib['clipping']))\n card_lst.append(('Q_THRESHOLD', calib['q_threshold']))\n card_lst.append(('NTOT', calib['ntot']))\n card_lst.append(('NUSE', calib['nuse']))\n card_lst.append(('STDDEV', calib['std']))\n card_lst.append(('DIRECTION' , calib['direction']))\n\n # pack the identfied line list\n identlist = []\n for aperture, list1 in calib['identlist'].items():\n for row in list1:\n identlist.append(row)\n identlist = np.array(identlist, dtype=list1.dtype)\n\n return spec, card_lst, identlist", "def quality(self):\n return self.plays * self.number", "def mel2hz(mel):\n\treturn 700 * (10 ** (mel / 2595.0) - 1)", "def 
setCoordinateResolution(*args):", "def setCoordinateResolution(*args):", "def setCoordinateResolution(*args):", "def setCoordinateResolution(*args):", "def _calculate_magnification(self, times):\n if self._model.n_lenses == 2:\n factor = 10.\n params = self._model.parameters\n t_1 = params.t_0 - factor * params.t_E\n t_2 = params.t_0 + factor * params.t_E\n self._model.set_magnification_methods([t_1, 'VBBL', t_2])\n self._model.set_default_magnification_method(\n 'point_source_point_lens')\n\n magnification = self._model.magnification(times)\n return magnification", "def tuned(sample_rate, freqs, min_fres=None, power_of_2=False, use_padding=True, wndtype=Type.rectangle, dtype=np.float_):\n\n freqs = np.atleast_1d(freqs) \n high_f = np.max(freqs)\n assert (sample_rate / 2) >= high_f, \"Highest target frequency violates Nyquist sampling theorem.\"\n \n logger.info(\"Tuning window size for frequencies {}\".format(\", \".join([str(e) for e in freqs])))\n\n if min_fres is None: \n if len(freqs) > 1:\n # Compute minimal pairwise absolute frequency diffs. \n dists = [math.fabs(pair[0]-pair[1]) for pair in itertools.combinations(freqs, 2)]\n min_fres = np.min(dists) / 2 # Should give us 2 bins between target frequencies.\n else:\n min_fres = freqs[0] / 5 # For a single frequency we just use a fifth for spacing.\n logger.info(\"Minimum frequency resolution not specified. Set to {:.2f}Hz\".format(min_fres))\n else:\n logger.info(\"Minimum frequency resolution given {:.2f}Hz\".format(min_fres))\n\n # From f_res = 1 / T = fs / ws we can compute the required number of samples as \n nsamples = int(math.ceil(sample_rate / min_fres))\n nsamples += nsamples % 2 \n \n ntotal = nsamples\n if power_of_2:\n # Find next power of 2\n ntotal = 2**((nsamples-1).bit_length())\n\n npad = 0\n if use_padding:\n npad = (ntotal - nsamples)\n else:\n nsamples = ntotal\n\n logger.info(\"Window tuned. Length {} ({} data, {} padding). Capture time of {:.5f}s\".format(ntotal, nsamples, npad, nsamples / sample_rate)) \n return Window(nsamples, sample_rate, npads=npad, wndtype=Window.Type.rectangle, dtype=dtype)", "def band_width(self, band_width):\n self._band_width = band_width", "def calibration_spectra(num_energies, num_samples):\n fixed_header = (\n 1*8 # SSID\n + 4*8 # SCET Coarse time\n + 4*8 # Duration\n + 2*8 # Quiet time\n + 4*4 # Live time\n + 2*8 # Avg Temperature\n + 1 # Spare\n + 1 # Comp Schema accum S\n + 3 # Comp Schema accum K\n + 3 # Comp Schema accum M\n + 4*8 # Detector mask\n + 4 # Spare\n + 12 # Pixel mask\n + 1*8 # Sub spectrum mask\n + 2 # Spare\n + 8*( # 8 x \n 2 # Spare\n + 10 # Number of spectral points\n + 10 # Number of summed channels in spectral point\n + 10 # Lowest channel in sub spectrum \n )\n + 2*8 # Number of structure in packet\n )\n\n variable = (\n num_samples * (\n 4 # Spare\n + 5 # Detector ID\n + 4 # Pixel ID\n + 3 # Sub spec ID\n + 16 # Number of compressed spectral points\n + num_energies*1*8 # Compressed spectral point\n\n )\n )\n\n return fixed_header, variable", "def resample_spectrum(combined_spectrum, camera, showplot=False):\n\n # Unpack the wavelengths and fluxes.\n wls, fls = zip(*combined_spectrum)\n\n # Generate the re-sampled x-axis, starting at the min. wavelength and ending\n # at the max. wavelength. The final bin size should be 0.05 Ang. for SWP\n # cameras or 0.10 Ang. for LWP and LWR cameras. 
We oversample by a factor\n # of 10 before binning down.\n oversample = 10.\n if camera in [\"LWP\", \"LWR\"]:\n wl_step = 0.1 / oversample\n else:\n wl_step = 0.05 / oversample\n\n # Identify gaps in the data, interpolate those gaps separately so you don't\n # interpolate over a gap. A gap is defined as anywhere with more than three\n # missing points (based on the mean wavelength difference across the\n # spectrum).\n wl_diffs = numpy.diff(wls)\n # These are the *end points* of a given subsection.\n wl_gaps = numpy.where(numpy.digitize(wl_diffs, [3.*numpy.mean(wl_diffs)]) !=\n 0)[0]\n # If there are no gaps at all, then define the gap to be the last element.\n if wl_gaps.size == 0:\n wl_gaps = numpy.asarray([len(wls)-1])\n\n # Build the binned spectrum for each subspectrum (skipping over gaps).\n prev_index = 0\n binned_wls = []\n binned_fls = []\n # Only build up the interpolated spectrum if it is to be plotted.\n if showplot:\n interpolated_wls = []\n interpolated_fls = []\n\n for gap_ind in wl_gaps:\n # Get interpolated spectrum for this subsection.\n new_wls, new_fls = interpolate_subspec(wls, fls, prev_index, gap_ind,\n wl_step)\n # Push the interpolated values into the list via extension, but only if\n # it is to be plotted.\n if showplot:\n interpolated_wls.extend(new_wls)\n interpolated_fls.extend(new_fls)\n # Now bin the spectrum down by a factor of 10 in resolution to our\n # desired wavelength spacing.\n # First need to pad to an integer of 10 by adding NaNs.\n if len(new_wls) % 10 != 0:\n n_pad = 10 - (len(new_wls) % 10)\n new_wls.extend([numpy.nan]*n_pad)\n new_fls.extend([numpy.nan]*n_pad)\n binned_sub_wl = numpy.nanmean(numpy.asarray(new_wls).reshape(-1, 10),\n axis=1)\n binned_sub_fl = numpy.nanmean(numpy.asarray(new_fls).reshape(-1, 10),\n axis=1)\n binned_wls.extend(binned_sub_wl)\n binned_fls.extend(binned_sub_fl)\n # Update where the next sub_spectrum starts.\n prev_index = gap_ind+1\n\n # If the last gap did not cover to the end of the spectrum, do one more\n # subsection.\n if prev_index < len(wls):\n # Get interpolated spectrum for the final subsection.\n new_wls, new_fls = interpolate_subspec(wls, fls, prev_index, len(wls),\n wl_step)\n # Push the interpolated values into the list via extension, but only if\n # it is to be plotted.\n if showplot:\n interpolated_wls.extend(new_wls)\n interpolated_fls.extend(new_fls)\n # Now bin the spectrum down by a factor of 10 in resolution to our\n # desired wavelength spacing.\n # First need to pad to an integer of 10 by adding NaNs.\n if len(new_wls) % 10 != 0:\n n_pad = 10 - (len(new_wls) % 10)\n new_wls.extend([numpy.nan]*n_pad)\n new_fls.extend([numpy.nan]*n_pad)\n binned_sub_wl = numpy.nanmean(numpy.asarray(new_wls).reshape(-1, 10),\n axis=1)\n binned_sub_fl = numpy.nanmean(numpy.asarray(new_fls).reshape(-1, 10),\n axis=1)\n binned_wls.extend(binned_sub_wl)\n binned_fls.extend(binned_sub_fl)\n\n # Show the plotted spectra if requested.\n if showplot:\n import matplotlib.pyplot as pyp\n pyp.plot(wls, fls, '-ko')\n # Uncomment the lines below to overplot the (oversampled) interpolated\n # spectrum.\n if showplot:\n pyp.plot(interpolated_wls, interpolated_fls, '-ro')\n pyp.plot(binned_wls, binned_fls, '-go')\n for gapmark_ind in wl_gaps:\n pyp.axvline(wls[gapmark_ind])\n pyp.suptitle(\"Red = Oversampled, Green = Resampled, Black = Original\")\n pyp.show()\n return zip(binned_wls, binned_fls)", "def span_rbw_ratio(self):\r\n res = self._visa.query(f\"SENSE{self._screen()}:BANDWIDTH:RESOLUTION:RATIO?\")\r\n return 1 / 
float(res)", "def _setup_sensor ( self ):\n self.spectral = Spectral ( np.array([500, 610, 780, 1580.] ),\n np.array([590, 680, 890, 1750.] ) )", "def combine_sky_spectra(name):\n sky_list = get_sky_spectra(name)\n sizes = get(name, 'sizes')\n scaled = []\n for spectra in sky_list:\n scale = sizes[spectra] # scale by the number of pixels arcoss\n num = zerocount(spectra)\n sarith('%s/disp/%s.1d' % (name, num), '/', scale,\n '%s/sky/%s.scaled' % (name, num))\n scaled.append('%s/sky/%s.scaled' % (name, num))\n if os.path.isfile('%s/sky.1d' % name):\n os.remove('%s/sky.1d' % name)\n scombine(list_convert(scaled), '%s/sky.1d' % name)", "def get_instr_rescale(self):\n if self.gamepad is None: \n return self._harp_rescale_\n else:\n return self._gamepad_rescale_", "def setPixelsPerInchShrinkToFit(self,value):\n self.PDFreactorConfiguration.in1[\"pixelsPerInchShrinkToFit\"] = value", "def recon_steer_bands(pyr, freq_resps, numlevels, numorientations):\n \n result_bands = np.zeros(pyr[0].shape)\n\n freq_hi = np.fft.fftshift(np.fft.fft2(pyr[0]))\n result_hi = np.fft.ifft2(np.fft.fftshift(np.multiply(freq_hi, np.conjugate(freq_resps[0])))).real \n \n freq_lo = np.fft.fftshift(np.fft.fft2(pyr[2]))\n result_lo = np.fft.ifft2(np.fft.fftshift(np.multiply(freq_lo, np.conjugate(freq_resps[2])))).real\n \n freq_resp_band = freq_resps[1]\n pyr_band = pyr[1] \n for i in range(numlevels):\n for j in range(numorientations): \n freq_band = np.fft.fftshift(np.fft.fft2(pyr_band[i][j]))\n result_band = np.fft.ifft2(np.fft.fftshift(np.multiply(freq_band, np.conjugate(freq_resp_band[i][j])))).real\n result_bands = result_bands + result_band \n result = result_bands + result_hi + result_lo\n return result" ]
[ "0.6454344", "0.62690383", "0.58017045", "0.57871443", "0.5685021", "0.56665695", "0.5640041", "0.5633661", "0.56125605", "0.5611095", "0.55840474", "0.55816716", "0.5537546", "0.55354196", "0.54867184", "0.54815644", "0.5469447", "0.54505324", "0.5441042", "0.5431353", "0.5423181", "0.5419752", "0.5391347", "0.5390135", "0.5375535", "0.53751856", "0.5367182", "0.53519404", "0.53168476", "0.5307172", "0.53065336", "0.52992976", "0.5294197", "0.5287858", "0.52770513", "0.525632", "0.5226972", "0.5213283", "0.520997", "0.5201932", "0.517657", "0.5172106", "0.51650983", "0.5149085", "0.5148283", "0.5145149", "0.51183516", "0.5116982", "0.51132786", "0.50971895", "0.50918144", "0.50824827", "0.5080159", "0.50783813", "0.5077831", "0.5076351", "0.5069228", "0.5056999", "0.5045498", "0.5043798", "0.50310916", "0.50293046", "0.50194323", "0.5017802", "0.50166047", "0.50128436", "0.5001192", "0.500116", "0.5000788", "0.49935958", "0.4993028", "0.49886504", "0.49870768", "0.4985571", "0.4984532", "0.4982784", "0.49789518", "0.4978585", "0.49779934", "0.49741104", "0.49537668", "0.49493998", "0.4947685", "0.4944401", "0.49435604", "0.49420908", "0.49399287", "0.49399287", "0.49399287", "0.49399287", "0.49370095", "0.49361002", "0.49314007", "0.49306324", "0.49282113", "0.49232194", "0.49221948", "0.49165916", "0.49164912", "0.4912946", "0.49116623" ]
0.0
-1
return a dictionary of the current index
def to_dict(self): d = {} d.update(**self._lick) return d
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def index(self):\n return dict(data='index')", "def _get_index(self):\n\n return self.index\n\n # to avoid having differences bigger than 2pi", "def index(self):\n return self._index", "def index(self):\n path = self.path.format('index')\n \n with open(path, 'r', newline='') as file:\n l = list(csv.reader(file))\n \n index = [v for _ in l for v in _]\n index = dict((v, i) for (i, v) in enumerate(index))\n \n return index", "def get_index(self):\n return self.index", "def get_index(self):\n return self.index", "def getIndex(self):\n return self.index", "def index(self):\n return self.data.index", "def index():\n\n return dict()", "def current_index(self):\n return self._current_index", "def index(self):\n return self._data.get('index')", "def index(self):\n return self.container['index']", "def index(self):\n return self.data.index.values", "def index(self):\n return (self._data_dict.get('tab_index', -1), self._data_dict.get('index_in_tab', -1))", "def idx(self):\n if self._idx is None:\n self._loads()\n return self._idx", "def index(self):\n return self._index", "def index(self):\n return self._index", "def index(self):\n return self._index", "def index(self):\n return self._index", "def index(self):\n return self._index", "def index(self):\n return self._index", "def index(self):\n return self._index", "def index(self):\n return self._index", "def index(self):\n return self._index", "def index(self):\n return self._index", "def index(self):\n return self._index", "def get_current_index(self):\n assert(self.is_started())\n return self.currIndex", "def getIndex(self):\n\n return self._index", "def currentSubIndex(self):\n logger.debug(\"Func: currentSubIndex/getter\")\n return self._currentsDict[\"currentSubIndex\"]", "def idx(self):\n return self._idx", "def edit_index(state):\n node = state\n for key in (\"layers\", \"mode\"):\n node = node.get(key, {})\n return node.get(\"index\", 0)", "def index_dict(self):\n msk = self.load_mask()\n mski = enumerate(msk)\n ifiltered = (i for (i, m) in mski if m == 1)\n return {i: j for (j, i) in enumerate(ifiltered)}", "def index_dict(self):\n msk = self.load_mask()\n mski = enumerate(msk)\n ifiltered = (i for (i, m) in mski if m == 1)\n return {i: j for (j, i) in enumerate(ifiltered)}", "def get_index(self):\n return self.inverted_index", "def current_index(self) -> int:\n return self._current_index", "def index(self) -> int:", "def global_index(self):\n raise NotImplementedError", "def index(self):\n return copy.deepcopy(self._innercontainer)", "def token_to_idx(self) -> Dict[Hashable, int]:\n return self._token_to_idx", "def idx(self):\n return int(self.__ph.get('idx', 0))", "def index(self) -> int:\r\n return self._index", "def get_item_indexinfo(self, modelitem):\n indexinfo = {}\n\n if self._level_specific_columns:\n for key in self._indexinfo[ modelitem.level() ]:\n indexinfo[ key ] = modelitem.columnval( self._indexinfo[ modelitem.level() ][ key ] )\n else:\n for key in self._indexinfo:\n indexinfo[ key ] = modelitem.columnval( self._indexinfo[ key ] )\n\n return indexinfo", "def index(self) -> int:\n return self._index", "def index(self) -> int:\n return self._index", "def index(self) -> int:\n return self._index", "def index(self) -> int:\n return self._index", "def index(self) -> int:\n return self._index", "def _create_idx(self):\n self._idx = {}\n for idx, (L, M, N) in enumerate(self.modes):\n if L not in self._idx:\n self._idx[L] = {}\n if M not in self._idx[L]:\n self._idx[L][M] = {}\n self._idx[L][M][N] = idx", "def 
return_index(self, idx):\n return (\n self.timeseries[idx],\n self.ch_amount,\n self.freq[idx],\n self.ch_name[idx],\n self.units[idx],\n )", "def id_index_map(self):\n result = {}\n for index, component_data in iteritems(self):\n result[id(component_data)] = index\n return result", "def isect_index(self):\n return self._lazy_isect_index()", "def obj_index(self) -> str:\n return str(self._data[\"index\"])", "def index(self):\n if hasattr(self, '_m_index'):\n return self._m_index if hasattr(self, '_m_index') else None\n\n self._m_index = (self.index_separate if self.is_index_separate else self.index_in_tag)\n return self._m_index if hasattr(self, '_m_index') else None", "def indexes(self):\n return {'status': self._status_sort, 'rms': self._rms_sort}", "def _get_ea_index():\n ea_index_temp = {'Address': 5, 'Agency': 10, 'City': 4, 'Country': 3,\n 'Datacenter': 7, 'Division': 8, 'Interface Name': 13,\n 'Region_List': 2, 'Requester Email': 9, 'Site': 6,\n 'VLAN Description': 11, 'IPR Designation': 16}\n return ea_index_temp", "def get_index(self):\n with open(self.index_path, \"r\") as f:\n return json.load(f)", "def getSectionIndex(self) -> int:\n ...", "def get_index(self, _quals):\n return self._options['index']", "def index(self):\n return self._ll_tree.get_index()", "def index(self):\n return self.frame.index", "def info(self) -> Dict:\n info = super().info\n info[\"cur_pos\"] = self.cur_pos\n return info", "def __build_state_index(self):\n\n # the index for the system state\n # [rho_i, q_in, q_out, r_i, f_i]\n x_index = {}\n\n # add the density index\n x_index['density'] = OrderedDict()\n for i in range(0, self.num_cells):\n x_index['density'][i] = i\n dim_state = self.num_cells\n\n # add the upstream boundary flow\n x_index['qin'] = dim_state\n x_index['qout'] = dim_state + 1\n dim_state += 2\n\n # add on ramp variables\n # x_index['onramp'] = OrderedDict()\n # if self.cell_onramp is not None:\n # # if onramp exist in the network, otherwise skip\n # for cell_id in self.cell_onramp:\n # # add the absolute index into the state index dictionary\n # # r_i index = self.x_index{'onramp'][cell_i]\n # x_index['onramp'][cell_id] = dim_state\n # dim_state += 1\n #\n #\n # # add off ramp state variables\n # x_index['offramp'] = OrderedDict()\n # if self.cell_offramp is not None:\n # for cell_id in self.cell_offramp:\n # # add the absolute index\n # x_index['offramp'][cell_id] = dim_state\n # dim_state += 1\n\n return x_index, dim_state", "def index(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"index\")", "def idx(self, store):\n if hasattr(store, 'index'):\n return store.index.get(self, None)\n else:\n return store.idx(self)", "def get_index(self, index):\n return self.get_node_from_index(index).data", "def to_dict_index(df):\r\n return df.to_dict('index')", "def __getitem__(self, index):\n item_info = {\n \"ID\": self.ID[index], \n \"turn_id\": self.turn_id[index], \n \"turn_belief\": self.turn_belief[index], \n \"gating_label\": self.gating_label[index], \n \"context_plain\":self.dialog_history[index].split(), \n \"turn_uttr_plain\": self.turn_uttr[index], \n \"turn_domain\": self.turn_domain[index], \n \"generate_y\": [v.split() for v in self.generate_y[index]],\n \"slot_temp\": self.slot_temp\n }\n return item_info", "def return_index(self, idx):\n return (\n self.timeseries[:, idx],\n self.ch_amount,\n self.freq,\n self.ch_name[idx],\n self.units[idx],\n self.start_time,\n )", "def get_indices(self):\n\n def query(rel): \n return \"\"\"SELECT pg_class.relname, pg_index.indkey\n 
FROM pg_class, pg_index\n WHERE (pg_index.indexrelid = pg_class.oid)\n AND (pg_index.indrelid = (SELECT pg_class.oid FROM pg_class WHERE pg_class.relname = \\'{}\\'));\n \"\"\".format(rel)\n\n rels = tpch.schema.keys()\n idxs = dict.fromkeys(rels)\n\n with self.tpch_cxn.cursor() as curs:\n for rel in rels:\n curs.execute(query(rel))\n idxs_ = curs.fetchall()\n idxs_ = dict(idxs_) # index -> index keys \n \n # TODO this can be done cleanly in query\n # pg_index.indkey is a SQL array of attributes indices in their respective tables\n split=lambda attrs: attrs.split() \n cast=lambda attrs: list(map(lambda attr: int(attr)-1, attrs))\n invertindex=lambda attrs: list(np.array(schema[rel])[attrs])\n\n attrs = idxs_.values() \n attrs = list(map(split, attrs))\n attrs = list(map(cast, attrs))\n attrs = list(map(invertindex, attrs))\n\n idxs_ = {key : attrs[i] for i, key in enumerate(idxs_.keys())}\n idxs[rel] = idxs_\n return idxs", "def get_active_index( self , ijk = None , global_index = None):\n gi = self.__global_index( global_index = global_index , ijk = ijk)\n return self._get_active_index1( gi)", "def to_dict(self):\n return dict(\n index=self.index,\n python_file=self.python_file,\n trial_date=self.trial_date,\n trial_time=self.trial_time,\n comment=self.comment,\n commit=self.commit,\n commit_message=self.commit_message,\n is_dirty=self.is_dirty,\n start_step=self.start_step\n )", "def global_index( self , active_index = None, ijk = None):\n return self.__global_index( active_index = active_index , ijk = ijk )", "def indexed_dataset(self) -> Dict[int, List]:\n if self.__indexed_dataset is None:\n dataset = self.dataset()\n truncated_dataset = dataset[:1000]\n self.__indexed_dataset = {\n i: dataset[i] for i in range(len(dataset))\n }\n return self.__indexed_dataset", "def genome_index_to_dict(self, index):\n chrom_pos = self.chrom_and_pos(index)\n return {'Chromosome': chrom_pos[0], 'Position': chrom_pos[1]}", "def index(self):\n self.index_value(self.proxy_get())", "def index(self):\n return self.dataset.index", "def get_player_dict(self) -> dict:\n return self.df.to_dict('index')", "def index():\n data = te.getMarketsData(marketsField='index', output_type='df')\n return jsonify(data.to_dict(orient='records'))", "def to_json(self) -> Dict[str, Any]:\n\n return {\n **self.index.to_json(),\n \"timelock\": self.timelock,\n \"amount\": self.amount,\n \"spend_key\": self.spend_key.hex(),\n \"state\": self.state.value,\n }", "def current_index(self):\n job = self.client.query(\"SELECT MAX(ID) FROM {}.{};\".format(self.database_name, self.table_name))\n for row in job.result():\n if row[0] == None:\n return 1\n current_index = row[0] + 1\n return current_index", "def index_to_mapping(self) -> Dict[int, int]:\n if not self._reverse_atom_mappings:\n self._reverse_atom_mappings = {\n index: mapping for mapping, index in self.mapping_to_index.items()\n }\n return self._reverse_atom_mappings", "def get_current_index(self, index):\n\n if self.method == 1:\n current_idx = np.where((self.unassigned_data[0,:]==self.unassigned_data_relax[0,index]) & \\\n (self.unassigned_data[1,:]==self.unassigned_data_relax[1,index]) & \\\n (self.unassigned_data[2,:]==self.unassigned_data_relax[2,index]) & \\\n (self.unassigned_data[3,:]==self.unassigned_data_relax[3,index]) & \\\n (self.unassigned_data[4,:]==self.unassigned_data_relax[4,index]))\n else:\n current_idx = np.where((self.unassigned_data[0,:]==self.unassigned_data_relax[0,index]) & \\\n (self.unassigned_data[1,:]==self.unassigned_data_relax[1,index]) & \\\n 
(self.unassigned_data[2,:]==self.unassigned_data_relax[2,index]) & \\\n (self.unassigned_data[3,:]==self.unassigned_data_relax[3,index]))\n\n current_idx = current_idx[0][0]\n\n return current_idx", "def indexed(self):\n return self.properties.get('indexed', None)", "def _index(self,key):\n index=0\n for item in self._item:\n if item.key==key:\n return index\n index+=1\n return -1", "def stats_indexing(self, host):\n\n s = self.get_stats(host, 'indexing')\n\n data = {\n 'delete_time_in_millis': s['delete_time_in_millis'],\n 'delete_total': s['delete_total'],\n 'delete_current': s['delete_current'],\n 'index_time_in_millis': s['index_time_in_millis'],\n 'index_total': s['index_total'],\n 'index_current': s['index_current']\n }\n\n return data", "def inspectedIndex(self):\n if self.inspectedNodeIsVisible:\n return self.createIndex(0, 0, self._inspected_item)\n else:\n return self.rootIndex()", "def get_indices(self):\r\n return self._indices", "def index(self, item):\n return self.__values.index(item)", "def generate_dict(self):\n dict = defaultdict(list)\n for i in range(self.no_of_docs-1):\n doc_txt = self.doc_to_df(i)\n #assign key to index in dictionary and its locations as tuples(docid,line,wordpos) as the values\n for j in range(len(doc_txt)):\n for k in range(doc_txt.shape[1]):\n key = doc_txt[k][j]\n dict[key].append((i,j,k))", "def __getitem__(self, index):\n if isinstance(index, types.SliceType):\n # fetching a slice returns an OrderedDict\n return self._main[index].items()\n key = self._main._sequence[index]\n return (key, self._main[key])", "def step_index(df):\n steps = {}\n for step in df.STEP:\n steps[step] = df.index[df.STEP == step]\n return steps", "def get_index_array(self):\n return self.region_pairs", "def _make_observation(self) -> Dict[str, np.ndarray]:\n return {\n \"cur_pos\": np.array([self.cur_pos], dtype=int),\n }", "def get_index(self):\n\t\treturn call_sdk_function('PrlVmDev_GetIndex', self.handle)", "def get_index(self, key):\n return self.keys.index(key)", "def degree_index_dict(self):\n did = dict()\n for i,c in enumerate(self.classes):\n if isinstance(c, lambda_class) or isinstance(c, psi_class) or c == 0:\n continue \n try:\n degree = c.degree\n except AttributeError:\n degree = 1\n if not did.has_key(degree):\n did[degree] = []\n did[degree].append(i+1)\n return did", "def get_items_to_index(self):\n\t\treturn []", "def indices(self):\n return self.index.indices", "def index(self):\n return self._model_item.index()", "def get_positions(self) -> Dict[str, int]:\n\n with self._lock:\n return {\n name: self._return_factor * i\n for name, i in self._current_positions.items()\n }", "def index_config(self):\n return {\n 'settings': self.settings,\n 'mappings': self.mappings\n }" ]
[ "0.7655854", "0.71581787", "0.7109777", "0.7092145", "0.6977287", "0.6977287", "0.6973881", "0.69227993", "0.6913379", "0.6897256", "0.68841493", "0.6860583", "0.68244886", "0.68158996", "0.6750094", "0.67243034", "0.67243034", "0.67243034", "0.67243034", "0.67243034", "0.67243034", "0.67243034", "0.67243034", "0.67243034", "0.67243034", "0.67243034", "0.6705135", "0.6624536", "0.6566864", "0.6558116", "0.65511155", "0.6474426", "0.6474426", "0.6473925", "0.6470233", "0.64406955", "0.6434512", "0.63916236", "0.6390075", "0.63806313", "0.63489264", "0.63453823", "0.6304921", "0.6304921", "0.6304921", "0.6304921", "0.6304921", "0.630321", "0.62815565", "0.62809587", "0.6251678", "0.62373096", "0.62343854", "0.6224004", "0.62130225", "0.6208842", "0.62058204", "0.61866254", "0.61853045", "0.61750096", "0.61627805", "0.61585605", "0.61584175", "0.6152982", "0.6148502", "0.61440307", "0.613664", "0.61334133", "0.6095539", "0.6088982", "0.60847074", "0.60809034", "0.6066715", "0.6059505", "0.60525787", "0.60508424", "0.6043764", "0.6039518", "0.60274345", "0.60203606", "0.60203177", "0.6005989", "0.5997135", "0.5979408", "0.5960587", "0.59516555", "0.5929465", "0.5909026", "0.5904796", "0.58863944", "0.58863", "0.5875406", "0.5872282", "0.58673865", "0.58641165", "0.58608985", "0.5852746", "0.58422333", "0.5832342", "0.5829958", "0.5827462" ]
0.0
-1
return the unitwise definition corresponding to attrname
def _get_wavelength_attrs_with_units(self, attrname, units='AA'): attr = self._lick[attrname] if self.wavelength_unit is not None: if units is None: return attr * unit[self.wavelength_unit] else: return (attr * unit[self.wavelength_unit]).to(units) else: return attr
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_wavelength_attrs_with_units(self, attrname, units='AA'):\n attr = self._lick[attrname]\n if self.wavelength_unit is not None:\n if units is None:\n return attr * Unit(self.wavelength_unit)\n else:\n return (attr * Unit(self.wavelength_unit)).to(units)\n else:\n return attr", "def _parse_unit_attr(attr: str) -> str:\n parts = attr.split('_', maxsplit=1)\n valid_attr = len(parts) == 2 and parts[0] == \"unit\"\n if not valid_attr:\n raise ValueError(\"{0} is not a valid unit attribute.\".format(attr))\n return parts[1]", "def mineral_attr(attribute):\n return attribute[0]", "def get_attr(self, attr_name, ds_name=None):\n if self.science_product:\n return self.__nc_attr(attr_name, ds_name)\n\n return self.__h5_attr(attr_name, ds_name)", "def get(self, attrname):\n return self.__dict__['_'+attrname]", "def create_descr(self, attr_name):", "def getUnitDefinition(self, *args):\n return _libsbml.Model_getUnitDefinition(self, *args)", "def __getattribute__(self, attr):\n if attr in ('make_rdm1s', 'spin_square', 'contract_2e',\n 'absorb_h1e'):\n raise AttributeError\n else:\n return object.__getattribute__(self, attr)", "def __getattribute__(self, attr):\n if attr in ('make_rdm1s', 'spin_square', 'contract_2e',\n 'absorb_h1e'):\n raise AttributeError\n else:\n return object.__getattribute__(self, attr)", "def get_attribute(self, name):\n\n pass", "def get_unit(self,tag):", "def __h5_attr(self, attr_name, ds_name):\n if ds_name is not None:\n dset = self.fid['/PRODUCT/{}'.format(ds_name)]\n if attr_name not in dset.attrs.keys():\n return None\n\n attr = dset.attrs[attr_name]\n else:\n if attr_name not in self.fid.attrs:\n return None\n\n attr = self.fid.attrs[attr_name]\n\n if isinstance(attr, bytes):\n return attr.decode('ascii')\n\n return attr", "def get_attr(self):\n attr = self._bld.FindOrCreateAttribute(self._sobj, self.sname)\n return attr._narrow(self.stype)", "def _desc_op(attr_name):", "def attributeDecl(self, elem, name, type, defi, defaultValue, nameList):\n pass", "def attr(self, name):\r\n return Assert(getattr(self.obj, name))", "def AttributeString(self) -> str:", "def AttributeString(self) -> str:", "def attr(node: md.Document, name: str) -> str:\n return node.getAttribute(name)", "def get_attr(self, name: str):\n return self.call(name)", "def __getattr__(self, name):\n if name == \"mu\":\n self.mu = self.mdp.stationary_distribution(\n seed=1000, iterations=100000, policy=self.target_policy)\n return self.mu\n elif name == \"beh_mu\":\n self.beh_mu = self.mdp.stationary_distribution(\n seed=1000, iterations=100000, policy=self.behavior_policy)\n return self.beh_mu\n elif name == \"V_true\":\n self.V_true = dynamic_prog.estimate_V_discrete(\n self.mdp, policy=self.target_policy, gamma=self.gamma)\n return self.V_true\n else:\n raise AttributeError(name)", "def get_unit(shared, unit_name):\n if (shared.config.get_safe('data', 'use_units') != 'off'):\n unit_val, unit_str = shared.config.get_safe_literal('units', unit_name,\n default=(1.0, ''))\n if unit_str:\n unit_str = ' [' + unit_str + ']'\n else:\n unit_val = 1.0\n unit_str = ''\n \n return unit_val, unit_str", "def about_attribute(self, name):\n for cdef in self.getmro():\n if name in cdef.attrs:\n s_result = cdef.attrs[name].s_value\n if s_result != s_ImpossibleValue:\n return s_result\n else:\n return None\n return None", "def __getattr__( self, attrName ):\r\n if attrName!=attrName.lower() and attrName!=\"caseSensitive\" and not self.caseSensitive and \\\r\n (attrName.startswith(\"start_\") or 
attrName.startswith(\"end_\")):\r\n return getattr(self,attrName.lower())\r\n raise AttributeError, attrName", "def __nc_attr(self, attr_name, ds_name):\n if ds_name is not None:\n for grp_name in ['/target_product', '/side_product']:\n dset = self.fid['{}/{}'.format(grp_name, ds_name)]\n if attr_name in dset.ncattrs():\n return dset.getncattr(attr_name)\n\n return None\n\n if attr_name not in self.fid.ncattrs():\n return None\n\n return self.fid.getncattr(attr_name)", "def _mangle_attr(name):\n return 'm_' + name", "def __getattr__(self, name):\n if not name in self._attrs.iterkeys():\n raise AttributeError(name)\n return self._attrs[name]", "def getAttrs(element, exclude=(), required=()):\n conversionTable = {'lowerBound':PQU.PQU, 'upperBound':PQU.PQU, 'value':PQU.PQU, 'energy':PQU.PQU,\n 'neutronWidth':PQU.PQU, 'captureWidth':PQU.PQU, 'fissionWidthA':PQU.PQU, 'fissionWidthB':PQU.PQU, 'competitiveWidth':PQU.PQU,\n 'levelSpacing':PQU.PQU, 'Q':PQU.PQU, 'radius':PQU.PQU, 'effectiveRadius':PQU.PQU,\n 'reconstructCrossSection':getBool, 'multipleRegions': getBool, 'LdependentScatteringRadii': getBool,\n 'calculateChannelRadius':getBool, 'computeAngularDistribution':getBool, 'forSelfShieldingOnly': getBool,\n 'calculateShift':getBool,'calculatePenetrability':getBool,\n 'LvaluesNeededForConvergence':int, 'ENDF_MT':int, 'index':int, 'L':int,\n 'neutronDOF':floatOrint, 'gammaDOF':floatOrint, 'competitiveDOF':floatOrint, 'fissionDOF':floatOrint,\n 'spin':xParticle.spin, 'parity':xParticle.parity,\n 'scatteringRadius':(lambda foo: scatteringRadius(PQU.PQU(foo)) if foo!='energyDependent' else foo),\n }\n attrs = dict( element.items() )\n for key in attrs.keys():\n if key in exclude: attrs.pop(key)\n elif key in conversionTable: attrs[key] = conversionTable[key]( attrs[key] )\n for val in required:\n if val not in attrs: attrs[val] = False\n return attrs", "def _arg_attr(identifier, attr1, attr2):\n return attr1 if identifier.startswith('t') else attr2", "def createUnitDefinition(self):\n return _libsbml.Model_createUnitDefinition(self)", "def get_attr(attributes, name):\n try:\n return attributes.getValue(name)\n except KeyError:\n return None", "def getMeasure(unique_name):", "def getMeasure(unique_name):", "def attributes_get(self, attr_name):\n if not self.sqs_attr:\n return None\n\n if attr_name not in self.sqs_attr:\n return None\n\n return self.sqs_attr[attr_name]", "def get_attribute(tdesc, attr_name, required=True):\n # Fields stored in hex format by default.\n default_hex = ('cipher_text', 'iv', 'key')\n\n data = tdesc.find(attr_name)\n if data is None:\n if required:\n raise subcmd.TpmTestError('node \"%s\" does not have attribute \"%s\"' %\n (tdesc.get('name'), attr_name))\n return ''\n\n # Attribute is present, does it have to be decoded from hex?\n cell_format = data.get('format')\n if not cell_format:\n if attr_name in default_hex:\n cell_format = 'hex'\n else:\n cell_format = 'ascii'\n elif cell_format not in ('hex', 'ascii'):\n raise subcmd.TpmTestError('%s:%s, unrecognizable format \"%s\"' %\n (tdesc.get('name'), attr_name, cell_format))\n\n text = ' '.join(x.strip() for x in data.text.splitlines() if x)\n if cell_format == 'ascii':\n return text\n\n # Drop spaces from hex representation.\n text = text.replace(' ', '')\n if len(text) & 3:\n raise subcmd.TpmTestError('%s:%s %swrong hex number size' %\n (tdesc.get('name'), attr_name, utils.hex_dump(text)))\n # Convert text to binary\n value = ''\n for x in range(len(text)/8):\n try:\n value += struct.pack('<I', int('0x%s' % 
text[8*x:8*(x+1)], 16))\n except ValueError:\n raise subcmd.TpmTestError('%s:%s %swrong hex value' %\n (tdesc.get('name'), attr_name, utils.hex_dump(text)))\n return value", "def get_visual_attrib_template():\n return {\"conaffinity\": \"0\", \"contype\": \"0\", \"mass\": \"1e-8\", \"group\": \"1\"}", "def calc_attribute_statistics(self, statistic_name):\n stats = {}\n for var, grids in self.attributes.items():\n if len(grids) > 1:\n stats[var] = getattr(np.array([getattr(np.ma.array(x, mask=self.masks[t] == 0), statistic_name)()\n for t, x in enumerate(grids)]), statistic_name)()\n else:\n stats[var] = getattr(np.ma.array(grids[0], mask=self.masks[0] == 0), statistic_name)()\n return stats", "def getFluidAttr(*args, attribute: AnyStr=\"\", lowerFace: bool=True, xIndex: int=0, xvalue:\n bool=True, yIndex: int=0, yvalue: bool=True, zIndex: int=0, zvalue: bool=True,\n **kwargs)->None:\n pass", "def getattribute(self, name):\n return self.attributes[name]", "def testattributes(self):\n for attr in ('ST', 'DX', 'IQ', 'MA', 'Dam', 'Hit'):\n AttributeAbility([attr,])", "def attr(elem, attr):\n try:\n return elem[attr]\n except:\n return \"\"", "def attr(elem, attr):\n try:\n return elem[attr]\n except:\n return \"\"", "def _get_attr(self, attr, root=None):\n with self._h5file('r') as h5file:\n if root is None:\n obj = h5file\n else:\n obj = h5file[root]\n return get_decoded(obj.attrs, attr)[attr]", "def __getattr__(self, attr):\n return self.product.get(attr, \"\")", "def get_attr(name):\n userDoc = get_user()\n _idx = userDoc.index.get(name, None)\n\n if _idx is not None:\n return userDoc.attributes[_idx]\n else:\n return None", "def getattrs(self, attrlist):\n\t\treturn np.array([getattr(self, attr) for attr in attrlist])", "def apply_membership_attr_name(self, attr_name):\n return self.apply_membership_func(lambda x: getattr(x, attr_name))", "def unit_type(self) -> str:", "def test_attributes(self):\n ujml_code = '<?xml version=\"1.0\"?><ujml version=\"{}\">'.format(uj_version) + '''\n <a_stoff a_str=\"qwerty\"\n a_int=\"9001\"\n a_bool=\"True\"\n a_float=\"1.2\"\n a_list=\"1,2,3,4\"\n a_eval=\"2+2\"\n a_exec=\"global x; x=3+3*b\">\n\n </a_stoff>\n </ujml>\n '''\n a_stoff = from_string(ujml_code)[0]\n\n self.assertEqual(\"qwerty\", a_stoff.a_str)\n self.assertEqual(9001, a_stoff.a_int)\n self.assertTrue(a_stoff.a_bool)\n self.assertEqual(1.2, a_stoff.a_float)\n self.assertEqual([1, 2, 3, 4], a_stoff.a_list)\n self.assertEqual(4, a_stoff.a_eval)\n a_stoff.a_exec(b=4)\n self.assertEqual(15, a_stoff.root.interpreter['x'])", "def _Attribute(self,t):\n # Only a limited set of globals supported\n func_dict = None\n \n # pyflamegpu singleton\n if isinstance(t.value, ast.Name):\n if t.value.id == \"pyflamegpu\":\n if t.attr in self.fgpu_attrs:\n # proceed\n self.write(\"flamegpu::\")\n self.write(t.attr)\n else:\n self.RaiseError(t, f\"Attribute '{t.attr}' does not exist in pyflamegpu object\")\n # math functions (try them in raw function call format) or constants\n elif t.value.id == \"math\":\n if t.attr in self.mathconsts:\n self.write(self.mathconsts[t.attr])\n else:\n self.RaiseError(t, f\"Unsupported math constant '{t.attr}'\")\n # numpy types\n elif t.value.id == \"numpy\" or t.value.id == \"np\":\n # not sure how a numpy attribute would be used without function call or type hint but translate anyway \n if t.attr in self.numpytypes:\n self.write(self.numpytypes[t.attr])\n else: \n self.RaiseError(t, f\"Unsupported numpy type {t.attr}\")\n else:\n self.RaiseError(t, f\"Global '{t.value.id}' 
identifiers not supported\")\n else:\n self.RaiseError(t, \"Unsupported attribute\")", "def test_adding_attributes(self):\n self.assertEqual(self.compound.get_attribute(\"What\"), \"Everything\")", "def extract_attribute(self, cls, attr_name):\r\n\r\n def extract(value, name):\r\n\r\n try:\r\n return getattr(value, name)\r\n except (AttributeError, IndexError):\r\n return None\r\n\r\n attributes = [\r\n extract(value, attr_name)\r\n for galaxy in self.galaxies\r\n for value in galaxy.__dict__.values()\r\n if isinstance(value, cls)\r\n ]\r\n\r\n if attributes == []:\r\n return None\r\n elif isinstance(attributes[0], float):\r\n return values.ValuesIrregular(values=attributes)\r\n elif isinstance(attributes[0], tuple):\r\n return grid_2d_irregular.Grid2DIrregular(grid=attributes)", "def get_joint_attrib_template():\n return {\n \"type\": \"free\",\n }", "def get_attr(cls, dset, name, default=None):\n dec = default\n if name in dset.attrs:\n raw = dset.attrs[name]\n if (sys.version_info[0] > 2) and (\"decode\" in dir(raw)):\n dec = raw.decode()\n else:\n dec = raw\n return dec", "def getVarUnit( self, name, adbOut ):\n\n if name not in _adbUnit: return None\n\n unit = None\n for item in _adbUnit[name]:\n if item[1] == 'all' or adbOut.lower() in item[1].split(','):\n if item[0] == \"None\":\n unit= \"nondim\"\n else:\n unit= acuUnit.getDefUnit( item[0] )\n \n break\n return unit", "def test_name_attribute_assignment(self):\n self.assertNotIn('aldous', self.__dict__)\n self.aldous\n self.assertIn('aldous', self.__dict__)\n self.assertIs(self.__dict__['aldous'], self.aldous)", "def getName(self):\n return _libsbml.UnitDefinition_getName(self)", "def create_descr(self, attr_name):\n def _get(self):\n return getattr(self._numba_attrs, attr_name)\n def _set(self, value):\n return setattr(self._numba_attrs, attr_name, value)\n return property(_get, _set)", "def wxname2attr(self, name):\n assert name.startswith('wx')\n\n #cn = self.codegen.get_class(self.codegen.cn(name))\n cn = self.codegen.cn(name)\n namespace, cn = cn.rsplit(\".\",1)\n if namespace==\"wx\":\n import wx\n return getattr(wx, cn)\n if namespace==\"wx.propgrid\":\n import wx.propgrid\n return getattr(wx.propgrid, cn)\n if namespace==\"wx.grid\":\n import wx.grid\n return getattr(wx.propgrid, cn)\n raise ValueError(\"namespace %s not implemented\"%namespace)", "def __getattr__(self, name):\n if name == \"mu\" or name == \"mu_next\" or name == \"mu_r\" or name == \"mu_phi\" or name == \"mu_phi_next\":\n self.mu, self.mu_r, self.mu_next, self.mu_phi, self.mu_phi_next = mdp.samples_distribution(self.mdp, policy=self.target_policy,\n policy_traj=self.behavior_policy,\n phi=self.phi,\n n_next=self.mu_n_next,\n n_iter=self.mu_iter,\n n_restarts=self.mu_restarts,\n seed=self.mu_seed,\n n_subsample=self.mu_subsample)\n return self.__dict__[name]\n elif name == \"mu_tar\" or name == \"mu_next_tar\" or name == \"mu_r_tar\" or name == \"mu_phi_tar\" or name == \"mu_phi_next_tar\":\n self.mu_tar, self.mu_r_tar, self.mu_next_tar, self.mu_phi_tar, self.mu_phi_next_tar = mdp.samples_distribution(self.mdp, policy=self.target_policy,\n phi=self.phi,\n n_next=self.mu_n_next,\n n_iter=self.mu_iter,\n n_restarts=self.mu_restarts,\n seed=self.mu_seed,\n n_subsample=self.mu_subsample)\n return self.__dict__[name]\n elif name == \"mu_accum_r\":\n self.mu_accum_r = mdp.accum_reward_for_states(self.mdp, policy=self.target_policy, states=self.mu,\n gamma=self.gamma, seed=self.mu_seed,\n n_eps=10, l_eps=200, verbose=10)\n return self.__dict__[name]\n else:\n 
raise AttributeError(name)", "def ATTRIBUTE():\n return \"author\", \"title\", \"publisher\", \"shelf\", \"category\", \"subject\"", "def IDX_CHECK(attribute_name):\n if attribute_name == 'Alt':\n return 0\n if attribute_name == 'Bar':\n return 1\n if attribute_name == 'Fri':\n return 2\n if attribute_name == 'Hun':\n return 3\n if attribute_name == 'Pat':\n return 4\n if attribute_name == 'Price':\n return 5\n if attribute_name == 'Rain':\n return 6\n if attribute_name == 'Res':\n return 7\n if attribute_name == 'Type':\n return 8\n if attribute_name == 'Est':\n return 9", "def __getattribute__(self, name):\n if name in [\"sampling_function\", \"env\", \"fit_dist\", \"reset\"]:\n return object.__getattribute__(self, name)\n\n else:\n return getattr(self.env, name)", "def is_attr_exist(self, section_name: str, attr_name: str) -> bool:\n pass", "def __getitem__(self, name):\n return self.gattrs[name]", "def __getitem__(self, attribute_name: str) -> Attribute:\n return self._attributes_by_name[attribute_name]", "def getattr(self, name, *default):\n for attr in self.attributes:\n if attr.name.lower() == name.lower():\n return attr\n else:\n if default:\n return default[0]\n raise KeyError(name)", "def get_attr(self, attr_name: str, indices: VecEnvIndices = None) -> List[Any]:\n raise NotImplementedError()", "def _named_attrs(self, parts:dict) -> \\\n (QA4SMNamedAttributes, list, QA4SMNamedAttributes):\n\n if not self.ismetr():\n raise IOError(self.varname, '{} is not in form of a QA4SM metric variable.')\n\n if self.g == 0:\n a = QA4SMAttributes(self.attrs)\n ref_ds = QA4SMNamedAttributes(a.ref_dc - a._offset_id_dc,\n a.get_ref_names()['short_name'], self.attrs)\n return ref_ds, None, None\n else:\n dss = []\n ref_ds = QA4SMNamedAttributes(parts['ref_id'], parts['ref_ds'], self.attrs)\n ds = QA4SMNamedAttributes(parts['sat_id0'], parts['sat_ds0'], self.attrs)\n dss.append(ds)\n if self.g == 3:\n ds = QA4SMNamedAttributes(parts['sat_id1'], parts['sat_ds1'], self.attrs)\n dss.append(ds)\n mds = QA4SMNamedAttributes(parts['mds_id'], parts['mds'], self.attrs)\n else:\n mds = None\n return ref_ds, dss, mds", "def attr_namer(name, renames=renames):\n if name in renames:\n return renames[name]\n return name", "def __getattr__(self, name):\n if name == 'wd':\n return self.__wd\n raise AttributeError", "def _attribute(self, name: _expression.String) -> _expression.Any:\n for c in self.constants:\n if c.name == name.native_value:\n assert isinstance(c.value, _expression.Any)\n return c.value\n\n return super(CompositeType, self)._attribute(name) # Hand over up the inheritance chain, this is important", "def get_dim_attribute(self,attr):\n return [getattr(dim,attr) for dim in self.dimensions]", "def get_attribute_by_name(attributes, attributeName):\n for attrib in attributes:\n if attrib['name'] == attributeName:\n return attrib\n return None", "def get_attr(self, location, attr, default=None):\r\n return self.get_attrs(location).get(attr, default)", "def _get_samples_attribute(self, attr, *args, **kwargs):\n try:\n vals = [getattr(fk, attr)(*args, **kwargs) for fk in self.samples_]\n except TypeError:\n vals = [getattr(fk, attr) for fk in self.samples_]\n try:\n unit_ = Unit(str(vals[0].unit))\n return np.array([v.value for v in vals]) * unit_\n except AttributeError:\n return np.array(vals)", "def getElementName(self):\n return _libsbml.UnitDefinition_getElementName(self)", "def __getattr__(self, name):\n if name in self:\n return self[name]\n raise AttributeError(_(\"Unknown attribute '%s'.\") % 
name)", "def getAttrName(self, context):\r\n return self.attr if self.attr is not None else context.attr", "def parse_unit(self, unitelem) -> Unit:\n u = Unit()\n\n u.unitid = unitelem.attrib['id'].strip()\n div = unitelem.find('{*}divide')\n if div is not None:\n nom = div.find('{*}unitNumerator').find('{*}measure')\n denom = div.find('{*}unitDenominator').find('{*}measure')\n u.nom = re.sub('.*:', '', nom.text).lower()\n u.denom = re.sub('.*:', '', denom.text).lower()\n else:\n m = unitelem.find('{*}measure')\n u.nom = re.sub('.*:', '', m.text).lower()\n\n return u", "def attribute(self, name, by_ref=False):\n\n if by_ref:\n return self._attributes_by_ref[name]\n else:\n try:\n return self._attributes[name]\n except KeyError:\n raise NoSuchAttributeError(\"Unknown attribute '{}' \"\n \"in dimension '{}'\"\n .format(name, self.name),\n name)", "def xd_element(name):\n try:\n name = name[:2]\n except:\n pass\n try:\n covalence_radius[name]\n except:\n name = name[0]\n return name", "def getglobal(self, attName):\n return self.attributes[attName]", "def attributeType(self) -> unicode:\n ...", "def getAttr(self, name, *args):\n if len(args) > 0:\n return self.attrs.get( name, args[0] )\n return self.attrs[name]", "def getAttribute(self, name):\n \n return self[self._name][name]", "def device_state_attributes(self):\n # attributes = super().device_state_attributes\n attributes = {ATTR_UNIT_OF_MEASUREMENT: self._unit}\n return attributes", "def attributeName(*args, leaf: bool=True, long: bool=True, nice: bool=True, short: bool=True,\n **kwargs)->AnyStr:\n pass", "def get_field_attr(name):\n # de variant met een repeating group (entiteit, dataitem) levert hier nog een probleem op.\n # is dat omdat er twee entiteiten in 1 scherm staan?\n fields = []\n opts = my.rectypes[name]._meta\n for x in opts.get_fields(): # fields:\n fldname = x.name\n fldtype = x.get_internal_type()\n if fldname == 'id' or fldtype in ('ForeignKey', 'ManyToManyField'):\n # if fldname == 'id' or any((x.many2one, x.many2many, x.one2many))\n continue\n try:\n length = x.max_length\n except AttributeError:\n length = -1\n fields.append((fldname, fldtype[:-5], length))\n return fields", "def _parse_units(self, model, comp, node):\n node = dom_child(node, 'unitDefinition')\n while node:\n name = node.getAttribute('id')\n self.log('Parsing unit definition for \"' + name + '\".')\n unit = myokit.units.dimensionless\n node2 = dom_child(node, 'listOfUnits')\n node2 = dom_child(node2, 'unit')\n while node2:\n kind = str(node2.getAttribute('kind')).strip()\n u2 = self._convert_unit(kind)\n if node2.hasAttribute('multiplier'):\n m = float(node2.getAttribute('multiplier'))\n else:\n m = 1.0\n if node2.hasAttribute('scale'):\n m *= 10 ** float(node2.getAttribute('scale'))\n u2 *= m\n if node2.hasAttribute('exponent'):\n u2 **= float(node2.getAttribute('exponent'))\n unit *= u2\n node2 = dom_next(node2, 'unit')\n self.units[name] = unit\n node = dom_next(node, 'unitDefinition')", "def getCustomAttribute(self):\n\t\treturn self.Attribute", "def find_info( attr, kw, metadata, default='' ):\n str_attr = str(attr)\n return kw.get( str_attr, metadata.get( str_attr, default ) )", "def extensible_attributes():\n return 'extensibleattributedef?'", "def get_dim_attribute(self,attr):\n return [getattr(getattr(self,name),attr) for name in self._dimensions]", "def find_attribute(orm_device, attr_name, attr_type):\n for template_id in orm_device['attrs']:\n for attr in orm_device['attrs'][template_id]:\n if (attr['label'] == attr_name) and 
(attr['type'] == attr_type):\n LOGGER.debug(f\" retrieving attribute {attr}\")\n return attr\n return None", "def getElementName(self):\n return _libsbml.ListOfUnitDefinitions_getElementName(self)", "def proc_attr(inp):\n dic = {}\n for att in inp.attrs.keys():\n if getattr(inp.attrs[att], \"dtype\", None) is None:\n dic[att] = inp.attrs[att]\n elif inp.attrs[att].dtype.char == 'S':\n dic[att] = [\n x.strip() for x in inp.attrs[att].tostring().decode('ascii').split(',')\n ]\n else:\n dic[att] = (\n inp.attrs[att][0]\n if isinstance(inp.attrs[att],np.ndarray) and\n inp.attrs[att].size==1\n else inp.attrs[att]\n )\n return dic\n pass", "def attribute(self, name):\n\n attrs = [attr for attr in self.attributes if attr.name == name]\n\n if attrs:\n return attrs[0]\n else:\n raise NoSuchAttributeError(name)", "def question(self, name: str) -> Optional[NumericalAttribute]:\n return super().attribute(name=name)", "def get_unit(self):\n return self.unit" ]
[ "0.63725454", "0.60422206", "0.5772077", "0.57339555", "0.5650211", "0.5626752", "0.5624979", "0.55762106", "0.55762106", "0.5543511", "0.55427015", "0.55405295", "0.5533721", "0.5516388", "0.54795426", "0.5476638", "0.5459001", "0.5459001", "0.5439841", "0.54318297", "0.5421264", "0.54183096", "0.54182243", "0.5405263", "0.5390103", "0.5385034", "0.537344", "0.5331236", "0.53082806", "0.52970684", "0.52893883", "0.52811337", "0.52811337", "0.52809244", "0.5279733", "0.52671695", "0.5253596", "0.52501607", "0.5242785", "0.5224088", "0.52183634", "0.52183634", "0.52036256", "0.51874655", "0.5165726", "0.5163875", "0.5154811", "0.5128768", "0.51283073", "0.5127832", "0.5117124", "0.5108869", "0.5101482", "0.50758487", "0.5063396", "0.50633675", "0.505736", "0.505684", "0.5044163", "0.5040867", "0.502981", "0.50251", "0.5013233", "0.50120974", "0.5008213", "0.49933088", "0.49893832", "0.49819162", "0.4980904", "0.49749026", "0.49602747", "0.4957705", "0.49571612", "0.49568042", "0.49564004", "0.49502513", "0.49480262", "0.49412948", "0.49377915", "0.49330458", "0.4930961", "0.49220005", "0.49211553", "0.49168608", "0.49148837", "0.4906337", "0.4904726", "0.49040216", "0.49019", "0.4901091", "0.48997355", "0.48976493", "0.48881745", "0.4886406", "0.488431", "0.48837304", "0.48825735", "0.48819992", "0.48819786", "0.48810893" ]
0.6441973
0
display information about the current Index
def info(self): txt = """Lick Index {s.name} wavelength units: {s.wavelength_unit} Index Band: {s.band} Blue continuum band: {s.blue} Red continuum band: {s.red} Measurement unit: {s.index_unit}""".format(s=self) print(txt)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def index(self):\r\n return None", "def get_info_format(self):\n return self.session.api.get_index(self)", "def show_elasticsearch_index_info(cluster: str, index: str):\n\n elastic = sreElastic(host=cluster)\n pp = pprint.PrettyPrinter(indent=2, width=120)\n\n print(\"\\nLocation:\")\n pp.pprint(elastic.get_index_location(index))\n print(\"\\nRouting:\")\n pp.pprint(elastic.get_index_routing(index))\n print(\"\\n\")", "def index(self):\n return dict(data='index')", "def index(self):\n return self._index", "def index(self):\n return self.container['index']", "def info(self):", "def info(self):", "def index(self):\n self.index_value(self.proxy_get())", "def index():\n return render_template(\n 'index_t.html',\n call_counter=str(get_model().call_counter),\n app_version=str(app.config.get('GIT_HASH', None))\n )", "def index(self):\n return self._data.get('index')", "def index(self):\n\t\treturn render_template('index.html')", "def index(self):\n return self._index", "def index(self):\n return self._index", "def index(self):\n return self._index", "def index(self):\n return self._index", "def index(self):\n return self._index", "def index(self):\n return self._index", "def index(self):\n return self._index", "def index(self):\n return self._index", "def index(self):\n return self._index", "def index(self):\n return self._index", "def index(self):\n return self._index", "def show_index(self):\n\n df = self.__df_timings\n return df.index", "def index(self, data) -> None:\n pass", "def index():\n return render_template('index.html', getName=ASK_NAME)", "def index():\n pass", "def index(self):\n return render_template('main/index.html')", "def __repr__(self):\n return str(self.index)", "def print_indices(self):\n # Putting the param in the endpoint here because why not\n endpoint = \"/_cat/indices?v\"\n url = self.base_url + endpoint\n r = requests.get(url, headers=self.headers, verify=False)\n r.raise_for_status()\n print(r.text)\n return", "def index_template(self):\n return '{}/{}.html'.format(self.object_name, self.index_endpoint)", "def index(self):\n return self._quote_get('option/index')", "def action_index(biv_obj):\n return pnf.Nomination().execute(biv_obj)", "def info(self):\n self._info()", "def show(self):\n\n pass", "def index():\n return 'There is nothing here.'", "def index():\n return 'Thanks for using the Bird Stats API.'", "def on_start(self):\n self.get_index()", "def index():\n return list()", "def get_index(self):\n self.client.get('/')", "def print_index(self):\n\n # Process event handlers\n pygame.event.pump()\n\n # Buttons\n for button in range(0, self.num_buttons):\n value = self.js.get_button(button)\n if value:\n print \"Button {} on\".format(button)\n \n # Axes\n for axis in range(0, self.num_axes):\n value = self.js.get_axis(axis)\n if value > 0:\n print \"Axis {} positive\".format(axis)\n elif value < 0:\n print \"Axis {} negative\".format(axis)\n \n # Hats\n for hat in range(0, self.num_hats):\n value = self.js.get_hat(hat)\n if any(value) != 0:\n print \"Hat {}: {}\".format(hat, value)", "def index(self):\n\n\t\tself.db = DB()\n\t\tactivityTuple = self.db.select_all_from(\"activity\")[1]\n\t\ttmpl = lookup.get_template(\"index.html\")\n\t\treturn (tmpl.render(activity=activityTuple))", "def index():\n\n return {\n 'page': 'index',\n }", "def index():\n\n INTERFACE.add_dir(u'RÚV', 'view_category', '1')\n INTERFACE.add_dir(u'RÚV Íþróttir', 'view_category', '10')\n INTERFACE.add_dir(u'RÁS 1', 'view_category', '2')\n INTERFACE.add_dir(u'RÁS 2', 'view_category', '3')\n 
INTERFACE.add_dir(u'Rondó', 'view_category', 'rondo')\n INTERFACE.add_dir(u'Krakkasarpurinn', 'view_category', 'born')\n INTERFACE.add_dir(u'Hlaðvarp', 'view_podcast_index', '')\n INTERFACE.add_dir(u'Leita', 'search', '')", "def detail(self, req):\n return self.index(req)", "def display_for_index(self, index):\n obj = index.data(self.ObjectRole)\n cb = self.DISPLAY_CALLBACKS.get(index.column())\n if not cb:\n return \"\"\n return cb(obj)", "def getIndex(self):\n return self.index", "def index():\n return render_template(\"index.html\",\n title='Index')", "def _show_info(self):\n\n dataframe = self._cache.get_source(config.DATAFRAME_ARTISTS)\n dataframe.printSchema()", "def show(self):\n pass", "def index():\n\tresults = queries.index()\n\ttags = queries.tags()\n\treturn render_template('index.html', packages=results, tags=tags, currentFilter=None)", "def index():\n\n return dict()", "def showmeta(self,\r\n index):\r\n\r\n return self.get_metadata_from_note(index)", "def index(self):\n return self.render(\"admin/index.html\")", "def index(self):\n\n if self.cluster:\n self.cluster.index()\n else:\n super().index()", "def index_stats(self):\r\n request = http.Request('GET', '/metadata/index_stats')\r\n return request, parsers.parse_json", "def show(self) -> None:", "def index(self):\n return self.data.index", "def index(request):\n return render(\n request,\n 'core/index.html',\n {\n 'current_view': 'index',\n }\n )", "def index():\n today = datetime.today()\n return render_template(\"index.html.j2\", today=today)", "def index(self) -> int:", "def details(self):\n pass", "def current_index(self):\n return self._current_index", "def index():\n\n\treturn(render_template('index.html'))", "def info(self):\n self.update_info()\n print('Number of electrodes: ' + str(self.n_elecs))\n print('Recording time in seconds: ' + str(self.dur))\n print('Sample Rate in Hz: '+ str(self.sample_rate))\n print('Number of sessions: ' + str(self.n_sessions))\n print('Date created: ' + str(self.date_created))\n print('Meta data: ' + str(self.meta))", "def get_index(self):\n return self.index", "def get_index(self):\n return self.index", "def info(self):\n pp = pprint.PrettyPrinter(indent=4)\n print_text_box('Info')\n pp.pprint(self.manager.data[\"info\"])\n print('')", "def index():\n \n currentDateTime = current_datetime()\n fromDateTime = calc_day(currentDateTime, -3)\n\n # Adjust if any graphs should be shown in index page\n # Temperatur=XML(render_graph(3, 5, fromDateTime, currentDateTime, show_dots=False))\n # Procent_smoke=XML(render_graph(3, 6, fromDateTime, currentDateTime, show_dots=False))\n # Kitchen_Stove=XML(render_graph(2, 3, fromDateTime, currentDateTime, show_dots=False))\n # Humid=XML(render_graph(3, 4, fromDateTime, currentDateTime, show_dots=False))\n # Brightness=XML(render_graph(3, 7, fromDateTime, currentDateTime, show_dots=False))\n # Hall_motions=XML(render_graph(1, 1, fromDateTime, currentDateTime, show_dots=False, hits=True))\n # Hall_door=XML(render_graph(1, 2, fromDateTime, currentDateTime, show_dots=False, on_off=['Open', 'Close']))\n\n # return dict(test=locals())\n # return dict(test=device_monitoring)\n return dict()", "def get_info ( self ):\n proxy = self._cur_control.proxy\n return ( proxy.list(), proxy.index )", "async def index():\n return \"Index\"", "def show():\n info(str(Project))", "def index():\n return render_template('index.html')", "def index():\n return render_template('index.html')", "def index():\n return 'OK'", "def index(self) -> HTMLBody:\n\t\treturn 
render_template(\"index.jinja2\")", "def index():\n return render_template('index.html')", "def index(self):\n return render(\"/derived/rock/index.mako\")", "def index():\n return render_template('0-index.html')", "def index():\r\n return render_template('index.html')", "def index(self):\n s = \"\"\n\n sb = []\n for sim in self.simulations.values():\n url = \"{0.uid}/{0.password}/status\".format(sim)\n sb.append(\"<a href='{0}'>{1.uid}</a></br>\".format(\n url, sim))\n s += \"<b>Simulations running:</b></br>\"\n s += \"\\n\".join(sb)\n\n s += \"<b>List of items in shop:</b>\\n</br>\"\n s += \"\\n</br>\".join(self.shop.itemAndCostDict.keys())\n \n s += \"</br><b>List of all items:</b>\\n</br>\"\n s += \"\\n</br>\".join(item.items.keys())\n\n return s", "def index(request):\n data = Information.objects.all()\n args = {'data': data}\n return render_to_response('tasks/index.html', args, context_instance=RequestContext(request))", "def index():\n\treturn render_template(\"index.html\", title=\"Home\")", "def display_contents(CurrentList):\n\n print(\"========================Start of display_contents() Method*\")\n print(\"The number of items in list are :\" + str(len(CurrentList)))\n print(\"----- Fl.ID--- ||sub_T|| reqStart||Dur ||Start||End\")\n # Flight ID||sub_Time||reqStart||reqDuration||actualStart||actualEnd\")\n for j in range(len(CurrentList)):\n print(str(j) + \": \" + CurrentList[j].showFlightInfo())\n print(\"========================END of display_contents() Method *\")", "def overview():\n pages_list = g.db.pages.find().sort('name')\n return render_template('{}/index.html'.format(MODULE_DIR), **locals() )", "def index_value(self):\r\n\t\tfor index, column_header in enumerate(self.header_row):\r\n\t\t\tprint(index, column_header)", "def print_res(self, result, index=None):\n if index is not None:\n print(str(index).rjust(3)+ \" \" + _c.bold + _c.blue + result[\"title\"] + _c.reset)\n if result[\"description\"]:\n print(\" \"*4 + \"Description:\\t\", result[\"description\"])\n print(\n \" \"*4 +\n result[\"highlight\"].replace(\"<highlight>\", _c.blue).replace(\"</highlight>\", _c.reset),\n )\n print(\" \"*4 + \"Path: \", result[\"path\"])\n else:\n print(\"Title:\\t\\t\", result[\"title\"])\n if result[\"description\"]:\n print(\"Description:\\t\", result[\"description\"])\n print(result[\"highlight\"])\n print(\"Path: \", result[\"path\"])", "def show(self):\n\t\traise NotImplementedError()", "def show(self):", "def index():\n return render_template('index.html', title='Home')", "def show_index_page():\n\n return render_template('index.html')", "def obj_index(self) -> str:\n return str(self._data[\"index\"])", "def show(self):\n raise NotImplementedError", "def show(self):\n raise NotImplementedError", "def __str__(self):\n return \"{}_human\".format(self.index)", "def info(self):\n return self.client.call('GET', self.name + 'info')", "def displayInfo(self):\n # clear stdout for a smoother display\n # os.system('cls' if os.name=='nt' else 'clear')\n\n #print(\"=========== Status ============\")\n # print(\n # \"speed: \" + str(self.speed) +\n # \"\\nangle: \" + str(self.steering_angle) +\n # \"\\nsign: \" + str(self.detected_sign) +\n # \"\\nlane lines: \" + str(self.lane_lines) +\n # \"\\nintersection line flag: \" + str(self.intersection_line) +\n # \"\\ncurrent state label: \" + str(self.currentStateLabel) +\n # \"\\ncurrent states: \" + str(self.currentState)\n #)", "def update_info(self):\n self.m_canvas.master.m_informations_displayer.set_operations(\n self.m_current_index\n 
)\n self.m_canvas.master.m_informations_displayer.set_time(\n self.m_history[self.m_current_index].m_passed_time\n )", "def index():\n result = query_db('SELECT * FROM climate ORDER BY time DESC LIMIT 1;',\n one=True)\n try:\n context = {'temp': round(result['temp'], 1),\n 'humid': round(result['humid'], 1),\n 'pressure': round(result['pressure']),\n 'time': datetime.datetime.fromtimestamp(int(result['time'])\n ).strftime('%d-%m-%y %H:%M')}\n except TypeError:\n context = {'temp': None,\n 'humid': None,\n 'pressure': None,\n 'time': None}\n return render_template('index.html', **context)" ]
[ "0.7144294", "0.6980231", "0.6847654", "0.68377274", "0.67996037", "0.67142814", "0.6706408", "0.6706408", "0.66565186", "0.66213363", "0.6611364", "0.65879714", "0.65692246", "0.65692246", "0.65692246", "0.65692246", "0.65692246", "0.65692246", "0.65692246", "0.65692246", "0.65692246", "0.65692246", "0.65692246", "0.6553538", "0.6447819", "0.64418566", "0.6430477", "0.63898826", "0.63553447", "0.6343505", "0.63375974", "0.63126373", "0.6301881", "0.62956804", "0.6295462", "0.6289598", "0.62877434", "0.6283174", "0.62562495", "0.6250067", "0.6248203", "0.6245411", "0.6238725", "0.623778", "0.62215614", "0.62204975", "0.62162435", "0.62152815", "0.6212293", "0.6209454", "0.6206866", "0.62003016", "0.6194072", "0.6193454", "0.6160317", "0.61564493", "0.61491877", "0.6144033", "0.61358476", "0.6134433", "0.613113", "0.61176866", "0.6108461", "0.6107836", "0.6104529", "0.60765815", "0.60765815", "0.60582834", "0.6057121", "0.60542923", "0.6044777", "0.60426444", "0.60423094", "0.60423094", "0.6031937", "0.60298896", "0.6013608", "0.60131234", "0.6009599", "0.60092974", "0.60085815", "0.5987092", "0.5983437", "0.5975852", "0.59722686", "0.59705895", "0.59705275", "0.5968051", "0.5962144", "0.5957129", "0.59482497", "0.59481984", "0.5947438", "0.5947438", "0.5943638", "0.5936933", "0.5921951", "0.5921535", "0.59177464" ]
0.6661338
9
compute spectral index after continuum subtraction
def __call__(self, *args, **kwargs): return self.get(*args, **kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_spectral_index(self):\n try:\n return self.alpha\n except AttributeError:\n return None", "def refractive_index_fused_silica(wavelength):\n wavelength_um = wavelength / 1000\n\n A0 = 2.104025406E+00\n A1 = -1.456000330E-04\n A2 = -9.049135390E-03\n A3 = 8.801830992E-03\n A4 = 8.435237228E-05\n A5 = 1.681656789E-06\n A6 = -1.675425449E-08\n A7 = 8.326602461E-10\n\n n = np.sqrt( A0 + A1 * wavelength_um ** 4 + A2 * wavelength_um ** 2 + A3 * wavelength_um ** -2 + \\\n A4 * wavelength_um ** -4 + A5 * wavelength_um ** -6 + A6 * wavelength_um ** -8 + A7 * wavelength_um ** -10 )\n\n return n", "def index_of_refraction(self):\n return self.microsphere.index_of_refraction(self.wavelength)", "def spectral():\n c = _si.c.value\n h = _si.h.value\n hc = h * c\n two_pi = 2.0 * np.pi\n inv_m_spec = si.m**-1\n inv_m_ang = si.radian / si.m\n\n return Equivalency(\n [\n (si.m, si.Hz, lambda x: c / x),\n (si.m, si.J, lambda x: hc / x),\n (si.Hz, si.J, lambda x: h * x, lambda x: x / h),\n (si.m, inv_m_spec, lambda x: 1.0 / x),\n (si.Hz, inv_m_spec, lambda x: x / c, lambda x: c * x),\n (si.J, inv_m_spec, lambda x: x / hc, lambda x: hc * x),\n (inv_m_spec, inv_m_ang, lambda x: x * two_pi, lambda x: x / two_pi),\n (si.m, inv_m_ang, lambda x: two_pi / x),\n (si.Hz, inv_m_ang, lambda x: two_pi * x / c, lambda x: c * x / two_pi),\n (si.J, inv_m_ang, lambda x: x * two_pi / hc, lambda x: hc * x / two_pi),\n ],\n \"spectral\",\n )", "def spectra_analysis(file_name, sky_file_name): \n\n # read file name and select out the id that we are dealing with\n curr_file_name = file_name.split('.')\n curr_file_name = curr_file_name[0].split('/')\n stk_f_n = curr_file_name[-1]\n cube_id = int(re.search(r'\\d+', stk_f_n).group())\n\n # read catalogue and obtain the HST redshift estimate\n #catalogue = np.load(\"data/matched_catalogue.npy\")\n catalogue = np.load(\"data/low_redshift_catalogue.npy\")\n cat_loc = np.where(catalogue[:,0] == cube_id)[0]\n cube_info = catalogue[cat_loc][0]\n \n hst_redshift = cube_info[7]\n\n # spectra and sky noise data\n spectra_data = spectrum_creator(file_name)\n wl_soln = wavelength_solution(file_name)\n sn_data = sky_noise(sky_file_name)\n\n galaxy_data = spectra_data['galaxy']\n\n # removing baseline from data\n base = peakutils.baseline(galaxy_data, 3)\n gd_mc = galaxy_data - base\n\n # scaling sky-noise to be similar to spectra data\n gd_max = np.amax(galaxy_data)\n sn_data_max = np.amax(sn_data)\n sn_scale = gd_max / sn_data_max\n\n sn_data = sn_data * sn_scale\n\n # spectra lines\n sl = {\n 'emis': {\n '[OII]': '3727',\n 'CaK': '3933',\n 'CaH': '3968',\n 'Hdelta': '4101', \n }, \n 'abs': {'K': '3934.777',\n }\n } \n\n # we can use the redshift from the HST catalogue to define the region to search for\n # the doublet in\n\n # lower and upper bound on wavelength range\n lower_lambda = (1+hst_redshift)*3600\n upper_lambda = (1+hst_redshift)*3850\n\n # x-axis data\n data_h_range = np.linspace(wl_soln['begin'], wl_soln['end'], wl_soln['steps'])\n mask = (lower_lambda < data_h_range) & (data_h_range < upper_lambda) \n\n lambda_data = data_h_range[mask]\n flux_data = gd_mc[mask] \n \n # Finding peaks with PeakUtils\n pu_peaks = peakutils.indexes(flux_data, thres=600, thres_abs=True)\n pu_peaks_x = peakutils.interpolate(lambda_data, flux_data, pu_peaks)\n\n pu_peaks_x = np.sort(pu_peaks_x)\n pu_peaks_x = pu_peaks_x[lower_lambda < pu_peaks_x]\n pu_peaks_x = pu_peaks_x[pu_peaks_x < upper_lambda]\n \n data_dir = 'cube_results/' + stk_f_n\n if not os.path.exists(data_dir):\n 
os.mkdir(data_dir)\n\n peaks_file = open(data_dir + '/' + stk_f_n + '_peaks.txt', 'w')\n peaks_file.write(\"Peaks found on \" + str(datetime.datetime.now()) + \"\\n\\n\")\n\n peaks_file.write(\"Number Wavelength \\n\")\n for i_peak in range(len(pu_peaks_x)):\n curr_peak = pu_peaks_x[i_peak]\n peaks_file.write(str(i_peak) + \" \" + str(curr_peak) + \"\\n\")\n\n # manually selecting which peak is the [OII] peak - given in wavelength\n if (pu_peaks_x.size != 0):\n otwo_wav = float(pu_peaks_x[0]) \n otwo_acc = float(sl['emis']['[OII]'])\n\n redshift = (otwo_wav / otwo_acc) - 1\n else:\n # accepting HST redshift if cannot find peak\n redshift = hst_redshift\n\n return {'gd_shifted': gd_mc, 'sky_noise': sn_data, 'spectra': sl, 'redshift': \n redshift, 'pu_peaks': pu_peaks_x}", "def calc(self,index,counter_values):\n gr = self.grSign * self.grPitch['Value'].value\n m = self.mSign * self.mPitch['Value'].value\n \n offsetG,offsetM = self.checkOffset()\n beta = self.toRadians(gr) - (math.pi/2.0) - offsetG\n theta = (math.pi/2.0) - (self.toRadians(m)) - offsetM\n alpha = (2.0*theta) + beta\n numerator = (math.sin(alpha) + math.sin(beta))\n denominator = (self.DiffrOrder * self.look_at_grx())\n wavelength = numerator / denominator\n \n if wavelength == 0.0:\n energy_physicalmot = 0.0\n else:\n energy_physicalmot = self.hc / wavelength\n #if self.FixedM2Pit: \n Cff = math.cos(beta)/math.cos(alpha)\n if energy_physicalmot < 0 :\n #warning: wavelength se vuelve negativo ... ??????\n energy_physicalmot = energy_physicalmot *(-1) \n \n # Real Energy is equal to the energy calculated by the encoders\n # minus an offset that depends on the same energy calculated by the \n # encoders:\n # E_physicalmot = Ereal + offset\n # with offset = a*Ereal + b\n # This implies that: Ereal = (Ephysicalmot - b)/(1+a) \n a_coeff = self.EnergyDP.a_offset_coeff\n b_coeff = self.EnergyDP.b_offset_coeff\n numerator = energy_physicalmot - b_coeff\n denominator = 1 + a_coeff\n energy = numerator / denominator\n \n if index == 1:\n return energy\n elif index == 2:\n return Cff", "def spectral_decrease(sign, fs):\n f, ff = plotfft(sign, fs)\n\n k = len(ff)\n soma_num = 0\n for a in range(2, k):\n soma_num = soma_num + ((ff[a]-ff[1])/(a-1))\n\n ff2 = ff[2:]\n if not np.sum(ff2):\n return 0\n else:\n soma_den = 1 / np.sum(ff2)\n return soma_den * soma_num", "def baseline(spectra):\n\n return spectra - np.mean(spectra, axis=0)", "def spectralIndices(\n x: Union[ee.Image, ee.ImageCollection],\n index: Union[str, List[str]] = \"NDVI\",\n G: Union[float, int] = 2.5,\n C1: Union[float, int] = 6.0,\n C2: Union[float, int] = 7.5,\n L: Union[float, int] = 1.0,\n cexp: Union[float, int] = 1.16,\n nexp: Union[float, int] = 2.0,\n alpha: Union[float, int] = 0.1,\n slope: Union[float, int] = 1.0,\n intercept: Union[float, int] = 0.0,\n gamma: Union[float, int] = 1.0,\n kernel: str = \"RBF\",\n sigma: Union[float, str] = \"0.5 * (a + b)\",\n p: Union[float, int] = 2,\n c: Union[float, int] = 1.0,\n online: bool = False,\n drop: bool = False,\n) -> Union[ee.Image, ee.ImageCollection]:\n platformDict = _get_platform_STAC(x)\n\n if isinstance(sigma, int) or isinstance(sigma, float):\n if sigma < 0:\n raise Exception(f\"[sigma] must be positive! Value passed: sigma = {sigma}\")\n\n if p <= 0 or c < 0:\n raise Exception(\n f\"[p] and [c] must be positive! 
Values passed: p = {p}, c = {c}\"\n )\n\n additionalParameters = {\n \"g\": float(G),\n \"C1\": float(C1),\n \"C2\": float(C2),\n \"L\": float(L),\n \"cexp\": float(cexp),\n \"nexp\": float(nexp),\n \"alpha\": float(alpha),\n \"sla\": float(slope),\n \"slb\": float(intercept),\n \"gamma\": float(gamma),\n \"p\": float(p),\n \"c\": float(c),\n }\n\n spectralIndices = _get_indices(online)\n indicesNames = list(spectralIndices.keys())\n\n if not isinstance(index, list):\n if index == \"all\":\n index = list(spectralIndices.keys())\n elif index in [\n \"vegetation\",\n \"burn\",\n \"water\",\n \"snow\",\n \"drought\",\n \"urban\",\n \"kernel\",\n ]:\n temporalListOfIndices = []\n for idx in indicesNames:\n if spectralIndices[idx][\"type\"] == index:\n temporalListOfIndices.append(idx)\n index = temporalListOfIndices\n else:\n index = [index]\n\n for idx in index:\n if idx not in list(spectralIndices.keys()):\n warnings.warn(\n f\"Index {idx} is not a built-in index and it won't be computed!\"\n )\n else:\n\n def temporalIndex(img):\n lookupDic = _get_expression_map(img, platformDict)\n lookupDic = {**lookupDic, **additionalParameters}\n kernelParameters = _get_kernel_parameters(img, lookupDic, kernel, sigma)\n lookupDic = {**lookupDic, **kernelParameters}\n lookupDicCurated = _remove_none_dict(lookupDic)\n if all(\n band in list(lookupDicCurated.keys())\n for band in spectralIndices[idx][\"bands\"]\n ):\n return img.addBands(\n img.expression(\n spectralIndices[idx][\"formula\"], lookupDicCurated\n ).rename(idx)\n )\n else:\n warnings.warn(\n f\"This platform doesn't have the required bands for {idx} computation!\"\n )\n return img\n\n if isinstance(x, ee.imagecollection.ImageCollection):\n x = x.map(temporalIndex)\n elif isinstance(x, ee.image.Image):\n x = temporalIndex(x)\n\n if drop:\n x = x.select(index)\n\n return x", "def signal_spectral(signal, FS):\n # check inputs\n if signal is None or signal == []:\n print(\"Signal is empty.\")\n\n # ensure numpy\n signal = np.array(signal)\n # f, spectrum = st.welch_spectrum(signal, sampling_rate=FS)\n spectrum = np.fft.fft(signal, FS)[:len(signal)//2]\n f = np.fft.fftfreq(len(signal))[:len(signal)//2]\n\n cum_ff = np.cumsum(spectrum)\n spect_diff = np.diff(spectrum)\n #energy, _ = st.signal_energy(spectrum, f)[:]\n\n args, names = [], []\n\n if dict['spectral_maxpeaks']['use'] == 'yes':\n # spectral_maxpeaks\n try:\n spectral_maxpeaks = np.sum([1 for nd in range(len(spect_diff[:-1])) if (spect_diff[nd+1]<0 and spect_diff[nd]>0)])\n except:\n spectral_maxpeaks = None\n args += [spectral_maxpeaks]\n names += ['spectral_maxpeaks']\n\n # if dict['spect_var']['use'] == 'yes':\n # # spect_variation\n # try:\n # spect_var = np.convolve(energy)\n # spect_var /= np.max(np.abs(spect_var))\n # except:\n # spect_var = None\n # args += [spect_var]\n # names += ['spect_var']\n\n if dict['curve_distance']['use'] == 'yes':\n # curve_distance\n try:\n curve_distance = np.sum(np.linspace(0, cum_ff[-1], len(cum_ff)) - cum_ff)\n except:\n curve_distance = None\n args += [curve_distance]\n names += ['curve_distance']\n\n if dict['spectral_roll_off']['use'] == 'yes':\n # spectral_roll_off\n try:\n spectral_roll_off = spectral_roll(f, spectrum, cum_ff, 0.95)[0]\n except:\n spectral_roll_off = None\n args += [spectral_roll_off]\n names += ['spectral_roll_off']\n\n if dict['spectral_roll_on']['use'] == 'yes':\n # spectral_roll_on\n try:\n spectral_roll_on = spectral_roll(f, spectrum, cum_ff, 0.05)[0]\n except:\n spectral_roll_on = None\n args += [spectral_roll_on]\n 
names += ['spectral_roll_on']\n\n if dict['spectral_dec']['use'] == 'yes':\n # spectral_decrease\n try:\n spectral_dec = (1/np.sum(spectrum)) * np.sum((spectrum[:] - spectrum[1])/np.linspace(1, len(spectrum), len(spectrum),1))\n except:\n spectral_dec = None\n args += [spectral_dec]\n names += ['spectral_dec']\n\n if dict['spectral_slope']['use'] == 'yes':\n # spectral_slope\n sum_f = np.sum(f)\n len_f = len(f)\n try:\n spectral_slope = (len_f * np.dot(f, spectrum) - sum_f * np.sum(spectrum)) / (len_f * np.dot(f, f) - sum_f ** 2)\n except:\n spectral_slope = None\n args += [spectral_slope]\n names += ['spectral_slope']\n\n sum_spectrum = np.sum(spectrum)\n norm_spectrum = spectrum / sum_spectrum\n # spectral_centroid\n try:\n spectral_centroid = np.dot(f, norm_spectrum)\n except:\n spectral_centroid = None\n\n # spectral_spread\n try:\n spectral_spread = np.dot(((f - spectral_centroid) ** 2), norm_spectrum)\n except:\n spectral_spread = None\n\n if dict['spectral_spread']['use'] == 'yes':\n args += [spectral_spread]\n names += ['spectral_spread']\n\n if dict['spectral_kurtosis']['use'] == 'yes':\n # spectral_kurtosis\n try:\n spectral_kurtosis = np.sum(((f - spectral_centroid) ** 4) * norm_spectrum) / (spectral_spread**2)\n except:\n spectral_kurtosis = None\n args += [spectral_kurtosis]\n names += ['spectral_kurtosis']\n\n if dict['spectral_skewness']['use'] == 'yes':\n # spectral_skewness\n try:\n spectral_skewness = np.sum(((f - spectral_centroid) ** 3) * norm_spectrum) / (spectral_spread ** (3 / 2))\n except:\n spectral_skewness = None\n args += [spectral_skewness]\n names += ['spectral_skewness']\n\n if dict['max_frequency']['use'] == 'yes':\n # max_frequency\n try:\n max_frequency = f[np.where(cum_ff > cum_ff[-1]*0.95)[0][0]]\n except:\n max_frequency = None\n args += [max_frequency]\n names += ['max_frequency']\n\n if dict['fundamental_frequency']['use'] == 'yes':\n # fundamental_frequency\n try:\n fundamental_frequency = f[np.where(cum_ff > cum_ff[-1]*0.5)[0][0]]\n except:\n fundamental_frequency = None\n args += [fundamental_frequency]\n names += ['fundamental_frequency']\n\n # if dict['max_power_spectrum']['use'] == 'yes':\n # # max_power_spectrum\n # try:\n # max_power_spectrum = np.max(spectrum)\n # except:\n # max_power_spectrum = None\n # args += max_power_spectrum\n # names += 'max_power_spectrum'\n\n # if dict['mean_power_spectrum']['use'] == 'yes':\n # # mean_power_spectrum\n # try:\n # mean_power_spectrum = np.mean(spectrum)\n # except:\n # mean_power_spectrum = None\n # args += mean_power_spectrum\n # names += 'mean_power_spectrum'\n #\n # if dict['spectral_skewness']['use'] == 'yes':\n # try:\n # spectral_skewness = np.mean(spectrum)\n # except:\n # spectral_skewness = None\n # args += spectral_skewness\n # names += 'spectral_skewness'\n #\n # if dict['spectral_kurtosis']['use'] == 'yes':\n # try:\n # spectral_kurtosis = np.mean(spectrum)\n # except:\n # spectral_kurtosis = None\n # args += spectral_kurtosis\n # names += 'spectral_kurtosis'\n\n # if dict['spectral_hist_']['use'] == 'yes':\n # # histogram\n # try:\n # _hist = list(np.histogram(spectrum, bins=int(np.sqrt(len(spectrum))), density=True)[0])\n # except:\n # if len(signal) > 1:\n # _hist = [None] * int(np.sqrt(len(signal)))\n # else:\n # _hist = [None]\n # args += [i for i in _hist]\n # names += ['spectral_hist_' + str(i) for i in range(len(_hist))]\n\n #return utils.ReturnTuple(tuple(args), tuple(names))\n return args, names", "def diff_index_calc(oct_abund_list1, oct_abund_list2):\n rel_index_list = []\n 
abs_index_list = []\n smty_index_list = []\n for i in range(10):\n abund_data_array = sc.asarray(oct_abund_list1[i], dtype='double')\n abund_sim_array = sc.asarray(oct_abund_list2[i], dtype = 'double')\n \n # make the length of the arrays similar to each other\n if len(abund_data_array) < len(abund_sim_array):\n small_len = abund_data_array\n long_len = abund_sim_array\n else:\n small_len = abund_sim_array\n long_len = abund_data_array\n diff = len(long_len) - len(small_len) \n small_len = sc.append(small_len, [0]*diff)\n \n relative_index_vect = abs(long_len - small_len)/long_len \n rel_index_list.append(sum(relative_index_vect)/len(relative_index_vect))\n \n absolute_index_vect = abs(long_len - small_len)\n abs_index_list.append(sum(absolute_index_vect)/len(absolute_index_vect))\n \n similarity_index_vect = []\n for i in range(len(long_len)):\n similarity_index_vect.append(sc.minimum(long_len[i], small_len[i])/sc.amax([long_len[i], small_len[i]]))\n \n smty_index_list.append(sum(similarity_index_vect)/len(similarity_index_vect)) \n \n rel_index_final = sum(rel_index_list)/10\n abs_index_final = sum(abs_index_list)/10\n smty_index_final = sum(smty_index_list)/10\n \n return (rel_index_final, abs_index_final, smty_index_final)", "def get_spectral_response(wavelengths_arr, stack):\n\n resolution = 1\n for i, re_index in enumerate(stack.index):\n step_size = stack.thickness.sum() / 2 ** 17\n z0 = np.linspace(0, stack.thickness[i], round(stack.thickness[i] / step_size))\n resolution += len(z0)\n\n electric_tot_te = np.zeros([resolution, len(wavelengths_arr)], dtype=complex)\n electric_tot_tm = np.zeros([resolution, len(wavelengths_arr)], dtype=complex)\n reflectivity_te = np.zeros(len(wavelengths_arr), dtype=complex)\n reflectivity_tm = np.zeros(len(wavelengths_arr), dtype=complex)\n transmission_te = np.zeros(len(wavelengths_arr), dtype=complex)\n transmission_tm = np.zeros(len(wavelengths_arr), dtype=complex)\n index_tot = np.zeros([resolution, len(wavelengths_arr)], dtype=complex)\n theta_tot = np.zeros([len(stack.index) + 1, wavelengths_arr.size], dtype=complex)\n\n a0 = 1 # Initial amplitude of electric field going toward the coating\n b0 = 0 # Initial amplitude of electric field going back the coating (if 0, no counter propagating light)\n theta = 0 # angle of the beam with respect to the coating\n\n for i, lam in enumerate(wavelengths_arr):\n # print a progressbar in the console\n print_progressbar(i, len(wavelengths_arr), suffix = '%')\n electric_tot_te[:, i], electric_tot_tm[:, i], reflectivity_te[i], reflectivity_tm[i], transmission_te[i], \\\n transmission_tm[i], index_tot, L, theta_tot = transfer_matrix_method(stack, a0, b0, lam, theta)\n return reflectivity_te, transmission_te, 1 - (reflectivity_te + transmission_te)", "def get_index(self):\n return (np.sqrt(self.dielectric))", "def apply_electronics_gain(full_frame, difference):\n #electronics_gain_odd = [0.0601, 0.0596, 0.0604, 0.0605]\n #electronics_gain_even = [0.0602, 0.0599, 0.0605, 0.0608]\n\n electronics_gain_odd = [0.0601, 0.0596, 0.0604, 0.0605]\n electronics_gain_even = [0.0602, 0.0599, 0.0605, 0.0608]\n\n all_quads = []\n num_quads = full_frame.shape[0]\n for quads in range(0, num_quads):\n active_quad = full_frame[quads, :, :]\n if difference[quads] < 0: # Note: Difference is odd-even\n gain_even = 1/electronics_gain_even[quads]\n gain_odd = 1/electronics_gain_odd[quads]\n elif difference[quads] > 0:\n gain_even = 1/electronics_gain_odd[quads]\n gain_odd = 1/electronics_gain_even[quads]\n gain_even = 
1/electronics_gain_even[quads]\n gain_odd = 1/electronics_gain_odd[quads]\n spec_pix, spat_pix = active_quad.shape\n gain_applied_quad = np.array([[0]*spec_pix]*spat_pix)\n even_detector_active_quad = gain_even*active_quad[:, ::2]\n odd_detector_active_quad = gain_odd*active_quad[:, 1::2]\n\n gain_applied_quad = np.reshape(gain_applied_quad, (spec_pix, spat_pix))\n gain_applied_quad[:, ::2] = even_detector_active_quad\n gain_applied_quad[:, 1::2] = odd_detector_active_quad\n #print(np.max(gain_applied_quad))\n #cc\n all_quads.append(gain_applied_quad)\n #cc\n return np.array(all_quads)", "def get_experimental_spectra(mol):\n\n data = pd.read_csv(mol, sep=',')\n wavelength = data.values[:, 0]\n\n absorption = data.values[:, 1]\n\n func = interp1d(wavelength, absorption, kind='quadratic')\n wavelength_new = 1. / np.linspace(1. / wavelength.max(), 1. / wavelength.min(), 100)\n absorption_new = func(wavelength_new)\n absorption_new *= 100. / absorption_new.max()\n\n return wavelength_new, absorption_new", "def music(csi_corr, csi_target, Ntx, Nrx, d_tx, d_rx, t):\n\n In = 0\n s = phase_correction(csi_corr, csi_target)\n s_lin = (s[:, :, 0, t:t + 2].reshape(6, 2, order='F'))\n\n '''Compute the covariance matrix and the eigendecompositon'''\n R_hat = np.cov(s_lin)\n D, Q = ln.eig(R_hat)\n\n '''Sort the eigenvalues in D'''\n Do = np.abs(D)\n D = np.sort(Do)[::-1]\n I = np.argsort(Do)[::-1]\n Q = Q[:, I]\n\n ''' Compute the Number of signal that are significative'''\n T = np.cumsum(np.real(D))\n for i in range(1, 1, np.size(T)):\n if T(i) >= 0.99 * T(np.size(T)):\n In = i\n break\n\n ''' Get the signal eigenvectors'''\n In = 0 # take the first signal\n Qs = Q[:, :In]\n\n ''' Get the noise eigenvectors'''\n Qn = Q[:, In + 1:]\n\n ''' Angles at which MUSIC Pseudospectrum will be computed '''\n angles1 = np.arange(-90, 90, 1)\n angles2 = np.arange(-90, 90, 1)\n\n '''Compute steering vectors corresponding values in angles'''\n a1 = np.exp(-1.j * 2 * np.pi * d_rx * np.tensordot(arange(Nrx), sin(angles1 * np.pi / 180), 0))\n a2 = np.exp(-1.j * 2 * np.pi * d_tx * np.tensordot(arange(Ntx), sin(angles1 * np.pi / 180), 0))\n\n '''Compute MUSIC \"spectrum\" '''\n music_spectrum = np.zeros((np.size(angles1), np.size(angles2)), dtype=complex)\n for k in range(1, np.size(angles2)):\n for j in range(1, np.size(angles1)):\n K = np.kron(a1[:, j], a2[:, k])\n s = dot(K.T, Qn)\n music_spectrum[j, k] = 1 / dot(abs(s), abs(s).T)\n\n ''' compute the mesh and plot the surf of the pseudospectrum '''\n\n fig = plt.figure()\n ax = fig.gca(projection='3d')\n x = angles2\n y = angles1\n X, Y = np.meshgrid(x, y)\n Z = np.abs(np.squeeze(music_spectrum))\n ax = fig.add_subplot(111, projection='3d')\n ax.set_ylabel('AoA')\n ax.set_xlabel('AoD')\n ax.set_xlim3d(-90, 90)\n ax.set_ylim3d(-90, 90)\n ax.plot_surface(X, Y, Z, rstride=2, cstride=2, cmap=cm.jet, alpha=0.7, linewidth=0.25)\n\n ''' detect the peaks corresponding to DoD and DoA '''\n detect = detect_peaks(Z)\n index_max = np.column_stack(np.where(detect))\n x_ind = index_max[:, 0]\n y_ind = index_max[:, 1]\n tab = (np.transpose(np.array((Z[x_ind, y_ind], x[x_ind], y[y_ind])))).tolist()\n tab.sort(key=lambda e: e[0], reverse=True)\n myarray = np.asarray(tab[0])\n angles = myarray[1:]\n plt.show()\n\n return angles", "def comp_output_spectra(self):\n assert(hasattr(self,'r'))\n \n self.nx=int(self.nx)\n \n r_mat=self.r.T.reshape(self.nx,self.nx,self.N)\n\n in_allfreqs = np.fft.fftshift(np.fft.fftfreq(self.nx,d=self.L/self.nx))\n \n self.freqs=in_allfreqs[self.nx/2:]\n \n 
r_dft_flat=np.fft.fftshift(np.fft.fft2(r_mat,axes=[0,1]),axes=[0,1])*(self.L/self.nx)**2\n\n r_pw=abs(r_dft_flat)**2 \n r_pw_profiles=gl.dft2d_profiles(r_pw)\n \n self.re_pw_profile=np.mean(r_pw_profiles,axis=0)\n self.he_pw_profile=self.inputs.in_mean_pw_profile", "def refractive_index_porous_silica(wavelength, porosity=0.5):\n wavelength_um = wavelength / 1000\n n = np.sqrt(1 + \\\n (0.6961663 * wavelength_um ** 2) / (\n wavelength_um ** 2 - 0.06840432 ** 2) + \\\n (0.4079426 * wavelength_um ** 2) / (\n wavelength_um ** 2 - 0.11624142 ** 2) + \\\n (0.8974794 * wavelength_um ** 2) / (\n wavelength_um ** 2 - 9.8961612 ** 2)\n )\n n_air = 1.00029\n\n n_total = np.sqrt(n ** 2 * (1 - porosity) + n_air ** 2 * (porosity)) + 0 * 1j\n\n # k0 = 5e-6\n # k1 = 5e-7\n # wavelength0 = 0.31\n # wavelength1 = 0.36\n\n # n_total = n_total + 1j*refractive_index_imaginary_silica(wavelength)*1e4\n # n_total = n_total + 1j*np.exp( np.log(k0) + np.log(k1) * (wavelength - wavelength0)/(wavelength1-wavelength0))\n\n return n_total", "def referenceIllum(temp, wavelength):\n ct=temp\n if ct <= 0:\n return 0\n if ct < 4000:\n return planckian(ct, wavelength)\n if ct < 5000:\n p=planckian(ct, wavelength)\n d=dseries(ct, wavelength)\n return p+(d-p)*(ct-4000)/1500.0\n return dseries(ct, wavelength)", "def spectral(w, s=1.0):\n n_in, n_out = w.size()\n n = max(n_out, n_in)\n gain = s / math.sqrt(n)\n return w.normal_(0, 1).mul_(gain)", "def _getscanind(self):\n \n zamin = self.za.min()\n first = np.where(self.za==zamin)[0]\n self.scan = np.zeros(self.spec.shape[0])\n if zamin < 0:\n cs = first[np.where((first - np.roll(first, 1)) != 1)[0]]\n ss = first[np.where((np.roll(first,-1) - first) != 1)[0]] + 1\n ce = ss \n se = np.roll((cs - 1) % self.za.size, -1) + 1\n for k, val in enumerate(cs):\n self.scan[val:se[k] + 1] = k\n else:\n moves = np.diff(self.za)\n max_ind = np.where(moves==moves.max())[0]\n turnover = self.za.size\n diffs = np.diff(max_ind)\n if np.unique(diffs).size > 1:\n raise ValueError, 'Can\\'t deal with non-uniform cal data yet.'\n if max_ind.size > 1:\n turnover = diffs[0]\n cs = ce = np.array([])\n ss = np.arange(self.za.size)[::turnover]\n se = np.roll((ss - 1) % self.za.size, -1)\n for k, val in enumerate(ss):\n self.scan[val:se[k] + 1] = k\n \n self.ind = {'cs': cs, 'ce': ce, 'ss': ss, 'se': se}\n self.nscan = np.unique(self.scan).size", "def coherence_from_spectral(Sw):\r\n\r\n Sxx = Sw[0, 0].real\r\n Syy = Sw[1, 1].real\r\n\r\n Sxy_mod_sq = (Sw[0, 1] * Sw[1, 0]).real\r\n Sxy_mod_sq /= Sxx\r\n Sxy_mod_sq /= Syy\r\n return Sxy_mod_sq", "def spect(self):\n return 1", "def fft_index(fft, frequency):\n\treturn 2 * int(len(fft) * frequency / AUDIO_RATE) # Not entirely clear on why I need to multiply by 2 here. 
I don't need to if I use fft instead of rfft, but then I get a bunch of crazy high frequency FFT data, or is it complex numbers or something...", "def wavelength(refractive_index, omega):\n return 2 * np.pi * cgs.c / (refractive_index * omega)", "def get_sound_index(self):\n # Return difference between the two last compared elements\n lhs = ThreadManagment.sort_data_by_thread[self.thread.ident].last_cmp_left\n #rhs = ThreadManagment.last_cmp_right_by_thread.get(self.thread.ident, 0)\n #return round((lhs + rhs) / 2)\n return lhs", "def spectrum_creator(file_name):\n file_data = read_file(file_name)\n image_data = file_data[1]\n\n segmentation_data = file_data[2]\n\n collapsed_data = image_collapser(file_name)\n\n # spectrum for central pixel\n cp_bright = []\n for key, data in collapsed_data.items():\n lgst_val = data.argmax()\n lgst_loc = unravel_index(data.argmax(), data.shape)\n cp_bright.append(lgst_loc)\n\n cp_loc = 0\n if ( cp_bright[0] == cp_bright[1] ):\n cp_loc = cp_bright[0]\n else: \n cp_loc = cp_bright[1]\n\n cp_spec_data = image_data[:][:,cp_loc[0]][:,cp_loc[1]]\n\n # spectrum as defined by the segmentation area\n curr_file_name = file_name.split('.')\n curr_file_name = curr_file_name[0].split('/')\n stk_f_n = curr_file_name[-1]\n cube_id = [int(x) for x in re.findall('\\d+', stk_f_n)][0]\n\n # locating where the galaxy pixels are from the cube_id\n seg_curr_cube = np.where(segmentation_data == cube_id)\n scc_rows, scc_cols = seg_curr_cube\n\n #np.set_printoptions(threshold=np.nan)\n #print(segmentation_data)\n\n collapsed_spectrum = np.zeros([np.shape(image_data)[0], len(scc_rows)])\n for i_r in range(len(scc_rows)):\n # I want to pull out each pixel and store it into the collapsed spectrum array\n collapsed_spectrum[:,i_r] = image_data[:,scc_rows[i_r],scc_cols[i_r]]\n \n galaxy_spectrum = np.zeros(np.shape(image_data)[0])\n for i_ax in range(len(galaxy_spectrum)):\n galaxy_spectrum[i_ax] = np.nansum(collapsed_spectrum[i_ax])\n \n return {'central': cp_spec_data, 'galaxy': galaxy_spectrum, \n 'segmentation': segmentation_data}", "def refractive_index(self, theta):\n sin2th = np.sin(theta)**2\n cos2th = np.cos(theta)**2\n\n A = self.S * sin2th + self.P * cos2th\n B = self.R * self.L * sin2th + self.P * self.S * (1 + cos2th)\n F = np.sqrt(((self.R * self.L - self.P * self.S) * sin2th)**2\n + (2 * self.P * self.D)**2 * cos2th) # contents can never be negative\n n_fast = np.sqrt((B - F) / (2 * A))\n n_slow = np.sqrt((B + F) / (2 * A))\n return np.concatenate((n_fast[...,np.newaxis], n_slow[...,np.newaxis]), axis=-1)", "def stZCR(frame):\n count = len(frame)\n countZ = np.sum(np.abs(np.diff(np.sign(frame)))) / 2\n return (np.float64(countZ) / np.float64(count-1.0))", "def calc_elv_spectra(self, red, comp, src):\n if ((src in red.data.keys())\n & (src in red.data.keys())):\n # check that the wavelenth grids are identical\n delt_wave = red.data[src].waves - comp.data[src].waves\n if np.sum(np.absolute(delt_wave)) > 0.01*u.micron:\n warnings.warn(\"wavelength grids not equal for %s\" % src,\n UserWarning)\n else:\n # reference band\n red_V = red.data['BAND'].get_band_mag('V')\n comp_V = comp.data['BAND'].get_band_mag('V')\n\n # setup the needed variables\n self.waves[src] = red.data[src].waves\n n_waves = len(self.waves[src])\n self.exts[src] = np.zeros(n_waves)\n self.uncs[src] = np.zeros(n_waves)\n self.npts[src] = np.zeros(n_waves)\n\n # only compute the extinction for good, positive fluxes\n print(comp.data[src].npts)\n print(comp.data[src].fluxes)\n indxs, = 
np.where((red.data[src].npts > 0)\n & (comp.data[src].npts > 0)\n & (red.data[src].fluxes.value > 0)\n & (comp.data[src].fluxes.value > 0))\n self.exts[src][indxs] = \\\n (-2.5*np.log10(red.data[src].fluxes[indxs]\n / comp.data[src].fluxes[indxs])\n + (comp_V[0] - red_V[0]))\n self.uncs[src][indxs] = np.sqrt(\n np.square(_flux_unc_as_mags(red.data[src].fluxes[indxs],\n red.data[src].uncs[indxs]))\n + np.square(_flux_unc_as_mags(comp.data[src].fluxes[indxs],\n comp.data[src].uncs[indxs]))\n + np.square(red_V[1])\n + np.square(comp_V[1]))\n self.npts[src][indxs] = np.full(len(indxs), 1)", "def stZCR(frame):\n count = len(frame)\n countZ = numpy.sum(numpy.abs(numpy.diff(numpy.sign(frame)))) / 2\n return (numpy.float64(countZ) / numpy.float64(count-1.0))", "def savi(self,\n img):\n return (img.select(['NIR']).subtract(img.select(['RED'])).multiply(1 + self.const))\\\n .divide(img.select(['NIR']).add(img.select(['RED'])).add(self.const))\\\n .select([0], ['SAVI']).multiply(self.scale_factor).toInt16()", "def extract_mel_spectrogram(wav_path, X, y, index, curr_speaker_num):\r\n Sxx = spectrogram_converter.mel_spectrogram(wav_path)\r\n for i in range(Sxx.shape[0]):\r\n for j in range(Sxx.shape[1]):\r\n X[index, 0, i, j] = Sxx[i, j]\r\n y[index] = curr_speaker_num\r\n return 1", "def calc(self, wavelength):\n if wavelength < self.minWavelength or wavelength > self.maxWavelength:\n return 0\n mm=wavelength%self.interval\n s=self._calcd(wavelength-mm)\n if mm==0:\n return s\n m=mm*1.0/self.interval\n e=self._calcd((wavelength-mm)+self.interval)\n return s+(e-s)*m", "def refractive_index(self):\n wd = np.arange(80,820,10)\n nd = self.boundary.imat.refractive_index(wd) \n\n plt.plot(wd, nd)\n\n return wd, nd", "def create_spectrum(warr, farr, earr=None, sub_cont=False):\n spec=Spectrum.Spectrum(warr, farr, earr, stype='continuum')\n #normalize the spectra to 5500\n n5500 = np.interp(5500, spec.wavelength, spec.flux)\n spec.flux = spec.flux/n5500\n if earr is not None:\n spec.var = spec.var/n5500\n #add in continuum subtraction\n if sub_cont:\n coef = np.polyfit(spec.wavelength, spec.flux, 9)\n spec.flux = spec.flux - np.polyval(coef, spec.wavelength) \n return spec", "def findSpectralAxis(img):\n if (type(img) == str):\n myia = createCasaTool(iatool)\n myia.open(img)\n else:\n myia = img\n mycs = myia.coordsys()\n try:\n iax = mycs.findaxisbyname(\"spectral\")\n except:\n print \"ERROR: can't find spectral axis. 
Assuming it is 3.\"\n iax = 3\n mycs.done()\n return iax", "def offset(freqs, re0, im0):\n return re0 + 1j * im0", "def to_spectral_img(data):\n assert data.size(-1) == 2\n\n spectral_vol = torch.zeros([data.size(-2), data.size(-2), data.size(-2)])\n\n for i in range(data.size(-2)):\n kspc1 = torch.zeros(data.size())\n kspc1[:, i, :] = data[:, i, :]\n img1 = ifft2(kspc1)\n img1_abs = complex_abs(img1)\n\n spectral_vol[i, :, :] = img1_abs\n\n return spectral_vol", "def calc(self,index, counter_values):\n try:\n angles = self.ik220_dev.read_attribute('Angles').value\n if index == 9:\n return sum(angles[:3])/3.0 # Excluded channel 4 of grating pitch encoder because of problems of Homing in the last header of the RON grating encoder.\n elif index == 10:\n return sum(angles[4:6])/2.0 # Modified from 4 channels to 2 channels because of problems of Homing in the 2 last headers of the RON mirror3 encoder.\n else:\n return angles[index - 1]\n except:\n return 1e-100", "def calc_rsi(image):\n\n # roll axes to conventional row,col,depth\n img = np.rollaxis(image, 0, 3)\n\n # bands: Coastal(0), Blue(1), Green(2), Yellow(3), Red(4), Red-edge(5), NIR1(6), NIR2(7)) Multispectral\n COAST = img[:, :, 0]\n B = img[:, :, 1]\n G = img[:, :, 2]\n Y = img[:, :, 3]\n R = img[:, :, 4]\n RE = img[:, :, 5]\n NIR1 = img[:, :, 6]\n NIR2 = img[:, :, 7]\n\n arvi = old_div((NIR1 - (R - (B - R))), (NIR1 + (R - (B - R))))\n dd = (2 * NIR1 - R) - (G - B)\n gi2 = (B * -0.2848 + G * -0.2434 + R * -0.5436 + NIR1 * 0.7243 + NIR2 * 0.0840) * 5\n gndvi = old_div((NIR1 - G), (NIR1 + G))\n ndre = old_div((NIR1 - RE), (NIR1 + RE))\n ndvi = old_div((NIR1 - R), (NIR1 + R))\n ndvi35 = old_div((G - R), (G + R))\n ndvi84 = old_div((NIR2 - Y), (NIR2 + Y))\n nirry = old_div((NIR1), (R + Y))\n normnir = old_div(NIR1, (NIR1 + R + G))\n psri = old_div((R - B), RE)\n rey = old_div((RE - Y), (RE + Y))\n rvi = old_div(NIR1, R)\n sa = old_div(((Y + R) * 0.35), 2) + old_div((0.7 * (NIR1 + NIR2)), 2) - 0.69\n vi1 = old_div((10000 * NIR1), (RE) ** 2)\n vire = old_div(NIR1, RE)\n br = (old_div(R, B)) * (old_div(G, B)) * (old_div(RE, B)) * (old_div(NIR1, B))\n gr = old_div(G, R)\n rr = (old_div(NIR1, R)) * (old_div(G, R)) * (old_div(NIR1, RE))\n\n ###Built-Up indices\n wvbi = old_div((COAST - RE), (COAST + RE))\n wvnhfd = old_div((RE - COAST), (RE + COAST))\n\n ###SIs\n evi = old_div((2.5 * (NIR2 - R)), (NIR2 + 6 * R - 7.5 * B + 1))\n L = 0.5 # some coefficient for Soil Adjusted Vegetation Index (SAVI) DO NOT INCLUDE IN FEATURES\n savi = old_div(((1 + L) * (NIR2 - R)), (NIR2 + R + L))\n msavi = old_div((2 * NIR2 + 1 - ((2 * NIR2 + 1) ** 2 - 8 * (NIR2 - R)) ** 0.5), 2)\n bai = old_div(1.0, ((0.1 + R) ** 2 + 0.06 + NIR2))\n rgi = old_div(R, G)\n bri = old_div(B, R)\n\n rsi = np.stack(\n [arvi, dd, gi2, gndvi, ndre, ndvi, ndvi35, ndvi84, nirry, normnir, psri, rey, rvi, sa, vi1, vire, br, gr, rr,\n wvbi, wvnhfd, evi, savi, msavi, bai, rgi, bri],\n axis=2)\n\n return rsi", "def wave_samples(self):\n return self._quantized_subsamples", "def getSpectralEnergyFrame(datatype, traceList, outfile, channelStart, channelEnd, winlen=1000):\r\n assert(datatype in ['mat', 'segy']) \r\n if datatype=='segy':\r\n st = obspy.Stream(traceList) \r\n else:\r\n raise Exception('not implemented')\r\n sampleRate = traceList[0].stats.sampling_rate\r\n\r\n wlen = 256\r\n nfft = int(_nearest_pow_2(wlen))\r\n npts = len(st[0].data)\r\n per_lap = 0.9\r\n if nfft > npts:\r\n nfft = int(_nearest_pow_2(npts / 8.0))\r\n nlap = int(nfft * float(per_lap))\r\n\r\n nTraces = 
len(traceList)\r\n nperlen = len(traceList[0].data)\r\n if winlen>=nperlen:\r\n nFrames=1\r\n else:\r\n nFrames = int(nperlen/winlen)\r\n\r\n print ('sample rate is ', sampleRate, 'nfft=', nfft, 'noverlap', nlap)\r\n\r\n for iframe in range(nFrames): \r\n Emat = None\r\n for itr in range(0,nTraces):\r\n F,T,SXX = signal.spectrogram(np.array(st[itr].data[iframe*winlen:(iframe+1)*winlen]), fs=sampleRate, \r\n window='hann', nfft=nfft)\r\n #sum along frequency axis \r\n energy = np.sum((SXX[1:,:]/np.max(SXX[1:,:])),axis=0)\r\n #energy = np.abs(np.log10(np.abs(energy/np.max(energy)))*10.0)\r\n #energy = np.log10(energy)*10.0\r\n if Emat is None:\r\n Emat = np.zeros((nTraces, len(T)))\r\n Emat[itr,:]=energy\r\n \r\n #datafile = 'spectralenergy_{0}_ch{1}_{2}.npy'.format(outfile,channelStart,channelEnd)\r\n #np.save(datafile,Emat)\r\n #scale to 0 255\r\n print (Emat.max())\r\n Emat = (255.0 / Emat.max() * (Emat - Emat.min())).astype(np.uint8)\r\n im = Image.fromarray(Emat, 'L')\r\n imgfile = 'spectralenergy_{0}_ch{1}_{2}_{3}.png'.format(outfile,channelStart,channelEnd,iframe) \r\n im.save(imgfile)\r\n histogram = im.histogram()\r\n imgfile = 'spectralhist_{0}_ch{1}_{2}_{3}.png'.format(outfile,channelStart,channelEnd,iframe) \r\n plt.figure()\r\n plt.plot(histogram)\r\n plt.savefig(imgfile)", "def _get_spectrograms(self, index):\n file = self._waves[index]\n\n # get hyper-parameters\n hp = self.hparams\n\n w, _ = lr.load(file, sr=hp.sr)\n w, _ = lr.effects.trim(w) # triming\n\n linear = audio.wave2spec(w, hp)\n\n return linear, w", "def amplitude_diff(config, i):\n flipped = jax.ops.index_mul(config, jax.ops.index[:, [i, (i + 1) % N]], -1)\n logpsi_flipped = log_amplitude(model, flipped)\n return jnp.exp(logpsi_flipped - logpsi)", "def amplitude_diff(config, i):\n flipped = jax.ops.index_mul(config, jax.ops.index[:, i], -1)\n logpsi_flipped = log_amplitude(model, flipped)\n return jnp.exp(logpsi_flipped - logpsi)", "def spectate(self):\n pass", "def modulation_index(phase: np.ndarray, amplitude: np.ndarray) -> float:\n indices = indices_of_binned_phase(phase, num_bins=12)\n avg_amps = np.array([np.median(amplitude[idx]) for idx in indices],\n dtype=np.float64)\n return _modulation_index(avg_amps)", "def constract(phase, magnitude):\n new_spectrum = magnitude * np.exp(1j * phase)\n\n # reverse the shift and FFT\n f_ishift = np.fft.ifftshift(new_spectrum)\n img_back = np.fft.ifft2(f_ishift)\n \n return np.abs(img_back)", "def calc_spectra(stream, data_type):\n \n import numpy as np\n from mtspec import mtspec\n from scipy import interpolate\n from scipy.stats import binned_statistic \n\n # Read in file \n tr = stream[0]\n data = tr.data\n delta = tr.stats.delta\n samprate = tr.stats.sampling_rate\n npts = tr.stats.npts\n \n # Determine nyquist frequency\n nyquist = 0.5 * samprate\n \n\n # Calc spectra amplitudes and frequencies \n # Switched number of tapers from 7 to 5. Decreases computation time and\n # results are similar\n amp_squared, freq = mtspec(data, delta=delta, time_bandwidth=4, \n number_of_tapers=5, nfft=npts, quadratic=True)\n \n # Convert from power spectra to amplitude spectra\n amp = np.sqrt(amp_squared)\n \n # Use scipy interpolate function to fill in data in missing bins\n f = interpolate.interp1d(freq, amp)\n freq_new = np.arange(np.min(freq), np.max(freq), 0.0001)\n amp_new = f(freq_new)\n\n # Remove certain frequencies that are too low or high. 
\n indexes = []\n \n for i, val in enumerate(freq_new):\n \n # Remove frequencies below 1/2 length of record\n if val <= 1/(delta*npts*0.5) :\n indexes.append(i)\n \n # Remove frequencies above 10 Hz for sm data because of the way it was processed \n elif val > 10 and data_type == 'sm':\n indexes.append(i)\n\n # Remove frequencies above nyquist frequency for disp data\n # (it's already removed in the previous step for sm data)\n elif val > nyquist and data_type == 'disp': \n indexes.append(i)\n \n # Remove any duplicate indexes\n indexes = np.unique(indexes)\n freq_new = np.delete(freq_new,indexes)\n amp_new = np.delete(amp_new,indexes) \n \n # Set up bins\n if data_type == 'sm':\n # Starting bins at 0.004 Hz (that is about equal to half the length\n # of the record for the synthetic and observed data) and ending at\n # 10 Hz because after that the sm data is unusable due to how it was\n # processed. \n bins = np.logspace(np.log10(0.004), np.log10(10), num=21)\n \n elif data_type == 'disp':\n # Starting bins at 0.004 Hz (that is about equal to half the length\n # of the record for the synthetic and observed data) and ending at\n # 0.5 Hz because that is the nyquist frequency .\n bins = np.logspace(np.log10(0.004), np.log10(0.5), num=21)\n \n bin_means, bin_edges, binnumber = binned_statistic(freq_new,\n amp_new,\n statistic='mean',\n bins=bins)\n \n # for i in range(len(bin_means)):\n # bin_means[i] = 10**bin_means[i]\n \n \n return(bin_means, freq, amp)", "def band_penalty(self):\n fc_ix = np.argmin(np.abs(self.f - self.fc)) # Index to frequency array closes to center frequency\n # Number of indexes on each side of center frequency, not extending outside, only up to 10 kHz\n n = min(fc_ix, self.ix10k - fc_ix)\n if n == 0:\n return 0.0\n return np.mean(np.square(self.fr[fc_ix - n:fc_ix] - self.fr[fc_ix + n - 1:fc_ix - 1:-1]))", "def calculate_dark_current(image, i, int_time):\n dark_data_dir = r'F:\\TEMPO\\Data\\GroundTest\\FPS\\Integration_Sweep\\Dark'\n data_path_name_split = image.split('_')\n #print(data_path_name_split)\n all_int_files = [each for each in os.listdir(dark_data_dir) \\\n if each.endswith('_'+data_path_name_split[-1])] \n print(all_int_files)\n \n dark_data_file = os.path.join(dark_data_dir, all_int_files[0])\n IDL_variable = readsav(dark_data_file) \n all_full_frame = IDL_variable.q \n quad = all_full_frame[:, i, :, :]\n active_quad = np.mean(quad[:, 4:1028, 10:1034], axis=0) \n tsoc = np.mean(quad[:, 4:1028, 1034:1056], axis=0)\n bias_subtracted_quad = perform_bias_subtraction_ave(active_quad, tsoc)\n smear_subtracted_quad, smear_signal = perform_smear_subtraction(bias_subtracted_quad[10:1000, :], int_time)\n return smear_subtracted_quad", "def get_refractive_index(freq,medium):\n epsi_t=get_Permittivty_from_Medium(freq,medium)\n epsi=reduce_matrix_to_scalar(epsi_t)\n return cmath.sqrt(epsi)", "def spectralwhitening(st):\n \n for trace in arange(len(st)):\n data = st[trace].data\n \n n = len(data)\n nfft = nextpow2(n)\n \n spec = fft(data, nfft)\n spec_ampl = sqrt(abs(multiply(spec, conjugate(spec))))\n \n spec /= spec_ampl #Do we need to do some smoothing here?\n ret = real(ifft(spec, nfft)[:n])\n \n st[trace].data = ret\n \n return st", "def update_idx(self):\n self.idx = (self.F * self.FMUL +\n self.E * self.EMUL +\n self.Z * self.ZMUL +\n self.A * self.AMUL +\n self.B * self.BMUL )", "def mod_ave2(z):\r\n x = np.mean(np.sin(TAU*z), 0) # col ave\r\n y = np.mean(np.cos(TAU*z), 0) # col ave\r\n phi = np.arctan(x/y) / TAU\r\n calc = (phi + np.where(y < 0, -0.5, 0) + 
0.5) % 1 - 0.5\r\n return calc", "def spectral_check(self, ):\r\n a, b = self.dfa, self.dfm.copy()\r\n b['ts_a']=a.ts\r\n b['flux_a'] = a.flux\r\n b['dflux'] = (b.flux-b.flux_a)/b.flux_unc\r\n b['eflux100_a'] = a.eflux100\r\n b['deflux'] = (b.eflux100-b.eflux100_a)/b.eflux100_unc\r\n b['pindex_a'] = a.pindex\r\n b['gdelta'] = (b.pindex-b.pindex_a)/b.pindex_unc\r\n self.dfm = b # since copy\r\n\r\n fig,axx = plt.subplots(1,2, figsize=(10,5), sharey=True)\r\n hkw = dict(bins=np.linspace(-5,5,51), histtype='step', lw=2, density=True)\r\n\r\n cut = (b.ts>50) & ~pd.isnull(b.deflux) & ~pd.isnull(b.gdelta) &\\\r\n (b.modelname==\"LogParabola\") & (b.pindex<3) & (b.pindex>0.5) &\\\r\n (b.e0>500) &(b.eflux100_unc>0) &(b.pindex_unc>0)\r\n self.check_total = sum(cut)\r\n for ax, title, val in zip(axx.flatten(), ['Energy Flux', 'Spectral index'], [b.deflux, b.gdelta]): \r\n\r\n df=val[cut]\r\n ax.hist(df.clip(-5,5), label='mean {:5.2f}\\nstd {:5.2f}'.format(df.mean(),df.std()), **hkw);\r\n ax.grid(alpha=0.5); \r\n x=np.linspace(-4,4)\r\n ax.plot(x, stats.norm.pdf(x), '--g' );\r\n ax.set(xlabel='normalized fit deviation', title=title, )\r\n ax.legend(loc='upper left',prop=dict(family='monospace'))\r\n fig.suptitle('Normalized devations of fit from model', fontsize=16);\r\n\r\n return fig", "def get_frequency(frame):\n frame = clip_centre(frame)\n frame = auto_correlate(frame)\n threshold: int = SAMPLE_RATE // 500\n lag = frame[threshold:].argmax()\n frequency = SAMPLE_RATE / lag\n return frequency", "def calc_ind(sel_lines):\n\n print()\n print(\"CALCULATING INDICES\")\n print(\"-------------------\")\n\n # remove duplicates of ind_id and gives a list of selected indices\n sel_ind = list(set(sel_lines['ind_id']))\n sel_ind = np.asarray(sel_ind)\n\n index = {}\n index['index'] = []\n index['value'] = []\n index['error'] = []\n index['flg'] = []\n index['mfrac_neg'] = []\n index['snr'] = []\n\n print(\"index\\tvalue\\terror\\t\\tsnr\\tflag\\tmfrac_neg\")\n print(\"-----\\t-----\\t-----\\t\\t---\\t----\\t---------\")\n\n ind_ids = np.asarray(sel_lines['ind_id'])\n rows = len(sel_lines['ln_id'])\n for i in range(len(sel_ind)): # each index\n\n var = [sel_lines['ind_var'][k] for k in range(rows) \\\n if ind_ids[k] == sel_ind[i]]\n flux = [sel_lines['flux'][k] for k in range(rows) \\\n if ind_ids[k] == sel_ind[i]]\n err = [sel_lines['error'][k] for k in range(rows) \\\n if ind_ids[k] == sel_ind[i]]\n flg = [sel_lines['flg'][k] for k in range(rows) \\\n if ind_ids[k] == sel_ind[i]]\n frac_neg = [sel_lines['frac_neg'][k] for k in range(rows) \\\n if ind_ids[k] == sel_ind[i]]\n snr = [sel_lines['snr'][k] for k in range(rows) \\\n if ind_ids[k] == sel_ind[i]]\n ln_c = [sel_lines['ln_c'][k] for k in range(rows) \\\n if ind_ids[k] == sel_ind[i]]\n\n # Maximum fraction of flux with negative values of all lines in index\n mfrac_neg = max(frac_neg)\n\n if \"negFlux\" in flg: flg_ind = 'negFlux'\n else: flg_ind = None\n\n # Median snr of index bandpasses:\n if snr is None or snr[0] is None:\n snr_ind = None\n else:\n snr_ind = np.median(snr)\n\n for k in range(len(var)):\n if 'L' not in var[k] and 'R' not in var[k]:\n msg=\"*** ERROR: 'ind_var' variable (in config file config_lines.txt) must start with either an 'L' for core line or 'R' for reference line. 
Value given was '{}'\".format(var[k])\n sys.exit(msg)\n\n # Add line variables for numerator or denominator:\n num = [ln_c[k]*flux[k] for k in range(len(var)) if 'L' in var[k]]\n num_err = [ln_c[k]*err[k] for k in range(len(var)) if 'L' in var[k]]\n denom = [ln_c[k]*flux[k] for k in range(len(var)) if 'R' in var[k]]\n denom_err = [ln_c[k]*err[k] for k in range(len(var)) if 'R' in var[k]]\n\n num = np.asarray(num)\n denom = np.asarray(denom)\n num_err = np.asarray(num_err)\n denom_err = np.asarray(denom_err)\n\n ind = sum(num) / sum(denom)\n\n # Error using propagation of errors for lines and ref lines\n ind_err = np.sqrt(sum(num_err**2) + ind**2 * sum(denom_err**2)) /sum(denom)\n\n if snr_ind: snr_ind = round(snr_ind, 2)\n\n index['index'].append(sel_ind[i])\n index['value'].append(ind)\n index['error'].append(ind_err)\n index['flg'].append(flg_ind)\n index['mfrac_neg'].append(mfrac_neg)\n index['snr'].append(snr_ind)\n\n print(\"{}\\t{:.4f}\\t{:.6f}\\t{}\\t{}\\t{:.4f}\".format(index['index'][i], index['value'][i], index['error'][i], index['snr'][i], index['flg'][i], index['mfrac_neg'][i]))\n\n return index", "def DW_cal(data, data_sm):\n n = len(data)\n numerator = 0\n denominator = 0\n for i in range(n):\n if i == 0:\n numerator = numerator + 0\n else:\n numerator = numerator + ((data[i] - data_sm[i]) - (data[i-1] - data_sm[i-1]))**2\n denominator = denominator + (data[i] - data_sm[i])**2\n return numerator/denominator*n/(n - 1)", "def spectral_diff_matrix(n, xmin=0, xmax=2*np.pi):\n\n h = 2 * np.pi / n\n kk = np.arange(1, n)\n n1 = int(np.floor((n - 1) / 2))\n n2 = int(np.ceil((n - 1) / 2))\n if np.mod(n, 2) == 0:\n topc = 1 / np.tan(np.arange(1, n2 + 1) * h / 2)\n temp = np.concatenate((topc, -np.flip(topc[0:n1])))\n else:\n topc = 1 / np.sin(np.arange(1, n2 + 1) * h / 2)\n temp = np.concatenate((topc, np.flip(topc[0:n1])))\n\n col1 = np.concatenate(([0], 0.5 * ((-1) ** kk) * temp))\n row1 = -col1\n D = 2 * np.pi / (xmax - xmin) * toeplitz(col1, r=row1)\n return D", "def integrate_spectrum(self):\n flux = sum(self.spectrum)\n return flux", "def sincbroad(w, s, hwhm):\n \"\"\"\n History\n -------\n Dec-90 GB,GM\n Rewrote with fourier convolution algorithm.\n Jul-91 AL\n Translated from ANA to IDL.\n 22-Sep-91 JAV\n Relaxed constant dispersion check# vectorized, 50% faster.\n 05-Jul-92 JAV\n Converted to function, handle nonpositive hwhm.\n 14-Nov-93 JAV\n Adapted from macbro.pro\n 23-Apr-93 JAV\n Verified that convolution kernel has specified hwhm. For IR FTS\n spectra: hwhm=0.0759 Angstroms, max change in profile is 0.4% of continuum.\n Oct-18 AW\n Python Version\n \"\"\"\n\n # Warn user if hwhm is negative.\n if hwhm < 0:\n logger.warning(\"Forcing negative smoothing width to zero.\")\n\n # Return input argument if half-width is nonpositive.\n if hwhm <= 0:\n return s # true: no broadening\n\n # Calculate (uniform) dispersion.\n nw = len(w) ## points in spectrum\n dw = (w[-1] - w[0]) / (nw - 1) # wavelength change per pixel\n\n # Make sinc function out to 20th zero-crossing on either side. Error due to\n # ignoring additional lobes is less than 0.2% of continuum. 
Reducing extent\n # to 10th zero-crossing doubles maximum error.\n fwhm = 2.0 * hwhm # full width at half maximum\n rperfw = 0.26525 # radians per fwhm of sinc\n xrange = 20 * np.pi # 20th zero of sinc (radians)\n wrange = xrange * fwhm * rperfw # 20th zero of sinc (wavelength)\n nhalf = int(wrange / dw + 0.999) ## points in half sinc\n nsinc = 2 * nhalf + 1 ## points in sinc (odd!)\n wsinc = (np.arange(nsinc, dtype=float) - nhalf) * dw # absissca (wavelength)\n xsinc = wsinc / (fwhm * rperfw) # absissca (radians)\n xsinc[nhalf] = 1.0 # avoid divide by zero\n sinc = np.sin(xsinc) / xsinc # calculate sinc\n sinc[nhalf] = 1.0 # insert midpoint\n xsinc[nhalf] = 0.0 # fix xsinc\n sinc = sinc / np.sum(sinc) # normalize sinc\n\n # Pad spectrum ends to minimize impact of Fourier ringing.\n sout = convolve(s, sinc, mode=\"nearest\")\n\n return sout", "def stEnergy(frame):\n return np.sum(frame ** 2) / np.float64(len(frame))", "def stimulus_response_coherence(filename, segment_length):\n data, stimulus, sampling_interval, time = load_data(filename)\n nyquist = 1./(sampling_interval * 2.)\n f_step = 1./(sampling_interval * segment_length)\n f = np.arange(0, nyquist + f_step, f_step)\n noOfSamples = data.shape[0]\n noOfSegments = int(np.floor(noOfSamples/segment_length))\n kernel = gauss_kernel(0.001, 1./sampling_interval, 0.01)\n window = np.hanning(segment_length)\n coherence_spectra = np.zeros((segment_length, data.shape[1]), dtype=np.complex_)\n exp_coherence_spectra = np.zeros((segment_length, data.shape[1]), dtype=np.complex_)\n # we will need the psth for the expected coherence \n psth = np.zeros(data.shape[0])\n for i in range(data.shape[1]):\n psth = psth + np.convolve(data[:,i], kernel, mode='same') * (1./sampling_interval)\n psth = psth/data.shape[1]\n # go and calculate the spectra\n for i in range(data.shape[1]):\n trace = data[:,i]/sampling_interval\n trace = np.convolve(trace, kernel, mode=\"same\")\n f_resp = np.zeros((segment_length, noOfSegments), dtype=np.complex_)\n f_psth = np.zeros((segment_length, noOfSegments), dtype=np.complex_)\n f_stim = np.zeros((segment_length, noOfSegments), dtype=np.complex_)\n for n in range(noOfSegments):\n start\t= n * segment_length\n end \t= start + segment_length\n resp_segment = trace[start:end]\n resp_segment = resp_segment - np.mean(resp_segment)\n resp_segment = resp_segment * window\n psth_segment = psth[start:end]\n psth_segment = psth_segment - np.mean(psth_segment)\n psth_segment = psth_segment * window\n stim_segment = stimulus[start:end]\n stim_segment = stim_segment - np.mean(stim_segment)\n stim_segment = stim_segment * window\n \n f_resp[:, n] = np.fft.fft(resp_segment, segment_length)\n f_stim[:, n] = np.fft.fft(stim_segment, segment_length)\n f_psth[:, n] = np.fft.fft(psth_segment, segment_length)\n\n f_resp_conj = np.conjugate(f_resp) # complex conjugate spectrum of response segments\n f_stim_conj = np.conjugate(f_stim) # complex conjugate spectra of stimulus segments\n f_psth_conj = np.conjugate(f_psth) # complex conjugate spectra of psth segments\n\n sr_cross_spectrum = np.mean(f_stim_conj * f_resp, axis=1) # cross spectrum S*R\n ss_auto_spectrum = np.mean(f_stim_conj * f_stim, axis=1) # auto spectrum S*S\n\n rs_cross_spectrum = np.mean(f_resp_conj * f_stim, axis=1) # cross spectrum R*S\n rr_auto_spectrum = np.mean(f_resp_conj * f_resp, axis=1) # auto spectrum R*R\n \n pr_cross_spectrum = np.mean(f_psth_conj * f_resp, axis=1) # cross spectrum PSTH*R\n pp_auto_spectrum = np.mean(f_psth_conj * f_psth, axis=1) # auto spectrum 
PSTH*PSTH\n rp_cross_spectrum = np.mean(f_resp_conj * f_psth, axis=1) # cross spectrum R*PSTH\n \n coherence_spectra[:, i] = (sr_cross_spectrum * rs_cross_spectrum) / (ss_auto_spectrum * rr_auto_spectrum)\n exp_coherence_spectra[:, i] = (pr_cross_spectrum * rp_cross_spectrum) / (pp_auto_spectrum * rr_auto_spectrum)\n \n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.plot(f, np.mean(coherence_spectra[:len(f),:], axis=1), color='dodgerblue', label=\"r-s coherence\")\n ax.plot(f, np.mean(exp_coherence_spectra[:len(f),:], axis=1), color='silver', label=\"r-r coherence\")\n ax.set_xlim([0, 300])\n ax.set_ylim([0, 1])\n ax.set_xlabel('frequency [Hz]')\n ax.set_ylabel('coherence')\n ax.legend(fontsize=9)\n plt.show()", "def delta(self):\r\n return 1 - xl.Refractive_Index_Re(self.compound, self.energy, self.density)", "def calculate_index_and_derivative(wl):\n index = np.sqrt(1 + (0.6961663 * wl * wl) / (wl * wl - 0.0684043 * 0.0684043)\n + (0.4079426 * wl * wl) / (wl * wl - 0.1162414 * 0.1162414)\n + (0.8974794 * wl * wl) / (wl * wl - 9.896161 * 9.896161)\n )\n\n index_derivative = \\\n (\n - (1.79496 * wl * wl * wl) / (pow(-97.934 + wl * wl, 2))\n + (1.79496 * wl) / (-97.934 + wl * wl)\n\n - (0.815885 * wl * wl * wl) / (pow(-0.0135121 + wl * wl, 2))\n + (0.815885 * wl) / (-0.0135121 + wl * wl)\n\n - (1.39233 * wl * wl * wl) / (pow(-0.00467915 + wl * wl, 2))\n + (1.39233 * wl) / (-0.00467915 + wl * wl)\n ) \\\n / \\\n (2 * np.sqrt(\n 1\n + (0.897479 * wl * wl) / (-97.934 + wl * wl)\n + (0.407943 * wl * wl) / (-0.0135121 + wl * wl)\n + (0.696166 * wl * wl) / (-0.00467915 + wl * wl)\n )\n )\n return index, index_derivative", "def warm_region_cal(audio_samples, fs):\n #window the audio\n windowed_samples = timbral_util.window_audio(audio_samples)\n\n # need to define a function for the roughness stimuli, emphasising the 20 - 40 region (of the bark scale)\n min_bark_band = 10\n max_bark_band = 40\n mean_bark_band = (min_bark_band + max_bark_band) / 2.0\n array = np.arange(min_bark_band, max_bark_band)\n x = timbral_util.normal_dist(array, theta=0.01, mean=mean_bark_band)\n x -= np.min(x)\n x /= np.max(x)\n\n wr_array = np.zeros(240)\n wr_array[min_bark_band:max_bark_band] = x\n\n # need to define a second array emphasising the 20 - 40 region (of the bark scale)\n min_bark_band = 80\n max_bark_band = 240\n mean_bark_band = (min_bark_band + max_bark_band) / 2.0\n array = np.arange(min_bark_band, max_bark_band)\n x = timbral_util.normal_dist(array, theta=0.01, mean=mean_bark_band)\n x -= np.min(x)\n x /= np.max(x)\n\n hf_array = np.zeros(240)\n hf_array[min_bark_band:max_bark_band] = x\n\n windowed_loud_spec = []\n windowed_rms = []\n\n wr_vals = []\n hf_vals = []\n\n for i in range(windowed_samples.shape[0]):\n samples = windowed_samples[i, :]\n N_entire, N_single = timbral_util.specific_loudness(samples, Pref=100.0, fs=fs, Mod=0)\n\n # append the loudness spec\n windowed_loud_spec.append(N_single)\n windowed_rms.append(np.sqrt(np.mean(samples * samples)))\n\n wr_vals.append(np.sum(wr_array * N_single))\n hf_vals.append(np.sum(hf_array * N_single))\n\n mean_wr = np.mean(wr_vals)\n mean_hf = np.mean(hf_vals)\n weighted_wr = np.average(wr_vals, weights=windowed_rms)\n weighted_hf = np.average(hf_vals, weights=windowed_rms)\n\n return mean_wr, weighted_wr, mean_hf, weighted_hf", "def calcumul_index(path,x,name_champ_label,indice2,list_drop,pathlist_names_feature):\n sql=sqlite3.connect(path)\n df=pd.read_sql_query(\"SELECT * FROM output\", sql)\n df=df.groupby(\"originfid\").mean()\n if 'band' in 
df.columns[6] :\n globals()[\"df%s\"%x]=col_sqlite(path,x,list_drop,pathlist_names_feature)\n label = globals()[\"df%s\"%x][name_champ_label]\n globals()[\"%s\"%x]=globals()[\"df%s\"%x].astype(float)\n print(indice2)\n if indice2 not in ['NDVI', 'NDWI','SM','asc_vv','des_vv','asc_vh','des_vh','asc_userfeature1','des_userfeature1']:\n name_indice=indice2\n band1_indice=input(\"band ? :\")\n band2_indice=input(\"band ? :\")\n df_b1 = globals()[\"%s\"%x].filter(like=band1_indice)\n df_b2 = globals()[\"%s\"%x].filter(like=band2_indice)\n df_b1_col = df_b1.rename(columns=lambda x: x[-8:])\n df_b2_col = df_b2.rename(columns=lambda x: x[-8:])\n df_indice = (df_b2_col - df_b1_col)/(df_b2_col + df_b1_col)\n globals()[\"df_%s\"%indice2] = df_indice.cumsum(axis=1)\n \n else:\n df_indice = globals()[\"df%s\"%x].filter(like=indice2)\n df_indice_col = df_indice.rename(columns=lambda x: x[-8:])\n df_indice_col=df_indice_col.iloc[:-1]\n globals()[\"df_%s\"%indice2] = df_indice_col.cumsum(axis=1)\n \n globals()[\"df_%s\"%indice2][name_champ_label]=label\n globals()[\"df_%s\"%indice2]=globals()[\"df_%s\"%indice2].astype(float)\n globals()[\"df_mean_%s\"%indice2]=globals()[\"df_%s\"%indice2].groupby(name_champ_label).mean().T \n globals()[\"df_mean_%s\"%indice2].index=pd.to_datetime(globals()[\"df_mean_%s\"%indice2].index,format=\"%Y%m%d\")\n else :\n label = df[name_champ_label]\n print(indice2)\n if indice2 not in ['ndvi', 'ndwi','asc_vv','des_vv','asc_vh','des_vh','asc_userfeature1','des_userfeature1','SM']:\n name_indice=indice2\n band1_indice=input(\"band ? :\")\n band2_indice=input(\"band ? :\")\n df_b1 = df.filter(like=band1_indice)\n df_b2 = df.filter(like=band2_indice)\n df_b1_col = df_b1.rename(columns=lambda x: x[-8:])\n df_b2_col = df_b2.rename(columns=lambda x: x[-8:])\n df_indice = (df_b2_col - df_b1_col)/(df_b2_col + df_b1_col)\n globals()[\"df_%s\"%indice2] = df_indice.cumsum(axis=1)\n \n else:\n df_indice = df.filter(like=indice2)\n df_indice_col = df_indice.rename(columns=lambda x: x[-8:])\n globals()[\"df_%s\"%indice2] = df_indice_col.cumsum(axis=1)\n \n globals()[\"df_%s\"%indice2][name_champ_label]=label\n globals()[\"df_mean_%s\"%indice2]=globals()[\"df_%s\"%indice2].groupby(name_champ_label).mean().T\n globals()[\"df_mean_%s\"%indice2].index=pd.to_datetime(globals()[\"df_mean_%s\"%indice2].index,format=\"%Y%m%d\")\n return globals()[\"df_mean_%s\"%indice2], globals()[\"df_%s\"%indice2]", "def freqdomain(self):\n \n\n #self.df = self.f[1] - self.f[0]\n #frequency vector\n #fv = fftshift(fftfreq(len(eta),1./fs))\n #fv = fv[len(fv)/2:]\n \n #spectral analysis\n self.sn1 = self.espec1(self.n1)\n self.sn2 = self.espec1(self.n2)\n self.sn3 = self.espec1(self.n3)\n self.sn12 = self.espec2(self.n1,self.n2)\n self.sn13 = self.espec2(self.n1,self.n3)\n self.sn23 = self.espec2(self.n2,self.n3)\n \n #delta freq\n self.df = self.f[3] - self.f[2]\n\n #calculo do numero de onda\n #self.wavenumber()\n #k = numeronda(h,f,len(f))\n #k = np.array(k)\n\n #calculo dos coeficientes de fourier - NDBC 96_01 e Steele (1992)\n c = self.sn2[:,1] + self.sn3[:,1]\n cc = np.sqrt(self.sn1[:,1] * (c))\n \n self.a1 = self.sn12[:,3] / cc\n self.b1 = self.sn13[:,3] / cc\n \n self.a2 = (self.sn2[:,1] - self.sn3[:,1]) / c\n self.b2 = 2 * self.sn12[:,2] / c\n \n #calcula direcao de onda\n #mean direction\n self.dire1 = np.array([np.angle(np.complex(self.b1[i],self.a1[i]),deg=True) for i in range(len(self.a1))])\n \n #principal direction\n self.dire2 = 0.5 * np.array([np.angle(np.complex(self.b2[i],self.a2[i]),deg=True) for 
i in range(len(self.a2))])\n \n #condicao para valores maiores que 360 e menores que 0\n self.dire1[np.where(self.dire1 < 0)] = self.dire1[np.where(self.dire1 < 0)] + 360\n self.dire1[np.where(self.dire1 > 360)] = self.dire1[np.where(self.dire1 > 360)] - 360\n self.dire2[np.where(self.dire2 < 0)] = self.dire2[np.where(self.dire2 < 0)] + 360\n self.dire2[np.where(self.dire2 > 360)] = self.dire2[np.where(self.dire2 > 360)] - 360\n \n #acha o indice da frequencia de pico\n ind = np.where(self.sn1[:,1] == np.max(self.sn1[:,1]))[0]\n \n #periodo de pico\n self.tp = (1. / self.f[ind])[0]\n \n #momento espectral de ordem zero total - m0\n self.m0 = np.sum(self.sn1[:,1]) * self.df\n \n #calculo da altura significativa\n self.hm0 = 4.01 * np.sqrt(self.m0)\n \n #direcao do periodo de pico\n self.dp = self.dire1[ind][0]\n \n #Espalhamento direcional\n #Formula do sigma1 do livro Tucker&Pitt(2001) \"Waves in Ocean Engineering\" pags 196-198\n c1 = np.sqrt(self.a1 ** 2 + self.b1 ** 2)\n c2 = np.sqrt(self.a2 ** 2 + self.b2 ** 2)\n \n s1 = c1 / (1-c1)\n s2 = (1 + 3 * c2 + np.sqrt(1 + 14 * c2 + c2 ** 2)) / (2 * (1 - c2))\n \n self.sigma1 = np.sqrt(2 - 2 * c1) * 180 / np.pi\n self.sigma2 = np.sqrt((1 - c2) / 2) * 180 / np.pi\n \n self.sigma1p = np.real(self.sigma1[ind])[0]\n self.sigma2p = np.real(self.sigma2[ind])[0]\n \n # pondaf = np.array([hm0, tp, dp, sigma1p, sigma2p])\n \n #hm0, tp, dp, sigma1, sigma2, sigma1p, sigma2p, f, df, k, sn, snx, sny, snn, snnx, snny, snxny, snxnx, snyny, a1, b1, a2, b2, dire1, dire2\n #return hm0, tp, dp, sigma1, sigma2, sigma1p, sigma2p, f, df, k, sn, snx, sny, snn, snnx, snny, snxny, snxnx, snyny, a1, b1, a2, b2, dire1, dire2", "def stEnergy(frame):\n return numpy.sum(frame ** 2) / numpy.float64(len(frame))", "def stitchSpectra(lamb_list,count_list, method=\"scale\", edgeremove=(0, 0), shiftToPositive=False, dlambda=None):\r\n rawData=np.array([np.array(lamb_list),np.array(count_list)])\r\n rawData=rawData.swapaxes(0,1)\r\n coefficients = []\r\n print(\"Removing edges for stitching:\", *edgeremove)\r\n omitRight = rawData[0].shape[1] - math.floor(rawData[0].shape[1] * edgeremove[1])\r\n print(\"Stitching index range is \", 0, omitRight)\r\n processed = np.array(rawData[0][:, 0:omitRight]) \r\n if dlambda is None:\r\n dlambda = math.fabs(processed[0, 1] - processed[0, 0]) ## lambda steps of first spectrum are kept\r\n for i, spec in enumerate(rawData[1:]):\r\n omitLeft = math.floor(spec.shape[1] * edgeremove[0])\r\n omitRight = spec.shape[1] - math.floor(spec.shape[1] * edgeremove[1])\r\n print(\"Stitching index range is \", omitLeft, omitRight)\r\n if i == len(rawData)-2:\r\n spec = np.array(spec[:, omitLeft:]) ## do not shorten last array at end\r\n else:\r\n spec = np.array(spec[:, omitLeft:omitRight]) # shorten middle arrays at both sides\r\n print(\"Stitching spectrum in range\", np.min(spec[0,]), np.max(spec[0,]))\r\n # calculate overlap\r\n overlap = (np.min(spec[0,]), np.max(processed[0,])) \r\n #lambdas = np.arange(*overlap, dlambda)\r\n #leftfun = interp1d(processed[0,], processed[1,])\r\n #rightfun = interp1d(spec[0,], spec[1,])\r\n left = np.mean(processed[1, processed[0,] > overlap[0]]) ##mean of counts of overlap\r\n right = np.mean(spec[1, spec[0,] < overlap[1]])\r\n if method == \"shift\":\r\n # calculate offset in overlap region\r\n offset = left - right\r\n print(\"Stitching offset %s in overlap\", offset, *overlap)\r\n # add shifted spectrum\r\n spec[1,] = spec[1,] + offset\r\n coefficients.append(offset)\r\n elif method == \"scale\":\r\n # calculate 
factor in overlap region\r\n factor = left/right\r\n print(\"Stitching factor\"+str(factor)+\" in overlap \", *overlap)\r\n spec[1,] = spec[1,] * factor\r\n coefficients.append(factor)\r\n processed = np.concatenate([processed, spec], axis=1)\r\n # interpolate data on grid\r\n interpolated = interp1d(processed[0,], processed[1,])\r\n lambdas = np.arange(processed[0, 0], processed[0, -1], dlambda)\r\n specdata = interpolated(lambdas)\r\n # shift above 0\r\n if shiftToPositive:\r\n minimum = np.min(specdata)\r\n if minimum < 0:\r\n specdata += math.fabs(minimum)\r\n \r\n return (lambdas,specdata,coefficients)", "def spectral_roll_on(sign, fs):\n output = 0\n f, ff = plotfft(sign, fs)\n cum_ff = np.cumsum(ff)\n value = 0.05*(sum(ff))\n\n for i in range(len(ff)):\n if cum_ff[i] >= value:\n output = f[i]\n break\n return output", "def refractive_index_glass(wavelength, type='soda-lime-low-iron'):\n\n if type.lower() == 'soda-lime-low-iron':\n wavelength = wavelength / 1000\n n = 1.5130 - 0.003169 * wavelength ** 2 + 0.003962 * wavelength ** -2 + 0 * 1j\n\n # n[wavelength < 0.3] = n[wavelength < 0.3] + 1j*0\n elif type.upper() == 'BK7':\n wavelength = wavelength / 1000\n n = np.sqrt(1 + \\\n (1.03961212 * wavelength ** 2) / (\n wavelength ** 2 - 0.00600069867) + \\\n (0.231792344 * wavelength ** 2) / (\n wavelength ** 2 - 0.0200179144) + \\\n (1.01046945 * wavelength ** 2) / (\n wavelength ** 2 - 103.560653)\n )\n\n return n", "def wvd(fx,nh=2**8-1,tstep=2**5,nfbins=2**10,df=1.0):\r\n \r\n if type(fx) is list:\r\n fx=np.array(fx)\r\n try:\r\n fn,fm=fx.shape\r\n if fm>fn:\r\n fm,fn=fx.shape\r\n except ValueError:\r\n fn=len(fx)\r\n fm=1\r\n if fm>1:\r\n fn=fn[0]\r\n print 'computing cross spectra'\r\n #compute the analytic signal of function f and dctrend\r\n fa=sps.hilbert(dctrend(fx[0]))\r\n fb=sps.hilbert(dctrend(fx[1]))\r\n fa=fa.reshape(fn)\r\n fb=fb.reshape(fn)\r\n else:\r\n #compute the analytic signal of function f and dctrend\r\n fa=sps.hilbert(dctrend(fx))\r\n fa=fa.reshape(fn)\r\n fb=fa.copy()\r\n \r\n #sampling period\r\n df=float(df)\r\n dt=1./df\r\n tau=(nh-1)/2\r\n \r\n #create a time array such that the first point is centered on time window\r\n tlst=np.arange(start=0,stop=fn-1,step=tstep,dtype='int')\r\n \r\n #create an empty array to put the tf in \r\n tfarray=np.zeros((nfbins,len(tlst)),dtype='complex128')\r\n \r\n #create a frequency array with just positive frequencies\r\n flst=np.fft.fftfreq(nfbins,dt)[0:nfbins/2]\r\n \r\n #calculate pseudo WV\r\n for point,nn in enumerate(tlst):\r\n #calculate the smallest timeshift possible\r\n taun=min(nn,tau,fn-nn-1)\r\n #make a timeshift array\r\n taulst=np.arange(start=-taun,stop=taun+1,step=1,dtype='int')\r\n #calculate rectangular windowed correlation function of analytic signal\r\n Rnn=4*np.conjugate(fa[nn-taulst])*fb[nn+taulst] \r\n #calculate fft of windowed correlation function\r\n FTRnn=np.fft.fft(padzeros(Rnn,npad=nfbins))\r\n #put into tfarray\r\n tfarray[:,point]=FTRnn[::-1]\r\n \r\n #normalize\r\n tfarray=tfarray/nh\r\n \r\n return tfarray,tlst,flst", "def extract_mel_spectrogram(wav_path, X, y, index, curr_speaker_num, max_duration=None):\n Sxx = spectrogram_converter.mel_spectrogram(wav_path, max_duration)\n for i in range(Sxx.shape[0]):\n for j in range(Sxx.shape[1]):\n X[index, 0, i, j] = Sxx[i, j]\n y[index] = curr_speaker_num\n return 1", "def ssc(signal,samplerate=16000,winlen=0.025,winstep=0.01,\n nfilt=26,nfft=512,lowfreq=0,highfreq=None,preemph=0.97): \n highfreq= highfreq or samplerate/2\n signal = 
sigproc.preemphasis(signal,preemph)\n frames = sigproc.framesig(signal, winlen*samplerate, winstep*samplerate)\n pspec = sigproc.powspec(frames,nfft)\n pspec = pylab.where(pspec == 0,pylab.finfo(float).eps,pspec) # if things are all zeros we get problems\n \n fb = get_filterbanks(nfilt,nfft,samplerate,lowfreq,highfreq)\n feat = pylab.dot(pspec,fb.T) # compute the filterbank energies\n R = pylab.tile(pylab.linspace(1,samplerate/2,pylab.size(pspec,1)),(pylab.size(pspec,0),1))\n \n return pylab.dot(pspec*R,fb.T) / feat", "def music(idx, n_music=200):\n f = freqs[idx]\n Rxx = np.dot(X[:, idx], X[:, idx].H)\n lam, V = eig_sorted(Rxx)\n En = V[:, 1:] # Noise subspace for one source\n\n theta_range = np.linspace(0, 2*np.pi, n_music)\n P_music = np.zeros(n_music)\n for i in range(n_music):\n sv = ma.steering_vector(theta_range[i], f)\n vec = np.dot(En.H, ma.steering_vector(theta_range[i], f))\n P_music[i] = 1/np.linalg.norm(vec)**2\n\n vv = V[:, 0].flatten()\n print('----------')\n print('Performing MUSIC at {:.5} Hz'.format(f))\n print('-----------------------------')\n print('Steering vector subspace check:\\n')\n print('At the correct angle of {:.3}, '.format(theta*180/np.pi) +\n 'the real parts of the eigenvalues of R_xx are:')\n print('\\n'.join(' {:.3}'.format(np.real(l)) for l in lam))\n print('\\nSteering vector / eigenvector of max eigenvalue:')\n print((ma.steering_vector(theta, f) / vv).T)\n return P_music, theta_range", "def get_beat_sync_spectrums(audio):\n y, sr = core.load(audio, sr=44100)\n eql_y = EqualLoudness()(y)\n tempo, framed_dbn = self_tempo_estimation(y, sr)\n np.append(framed_dbn, np.array(len(y)/sr))\n band1 = (0, 220)\n band2 = (220, 1760)\n band3 = (1760, sr / 2)\n band1list = []\n band2list = []\n band3list = []\n for i in range(1, len(framed_dbn)):\n fft_eq = abs(np.fft.fft(eql_y[int(framed_dbn[i - 1] * sr):int(framed_dbn[i] * sr)]))\n freqs = np.fft.fftfreq(len(fft_eq), 1 / sr)\n band1list.append(np.sqrt(np.mean(sum(fft_eq[np.where(np.logical_and(freqs > band1[0], freqs < band1[1]))]**2))))\n band2list.append(np.sqrt(np.mean(sum(fft_eq[np.where(np.logical_and(freqs > band2[0], freqs < band2[1]))]**2))))\n band3list.append(np.sqrt(np.mean(sum(fft_eq[np.where(np.logical_and(freqs > band3[0], freqs < band3[1]))]**2))))\n\n band1list = np.array(band1list).transpose()\n band2list = np.array(band2list).transpose()\n band3list = np.array(band3list).transpose()\n return np.vstack([band1list, band2list, band3list])", "def getSpectralEnergy(datatype, traceList, outfile, channelStart, channelEnd):\r\n assert(datatype in ['mat', 'segy']) \r\n if datatype=='segy':\r\n st = obspy.Stream(traceList) \r\n else:\r\n raise Exception('not implemented')\r\n sampleRate = traceList[0].stats.sampling_rate\r\n #for decimated data,sampleRate should be reflected\r\n #set wlen to 0.25 sec, high pass is 250\r\n wlen = 0.5*sampleRate\r\n nfft = int(_nearest_pow_2(wlen))\r\n npts = len(st[0].data)\r\n per_lap = 0.9\r\n if nfft > npts:\r\n nfft = int(_nearest_pow_2(npts / 8.0))\r\n nlap = int(nfft * float(per_lap))\r\n\r\n nTraces = len(traceList)\r\n Emat = None\r\n print ('sample rate is ', sampleRate, 'nfft=', nfft, 'noverlap', nlap)\r\n \r\n t_ = (traceList[0].stats.endtime-traceList[0].stats.starttime)\r\n dx_ = traceList[1].stats.distance - traceList[0].stats.distance\r\n extent = [0,len(traceList)*dx_/1e3,0,t_/100.0]\r\n\r\n for itr in range(0,nTraces):\r\n #F,T,SXX = signal.spectrogram(np.array(st[itr].data), fs=sampleRate, \r\n # window='hann', nfft=nfft, mode='magnitude')\r\n F,T,SXX = 
signal.spectrogram(np.array(st[itr].data), fs=sampleRate, \r\n window='hann', nfft=nfft)\r\n #sum along frequency axis \r\n #energy = np.sum((SXX[1:,:]/np.max(SXX[1:,:])),axis=0)\r\n energy = np.sum(SXX[1:,:],axis=0)\r\n #energy = np.log10(np.abs(energy/np.max(energy)))*10.0\r\n energy = np.log10(energy)*10.0\r\n if Emat is None:\r\n Emat = np.zeros((nTraces, len(T)))\r\n Emat[itr,:]=energy\r\n if DEBUG:\r\n plt.figure()\r\n im = plt.imshow(Emat,extent=extent)\r\n plt.colorbar(im)\r\n plt.savefig('spectralenergy{0}_ch{1}_{2}.png'.format(outfile,channelStart,channelEnd))\r\n plt.close()", "def internal_wave_KE(U, V, z, bin_idx, wl_min, wl_max, bin_size):\n \n \n Uspeci = []\n Vspeci = []\n Uspec = []\n Vspec = []\n Upowi = []\n Vpowi = []\n Upower = []\n Vpower = []\n U = U**2\n V = V**2\n \n sp = np.nanmean(np.gradient(z, axis=0))\n \n U_mx, U_kx = specGrid(U[bin_idx[0,:],0], sp, bin_size)\n \n for Ui, Vi in zip(U.T, V.T):\n \n for binIn in bin_idx:\n Uspec1 = SpectrumGen(Ui[binIn], bin_size)\n Upowi.append(power_spec(Uspec1))\n Uspeci.append(Uspec1)\n Vspec1 = SpectrumGen(Vi[binIn], bin_size)\n Vpowi.append(power_spec(Vspec1))\n Vspeci.append(Vspec1)\n \n Uspeci = np.vstack(Uspeci)\n Vspeci = np.vstack(Vspeci)\n Upowi = np.vstack(Upowi)\n Vpowi = np.vstack(Vpowi)\n \n Uspec.append(Uspeci)\n Vspec.append(Vspeci)\n Upower.append(Upowi)\n Vpower.append(Vpowi)\n Uspeci = []\n Vspeci = []\n Upowi = []\n Vpowi = []\n \n # integrate Power Spec of U and V between chosen vertical wavelengths\n Uint = []\n Vint = []\n \n for Us, Vs in zip(Upower, Vpower):\n Ui = np.vstack([power_int_smart(binIn,\\\n U_mx, wl_min, wl_max) for binIn in Us])\n Vi = np.vstack([power_int_smart(binIn,\\\n U_mx, wl_min, wl_max) for binIn in Vs])\n Uint.append(Ui)\n Vint.append(Vi)\n \n Ui = []\n Vi = []\n \n \n Uint = np.hstack(Uint)\n Vint = np.hstack(Vint)\n \n Ek = 0.5*(Uint + Vint)\n \n return Ek, Upower, Vpower, U_kx, Uspec, Vspec", "def reassignedSmethod(fx,nh=2**7-1,tstep=2**4,nfbins=2**9,df=1.0,alpha=4,\r\n thresh=.01,L=5): \r\n \r\n# if type(fx) is list:\r\n# fx=np.array(fx)\r\n# try:\r\n# fn,fm=fx.shape\r\n# if fm>fn:\r\n# fm,fn=fx.shape\r\n# except ValueError:\r\n# fn=len(fx)\r\n# fm=1\r\n# if fm>1:\r\n# print 'computing cross spectra'\r\n# #compute the analytic signal of function f and dctrend\r\n# #fa=sps.hilbert(dctrend(fx[0]))\r\n# #fb=sps.hilbert(dctrend(fx[1]))\r\n# fa=fx[0]\r\n# fb=fx[1]\r\n# fa=fa.reshape(fn)\r\n# fb=fb.reshape(fn)\r\n# else:\r\n# fa=fx\r\n# fa=fa.reshape(fn)\r\n# fb=fa.copy()\r\n\r\n \r\n nx=len(fx) \r\n \r\n #compute gaussian window\r\n h=gausswin(nh,alpha=alpha)\r\n #h=np.hanning(nh)\r\n lh=(nh-1)/2\r\n \r\n #compute ramp window\r\n th=h*np.arange(start=-lh,stop=lh+1,step=1)\r\n \r\n #compute derivative of window\r\n dh=dwindow(h)\r\n \r\n #make a time list of indexes\r\n tlst=np.arange(start=0,stop=nx,step=tstep)\r\n nt=len(tlst)\r\n \r\n #make frequency list for plotting\r\n flst=np.fft.fftfreq(nfbins,1./df)[:nfbins/2]\r\n \r\n #initialize some time-frequency arrays\r\n tfh=np.zeros((nfbins,nt),dtype='complex128')\r\n tfth=np.zeros((nfbins,nt),dtype='complex128')\r\n tfdh=np.zeros((nfbins,nt),dtype='complex128')\r\n \r\n #compute components for reassignment\r\n for ii,tt in enumerate(tlst):\r\n #create a time shift list\r\n tau=np.arange(start=-min([np.round(nx/2.),lh,tt-1]),\r\n stop=min([np.round(nx/2.),lh,nx-tt-1])+1)\r\n #compute the frequency spots to be calculated\r\n ff=np.remainder(nfbins+tau,nfbins)\r\n #make lists of data points for each window calculation\r\n 
xlst=tt+tau\r\n hlst=lh+tau\r\n normh=np.sqrt(np.sum(abs(h[hlst])**2))\r\n tfh[ff,ii]=fx[xlst]*h[hlst].conj()/normh\r\n tfth[ff,ii]=fx[xlst]*th[hlst].conj()/normh\r\n tfdh[ff,ii]=fx[xlst]*dh[hlst].conj()/normh\r\n \r\n #compute Fourier Transform\r\n spech=np.fft.fft(tfh,axis=0)\r\n specth=np.fft.fft(tfth,axis=0)\r\n specdh=np.fft.fft(tfdh,axis=0)\r\n \r\n #get only positive frequencies\r\n spech=spech[nfbins/2:,:]\r\n specth=specth[nfbins/2:,:]\r\n specdh=specdh[nfbins/2:,:]\r\n \r\n #check to make sure no spurious zeros floating around\r\n szf=np.where(abs(spech)<1.E-6)\r\n spech[szf]=0.0+0.0j\r\n zerofind=np.nonzero(abs(spech))\r\n twspec=np.zeros((nfbins/2,nt),dtype='float')\r\n dwspec=np.zeros((nfbins/2,nt),dtype='float')\r\n twspec[zerofind]=np.round(np.real(specth[zerofind]/spech[zerofind]))\r\n dwspec[zerofind]=np.round(np.imag((nfbins/2.)*specdh[zerofind]/\r\n spech[zerofind])/(np.pi))\r\n \r\n #get shape of spectrogram\r\n nf,nt=spech.shape\r\n \r\n #-----calculate s-method-----\r\n Llst=np.arange(start=-L/2+1,stop=L/2+1,step=1,dtype='int')\r\n\r\n #make and empty array of zeros\r\n sm=np.zeros_like(spech)\r\n \r\n #put values where L cannot be value of L, near top and bottom\r\n sm[0:L/2,:]=abs(spech[0:L/2,:])**2\r\n sm[-L/2:,:]=abs(spech[-L/2:,:])**2\r\n\r\n #calculate s-method\r\n for ff in range(L/2,nf-L/2-1):\r\n sm[ff,:]=2*np.real(np.sum(spech[ff+Llst,:]*spech[ff-Llst,:].conj(),\r\n axis=0))/L\r\n \r\n #------compute reassignment----- \r\n\r\n \r\n rtfarray=np.zeros((nfbins/2,nt))\r\n \r\n threshold=thresh*np.max(abs(sm))\r\n \r\n for nn in range(nt):\r\n for kk in range(nf):\r\n if abs(spech[kk,nn])>threshold:\r\n #get center of gravity index in time direction from spectrogram \r\n nhat=int(nn+twspec[kk,nn])\r\n nhat=int(min([max([nhat,1]),nt-1]))\r\n #get center of gravity index in frequency direction from spec\r\n khat=int(kk-dwspec[kk,nn])\r\n khat=int(np.remainder(np.remainder(khat-1,nfbins/2)+nfbins/2,\r\n nfbins/2))\r\n rtfarray[khat,nhat]=rtfarray[khat,nhat]+abs(sm[kk,nn])\r\n else:\r\n rtfarray[kk,nn]=rtfarray[kk,nn]+sm[kk,nn]\r\n\r\n #place values where L cannot be L \r\n rtfarray[:L/2,:]=abs(sm[:L/2,:])\r\n rtfarray[-L/2:,:]=abs(sm[-L/2:,:])\r\n \r\n tz=np.where(rtfarray==0)\r\n rtfarray[tz]=1.0\r\n \r\n tz=np.where(sm==0.0)\r\n sm[tz]=1.0 \r\n \r\n #scale\r\n rtfarray=abs(rtfarray)\r\n \r\n return rtfarray,tlst,flst,sm", "def test_compute_spectral_norms(self):\n\t\tdetails = self.watcher.analyze(layers=[self.second_layer], pool=False, randomize=False, plot=False, mp_fit=False, svd_method=ACCURATE_SVD)\n\n\t\t# SLOW method\n\t\ta = details.spectral_norm.to_numpy()\n\t\tself.assertAlmostEqual(a[0],20.2149, places=3)\n\t\tself.assertAlmostEqual(a[1],24.8158, places=3)\n\t\tself.assertAlmostEqual(a[2],19.3795, places=3)", "def snv(spectra):\n\n return (spectra - np.mean(spectra, axis=0)) / np.std(spectra, axis=0)", "def comp_amplification_index(self):\n \n self.grid_tuning_in=self.inputs.grid_tuning_in\n self.grid_tuning_out=gl.comp_grid_tuning_index(self.L,self.nx,(self.r[0:self.n_e**2,:]).T) \n self.grid_tuning_out_inhib=gl.comp_grid_tuning_index(self.L,self.nx,(self.r[self.n_e**2:,:]).T)\n\n self.grid_amp_index=self.grid_tuning_out/self.grid_tuning_in", "def dseries(temp, wavelength):\n if wavelength < 300 or wavelength > 830:\n return 0\n mm=wavelength%10\n s=_dseriesd(temp, wavelength-mm)\n if mm==0:\n return s\n m=mm*0.1\n e=_dseriesd(temp, (wavelength-mm)+10)\n return s+(e-s)*m", "def sincint(x, nres, speclist) :\n\n dampfac = 3.25*nres/2.\n ksize = 
int(21*nres/2.)\n if ksize%2 == 0 : ksize +=1\n nhalf = ksize//2 \n\n #number of output and input pixels\n nx = len(x)\n nf = len(speclist[0][0])\n\n # integer and fractional pixel location of each output pixel\n ix = x.astype(int)\n fx = x-ix\n\n # outputs\n outlist=[]\n for spec in speclist :\n if spec[1] is None :\n outlist.append([np.full_like(x,0),None])\n else :\n outlist.append([np.full_like(x,0),np.full_like(x,0)])\n\n for i in range(len(x)) :\n xkernel = np.arange(ksize)-nhalf - fx[i]\n # in units of Nyquist\n xkernel /= (nres/2.)\n u1 = xkernel/dampfac\n u2 = np.pi*xkernel\n sinc = np.exp(-(u1**2)) * np.sin(u2) / u2\n sinc /= (nres/2.)\n\n lobe = np.arange(ksize) - nhalf + ix[i]\n vals = np.zeros(ksize)\n vars = np.zeros(ksize)\n gd = np.where( (lobe>=0) & (lobe<nf) )[0]\n\n for spec,out in zip(speclist,outlist) :\n vals = spec[0][lobe[gd]]\n out[0][i] = (sinc[gd]*vals).sum()\n if spec[1] is not None : \n var = spec[1][lobe[gd]]\n out[1][i] = (sinc[gd]**2*var).sum()\n\n for out in outlist :\n if out[1] is not None : out[1] = np.sqrt(out[1])\n \n return outlist", "def spectral_spread(sign, fs):\n f, ff = plotfft(sign, fs)\n spect_centr = spectral_centroid(sign, fs)\n if not np.sum(ff):\n return 0\n else:\n return np.dot(((f-spect_centr)**2), (ff / np.sum(ff)))", "def find_saturated(spectra, saturation_limit=90000):\n\n razlika = np.abs(\n np.diff(spectra, n=1, axis=-1,\n append=spectra[:,-2][:,None]))\n\n saturated_indices = np.unique(\n np.where(razlika > saturation_limit)[0])\n\n if len(saturated_indices)==0 and np.any(spectra==0):\n print(\"No saturated spectra is found;\\n\"\n \"Please make sure to apply this function before any scaling is done!\")\n else:\n return saturated_indices", "def test_best_result(origianl_waveform):\n origianl_waveform = origianl_waveform.flatten()\n recovery_waveform = []\n audio_length = len(origianl_waveform)\n noise = np.random.random_sample((audio_length,))\n noise_list = [x / 100 for x in noise]\n noise_count = 0\n \n for n in origianl_waveform:\n difference = n - noise_list[noise_count]\n recovery_waveform.append(difference)\n noise_count += 1\n \n return np.asarray(recovery_waveform)", "def getLatestSpectrumMeasurements(self): \n return self.spectrum[len(self.spectrum)-1]", "def calculate_ndvi(self):\n self.ndvi = (self.bands[\"n\"].astype(float) - self.bands[\"r\"].astype(float)) \\\n / (self.bands[\"n\"].astype(float) + self.bands[\"r\"].astype(float))", "def band_penalty(self):\n fc_ix = np.argmin(np.abs(self.f - self.fc)) # Index to frequency array closes to center frequency\n # Number of indexes on each side of center frequency, not extending outside, only up to 10 kHz\n n = min(fc_ix, self.ix10k - fc_ix)\n if n == 0:\n return 0.0\n return np.mean(np.square(self.fr[fc_ix - n:fc_ix] - (self.gain - self.fr[fc_ix + n - 1:fc_ix - 1:-1])))", "def _computeIndex(value, slices_nb):\n \n if doImg >= 0 or doStack or doVideo:\n return value + 1\n else:\n return value % (slices_nb+1)", "def calc_flux_array(self):\n \n # First determine the associated spectrum\n self.compute_template_spectrum()\n\n # Calculate baseline counts to normalise fluxes we scan over\n # Go from 10**(bin_min)*mean up to 10**(bin_max)*mean in nbins steps\n b = self.setup_b_instance(0,add_ps_mask=True)\n mean = np.sum(b.CTB_masked_compressed[0])/len(b.CTB_masked_compressed[0])\n A_array = mean*10**np.linspace(self.bin_min,self.bin_max,self.nbins)\n\n # Array to get LLs when no profile likelihood run\n norun = np.array([1.0, 1.0, 1.0, 1.0])\n\n # Now setup and compute the 
arrays\n LL_array = np.array([]) \n A_array_short = np.array([])\n spect_array = np.array([])\n\n for i in range(len(A_array)):\n print \"on i =\",i\n # Calculate LL\n if i == 0:\n b1 = self.setup_b_instance(A_array[i],add_ps_mask=True)\n else:\n for key in b1.fixed_template_dict_nested.keys():\n b1.fixed_template_dict_nested[key] = b1.fixed_template_dict_nested[key]*A_array[i]/A_array[i-1]\n ll_val = b1.ll(norun,4,4)\n # Make triangle\n\n # Append to arrays\n LL_array = np.append(LL_array,ll_val)\n A_array_short = np.append(A_array_short,A_array[i])\n spect_array = self.spectrum*np.array(A_array_short)\n\n # Save output\n np.save(work_dir+'ScanOutput/'+self.tag+'/En_array-'+str(self.flux_array_ebin)+'.npy',self.En_center)\n np.save(work_dir+'ScanOutput/'+self.tag+'/LL_array-'+str(self.flux_array_ebin)+'.npy',LL_array)\n np.save(work_dir+'ScanOutput/'+self.tag+'/Flux_array-'+str(self.flux_array_ebin)+'.npy',spect_array)", "def one_transition_spectrum_ld(self,tr):\n \n\n ta = tr[\"ta\"] # TimeAxis\n ld = tr[\"ld\"] # linear dichroism strength\n om = tr[\"om\"] # frequency - rwa\n gg = tr[\"gg\"] # natural broadening (constant or time dependent)\n fwhm = tr[\"fwhm\"] # Additional gaussian broadening of the spectra\n sgm = fwhm/(2*numpy.sqrt(2*numpy.log(2)))\n \n if self.system._has_system_bath_coupling:\n# ct = tr[\"ct\"] # correlation function\n \n # convert correlation function to lineshape function\n #gt = self._c2g(ta,ct.data)\n gt = tr[\"gt\"]\n # calculate time dependent response\n at = numpy.exp(-gt -1j*om*ta.data)\n else:\n # calculate time dependent response\n at = numpy.exp(-1j*om*ta.data) \n \n \n if len(gg) == 1:\n gam = gg[0]\n rt = numpy.exp(gam*ta.data)\n at *= rt\n #print(\"Constant: \", rt[20], len(at))\n else:\n rt = numpy.exp((gg)*ta.data) \n at *= rt\n #print(\"Time dependent: len = \", rt[20], len(rt))\n \n if fwhm!=0.0:\n gauss = numpy.exp(-2*(numpy.pi**2)*(sgm**2)*(ta.data**2))\n at *= gauss\n \n # Fourier transform the result\n ft = ld*numpy.fft.hfft(at)*ta.step\n ft = numpy.fft.fftshift(ft)\n # invert the order because hfft is a transform with -i\n ft = numpy.flipud(ft) \n # cut the center of the spectrum\n Nt = ta.length #len(ta.data) \n return ft[Nt//2:Nt+Nt//2]", "def get_beat_sync_chroma_and_spectrum(audio, sr=None, bpm=None):\n if not isinstance(audio, np.ndarray):\n sr = 44100\n y = std.MonoLoader(filename=audio, samplerate=44100)()\n else:\n y = audio\n eql_y = std.EqualLoudness()(y)\n tempo, framed_dbn = self_tempo_estimation(y, sr, tempo=bpm)\n if framed_dbn.shape[0] % 4 == 0:\n framed_dbn = np.append(framed_dbn, np.array(len(y)/sr))\n band1 = (0, 220)\n band2 = (220, 1760)\n band3 = (1760, sr / 2)\n band1list = []\n band2list = []\n band3list = []\n chromas = []\n for i in range(1, len(framed_dbn)):\n fft_eq = abs(np.fft.fft(eql_y[int(framed_dbn[i - 1] * sr):int(framed_dbn[i] * sr)]))\n freqs = np.fft.fftfreq(len(fft_eq), 1 / sr)\n band1list.append(np.sqrt(np.mean(sum(fft_eq[np.where(np.logical_and(freqs > band1[0], freqs < band1[1]))]**2))))\n band2list.append(np.sqrt(np.mean(sum(fft_eq[np.where(np.logical_and(freqs > band2[0], freqs < band2[1]))]**2))))\n band3list.append(np.sqrt(np.mean(sum(fft_eq[np.where(np.logical_and(freqs > band3[0], freqs < band3[1]))]**2))))\n stft = abs(core.stft(y[int(framed_dbn[i - 1] * sr):int(framed_dbn[i] * sr)]))\n chroma = np.mean(feature.chroma_stft(y=None, S=stft ** 2), axis=1)\n chromas.append(chroma)\n chromas = np.array(chromas).transpose()\n band1list = np.array(band1list).transpose()\n band2list = 
np.array(band2list).transpose()\n band3list = np.array(band3list).transpose()\n return (chromas, np.vstack([band1list, band2list, band3list]))", "def spectral_roll_off(sign, fs):\n output = 0\n f, ff = plotfft(sign, fs)\n cum_ff = np.cumsum(ff)\n value = 0.95*(sum(ff))\n\n for i in range(len(ff)):\n if cum_ff[i] >= value:\n output = f[i]\n break\n return output", "def _raveled_index(self):\n return np.r_[:self.size]", "def _raveled_index(self):\n return np.r_[:self.size]", "def convolution_spectrum(self, spectrum):\n ret = []\n for i in range(len(spectrum)):\n for j in range(i+1, len(spectrum)):\n diff = abs(spectrum[j] - spectrum[i])\n if diff > 0:\n ret.append(diff)\n return ret" ]
[ "0.6287062", "0.6141436", "0.59078664", "0.58944124", "0.58218056", "0.5802506", "0.5760204", "0.5752179", "0.5743206", "0.57092756", "0.5689199", "0.5686934", "0.56437397", "0.5623954", "0.5587823", "0.5585669", "0.5536266", "0.5534484", "0.5534108", "0.5532117", "0.55112505", "0.5486775", "0.5482242", "0.54519254", "0.5440446", "0.5409895", "0.54056287", "0.539345", "0.53929806", "0.5392049", "0.5389907", "0.5375145", "0.535742", "0.534188", "0.5336566", "0.5334843", "0.53324115", "0.53315127", "0.5328719", "0.53179103", "0.5306754", "0.53062946", "0.5305302", "0.52950937", "0.5290811", "0.5285802", "0.52824205", "0.52822447", "0.52797204", "0.5276565", "0.5271981", "0.5264618", "0.52557796", "0.5247833", "0.5235981", "0.5234837", "0.52317417", "0.5216127", "0.52146685", "0.5202612", "0.5196928", "0.51961637", "0.5193672", "0.51935214", "0.5180132", "0.5177716", "0.51766014", "0.51765615", "0.5175825", "0.5172644", "0.51680654", "0.5166744", "0.51661193", "0.51652616", "0.51643854", "0.51594883", "0.51564884", "0.51559126", "0.5154441", "0.5148406", "0.51441246", "0.51392347", "0.5136036", "0.5135758", "0.5135648", "0.5135288", "0.5121605", "0.5119559", "0.5109049", "0.51041895", "0.5098556", "0.5095513", "0.50944304", "0.5087891", "0.50864905", "0.50832736", "0.5074507", "0.50726557", "0.5072091", "0.5072091", "0.5069914" ]
0.0
-1
compute spectral index after continuum subtraction
def get(self, wave, flux, **kwargs): if hasUnit(wave): _w = wave.to('AA').magnitude else: print("Warning: assuming units are in Angstroms") _w = _drop_units(wave) _f = _drop_units(flux) blue = self._get_wavelength_attrs_with_units('blue').magnitude red = self._get_wavelength_attrs_with_units('red').magnitude band = self._get_wavelength_attrs_with_units('band').magnitude nocheck = kwargs.pop('nocheck', False) not_covered = (blue[0] < _w[0]) | (red[-1] > _w[-1]) if (not_covered): if (not nocheck): raise ValueError("Spectrum does not cover this index.") else: return np.zeros(_f.shape[0]) * float('nan') else: return self._get_indice(_w, _f, blue, red, band, self.index_unit, **kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_spectral_index(self):\n try:\n return self.alpha\n except AttributeError:\n return None", "def refractive_index_fused_silica(wavelength):\n wavelength_um = wavelength / 1000\n\n A0 = 2.104025406E+00\n A1 = -1.456000330E-04\n A2 = -9.049135390E-03\n A3 = 8.801830992E-03\n A4 = 8.435237228E-05\n A5 = 1.681656789E-06\n A6 = -1.675425449E-08\n A7 = 8.326602461E-10\n\n n = np.sqrt( A0 + A1 * wavelength_um ** 4 + A2 * wavelength_um ** 2 + A3 * wavelength_um ** -2 + \\\n A4 * wavelength_um ** -4 + A5 * wavelength_um ** -6 + A6 * wavelength_um ** -8 + A7 * wavelength_um ** -10 )\n\n return n", "def index_of_refraction(self):\n return self.microsphere.index_of_refraction(self.wavelength)", "def spectral():\n c = _si.c.value\n h = _si.h.value\n hc = h * c\n two_pi = 2.0 * np.pi\n inv_m_spec = si.m**-1\n inv_m_ang = si.radian / si.m\n\n return Equivalency(\n [\n (si.m, si.Hz, lambda x: c / x),\n (si.m, si.J, lambda x: hc / x),\n (si.Hz, si.J, lambda x: h * x, lambda x: x / h),\n (si.m, inv_m_spec, lambda x: 1.0 / x),\n (si.Hz, inv_m_spec, lambda x: x / c, lambda x: c * x),\n (si.J, inv_m_spec, lambda x: x / hc, lambda x: hc * x),\n (inv_m_spec, inv_m_ang, lambda x: x * two_pi, lambda x: x / two_pi),\n (si.m, inv_m_ang, lambda x: two_pi / x),\n (si.Hz, inv_m_ang, lambda x: two_pi * x / c, lambda x: c * x / two_pi),\n (si.J, inv_m_ang, lambda x: x * two_pi / hc, lambda x: hc * x / two_pi),\n ],\n \"spectral\",\n )", "def spectra_analysis(file_name, sky_file_name): \n\n # read file name and select out the id that we are dealing with\n curr_file_name = file_name.split('.')\n curr_file_name = curr_file_name[0].split('/')\n stk_f_n = curr_file_name[-1]\n cube_id = int(re.search(r'\\d+', stk_f_n).group())\n\n # read catalogue and obtain the HST redshift estimate\n #catalogue = np.load(\"data/matched_catalogue.npy\")\n catalogue = np.load(\"data/low_redshift_catalogue.npy\")\n cat_loc = np.where(catalogue[:,0] == cube_id)[0]\n cube_info = catalogue[cat_loc][0]\n \n hst_redshift = cube_info[7]\n\n # spectra and sky noise data\n spectra_data = spectrum_creator(file_name)\n wl_soln = wavelength_solution(file_name)\n sn_data = sky_noise(sky_file_name)\n\n galaxy_data = spectra_data['galaxy']\n\n # removing baseline from data\n base = peakutils.baseline(galaxy_data, 3)\n gd_mc = galaxy_data - base\n\n # scaling sky-noise to be similar to spectra data\n gd_max = np.amax(galaxy_data)\n sn_data_max = np.amax(sn_data)\n sn_scale = gd_max / sn_data_max\n\n sn_data = sn_data * sn_scale\n\n # spectra lines\n sl = {\n 'emis': {\n '[OII]': '3727',\n 'CaK': '3933',\n 'CaH': '3968',\n 'Hdelta': '4101', \n }, \n 'abs': {'K': '3934.777',\n }\n } \n\n # we can use the redshift from the HST catalogue to define the region to search for\n # the doublet in\n\n # lower and upper bound on wavelength range\n lower_lambda = (1+hst_redshift)*3600\n upper_lambda = (1+hst_redshift)*3850\n\n # x-axis data\n data_h_range = np.linspace(wl_soln['begin'], wl_soln['end'], wl_soln['steps'])\n mask = (lower_lambda < data_h_range) & (data_h_range < upper_lambda) \n\n lambda_data = data_h_range[mask]\n flux_data = gd_mc[mask] \n \n # Finding peaks with PeakUtils\n pu_peaks = peakutils.indexes(flux_data, thres=600, thres_abs=True)\n pu_peaks_x = peakutils.interpolate(lambda_data, flux_data, pu_peaks)\n\n pu_peaks_x = np.sort(pu_peaks_x)\n pu_peaks_x = pu_peaks_x[lower_lambda < pu_peaks_x]\n pu_peaks_x = pu_peaks_x[pu_peaks_x < upper_lambda]\n \n data_dir = 'cube_results/' + stk_f_n\n if not os.path.exists(data_dir):\n 
os.mkdir(data_dir)\n\n peaks_file = open(data_dir + '/' + stk_f_n + '_peaks.txt', 'w')\n peaks_file.write(\"Peaks found on \" + str(datetime.datetime.now()) + \"\\n\\n\")\n\n peaks_file.write(\"Number Wavelength \\n\")\n for i_peak in range(len(pu_peaks_x)):\n curr_peak = pu_peaks_x[i_peak]\n peaks_file.write(str(i_peak) + \" \" + str(curr_peak) + \"\\n\")\n\n # manually selecting which peak is the [OII] peak - given in wavelength\n if (pu_peaks_x.size != 0):\n otwo_wav = float(pu_peaks_x[0]) \n otwo_acc = float(sl['emis']['[OII]'])\n\n redshift = (otwo_wav / otwo_acc) - 1\n else:\n # accepting HST redshift if cannot find peak\n redshift = hst_redshift\n\n return {'gd_shifted': gd_mc, 'sky_noise': sn_data, 'spectra': sl, 'redshift': \n redshift, 'pu_peaks': pu_peaks_x}", "def calc(self,index,counter_values):\n gr = self.grSign * self.grPitch['Value'].value\n m = self.mSign * self.mPitch['Value'].value\n \n offsetG,offsetM = self.checkOffset()\n beta = self.toRadians(gr) - (math.pi/2.0) - offsetG\n theta = (math.pi/2.0) - (self.toRadians(m)) - offsetM\n alpha = (2.0*theta) + beta\n numerator = (math.sin(alpha) + math.sin(beta))\n denominator = (self.DiffrOrder * self.look_at_grx())\n wavelength = numerator / denominator\n \n if wavelength == 0.0:\n energy_physicalmot = 0.0\n else:\n energy_physicalmot = self.hc / wavelength\n #if self.FixedM2Pit: \n Cff = math.cos(beta)/math.cos(alpha)\n if energy_physicalmot < 0 :\n #warning: wavelength se vuelve negativo ... ??????\n energy_physicalmot = energy_physicalmot *(-1) \n \n # Real Energy is equal to the energy calculated by the encoders\n # minus an offset that depends on the same energy calculated by the \n # encoders:\n # E_physicalmot = Ereal + offset\n # with offset = a*Ereal + b\n # This implies that: Ereal = (Ephysicalmot - b)/(1+a) \n a_coeff = self.EnergyDP.a_offset_coeff\n b_coeff = self.EnergyDP.b_offset_coeff\n numerator = energy_physicalmot - b_coeff\n denominator = 1 + a_coeff\n energy = numerator / denominator\n \n if index == 1:\n return energy\n elif index == 2:\n return Cff", "def spectral_decrease(sign, fs):\n f, ff = plotfft(sign, fs)\n\n k = len(ff)\n soma_num = 0\n for a in range(2, k):\n soma_num = soma_num + ((ff[a]-ff[1])/(a-1))\n\n ff2 = ff[2:]\n if not np.sum(ff2):\n return 0\n else:\n soma_den = 1 / np.sum(ff2)\n return soma_den * soma_num", "def baseline(spectra):\n\n return spectra - np.mean(spectra, axis=0)", "def spectralIndices(\n x: Union[ee.Image, ee.ImageCollection],\n index: Union[str, List[str]] = \"NDVI\",\n G: Union[float, int] = 2.5,\n C1: Union[float, int] = 6.0,\n C2: Union[float, int] = 7.5,\n L: Union[float, int] = 1.0,\n cexp: Union[float, int] = 1.16,\n nexp: Union[float, int] = 2.0,\n alpha: Union[float, int] = 0.1,\n slope: Union[float, int] = 1.0,\n intercept: Union[float, int] = 0.0,\n gamma: Union[float, int] = 1.0,\n kernel: str = \"RBF\",\n sigma: Union[float, str] = \"0.5 * (a + b)\",\n p: Union[float, int] = 2,\n c: Union[float, int] = 1.0,\n online: bool = False,\n drop: bool = False,\n) -> Union[ee.Image, ee.ImageCollection]:\n platformDict = _get_platform_STAC(x)\n\n if isinstance(sigma, int) or isinstance(sigma, float):\n if sigma < 0:\n raise Exception(f\"[sigma] must be positive! Value passed: sigma = {sigma}\")\n\n if p <= 0 or c < 0:\n raise Exception(\n f\"[p] and [c] must be positive! 
Values passed: p = {p}, c = {c}\"\n )\n\n additionalParameters = {\n \"g\": float(G),\n \"C1\": float(C1),\n \"C2\": float(C2),\n \"L\": float(L),\n \"cexp\": float(cexp),\n \"nexp\": float(nexp),\n \"alpha\": float(alpha),\n \"sla\": float(slope),\n \"slb\": float(intercept),\n \"gamma\": float(gamma),\n \"p\": float(p),\n \"c\": float(c),\n }\n\n spectralIndices = _get_indices(online)\n indicesNames = list(spectralIndices.keys())\n\n if not isinstance(index, list):\n if index == \"all\":\n index = list(spectralIndices.keys())\n elif index in [\n \"vegetation\",\n \"burn\",\n \"water\",\n \"snow\",\n \"drought\",\n \"urban\",\n \"kernel\",\n ]:\n temporalListOfIndices = []\n for idx in indicesNames:\n if spectralIndices[idx][\"type\"] == index:\n temporalListOfIndices.append(idx)\n index = temporalListOfIndices\n else:\n index = [index]\n\n for idx in index:\n if idx not in list(spectralIndices.keys()):\n warnings.warn(\n f\"Index {idx} is not a built-in index and it won't be computed!\"\n )\n else:\n\n def temporalIndex(img):\n lookupDic = _get_expression_map(img, platformDict)\n lookupDic = {**lookupDic, **additionalParameters}\n kernelParameters = _get_kernel_parameters(img, lookupDic, kernel, sigma)\n lookupDic = {**lookupDic, **kernelParameters}\n lookupDicCurated = _remove_none_dict(lookupDic)\n if all(\n band in list(lookupDicCurated.keys())\n for band in spectralIndices[idx][\"bands\"]\n ):\n return img.addBands(\n img.expression(\n spectralIndices[idx][\"formula\"], lookupDicCurated\n ).rename(idx)\n )\n else:\n warnings.warn(\n f\"This platform doesn't have the required bands for {idx} computation!\"\n )\n return img\n\n if isinstance(x, ee.imagecollection.ImageCollection):\n x = x.map(temporalIndex)\n elif isinstance(x, ee.image.Image):\n x = temporalIndex(x)\n\n if drop:\n x = x.select(index)\n\n return x", "def signal_spectral(signal, FS):\n # check inputs\n if signal is None or signal == []:\n print(\"Signal is empty.\")\n\n # ensure numpy\n signal = np.array(signal)\n # f, spectrum = st.welch_spectrum(signal, sampling_rate=FS)\n spectrum = np.fft.fft(signal, FS)[:len(signal)//2]\n f = np.fft.fftfreq(len(signal))[:len(signal)//2]\n\n cum_ff = np.cumsum(spectrum)\n spect_diff = np.diff(spectrum)\n #energy, _ = st.signal_energy(spectrum, f)[:]\n\n args, names = [], []\n\n if dict['spectral_maxpeaks']['use'] == 'yes':\n # spectral_maxpeaks\n try:\n spectral_maxpeaks = np.sum([1 for nd in range(len(spect_diff[:-1])) if (spect_diff[nd+1]<0 and spect_diff[nd]>0)])\n except:\n spectral_maxpeaks = None\n args += [spectral_maxpeaks]\n names += ['spectral_maxpeaks']\n\n # if dict['spect_var']['use'] == 'yes':\n # # spect_variation\n # try:\n # spect_var = np.convolve(energy)\n # spect_var /= np.max(np.abs(spect_var))\n # except:\n # spect_var = None\n # args += [spect_var]\n # names += ['spect_var']\n\n if dict['curve_distance']['use'] == 'yes':\n # curve_distance\n try:\n curve_distance = np.sum(np.linspace(0, cum_ff[-1], len(cum_ff)) - cum_ff)\n except:\n curve_distance = None\n args += [curve_distance]\n names += ['curve_distance']\n\n if dict['spectral_roll_off']['use'] == 'yes':\n # spectral_roll_off\n try:\n spectral_roll_off = spectral_roll(f, spectrum, cum_ff, 0.95)[0]\n except:\n spectral_roll_off = None\n args += [spectral_roll_off]\n names += ['spectral_roll_off']\n\n if dict['spectral_roll_on']['use'] == 'yes':\n # spectral_roll_on\n try:\n spectral_roll_on = spectral_roll(f, spectrum, cum_ff, 0.05)[0]\n except:\n spectral_roll_on = None\n args += [spectral_roll_on]\n 
names += ['spectral_roll_on']\n\n if dict['spectral_dec']['use'] == 'yes':\n # spectral_decrease\n try:\n spectral_dec = (1/np.sum(spectrum)) * np.sum((spectrum[:] - spectrum[1])/np.linspace(1, len(spectrum), len(spectrum),1))\n except:\n spectral_dec = None\n args += [spectral_dec]\n names += ['spectral_dec']\n\n if dict['spectral_slope']['use'] == 'yes':\n # spectral_slope\n sum_f = np.sum(f)\n len_f = len(f)\n try:\n spectral_slope = (len_f * np.dot(f, spectrum) - sum_f * np.sum(spectrum)) / (len_f * np.dot(f, f) - sum_f ** 2)\n except:\n spectral_slope = None\n args += [spectral_slope]\n names += ['spectral_slope']\n\n sum_spectrum = np.sum(spectrum)\n norm_spectrum = spectrum / sum_spectrum\n # spectral_centroid\n try:\n spectral_centroid = np.dot(f, norm_spectrum)\n except:\n spectral_centroid = None\n\n # spectral_spread\n try:\n spectral_spread = np.dot(((f - spectral_centroid) ** 2), norm_spectrum)\n except:\n spectral_spread = None\n\n if dict['spectral_spread']['use'] == 'yes':\n args += [spectral_spread]\n names += ['spectral_spread']\n\n if dict['spectral_kurtosis']['use'] == 'yes':\n # spectral_kurtosis\n try:\n spectral_kurtosis = np.sum(((f - spectral_centroid) ** 4) * norm_spectrum) / (spectral_spread**2)\n except:\n spectral_kurtosis = None\n args += [spectral_kurtosis]\n names += ['spectral_kurtosis']\n\n if dict['spectral_skewness']['use'] == 'yes':\n # spectral_skewness\n try:\n spectral_skewness = np.sum(((f - spectral_centroid) ** 3) * norm_spectrum) / (spectral_spread ** (3 / 2))\n except:\n spectral_skewness = None\n args += [spectral_skewness]\n names += ['spectral_skewness']\n\n if dict['max_frequency']['use'] == 'yes':\n # max_frequency\n try:\n max_frequency = f[np.where(cum_ff > cum_ff[-1]*0.95)[0][0]]\n except:\n max_frequency = None\n args += [max_frequency]\n names += ['max_frequency']\n\n if dict['fundamental_frequency']['use'] == 'yes':\n # fundamental_frequency\n try:\n fundamental_frequency = f[np.where(cum_ff > cum_ff[-1]*0.5)[0][0]]\n except:\n fundamental_frequency = None\n args += [fundamental_frequency]\n names += ['fundamental_frequency']\n\n # if dict['max_power_spectrum']['use'] == 'yes':\n # # max_power_spectrum\n # try:\n # max_power_spectrum = np.max(spectrum)\n # except:\n # max_power_spectrum = None\n # args += max_power_spectrum\n # names += 'max_power_spectrum'\n\n # if dict['mean_power_spectrum']['use'] == 'yes':\n # # mean_power_spectrum\n # try:\n # mean_power_spectrum = np.mean(spectrum)\n # except:\n # mean_power_spectrum = None\n # args += mean_power_spectrum\n # names += 'mean_power_spectrum'\n #\n # if dict['spectral_skewness']['use'] == 'yes':\n # try:\n # spectral_skewness = np.mean(spectrum)\n # except:\n # spectral_skewness = None\n # args += spectral_skewness\n # names += 'spectral_skewness'\n #\n # if dict['spectral_kurtosis']['use'] == 'yes':\n # try:\n # spectral_kurtosis = np.mean(spectrum)\n # except:\n # spectral_kurtosis = None\n # args += spectral_kurtosis\n # names += 'spectral_kurtosis'\n\n # if dict['spectral_hist_']['use'] == 'yes':\n # # histogram\n # try:\n # _hist = list(np.histogram(spectrum, bins=int(np.sqrt(len(spectrum))), density=True)[0])\n # except:\n # if len(signal) > 1:\n # _hist = [None] * int(np.sqrt(len(signal)))\n # else:\n # _hist = [None]\n # args += [i for i in _hist]\n # names += ['spectral_hist_' + str(i) for i in range(len(_hist))]\n\n #return utils.ReturnTuple(tuple(args), tuple(names))\n return args, names", "def diff_index_calc(oct_abund_list1, oct_abund_list2):\n rel_index_list = []\n 
abs_index_list = []\n smty_index_list = []\n for i in range(10):\n abund_data_array = sc.asarray(oct_abund_list1[i], dtype='double')\n abund_sim_array = sc.asarray(oct_abund_list2[i], dtype = 'double')\n \n # make the length of the arrays similar to each other\n if len(abund_data_array) < len(abund_sim_array):\n small_len = abund_data_array\n long_len = abund_sim_array\n else:\n small_len = abund_sim_array\n long_len = abund_data_array\n diff = len(long_len) - len(small_len) \n small_len = sc.append(small_len, [0]*diff)\n \n relative_index_vect = abs(long_len - small_len)/long_len \n rel_index_list.append(sum(relative_index_vect)/len(relative_index_vect))\n \n absolute_index_vect = abs(long_len - small_len)\n abs_index_list.append(sum(absolute_index_vect)/len(absolute_index_vect))\n \n similarity_index_vect = []\n for i in range(len(long_len)):\n similarity_index_vect.append(sc.minimum(long_len[i], small_len[i])/sc.amax([long_len[i], small_len[i]]))\n \n smty_index_list.append(sum(similarity_index_vect)/len(similarity_index_vect)) \n \n rel_index_final = sum(rel_index_list)/10\n abs_index_final = sum(abs_index_list)/10\n smty_index_final = sum(smty_index_list)/10\n \n return (rel_index_final, abs_index_final, smty_index_final)", "def get_spectral_response(wavelengths_arr, stack):\n\n resolution = 1\n for i, re_index in enumerate(stack.index):\n step_size = stack.thickness.sum() / 2 ** 17\n z0 = np.linspace(0, stack.thickness[i], round(stack.thickness[i] / step_size))\n resolution += len(z0)\n\n electric_tot_te = np.zeros([resolution, len(wavelengths_arr)], dtype=complex)\n electric_tot_tm = np.zeros([resolution, len(wavelengths_arr)], dtype=complex)\n reflectivity_te = np.zeros(len(wavelengths_arr), dtype=complex)\n reflectivity_tm = np.zeros(len(wavelengths_arr), dtype=complex)\n transmission_te = np.zeros(len(wavelengths_arr), dtype=complex)\n transmission_tm = np.zeros(len(wavelengths_arr), dtype=complex)\n index_tot = np.zeros([resolution, len(wavelengths_arr)], dtype=complex)\n theta_tot = np.zeros([len(stack.index) + 1, wavelengths_arr.size], dtype=complex)\n\n a0 = 1 # Initial amplitude of electric field going toward the coating\n b0 = 0 # Initial amplitude of electric field going back the coating (if 0, no counter propagating light)\n theta = 0 # angle of the beam with respect to the coating\n\n for i, lam in enumerate(wavelengths_arr):\n # print a progressbar in the console\n print_progressbar(i, len(wavelengths_arr), suffix = '%')\n electric_tot_te[:, i], electric_tot_tm[:, i], reflectivity_te[i], reflectivity_tm[i], transmission_te[i], \\\n transmission_tm[i], index_tot, L, theta_tot = transfer_matrix_method(stack, a0, b0, lam, theta)\n return reflectivity_te, transmission_te, 1 - (reflectivity_te + transmission_te)", "def get_index(self):\n return (np.sqrt(self.dielectric))", "def apply_electronics_gain(full_frame, difference):\n #electronics_gain_odd = [0.0601, 0.0596, 0.0604, 0.0605]\n #electronics_gain_even = [0.0602, 0.0599, 0.0605, 0.0608]\n\n electronics_gain_odd = [0.0601, 0.0596, 0.0604, 0.0605]\n electronics_gain_even = [0.0602, 0.0599, 0.0605, 0.0608]\n\n all_quads = []\n num_quads = full_frame.shape[0]\n for quads in range(0, num_quads):\n active_quad = full_frame[quads, :, :]\n if difference[quads] < 0: # Note: Difference is odd-even\n gain_even = 1/electronics_gain_even[quads]\n gain_odd = 1/electronics_gain_odd[quads]\n elif difference[quads] > 0:\n gain_even = 1/electronics_gain_odd[quads]\n gain_odd = 1/electronics_gain_even[quads]\n gain_even = 
1/electronics_gain_even[quads]\n gain_odd = 1/electronics_gain_odd[quads]\n spec_pix, spat_pix = active_quad.shape\n gain_applied_quad = np.array([[0]*spec_pix]*spat_pix)\n even_detector_active_quad = gain_even*active_quad[:, ::2]\n odd_detector_active_quad = gain_odd*active_quad[:, 1::2]\n\n gain_applied_quad = np.reshape(gain_applied_quad, (spec_pix, spat_pix))\n gain_applied_quad[:, ::2] = even_detector_active_quad\n gain_applied_quad[:, 1::2] = odd_detector_active_quad\n #print(np.max(gain_applied_quad))\n #cc\n all_quads.append(gain_applied_quad)\n #cc\n return np.array(all_quads)", "def get_experimental_spectra(mol):\n\n data = pd.read_csv(mol, sep=',')\n wavelength = data.values[:, 0]\n\n absorption = data.values[:, 1]\n\n func = interp1d(wavelength, absorption, kind='quadratic')\n wavelength_new = 1. / np.linspace(1. / wavelength.max(), 1. / wavelength.min(), 100)\n absorption_new = func(wavelength_new)\n absorption_new *= 100. / absorption_new.max()\n\n return wavelength_new, absorption_new", "def music(csi_corr, csi_target, Ntx, Nrx, d_tx, d_rx, t):\n\n In = 0\n s = phase_correction(csi_corr, csi_target)\n s_lin = (s[:, :, 0, t:t + 2].reshape(6, 2, order='F'))\n\n '''Compute the covariance matrix and the eigendecompositon'''\n R_hat = np.cov(s_lin)\n D, Q = ln.eig(R_hat)\n\n '''Sort the eigenvalues in D'''\n Do = np.abs(D)\n D = np.sort(Do)[::-1]\n I = np.argsort(Do)[::-1]\n Q = Q[:, I]\n\n ''' Compute the Number of signal that are significative'''\n T = np.cumsum(np.real(D))\n for i in range(1, 1, np.size(T)):\n if T(i) >= 0.99 * T(np.size(T)):\n In = i\n break\n\n ''' Get the signal eigenvectors'''\n In = 0 # take the first signal\n Qs = Q[:, :In]\n\n ''' Get the noise eigenvectors'''\n Qn = Q[:, In + 1:]\n\n ''' Angles at which MUSIC Pseudospectrum will be computed '''\n angles1 = np.arange(-90, 90, 1)\n angles2 = np.arange(-90, 90, 1)\n\n '''Compute steering vectors corresponding values in angles'''\n a1 = np.exp(-1.j * 2 * np.pi * d_rx * np.tensordot(arange(Nrx), sin(angles1 * np.pi / 180), 0))\n a2 = np.exp(-1.j * 2 * np.pi * d_tx * np.tensordot(arange(Ntx), sin(angles1 * np.pi / 180), 0))\n\n '''Compute MUSIC \"spectrum\" '''\n music_spectrum = np.zeros((np.size(angles1), np.size(angles2)), dtype=complex)\n for k in range(1, np.size(angles2)):\n for j in range(1, np.size(angles1)):\n K = np.kron(a1[:, j], a2[:, k])\n s = dot(K.T, Qn)\n music_spectrum[j, k] = 1 / dot(abs(s), abs(s).T)\n\n ''' compute the mesh and plot the surf of the pseudospectrum '''\n\n fig = plt.figure()\n ax = fig.gca(projection='3d')\n x = angles2\n y = angles1\n X, Y = np.meshgrid(x, y)\n Z = np.abs(np.squeeze(music_spectrum))\n ax = fig.add_subplot(111, projection='3d')\n ax.set_ylabel('AoA')\n ax.set_xlabel('AoD')\n ax.set_xlim3d(-90, 90)\n ax.set_ylim3d(-90, 90)\n ax.plot_surface(X, Y, Z, rstride=2, cstride=2, cmap=cm.jet, alpha=0.7, linewidth=0.25)\n\n ''' detect the peaks corresponding to DoD and DoA '''\n detect = detect_peaks(Z)\n index_max = np.column_stack(np.where(detect))\n x_ind = index_max[:, 0]\n y_ind = index_max[:, 1]\n tab = (np.transpose(np.array((Z[x_ind, y_ind], x[x_ind], y[y_ind])))).tolist()\n tab.sort(key=lambda e: e[0], reverse=True)\n myarray = np.asarray(tab[0])\n angles = myarray[1:]\n plt.show()\n\n return angles", "def comp_output_spectra(self):\n assert(hasattr(self,'r'))\n \n self.nx=int(self.nx)\n \n r_mat=self.r.T.reshape(self.nx,self.nx,self.N)\n\n in_allfreqs = np.fft.fftshift(np.fft.fftfreq(self.nx,d=self.L/self.nx))\n \n self.freqs=in_allfreqs[self.nx/2:]\n \n 
r_dft_flat=np.fft.fftshift(np.fft.fft2(r_mat,axes=[0,1]),axes=[0,1])*(self.L/self.nx)**2\n\n r_pw=abs(r_dft_flat)**2 \n r_pw_profiles=gl.dft2d_profiles(r_pw)\n \n self.re_pw_profile=np.mean(r_pw_profiles,axis=0)\n self.he_pw_profile=self.inputs.in_mean_pw_profile", "def refractive_index_porous_silica(wavelength, porosity=0.5):\n wavelength_um = wavelength / 1000\n n = np.sqrt(1 + \\\n (0.6961663 * wavelength_um ** 2) / (\n wavelength_um ** 2 - 0.06840432 ** 2) + \\\n (0.4079426 * wavelength_um ** 2) / (\n wavelength_um ** 2 - 0.11624142 ** 2) + \\\n (0.8974794 * wavelength_um ** 2) / (\n wavelength_um ** 2 - 9.8961612 ** 2)\n )\n n_air = 1.00029\n\n n_total = np.sqrt(n ** 2 * (1 - porosity) + n_air ** 2 * (porosity)) + 0 * 1j\n\n # k0 = 5e-6\n # k1 = 5e-7\n # wavelength0 = 0.31\n # wavelength1 = 0.36\n\n # n_total = n_total + 1j*refractive_index_imaginary_silica(wavelength)*1e4\n # n_total = n_total + 1j*np.exp( np.log(k0) + np.log(k1) * (wavelength - wavelength0)/(wavelength1-wavelength0))\n\n return n_total", "def referenceIllum(temp, wavelength):\n ct=temp\n if ct <= 0:\n return 0\n if ct < 4000:\n return planckian(ct, wavelength)\n if ct < 5000:\n p=planckian(ct, wavelength)\n d=dseries(ct, wavelength)\n return p+(d-p)*(ct-4000)/1500.0\n return dseries(ct, wavelength)", "def spectral(w, s=1.0):\n n_in, n_out = w.size()\n n = max(n_out, n_in)\n gain = s / math.sqrt(n)\n return w.normal_(0, 1).mul_(gain)", "def _getscanind(self):\n \n zamin = self.za.min()\n first = np.where(self.za==zamin)[0]\n self.scan = np.zeros(self.spec.shape[0])\n if zamin < 0:\n cs = first[np.where((first - np.roll(first, 1)) != 1)[0]]\n ss = first[np.where((np.roll(first,-1) - first) != 1)[0]] + 1\n ce = ss \n se = np.roll((cs - 1) % self.za.size, -1) + 1\n for k, val in enumerate(cs):\n self.scan[val:se[k] + 1] = k\n else:\n moves = np.diff(self.za)\n max_ind = np.where(moves==moves.max())[0]\n turnover = self.za.size\n diffs = np.diff(max_ind)\n if np.unique(diffs).size > 1:\n raise ValueError, 'Can\\'t deal with non-uniform cal data yet.'\n if max_ind.size > 1:\n turnover = diffs[0]\n cs = ce = np.array([])\n ss = np.arange(self.za.size)[::turnover]\n se = np.roll((ss - 1) % self.za.size, -1)\n for k, val in enumerate(ss):\n self.scan[val:se[k] + 1] = k\n \n self.ind = {'cs': cs, 'ce': ce, 'ss': ss, 'se': se}\n self.nscan = np.unique(self.scan).size", "def coherence_from_spectral(Sw):\r\n\r\n Sxx = Sw[0, 0].real\r\n Syy = Sw[1, 1].real\r\n\r\n Sxy_mod_sq = (Sw[0, 1] * Sw[1, 0]).real\r\n Sxy_mod_sq /= Sxx\r\n Sxy_mod_sq /= Syy\r\n return Sxy_mod_sq", "def spect(self):\n return 1", "def fft_index(fft, frequency):\n\treturn 2 * int(len(fft) * frequency / AUDIO_RATE) # Not entirely clear on why I need to multiply by 2 here. 
I don't need to if I use fft instead of rfft, but then I get a bunch of crazy high frequency FFT data, or is it complex numbers or something...", "def wavelength(refractive_index, omega):\n return 2 * np.pi * cgs.c / (refractive_index * omega)", "def get_sound_index(self):\n # Return difference between the two last compared elements\n lhs = ThreadManagment.sort_data_by_thread[self.thread.ident].last_cmp_left\n #rhs = ThreadManagment.last_cmp_right_by_thread.get(self.thread.ident, 0)\n #return round((lhs + rhs) / 2)\n return lhs", "def spectrum_creator(file_name):\n file_data = read_file(file_name)\n image_data = file_data[1]\n\n segmentation_data = file_data[2]\n\n collapsed_data = image_collapser(file_name)\n\n # spectrum for central pixel\n cp_bright = []\n for key, data in collapsed_data.items():\n lgst_val = data.argmax()\n lgst_loc = unravel_index(data.argmax(), data.shape)\n cp_bright.append(lgst_loc)\n\n cp_loc = 0\n if ( cp_bright[0] == cp_bright[1] ):\n cp_loc = cp_bright[0]\n else: \n cp_loc = cp_bright[1]\n\n cp_spec_data = image_data[:][:,cp_loc[0]][:,cp_loc[1]]\n\n # spectrum as defined by the segmentation area\n curr_file_name = file_name.split('.')\n curr_file_name = curr_file_name[0].split('/')\n stk_f_n = curr_file_name[-1]\n cube_id = [int(x) for x in re.findall('\\d+', stk_f_n)][0]\n\n # locating where the galaxy pixels are from the cube_id\n seg_curr_cube = np.where(segmentation_data == cube_id)\n scc_rows, scc_cols = seg_curr_cube\n\n #np.set_printoptions(threshold=np.nan)\n #print(segmentation_data)\n\n collapsed_spectrum = np.zeros([np.shape(image_data)[0], len(scc_rows)])\n for i_r in range(len(scc_rows)):\n # I want to pull out each pixel and store it into the collapsed spectrum array\n collapsed_spectrum[:,i_r] = image_data[:,scc_rows[i_r],scc_cols[i_r]]\n \n galaxy_spectrum = np.zeros(np.shape(image_data)[0])\n for i_ax in range(len(galaxy_spectrum)):\n galaxy_spectrum[i_ax] = np.nansum(collapsed_spectrum[i_ax])\n \n return {'central': cp_spec_data, 'galaxy': galaxy_spectrum, \n 'segmentation': segmentation_data}", "def refractive_index(self, theta):\n sin2th = np.sin(theta)**2\n cos2th = np.cos(theta)**2\n\n A = self.S * sin2th + self.P * cos2th\n B = self.R * self.L * sin2th + self.P * self.S * (1 + cos2th)\n F = np.sqrt(((self.R * self.L - self.P * self.S) * sin2th)**2\n + (2 * self.P * self.D)**2 * cos2th) # contents can never be negative\n n_fast = np.sqrt((B - F) / (2 * A))\n n_slow = np.sqrt((B + F) / (2 * A))\n return np.concatenate((n_fast[...,np.newaxis], n_slow[...,np.newaxis]), axis=-1)", "def stZCR(frame):\n count = len(frame)\n countZ = np.sum(np.abs(np.diff(np.sign(frame)))) / 2\n return (np.float64(countZ) / np.float64(count-1.0))", "def calc_elv_spectra(self, red, comp, src):\n if ((src in red.data.keys())\n & (src in red.data.keys())):\n # check that the wavelenth grids are identical\n delt_wave = red.data[src].waves - comp.data[src].waves\n if np.sum(np.absolute(delt_wave)) > 0.01*u.micron:\n warnings.warn(\"wavelength grids not equal for %s\" % src,\n UserWarning)\n else:\n # reference band\n red_V = red.data['BAND'].get_band_mag('V')\n comp_V = comp.data['BAND'].get_band_mag('V')\n\n # setup the needed variables\n self.waves[src] = red.data[src].waves\n n_waves = len(self.waves[src])\n self.exts[src] = np.zeros(n_waves)\n self.uncs[src] = np.zeros(n_waves)\n self.npts[src] = np.zeros(n_waves)\n\n # only compute the extinction for good, positive fluxes\n print(comp.data[src].npts)\n print(comp.data[src].fluxes)\n indxs, = 
np.where((red.data[src].npts > 0)\n & (comp.data[src].npts > 0)\n & (red.data[src].fluxes.value > 0)\n & (comp.data[src].fluxes.value > 0))\n self.exts[src][indxs] = \\\n (-2.5*np.log10(red.data[src].fluxes[indxs]\n / comp.data[src].fluxes[indxs])\n + (comp_V[0] - red_V[0]))\n self.uncs[src][indxs] = np.sqrt(\n np.square(_flux_unc_as_mags(red.data[src].fluxes[indxs],\n red.data[src].uncs[indxs]))\n + np.square(_flux_unc_as_mags(comp.data[src].fluxes[indxs],\n comp.data[src].uncs[indxs]))\n + np.square(red_V[1])\n + np.square(comp_V[1]))\n self.npts[src][indxs] = np.full(len(indxs), 1)", "def stZCR(frame):\n count = len(frame)\n countZ = numpy.sum(numpy.abs(numpy.diff(numpy.sign(frame)))) / 2\n return (numpy.float64(countZ) / numpy.float64(count-1.0))", "def savi(self,\n img):\n return (img.select(['NIR']).subtract(img.select(['RED'])).multiply(1 + self.const))\\\n .divide(img.select(['NIR']).add(img.select(['RED'])).add(self.const))\\\n .select([0], ['SAVI']).multiply(self.scale_factor).toInt16()", "def extract_mel_spectrogram(wav_path, X, y, index, curr_speaker_num):\r\n Sxx = spectrogram_converter.mel_spectrogram(wav_path)\r\n for i in range(Sxx.shape[0]):\r\n for j in range(Sxx.shape[1]):\r\n X[index, 0, i, j] = Sxx[i, j]\r\n y[index] = curr_speaker_num\r\n return 1", "def calc(self, wavelength):\n if wavelength < self.minWavelength or wavelength > self.maxWavelength:\n return 0\n mm=wavelength%self.interval\n s=self._calcd(wavelength-mm)\n if mm==0:\n return s\n m=mm*1.0/self.interval\n e=self._calcd((wavelength-mm)+self.interval)\n return s+(e-s)*m", "def refractive_index(self):\n wd = np.arange(80,820,10)\n nd = self.boundary.imat.refractive_index(wd) \n\n plt.plot(wd, nd)\n\n return wd, nd", "def create_spectrum(warr, farr, earr=None, sub_cont=False):\n spec=Spectrum.Spectrum(warr, farr, earr, stype='continuum')\n #normalize the spectra to 5500\n n5500 = np.interp(5500, spec.wavelength, spec.flux)\n spec.flux = spec.flux/n5500\n if earr is not None:\n spec.var = spec.var/n5500\n #add in continuum subtraction\n if sub_cont:\n coef = np.polyfit(spec.wavelength, spec.flux, 9)\n spec.flux = spec.flux - np.polyval(coef, spec.wavelength) \n return spec", "def findSpectralAxis(img):\n if (type(img) == str):\n myia = createCasaTool(iatool)\n myia.open(img)\n else:\n myia = img\n mycs = myia.coordsys()\n try:\n iax = mycs.findaxisbyname(\"spectral\")\n except:\n print \"ERROR: can't find spectral axis. 
Assuming it is 3.\"\n iax = 3\n mycs.done()\n return iax", "def offset(freqs, re0, im0):\n return re0 + 1j * im0", "def to_spectral_img(data):\n assert data.size(-1) == 2\n\n spectral_vol = torch.zeros([data.size(-2), data.size(-2), data.size(-2)])\n\n for i in range(data.size(-2)):\n kspc1 = torch.zeros(data.size())\n kspc1[:, i, :] = data[:, i, :]\n img1 = ifft2(kspc1)\n img1_abs = complex_abs(img1)\n\n spectral_vol[i, :, :] = img1_abs\n\n return spectral_vol", "def calc(self,index, counter_values):\n try:\n angles = self.ik220_dev.read_attribute('Angles').value\n if index == 9:\n return sum(angles[:3])/3.0 # Excluded channel 4 of grating pitch encoder because of problems of Homing in the last header of the RON grating encoder.\n elif index == 10:\n return sum(angles[4:6])/2.0 # Modified from 4 channels to 2 channels because of problems of Homing in the 2 last headers of the RON mirror3 encoder.\n else:\n return angles[index - 1]\n except:\n return 1e-100", "def wave_samples(self):\n return self._quantized_subsamples", "def calc_rsi(image):\n\n # roll axes to conventional row,col,depth\n img = np.rollaxis(image, 0, 3)\n\n # bands: Coastal(0), Blue(1), Green(2), Yellow(3), Red(4), Red-edge(5), NIR1(6), NIR2(7)) Multispectral\n COAST = img[:, :, 0]\n B = img[:, :, 1]\n G = img[:, :, 2]\n Y = img[:, :, 3]\n R = img[:, :, 4]\n RE = img[:, :, 5]\n NIR1 = img[:, :, 6]\n NIR2 = img[:, :, 7]\n\n arvi = old_div((NIR1 - (R - (B - R))), (NIR1 + (R - (B - R))))\n dd = (2 * NIR1 - R) - (G - B)\n gi2 = (B * -0.2848 + G * -0.2434 + R * -0.5436 + NIR1 * 0.7243 + NIR2 * 0.0840) * 5\n gndvi = old_div((NIR1 - G), (NIR1 + G))\n ndre = old_div((NIR1 - RE), (NIR1 + RE))\n ndvi = old_div((NIR1 - R), (NIR1 + R))\n ndvi35 = old_div((G - R), (G + R))\n ndvi84 = old_div((NIR2 - Y), (NIR2 + Y))\n nirry = old_div((NIR1), (R + Y))\n normnir = old_div(NIR1, (NIR1 + R + G))\n psri = old_div((R - B), RE)\n rey = old_div((RE - Y), (RE + Y))\n rvi = old_div(NIR1, R)\n sa = old_div(((Y + R) * 0.35), 2) + old_div((0.7 * (NIR1 + NIR2)), 2) - 0.69\n vi1 = old_div((10000 * NIR1), (RE) ** 2)\n vire = old_div(NIR1, RE)\n br = (old_div(R, B)) * (old_div(G, B)) * (old_div(RE, B)) * (old_div(NIR1, B))\n gr = old_div(G, R)\n rr = (old_div(NIR1, R)) * (old_div(G, R)) * (old_div(NIR1, RE))\n\n ###Built-Up indices\n wvbi = old_div((COAST - RE), (COAST + RE))\n wvnhfd = old_div((RE - COAST), (RE + COAST))\n\n ###SIs\n evi = old_div((2.5 * (NIR2 - R)), (NIR2 + 6 * R - 7.5 * B + 1))\n L = 0.5 # some coefficient for Soil Adjusted Vegetation Index (SAVI) DO NOT INCLUDE IN FEATURES\n savi = old_div(((1 + L) * (NIR2 - R)), (NIR2 + R + L))\n msavi = old_div((2 * NIR2 + 1 - ((2 * NIR2 + 1) ** 2 - 8 * (NIR2 - R)) ** 0.5), 2)\n bai = old_div(1.0, ((0.1 + R) ** 2 + 0.06 + NIR2))\n rgi = old_div(R, G)\n bri = old_div(B, R)\n\n rsi = np.stack(\n [arvi, dd, gi2, gndvi, ndre, ndvi, ndvi35, ndvi84, nirry, normnir, psri, rey, rvi, sa, vi1, vire, br, gr, rr,\n wvbi, wvnhfd, evi, savi, msavi, bai, rgi, bri],\n axis=2)\n\n return rsi", "def getSpectralEnergyFrame(datatype, traceList, outfile, channelStart, channelEnd, winlen=1000):\r\n assert(datatype in ['mat', 'segy']) \r\n if datatype=='segy':\r\n st = obspy.Stream(traceList) \r\n else:\r\n raise Exception('not implemented')\r\n sampleRate = traceList[0].stats.sampling_rate\r\n\r\n wlen = 256\r\n nfft = int(_nearest_pow_2(wlen))\r\n npts = len(st[0].data)\r\n per_lap = 0.9\r\n if nfft > npts:\r\n nfft = int(_nearest_pow_2(npts / 8.0))\r\n nlap = int(nfft * float(per_lap))\r\n\r\n nTraces = 
len(traceList)\r\n nperlen = len(traceList[0].data)\r\n if winlen>=nperlen:\r\n nFrames=1\r\n else:\r\n nFrames = int(nperlen/winlen)\r\n\r\n print ('sample rate is ', sampleRate, 'nfft=', nfft, 'noverlap', nlap)\r\n\r\n for iframe in range(nFrames): \r\n Emat = None\r\n for itr in range(0,nTraces):\r\n F,T,SXX = signal.spectrogram(np.array(st[itr].data[iframe*winlen:(iframe+1)*winlen]), fs=sampleRate, \r\n window='hann', nfft=nfft)\r\n #sum along frequency axis \r\n energy = np.sum((SXX[1:,:]/np.max(SXX[1:,:])),axis=0)\r\n #energy = np.abs(np.log10(np.abs(energy/np.max(energy)))*10.0)\r\n #energy = np.log10(energy)*10.0\r\n if Emat is None:\r\n Emat = np.zeros((nTraces, len(T)))\r\n Emat[itr,:]=energy\r\n \r\n #datafile = 'spectralenergy_{0}_ch{1}_{2}.npy'.format(outfile,channelStart,channelEnd)\r\n #np.save(datafile,Emat)\r\n #scale to 0 255\r\n print (Emat.max())\r\n Emat = (255.0 / Emat.max() * (Emat - Emat.min())).astype(np.uint8)\r\n im = Image.fromarray(Emat, 'L')\r\n imgfile = 'spectralenergy_{0}_ch{1}_{2}_{3}.png'.format(outfile,channelStart,channelEnd,iframe) \r\n im.save(imgfile)\r\n histogram = im.histogram()\r\n imgfile = 'spectralhist_{0}_ch{1}_{2}_{3}.png'.format(outfile,channelStart,channelEnd,iframe) \r\n plt.figure()\r\n plt.plot(histogram)\r\n plt.savefig(imgfile)", "def _get_spectrograms(self, index):\n file = self._waves[index]\n\n # get hyper-parameters\n hp = self.hparams\n\n w, _ = lr.load(file, sr=hp.sr)\n w, _ = lr.effects.trim(w) # triming\n\n linear = audio.wave2spec(w, hp)\n\n return linear, w", "def amplitude_diff(config, i):\n flipped = jax.ops.index_mul(config, jax.ops.index[:, [i, (i + 1) % N]], -1)\n logpsi_flipped = log_amplitude(model, flipped)\n return jnp.exp(logpsi_flipped - logpsi)", "def amplitude_diff(config, i):\n flipped = jax.ops.index_mul(config, jax.ops.index[:, i], -1)\n logpsi_flipped = log_amplitude(model, flipped)\n return jnp.exp(logpsi_flipped - logpsi)", "def modulation_index(phase: np.ndarray, amplitude: np.ndarray) -> float:\n indices = indices_of_binned_phase(phase, num_bins=12)\n avg_amps = np.array([np.median(amplitude[idx]) for idx in indices],\n dtype=np.float64)\n return _modulation_index(avg_amps)", "def spectate(self):\n pass", "def constract(phase, magnitude):\n new_spectrum = magnitude * np.exp(1j * phase)\n\n # reverse the shift and FFT\n f_ishift = np.fft.ifftshift(new_spectrum)\n img_back = np.fft.ifft2(f_ishift)\n \n return np.abs(img_back)", "def calc_spectra(stream, data_type):\n \n import numpy as np\n from mtspec import mtspec\n from scipy import interpolate\n from scipy.stats import binned_statistic \n\n # Read in file \n tr = stream[0]\n data = tr.data\n delta = tr.stats.delta\n samprate = tr.stats.sampling_rate\n npts = tr.stats.npts\n \n # Determine nyquist frequency\n nyquist = 0.5 * samprate\n \n\n # Calc spectra amplitudes and frequencies \n # Switched number of tapers from 7 to 5. Decreases computation time and\n # results are similar\n amp_squared, freq = mtspec(data, delta=delta, time_bandwidth=4, \n number_of_tapers=5, nfft=npts, quadratic=True)\n \n # Convert from power spectra to amplitude spectra\n amp = np.sqrt(amp_squared)\n \n # Use scipy interpolate function to fill in data in missing bins\n f = interpolate.interp1d(freq, amp)\n freq_new = np.arange(np.min(freq), np.max(freq), 0.0001)\n amp_new = f(freq_new)\n\n # Remove certain frequencies that are too low or high. 
\n indexes = []\n \n for i, val in enumerate(freq_new):\n \n # Remove frequencies below 1/2 length of record\n if val <= 1/(delta*npts*0.5) :\n indexes.append(i)\n \n # Remove frequencies above 10 Hz for sm data because of the way it was processed \n elif val > 10 and data_type == 'sm':\n indexes.append(i)\n\n # Remove frequencies above nyquist frequency for disp data\n # (it's already removed in the previous step for sm data)\n elif val > nyquist and data_type == 'disp': \n indexes.append(i)\n \n # Remove any duplicate indexes\n indexes = np.unique(indexes)\n freq_new = np.delete(freq_new,indexes)\n amp_new = np.delete(amp_new,indexes) \n \n # Set up bins\n if data_type == 'sm':\n # Starting bins at 0.004 Hz (that is about equal to half the length\n # of the record for the synthetic and observed data) and ending at\n # 10 Hz because after that the sm data is unusable due to how it was\n # processed. \n bins = np.logspace(np.log10(0.004), np.log10(10), num=21)\n \n elif data_type == 'disp':\n # Starting bins at 0.004 Hz (that is about equal to half the length\n # of the record for the synthetic and observed data) and ending at\n # 0.5 Hz because that is the nyquist frequency .\n bins = np.logspace(np.log10(0.004), np.log10(0.5), num=21)\n \n bin_means, bin_edges, binnumber = binned_statistic(freq_new,\n amp_new,\n statistic='mean',\n bins=bins)\n \n # for i in range(len(bin_means)):\n # bin_means[i] = 10**bin_means[i]\n \n \n return(bin_means, freq, amp)", "def band_penalty(self):\n fc_ix = np.argmin(np.abs(self.f - self.fc)) # Index to frequency array closes to center frequency\n # Number of indexes on each side of center frequency, not extending outside, only up to 10 kHz\n n = min(fc_ix, self.ix10k - fc_ix)\n if n == 0:\n return 0.0\n return np.mean(np.square(self.fr[fc_ix - n:fc_ix] - self.fr[fc_ix + n - 1:fc_ix - 1:-1]))", "def calculate_dark_current(image, i, int_time):\n dark_data_dir = r'F:\\TEMPO\\Data\\GroundTest\\FPS\\Integration_Sweep\\Dark'\n data_path_name_split = image.split('_')\n #print(data_path_name_split)\n all_int_files = [each for each in os.listdir(dark_data_dir) \\\n if each.endswith('_'+data_path_name_split[-1])] \n print(all_int_files)\n \n dark_data_file = os.path.join(dark_data_dir, all_int_files[0])\n IDL_variable = readsav(dark_data_file) \n all_full_frame = IDL_variable.q \n quad = all_full_frame[:, i, :, :]\n active_quad = np.mean(quad[:, 4:1028, 10:1034], axis=0) \n tsoc = np.mean(quad[:, 4:1028, 1034:1056], axis=0)\n bias_subtracted_quad = perform_bias_subtraction_ave(active_quad, tsoc)\n smear_subtracted_quad, smear_signal = perform_smear_subtraction(bias_subtracted_quad[10:1000, :], int_time)\n return smear_subtracted_quad", "def get_refractive_index(freq,medium):\n epsi_t=get_Permittivty_from_Medium(freq,medium)\n epsi=reduce_matrix_to_scalar(epsi_t)\n return cmath.sqrt(epsi)", "def spectralwhitening(st):\n \n for trace in arange(len(st)):\n data = st[trace].data\n \n n = len(data)\n nfft = nextpow2(n)\n \n spec = fft(data, nfft)\n spec_ampl = sqrt(abs(multiply(spec, conjugate(spec))))\n \n spec /= spec_ampl #Do we need to do some smoothing here?\n ret = real(ifft(spec, nfft)[:n])\n \n st[trace].data = ret\n \n return st", "def update_idx(self):\n self.idx = (self.F * self.FMUL +\n self.E * self.EMUL +\n self.Z * self.ZMUL +\n self.A * self.AMUL +\n self.B * self.BMUL )", "def mod_ave2(z):\r\n x = np.mean(np.sin(TAU*z), 0) # col ave\r\n y = np.mean(np.cos(TAU*z), 0) # col ave\r\n phi = np.arctan(x/y) / TAU\r\n calc = (phi + np.where(y < 0, -0.5, 0) + 
0.5) % 1 - 0.5\r\n return calc", "def spectral_check(self, ):\r\n a, b = self.dfa, self.dfm.copy()\r\n b['ts_a']=a.ts\r\n b['flux_a'] = a.flux\r\n b['dflux'] = (b.flux-b.flux_a)/b.flux_unc\r\n b['eflux100_a'] = a.eflux100\r\n b['deflux'] = (b.eflux100-b.eflux100_a)/b.eflux100_unc\r\n b['pindex_a'] = a.pindex\r\n b['gdelta'] = (b.pindex-b.pindex_a)/b.pindex_unc\r\n self.dfm = b # since copy\r\n\r\n fig,axx = plt.subplots(1,2, figsize=(10,5), sharey=True)\r\n hkw = dict(bins=np.linspace(-5,5,51), histtype='step', lw=2, density=True)\r\n\r\n cut = (b.ts>50) & ~pd.isnull(b.deflux) & ~pd.isnull(b.gdelta) &\\\r\n (b.modelname==\"LogParabola\") & (b.pindex<3) & (b.pindex>0.5) &\\\r\n (b.e0>500) &(b.eflux100_unc>0) &(b.pindex_unc>0)\r\n self.check_total = sum(cut)\r\n for ax, title, val in zip(axx.flatten(), ['Energy Flux', 'Spectral index'], [b.deflux, b.gdelta]): \r\n\r\n df=val[cut]\r\n ax.hist(df.clip(-5,5), label='mean {:5.2f}\\nstd {:5.2f}'.format(df.mean(),df.std()), **hkw);\r\n ax.grid(alpha=0.5); \r\n x=np.linspace(-4,4)\r\n ax.plot(x, stats.norm.pdf(x), '--g' );\r\n ax.set(xlabel='normalized fit deviation', title=title, )\r\n ax.legend(loc='upper left',prop=dict(family='monospace'))\r\n fig.suptitle('Normalized devations of fit from model', fontsize=16);\r\n\r\n return fig", "def get_frequency(frame):\n frame = clip_centre(frame)\n frame = auto_correlate(frame)\n threshold: int = SAMPLE_RATE // 500\n lag = frame[threshold:].argmax()\n frequency = SAMPLE_RATE / lag\n return frequency", "def calc_ind(sel_lines):\n\n print()\n print(\"CALCULATING INDICES\")\n print(\"-------------------\")\n\n # remove duplicates of ind_id and gives a list of selected indices\n sel_ind = list(set(sel_lines['ind_id']))\n sel_ind = np.asarray(sel_ind)\n\n index = {}\n index['index'] = []\n index['value'] = []\n index['error'] = []\n index['flg'] = []\n index['mfrac_neg'] = []\n index['snr'] = []\n\n print(\"index\\tvalue\\terror\\t\\tsnr\\tflag\\tmfrac_neg\")\n print(\"-----\\t-----\\t-----\\t\\t---\\t----\\t---------\")\n\n ind_ids = np.asarray(sel_lines['ind_id'])\n rows = len(sel_lines['ln_id'])\n for i in range(len(sel_ind)): # each index\n\n var = [sel_lines['ind_var'][k] for k in range(rows) \\\n if ind_ids[k] == sel_ind[i]]\n flux = [sel_lines['flux'][k] for k in range(rows) \\\n if ind_ids[k] == sel_ind[i]]\n err = [sel_lines['error'][k] for k in range(rows) \\\n if ind_ids[k] == sel_ind[i]]\n flg = [sel_lines['flg'][k] for k in range(rows) \\\n if ind_ids[k] == sel_ind[i]]\n frac_neg = [sel_lines['frac_neg'][k] for k in range(rows) \\\n if ind_ids[k] == sel_ind[i]]\n snr = [sel_lines['snr'][k] for k in range(rows) \\\n if ind_ids[k] == sel_ind[i]]\n ln_c = [sel_lines['ln_c'][k] for k in range(rows) \\\n if ind_ids[k] == sel_ind[i]]\n\n # Maximum fraction of flux with negative values of all lines in index\n mfrac_neg = max(frac_neg)\n\n if \"negFlux\" in flg: flg_ind = 'negFlux'\n else: flg_ind = None\n\n # Median snr of index bandpasses:\n if snr is None or snr[0] is None:\n snr_ind = None\n else:\n snr_ind = np.median(snr)\n\n for k in range(len(var)):\n if 'L' not in var[k] and 'R' not in var[k]:\n msg=\"*** ERROR: 'ind_var' variable (in config file config_lines.txt) must start with either an 'L' for core line or 'R' for reference line. 
Value given was '{}'\".format(var[k])\n sys.exit(msg)\n\n # Add line variables for numerator or denominator:\n num = [ln_c[k]*flux[k] for k in range(len(var)) if 'L' in var[k]]\n num_err = [ln_c[k]*err[k] for k in range(len(var)) if 'L' in var[k]]\n denom = [ln_c[k]*flux[k] for k in range(len(var)) if 'R' in var[k]]\n denom_err = [ln_c[k]*err[k] for k in range(len(var)) if 'R' in var[k]]\n\n num = np.asarray(num)\n denom = np.asarray(denom)\n num_err = np.asarray(num_err)\n denom_err = np.asarray(denom_err)\n\n ind = sum(num) / sum(denom)\n\n # Error using propagation of errors for lines and ref lines\n ind_err = np.sqrt(sum(num_err**2) + ind**2 * sum(denom_err**2)) /sum(denom)\n\n if snr_ind: snr_ind = round(snr_ind, 2)\n\n index['index'].append(sel_ind[i])\n index['value'].append(ind)\n index['error'].append(ind_err)\n index['flg'].append(flg_ind)\n index['mfrac_neg'].append(mfrac_neg)\n index['snr'].append(snr_ind)\n\n print(\"{}\\t{:.4f}\\t{:.6f}\\t{}\\t{}\\t{:.4f}\".format(index['index'][i], index['value'][i], index['error'][i], index['snr'][i], index['flg'][i], index['mfrac_neg'][i]))\n\n return index", "def DW_cal(data, data_sm):\n n = len(data)\n numerator = 0\n denominator = 0\n for i in range(n):\n if i == 0:\n numerator = numerator + 0\n else:\n numerator = numerator + ((data[i] - data_sm[i]) - (data[i-1] - data_sm[i-1]))**2\n denominator = denominator + (data[i] - data_sm[i])**2\n return numerator/denominator*n/(n - 1)", "def spectral_diff_matrix(n, xmin=0, xmax=2*np.pi):\n\n h = 2 * np.pi / n\n kk = np.arange(1, n)\n n1 = int(np.floor((n - 1) / 2))\n n2 = int(np.ceil((n - 1) / 2))\n if np.mod(n, 2) == 0:\n topc = 1 / np.tan(np.arange(1, n2 + 1) * h / 2)\n temp = np.concatenate((topc, -np.flip(topc[0:n1])))\n else:\n topc = 1 / np.sin(np.arange(1, n2 + 1) * h / 2)\n temp = np.concatenate((topc, np.flip(topc[0:n1])))\n\n col1 = np.concatenate(([0], 0.5 * ((-1) ** kk) * temp))\n row1 = -col1\n D = 2 * np.pi / (xmax - xmin) * toeplitz(col1, r=row1)\n return D", "def integrate_spectrum(self):\n flux = sum(self.spectrum)\n return flux", "def stEnergy(frame):\n return np.sum(frame ** 2) / np.float64(len(frame))", "def sincbroad(w, s, hwhm):\n \"\"\"\n History\n -------\n Dec-90 GB,GM\n Rewrote with fourier convolution algorithm.\n Jul-91 AL\n Translated from ANA to IDL.\n 22-Sep-91 JAV\n Relaxed constant dispersion check# vectorized, 50% faster.\n 05-Jul-92 JAV\n Converted to function, handle nonpositive hwhm.\n 14-Nov-93 JAV\n Adapted from macbro.pro\n 23-Apr-93 JAV\n Verified that convolution kernel has specified hwhm. For IR FTS\n spectra: hwhm=0.0759 Angstroms, max change in profile is 0.4% of continuum.\n Oct-18 AW\n Python Version\n \"\"\"\n\n # Warn user if hwhm is negative.\n if hwhm < 0:\n logger.warning(\"Forcing negative smoothing width to zero.\")\n\n # Return input argument if half-width is nonpositive.\n if hwhm <= 0:\n return s # true: no broadening\n\n # Calculate (uniform) dispersion.\n nw = len(w) ## points in spectrum\n dw = (w[-1] - w[0]) / (nw - 1) # wavelength change per pixel\n\n # Make sinc function out to 20th zero-crossing on either side. Error due to\n # ignoring additional lobes is less than 0.2% of continuum. 
Reducing extent\n # to 10th zero-crossing doubles maximum error.\n fwhm = 2.0 * hwhm # full width at half maximum\n rperfw = 0.26525 # radians per fwhm of sinc\n xrange = 20 * np.pi # 20th zero of sinc (radians)\n wrange = xrange * fwhm * rperfw # 20th zero of sinc (wavelength)\n nhalf = int(wrange / dw + 0.999) ## points in half sinc\n nsinc = 2 * nhalf + 1 ## points in sinc (odd!)\n wsinc = (np.arange(nsinc, dtype=float) - nhalf) * dw # absissca (wavelength)\n xsinc = wsinc / (fwhm * rperfw) # absissca (radians)\n xsinc[nhalf] = 1.0 # avoid divide by zero\n sinc = np.sin(xsinc) / xsinc # calculate sinc\n sinc[nhalf] = 1.0 # insert midpoint\n xsinc[nhalf] = 0.0 # fix xsinc\n sinc = sinc / np.sum(sinc) # normalize sinc\n\n # Pad spectrum ends to minimize impact of Fourier ringing.\n sout = convolve(s, sinc, mode=\"nearest\")\n\n return sout", "def delta(self):\r\n return 1 - xl.Refractive_Index_Re(self.compound, self.energy, self.density)", "def stimulus_response_coherence(filename, segment_length):\n data, stimulus, sampling_interval, time = load_data(filename)\n nyquist = 1./(sampling_interval * 2.)\n f_step = 1./(sampling_interval * segment_length)\n f = np.arange(0, nyquist + f_step, f_step)\n noOfSamples = data.shape[0]\n noOfSegments = int(np.floor(noOfSamples/segment_length))\n kernel = gauss_kernel(0.001, 1./sampling_interval, 0.01)\n window = np.hanning(segment_length)\n coherence_spectra = np.zeros((segment_length, data.shape[1]), dtype=np.complex_)\n exp_coherence_spectra = np.zeros((segment_length, data.shape[1]), dtype=np.complex_)\n # we will need the psth for the expected coherence \n psth = np.zeros(data.shape[0])\n for i in range(data.shape[1]):\n psth = psth + np.convolve(data[:,i], kernel, mode='same') * (1./sampling_interval)\n psth = psth/data.shape[1]\n # go and calculate the spectra\n for i in range(data.shape[1]):\n trace = data[:,i]/sampling_interval\n trace = np.convolve(trace, kernel, mode=\"same\")\n f_resp = np.zeros((segment_length, noOfSegments), dtype=np.complex_)\n f_psth = np.zeros((segment_length, noOfSegments), dtype=np.complex_)\n f_stim = np.zeros((segment_length, noOfSegments), dtype=np.complex_)\n for n in range(noOfSegments):\n start\t= n * segment_length\n end \t= start + segment_length\n resp_segment = trace[start:end]\n resp_segment = resp_segment - np.mean(resp_segment)\n resp_segment = resp_segment * window\n psth_segment = psth[start:end]\n psth_segment = psth_segment - np.mean(psth_segment)\n psth_segment = psth_segment * window\n stim_segment = stimulus[start:end]\n stim_segment = stim_segment - np.mean(stim_segment)\n stim_segment = stim_segment * window\n \n f_resp[:, n] = np.fft.fft(resp_segment, segment_length)\n f_stim[:, n] = np.fft.fft(stim_segment, segment_length)\n f_psth[:, n] = np.fft.fft(psth_segment, segment_length)\n\n f_resp_conj = np.conjugate(f_resp) # complex conjugate spectrum of response segments\n f_stim_conj = np.conjugate(f_stim) # complex conjugate spectra of stimulus segments\n f_psth_conj = np.conjugate(f_psth) # complex conjugate spectra of psth segments\n\n sr_cross_spectrum = np.mean(f_stim_conj * f_resp, axis=1) # cross spectrum S*R\n ss_auto_spectrum = np.mean(f_stim_conj * f_stim, axis=1) # auto spectrum S*S\n\n rs_cross_spectrum = np.mean(f_resp_conj * f_stim, axis=1) # cross spectrum R*S\n rr_auto_spectrum = np.mean(f_resp_conj * f_resp, axis=1) # auto spectrum R*R\n \n pr_cross_spectrum = np.mean(f_psth_conj * f_resp, axis=1) # cross spectrum PSTH*R\n pp_auto_spectrum = np.mean(f_psth_conj * f_psth, axis=1) 
# auto spectrum PSTH*PSTH\n rp_cross_spectrum = np.mean(f_resp_conj * f_psth, axis=1) # cross spectrum R*PSTH\n \n coherence_spectra[:, i] = (sr_cross_spectrum * rs_cross_spectrum) / (ss_auto_spectrum * rr_auto_spectrum)\n exp_coherence_spectra[:, i] = (pr_cross_spectrum * rp_cross_spectrum) / (pp_auto_spectrum * rr_auto_spectrum)\n \n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.plot(f, np.mean(coherence_spectra[:len(f),:], axis=1), color='dodgerblue', label=\"r-s coherence\")\n ax.plot(f, np.mean(exp_coherence_spectra[:len(f),:], axis=1), color='silver', label=\"r-r coherence\")\n ax.set_xlim([0, 300])\n ax.set_ylim([0, 1])\n ax.set_xlabel('frequency [Hz]')\n ax.set_ylabel('coherence')\n ax.legend(fontsize=9)\n plt.show()", "def calculate_index_and_derivative(wl):\n index = np.sqrt(1 + (0.6961663 * wl * wl) / (wl * wl - 0.0684043 * 0.0684043)\n + (0.4079426 * wl * wl) / (wl * wl - 0.1162414 * 0.1162414)\n + (0.8974794 * wl * wl) / (wl * wl - 9.896161 * 9.896161)\n )\n\n index_derivative = \\\n (\n - (1.79496 * wl * wl * wl) / (pow(-97.934 + wl * wl, 2))\n + (1.79496 * wl) / (-97.934 + wl * wl)\n\n - (0.815885 * wl * wl * wl) / (pow(-0.0135121 + wl * wl, 2))\n + (0.815885 * wl) / (-0.0135121 + wl * wl)\n\n - (1.39233 * wl * wl * wl) / (pow(-0.00467915 + wl * wl, 2))\n + (1.39233 * wl) / (-0.00467915 + wl * wl)\n ) \\\n / \\\n (2 * np.sqrt(\n 1\n + (0.897479 * wl * wl) / (-97.934 + wl * wl)\n + (0.407943 * wl * wl) / (-0.0135121 + wl * wl)\n + (0.696166 * wl * wl) / (-0.00467915 + wl * wl)\n )\n )\n return index, index_derivative", "def calcumul_index(path,x,name_champ_label,indice2,list_drop,pathlist_names_feature):\n sql=sqlite3.connect(path)\n df=pd.read_sql_query(\"SELECT * FROM output\", sql)\n df=df.groupby(\"originfid\").mean()\n if 'band' in df.columns[6] :\n globals()[\"df%s\"%x]=col_sqlite(path,x,list_drop,pathlist_names_feature)\n label = globals()[\"df%s\"%x][name_champ_label]\n globals()[\"%s\"%x]=globals()[\"df%s\"%x].astype(float)\n print(indice2)\n if indice2 not in ['NDVI', 'NDWI','SM','asc_vv','des_vv','asc_vh','des_vh','asc_userfeature1','des_userfeature1']:\n name_indice=indice2\n band1_indice=input(\"band ? :\")\n band2_indice=input(\"band ? :\")\n df_b1 = globals()[\"%s\"%x].filter(like=band1_indice)\n df_b2 = globals()[\"%s\"%x].filter(like=band2_indice)\n df_b1_col = df_b1.rename(columns=lambda x: x[-8:])\n df_b2_col = df_b2.rename(columns=lambda x: x[-8:])\n df_indice = (df_b2_col - df_b1_col)/(df_b2_col + df_b1_col)\n globals()[\"df_%s\"%indice2] = df_indice.cumsum(axis=1)\n \n else:\n df_indice = globals()[\"df%s\"%x].filter(like=indice2)\n df_indice_col = df_indice.rename(columns=lambda x: x[-8:])\n df_indice_col=df_indice_col.iloc[:-1]\n globals()[\"df_%s\"%indice2] = df_indice_col.cumsum(axis=1)\n \n globals()[\"df_%s\"%indice2][name_champ_label]=label\n globals()[\"df_%s\"%indice2]=globals()[\"df_%s\"%indice2].astype(float)\n globals()[\"df_mean_%s\"%indice2]=globals()[\"df_%s\"%indice2].groupby(name_champ_label).mean().T \n globals()[\"df_mean_%s\"%indice2].index=pd.to_datetime(globals()[\"df_mean_%s\"%indice2].index,format=\"%Y%m%d\")\n else :\n label = df[name_champ_label]\n print(indice2)\n if indice2 not in ['ndvi', 'ndwi','asc_vv','des_vv','asc_vh','des_vh','asc_userfeature1','des_userfeature1','SM']:\n name_indice=indice2\n band1_indice=input(\"band ? :\")\n band2_indice=input(\"band ? 
:\")\n df_b1 = df.filter(like=band1_indice)\n df_b2 = df.filter(like=band2_indice)\n df_b1_col = df_b1.rename(columns=lambda x: x[-8:])\n df_b2_col = df_b2.rename(columns=lambda x: x[-8:])\n df_indice = (df_b2_col - df_b1_col)/(df_b2_col + df_b1_col)\n globals()[\"df_%s\"%indice2] = df_indice.cumsum(axis=1)\n \n else:\n df_indice = df.filter(like=indice2)\n df_indice_col = df_indice.rename(columns=lambda x: x[-8:])\n globals()[\"df_%s\"%indice2] = df_indice_col.cumsum(axis=1)\n \n globals()[\"df_%s\"%indice2][name_champ_label]=label\n globals()[\"df_mean_%s\"%indice2]=globals()[\"df_%s\"%indice2].groupby(name_champ_label).mean().T\n globals()[\"df_mean_%s\"%indice2].index=pd.to_datetime(globals()[\"df_mean_%s\"%indice2].index,format=\"%Y%m%d\")\n return globals()[\"df_mean_%s\"%indice2], globals()[\"df_%s\"%indice2]", "def warm_region_cal(audio_samples, fs):\n #window the audio\n windowed_samples = timbral_util.window_audio(audio_samples)\n\n # need to define a function for the roughness stimuli, emphasising the 20 - 40 region (of the bark scale)\n min_bark_band = 10\n max_bark_band = 40\n mean_bark_band = (min_bark_band + max_bark_band) / 2.0\n array = np.arange(min_bark_band, max_bark_band)\n x = timbral_util.normal_dist(array, theta=0.01, mean=mean_bark_band)\n x -= np.min(x)\n x /= np.max(x)\n\n wr_array = np.zeros(240)\n wr_array[min_bark_band:max_bark_band] = x\n\n # need to define a second array emphasising the 20 - 40 region (of the bark scale)\n min_bark_band = 80\n max_bark_band = 240\n mean_bark_band = (min_bark_band + max_bark_band) / 2.0\n array = np.arange(min_bark_band, max_bark_band)\n x = timbral_util.normal_dist(array, theta=0.01, mean=mean_bark_band)\n x -= np.min(x)\n x /= np.max(x)\n\n hf_array = np.zeros(240)\n hf_array[min_bark_band:max_bark_band] = x\n\n windowed_loud_spec = []\n windowed_rms = []\n\n wr_vals = []\n hf_vals = []\n\n for i in range(windowed_samples.shape[0]):\n samples = windowed_samples[i, :]\n N_entire, N_single = timbral_util.specific_loudness(samples, Pref=100.0, fs=fs, Mod=0)\n\n # append the loudness spec\n windowed_loud_spec.append(N_single)\n windowed_rms.append(np.sqrt(np.mean(samples * samples)))\n\n wr_vals.append(np.sum(wr_array * N_single))\n hf_vals.append(np.sum(hf_array * N_single))\n\n mean_wr = np.mean(wr_vals)\n mean_hf = np.mean(hf_vals)\n weighted_wr = np.average(wr_vals, weights=windowed_rms)\n weighted_hf = np.average(hf_vals, weights=windowed_rms)\n\n return mean_wr, weighted_wr, mean_hf, weighted_hf", "def freqdomain(self):\n \n\n #self.df = self.f[1] - self.f[0]\n #frequency vector\n #fv = fftshift(fftfreq(len(eta),1./fs))\n #fv = fv[len(fv)/2:]\n \n #spectral analysis\n self.sn1 = self.espec1(self.n1)\n self.sn2 = self.espec1(self.n2)\n self.sn3 = self.espec1(self.n3)\n self.sn12 = self.espec2(self.n1,self.n2)\n self.sn13 = self.espec2(self.n1,self.n3)\n self.sn23 = self.espec2(self.n2,self.n3)\n \n #delta freq\n self.df = self.f[3] - self.f[2]\n\n #calculo do numero de onda\n #self.wavenumber()\n #k = numeronda(h,f,len(f))\n #k = np.array(k)\n\n #calculo dos coeficientes de fourier - NDBC 96_01 e Steele (1992)\n c = self.sn2[:,1] + self.sn3[:,1]\n cc = np.sqrt(self.sn1[:,1] * (c))\n \n self.a1 = self.sn12[:,3] / cc\n self.b1 = self.sn13[:,3] / cc\n \n self.a2 = (self.sn2[:,1] - self.sn3[:,1]) / c\n self.b2 = 2 * self.sn12[:,2] / c\n \n #calcula direcao de onda\n #mean direction\n self.dire1 = np.array([np.angle(np.complex(self.b1[i],self.a1[i]),deg=True) for i in range(len(self.a1))])\n \n #principal direction\n self.dire2 
= 0.5 * np.array([np.angle(np.complex(self.b2[i],self.a2[i]),deg=True) for i in range(len(self.a2))])\n \n #condicao para valores maiores que 360 e menores que 0\n self.dire1[np.where(self.dire1 < 0)] = self.dire1[np.where(self.dire1 < 0)] + 360\n self.dire1[np.where(self.dire1 > 360)] = self.dire1[np.where(self.dire1 > 360)] - 360\n self.dire2[np.where(self.dire2 < 0)] = self.dire2[np.where(self.dire2 < 0)] + 360\n self.dire2[np.where(self.dire2 > 360)] = self.dire2[np.where(self.dire2 > 360)] - 360\n \n #acha o indice da frequencia de pico\n ind = np.where(self.sn1[:,1] == np.max(self.sn1[:,1]))[0]\n \n #periodo de pico\n self.tp = (1. / self.f[ind])[0]\n \n #momento espectral de ordem zero total - m0\n self.m0 = np.sum(self.sn1[:,1]) * self.df\n \n #calculo da altura significativa\n self.hm0 = 4.01 * np.sqrt(self.m0)\n \n #direcao do periodo de pico\n self.dp = self.dire1[ind][0]\n \n #Espalhamento direcional\n #Formula do sigma1 do livro Tucker&Pitt(2001) \"Waves in Ocean Engineering\" pags 196-198\n c1 = np.sqrt(self.a1 ** 2 + self.b1 ** 2)\n c2 = np.sqrt(self.a2 ** 2 + self.b2 ** 2)\n \n s1 = c1 / (1-c1)\n s2 = (1 + 3 * c2 + np.sqrt(1 + 14 * c2 + c2 ** 2)) / (2 * (1 - c2))\n \n self.sigma1 = np.sqrt(2 - 2 * c1) * 180 / np.pi\n self.sigma2 = np.sqrt((1 - c2) / 2) * 180 / np.pi\n \n self.sigma1p = np.real(self.sigma1[ind])[0]\n self.sigma2p = np.real(self.sigma2[ind])[0]\n \n # pondaf = np.array([hm0, tp, dp, sigma1p, sigma2p])\n \n #hm0, tp, dp, sigma1, sigma2, sigma1p, sigma2p, f, df, k, sn, snx, sny, snn, snnx, snny, snxny, snxnx, snyny, a1, b1, a2, b2, dire1, dire2\n #return hm0, tp, dp, sigma1, sigma2, sigma1p, sigma2p, f, df, k, sn, snx, sny, snn, snnx, snny, snxny, snxnx, snyny, a1, b1, a2, b2, dire1, dire2", "def stEnergy(frame):\n return numpy.sum(frame ** 2) / numpy.float64(len(frame))", "def spectral_roll_on(sign, fs):\n output = 0\n f, ff = plotfft(sign, fs)\n cum_ff = np.cumsum(ff)\n value = 0.05*(sum(ff))\n\n for i in range(len(ff)):\n if cum_ff[i] >= value:\n output = f[i]\n break\n return output", "def refractive_index_glass(wavelength, type='soda-lime-low-iron'):\n\n if type.lower() == 'soda-lime-low-iron':\n wavelength = wavelength / 1000\n n = 1.5130 - 0.003169 * wavelength ** 2 + 0.003962 * wavelength ** -2 + 0 * 1j\n\n # n[wavelength < 0.3] = n[wavelength < 0.3] + 1j*0\n elif type.upper() == 'BK7':\n wavelength = wavelength / 1000\n n = np.sqrt(1 + \\\n (1.03961212 * wavelength ** 2) / (\n wavelength ** 2 - 0.00600069867) + \\\n (0.231792344 * wavelength ** 2) / (\n wavelength ** 2 - 0.0200179144) + \\\n (1.01046945 * wavelength ** 2) / (\n wavelength ** 2 - 103.560653)\n )\n\n return n", "def stitchSpectra(lamb_list,count_list, method=\"scale\", edgeremove=(0, 0), shiftToPositive=False, dlambda=None):\r\n rawData=np.array([np.array(lamb_list),np.array(count_list)])\r\n rawData=rawData.swapaxes(0,1)\r\n coefficients = []\r\n print(\"Removing edges for stitching:\", *edgeremove)\r\n omitRight = rawData[0].shape[1] - math.floor(rawData[0].shape[1] * edgeremove[1])\r\n print(\"Stitching index range is \", 0, omitRight)\r\n processed = np.array(rawData[0][:, 0:omitRight]) \r\n if dlambda is None:\r\n dlambda = math.fabs(processed[0, 1] - processed[0, 0]) ## lambda steps of first spectrum are kept\r\n for i, spec in enumerate(rawData[1:]):\r\n omitLeft = math.floor(spec.shape[1] * edgeremove[0])\r\n omitRight = spec.shape[1] - math.floor(spec.shape[1] * edgeremove[1])\r\n print(\"Stitching index range is \", omitLeft, omitRight)\r\n if i == len(rawData)-2:\r\n spec = 
np.array(spec[:, omitLeft:]) ## do not shorten last array at end\r\n else:\r\n spec = np.array(spec[:, omitLeft:omitRight]) # shorten middle arrays at both sides\r\n print(\"Stitching spectrum in range\", np.min(spec[0,]), np.max(spec[0,]))\r\n # calculate overlap\r\n overlap = (np.min(spec[0,]), np.max(processed[0,])) \r\n #lambdas = np.arange(*overlap, dlambda)\r\n #leftfun = interp1d(processed[0,], processed[1,])\r\n #rightfun = interp1d(spec[0,], spec[1,])\r\n left = np.mean(processed[1, processed[0,] > overlap[0]]) ##mean of counts of overlap\r\n right = np.mean(spec[1, spec[0,] < overlap[1]])\r\n if method == \"shift\":\r\n # calculate offset in overlap region\r\n offset = left - right\r\n print(\"Stitching offset %s in overlap\", offset, *overlap)\r\n # add shifted spectrum\r\n spec[1,] = spec[1,] + offset\r\n coefficients.append(offset)\r\n elif method == \"scale\":\r\n # calculate factor in overlap region\r\n factor = left/right\r\n print(\"Stitching factor\"+str(factor)+\" in overlap \", *overlap)\r\n spec[1,] = spec[1,] * factor\r\n coefficients.append(factor)\r\n processed = np.concatenate([processed, spec], axis=1)\r\n # interpolate data on grid\r\n interpolated = interp1d(processed[0,], processed[1,])\r\n lambdas = np.arange(processed[0, 0], processed[0, -1], dlambda)\r\n specdata = interpolated(lambdas)\r\n # shift above 0\r\n if shiftToPositive:\r\n minimum = np.min(specdata)\r\n if minimum < 0:\r\n specdata += math.fabs(minimum)\r\n \r\n return (lambdas,specdata,coefficients)", "def wvd(fx,nh=2**8-1,tstep=2**5,nfbins=2**10,df=1.0):\r\n \r\n if type(fx) is list:\r\n fx=np.array(fx)\r\n try:\r\n fn,fm=fx.shape\r\n if fm>fn:\r\n fm,fn=fx.shape\r\n except ValueError:\r\n fn=len(fx)\r\n fm=1\r\n if fm>1:\r\n fn=fn[0]\r\n print 'computing cross spectra'\r\n #compute the analytic signal of function f and dctrend\r\n fa=sps.hilbert(dctrend(fx[0]))\r\n fb=sps.hilbert(dctrend(fx[1]))\r\n fa=fa.reshape(fn)\r\n fb=fb.reshape(fn)\r\n else:\r\n #compute the analytic signal of function f and dctrend\r\n fa=sps.hilbert(dctrend(fx))\r\n fa=fa.reshape(fn)\r\n fb=fa.copy()\r\n \r\n #sampling period\r\n df=float(df)\r\n dt=1./df\r\n tau=(nh-1)/2\r\n \r\n #create a time array such that the first point is centered on time window\r\n tlst=np.arange(start=0,stop=fn-1,step=tstep,dtype='int')\r\n \r\n #create an empty array to put the tf in \r\n tfarray=np.zeros((nfbins,len(tlst)),dtype='complex128')\r\n \r\n #create a frequency array with just positive frequencies\r\n flst=np.fft.fftfreq(nfbins,dt)[0:nfbins/2]\r\n \r\n #calculate pseudo WV\r\n for point,nn in enumerate(tlst):\r\n #calculate the smallest timeshift possible\r\n taun=min(nn,tau,fn-nn-1)\r\n #make a timeshift array\r\n taulst=np.arange(start=-taun,stop=taun+1,step=1,dtype='int')\r\n #calculate rectangular windowed correlation function of analytic signal\r\n Rnn=4*np.conjugate(fa[nn-taulst])*fb[nn+taulst] \r\n #calculate fft of windowed correlation function\r\n FTRnn=np.fft.fft(padzeros(Rnn,npad=nfbins))\r\n #put into tfarray\r\n tfarray[:,point]=FTRnn[::-1]\r\n \r\n #normalize\r\n tfarray=tfarray/nh\r\n \r\n return tfarray,tlst,flst", "def extract_mel_spectrogram(wav_path, X, y, index, curr_speaker_num, max_duration=None):\n Sxx = spectrogram_converter.mel_spectrogram(wav_path, max_duration)\n for i in range(Sxx.shape[0]):\n for j in range(Sxx.shape[1]):\n X[index, 0, i, j] = Sxx[i, j]\n y[index] = curr_speaker_num\n return 1", "def music(idx, n_music=200):\n f = freqs[idx]\n Rxx = np.dot(X[:, idx], X[:, idx].H)\n lam, V = 
eig_sorted(Rxx)\n En = V[:, 1:] # Noise subspace for one source\n\n theta_range = np.linspace(0, 2*np.pi, n_music)\n P_music = np.zeros(n_music)\n for i in range(n_music):\n sv = ma.steering_vector(theta_range[i], f)\n vec = np.dot(En.H, ma.steering_vector(theta_range[i], f))\n P_music[i] = 1/np.linalg.norm(vec)**2\n\n vv = V[:, 0].flatten()\n print('----------')\n print('Performing MUSIC at {:.5} Hz'.format(f))\n print('-----------------------------')\n print('Steering vector subspace check:\\n')\n print('At the correct angle of {:.3}, '.format(theta*180/np.pi) +\n 'the real parts of the eigenvalues of R_xx are:')\n print('\\n'.join(' {:.3}'.format(np.real(l)) for l in lam))\n print('\\nSteering vector / eigenvector of max eigenvalue:')\n print((ma.steering_vector(theta, f) / vv).T)\n return P_music, theta_range", "def ssc(signal,samplerate=16000,winlen=0.025,winstep=0.01,\n nfilt=26,nfft=512,lowfreq=0,highfreq=None,preemph=0.97): \n highfreq= highfreq or samplerate/2\n signal = sigproc.preemphasis(signal,preemph)\n frames = sigproc.framesig(signal, winlen*samplerate, winstep*samplerate)\n pspec = sigproc.powspec(frames,nfft)\n pspec = pylab.where(pspec == 0,pylab.finfo(float).eps,pspec) # if things are all zeros we get problems\n \n fb = get_filterbanks(nfilt,nfft,samplerate,lowfreq,highfreq)\n feat = pylab.dot(pspec,fb.T) # compute the filterbank energies\n R = pylab.tile(pylab.linspace(1,samplerate/2,pylab.size(pspec,1)),(pylab.size(pspec,0),1))\n \n return pylab.dot(pspec*R,fb.T) / feat", "def get_beat_sync_spectrums(audio):\n y, sr = core.load(audio, sr=44100)\n eql_y = EqualLoudness()(y)\n tempo, framed_dbn = self_tempo_estimation(y, sr)\n np.append(framed_dbn, np.array(len(y)/sr))\n band1 = (0, 220)\n band2 = (220, 1760)\n band3 = (1760, sr / 2)\n band1list = []\n band2list = []\n band3list = []\n for i in range(1, len(framed_dbn)):\n fft_eq = abs(np.fft.fft(eql_y[int(framed_dbn[i - 1] * sr):int(framed_dbn[i] * sr)]))\n freqs = np.fft.fftfreq(len(fft_eq), 1 / sr)\n band1list.append(np.sqrt(np.mean(sum(fft_eq[np.where(np.logical_and(freqs > band1[0], freqs < band1[1]))]**2))))\n band2list.append(np.sqrt(np.mean(sum(fft_eq[np.where(np.logical_and(freqs > band2[0], freqs < band2[1]))]**2))))\n band3list.append(np.sqrt(np.mean(sum(fft_eq[np.where(np.logical_and(freqs > band3[0], freqs < band3[1]))]**2))))\n\n band1list = np.array(band1list).transpose()\n band2list = np.array(band2list).transpose()\n band3list = np.array(band3list).transpose()\n return np.vstack([band1list, band2list, band3list])", "def getSpectralEnergy(datatype, traceList, outfile, channelStart, channelEnd):\r\n assert(datatype in ['mat', 'segy']) \r\n if datatype=='segy':\r\n st = obspy.Stream(traceList) \r\n else:\r\n raise Exception('not implemented')\r\n sampleRate = traceList[0].stats.sampling_rate\r\n #for decimated data,sampleRate should be reflected\r\n #set wlen to 0.25 sec, high pass is 250\r\n wlen = 0.5*sampleRate\r\n nfft = int(_nearest_pow_2(wlen))\r\n npts = len(st[0].data)\r\n per_lap = 0.9\r\n if nfft > npts:\r\n nfft = int(_nearest_pow_2(npts / 8.0))\r\n nlap = int(nfft * float(per_lap))\r\n\r\n nTraces = len(traceList)\r\n Emat = None\r\n print ('sample rate is ', sampleRate, 'nfft=', nfft, 'noverlap', nlap)\r\n \r\n t_ = (traceList[0].stats.endtime-traceList[0].stats.starttime)\r\n dx_ = traceList[1].stats.distance - traceList[0].stats.distance\r\n extent = [0,len(traceList)*dx_/1e3,0,t_/100.0]\r\n\r\n for itr in range(0,nTraces):\r\n #F,T,SXX = signal.spectrogram(np.array(st[itr].data), 
fs=sampleRate, \r\n # window='hann', nfft=nfft, mode='magnitude')\r\n F,T,SXX = signal.spectrogram(np.array(st[itr].data), fs=sampleRate, \r\n window='hann', nfft=nfft)\r\n #sum along frequency axis \r\n #energy = np.sum((SXX[1:,:]/np.max(SXX[1:,:])),axis=0)\r\n energy = np.sum(SXX[1:,:],axis=0)\r\n #energy = np.log10(np.abs(energy/np.max(energy)))*10.0\r\n energy = np.log10(energy)*10.0\r\n if Emat is None:\r\n Emat = np.zeros((nTraces, len(T)))\r\n Emat[itr,:]=energy\r\n if DEBUG:\r\n plt.figure()\r\n im = plt.imshow(Emat,extent=extent)\r\n plt.colorbar(im)\r\n plt.savefig('spectralenergy{0}_ch{1}_{2}.png'.format(outfile,channelStart,channelEnd))\r\n plt.close()", "def internal_wave_KE(U, V, z, bin_idx, wl_min, wl_max, bin_size):\n \n \n Uspeci = []\n Vspeci = []\n Uspec = []\n Vspec = []\n Upowi = []\n Vpowi = []\n Upower = []\n Vpower = []\n U = U**2\n V = V**2\n \n sp = np.nanmean(np.gradient(z, axis=0))\n \n U_mx, U_kx = specGrid(U[bin_idx[0,:],0], sp, bin_size)\n \n for Ui, Vi in zip(U.T, V.T):\n \n for binIn in bin_idx:\n Uspec1 = SpectrumGen(Ui[binIn], bin_size)\n Upowi.append(power_spec(Uspec1))\n Uspeci.append(Uspec1)\n Vspec1 = SpectrumGen(Vi[binIn], bin_size)\n Vpowi.append(power_spec(Vspec1))\n Vspeci.append(Vspec1)\n \n Uspeci = np.vstack(Uspeci)\n Vspeci = np.vstack(Vspeci)\n Upowi = np.vstack(Upowi)\n Vpowi = np.vstack(Vpowi)\n \n Uspec.append(Uspeci)\n Vspec.append(Vspeci)\n Upower.append(Upowi)\n Vpower.append(Vpowi)\n Uspeci = []\n Vspeci = []\n Upowi = []\n Vpowi = []\n \n # integrate Power Spec of U and V between chosen vertical wavelengths\n Uint = []\n Vint = []\n \n for Us, Vs in zip(Upower, Vpower):\n Ui = np.vstack([power_int_smart(binIn,\\\n U_mx, wl_min, wl_max) for binIn in Us])\n Vi = np.vstack([power_int_smart(binIn,\\\n U_mx, wl_min, wl_max) for binIn in Vs])\n Uint.append(Ui)\n Vint.append(Vi)\n \n Ui = []\n Vi = []\n \n \n Uint = np.hstack(Uint)\n Vint = np.hstack(Vint)\n \n Ek = 0.5*(Uint + Vint)\n \n return Ek, Upower, Vpower, U_kx, Uspec, Vspec", "def reassignedSmethod(fx,nh=2**7-1,tstep=2**4,nfbins=2**9,df=1.0,alpha=4,\r\n thresh=.01,L=5): \r\n \r\n# if type(fx) is list:\r\n# fx=np.array(fx)\r\n# try:\r\n# fn,fm=fx.shape\r\n# if fm>fn:\r\n# fm,fn=fx.shape\r\n# except ValueError:\r\n# fn=len(fx)\r\n# fm=1\r\n# if fm>1:\r\n# print 'computing cross spectra'\r\n# #compute the analytic signal of function f and dctrend\r\n# #fa=sps.hilbert(dctrend(fx[0]))\r\n# #fb=sps.hilbert(dctrend(fx[1]))\r\n# fa=fx[0]\r\n# fb=fx[1]\r\n# fa=fa.reshape(fn)\r\n# fb=fb.reshape(fn)\r\n# else:\r\n# fa=fx\r\n# fa=fa.reshape(fn)\r\n# fb=fa.copy()\r\n\r\n \r\n nx=len(fx) \r\n \r\n #compute gaussian window\r\n h=gausswin(nh,alpha=alpha)\r\n #h=np.hanning(nh)\r\n lh=(nh-1)/2\r\n \r\n #compute ramp window\r\n th=h*np.arange(start=-lh,stop=lh+1,step=1)\r\n \r\n #compute derivative of window\r\n dh=dwindow(h)\r\n \r\n #make a time list of indexes\r\n tlst=np.arange(start=0,stop=nx,step=tstep)\r\n nt=len(tlst)\r\n \r\n #make frequency list for plotting\r\n flst=np.fft.fftfreq(nfbins,1./df)[:nfbins/2]\r\n \r\n #initialize some time-frequency arrays\r\n tfh=np.zeros((nfbins,nt),dtype='complex128')\r\n tfth=np.zeros((nfbins,nt),dtype='complex128')\r\n tfdh=np.zeros((nfbins,nt),dtype='complex128')\r\n \r\n #compute components for reassignment\r\n for ii,tt in enumerate(tlst):\r\n #create a time shift list\r\n tau=np.arange(start=-min([np.round(nx/2.),lh,tt-1]),\r\n stop=min([np.round(nx/2.),lh,nx-tt-1])+1)\r\n #compute the frequency spots to be calculated\r\n 
ff=np.remainder(nfbins+tau,nfbins)\r\n #make lists of data points for each window calculation\r\n xlst=tt+tau\r\n hlst=lh+tau\r\n normh=np.sqrt(np.sum(abs(h[hlst])**2))\r\n tfh[ff,ii]=fx[xlst]*h[hlst].conj()/normh\r\n tfth[ff,ii]=fx[xlst]*th[hlst].conj()/normh\r\n tfdh[ff,ii]=fx[xlst]*dh[hlst].conj()/normh\r\n \r\n #compute Fourier Transform\r\n spech=np.fft.fft(tfh,axis=0)\r\n specth=np.fft.fft(tfth,axis=0)\r\n specdh=np.fft.fft(tfdh,axis=0)\r\n \r\n #get only positive frequencies\r\n spech=spech[nfbins/2:,:]\r\n specth=specth[nfbins/2:,:]\r\n specdh=specdh[nfbins/2:,:]\r\n \r\n #check to make sure no spurious zeros floating around\r\n szf=np.where(abs(spech)<1.E-6)\r\n spech[szf]=0.0+0.0j\r\n zerofind=np.nonzero(abs(spech))\r\n twspec=np.zeros((nfbins/2,nt),dtype='float')\r\n dwspec=np.zeros((nfbins/2,nt),dtype='float')\r\n twspec[zerofind]=np.round(np.real(specth[zerofind]/spech[zerofind]))\r\n dwspec[zerofind]=np.round(np.imag((nfbins/2.)*specdh[zerofind]/\r\n spech[zerofind])/(np.pi))\r\n \r\n #get shape of spectrogram\r\n nf,nt=spech.shape\r\n \r\n #-----calculate s-method-----\r\n Llst=np.arange(start=-L/2+1,stop=L/2+1,step=1,dtype='int')\r\n\r\n #make and empty array of zeros\r\n sm=np.zeros_like(spech)\r\n \r\n #put values where L cannot be value of L, near top and bottom\r\n sm[0:L/2,:]=abs(spech[0:L/2,:])**2\r\n sm[-L/2:,:]=abs(spech[-L/2:,:])**2\r\n\r\n #calculate s-method\r\n for ff in range(L/2,nf-L/2-1):\r\n sm[ff,:]=2*np.real(np.sum(spech[ff+Llst,:]*spech[ff-Llst,:].conj(),\r\n axis=0))/L\r\n \r\n #------compute reassignment----- \r\n\r\n \r\n rtfarray=np.zeros((nfbins/2,nt))\r\n \r\n threshold=thresh*np.max(abs(sm))\r\n \r\n for nn in range(nt):\r\n for kk in range(nf):\r\n if abs(spech[kk,nn])>threshold:\r\n #get center of gravity index in time direction from spectrogram \r\n nhat=int(nn+twspec[kk,nn])\r\n nhat=int(min([max([nhat,1]),nt-1]))\r\n #get center of gravity index in frequency direction from spec\r\n khat=int(kk-dwspec[kk,nn])\r\n khat=int(np.remainder(np.remainder(khat-1,nfbins/2)+nfbins/2,\r\n nfbins/2))\r\n rtfarray[khat,nhat]=rtfarray[khat,nhat]+abs(sm[kk,nn])\r\n else:\r\n rtfarray[kk,nn]=rtfarray[kk,nn]+sm[kk,nn]\r\n\r\n #place values where L cannot be L \r\n rtfarray[:L/2,:]=abs(sm[:L/2,:])\r\n rtfarray[-L/2:,:]=abs(sm[-L/2:,:])\r\n \r\n tz=np.where(rtfarray==0)\r\n rtfarray[tz]=1.0\r\n \r\n tz=np.where(sm==0.0)\r\n sm[tz]=1.0 \r\n \r\n #scale\r\n rtfarray=abs(rtfarray)\r\n \r\n return rtfarray,tlst,flst,sm", "def dseries(temp, wavelength):\n if wavelength < 300 or wavelength > 830:\n return 0\n mm=wavelength%10\n s=_dseriesd(temp, wavelength-mm)\n if mm==0:\n return s\n m=mm*0.1\n e=_dseriesd(temp, (wavelength-mm)+10)\n return s+(e-s)*m", "def comp_amplification_index(self):\n \n self.grid_tuning_in=self.inputs.grid_tuning_in\n self.grid_tuning_out=gl.comp_grid_tuning_index(self.L,self.nx,(self.r[0:self.n_e**2,:]).T) \n self.grid_tuning_out_inhib=gl.comp_grid_tuning_index(self.L,self.nx,(self.r[self.n_e**2:,:]).T)\n\n self.grid_amp_index=self.grid_tuning_out/self.grid_tuning_in", "def test_compute_spectral_norms(self):\n\t\tdetails = self.watcher.analyze(layers=[self.second_layer], pool=False, randomize=False, plot=False, mp_fit=False, svd_method=ACCURATE_SVD)\n\n\t\t# SLOW method\n\t\ta = details.spectral_norm.to_numpy()\n\t\tself.assertAlmostEqual(a[0],20.2149, places=3)\n\t\tself.assertAlmostEqual(a[1],24.8158, places=3)\n\t\tself.assertAlmostEqual(a[2],19.3795, places=3)", "def snv(spectra):\n\n return (spectra - np.mean(spectra, axis=0)) / 
np.std(spectra, axis=0)", "def sincint(x, nres, speclist) :\n\n dampfac = 3.25*nres/2.\n ksize = int(21*nres/2.)\n if ksize%2 == 0 : ksize +=1\n nhalf = ksize//2 \n\n #number of output and input pixels\n nx = len(x)\n nf = len(speclist[0][0])\n\n # integer and fractional pixel location of each output pixel\n ix = x.astype(int)\n fx = x-ix\n\n # outputs\n outlist=[]\n for spec in speclist :\n if spec[1] is None :\n outlist.append([np.full_like(x,0),None])\n else :\n outlist.append([np.full_like(x,0),np.full_like(x,0)])\n\n for i in range(len(x)) :\n xkernel = np.arange(ksize)-nhalf - fx[i]\n # in units of Nyquist\n xkernel /= (nres/2.)\n u1 = xkernel/dampfac\n u2 = np.pi*xkernel\n sinc = np.exp(-(u1**2)) * np.sin(u2) / u2\n sinc /= (nres/2.)\n\n lobe = np.arange(ksize) - nhalf + ix[i]\n vals = np.zeros(ksize)\n vars = np.zeros(ksize)\n gd = np.where( (lobe>=0) & (lobe<nf) )[0]\n\n for spec,out in zip(speclist,outlist) :\n vals = spec[0][lobe[gd]]\n out[0][i] = (sinc[gd]*vals).sum()\n if spec[1] is not None : \n var = spec[1][lobe[gd]]\n out[1][i] = (sinc[gd]**2*var).sum()\n\n for out in outlist :\n if out[1] is not None : out[1] = np.sqrt(out[1])\n \n return outlist", "def spectral_spread(sign, fs):\n f, ff = plotfft(sign, fs)\n spect_centr = spectral_centroid(sign, fs)\n if not np.sum(ff):\n return 0\n else:\n return np.dot(((f-spect_centr)**2), (ff / np.sum(ff)))", "def find_saturated(spectra, saturation_limit=90000):\n\n razlika = np.abs(\n np.diff(spectra, n=1, axis=-1,\n append=spectra[:,-2][:,None]))\n\n saturated_indices = np.unique(\n np.where(razlika > saturation_limit)[0])\n\n if len(saturated_indices)==0 and np.any(spectra==0):\n print(\"No saturated spectra is found;\\n\"\n \"Please make sure to apply this function before any scaling is done!\")\n else:\n return saturated_indices", "def test_best_result(origianl_waveform):\n origianl_waveform = origianl_waveform.flatten()\n recovery_waveform = []\n audio_length = len(origianl_waveform)\n noise = np.random.random_sample((audio_length,))\n noise_list = [x / 100 for x in noise]\n noise_count = 0\n \n for n in origianl_waveform:\n difference = n - noise_list[noise_count]\n recovery_waveform.append(difference)\n noise_count += 1\n \n return np.asarray(recovery_waveform)", "def getLatestSpectrumMeasurements(self): \n return self.spectrum[len(self.spectrum)-1]", "def calculate_ndvi(self):\n self.ndvi = (self.bands[\"n\"].astype(float) - self.bands[\"r\"].astype(float)) \\\n / (self.bands[\"n\"].astype(float) + self.bands[\"r\"].astype(float))", "def band_penalty(self):\n fc_ix = np.argmin(np.abs(self.f - self.fc)) # Index to frequency array closes to center frequency\n # Number of indexes on each side of center frequency, not extending outside, only up to 10 kHz\n n = min(fc_ix, self.ix10k - fc_ix)\n if n == 0:\n return 0.0\n return np.mean(np.square(self.fr[fc_ix - n:fc_ix] - (self.gain - self.fr[fc_ix + n - 1:fc_ix - 1:-1])))", "def _computeIndex(value, slices_nb):\n \n if doImg >= 0 or doStack or doVideo:\n return value + 1\n else:\n return value % (slices_nb+1)", "def calc_flux_array(self):\n \n # First determine the associated spectrum\n self.compute_template_spectrum()\n\n # Calculate baseline counts to normalise fluxes we scan over\n # Go from 10**(bin_min)*mean up to 10**(bin_max)*mean in nbins steps\n b = self.setup_b_instance(0,add_ps_mask=True)\n mean = np.sum(b.CTB_masked_compressed[0])/len(b.CTB_masked_compressed[0])\n A_array = mean*10**np.linspace(self.bin_min,self.bin_max,self.nbins)\n\n # Array to get LLs when no 
profile likelihood run\n norun = np.array([1.0, 1.0, 1.0, 1.0])\n\n # Now setup and compute the arrays\n LL_array = np.array([]) \n A_array_short = np.array([])\n spect_array = np.array([])\n\n for i in range(len(A_array)):\n print \"on i =\",i\n # Calculate LL\n if i == 0:\n b1 = self.setup_b_instance(A_array[i],add_ps_mask=True)\n else:\n for key in b1.fixed_template_dict_nested.keys():\n b1.fixed_template_dict_nested[key] = b1.fixed_template_dict_nested[key]*A_array[i]/A_array[i-1]\n ll_val = b1.ll(norun,4,4)\n # Make triangle\n\n # Append to arrays\n LL_array = np.append(LL_array,ll_val)\n A_array_short = np.append(A_array_short,A_array[i])\n spect_array = self.spectrum*np.array(A_array_short)\n\n # Save output\n np.save(work_dir+'ScanOutput/'+self.tag+'/En_array-'+str(self.flux_array_ebin)+'.npy',self.En_center)\n np.save(work_dir+'ScanOutput/'+self.tag+'/LL_array-'+str(self.flux_array_ebin)+'.npy',LL_array)\n np.save(work_dir+'ScanOutput/'+self.tag+'/Flux_array-'+str(self.flux_array_ebin)+'.npy',spect_array)", "def one_transition_spectrum_ld(self,tr):\n \n\n ta = tr[\"ta\"] # TimeAxis\n ld = tr[\"ld\"] # linear dichroism strength\n om = tr[\"om\"] # frequency - rwa\n gg = tr[\"gg\"] # natural broadening (constant or time dependent)\n fwhm = tr[\"fwhm\"] # Additional gaussian broadening of the spectra\n sgm = fwhm/(2*numpy.sqrt(2*numpy.log(2)))\n \n if self.system._has_system_bath_coupling:\n# ct = tr[\"ct\"] # correlation function\n \n # convert correlation function to lineshape function\n #gt = self._c2g(ta,ct.data)\n gt = tr[\"gt\"]\n # calculate time dependent response\n at = numpy.exp(-gt -1j*om*ta.data)\n else:\n # calculate time dependent response\n at = numpy.exp(-1j*om*ta.data) \n \n \n if len(gg) == 1:\n gam = gg[0]\n rt = numpy.exp(gam*ta.data)\n at *= rt\n #print(\"Constant: \", rt[20], len(at))\n else:\n rt = numpy.exp((gg)*ta.data) \n at *= rt\n #print(\"Time dependent: len = \", rt[20], len(rt))\n \n if fwhm!=0.0:\n gauss = numpy.exp(-2*(numpy.pi**2)*(sgm**2)*(ta.data**2))\n at *= gauss\n \n # Fourier transform the result\n ft = ld*numpy.fft.hfft(at)*ta.step\n ft = numpy.fft.fftshift(ft)\n # invert the order because hfft is a transform with -i\n ft = numpy.flipud(ft) \n # cut the center of the spectrum\n Nt = ta.length #len(ta.data) \n return ft[Nt//2:Nt+Nt//2]", "def get_beat_sync_chroma_and_spectrum(audio, sr=None, bpm=None):\n if not isinstance(audio, np.ndarray):\n sr = 44100\n y = std.MonoLoader(filename=audio, samplerate=44100)()\n else:\n y = audio\n eql_y = std.EqualLoudness()(y)\n tempo, framed_dbn = self_tempo_estimation(y, sr, tempo=bpm)\n if framed_dbn.shape[0] % 4 == 0:\n framed_dbn = np.append(framed_dbn, np.array(len(y)/sr))\n band1 = (0, 220)\n band2 = (220, 1760)\n band3 = (1760, sr / 2)\n band1list = []\n band2list = []\n band3list = []\n chromas = []\n for i in range(1, len(framed_dbn)):\n fft_eq = abs(np.fft.fft(eql_y[int(framed_dbn[i - 1] * sr):int(framed_dbn[i] * sr)]))\n freqs = np.fft.fftfreq(len(fft_eq), 1 / sr)\n band1list.append(np.sqrt(np.mean(sum(fft_eq[np.where(np.logical_and(freqs > band1[0], freqs < band1[1]))]**2))))\n band2list.append(np.sqrt(np.mean(sum(fft_eq[np.where(np.logical_and(freqs > band2[0], freqs < band2[1]))]**2))))\n band3list.append(np.sqrt(np.mean(sum(fft_eq[np.where(np.logical_and(freqs > band3[0], freqs < band3[1]))]**2))))\n stft = abs(core.stft(y[int(framed_dbn[i - 1] * sr):int(framed_dbn[i] * sr)]))\n chroma = np.mean(feature.chroma_stft(y=None, S=stft ** 2), axis=1)\n chromas.append(chroma)\n chromas = 
np.array(chromas).transpose()\n band1list = np.array(band1list).transpose()\n band2list = np.array(band2list).transpose()\n band3list = np.array(band3list).transpose()\n return (chromas, np.vstack([band1list, band2list, band3list]))", "def _raveled_index(self):\n return np.r_[:self.size]", "def _raveled_index(self):\n return np.r_[:self.size]", "def spectral_roll_off(sign, fs):\n output = 0\n f, ff = plotfft(sign, fs)\n cum_ff = np.cumsum(ff)\n value = 0.95*(sum(ff))\n\n for i in range(len(ff)):\n if cum_ff[i] >= value:\n output = f[i]\n break\n return output", "def convolution_spectrum(self, spectrum):\n ret = []\n for i in range(len(spectrum)):\n for j in range(i+1, len(spectrum)):\n diff = abs(spectrum[j] - spectrum[i])\n if diff > 0:\n ret.append(diff)\n return ret" ]
[ "0.6287113", "0.6141516", "0.59084785", "0.5892982", "0.58204657", "0.58018076", "0.5760177", "0.5751786", "0.5741839", "0.57074076", "0.5688546", "0.5685407", "0.5644722", "0.5622003", "0.55865675", "0.5584795", "0.55340075", "0.55338055", "0.55332595", "0.5530644", "0.55118614", "0.5485366", "0.5480902", "0.5451791", "0.5440077", "0.54110503", "0.5403925", "0.5394631", "0.5392846", "0.5390153", "0.53898144", "0.5373445", "0.53566235", "0.5341853", "0.5337421", "0.5333299", "0.5331422", "0.5329944", "0.532638", "0.5317189", "0.53068876", "0.5304885", "0.53033763", "0.5293819", "0.529041", "0.5285414", "0.5282521", "0.52806675", "0.5278986", "0.5275052", "0.5272193", "0.52626765", "0.5256638", "0.5245418", "0.5235681", "0.52337945", "0.52304703", "0.52159774", "0.52137893", "0.52023554", "0.519671", "0.5194344", "0.51924473", "0.51917034", "0.51795745", "0.5179019", "0.5176806", "0.5176143", "0.51746505", "0.5171886", "0.5167099", "0.5165039", "0.5164834", "0.5164264", "0.5163133", "0.5158835", "0.5155187", "0.5154681", "0.5153827", "0.5146746", "0.51430076", "0.5137407", "0.5135476", "0.51348203", "0.5133938", "0.5133912", "0.5120118", "0.5117745", "0.51087564", "0.5102916", "0.50984097", "0.5095239", "0.5094557", "0.5088199", "0.50847673", "0.50816363", "0.5074115", "0.5073009", "0.5073009", "0.5071997", "0.5069294" ]
0.0
-1
compute spectral index after continuum subtraction
def _get_indice(cls, w, flux, blue, red, band=None, unit='ew', degree=1, **kwargs):
    wi, fi = cls.continuum_normalized_region_around_line(w, flux, blue, red, band=band, degree=degree)
    if unit in (0, 'ew', 'EW'):
        return np.trapz(1. - fi, wi, axis=-1)
    else:
        m = np.trapz(fi, wi, axis=-1)
        m = -2.5 * np.log10(m / np.ptp(wi))
        return m
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_spectral_index(self):\n try:\n return self.alpha\n except AttributeError:\n return None", "def refractive_index_fused_silica(wavelength):\n wavelength_um = wavelength / 1000\n\n A0 = 2.104025406E+00\n A1 = -1.456000330E-04\n A2 = -9.049135390E-03\n A3 = 8.801830992E-03\n A4 = 8.435237228E-05\n A5 = 1.681656789E-06\n A6 = -1.675425449E-08\n A7 = 8.326602461E-10\n\n n = np.sqrt( A0 + A1 * wavelength_um ** 4 + A2 * wavelength_um ** 2 + A3 * wavelength_um ** -2 + \\\n A4 * wavelength_um ** -4 + A5 * wavelength_um ** -6 + A6 * wavelength_um ** -8 + A7 * wavelength_um ** -10 )\n\n return n", "def index_of_refraction(self):\n return self.microsphere.index_of_refraction(self.wavelength)", "def spectral():\n c = _si.c.value\n h = _si.h.value\n hc = h * c\n two_pi = 2.0 * np.pi\n inv_m_spec = si.m**-1\n inv_m_ang = si.radian / si.m\n\n return Equivalency(\n [\n (si.m, si.Hz, lambda x: c / x),\n (si.m, si.J, lambda x: hc / x),\n (si.Hz, si.J, lambda x: h * x, lambda x: x / h),\n (si.m, inv_m_spec, lambda x: 1.0 / x),\n (si.Hz, inv_m_spec, lambda x: x / c, lambda x: c * x),\n (si.J, inv_m_spec, lambda x: x / hc, lambda x: hc * x),\n (inv_m_spec, inv_m_ang, lambda x: x * two_pi, lambda x: x / two_pi),\n (si.m, inv_m_ang, lambda x: two_pi / x),\n (si.Hz, inv_m_ang, lambda x: two_pi * x / c, lambda x: c * x / two_pi),\n (si.J, inv_m_ang, lambda x: x * two_pi / hc, lambda x: hc * x / two_pi),\n ],\n \"spectral\",\n )", "def spectra_analysis(file_name, sky_file_name): \n\n # read file name and select out the id that we are dealing with\n curr_file_name = file_name.split('.')\n curr_file_name = curr_file_name[0].split('/')\n stk_f_n = curr_file_name[-1]\n cube_id = int(re.search(r'\\d+', stk_f_n).group())\n\n # read catalogue and obtain the HST redshift estimate\n #catalogue = np.load(\"data/matched_catalogue.npy\")\n catalogue = np.load(\"data/low_redshift_catalogue.npy\")\n cat_loc = np.where(catalogue[:,0] == cube_id)[0]\n cube_info = catalogue[cat_loc][0]\n \n hst_redshift = cube_info[7]\n\n # spectra and sky noise data\n spectra_data = spectrum_creator(file_name)\n wl_soln = wavelength_solution(file_name)\n sn_data = sky_noise(sky_file_name)\n\n galaxy_data = spectra_data['galaxy']\n\n # removing baseline from data\n base = peakutils.baseline(galaxy_data, 3)\n gd_mc = galaxy_data - base\n\n # scaling sky-noise to be similar to spectra data\n gd_max = np.amax(galaxy_data)\n sn_data_max = np.amax(sn_data)\n sn_scale = gd_max / sn_data_max\n\n sn_data = sn_data * sn_scale\n\n # spectra lines\n sl = {\n 'emis': {\n '[OII]': '3727',\n 'CaK': '3933',\n 'CaH': '3968',\n 'Hdelta': '4101', \n }, \n 'abs': {'K': '3934.777',\n }\n } \n\n # we can use the redshift from the HST catalogue to define the region to search for\n # the doublet in\n\n # lower and upper bound on wavelength range\n lower_lambda = (1+hst_redshift)*3600\n upper_lambda = (1+hst_redshift)*3850\n\n # x-axis data\n data_h_range = np.linspace(wl_soln['begin'], wl_soln['end'], wl_soln['steps'])\n mask = (lower_lambda < data_h_range) & (data_h_range < upper_lambda) \n\n lambda_data = data_h_range[mask]\n flux_data = gd_mc[mask] \n \n # Finding peaks with PeakUtils\n pu_peaks = peakutils.indexes(flux_data, thres=600, thres_abs=True)\n pu_peaks_x = peakutils.interpolate(lambda_data, flux_data, pu_peaks)\n\n pu_peaks_x = np.sort(pu_peaks_x)\n pu_peaks_x = pu_peaks_x[lower_lambda < pu_peaks_x]\n pu_peaks_x = pu_peaks_x[pu_peaks_x < upper_lambda]\n \n data_dir = 'cube_results/' + stk_f_n\n if not os.path.exists(data_dir):\n 
os.mkdir(data_dir)\n\n peaks_file = open(data_dir + '/' + stk_f_n + '_peaks.txt', 'w')\n peaks_file.write(\"Peaks found on \" + str(datetime.datetime.now()) + \"\\n\\n\")\n\n peaks_file.write(\"Number Wavelength \\n\")\n for i_peak in range(len(pu_peaks_x)):\n curr_peak = pu_peaks_x[i_peak]\n peaks_file.write(str(i_peak) + \" \" + str(curr_peak) + \"\\n\")\n\n # manually selecting which peak is the [OII] peak - given in wavelength\n if (pu_peaks_x.size != 0):\n otwo_wav = float(pu_peaks_x[0]) \n otwo_acc = float(sl['emis']['[OII]'])\n\n redshift = (otwo_wav / otwo_acc) - 1\n else:\n # accepting HST redshift if cannot find peak\n redshift = hst_redshift\n\n return {'gd_shifted': gd_mc, 'sky_noise': sn_data, 'spectra': sl, 'redshift': \n redshift, 'pu_peaks': pu_peaks_x}", "def calc(self,index,counter_values):\n gr = self.grSign * self.grPitch['Value'].value\n m = self.mSign * self.mPitch['Value'].value\n \n offsetG,offsetM = self.checkOffset()\n beta = self.toRadians(gr) - (math.pi/2.0) - offsetG\n theta = (math.pi/2.0) - (self.toRadians(m)) - offsetM\n alpha = (2.0*theta) + beta\n numerator = (math.sin(alpha) + math.sin(beta))\n denominator = (self.DiffrOrder * self.look_at_grx())\n wavelength = numerator / denominator\n \n if wavelength == 0.0:\n energy_physicalmot = 0.0\n else:\n energy_physicalmot = self.hc / wavelength\n #if self.FixedM2Pit: \n Cff = math.cos(beta)/math.cos(alpha)\n if energy_physicalmot < 0 :\n #warning: wavelength se vuelve negativo ... ??????\n energy_physicalmot = energy_physicalmot *(-1) \n \n # Real Energy is equal to the energy calculated by the encoders\n # minus an offset that depends on the same energy calculated by the \n # encoders:\n # E_physicalmot = Ereal + offset\n # with offset = a*Ereal + b\n # This implies that: Ereal = (Ephysicalmot - b)/(1+a) \n a_coeff = self.EnergyDP.a_offset_coeff\n b_coeff = self.EnergyDP.b_offset_coeff\n numerator = energy_physicalmot - b_coeff\n denominator = 1 + a_coeff\n energy = numerator / denominator\n \n if index == 1:\n return energy\n elif index == 2:\n return Cff", "def spectral_decrease(sign, fs):\n f, ff = plotfft(sign, fs)\n\n k = len(ff)\n soma_num = 0\n for a in range(2, k):\n soma_num = soma_num + ((ff[a]-ff[1])/(a-1))\n\n ff2 = ff[2:]\n if not np.sum(ff2):\n return 0\n else:\n soma_den = 1 / np.sum(ff2)\n return soma_den * soma_num", "def baseline(spectra):\n\n return spectra - np.mean(spectra, axis=0)", "def spectralIndices(\n x: Union[ee.Image, ee.ImageCollection],\n index: Union[str, List[str]] = \"NDVI\",\n G: Union[float, int] = 2.5,\n C1: Union[float, int] = 6.0,\n C2: Union[float, int] = 7.5,\n L: Union[float, int] = 1.0,\n cexp: Union[float, int] = 1.16,\n nexp: Union[float, int] = 2.0,\n alpha: Union[float, int] = 0.1,\n slope: Union[float, int] = 1.0,\n intercept: Union[float, int] = 0.0,\n gamma: Union[float, int] = 1.0,\n kernel: str = \"RBF\",\n sigma: Union[float, str] = \"0.5 * (a + b)\",\n p: Union[float, int] = 2,\n c: Union[float, int] = 1.0,\n online: bool = False,\n drop: bool = False,\n) -> Union[ee.Image, ee.ImageCollection]:\n platformDict = _get_platform_STAC(x)\n\n if isinstance(sigma, int) or isinstance(sigma, float):\n if sigma < 0:\n raise Exception(f\"[sigma] must be positive! Value passed: sigma = {sigma}\")\n\n if p <= 0 or c < 0:\n raise Exception(\n f\"[p] and [c] must be positive! 
Values passed: p = {p}, c = {c}\"\n )\n\n additionalParameters = {\n \"g\": float(G),\n \"C1\": float(C1),\n \"C2\": float(C2),\n \"L\": float(L),\n \"cexp\": float(cexp),\n \"nexp\": float(nexp),\n \"alpha\": float(alpha),\n \"sla\": float(slope),\n \"slb\": float(intercept),\n \"gamma\": float(gamma),\n \"p\": float(p),\n \"c\": float(c),\n }\n\n spectralIndices = _get_indices(online)\n indicesNames = list(spectralIndices.keys())\n\n if not isinstance(index, list):\n if index == \"all\":\n index = list(spectralIndices.keys())\n elif index in [\n \"vegetation\",\n \"burn\",\n \"water\",\n \"snow\",\n \"drought\",\n \"urban\",\n \"kernel\",\n ]:\n temporalListOfIndices = []\n for idx in indicesNames:\n if spectralIndices[idx][\"type\"] == index:\n temporalListOfIndices.append(idx)\n index = temporalListOfIndices\n else:\n index = [index]\n\n for idx in index:\n if idx not in list(spectralIndices.keys()):\n warnings.warn(\n f\"Index {idx} is not a built-in index and it won't be computed!\"\n )\n else:\n\n def temporalIndex(img):\n lookupDic = _get_expression_map(img, platformDict)\n lookupDic = {**lookupDic, **additionalParameters}\n kernelParameters = _get_kernel_parameters(img, lookupDic, kernel, sigma)\n lookupDic = {**lookupDic, **kernelParameters}\n lookupDicCurated = _remove_none_dict(lookupDic)\n if all(\n band in list(lookupDicCurated.keys())\n for band in spectralIndices[idx][\"bands\"]\n ):\n return img.addBands(\n img.expression(\n spectralIndices[idx][\"formula\"], lookupDicCurated\n ).rename(idx)\n )\n else:\n warnings.warn(\n f\"This platform doesn't have the required bands for {idx} computation!\"\n )\n return img\n\n if isinstance(x, ee.imagecollection.ImageCollection):\n x = x.map(temporalIndex)\n elif isinstance(x, ee.image.Image):\n x = temporalIndex(x)\n\n if drop:\n x = x.select(index)\n\n return x", "def signal_spectral(signal, FS):\n # check inputs\n if signal is None or signal == []:\n print(\"Signal is empty.\")\n\n # ensure numpy\n signal = np.array(signal)\n # f, spectrum = st.welch_spectrum(signal, sampling_rate=FS)\n spectrum = np.fft.fft(signal, FS)[:len(signal)//2]\n f = np.fft.fftfreq(len(signal))[:len(signal)//2]\n\n cum_ff = np.cumsum(spectrum)\n spect_diff = np.diff(spectrum)\n #energy, _ = st.signal_energy(spectrum, f)[:]\n\n args, names = [], []\n\n if dict['spectral_maxpeaks']['use'] == 'yes':\n # spectral_maxpeaks\n try:\n spectral_maxpeaks = np.sum([1 for nd in range(len(spect_diff[:-1])) if (spect_diff[nd+1]<0 and spect_diff[nd]>0)])\n except:\n spectral_maxpeaks = None\n args += [spectral_maxpeaks]\n names += ['spectral_maxpeaks']\n\n # if dict['spect_var']['use'] == 'yes':\n # # spect_variation\n # try:\n # spect_var = np.convolve(energy)\n # spect_var /= np.max(np.abs(spect_var))\n # except:\n # spect_var = None\n # args += [spect_var]\n # names += ['spect_var']\n\n if dict['curve_distance']['use'] == 'yes':\n # curve_distance\n try:\n curve_distance = np.sum(np.linspace(0, cum_ff[-1], len(cum_ff)) - cum_ff)\n except:\n curve_distance = None\n args += [curve_distance]\n names += ['curve_distance']\n\n if dict['spectral_roll_off']['use'] == 'yes':\n # spectral_roll_off\n try:\n spectral_roll_off = spectral_roll(f, spectrum, cum_ff, 0.95)[0]\n except:\n spectral_roll_off = None\n args += [spectral_roll_off]\n names += ['spectral_roll_off']\n\n if dict['spectral_roll_on']['use'] == 'yes':\n # spectral_roll_on\n try:\n spectral_roll_on = spectral_roll(f, spectrum, cum_ff, 0.05)[0]\n except:\n spectral_roll_on = None\n args += [spectral_roll_on]\n 
names += ['spectral_roll_on']\n\n if dict['spectral_dec']['use'] == 'yes':\n # spectral_decrease\n try:\n spectral_dec = (1/np.sum(spectrum)) * np.sum((spectrum[:] - spectrum[1])/np.linspace(1, len(spectrum), len(spectrum),1))\n except:\n spectral_dec = None\n args += [spectral_dec]\n names += ['spectral_dec']\n\n if dict['spectral_slope']['use'] == 'yes':\n # spectral_slope\n sum_f = np.sum(f)\n len_f = len(f)\n try:\n spectral_slope = (len_f * np.dot(f, spectrum) - sum_f * np.sum(spectrum)) / (len_f * np.dot(f, f) - sum_f ** 2)\n except:\n spectral_slope = None\n args += [spectral_slope]\n names += ['spectral_slope']\n\n sum_spectrum = np.sum(spectrum)\n norm_spectrum = spectrum / sum_spectrum\n # spectral_centroid\n try:\n spectral_centroid = np.dot(f, norm_spectrum)\n except:\n spectral_centroid = None\n\n # spectral_spread\n try:\n spectral_spread = np.dot(((f - spectral_centroid) ** 2), norm_spectrum)\n except:\n spectral_spread = None\n\n if dict['spectral_spread']['use'] == 'yes':\n args += [spectral_spread]\n names += ['spectral_spread']\n\n if dict['spectral_kurtosis']['use'] == 'yes':\n # spectral_kurtosis\n try:\n spectral_kurtosis = np.sum(((f - spectral_centroid) ** 4) * norm_spectrum) / (spectral_spread**2)\n except:\n spectral_kurtosis = None\n args += [spectral_kurtosis]\n names += ['spectral_kurtosis']\n\n if dict['spectral_skewness']['use'] == 'yes':\n # spectral_skewness\n try:\n spectral_skewness = np.sum(((f - spectral_centroid) ** 3) * norm_spectrum) / (spectral_spread ** (3 / 2))\n except:\n spectral_skewness = None\n args += [spectral_skewness]\n names += ['spectral_skewness']\n\n if dict['max_frequency']['use'] == 'yes':\n # max_frequency\n try:\n max_frequency = f[np.where(cum_ff > cum_ff[-1]*0.95)[0][0]]\n except:\n max_frequency = None\n args += [max_frequency]\n names += ['max_frequency']\n\n if dict['fundamental_frequency']['use'] == 'yes':\n # fundamental_frequency\n try:\n fundamental_frequency = f[np.where(cum_ff > cum_ff[-1]*0.5)[0][0]]\n except:\n fundamental_frequency = None\n args += [fundamental_frequency]\n names += ['fundamental_frequency']\n\n # if dict['max_power_spectrum']['use'] == 'yes':\n # # max_power_spectrum\n # try:\n # max_power_spectrum = np.max(spectrum)\n # except:\n # max_power_spectrum = None\n # args += max_power_spectrum\n # names += 'max_power_spectrum'\n\n # if dict['mean_power_spectrum']['use'] == 'yes':\n # # mean_power_spectrum\n # try:\n # mean_power_spectrum = np.mean(spectrum)\n # except:\n # mean_power_spectrum = None\n # args += mean_power_spectrum\n # names += 'mean_power_spectrum'\n #\n # if dict['spectral_skewness']['use'] == 'yes':\n # try:\n # spectral_skewness = np.mean(spectrum)\n # except:\n # spectral_skewness = None\n # args += spectral_skewness\n # names += 'spectral_skewness'\n #\n # if dict['spectral_kurtosis']['use'] == 'yes':\n # try:\n # spectral_kurtosis = np.mean(spectrum)\n # except:\n # spectral_kurtosis = None\n # args += spectral_kurtosis\n # names += 'spectral_kurtosis'\n\n # if dict['spectral_hist_']['use'] == 'yes':\n # # histogram\n # try:\n # _hist = list(np.histogram(spectrum, bins=int(np.sqrt(len(spectrum))), density=True)[0])\n # except:\n # if len(signal) > 1:\n # _hist = [None] * int(np.sqrt(len(signal)))\n # else:\n # _hist = [None]\n # args += [i for i in _hist]\n # names += ['spectral_hist_' + str(i) for i in range(len(_hist))]\n\n #return utils.ReturnTuple(tuple(args), tuple(names))\n return args, names", "def diff_index_calc(oct_abund_list1, oct_abund_list2):\n rel_index_list = []\n 
abs_index_list = []\n smty_index_list = []\n for i in range(10):\n abund_data_array = sc.asarray(oct_abund_list1[i], dtype='double')\n abund_sim_array = sc.asarray(oct_abund_list2[i], dtype = 'double')\n \n # make the length of the arrays similar to each other\n if len(abund_data_array) < len(abund_sim_array):\n small_len = abund_data_array\n long_len = abund_sim_array\n else:\n small_len = abund_sim_array\n long_len = abund_data_array\n diff = len(long_len) - len(small_len) \n small_len = sc.append(small_len, [0]*diff)\n \n relative_index_vect = abs(long_len - small_len)/long_len \n rel_index_list.append(sum(relative_index_vect)/len(relative_index_vect))\n \n absolute_index_vect = abs(long_len - small_len)\n abs_index_list.append(sum(absolute_index_vect)/len(absolute_index_vect))\n \n similarity_index_vect = []\n for i in range(len(long_len)):\n similarity_index_vect.append(sc.minimum(long_len[i], small_len[i])/sc.amax([long_len[i], small_len[i]]))\n \n smty_index_list.append(sum(similarity_index_vect)/len(similarity_index_vect)) \n \n rel_index_final = sum(rel_index_list)/10\n abs_index_final = sum(abs_index_list)/10\n smty_index_final = sum(smty_index_list)/10\n \n return (rel_index_final, abs_index_final, smty_index_final)", "def get_spectral_response(wavelengths_arr, stack):\n\n resolution = 1\n for i, re_index in enumerate(stack.index):\n step_size = stack.thickness.sum() / 2 ** 17\n z0 = np.linspace(0, stack.thickness[i], round(stack.thickness[i] / step_size))\n resolution += len(z0)\n\n electric_tot_te = np.zeros([resolution, len(wavelengths_arr)], dtype=complex)\n electric_tot_tm = np.zeros([resolution, len(wavelengths_arr)], dtype=complex)\n reflectivity_te = np.zeros(len(wavelengths_arr), dtype=complex)\n reflectivity_tm = np.zeros(len(wavelengths_arr), dtype=complex)\n transmission_te = np.zeros(len(wavelengths_arr), dtype=complex)\n transmission_tm = np.zeros(len(wavelengths_arr), dtype=complex)\n index_tot = np.zeros([resolution, len(wavelengths_arr)], dtype=complex)\n theta_tot = np.zeros([len(stack.index) + 1, wavelengths_arr.size], dtype=complex)\n\n a0 = 1 # Initial amplitude of electric field going toward the coating\n b0 = 0 # Initial amplitude of electric field going back the coating (if 0, no counter propagating light)\n theta = 0 # angle of the beam with respect to the coating\n\n for i, lam in enumerate(wavelengths_arr):\n # print a progressbar in the console\n print_progressbar(i, len(wavelengths_arr), suffix = '%')\n electric_tot_te[:, i], electric_tot_tm[:, i], reflectivity_te[i], reflectivity_tm[i], transmission_te[i], \\\n transmission_tm[i], index_tot, L, theta_tot = transfer_matrix_method(stack, a0, b0, lam, theta)\n return reflectivity_te, transmission_te, 1 - (reflectivity_te + transmission_te)", "def get_index(self):\n return (np.sqrt(self.dielectric))", "def apply_electronics_gain(full_frame, difference):\n #electronics_gain_odd = [0.0601, 0.0596, 0.0604, 0.0605]\n #electronics_gain_even = [0.0602, 0.0599, 0.0605, 0.0608]\n\n electronics_gain_odd = [0.0601, 0.0596, 0.0604, 0.0605]\n electronics_gain_even = [0.0602, 0.0599, 0.0605, 0.0608]\n\n all_quads = []\n num_quads = full_frame.shape[0]\n for quads in range(0, num_quads):\n active_quad = full_frame[quads, :, :]\n if difference[quads] < 0: # Note: Difference is odd-even\n gain_even = 1/electronics_gain_even[quads]\n gain_odd = 1/electronics_gain_odd[quads]\n elif difference[quads] > 0:\n gain_even = 1/electronics_gain_odd[quads]\n gain_odd = 1/electronics_gain_even[quads]\n gain_even = 
1/electronics_gain_even[quads]\n gain_odd = 1/electronics_gain_odd[quads]\n spec_pix, spat_pix = active_quad.shape\n gain_applied_quad = np.array([[0]*spec_pix]*spat_pix)\n even_detector_active_quad = gain_even*active_quad[:, ::2]\n odd_detector_active_quad = gain_odd*active_quad[:, 1::2]\n\n gain_applied_quad = np.reshape(gain_applied_quad, (spec_pix, spat_pix))\n gain_applied_quad[:, ::2] = even_detector_active_quad\n gain_applied_quad[:, 1::2] = odd_detector_active_quad\n #print(np.max(gain_applied_quad))\n #cc\n all_quads.append(gain_applied_quad)\n #cc\n return np.array(all_quads)", "def get_experimental_spectra(mol):\n\n data = pd.read_csv(mol, sep=',')\n wavelength = data.values[:, 0]\n\n absorption = data.values[:, 1]\n\n func = interp1d(wavelength, absorption, kind='quadratic')\n wavelength_new = 1. / np.linspace(1. / wavelength.max(), 1. / wavelength.min(), 100)\n absorption_new = func(wavelength_new)\n absorption_new *= 100. / absorption_new.max()\n\n return wavelength_new, absorption_new", "def music(csi_corr, csi_target, Ntx, Nrx, d_tx, d_rx, t):\n\n In = 0\n s = phase_correction(csi_corr, csi_target)\n s_lin = (s[:, :, 0, t:t + 2].reshape(6, 2, order='F'))\n\n '''Compute the covariance matrix and the eigendecompositon'''\n R_hat = np.cov(s_lin)\n D, Q = ln.eig(R_hat)\n\n '''Sort the eigenvalues in D'''\n Do = np.abs(D)\n D = np.sort(Do)[::-1]\n I = np.argsort(Do)[::-1]\n Q = Q[:, I]\n\n ''' Compute the Number of signal that are significative'''\n T = np.cumsum(np.real(D))\n for i in range(1, 1, np.size(T)):\n if T(i) >= 0.99 * T(np.size(T)):\n In = i\n break\n\n ''' Get the signal eigenvectors'''\n In = 0 # take the first signal\n Qs = Q[:, :In]\n\n ''' Get the noise eigenvectors'''\n Qn = Q[:, In + 1:]\n\n ''' Angles at which MUSIC Pseudospectrum will be computed '''\n angles1 = np.arange(-90, 90, 1)\n angles2 = np.arange(-90, 90, 1)\n\n '''Compute steering vectors corresponding values in angles'''\n a1 = np.exp(-1.j * 2 * np.pi * d_rx * np.tensordot(arange(Nrx), sin(angles1 * np.pi / 180), 0))\n a2 = np.exp(-1.j * 2 * np.pi * d_tx * np.tensordot(arange(Ntx), sin(angles1 * np.pi / 180), 0))\n\n '''Compute MUSIC \"spectrum\" '''\n music_spectrum = np.zeros((np.size(angles1), np.size(angles2)), dtype=complex)\n for k in range(1, np.size(angles2)):\n for j in range(1, np.size(angles1)):\n K = np.kron(a1[:, j], a2[:, k])\n s = dot(K.T, Qn)\n music_spectrum[j, k] = 1 / dot(abs(s), abs(s).T)\n\n ''' compute the mesh and plot the surf of the pseudospectrum '''\n\n fig = plt.figure()\n ax = fig.gca(projection='3d')\n x = angles2\n y = angles1\n X, Y = np.meshgrid(x, y)\n Z = np.abs(np.squeeze(music_spectrum))\n ax = fig.add_subplot(111, projection='3d')\n ax.set_ylabel('AoA')\n ax.set_xlabel('AoD')\n ax.set_xlim3d(-90, 90)\n ax.set_ylim3d(-90, 90)\n ax.plot_surface(X, Y, Z, rstride=2, cstride=2, cmap=cm.jet, alpha=0.7, linewidth=0.25)\n\n ''' detect the peaks corresponding to DoD and DoA '''\n detect = detect_peaks(Z)\n index_max = np.column_stack(np.where(detect))\n x_ind = index_max[:, 0]\n y_ind = index_max[:, 1]\n tab = (np.transpose(np.array((Z[x_ind, y_ind], x[x_ind], y[y_ind])))).tolist()\n tab.sort(key=lambda e: e[0], reverse=True)\n myarray = np.asarray(tab[0])\n angles = myarray[1:]\n plt.show()\n\n return angles", "def refractive_index_porous_silica(wavelength, porosity=0.5):\n wavelength_um = wavelength / 1000\n n = np.sqrt(1 + \\\n (0.6961663 * wavelength_um ** 2) / (\n wavelength_um ** 2 - 0.06840432 ** 2) + \\\n (0.4079426 * wavelength_um ** 2) / (\n wavelength_um ** 
2 - 0.11624142 ** 2) + \\\n (0.8974794 * wavelength_um ** 2) / (\n wavelength_um ** 2 - 9.8961612 ** 2)\n )\n n_air = 1.00029\n\n n_total = np.sqrt(n ** 2 * (1 - porosity) + n_air ** 2 * (porosity)) + 0 * 1j\n\n # k0 = 5e-6\n # k1 = 5e-7\n # wavelength0 = 0.31\n # wavelength1 = 0.36\n\n # n_total = n_total + 1j*refractive_index_imaginary_silica(wavelength)*1e4\n # n_total = n_total + 1j*np.exp( np.log(k0) + np.log(k1) * (wavelength - wavelength0)/(wavelength1-wavelength0))\n\n return n_total", "def referenceIllum(temp, wavelength):\n ct=temp\n if ct <= 0:\n return 0\n if ct < 4000:\n return planckian(ct, wavelength)\n if ct < 5000:\n p=planckian(ct, wavelength)\n d=dseries(ct, wavelength)\n return p+(d-p)*(ct-4000)/1500.0\n return dseries(ct, wavelength)", "def comp_output_spectra(self):\n assert(hasattr(self,'r'))\n \n self.nx=int(self.nx)\n \n r_mat=self.r.T.reshape(self.nx,self.nx,self.N)\n\n in_allfreqs = np.fft.fftshift(np.fft.fftfreq(self.nx,d=self.L/self.nx))\n \n self.freqs=in_allfreqs[self.nx/2:]\n \n r_dft_flat=np.fft.fftshift(np.fft.fft2(r_mat,axes=[0,1]),axes=[0,1])*(self.L/self.nx)**2\n\n r_pw=abs(r_dft_flat)**2 \n r_pw_profiles=gl.dft2d_profiles(r_pw)\n \n self.re_pw_profile=np.mean(r_pw_profiles,axis=0)\n self.he_pw_profile=self.inputs.in_mean_pw_profile", "def spectral(w, s=1.0):\n n_in, n_out = w.size()\n n = max(n_out, n_in)\n gain = s / math.sqrt(n)\n return w.normal_(0, 1).mul_(gain)", "def _getscanind(self):\n \n zamin = self.za.min()\n first = np.where(self.za==zamin)[0]\n self.scan = np.zeros(self.spec.shape[0])\n if zamin < 0:\n cs = first[np.where((first - np.roll(first, 1)) != 1)[0]]\n ss = first[np.where((np.roll(first,-1) - first) != 1)[0]] + 1\n ce = ss \n se = np.roll((cs - 1) % self.za.size, -1) + 1\n for k, val in enumerate(cs):\n self.scan[val:se[k] + 1] = k\n else:\n moves = np.diff(self.za)\n max_ind = np.where(moves==moves.max())[0]\n turnover = self.za.size\n diffs = np.diff(max_ind)\n if np.unique(diffs).size > 1:\n raise ValueError, 'Can\\'t deal with non-uniform cal data yet.'\n if max_ind.size > 1:\n turnover = diffs[0]\n cs = ce = np.array([])\n ss = np.arange(self.za.size)[::turnover]\n se = np.roll((ss - 1) % self.za.size, -1)\n for k, val in enumerate(ss):\n self.scan[val:se[k] + 1] = k\n \n self.ind = {'cs': cs, 'ce': ce, 'ss': ss, 'se': se}\n self.nscan = np.unique(self.scan).size", "def coherence_from_spectral(Sw):\r\n\r\n Sxx = Sw[0, 0].real\r\n Syy = Sw[1, 1].real\r\n\r\n Sxy_mod_sq = (Sw[0, 1] * Sw[1, 0]).real\r\n Sxy_mod_sq /= Sxx\r\n Sxy_mod_sq /= Syy\r\n return Sxy_mod_sq", "def spect(self):\n return 1", "def fft_index(fft, frequency):\n\treturn 2 * int(len(fft) * frequency / AUDIO_RATE) # Not entirely clear on why I need to multiply by 2 here. 
I don't need to if I use fft instead of rfft, but then I get a bunch of crazy high frequency FFT data, or is it complex numbers or something...", "def wavelength(refractive_index, omega):\n return 2 * np.pi * cgs.c / (refractive_index * omega)", "def get_sound_index(self):\n # Return difference between the two last compared elements\n lhs = ThreadManagment.sort_data_by_thread[self.thread.ident].last_cmp_left\n #rhs = ThreadManagment.last_cmp_right_by_thread.get(self.thread.ident, 0)\n #return round((lhs + rhs) / 2)\n return lhs", "def spectrum_creator(file_name):\n file_data = read_file(file_name)\n image_data = file_data[1]\n\n segmentation_data = file_data[2]\n\n collapsed_data = image_collapser(file_name)\n\n # spectrum for central pixel\n cp_bright = []\n for key, data in collapsed_data.items():\n lgst_val = data.argmax()\n lgst_loc = unravel_index(data.argmax(), data.shape)\n cp_bright.append(lgst_loc)\n\n cp_loc = 0\n if ( cp_bright[0] == cp_bright[1] ):\n cp_loc = cp_bright[0]\n else: \n cp_loc = cp_bright[1]\n\n cp_spec_data = image_data[:][:,cp_loc[0]][:,cp_loc[1]]\n\n # spectrum as defined by the segmentation area\n curr_file_name = file_name.split('.')\n curr_file_name = curr_file_name[0].split('/')\n stk_f_n = curr_file_name[-1]\n cube_id = [int(x) for x in re.findall('\\d+', stk_f_n)][0]\n\n # locating where the galaxy pixels are from the cube_id\n seg_curr_cube = np.where(segmentation_data == cube_id)\n scc_rows, scc_cols = seg_curr_cube\n\n #np.set_printoptions(threshold=np.nan)\n #print(segmentation_data)\n\n collapsed_spectrum = np.zeros([np.shape(image_data)[0], len(scc_rows)])\n for i_r in range(len(scc_rows)):\n # I want to pull out each pixel and store it into the collapsed spectrum array\n collapsed_spectrum[:,i_r] = image_data[:,scc_rows[i_r],scc_cols[i_r]]\n \n galaxy_spectrum = np.zeros(np.shape(image_data)[0])\n for i_ax in range(len(galaxy_spectrum)):\n galaxy_spectrum[i_ax] = np.nansum(collapsed_spectrum[i_ax])\n \n return {'central': cp_spec_data, 'galaxy': galaxy_spectrum, \n 'segmentation': segmentation_data}", "def refractive_index(self, theta):\n sin2th = np.sin(theta)**2\n cos2th = np.cos(theta)**2\n\n A = self.S * sin2th + self.P * cos2th\n B = self.R * self.L * sin2th + self.P * self.S * (1 + cos2th)\n F = np.sqrt(((self.R * self.L - self.P * self.S) * sin2th)**2\n + (2 * self.P * self.D)**2 * cos2th) # contents can never be negative\n n_fast = np.sqrt((B - F) / (2 * A))\n n_slow = np.sqrt((B + F) / (2 * A))\n return np.concatenate((n_fast[...,np.newaxis], n_slow[...,np.newaxis]), axis=-1)", "def stZCR(frame):\n count = len(frame)\n countZ = np.sum(np.abs(np.diff(np.sign(frame)))) / 2\n return (np.float64(countZ) / np.float64(count-1.0))", "def calc_elv_spectra(self, red, comp, src):\n if ((src in red.data.keys())\n & (src in red.data.keys())):\n # check that the wavelenth grids are identical\n delt_wave = red.data[src].waves - comp.data[src].waves\n if np.sum(np.absolute(delt_wave)) > 0.01*u.micron:\n warnings.warn(\"wavelength grids not equal for %s\" % src,\n UserWarning)\n else:\n # reference band\n red_V = red.data['BAND'].get_band_mag('V')\n comp_V = comp.data['BAND'].get_band_mag('V')\n\n # setup the needed variables\n self.waves[src] = red.data[src].waves\n n_waves = len(self.waves[src])\n self.exts[src] = np.zeros(n_waves)\n self.uncs[src] = np.zeros(n_waves)\n self.npts[src] = np.zeros(n_waves)\n\n # only compute the extinction for good, positive fluxes\n print(comp.data[src].npts)\n print(comp.data[src].fluxes)\n indxs, = 
np.where((red.data[src].npts > 0)\n & (comp.data[src].npts > 0)\n & (red.data[src].fluxes.value > 0)\n & (comp.data[src].fluxes.value > 0))\n self.exts[src][indxs] = \\\n (-2.5*np.log10(red.data[src].fluxes[indxs]\n / comp.data[src].fluxes[indxs])\n + (comp_V[0] - red_V[0]))\n self.uncs[src][indxs] = np.sqrt(\n np.square(_flux_unc_as_mags(red.data[src].fluxes[indxs],\n red.data[src].uncs[indxs]))\n + np.square(_flux_unc_as_mags(comp.data[src].fluxes[indxs],\n comp.data[src].uncs[indxs]))\n + np.square(red_V[1])\n + np.square(comp_V[1]))\n self.npts[src][indxs] = np.full(len(indxs), 1)", "def stZCR(frame):\n count = len(frame)\n countZ = numpy.sum(numpy.abs(numpy.diff(numpy.sign(frame)))) / 2\n return (numpy.float64(countZ) / numpy.float64(count-1.0))", "def savi(self,\n img):\n return (img.select(['NIR']).subtract(img.select(['RED'])).multiply(1 + self.const))\\\n .divide(img.select(['NIR']).add(img.select(['RED'])).add(self.const))\\\n .select([0], ['SAVI']).multiply(self.scale_factor).toInt16()", "def extract_mel_spectrogram(wav_path, X, y, index, curr_speaker_num):\r\n Sxx = spectrogram_converter.mel_spectrogram(wav_path)\r\n for i in range(Sxx.shape[0]):\r\n for j in range(Sxx.shape[1]):\r\n X[index, 0, i, j] = Sxx[i, j]\r\n y[index] = curr_speaker_num\r\n return 1", "def calc(self, wavelength):\n if wavelength < self.minWavelength or wavelength > self.maxWavelength:\n return 0\n mm=wavelength%self.interval\n s=self._calcd(wavelength-mm)\n if mm==0:\n return s\n m=mm*1.0/self.interval\n e=self._calcd((wavelength-mm)+self.interval)\n return s+(e-s)*m", "def refractive_index(self):\n wd = np.arange(80,820,10)\n nd = self.boundary.imat.refractive_index(wd) \n\n plt.plot(wd, nd)\n\n return wd, nd", "def create_spectrum(warr, farr, earr=None, sub_cont=False):\n spec=Spectrum.Spectrum(warr, farr, earr, stype='continuum')\n #normalize the spectra to 5500\n n5500 = np.interp(5500, spec.wavelength, spec.flux)\n spec.flux = spec.flux/n5500\n if earr is not None:\n spec.var = spec.var/n5500\n #add in continuum subtraction\n if sub_cont:\n coef = np.polyfit(spec.wavelength, spec.flux, 9)\n spec.flux = spec.flux - np.polyval(coef, spec.wavelength) \n return spec", "def findSpectralAxis(img):\n if (type(img) == str):\n myia = createCasaTool(iatool)\n myia.open(img)\n else:\n myia = img\n mycs = myia.coordsys()\n try:\n iax = mycs.findaxisbyname(\"spectral\")\n except:\n print \"ERROR: can't find spectral axis. 
Assuming it is 3.\"\n iax = 3\n mycs.done()\n return iax", "def offset(freqs, re0, im0):\n return re0 + 1j * im0", "def to_spectral_img(data):\n assert data.size(-1) == 2\n\n spectral_vol = torch.zeros([data.size(-2), data.size(-2), data.size(-2)])\n\n for i in range(data.size(-2)):\n kspc1 = torch.zeros(data.size())\n kspc1[:, i, :] = data[:, i, :]\n img1 = ifft2(kspc1)\n img1_abs = complex_abs(img1)\n\n spectral_vol[i, :, :] = img1_abs\n\n return spectral_vol", "def calc(self,index, counter_values):\n try:\n angles = self.ik220_dev.read_attribute('Angles').value\n if index == 9:\n return sum(angles[:3])/3.0 # Excluded channel 4 of grating pitch encoder because of problems of Homing in the last header of the RON grating encoder.\n elif index == 10:\n return sum(angles[4:6])/2.0 # Modified from 4 channels to 2 channels because of problems of Homing in the 2 last headers of the RON mirror3 encoder.\n else:\n return angles[index - 1]\n except:\n return 1e-100", "def calc_rsi(image):\n\n # roll axes to conventional row,col,depth\n img = np.rollaxis(image, 0, 3)\n\n # bands: Coastal(0), Blue(1), Green(2), Yellow(3), Red(4), Red-edge(5), NIR1(6), NIR2(7)) Multispectral\n COAST = img[:, :, 0]\n B = img[:, :, 1]\n G = img[:, :, 2]\n Y = img[:, :, 3]\n R = img[:, :, 4]\n RE = img[:, :, 5]\n NIR1 = img[:, :, 6]\n NIR2 = img[:, :, 7]\n\n arvi = old_div((NIR1 - (R - (B - R))), (NIR1 + (R - (B - R))))\n dd = (2 * NIR1 - R) - (G - B)\n gi2 = (B * -0.2848 + G * -0.2434 + R * -0.5436 + NIR1 * 0.7243 + NIR2 * 0.0840) * 5\n gndvi = old_div((NIR1 - G), (NIR1 + G))\n ndre = old_div((NIR1 - RE), (NIR1 + RE))\n ndvi = old_div((NIR1 - R), (NIR1 + R))\n ndvi35 = old_div((G - R), (G + R))\n ndvi84 = old_div((NIR2 - Y), (NIR2 + Y))\n nirry = old_div((NIR1), (R + Y))\n normnir = old_div(NIR1, (NIR1 + R + G))\n psri = old_div((R - B), RE)\n rey = old_div((RE - Y), (RE + Y))\n rvi = old_div(NIR1, R)\n sa = old_div(((Y + R) * 0.35), 2) + old_div((0.7 * (NIR1 + NIR2)), 2) - 0.69\n vi1 = old_div((10000 * NIR1), (RE) ** 2)\n vire = old_div(NIR1, RE)\n br = (old_div(R, B)) * (old_div(G, B)) * (old_div(RE, B)) * (old_div(NIR1, B))\n gr = old_div(G, R)\n rr = (old_div(NIR1, R)) * (old_div(G, R)) * (old_div(NIR1, RE))\n\n ###Built-Up indices\n wvbi = old_div((COAST - RE), (COAST + RE))\n wvnhfd = old_div((RE - COAST), (RE + COAST))\n\n ###SIs\n evi = old_div((2.5 * (NIR2 - R)), (NIR2 + 6 * R - 7.5 * B + 1))\n L = 0.5 # some coefficient for Soil Adjusted Vegetation Index (SAVI) DO NOT INCLUDE IN FEATURES\n savi = old_div(((1 + L) * (NIR2 - R)), (NIR2 + R + L))\n msavi = old_div((2 * NIR2 + 1 - ((2 * NIR2 + 1) ** 2 - 8 * (NIR2 - R)) ** 0.5), 2)\n bai = old_div(1.0, ((0.1 + R) ** 2 + 0.06 + NIR2))\n rgi = old_div(R, G)\n bri = old_div(B, R)\n\n rsi = np.stack(\n [arvi, dd, gi2, gndvi, ndre, ndvi, ndvi35, ndvi84, nirry, normnir, psri, rey, rvi, sa, vi1, vire, br, gr, rr,\n wvbi, wvnhfd, evi, savi, msavi, bai, rgi, bri],\n axis=2)\n\n return rsi", "def wave_samples(self):\n return self._quantized_subsamples", "def getSpectralEnergyFrame(datatype, traceList, outfile, channelStart, channelEnd, winlen=1000):\r\n assert(datatype in ['mat', 'segy']) \r\n if datatype=='segy':\r\n st = obspy.Stream(traceList) \r\n else:\r\n raise Exception('not implemented')\r\n sampleRate = traceList[0].stats.sampling_rate\r\n\r\n wlen = 256\r\n nfft = int(_nearest_pow_2(wlen))\r\n npts = len(st[0].data)\r\n per_lap = 0.9\r\n if nfft > npts:\r\n nfft = int(_nearest_pow_2(npts / 8.0))\r\n nlap = int(nfft * float(per_lap))\r\n\r\n nTraces = 
len(traceList)\r\n nperlen = len(traceList[0].data)\r\n if winlen>=nperlen:\r\n nFrames=1\r\n else:\r\n nFrames = int(nperlen/winlen)\r\n\r\n print ('sample rate is ', sampleRate, 'nfft=', nfft, 'noverlap', nlap)\r\n\r\n for iframe in range(nFrames): \r\n Emat = None\r\n for itr in range(0,nTraces):\r\n F,T,SXX = signal.spectrogram(np.array(st[itr].data[iframe*winlen:(iframe+1)*winlen]), fs=sampleRate, \r\n window='hann', nfft=nfft)\r\n #sum along frequency axis \r\n energy = np.sum((SXX[1:,:]/np.max(SXX[1:,:])),axis=0)\r\n #energy = np.abs(np.log10(np.abs(energy/np.max(energy)))*10.0)\r\n #energy = np.log10(energy)*10.0\r\n if Emat is None:\r\n Emat = np.zeros((nTraces, len(T)))\r\n Emat[itr,:]=energy\r\n \r\n #datafile = 'spectralenergy_{0}_ch{1}_{2}.npy'.format(outfile,channelStart,channelEnd)\r\n #np.save(datafile,Emat)\r\n #scale to 0 255\r\n print (Emat.max())\r\n Emat = (255.0 / Emat.max() * (Emat - Emat.min())).astype(np.uint8)\r\n im = Image.fromarray(Emat, 'L')\r\n imgfile = 'spectralenergy_{0}_ch{1}_{2}_{3}.png'.format(outfile,channelStart,channelEnd,iframe) \r\n im.save(imgfile)\r\n histogram = im.histogram()\r\n imgfile = 'spectralhist_{0}_ch{1}_{2}_{3}.png'.format(outfile,channelStart,channelEnd,iframe) \r\n plt.figure()\r\n plt.plot(histogram)\r\n plt.savefig(imgfile)", "def _get_spectrograms(self, index):\n file = self._waves[index]\n\n # get hyper-parameters\n hp = self.hparams\n\n w, _ = lr.load(file, sr=hp.sr)\n w, _ = lr.effects.trim(w) # triming\n\n linear = audio.wave2spec(w, hp)\n\n return linear, w", "def amplitude_diff(config, i):\n flipped = jax.ops.index_mul(config, jax.ops.index[:, [i, (i + 1) % N]], -1)\n logpsi_flipped = log_amplitude(model, flipped)\n return jnp.exp(logpsi_flipped - logpsi)", "def amplitude_diff(config, i):\n flipped = jax.ops.index_mul(config, jax.ops.index[:, i], -1)\n logpsi_flipped = log_amplitude(model, flipped)\n return jnp.exp(logpsi_flipped - logpsi)", "def modulation_index(phase: np.ndarray, amplitude: np.ndarray) -> float:\n indices = indices_of_binned_phase(phase, num_bins=12)\n avg_amps = np.array([np.median(amplitude[idx]) for idx in indices],\n dtype=np.float64)\n return _modulation_index(avg_amps)", "def constract(phase, magnitude):\n new_spectrum = magnitude * np.exp(1j * phase)\n\n # reverse the shift and FFT\n f_ishift = np.fft.ifftshift(new_spectrum)\n img_back = np.fft.ifft2(f_ishift)\n \n return np.abs(img_back)", "def spectate(self):\n pass", "def calc_spectra(stream, data_type):\n \n import numpy as np\n from mtspec import mtspec\n from scipy import interpolate\n from scipy.stats import binned_statistic \n\n # Read in file \n tr = stream[0]\n data = tr.data\n delta = tr.stats.delta\n samprate = tr.stats.sampling_rate\n npts = tr.stats.npts\n \n # Determine nyquist frequency\n nyquist = 0.5 * samprate\n \n\n # Calc spectra amplitudes and frequencies \n # Switched number of tapers from 7 to 5. Decreases computation time and\n # results are similar\n amp_squared, freq = mtspec(data, delta=delta, time_bandwidth=4, \n number_of_tapers=5, nfft=npts, quadratic=True)\n \n # Convert from power spectra to amplitude spectra\n amp = np.sqrt(amp_squared)\n \n # Use scipy interpolate function to fill in data in missing bins\n f = interpolate.interp1d(freq, amp)\n freq_new = np.arange(np.min(freq), np.max(freq), 0.0001)\n amp_new = f(freq_new)\n\n # Remove certain frequencies that are too low or high. 
\n indexes = []\n \n for i, val in enumerate(freq_new):\n \n # Remove frequencies below 1/2 length of record\n if val <= 1/(delta*npts*0.5) :\n indexes.append(i)\n \n # Remove frequencies above 10 Hz for sm data because of the way it was processed \n elif val > 10 and data_type == 'sm':\n indexes.append(i)\n\n # Remove frequencies above nyquist frequency for disp data\n # (it's already removed in the previous step for sm data)\n elif val > nyquist and data_type == 'disp': \n indexes.append(i)\n \n # Remove any duplicate indexes\n indexes = np.unique(indexes)\n freq_new = np.delete(freq_new,indexes)\n amp_new = np.delete(amp_new,indexes) \n \n # Set up bins\n if data_type == 'sm':\n # Starting bins at 0.004 Hz (that is about equal to half the length\n # of the record for the synthetic and observed data) and ending at\n # 10 Hz because after that the sm data is unusable due to how it was\n # processed. \n bins = np.logspace(np.log10(0.004), np.log10(10), num=21)\n \n elif data_type == 'disp':\n # Starting bins at 0.004 Hz (that is about equal to half the length\n # of the record for the synthetic and observed data) and ending at\n # 0.5 Hz because that is the nyquist frequency .\n bins = np.logspace(np.log10(0.004), np.log10(0.5), num=21)\n \n bin_means, bin_edges, binnumber = binned_statistic(freq_new,\n amp_new,\n statistic='mean',\n bins=bins)\n \n # for i in range(len(bin_means)):\n # bin_means[i] = 10**bin_means[i]\n \n \n return(bin_means, freq, amp)", "def band_penalty(self):\n fc_ix = np.argmin(np.abs(self.f - self.fc)) # Index to frequency array closes to center frequency\n # Number of indexes on each side of center frequency, not extending outside, only up to 10 kHz\n n = min(fc_ix, self.ix10k - fc_ix)\n if n == 0:\n return 0.0\n return np.mean(np.square(self.fr[fc_ix - n:fc_ix] - self.fr[fc_ix + n - 1:fc_ix - 1:-1]))", "def calculate_dark_current(image, i, int_time):\n dark_data_dir = r'F:\\TEMPO\\Data\\GroundTest\\FPS\\Integration_Sweep\\Dark'\n data_path_name_split = image.split('_')\n #print(data_path_name_split)\n all_int_files = [each for each in os.listdir(dark_data_dir) \\\n if each.endswith('_'+data_path_name_split[-1])] \n print(all_int_files)\n \n dark_data_file = os.path.join(dark_data_dir, all_int_files[0])\n IDL_variable = readsav(dark_data_file) \n all_full_frame = IDL_variable.q \n quad = all_full_frame[:, i, :, :]\n active_quad = np.mean(quad[:, 4:1028, 10:1034], axis=0) \n tsoc = np.mean(quad[:, 4:1028, 1034:1056], axis=0)\n bias_subtracted_quad = perform_bias_subtraction_ave(active_quad, tsoc)\n smear_subtracted_quad, smear_signal = perform_smear_subtraction(bias_subtracted_quad[10:1000, :], int_time)\n return smear_subtracted_quad", "def get_refractive_index(freq,medium):\n epsi_t=get_Permittivty_from_Medium(freq,medium)\n epsi=reduce_matrix_to_scalar(epsi_t)\n return cmath.sqrt(epsi)", "def spectralwhitening(st):\n \n for trace in arange(len(st)):\n data = st[trace].data\n \n n = len(data)\n nfft = nextpow2(n)\n \n spec = fft(data, nfft)\n spec_ampl = sqrt(abs(multiply(spec, conjugate(spec))))\n \n spec /= spec_ampl #Do we need to do some smoothing here?\n ret = real(ifft(spec, nfft)[:n])\n \n st[trace].data = ret\n \n return st", "def mod_ave2(z):\r\n x = np.mean(np.sin(TAU*z), 0) # col ave\r\n y = np.mean(np.cos(TAU*z), 0) # col ave\r\n phi = np.arctan(x/y) / TAU\r\n calc = (phi + np.where(y < 0, -0.5, 0) + 0.5) % 1 - 0.5\r\n return calc", "def update_idx(self):\n self.idx = (self.F * self.FMUL +\n self.E * self.EMUL +\n self.Z * self.ZMUL +\n self.A * 
self.AMUL +\n self.B * self.BMUL )", "def spectral_check(self, ):\r\n a, b = self.dfa, self.dfm.copy()\r\n b['ts_a']=a.ts\r\n b['flux_a'] = a.flux\r\n b['dflux'] = (b.flux-b.flux_a)/b.flux_unc\r\n b['eflux100_a'] = a.eflux100\r\n b['deflux'] = (b.eflux100-b.eflux100_a)/b.eflux100_unc\r\n b['pindex_a'] = a.pindex\r\n b['gdelta'] = (b.pindex-b.pindex_a)/b.pindex_unc\r\n self.dfm = b # since copy\r\n\r\n fig,axx = plt.subplots(1,2, figsize=(10,5), sharey=True)\r\n hkw = dict(bins=np.linspace(-5,5,51), histtype='step', lw=2, density=True)\r\n\r\n cut = (b.ts>50) & ~pd.isnull(b.deflux) & ~pd.isnull(b.gdelta) &\\\r\n (b.modelname==\"LogParabola\") & (b.pindex<3) & (b.pindex>0.5) &\\\r\n (b.e0>500) &(b.eflux100_unc>0) &(b.pindex_unc>0)\r\n self.check_total = sum(cut)\r\n for ax, title, val in zip(axx.flatten(), ['Energy Flux', 'Spectral index'], [b.deflux, b.gdelta]): \r\n\r\n df=val[cut]\r\n ax.hist(df.clip(-5,5), label='mean {:5.2f}\\nstd {:5.2f}'.format(df.mean(),df.std()), **hkw);\r\n ax.grid(alpha=0.5); \r\n x=np.linspace(-4,4)\r\n ax.plot(x, stats.norm.pdf(x), '--g' );\r\n ax.set(xlabel='normalized fit deviation', title=title, )\r\n ax.legend(loc='upper left',prop=dict(family='monospace'))\r\n fig.suptitle('Normalized devations of fit from model', fontsize=16);\r\n\r\n return fig", "def get_frequency(frame):\n frame = clip_centre(frame)\n frame = auto_correlate(frame)\n threshold: int = SAMPLE_RATE // 500\n lag = frame[threshold:].argmax()\n frequency = SAMPLE_RATE / lag\n return frequency", "def calc_ind(sel_lines):\n\n print()\n print(\"CALCULATING INDICES\")\n print(\"-------------------\")\n\n # remove duplicates of ind_id and gives a list of selected indices\n sel_ind = list(set(sel_lines['ind_id']))\n sel_ind = np.asarray(sel_ind)\n\n index = {}\n index['index'] = []\n index['value'] = []\n index['error'] = []\n index['flg'] = []\n index['mfrac_neg'] = []\n index['snr'] = []\n\n print(\"index\\tvalue\\terror\\t\\tsnr\\tflag\\tmfrac_neg\")\n print(\"-----\\t-----\\t-----\\t\\t---\\t----\\t---------\")\n\n ind_ids = np.asarray(sel_lines['ind_id'])\n rows = len(sel_lines['ln_id'])\n for i in range(len(sel_ind)): # each index\n\n var = [sel_lines['ind_var'][k] for k in range(rows) \\\n if ind_ids[k] == sel_ind[i]]\n flux = [sel_lines['flux'][k] for k in range(rows) \\\n if ind_ids[k] == sel_ind[i]]\n err = [sel_lines['error'][k] for k in range(rows) \\\n if ind_ids[k] == sel_ind[i]]\n flg = [sel_lines['flg'][k] for k in range(rows) \\\n if ind_ids[k] == sel_ind[i]]\n frac_neg = [sel_lines['frac_neg'][k] for k in range(rows) \\\n if ind_ids[k] == sel_ind[i]]\n snr = [sel_lines['snr'][k] for k in range(rows) \\\n if ind_ids[k] == sel_ind[i]]\n ln_c = [sel_lines['ln_c'][k] for k in range(rows) \\\n if ind_ids[k] == sel_ind[i]]\n\n # Maximum fraction of flux with negative values of all lines in index\n mfrac_neg = max(frac_neg)\n\n if \"negFlux\" in flg: flg_ind = 'negFlux'\n else: flg_ind = None\n\n # Median snr of index bandpasses:\n if snr is None or snr[0] is None:\n snr_ind = None\n else:\n snr_ind = np.median(snr)\n\n for k in range(len(var)):\n if 'L' not in var[k] and 'R' not in var[k]:\n msg=\"*** ERROR: 'ind_var' variable (in config file config_lines.txt) must start with either an 'L' for core line or 'R' for reference line. 
Value given was '{}'\".format(var[k])\n sys.exit(msg)\n\n # Add line variables for numerator or denominator:\n num = [ln_c[k]*flux[k] for k in range(len(var)) if 'L' in var[k]]\n num_err = [ln_c[k]*err[k] for k in range(len(var)) if 'L' in var[k]]\n denom = [ln_c[k]*flux[k] for k in range(len(var)) if 'R' in var[k]]\n denom_err = [ln_c[k]*err[k] for k in range(len(var)) if 'R' in var[k]]\n\n num = np.asarray(num)\n denom = np.asarray(denom)\n num_err = np.asarray(num_err)\n denom_err = np.asarray(denom_err)\n\n ind = sum(num) / sum(denom)\n\n # Error using propagation of errors for lines and ref lines\n ind_err = np.sqrt(sum(num_err**2) + ind**2 * sum(denom_err**2)) /sum(denom)\n\n if snr_ind: snr_ind = round(snr_ind, 2)\n\n index['index'].append(sel_ind[i])\n index['value'].append(ind)\n index['error'].append(ind_err)\n index['flg'].append(flg_ind)\n index['mfrac_neg'].append(mfrac_neg)\n index['snr'].append(snr_ind)\n\n print(\"{}\\t{:.4f}\\t{:.6f}\\t{}\\t{}\\t{:.4f}\".format(index['index'][i], index['value'][i], index['error'][i], index['snr'][i], index['flg'][i], index['mfrac_neg'][i]))\n\n return index", "def DW_cal(data, data_sm):\n n = len(data)\n numerator = 0\n denominator = 0\n for i in range(n):\n if i == 0:\n numerator = numerator + 0\n else:\n numerator = numerator + ((data[i] - data_sm[i]) - (data[i-1] - data_sm[i-1]))**2\n denominator = denominator + (data[i] - data_sm[i])**2\n return numerator/denominator*n/(n - 1)", "def spectral_diff_matrix(n, xmin=0, xmax=2*np.pi):\n\n h = 2 * np.pi / n\n kk = np.arange(1, n)\n n1 = int(np.floor((n - 1) / 2))\n n2 = int(np.ceil((n - 1) / 2))\n if np.mod(n, 2) == 0:\n topc = 1 / np.tan(np.arange(1, n2 + 1) * h / 2)\n temp = np.concatenate((topc, -np.flip(topc[0:n1])))\n else:\n topc = 1 / np.sin(np.arange(1, n2 + 1) * h / 2)\n temp = np.concatenate((topc, np.flip(topc[0:n1])))\n\n col1 = np.concatenate(([0], 0.5 * ((-1) ** kk) * temp))\n row1 = -col1\n D = 2 * np.pi / (xmax - xmin) * toeplitz(col1, r=row1)\n return D", "def integrate_spectrum(self):\n flux = sum(self.spectrum)\n return flux", "def sincbroad(w, s, hwhm):\n \"\"\"\n History\n -------\n Dec-90 GB,GM\n Rewrote with fourier convolution algorithm.\n Jul-91 AL\n Translated from ANA to IDL.\n 22-Sep-91 JAV\n Relaxed constant dispersion check# vectorized, 50% faster.\n 05-Jul-92 JAV\n Converted to function, handle nonpositive hwhm.\n 14-Nov-93 JAV\n Adapted from macbro.pro\n 23-Apr-93 JAV\n Verified that convolution kernel has specified hwhm. For IR FTS\n spectra: hwhm=0.0759 Angstroms, max change in profile is 0.4% of continuum.\n Oct-18 AW\n Python Version\n \"\"\"\n\n # Warn user if hwhm is negative.\n if hwhm < 0:\n logger.warning(\"Forcing negative smoothing width to zero.\")\n\n # Return input argument if half-width is nonpositive.\n if hwhm <= 0:\n return s # true: no broadening\n\n # Calculate (uniform) dispersion.\n nw = len(w) ## points in spectrum\n dw = (w[-1] - w[0]) / (nw - 1) # wavelength change per pixel\n\n # Make sinc function out to 20th zero-crossing on either side. Error due to\n # ignoring additional lobes is less than 0.2% of continuum. 
Reducing extent\n # to 10th zero-crossing doubles maximum error.\n fwhm = 2.0 * hwhm # full width at half maximum\n rperfw = 0.26525 # radians per fwhm of sinc\n xrange = 20 * np.pi # 20th zero of sinc (radians)\n wrange = xrange * fwhm * rperfw # 20th zero of sinc (wavelength)\n nhalf = int(wrange / dw + 0.999) ## points in half sinc\n nsinc = 2 * nhalf + 1 ## points in sinc (odd!)\n wsinc = (np.arange(nsinc, dtype=float) - nhalf) * dw # absissca (wavelength)\n xsinc = wsinc / (fwhm * rperfw) # absissca (radians)\n xsinc[nhalf] = 1.0 # avoid divide by zero\n sinc = np.sin(xsinc) / xsinc # calculate sinc\n sinc[nhalf] = 1.0 # insert midpoint\n xsinc[nhalf] = 0.0 # fix xsinc\n sinc = sinc / np.sum(sinc) # normalize sinc\n\n # Pad spectrum ends to minimize impact of Fourier ringing.\n sout = convolve(s, sinc, mode=\"nearest\")\n\n return sout", "def stEnergy(frame):\n return np.sum(frame ** 2) / np.float64(len(frame))", "def delta(self):\r\n return 1 - xl.Refractive_Index_Re(self.compound, self.energy, self.density)", "def stimulus_response_coherence(filename, segment_length):\n data, stimulus, sampling_interval, time = load_data(filename)\n nyquist = 1./(sampling_interval * 2.)\n f_step = 1./(sampling_interval * segment_length)\n f = np.arange(0, nyquist + f_step, f_step)\n noOfSamples = data.shape[0]\n noOfSegments = int(np.floor(noOfSamples/segment_length))\n kernel = gauss_kernel(0.001, 1./sampling_interval, 0.01)\n window = np.hanning(segment_length)\n coherence_spectra = np.zeros((segment_length, data.shape[1]), dtype=np.complex_)\n exp_coherence_spectra = np.zeros((segment_length, data.shape[1]), dtype=np.complex_)\n # we will need the psth for the expected coherence \n psth = np.zeros(data.shape[0])\n for i in range(data.shape[1]):\n psth = psth + np.convolve(data[:,i], kernel, mode='same') * (1./sampling_interval)\n psth = psth/data.shape[1]\n # go and calculate the spectra\n for i in range(data.shape[1]):\n trace = data[:,i]/sampling_interval\n trace = np.convolve(trace, kernel, mode=\"same\")\n f_resp = np.zeros((segment_length, noOfSegments), dtype=np.complex_)\n f_psth = np.zeros((segment_length, noOfSegments), dtype=np.complex_)\n f_stim = np.zeros((segment_length, noOfSegments), dtype=np.complex_)\n for n in range(noOfSegments):\n start\t= n * segment_length\n end \t= start + segment_length\n resp_segment = trace[start:end]\n resp_segment = resp_segment - np.mean(resp_segment)\n resp_segment = resp_segment * window\n psth_segment = psth[start:end]\n psth_segment = psth_segment - np.mean(psth_segment)\n psth_segment = psth_segment * window\n stim_segment = stimulus[start:end]\n stim_segment = stim_segment - np.mean(stim_segment)\n stim_segment = stim_segment * window\n \n f_resp[:, n] = np.fft.fft(resp_segment, segment_length)\n f_stim[:, n] = np.fft.fft(stim_segment, segment_length)\n f_psth[:, n] = np.fft.fft(psth_segment, segment_length)\n\n f_resp_conj = np.conjugate(f_resp) # complex conjugate spectrum of response segments\n f_stim_conj = np.conjugate(f_stim) # complex conjugate spectra of stimulus segments\n f_psth_conj = np.conjugate(f_psth) # complex conjugate spectra of psth segments\n\n sr_cross_spectrum = np.mean(f_stim_conj * f_resp, axis=1) # cross spectrum S*R\n ss_auto_spectrum = np.mean(f_stim_conj * f_stim, axis=1) # auto spectrum S*S\n\n rs_cross_spectrum = np.mean(f_resp_conj * f_stim, axis=1) # cross spectrum R*S\n rr_auto_spectrum = np.mean(f_resp_conj * f_resp, axis=1) # auto spectrum R*R\n \n pr_cross_spectrum = np.mean(f_psth_conj * f_resp, axis=1) # 
cross spectrum PSTH*R\n pp_auto_spectrum = np.mean(f_psth_conj * f_psth, axis=1) # auto spectrum PSTH*PSTH\n rp_cross_spectrum = np.mean(f_resp_conj * f_psth, axis=1) # cross spectrum R*PSTH\n \n coherence_spectra[:, i] = (sr_cross_spectrum * rs_cross_spectrum) / (ss_auto_spectrum * rr_auto_spectrum)\n exp_coherence_spectra[:, i] = (pr_cross_spectrum * rp_cross_spectrum) / (pp_auto_spectrum * rr_auto_spectrum)\n \n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.plot(f, np.mean(coherence_spectra[:len(f),:], axis=1), color='dodgerblue', label=\"r-s coherence\")\n ax.plot(f, np.mean(exp_coherence_spectra[:len(f),:], axis=1), color='silver', label=\"r-r coherence\")\n ax.set_xlim([0, 300])\n ax.set_ylim([0, 1])\n ax.set_xlabel('frequency [Hz]')\n ax.set_ylabel('coherence')\n ax.legend(fontsize=9)\n plt.show()", "def calculate_index_and_derivative(wl):\n index = np.sqrt(1 + (0.6961663 * wl * wl) / (wl * wl - 0.0684043 * 0.0684043)\n + (0.4079426 * wl * wl) / (wl * wl - 0.1162414 * 0.1162414)\n + (0.8974794 * wl * wl) / (wl * wl - 9.896161 * 9.896161)\n )\n\n index_derivative = \\\n (\n - (1.79496 * wl * wl * wl) / (pow(-97.934 + wl * wl, 2))\n + (1.79496 * wl) / (-97.934 + wl * wl)\n\n - (0.815885 * wl * wl * wl) / (pow(-0.0135121 + wl * wl, 2))\n + (0.815885 * wl) / (-0.0135121 + wl * wl)\n\n - (1.39233 * wl * wl * wl) / (pow(-0.00467915 + wl * wl, 2))\n + (1.39233 * wl) / (-0.00467915 + wl * wl)\n ) \\\n / \\\n (2 * np.sqrt(\n 1\n + (0.897479 * wl * wl) / (-97.934 + wl * wl)\n + (0.407943 * wl * wl) / (-0.0135121 + wl * wl)\n + (0.696166 * wl * wl) / (-0.00467915 + wl * wl)\n )\n )\n return index, index_derivative", "def warm_region_cal(audio_samples, fs):\n #window the audio\n windowed_samples = timbral_util.window_audio(audio_samples)\n\n # need to define a function for the roughness stimuli, emphasising the 20 - 40 region (of the bark scale)\n min_bark_band = 10\n max_bark_band = 40\n mean_bark_band = (min_bark_band + max_bark_band) / 2.0\n array = np.arange(min_bark_band, max_bark_band)\n x = timbral_util.normal_dist(array, theta=0.01, mean=mean_bark_band)\n x -= np.min(x)\n x /= np.max(x)\n\n wr_array = np.zeros(240)\n wr_array[min_bark_band:max_bark_band] = x\n\n # need to define a second array emphasising the 20 - 40 region (of the bark scale)\n min_bark_band = 80\n max_bark_band = 240\n mean_bark_band = (min_bark_band + max_bark_band) / 2.0\n array = np.arange(min_bark_band, max_bark_band)\n x = timbral_util.normal_dist(array, theta=0.01, mean=mean_bark_band)\n x -= np.min(x)\n x /= np.max(x)\n\n hf_array = np.zeros(240)\n hf_array[min_bark_band:max_bark_band] = x\n\n windowed_loud_spec = []\n windowed_rms = []\n\n wr_vals = []\n hf_vals = []\n\n for i in range(windowed_samples.shape[0]):\n samples = windowed_samples[i, :]\n N_entire, N_single = timbral_util.specific_loudness(samples, Pref=100.0, fs=fs, Mod=0)\n\n # append the loudness spec\n windowed_loud_spec.append(N_single)\n windowed_rms.append(np.sqrt(np.mean(samples * samples)))\n\n wr_vals.append(np.sum(wr_array * N_single))\n hf_vals.append(np.sum(hf_array * N_single))\n\n mean_wr = np.mean(wr_vals)\n mean_hf = np.mean(hf_vals)\n weighted_wr = np.average(wr_vals, weights=windowed_rms)\n weighted_hf = np.average(hf_vals, weights=windowed_rms)\n\n return mean_wr, weighted_wr, mean_hf, weighted_hf", "def calcumul_index(path,x,name_champ_label,indice2,list_drop,pathlist_names_feature):\n sql=sqlite3.connect(path)\n df=pd.read_sql_query(\"SELECT * FROM output\", sql)\n df=df.groupby(\"originfid\").mean()\n if 'band' in 
df.columns[6] :\n globals()[\"df%s\"%x]=col_sqlite(path,x,list_drop,pathlist_names_feature)\n label = globals()[\"df%s\"%x][name_champ_label]\n globals()[\"%s\"%x]=globals()[\"df%s\"%x].astype(float)\n print(indice2)\n if indice2 not in ['NDVI', 'NDWI','SM','asc_vv','des_vv','asc_vh','des_vh','asc_userfeature1','des_userfeature1']:\n name_indice=indice2\n band1_indice=input(\"band ? :\")\n band2_indice=input(\"band ? :\")\n df_b1 = globals()[\"%s\"%x].filter(like=band1_indice)\n df_b2 = globals()[\"%s\"%x].filter(like=band2_indice)\n df_b1_col = df_b1.rename(columns=lambda x: x[-8:])\n df_b2_col = df_b2.rename(columns=lambda x: x[-8:])\n df_indice = (df_b2_col - df_b1_col)/(df_b2_col + df_b1_col)\n globals()[\"df_%s\"%indice2] = df_indice.cumsum(axis=1)\n \n else:\n df_indice = globals()[\"df%s\"%x].filter(like=indice2)\n df_indice_col = df_indice.rename(columns=lambda x: x[-8:])\n df_indice_col=df_indice_col.iloc[:-1]\n globals()[\"df_%s\"%indice2] = df_indice_col.cumsum(axis=1)\n \n globals()[\"df_%s\"%indice2][name_champ_label]=label\n globals()[\"df_%s\"%indice2]=globals()[\"df_%s\"%indice2].astype(float)\n globals()[\"df_mean_%s\"%indice2]=globals()[\"df_%s\"%indice2].groupby(name_champ_label).mean().T \n globals()[\"df_mean_%s\"%indice2].index=pd.to_datetime(globals()[\"df_mean_%s\"%indice2].index,format=\"%Y%m%d\")\n else :\n label = df[name_champ_label]\n print(indice2)\n if indice2 not in ['ndvi', 'ndwi','asc_vv','des_vv','asc_vh','des_vh','asc_userfeature1','des_userfeature1','SM']:\n name_indice=indice2\n band1_indice=input(\"band ? :\")\n band2_indice=input(\"band ? :\")\n df_b1 = df.filter(like=band1_indice)\n df_b2 = df.filter(like=band2_indice)\n df_b1_col = df_b1.rename(columns=lambda x: x[-8:])\n df_b2_col = df_b2.rename(columns=lambda x: x[-8:])\n df_indice = (df_b2_col - df_b1_col)/(df_b2_col + df_b1_col)\n globals()[\"df_%s\"%indice2] = df_indice.cumsum(axis=1)\n \n else:\n df_indice = df.filter(like=indice2)\n df_indice_col = df_indice.rename(columns=lambda x: x[-8:])\n globals()[\"df_%s\"%indice2] = df_indice_col.cumsum(axis=1)\n \n globals()[\"df_%s\"%indice2][name_champ_label]=label\n globals()[\"df_mean_%s\"%indice2]=globals()[\"df_%s\"%indice2].groupby(name_champ_label).mean().T\n globals()[\"df_mean_%s\"%indice2].index=pd.to_datetime(globals()[\"df_mean_%s\"%indice2].index,format=\"%Y%m%d\")\n return globals()[\"df_mean_%s\"%indice2], globals()[\"df_%s\"%indice2]", "def freqdomain(self):\n \n\n #self.df = self.f[1] - self.f[0]\n #frequency vector\n #fv = fftshift(fftfreq(len(eta),1./fs))\n #fv = fv[len(fv)/2:]\n \n #spectral analysis\n self.sn1 = self.espec1(self.n1)\n self.sn2 = self.espec1(self.n2)\n self.sn3 = self.espec1(self.n3)\n self.sn12 = self.espec2(self.n1,self.n2)\n self.sn13 = self.espec2(self.n1,self.n3)\n self.sn23 = self.espec2(self.n2,self.n3)\n \n #delta freq\n self.df = self.f[3] - self.f[2]\n\n #calculo do numero de onda\n #self.wavenumber()\n #k = numeronda(h,f,len(f))\n #k = np.array(k)\n\n #calculo dos coeficientes de fourier - NDBC 96_01 e Steele (1992)\n c = self.sn2[:,1] + self.sn3[:,1]\n cc = np.sqrt(self.sn1[:,1] * (c))\n \n self.a1 = self.sn12[:,3] / cc\n self.b1 = self.sn13[:,3] / cc\n \n self.a2 = (self.sn2[:,1] - self.sn3[:,1]) / c\n self.b2 = 2 * self.sn12[:,2] / c\n \n #calcula direcao de onda\n #mean direction\n self.dire1 = np.array([np.angle(np.complex(self.b1[i],self.a1[i]),deg=True) for i in range(len(self.a1))])\n \n #principal direction\n self.dire2 = 0.5 * np.array([np.angle(np.complex(self.b2[i],self.a2[i]),deg=True) for 
i in range(len(self.a2))])\n \n #condicao para valores maiores que 360 e menores que 0\n self.dire1[np.where(self.dire1 < 0)] = self.dire1[np.where(self.dire1 < 0)] + 360\n self.dire1[np.where(self.dire1 > 360)] = self.dire1[np.where(self.dire1 > 360)] - 360\n self.dire2[np.where(self.dire2 < 0)] = self.dire2[np.where(self.dire2 < 0)] + 360\n self.dire2[np.where(self.dire2 > 360)] = self.dire2[np.where(self.dire2 > 360)] - 360\n \n #acha o indice da frequencia de pico\n ind = np.where(self.sn1[:,1] == np.max(self.sn1[:,1]))[0]\n \n #periodo de pico\n self.tp = (1. / self.f[ind])[0]\n \n #momento espectral de ordem zero total - m0\n self.m0 = np.sum(self.sn1[:,1]) * self.df\n \n #calculo da altura significativa\n self.hm0 = 4.01 * np.sqrt(self.m0)\n \n #direcao do periodo de pico\n self.dp = self.dire1[ind][0]\n \n #Espalhamento direcional\n #Formula do sigma1 do livro Tucker&Pitt(2001) \"Waves in Ocean Engineering\" pags 196-198\n c1 = np.sqrt(self.a1 ** 2 + self.b1 ** 2)\n c2 = np.sqrt(self.a2 ** 2 + self.b2 ** 2)\n \n s1 = c1 / (1-c1)\n s2 = (1 + 3 * c2 + np.sqrt(1 + 14 * c2 + c2 ** 2)) / (2 * (1 - c2))\n \n self.sigma1 = np.sqrt(2 - 2 * c1) * 180 / np.pi\n self.sigma2 = np.sqrt((1 - c2) / 2) * 180 / np.pi\n \n self.sigma1p = np.real(self.sigma1[ind])[0]\n self.sigma2p = np.real(self.sigma2[ind])[0]\n \n # pondaf = np.array([hm0, tp, dp, sigma1p, sigma2p])\n \n #hm0, tp, dp, sigma1, sigma2, sigma1p, sigma2p, f, df, k, sn, snx, sny, snn, snnx, snny, snxny, snxnx, snyny, a1, b1, a2, b2, dire1, dire2\n #return hm0, tp, dp, sigma1, sigma2, sigma1p, sigma2p, f, df, k, sn, snx, sny, snn, snnx, snny, snxny, snxnx, snyny, a1, b1, a2, b2, dire1, dire2", "def stEnergy(frame):\n return numpy.sum(frame ** 2) / numpy.float64(len(frame))", "def stitchSpectra(lamb_list,count_list, method=\"scale\", edgeremove=(0, 0), shiftToPositive=False, dlambda=None):\r\n rawData=np.array([np.array(lamb_list),np.array(count_list)])\r\n rawData=rawData.swapaxes(0,1)\r\n coefficients = []\r\n print(\"Removing edges for stitching:\", *edgeremove)\r\n omitRight = rawData[0].shape[1] - math.floor(rawData[0].shape[1] * edgeremove[1])\r\n print(\"Stitching index range is \", 0, omitRight)\r\n processed = np.array(rawData[0][:, 0:omitRight]) \r\n if dlambda is None:\r\n dlambda = math.fabs(processed[0, 1] - processed[0, 0]) ## lambda steps of first spectrum are kept\r\n for i, spec in enumerate(rawData[1:]):\r\n omitLeft = math.floor(spec.shape[1] * edgeremove[0])\r\n omitRight = spec.shape[1] - math.floor(spec.shape[1] * edgeremove[1])\r\n print(\"Stitching index range is \", omitLeft, omitRight)\r\n if i == len(rawData)-2:\r\n spec = np.array(spec[:, omitLeft:]) ## do not shorten last array at end\r\n else:\r\n spec = np.array(spec[:, omitLeft:omitRight]) # shorten middle arrays at both sides\r\n print(\"Stitching spectrum in range\", np.min(spec[0,]), np.max(spec[0,]))\r\n # calculate overlap\r\n overlap = (np.min(spec[0,]), np.max(processed[0,])) \r\n #lambdas = np.arange(*overlap, dlambda)\r\n #leftfun = interp1d(processed[0,], processed[1,])\r\n #rightfun = interp1d(spec[0,], spec[1,])\r\n left = np.mean(processed[1, processed[0,] > overlap[0]]) ##mean of counts of overlap\r\n right = np.mean(spec[1, spec[0,] < overlap[1]])\r\n if method == \"shift\":\r\n # calculate offset in overlap region\r\n offset = left - right\r\n print(\"Stitching offset %s in overlap\", offset, *overlap)\r\n # add shifted spectrum\r\n spec[1,] = spec[1,] + offset\r\n coefficients.append(offset)\r\n elif method == \"scale\":\r\n # calculate 
factor in overlap region\r\n factor = left/right\r\n print(\"Stitching factor\"+str(factor)+\" in overlap \", *overlap)\r\n spec[1,] = spec[1,] * factor\r\n coefficients.append(factor)\r\n processed = np.concatenate([processed, spec], axis=1)\r\n # interpolate data on grid\r\n interpolated = interp1d(processed[0,], processed[1,])\r\n lambdas = np.arange(processed[0, 0], processed[0, -1], dlambda)\r\n specdata = interpolated(lambdas)\r\n # shift above 0\r\n if shiftToPositive:\r\n minimum = np.min(specdata)\r\n if minimum < 0:\r\n specdata += math.fabs(minimum)\r\n \r\n return (lambdas,specdata,coefficients)", "def spectral_roll_on(sign, fs):\n output = 0\n f, ff = plotfft(sign, fs)\n cum_ff = np.cumsum(ff)\n value = 0.05*(sum(ff))\n\n for i in range(len(ff)):\n if cum_ff[i] >= value:\n output = f[i]\n break\n return output", "def refractive_index_glass(wavelength, type='soda-lime-low-iron'):\n\n if type.lower() == 'soda-lime-low-iron':\n wavelength = wavelength / 1000\n n = 1.5130 - 0.003169 * wavelength ** 2 + 0.003962 * wavelength ** -2 + 0 * 1j\n\n # n[wavelength < 0.3] = n[wavelength < 0.3] + 1j*0\n elif type.upper() == 'BK7':\n wavelength = wavelength / 1000\n n = np.sqrt(1 + \\\n (1.03961212 * wavelength ** 2) / (\n wavelength ** 2 - 0.00600069867) + \\\n (0.231792344 * wavelength ** 2) / (\n wavelength ** 2 - 0.0200179144) + \\\n (1.01046945 * wavelength ** 2) / (\n wavelength ** 2 - 103.560653)\n )\n\n return n", "def wvd(fx,nh=2**8-1,tstep=2**5,nfbins=2**10,df=1.0):\r\n \r\n if type(fx) is list:\r\n fx=np.array(fx)\r\n try:\r\n fn,fm=fx.shape\r\n if fm>fn:\r\n fm,fn=fx.shape\r\n except ValueError:\r\n fn=len(fx)\r\n fm=1\r\n if fm>1:\r\n fn=fn[0]\r\n print 'computing cross spectra'\r\n #compute the analytic signal of function f and dctrend\r\n fa=sps.hilbert(dctrend(fx[0]))\r\n fb=sps.hilbert(dctrend(fx[1]))\r\n fa=fa.reshape(fn)\r\n fb=fb.reshape(fn)\r\n else:\r\n #compute the analytic signal of function f and dctrend\r\n fa=sps.hilbert(dctrend(fx))\r\n fa=fa.reshape(fn)\r\n fb=fa.copy()\r\n \r\n #sampling period\r\n df=float(df)\r\n dt=1./df\r\n tau=(nh-1)/2\r\n \r\n #create a time array such that the first point is centered on time window\r\n tlst=np.arange(start=0,stop=fn-1,step=tstep,dtype='int')\r\n \r\n #create an empty array to put the tf in \r\n tfarray=np.zeros((nfbins,len(tlst)),dtype='complex128')\r\n \r\n #create a frequency array with just positive frequencies\r\n flst=np.fft.fftfreq(nfbins,dt)[0:nfbins/2]\r\n \r\n #calculate pseudo WV\r\n for point,nn in enumerate(tlst):\r\n #calculate the smallest timeshift possible\r\n taun=min(nn,tau,fn-nn-1)\r\n #make a timeshift array\r\n taulst=np.arange(start=-taun,stop=taun+1,step=1,dtype='int')\r\n #calculate rectangular windowed correlation function of analytic signal\r\n Rnn=4*np.conjugate(fa[nn-taulst])*fb[nn+taulst] \r\n #calculate fft of windowed correlation function\r\n FTRnn=np.fft.fft(padzeros(Rnn,npad=nfbins))\r\n #put into tfarray\r\n tfarray[:,point]=FTRnn[::-1]\r\n \r\n #normalize\r\n tfarray=tfarray/nh\r\n \r\n return tfarray,tlst,flst", "def extract_mel_spectrogram(wav_path, X, y, index, curr_speaker_num, max_duration=None):\n Sxx = spectrogram_converter.mel_spectrogram(wav_path, max_duration)\n for i in range(Sxx.shape[0]):\n for j in range(Sxx.shape[1]):\n X[index, 0, i, j] = Sxx[i, j]\n y[index] = curr_speaker_num\n return 1", "def music(idx, n_music=200):\n f = freqs[idx]\n Rxx = np.dot(X[:, idx], X[:, idx].H)\n lam, V = eig_sorted(Rxx)\n En = V[:, 1:] # Noise subspace for one source\n\n theta_range = 
np.linspace(0, 2*np.pi, n_music)\n P_music = np.zeros(n_music)\n for i in range(n_music):\n sv = ma.steering_vector(theta_range[i], f)\n vec = np.dot(En.H, ma.steering_vector(theta_range[i], f))\n P_music[i] = 1/np.linalg.norm(vec)**2\n\n vv = V[:, 0].flatten()\n print('----------')\n print('Performing MUSIC at {:.5} Hz'.format(f))\n print('-----------------------------')\n print('Steering vector subspace check:\\n')\n print('At the correct angle of {:.3}, '.format(theta*180/np.pi) +\n 'the real parts of the eigenvalues of R_xx are:')\n print('\\n'.join(' {:.3}'.format(np.real(l)) for l in lam))\n print('\\nSteering vector / eigenvector of max eigenvalue:')\n print((ma.steering_vector(theta, f) / vv).T)\n return P_music, theta_range", "def ssc(signal,samplerate=16000,winlen=0.025,winstep=0.01,\n nfilt=26,nfft=512,lowfreq=0,highfreq=None,preemph=0.97): \n highfreq= highfreq or samplerate/2\n signal = sigproc.preemphasis(signal,preemph)\n frames = sigproc.framesig(signal, winlen*samplerate, winstep*samplerate)\n pspec = sigproc.powspec(frames,nfft)\n pspec = pylab.where(pspec == 0,pylab.finfo(float).eps,pspec) # if things are all zeros we get problems\n \n fb = get_filterbanks(nfilt,nfft,samplerate,lowfreq,highfreq)\n feat = pylab.dot(pspec,fb.T) # compute the filterbank energies\n R = pylab.tile(pylab.linspace(1,samplerate/2,pylab.size(pspec,1)),(pylab.size(pspec,0),1))\n \n return pylab.dot(pspec*R,fb.T) / feat", "def get_beat_sync_spectrums(audio):\n y, sr = core.load(audio, sr=44100)\n eql_y = EqualLoudness()(y)\n tempo, framed_dbn = self_tempo_estimation(y, sr)\n np.append(framed_dbn, np.array(len(y)/sr))\n band1 = (0, 220)\n band2 = (220, 1760)\n band3 = (1760, sr / 2)\n band1list = []\n band2list = []\n band3list = []\n for i in range(1, len(framed_dbn)):\n fft_eq = abs(np.fft.fft(eql_y[int(framed_dbn[i - 1] * sr):int(framed_dbn[i] * sr)]))\n freqs = np.fft.fftfreq(len(fft_eq), 1 / sr)\n band1list.append(np.sqrt(np.mean(sum(fft_eq[np.where(np.logical_and(freqs > band1[0], freqs < band1[1]))]**2))))\n band2list.append(np.sqrt(np.mean(sum(fft_eq[np.where(np.logical_and(freqs > band2[0], freqs < band2[1]))]**2))))\n band3list.append(np.sqrt(np.mean(sum(fft_eq[np.where(np.logical_and(freqs > band3[0], freqs < band3[1]))]**2))))\n\n band1list = np.array(band1list).transpose()\n band2list = np.array(band2list).transpose()\n band3list = np.array(band3list).transpose()\n return np.vstack([band1list, band2list, band3list])", "def getSpectralEnergy(datatype, traceList, outfile, channelStart, channelEnd):\r\n assert(datatype in ['mat', 'segy']) \r\n if datatype=='segy':\r\n st = obspy.Stream(traceList) \r\n else:\r\n raise Exception('not implemented')\r\n sampleRate = traceList[0].stats.sampling_rate\r\n #for decimated data,sampleRate should be reflected\r\n #set wlen to 0.25 sec, high pass is 250\r\n wlen = 0.5*sampleRate\r\n nfft = int(_nearest_pow_2(wlen))\r\n npts = len(st[0].data)\r\n per_lap = 0.9\r\n if nfft > npts:\r\n nfft = int(_nearest_pow_2(npts / 8.0))\r\n nlap = int(nfft * float(per_lap))\r\n\r\n nTraces = len(traceList)\r\n Emat = None\r\n print ('sample rate is ', sampleRate, 'nfft=', nfft, 'noverlap', nlap)\r\n \r\n t_ = (traceList[0].stats.endtime-traceList[0].stats.starttime)\r\n dx_ = traceList[1].stats.distance - traceList[0].stats.distance\r\n extent = [0,len(traceList)*dx_/1e3,0,t_/100.0]\r\n\r\n for itr in range(0,nTraces):\r\n #F,T,SXX = signal.spectrogram(np.array(st[itr].data), fs=sampleRate, \r\n # window='hann', nfft=nfft, mode='magnitude')\r\n F,T,SXX = 
signal.spectrogram(np.array(st[itr].data), fs=sampleRate, \r\n window='hann', nfft=nfft)\r\n #sum along frequency axis \r\n #energy = np.sum((SXX[1:,:]/np.max(SXX[1:,:])),axis=0)\r\n energy = np.sum(SXX[1:,:],axis=0)\r\n #energy = np.log10(np.abs(energy/np.max(energy)))*10.0\r\n energy = np.log10(energy)*10.0\r\n if Emat is None:\r\n Emat = np.zeros((nTraces, len(T)))\r\n Emat[itr,:]=energy\r\n if DEBUG:\r\n plt.figure()\r\n im = plt.imshow(Emat,extent=extent)\r\n plt.colorbar(im)\r\n plt.savefig('spectralenergy{0}_ch{1}_{2}.png'.format(outfile,channelStart,channelEnd))\r\n plt.close()", "def internal_wave_KE(U, V, z, bin_idx, wl_min, wl_max, bin_size):\n \n \n Uspeci = []\n Vspeci = []\n Uspec = []\n Vspec = []\n Upowi = []\n Vpowi = []\n Upower = []\n Vpower = []\n U = U**2\n V = V**2\n \n sp = np.nanmean(np.gradient(z, axis=0))\n \n U_mx, U_kx = specGrid(U[bin_idx[0,:],0], sp, bin_size)\n \n for Ui, Vi in zip(U.T, V.T):\n \n for binIn in bin_idx:\n Uspec1 = SpectrumGen(Ui[binIn], bin_size)\n Upowi.append(power_spec(Uspec1))\n Uspeci.append(Uspec1)\n Vspec1 = SpectrumGen(Vi[binIn], bin_size)\n Vpowi.append(power_spec(Vspec1))\n Vspeci.append(Vspec1)\n \n Uspeci = np.vstack(Uspeci)\n Vspeci = np.vstack(Vspeci)\n Upowi = np.vstack(Upowi)\n Vpowi = np.vstack(Vpowi)\n \n Uspec.append(Uspeci)\n Vspec.append(Vspeci)\n Upower.append(Upowi)\n Vpower.append(Vpowi)\n Uspeci = []\n Vspeci = []\n Upowi = []\n Vpowi = []\n \n # integrate Power Spec of U and V between chosen vertical wavelengths\n Uint = []\n Vint = []\n \n for Us, Vs in zip(Upower, Vpower):\n Ui = np.vstack([power_int_smart(binIn,\\\n U_mx, wl_min, wl_max) for binIn in Us])\n Vi = np.vstack([power_int_smart(binIn,\\\n U_mx, wl_min, wl_max) for binIn in Vs])\n Uint.append(Ui)\n Vint.append(Vi)\n \n Ui = []\n Vi = []\n \n \n Uint = np.hstack(Uint)\n Vint = np.hstack(Vint)\n \n Ek = 0.5*(Uint + Vint)\n \n return Ek, Upower, Vpower, U_kx, Uspec, Vspec", "def reassignedSmethod(fx,nh=2**7-1,tstep=2**4,nfbins=2**9,df=1.0,alpha=4,\r\n thresh=.01,L=5): \r\n \r\n# if type(fx) is list:\r\n# fx=np.array(fx)\r\n# try:\r\n# fn,fm=fx.shape\r\n# if fm>fn:\r\n# fm,fn=fx.shape\r\n# except ValueError:\r\n# fn=len(fx)\r\n# fm=1\r\n# if fm>1:\r\n# print 'computing cross spectra'\r\n# #compute the analytic signal of function f and dctrend\r\n# #fa=sps.hilbert(dctrend(fx[0]))\r\n# #fb=sps.hilbert(dctrend(fx[1]))\r\n# fa=fx[0]\r\n# fb=fx[1]\r\n# fa=fa.reshape(fn)\r\n# fb=fb.reshape(fn)\r\n# else:\r\n# fa=fx\r\n# fa=fa.reshape(fn)\r\n# fb=fa.copy()\r\n\r\n \r\n nx=len(fx) \r\n \r\n #compute gaussian window\r\n h=gausswin(nh,alpha=alpha)\r\n #h=np.hanning(nh)\r\n lh=(nh-1)/2\r\n \r\n #compute ramp window\r\n th=h*np.arange(start=-lh,stop=lh+1,step=1)\r\n \r\n #compute derivative of window\r\n dh=dwindow(h)\r\n \r\n #make a time list of indexes\r\n tlst=np.arange(start=0,stop=nx,step=tstep)\r\n nt=len(tlst)\r\n \r\n #make frequency list for plotting\r\n flst=np.fft.fftfreq(nfbins,1./df)[:nfbins/2]\r\n \r\n #initialize some time-frequency arrays\r\n tfh=np.zeros((nfbins,nt),dtype='complex128')\r\n tfth=np.zeros((nfbins,nt),dtype='complex128')\r\n tfdh=np.zeros((nfbins,nt),dtype='complex128')\r\n \r\n #compute components for reassignment\r\n for ii,tt in enumerate(tlst):\r\n #create a time shift list\r\n tau=np.arange(start=-min([np.round(nx/2.),lh,tt-1]),\r\n stop=min([np.round(nx/2.),lh,nx-tt-1])+1)\r\n #compute the frequency spots to be calculated\r\n ff=np.remainder(nfbins+tau,nfbins)\r\n #make lists of data points for each window calculation\r\n 
xlst=tt+tau\r\n hlst=lh+tau\r\n normh=np.sqrt(np.sum(abs(h[hlst])**2))\r\n tfh[ff,ii]=fx[xlst]*h[hlst].conj()/normh\r\n tfth[ff,ii]=fx[xlst]*th[hlst].conj()/normh\r\n tfdh[ff,ii]=fx[xlst]*dh[hlst].conj()/normh\r\n \r\n #compute Fourier Transform\r\n spech=np.fft.fft(tfh,axis=0)\r\n specth=np.fft.fft(tfth,axis=0)\r\n specdh=np.fft.fft(tfdh,axis=0)\r\n \r\n #get only positive frequencies\r\n spech=spech[nfbins/2:,:]\r\n specth=specth[nfbins/2:,:]\r\n specdh=specdh[nfbins/2:,:]\r\n \r\n #check to make sure no spurious zeros floating around\r\n szf=np.where(abs(spech)<1.E-6)\r\n spech[szf]=0.0+0.0j\r\n zerofind=np.nonzero(abs(spech))\r\n twspec=np.zeros((nfbins/2,nt),dtype='float')\r\n dwspec=np.zeros((nfbins/2,nt),dtype='float')\r\n twspec[zerofind]=np.round(np.real(specth[zerofind]/spech[zerofind]))\r\n dwspec[zerofind]=np.round(np.imag((nfbins/2.)*specdh[zerofind]/\r\n spech[zerofind])/(np.pi))\r\n \r\n #get shape of spectrogram\r\n nf,nt=spech.shape\r\n \r\n #-----calculate s-method-----\r\n Llst=np.arange(start=-L/2+1,stop=L/2+1,step=1,dtype='int')\r\n\r\n #make and empty array of zeros\r\n sm=np.zeros_like(spech)\r\n \r\n #put values where L cannot be value of L, near top and bottom\r\n sm[0:L/2,:]=abs(spech[0:L/2,:])**2\r\n sm[-L/2:,:]=abs(spech[-L/2:,:])**2\r\n\r\n #calculate s-method\r\n for ff in range(L/2,nf-L/2-1):\r\n sm[ff,:]=2*np.real(np.sum(spech[ff+Llst,:]*spech[ff-Llst,:].conj(),\r\n axis=0))/L\r\n \r\n #------compute reassignment----- \r\n\r\n \r\n rtfarray=np.zeros((nfbins/2,nt))\r\n \r\n threshold=thresh*np.max(abs(sm))\r\n \r\n for nn in range(nt):\r\n for kk in range(nf):\r\n if abs(spech[kk,nn])>threshold:\r\n #get center of gravity index in time direction from spectrogram \r\n nhat=int(nn+twspec[kk,nn])\r\n nhat=int(min([max([nhat,1]),nt-1]))\r\n #get center of gravity index in frequency direction from spec\r\n khat=int(kk-dwspec[kk,nn])\r\n khat=int(np.remainder(np.remainder(khat-1,nfbins/2)+nfbins/2,\r\n nfbins/2))\r\n rtfarray[khat,nhat]=rtfarray[khat,nhat]+abs(sm[kk,nn])\r\n else:\r\n rtfarray[kk,nn]=rtfarray[kk,nn]+sm[kk,nn]\r\n\r\n #place values where L cannot be L \r\n rtfarray[:L/2,:]=abs(sm[:L/2,:])\r\n rtfarray[-L/2:,:]=abs(sm[-L/2:,:])\r\n \r\n tz=np.where(rtfarray==0)\r\n rtfarray[tz]=1.0\r\n \r\n tz=np.where(sm==0.0)\r\n sm[tz]=1.0 \r\n \r\n #scale\r\n rtfarray=abs(rtfarray)\r\n \r\n return rtfarray,tlst,flst,sm", "def dseries(temp, wavelength):\n if wavelength < 300 or wavelength > 830:\n return 0\n mm=wavelength%10\n s=_dseriesd(temp, wavelength-mm)\n if mm==0:\n return s\n m=mm*0.1\n e=_dseriesd(temp, (wavelength-mm)+10)\n return s+(e-s)*m", "def snv(spectra):\n\n return (spectra - np.mean(spectra, axis=0)) / np.std(spectra, axis=0)", "def comp_amplification_index(self):\n \n self.grid_tuning_in=self.inputs.grid_tuning_in\n self.grid_tuning_out=gl.comp_grid_tuning_index(self.L,self.nx,(self.r[0:self.n_e**2,:]).T) \n self.grid_tuning_out_inhib=gl.comp_grid_tuning_index(self.L,self.nx,(self.r[self.n_e**2:,:]).T)\n\n self.grid_amp_index=self.grid_tuning_out/self.grid_tuning_in", "def test_compute_spectral_norms(self):\n\t\tdetails = self.watcher.analyze(layers=[self.second_layer], pool=False, randomize=False, plot=False, mp_fit=False, svd_method=ACCURATE_SVD)\n\n\t\t# SLOW method\n\t\ta = details.spectral_norm.to_numpy()\n\t\tself.assertAlmostEqual(a[0],20.2149, places=3)\n\t\tself.assertAlmostEqual(a[1],24.8158, places=3)\n\t\tself.assertAlmostEqual(a[2],19.3795, places=3)", "def sincint(x, nres, speclist) :\n\n dampfac = 3.25*nres/2.\n ksize = 
int(21*nres/2.)\n if ksize%2 == 0 : ksize +=1\n nhalf = ksize//2 \n\n #number of output and input pixels\n nx = len(x)\n nf = len(speclist[0][0])\n\n # integer and fractional pixel location of each output pixel\n ix = x.astype(int)\n fx = x-ix\n\n # outputs\n outlist=[]\n for spec in speclist :\n if spec[1] is None :\n outlist.append([np.full_like(x,0),None])\n else :\n outlist.append([np.full_like(x,0),np.full_like(x,0)])\n\n for i in range(len(x)) :\n xkernel = np.arange(ksize)-nhalf - fx[i]\n # in units of Nyquist\n xkernel /= (nres/2.)\n u1 = xkernel/dampfac\n u2 = np.pi*xkernel\n sinc = np.exp(-(u1**2)) * np.sin(u2) / u2\n sinc /= (nres/2.)\n\n lobe = np.arange(ksize) - nhalf + ix[i]\n vals = np.zeros(ksize)\n vars = np.zeros(ksize)\n gd = np.where( (lobe>=0) & (lobe<nf) )[0]\n\n for spec,out in zip(speclist,outlist) :\n vals = spec[0][lobe[gd]]\n out[0][i] = (sinc[gd]*vals).sum()\n if spec[1] is not None : \n var = spec[1][lobe[gd]]\n out[1][i] = (sinc[gd]**2*var).sum()\n\n for out in outlist :\n if out[1] is not None : out[1] = np.sqrt(out[1])\n \n return outlist", "def spectral_spread(sign, fs):\n f, ff = plotfft(sign, fs)\n spect_centr = spectral_centroid(sign, fs)\n if not np.sum(ff):\n return 0\n else:\n return np.dot(((f-spect_centr)**2), (ff / np.sum(ff)))", "def find_saturated(spectra, saturation_limit=90000):\n\n razlika = np.abs(\n np.diff(spectra, n=1, axis=-1,\n append=spectra[:,-2][:,None]))\n\n saturated_indices = np.unique(\n np.where(razlika > saturation_limit)[0])\n\n if len(saturated_indices)==0 and np.any(spectra==0):\n print(\"No saturated spectra is found;\\n\"\n \"Please make sure to apply this function before any scaling is done!\")\n else:\n return saturated_indices", "def test_best_result(origianl_waveform):\n origianl_waveform = origianl_waveform.flatten()\n recovery_waveform = []\n audio_length = len(origianl_waveform)\n noise = np.random.random_sample((audio_length,))\n noise_list = [x / 100 for x in noise]\n noise_count = 0\n \n for n in origianl_waveform:\n difference = n - noise_list[noise_count]\n recovery_waveform.append(difference)\n noise_count += 1\n \n return np.asarray(recovery_waveform)", "def getLatestSpectrumMeasurements(self): \n return self.spectrum[len(self.spectrum)-1]", "def calculate_ndvi(self):\n self.ndvi = (self.bands[\"n\"].astype(float) - self.bands[\"r\"].astype(float)) \\\n / (self.bands[\"n\"].astype(float) + self.bands[\"r\"].astype(float))", "def band_penalty(self):\n fc_ix = np.argmin(np.abs(self.f - self.fc)) # Index to frequency array closes to center frequency\n # Number of indexes on each side of center frequency, not extending outside, only up to 10 kHz\n n = min(fc_ix, self.ix10k - fc_ix)\n if n == 0:\n return 0.0\n return np.mean(np.square(self.fr[fc_ix - n:fc_ix] - (self.gain - self.fr[fc_ix + n - 1:fc_ix - 1:-1])))", "def _computeIndex(value, slices_nb):\n \n if doImg >= 0 or doStack or doVideo:\n return value + 1\n else:\n return value % (slices_nb+1)", "def calc_flux_array(self):\n \n # First determine the associated spectrum\n self.compute_template_spectrum()\n\n # Calculate baseline counts to normalise fluxes we scan over\n # Go from 10**(bin_min)*mean up to 10**(bin_max)*mean in nbins steps\n b = self.setup_b_instance(0,add_ps_mask=True)\n mean = np.sum(b.CTB_masked_compressed[0])/len(b.CTB_masked_compressed[0])\n A_array = mean*10**np.linspace(self.bin_min,self.bin_max,self.nbins)\n\n # Array to get LLs when no profile likelihood run\n norun = np.array([1.0, 1.0, 1.0, 1.0])\n\n # Now setup and compute the 
arrays\n LL_array = np.array([]) \n A_array_short = np.array([])\n spect_array = np.array([])\n\n for i in range(len(A_array)):\n print \"on i =\",i\n # Calculate LL\n if i == 0:\n b1 = self.setup_b_instance(A_array[i],add_ps_mask=True)\n else:\n for key in b1.fixed_template_dict_nested.keys():\n b1.fixed_template_dict_nested[key] = b1.fixed_template_dict_nested[key]*A_array[i]/A_array[i-1]\n ll_val = b1.ll(norun,4,4)\n # Make triangle\n\n # Append to arrays\n LL_array = np.append(LL_array,ll_val)\n A_array_short = np.append(A_array_short,A_array[i])\n spect_array = self.spectrum*np.array(A_array_short)\n\n # Save output\n np.save(work_dir+'ScanOutput/'+self.tag+'/En_array-'+str(self.flux_array_ebin)+'.npy',self.En_center)\n np.save(work_dir+'ScanOutput/'+self.tag+'/LL_array-'+str(self.flux_array_ebin)+'.npy',LL_array)\n np.save(work_dir+'ScanOutput/'+self.tag+'/Flux_array-'+str(self.flux_array_ebin)+'.npy',spect_array)", "def one_transition_spectrum_ld(self,tr):\n \n\n ta = tr[\"ta\"] # TimeAxis\n ld = tr[\"ld\"] # linear dichroism strength\n om = tr[\"om\"] # frequency - rwa\n gg = tr[\"gg\"] # natural broadening (constant or time dependent)\n fwhm = tr[\"fwhm\"] # Additional gaussian broadening of the spectra\n sgm = fwhm/(2*numpy.sqrt(2*numpy.log(2)))\n \n if self.system._has_system_bath_coupling:\n# ct = tr[\"ct\"] # correlation function\n \n # convert correlation function to lineshape function\n #gt = self._c2g(ta,ct.data)\n gt = tr[\"gt\"]\n # calculate time dependent response\n at = numpy.exp(-gt -1j*om*ta.data)\n else:\n # calculate time dependent response\n at = numpy.exp(-1j*om*ta.data) \n \n \n if len(gg) == 1:\n gam = gg[0]\n rt = numpy.exp(gam*ta.data)\n at *= rt\n #print(\"Constant: \", rt[20], len(at))\n else:\n rt = numpy.exp((gg)*ta.data) \n at *= rt\n #print(\"Time dependent: len = \", rt[20], len(rt))\n \n if fwhm!=0.0:\n gauss = numpy.exp(-2*(numpy.pi**2)*(sgm**2)*(ta.data**2))\n at *= gauss\n \n # Fourier transform the result\n ft = ld*numpy.fft.hfft(at)*ta.step\n ft = numpy.fft.fftshift(ft)\n # invert the order because hfft is a transform with -i\n ft = numpy.flipud(ft) \n # cut the center of the spectrum\n Nt = ta.length #len(ta.data) \n return ft[Nt//2:Nt+Nt//2]", "def get_beat_sync_chroma_and_spectrum(audio, sr=None, bpm=None):\n if not isinstance(audio, np.ndarray):\n sr = 44100\n y = std.MonoLoader(filename=audio, samplerate=44100)()\n else:\n y = audio\n eql_y = std.EqualLoudness()(y)\n tempo, framed_dbn = self_tempo_estimation(y, sr, tempo=bpm)\n if framed_dbn.shape[0] % 4 == 0:\n framed_dbn = np.append(framed_dbn, np.array(len(y)/sr))\n band1 = (0, 220)\n band2 = (220, 1760)\n band3 = (1760, sr / 2)\n band1list = []\n band2list = []\n band3list = []\n chromas = []\n for i in range(1, len(framed_dbn)):\n fft_eq = abs(np.fft.fft(eql_y[int(framed_dbn[i - 1] * sr):int(framed_dbn[i] * sr)]))\n freqs = np.fft.fftfreq(len(fft_eq), 1 / sr)\n band1list.append(np.sqrt(np.mean(sum(fft_eq[np.where(np.logical_and(freqs > band1[0], freqs < band1[1]))]**2))))\n band2list.append(np.sqrt(np.mean(sum(fft_eq[np.where(np.logical_and(freqs > band2[0], freqs < band2[1]))]**2))))\n band3list.append(np.sqrt(np.mean(sum(fft_eq[np.where(np.logical_and(freqs > band3[0], freqs < band3[1]))]**2))))\n stft = abs(core.stft(y[int(framed_dbn[i - 1] * sr):int(framed_dbn[i] * sr)]))\n chroma = np.mean(feature.chroma_stft(y=None, S=stft ** 2), axis=1)\n chromas.append(chroma)\n chromas = np.array(chromas).transpose()\n band1list = np.array(band1list).transpose()\n band2list = 
np.array(band2list).transpose()\n band3list = np.array(band3list).transpose()\n return (chromas, np.vstack([band1list, band2list, band3list]))", "def _raveled_index(self):\n return np.r_[:self.size]", "def _raveled_index(self):\n return np.r_[:self.size]", "def spectral_roll_off(sign, fs):\n output = 0\n f, ff = plotfft(sign, fs)\n cum_ff = np.cumsum(ff)\n value = 0.95*(sum(ff))\n\n for i in range(len(ff)):\n if cum_ff[i] >= value:\n output = f[i]\n break\n return output", "def convolution_spectrum(self, spectrum):\n ret = []\n for i in range(len(spectrum)):\n for j in range(i+1, len(spectrum)):\n diff = abs(spectrum[j] - spectrum[i])\n if diff > 0:\n ret.append(diff)\n return ret" ]
[ "0.6286422", "0.61412746", "0.59083164", "0.58922887", "0.58213574", "0.580119", "0.57615376", "0.5752075", "0.5741933", "0.5708254", "0.56879544", "0.56858265", "0.56429803", "0.56224555", "0.55869263", "0.558585", "0.55347013", "0.5534027", "0.5532789", "0.553044", "0.5509682", "0.54849166", "0.5479497", "0.5450436", "0.54399824", "0.54090726", "0.54040915", "0.5393944", "0.5392326", "0.53905207", "0.5389336", "0.5374662", "0.5356195", "0.53420055", "0.5336706", "0.5334264", "0.53312767", "0.5330154", "0.532754", "0.53169596", "0.5306396", "0.530615", "0.53027904", "0.5293472", "0.5291168", "0.5286285", "0.52819", "0.5280592", "0.5280001", "0.5274774", "0.5272265", "0.5263673", "0.5254975", "0.5245152", "0.523513", "0.5234781", "0.5230276", "0.5215282", "0.5213723", "0.52019244", "0.5197112", "0.5194411", "0.5192427", "0.519164", "0.5179992", "0.5178925", "0.5176036", "0.51756096", "0.517552", "0.5171179", "0.5166254", "0.51661646", "0.51650167", "0.5164955", "0.51635325", "0.51585054", "0.5155229", "0.5154629", "0.5152678", "0.51459175", "0.51424426", "0.5137525", "0.5135292", "0.5134138", "0.5133596", "0.5133495", "0.51190275", "0.5117601", "0.5107698", "0.5103457", "0.5097569", "0.5095512", "0.5094781", "0.5086614", "0.50845593", "0.50820655", "0.50741315", "0.5072907", "0.5072907", "0.50726074", "0.5069764" ]
0.0
-1
cut out and normalize flux around a line
def continuum_normalized_region_around_line(cls, wi, fi, blue, red, band=None, degree=1):
    w = np.asarray(wi)
    flux = np.atleast_2d(fi)
    # index is true in the region where we fit the polynomial
    indcont = (((w >= blue[0]) & (w <= blue[1])) |
               ((w >= red[0]) & (w <= red[1])))
    # index of the region we want to return
    if band is None:
        band = blue[0], red[1]
    indrange = (w > band[0]) & (w < band[1])
    wnew = w[indrange]
    wcont = w[indcont]
    # make a flux array of shape
    # (number of spectra, number of points in indrange)
    f = np.zeros((flux.shape[0], indrange.sum()))
    for i in range(flux.shape[0]):
        # fit polynomial of second order to the continuum region
        linecoeff = np.polyfit(wcont, flux[i, indcont], degree)
        # divide the flux by the polynomial and put the result in our new flux
        # array
        f[i, :] = flux[i, indrange] / np.polyval(linecoeff, wnew)
    return wnew, np.squeeze(f)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def line_lum(line_flux, dist): \n line_lum = 4 * pi * (dist*u.pc)**2 * line_flux * u.erg / (u.s * (u.cm)**2)\n line_lum = line_lum.decompose().to(u.W)\n return line_lum/u.W", "def normalize_vector(line):\n if isinstance(line, pd.DataFrame):\n line = line.values\n try:\n n = np.sqrt(line[:, 0]**2 + line[:, 1]**2).reshape(-1, 1)\n except:\n n = np.sqrt(line[0]**2 + line[1]**2)\n line = line / n\n return line", "def emission_line(self, line_um, line_flux=1, scale_disp=1.0, velocity_sigma=100., nsig=4):\n res = self.resample_func(self.spec_wobs,\n self.spec_R_fwhm*scale_disp, \n self.xline*line_um,\n self.yline,\n velocity_sigma=velocity_sigma,\n nsig=nsig)\n \n return res*line_flux/line_um", "def _remove_flux_extinction(self):\n self.fluxUnred = self.flux.copy()\n self.fluxErrUnred = self.fluxErr.copy()\n self.fluxRenorm = self.flux.copy()\n self.fluxErrRenorm = self.fluxErr.copy()\n\n # Using negative a_v so that extinction.apply works in reverse and removes the extinction\n if self.mwebv:\n extinctions = extinction.fitzpatrick99(wave=self._good_filter_wave, \\\n a_v=-3.1 * self.mwebv, r_v=3.1, unit='aa')\n\n for i, pb in enumerate(self._good_filters):\n mask = (self.passband == pb)\n\n flux_pb = self.flux[mask]\n fluxerr_pb = self.fluxErr[mask]\n npbobs = len(flux_pb)\n\n if npbobs < 1:\n return\n\n if self.mwebv:\n flux_out = extinction.apply(extinctions[i], flux_pb, inplace=False)\n fluxerr_out = extinction.apply(extinctions[i], fluxerr_pb, inplace=False)\n else:\n flux_out = flux_pb\n fluxerr_out = fluxerr_pb\n self.fluxUnred[mask] = flux_out\n self.fluxErrUnred[mask] = fluxerr_out\n\n if npbobs > 1:\n # there's at least enough observations to find minimum and maximum\n minfluxpb = flux_out.min()\n maxfluxpb = flux_out.max()\n norm = maxfluxpb - minfluxpb\n self.fluxRenorm[mask] = (flux_out - minfluxpb) / norm\n self.fluxErrRenorm[mask] = fluxerr_out / norm\n elif npbobs == 1:\n # deal with the case with one observation in this passband by setting renorm = 0.5\n norm = self.fluxUnred[mask] / 0.5\n self.fluxRenorm[mask] /= norm\n self.fluxErrRenorm[mask] /= norm\n\n self._default_cols = ['time', 'flux', 'fluxErr', 'fluxUnred', 'fluxErrUnred', \\\n 'fluxRenorm', 'fluxErrRenorm', 'photflag', 'zeropoint', 'obsId']\n return", "def _normalize(self):\r\n self.dataframe['norm_intensity'] = self.dataframe['intensity']\r\n self.dataframe['norm_intensity'] -= self.dataframe['norm_intensity'].min()\r\n self.dataframe['norm_intensity'] /= self.dataframe['norm_intensity'].max() * 0.01", "def normalize_flux(self):\n fmax = 0\n fmin = 1e99\n for n in self.graph:\n if n.flux > fmax:\n fmax = n.flux\n if n.flux < fmin:\n fmin = n.flux\n for n in self.graph:\n n.flux = (n.flux-fmin)/(fmax-fmin)", "def normalize(wav, flux):\n return flux / flux.max() # maximum flux = 1\n\n # flux_norm = flux[wav>wav_norm][0]\n # return flux / flux_norm", "def normalise(self):\n\n # Find extrema\n xmin = self.segments[0].lower_bound\n xmax = self.segments[0].upper_bound\n for seg in self.segments[1:]:\n xmin = min(xmin, seg.lower_bound)\n xmax = max(xmax, seg.upper_bound)\n\n range = xmax-xmin \n\n # Normalise\n for seg in self.segments:\n seg.lower_bound = (seg.lower_bound-xmin)/range\n seg.upper_bound = (seg.upper_bound-xmin)/range\n\n # Return\n return xmin, xmax", "def fluxes(wavelength, s, line, lowlow= 14, lowhigh=6, highlow=6, highhigh = 14, lmin=0, lmax=0, fmin=0, fmax=0, \n broad=2.355, plot=True, verbose=True, plot_sus = False, fcal = True, fit_continuum = True, median_kernel=35, warnings = True ): # Broad is 
FWHM for Gaussian sigma= 1,\n # s must be an array, no a list\n try: \n index_maximo_del_rango = s.tolist().index(np.nanmax(s))\n #print \" is AN ARRAY\"\n except Exception:\n #print \" s is A LIST -> must be converted into an ARRAY\" \n s = np.array(s)\n \n # Setup wavelength limits\n if lmin == 0 :\n lmin = line-65. # By default, +-65 A with respect to line\n if lmax == 0 :\n lmax = line+65.\n \n # Extract subrange to fit\n w_spec = []\n f_spec = []\n w_spec.extend((wavelength[i]) for i in range(len(wavelength)) if (wavelength[i] > lmin and wavelength[i] < lmax) ) \n f_spec.extend((s[i]) for i in range(len(wavelength)) if (wavelength[i] > lmin and wavelength[i] < lmax) ) \n \n if np.isnan(np.nanmedian(f_spec)): \n # The data are NAN!! Nothing to do\n if verbose or warnings: print(\" There is no valid data in the wavelength range [{},{}] !!\".format(lmin,lmax))\n \n resultado = [0, line, 0, 0, 0, 0, 0, 0, 0, 0, 0, s ] \n\n return resultado\n \n else: \n \n ## 20 Sep 2020\n f_spec_m=signal.medfilt(f_spec,median_kernel) # median_kernel = 35 default\n \n \n # Remove nans\n median_value = np.nanmedian(f_spec)\n f_spec = [median_value if np.isnan(x) else x for x in f_spec] \n \n \n # Setup min and max flux values in subrange to fit\n if fmin == 0 :\n fmin = np.nanmin(f_spec) \n if fmax == 0 :\n fmax = np.nanmax(f_spec) \n \n # We have to find some \"guess numbers\" for the Gaussian. Now guess_centre is line\n guess_centre = line\n \n # Define continuum regions: [-lowlow, -lowhigh] and [highlow,highhigh] in Angstroms with respect to guess_centre\n \n w_cont=[]\n f_cont=[]\n w_cont.extend((w_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre-lowlow and w_spec[i] < guess_centre-lowhigh) or (w_spec[i] > guess_centre+highlow and w_spec[i] < guess_centre+highhigh) ) \n f_cont.extend((f_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre-lowlow and w_spec[i] < guess_centre-lowhigh) or (w_spec[i] > guess_centre+highlow and w_spec[i] < guess_centre+highhigh) ) \n \n if fit_continuum:\n # Linear Fit to continuum \n f_cont_filtered=sig.medfilt(f_cont,np.int(median_kernel))\n #print line #f_cont\n # if line == 8465.0:\n # print w_cont\n # print f_cont_filtered\n # plt.plot(w_cont,f_cont_filtered)\n # plt.show()\n # plt.close()\n # warnings=True\n try: \n mm,bb = np.polyfit(w_cont, f_cont_filtered, 1)\n except Exception:\n bb = np.nanmedian(f_cont_filtered)\n mm = 0.\n if warnings: \n print(\" WARNING: Impossible to get the continuum!\")\n print(\" Scaling the continuum to the median value b = \",bb,\": cont = 0 * w_spec + \", bb)\n continuum = mm*np.array(w_spec)+bb \n c_cont = mm*np.array(w_cont)+bb \n \n else: \n # Median value in each continuum range # NEW 15 Sep 2019\n w_cont_low = []\n f_cont_low = []\n w_cont_low.extend((w_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre-lowlow and w_spec[i] < guess_centre-lowhigh) ) \n f_cont_low.extend((f_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre-lowlow and w_spec[i] < guess_centre-lowhigh) ) \n median_w_cont_low = np.nanmedian(w_cont_low)\n median_f_cont_low = np.nanmedian(f_cont_low)\n w_cont_high = []\n f_cont_high = []\n w_cont_high.extend((w_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre+highlow and w_spec[i] < guess_centre+highhigh) ) \n f_cont_high.extend((f_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre+highlow and w_spec[i] < guess_centre+highhigh) ) \n median_w_cont_high = np.nanmedian(w_cont_high)\n median_f_cont_high = np.nanmedian(f_cont_high) 
\n \n b = (median_f_cont_low-median_f_cont_high)/(median_w_cont_low-median_w_cont_high)\n a = median_f_cont_low- b * median_w_cont_low\n \n continuum = a + b*np.array(w_spec)\n c_cont = a + b*np.array(w_cont) \n \n \n # rms continuum\n rms_cont = np.nansum([ np.abs(f_cont[i] - c_cont[i]) for i in range(len(w_cont)) ]) / len(c_cont)\n \n # Search for index here w_spec(index) closest to line\n min_w = np.abs(np.array(w_spec)-line)\n mini = np.nanmin(min_w)\n # guess_peak = f_spec[min_w.tolist().index(mini)] # WE HAVE TO SUSTRACT CONTINUUM!!!\n guess_peak = f_spec[min_w.tolist().index(mini)] - continuum[min_w.tolist().index(mini)]\n \n # LOW limit\n low_limit=0\n w_fit = []\n f_fit = []\n w_fit.extend((w_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre-15 and w_spec[i] < guess_centre)) \n f_fit.extend((f_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre-15 and w_spec[i] < guess_centre)) \n if fit_continuum: \n c_fit=mm*np.array(w_fit)+bb \n else: \n c_fit=b*np.array(w_fit)+a \n \n fs=[]\n ws=[]\n for ii in range(len(w_fit)-1,1,-1):\n if f_fit[ii]/c_fit[ii] < 1.05 and f_fit[ii-1]/c_fit[ii-1] < 1.05 and low_limit == 0: low_limit = w_fit[ii]\n # if f_fit[ii]/c_fit[ii] < 1.05 and low_limit == 0: low_limit = w_fit[ii]\n fs.append(f_fit[ii]/c_fit[ii])\n ws.append(w_fit[ii])\n if low_limit == 0: \n sorted_by_flux=np.argsort(fs)\n try:\n low_limit = ws[sorted_by_flux[0]]\n except Exception:\n plot=True\n low_limit = 0\n \n # HIGH LIMIT \n high_limit=0\n w_fit = []\n f_fit = []\n w_fit.extend((w_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre and w_spec[i] < guess_centre+15)) \n f_fit.extend((f_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre and w_spec[i] < guess_centre+15)) \n if fit_continuum: \n c_fit=mm*np.array(w_fit)+bb \n else: \n c_fit=b*np.array(w_fit)+a\n \n fs=[]\n ws=[]\n for ii in range(len(w_fit)-1):\n if f_fit[ii]/c_fit[ii] < 1.05 and f_fit[ii+1]/c_fit[ii+1] < 1.05 and high_limit == 0: high_limit = w_fit[ii]\n # if f_fit[ii]/c_fit[ii] < 1.05 and high_limit == 0: high_limit = w_fit[ii]\n fs.append(f_fit[ii]/c_fit[ii])\n ws.append(w_fit[ii])\n if high_limit == 0: \n sorted_by_flux=np.argsort(fs)\n try:\n high_limit = ws[sorted_by_flux[0]] \n except Exception:\n plot=True\n high_limit = 0 \n \n # Guess centre will be the highest value in the range defined by [low_limit,high_limit]\n \n try: \n rango = np.where((high_limit >= wavelength ) & (low_limit <= wavelength)) \n index_maximo_del_rango = s.tolist().index(np.nanmax(s[rango]))\n guess_centre = wavelength[index_maximo_del_rango]\n except Exception:\n guess_centre = line #### It was 0 before\n \n \n # Fit a Gaussian to data - continuum \n p0 = [guess_centre, guess_peak, broad/2.355] # broad is the Gaussian sigma, 1.0 for emission lines\n try:\n fit, pcov = curve_fit(gauss, w_spec, f_spec-continuum, p0=p0, maxfev=10000) # If this fails, increase maxfev...\n fit_error = np.sqrt(np.diag(pcov))\n \n # New 28th Feb 2019: Check central value between low_limit and high_limit\n # Better: between guess_centre - broad, guess_centre + broad\n # If not, redo fit fixing central value to the peak (it does not work... 
just fix FWHM= (high_limit-low_limit)/2.5 )\n \n if verbose != False: print(\" ----------------------------------------------------------------------------------------\")\n # if low_limit < fit[0] < high_limit:\n if fit[0] < guess_centre - broad or fit[0] > guess_centre + broad:\n # if verbose: print \" Fitted center wavelength\", fit[0],\"is NOT in the range [\",low_limit,\",\",high_limit,\"]\"\n if verbose: print(\" Fitted center wavelength\", fit[0],\"is NOT in the expected range [\",guess_centre - broad,\",\",guess_centre + broad,\"]\")\n \n # print \"Re-do fitting fixing center wavelength\"\n # p01 = [guess_peak, broad]\n # fit1, pcov1 = curve_fit(gauss_fix_x0, w_spec, f_spec-continuum, p0=p01, maxfev=100000) # If this fails, increase maxfev...\n # fit_error1 = np.sqrt(np.diag(pcov1))\n # fit[0]=guess_centre\n # fit_error[0] = 0.\n # fit[1] = fit1[0]\n # fit_error[1] = fit_error1[0]\n # fit[2] = fit1[1]\n # fit_error[2] = fit_error1[1] \n \n fit[0]=guess_centre\n fit_error[0] = 0.000001\n fit[1]=guess_peak\n fit_error[1] = 0.000001\n fit[2] = broad/2.355\n fit_error[2] = 0.000001 \n else:\n if verbose: print(\" Fitted center wavelength\", fit[0],\"IS in the expected range [\",guess_centre - broad,\",\",guess_centre + broad,\"]\")\n \n \n if verbose: print(\" Fit parameters = \", fit[0], fit[1], fit[2])\n if fit[2] == broad and warnings == True : \n print(\" WARNING: Fit in\",fit[0],\"failed! Using given centre wavelength (cw), peak at (cv) & sigma = broad/2.355 given.\") \n gaussian_fit = gauss(w_spec, fit[0], fit[1], fit[2])\n \n \n # Estimate rms of the Gaussian fit in range [low_limit, high_limit]\n residuals = f_spec-gaussian_fit-continuum\n rms_fit = np.nansum([ ((residuals[i]**2)/(len(residuals)-2))**0.5 for i in range(len(w_spec)) if (w_spec[i] >= low_limit and w_spec[i] <= high_limit) ]) \n \n # Fluxes, FWHM and Eq. 
Width calculations\n gaussian_flux = gauss_flux(fit[1],fit[2])\n error1 = np.abs(gauss_flux(fit[1]+fit_error[1],fit[2]) - gaussian_flux)\n error2 = np.abs(gauss_flux(fit[1],fit[2]+fit_error[2]) - gaussian_flux)\n gaussian_flux_error = 1 / ( 1/error1**2 + 1/error2**2 )**0.5\n \n \n fwhm=fit[2]*2.355\n fwhm_error = fit_error[2] *2.355\n fwhm_vel = fwhm / fit[0] * C \n fwhm_vel_error = fwhm_error / fit[0] * C \n \n gaussian_ew = gaussian_flux/np.nanmedian(f_cont)\n gaussian_ew_error = gaussian_ew * gaussian_flux_error/gaussian_flux \n \n # Integrated flux\n # IRAF: flux = sum ((I(i)-C(i)) * (w(i2) - w(i1)) / (i2 - i2) \n flux = np.nansum([ (f_spec[i]-continuum[i])*(w_spec[i+1]-w_spec[i]) for i in range(len(w_spec)) if (w_spec[i] >= low_limit and w_spec[i] <= high_limit) ]) \n flux_error = rms_cont * (high_limit - low_limit)\n wave_resolution = (wavelength[-1]-wavelength[0])/len(wavelength)\n ew = wave_resolution * np.nansum ([ (1 - f_spec[i]/continuum[i]) for i in range(len(w_spec)) if (w_spec[i] >= low_limit and w_spec[i] <= high_limit) ]) \n ew_error = np.abs(ew*flux_error/flux) \n gauss_to_integrated = gaussian_flux/flux * 100.\n \n index=0\n s_s=np.zeros_like(s)\n for wave in range(len(wavelength)):\n s_s[wave]=s[wave]\n if wavelength[wave] == w_spec[0] : \n s_s[wave] = f_spec[0]-gaussian_fit[0]\n index=1\n if wavelength[wave] > w_spec[0] and wavelength[wave] <= w_spec[-1]:\n s_s[wave] = f_spec[index]-gaussian_fit[index]\n index=index+1\n \n # Plotting \n ptitle = 'Fit: x0=%.2f y0=%.2e sigma=%.2f flux=%.2e rms=%.3e' % (fit[0], fit[1], fit[2], gaussian_flux, rms_fit)\n if plot :\n plt.figure(figsize=(10, 4))\n # Plot input spectrum\n plt.plot(np.array(w_spec),np.array(f_spec), \"b\", lw=3, alpha = 0.8)\n # Plot median input spectrum\n plt.plot(np.array(w_spec),np.array(f_spec_m), \"orange\", lw=3, alpha = 0.5) # 2021: era \"g\"\n # Plot spectrum - gauss subtracted\n plt.plot(wavelength,s_s,\"g\",lw=3, alpha = 0.6)\n \n plt.minorticks_on() \n plt.xlabel(\"Wavelength [$\\mathrm{\\AA}$ ]\")\n if fcal:\n plt.ylabel(\"Flux [ erg cm$^{-2}$ s$^{-1}$ $\\mathrm{\\AA}^{-1}$ ]\")\n else:\n plt.ylabel(\"Flux [ counts ]\")\n plt.xlim(lmin,lmax)\n plt.ylim(fmin,fmax)\n \n # Vertical line at guess_centre\n plt.axvline(x=guess_centre, color='r', linestyle='-', alpha=0.3)\n # Horizontal line at y = 0\n plt.axhline(y=0, color='k', linestyle=':', alpha=0.5) \n # Dashed green regions for continuum, defined by [lowlow, lowhigh] and [highlow,highhigh]\n plt.axvspan(guess_centre+highlow, guess_centre+highhigh, facecolor='g', alpha=0.15,zorder=3)\n plt.axvspan(guess_centre-lowlow, guess_centre-lowhigh, facecolor='g', alpha=0.15,zorder=3)\n # Plot linear fit for continuum\n plt.plot(w_spec, continuum,\"g--\")\n # Plot Gaussian fit \n plt.plot(w_spec, gaussian_fit+continuum, 'r-', alpha=0.8) \n # Vertical line at Gaussian center\n plt.axvline(x=fit[0], color='k', linestyle='-', alpha=0.5)\n # Vertical lines to emission line\n plt.axvline(x= low_limit, color='k', linestyle=':', alpha=0.5)\n plt.axvline(x= high_limit, color='k', linestyle=':', alpha=0.5) \n # Plot residuals\n plt.plot(w_spec, residuals, 'k')\n plt.title(ptitle)\n plt.show()\n \n # Printing results\n if verbose :\n print(\"\\n - Gauss and continuum fitting + integrated flux calculations:\\n\")\n print(\" rms continuum = %.3e erg/cm/s/A \" % (rms_cont)) \n print(\" Gaussian Fit parameters: x0 = ( %.2f +- %.2f ) A \" % (fit[0], fit_error[0]))\n print(\" y0 = ( %.3f +- %.3f ) 1E-16 erg/cm2/s/A\" % (fit[1]/1E-16, fit_error[1]/1E-16 ))\n print(\" sigma = ( 
%.3f +- %.3f ) A\" % (fit[2], fit_error[2])) \n print(\" rms fit = %.3e erg/cm2/s/A\" % (rms_fit))\n print(\" Gaussian Flux = ( %.2f +- %.2f ) 1E-16 erg/s/cm2 (error = %.1f per cent)\" % (gaussian_flux/1E-16, gaussian_flux_error/1E-16, gaussian_flux_error/gaussian_flux*100))\n print(\" FWHM = ( %.3f +- %.3f ) A = ( %.1f +- %.1f ) km/s \" % (fwhm, fwhm_error, fwhm_vel, fwhm_vel_error))\n print(\" Eq. Width = ( %.1f +- %.1f ) A\" % (-gaussian_ew, gaussian_ew_error)) \n print(\"\\n Integrated flux = ( %.2f +- %.2f ) 1E-16 erg/s/cm2 (error = %.1f per cent) \" % ( flux/1E-16, flux_error/1E-16, flux_error/flux *100)) \n print(\" Eq. Width = ( %.1f +- %.1f ) A\" % (ew, ew_error))\n print(\" Gauss/Integrated = %.2f per cent \" % gauss_to_integrated)\n \n \n # Plot independent figure with substraction if requested \n if plot_sus: plot_plot(wavelength,[s,s_s], xmin=lmin, xmax=lmax, ymin=fmin, ymax=fmax, fcal=fcal, frameon=True, ptitle=ptitle)\n \n # 0 1 2 3 4 5 6 7 8 9 10 11\n resultado = [rms_cont, fit[0], fit_error[0], gaussian_flux, gaussian_flux_error, fwhm, fwhm_error, flux, flux_error, ew, ew_error, s_s ]\n return resultado \n except Exception:\n if verbose: \n print(\" - Gaussian fit failed!\")\n print(\" However, we can compute the integrated flux and the equivalent width:\")\n \n flux = np.nansum([ (f_spec[i]-continuum[i])*(w_spec[i+1]-w_spec[i]) for i in range(len(w_spec)) if (w_spec[i] >= low_limit and w_spec[i] <= high_limit) ]) \n flux_error = rms_cont * (high_limit - low_limit)\n wave_resolution = (wavelength[-1]-wavelength[0])/len(wavelength)\n ew = wave_resolution * np.nansum ([ (1 - f_spec[i]/continuum[i]) for i in range(len(w_spec)) if (w_spec[i] >= low_limit and w_spec[i] <= high_limit) ]) \n ew_error = np.abs(ew*flux_error/flux) \n \n if verbose:\n print(\" Integrated flux = ( %.2f +- %.2f ) 1E-16 erg/s/cm2 (error = %.1f per cent) \" % ( flux/1E-16, flux_error/1E-16, flux_error/flux *100)) \n print(\" Eq. 
Width = ( %.1f +- %.1f ) A\" % (ew, ew_error))\n \n resultado = [0, guess_centre, 0, 0, 0, 0, 0, flux, flux_error, ew, ew_error, s ] # guess_centre was identified at maximum value in the [low_limit,high_limit] range but Gaussian fit failed\n \n \n # Plotting \n if plot :\n plt.figure(figsize=(10, 4))\n plt.plot(np.array(w_spec),np.array(f_spec), \"b\", lw=3, alpha = 0.5)\n plt.minorticks_on() \n plt.xlabel(\"Wavelength [$\\mathrm{\\AA}$]\")\n if fcal:\n plt.ylabel(\"Flux [ erg cm$^{-2}$ s$^{-1}$ $\\mathrm{\\AA}^{-1}$ ]\")\n else:\n plt.ylabel(\"Flux [ counts ]\") \n plt.xlim(lmin,lmax)\n plt.ylim(fmin,fmax)\n \n # Vertical line at guess_centre\n plt.axvline(x=guess_centre, color='r', linestyle='-', alpha=0.5)\n # Horizontal line at y = 0\n plt.axhline(y=0, color='k', linestyle=':', alpha=0.5) \n # Dashed green regions for continuum, defined by [lowlow, lowhigh] and [highlow,highhigh]\n plt.axvspan(guess_centre+highlow, guess_centre+highhigh, facecolor='g', alpha=0.15,zorder=3)\n plt.axvspan(guess_centre-lowlow, guess_centre-lowhigh, facecolor='g', alpha=0.15,zorder=3)\n # Plot linear fit for continuum\n plt.plot(w_spec, continuum,\"g--\")\n # Plot Gaussian fit \n # plt.plot(w_spec, gaussian_fit+continuum, 'r-', alpha=0.8) \n # Vertical line at Gaussian center\n # plt.axvline(x=fit[0], color='k', linestyle='-', alpha=0.5)\n # Vertical lines to emission line\n plt.axvline(x= low_limit, color='k', linestyle=':', alpha=0.5)\n plt.axvline(x= high_limit, color='k', linestyle=':', alpha=0.5) \n # Plot residuals\n # plt.plot(w_spec, residuals, 'k')\n plt.title(\"No Gaussian fit obtained...\")\n plt.show()\n \n \n return resultado", "def FilterLine(self, a_line):\n return a_line", "def normalize(self):\n total = 0.0\n for i in range(0,self.npoints):\n total+=self.y[i]*self._dx\n for i in range(0,self.npoints):\n self.y[i]/=total\n return", "def line_flux_simple(x, y, x1, x2, y0=None):\n # Only want the data within the integration limits\n idx = (x1 <= x) & (x <= x2)\n x, y = x[idx], y[idx]\n\n # Rough continuum subtraction\n y = y if y0 is None else y - y0\n\n dx = x[1:] - x[:-1]\n dy = y[1:] - y[:-1]\n area = np.sum(dx*y[:-1] + 0.5*dx*dy)\n\n return area", "def normaliseTracesMagnitude(traces, unfolded_level): \n return traces / np.abs(unfolded_level)", "def cut_line(self):\r\n self.parachute.pop(0)", "def _normalize(self, x):\n # TODO: imagenet normalization\n\n return x", "def normbands(line, E_fermi, kpoints_norm):\n\tls = line.split()\n\tif len(ls) == 2:\n\t\tx = float(ls[0])/kpoints_norm\n\t\ty = float(ls[1])-E_fermi\n\t\tline = \" {:.4f} {: 8.4f}\\n\".format(x,y) # :P\n\treturn line", "def normalizeObservedData(line, id_target, x_target, y_target, r_target, dbh=True):\n\n treeID = line[id_target]\n x = line[x_target]\n y = line[y_target]\n if dbh == True:\n r = line[r_target]/2.\n else:\n r = line[r_target]\n \n return treeID, x, y, r", "def decomposing_line_cut_by_splicing(P, v, w):\n\n\n\tv_Point = Point(v)\n\tw_Point = Point(w)\n\n\tchain = LineString(P[0]+[P[0][0]])\n\n\tdistance_to_v = chain.project(v_Point)\n\tdistance_to_w = chain.project(w_Point)\n\n\tif not chain.intersects(v_Point):\n\t\tprint(\"decomposing_cut_as_line: V not on chain\")\n\tif not chain.intersects(w_Point):\n\t\tprint(\"decomposing_cut_as_line: W not on chain\")\n\tif distance_to_w == distance_to_v:\n\t\tprint(\"decomposing_cut_as_line: W and V are the same\")\n\n\n\tif distance_to_w >= chain.length or distance_to_w == 0:\n\n\t\tleft_chain, right_chain = cut_linestring(chain, distance_to_v)\n\n\t\tp_l = 
left_chain.coords[:]\n\t\tp_r = right_chain.coords[:]\t\t\n\n\t\treturn p_l, p_r\n\n\tif distance_to_v >= chain.length or distance_to_v == 0:\n\n\t\tleft_chain, right_chain = cut_linestring(chain, distance_to_w)\n\n\t\tp_l = right_chain.coords[:]\n\t\tp_r = left_chain.coords[:]\t\t\n\n\t\treturn p_l, p_r\n\n\n\tif distance_to_w > distance_to_v:\n\n\t\tleft_v_cut, right_v_cut = cut_linestring(chain, distance_to_v)\n\n\t\tdistance_to_w = right_v_cut.project(w_Point)\n\t\tleft_w_chain, right_w_chain = cut_linestring(right_v_cut, distance_to_w)\n\n\t\tp_l = left_v_cut.coords[:]+right_w_chain.coords[:-1]\n\t\tp_r = left_w_chain.coords[:]\n\n\t\treturn p_l, p_r\n\n\telse:\n\n\t\tleft_w_cut, right_w_cut = cut_linestring(chain, distance_to_w)\n\n\t\tdistance_to_v = right_w_cut.project(v_Point)\n\t\tleft_v_chain, right_v_chain = cut_linestring(right_w_cut, distance_to_v)\n\n\t\tp_l = left_w_cut.coords[:]+right_v_chain.coords[:-1]\n\t\tp_r = left_v_chain.coords[:]\n\n\t\treturn p_l, p_r", "def normalise(image):", "def _normalize(self, offset=0):\n if self.t.min() < offset:\n self.t += offset - abs(self.t.min())", "def trim_line(x1, y1, x2, y2, a, b):\n\tm = (y2 - y1)/(x2 - x1)\n\n\tif x1 < a:\n\t\ty1 += m * (a - x1)\n\n\tif x2 > b: \n\t\ty2 += m * (b - x2)\n\n\treturn x1, y1, x2, y2", "def surface_runoff_flux(runoff, drain):\n return runoff - drain", "def normalize_scl(self,x):\n max_val = np.max(x['data'][0])\n last_val = x['data'][0][-1]\n return last_val/max_val", "def line_moved(self):\n\n # The line is supposed to be moved by hand to the beginning of first wrinkle.\n # The optimal spot is local maximum (not always visible)\n ext_index = self.index_of_drop + int(self.line.value() * 10000)\n ext_value = self.data[ext_index]\n\n p_i, p_f = toolbox_2.get_pressure_change(self.measurement)\n smallest_growing_particle = toolbox_2.minimum_particle_diameter(p_i, p_f, self.saturation_percentage / 100)\n\n n = toolbox_2.particle_count_2(ext_value)\n\n # measurement series 1\n if self.selected_data == 3 and 7 <= self.meas_selected_number <= 17 and self.meas_selected_series == 1:\n index = self.meas_selected_number - 7 # Assumes that first measurement is number 7\n self.smallest_particles[index] = smallest_growing_particle\n self.number_counts[index] = n\n\n self.update_distribution()\n # Update plot\n self.curve_distribution.setData(self.particle_distribution_x, self.particle_distribution_y*1e-10)\n self.curve_distribution_cumulative.setData(self.smallest_particles, self.number_counts*1e-10)\n\n # measurement series 2\n elif self.selected_data == 3 and self.meas_selected_series == 2:\n index = self.meas_selected_number - 1 # begins from 1, 0th measurement is just copy of 8th\n self.number_counts_2[index] = n\n\n self.curve_rotatometer.setData(np.array([4, 6, 8, 10, 12, 14, 16, 18]), self.number_counts_2*1e-10)\n x = np.linspace(3.5, 20, 100)\n self.curve_rotatometer_fit.setData(x, self.number_counts_2[0] * 4 * (1 / x) *1e-10)\n\n #print(\"N\", \"%.2e\"%n, \"dpres\", round(p_i - p_f))", "def Apply_Line_Filter( self ):\r\n self.system.Filter_By_Protein_Distance( self.filter_distance )", "def general(self):\n return -self.line[[0, 2]] / self.line[1]", "def _remove_baseline(x, axis=None):\n x -= np.mean(x, axis=axis, keepdims=True)\n return x", "def filter(self):\n M, p, q = self.M, self.p, self.q\n x = self.x\n idx = len(self.x) - (p + 1)\n x_ = self.x_prev + (x[idx + p] - x[idx - q]) / M\n self.t_.append(self.t[idx])\n self.t_filtered.append(self.t[idx])\n self.x_.append(x_)\n self.x_filtered.append(x_)\n 
self.x_prev = x_", "def normal(self, t=0):\n p = self.lerp(t)\n if self.da < 0:\n return Line(p, self.c - p)\n else:\n return Line(p, p - self.c)", "def bin_normalize_moving(self, x):\n return _bin_normalize(x, self.mmin, self.mdelta)", "def fast_emission_line(self, line_um, line_flux=1, scale_disp=1.0, velocity_sigma=100.):\n res = self.sample_line_func(self.spec_wobs,\n self.spec_R_fwhm*scale_disp, \n line_um,\n line_flux=line_flux,\n velocity_sigma=velocity_sigma, \n )\n return res", "def normalize(self, factor):", "def normalize(x):\n # TODO: Implement Function\n data_max = np.max(x)\n data_min = np.min(x)\n x = (x - data_min) / (data_max - data_min)\n return x", "def truncate_data(self, width):\n times_from_mid = self.time - self.midtime\n idxs = np.abs(times_from_mid) < 0.5 * width * self.duration\n self.time = self.time[idxs]\n self.flux = self.flux[idxs]", "def _normalize_point(self, p):\n segment_right_index = 1\n while p.x > self._hull_points[segment_right_index].x and segment_right_index < len(self._hull_points) - 1:\n segment_right_index += 1\n p1, p2 = self._hull_points[segment_right_index - 1], self._hull_points[segment_right_index]\n k = (p2.y - p1.y) / (p2.x - p1.x)\n b = p1.y - k * p1.x\n return Point(p.x, p.y / (k * p.x + b))", "def normalise_to_magnitude(self, magnitude, band):\n\n from ..photometry import mag2flux\n\n mag_flux = mag2flux(magnitude, band)\n spec_flux = self.calculate_flux(band)\n norm = mag_flux / spec_flux\n self.flux *= norm", "def remove_baseline(self):\n\n print(\" \\t Apply Savitzky-Golay filter \\t %d\" %self.nwin)\n base_savgol = signal.savgol_filter(self.input, self.nwin, 1)\n self.input_nobase = self.input - base_savgol", "def normalise(self):\n for at in self.atoms:\n if at.x < 0. :\n at.x = self.coordx + at.x\n if at.y < 0. :\n at.y = self.coordy + at.y\n if at.z < 0. :\n at.z = self.coordz + at.z", "def normalize_mirage(wn, data, breaks=[(922,926), (1202,1206), (1448,1452)],\n endpoints=6, slopefactor=.5):\n data = data.copy()\n if not len(data):\n return data, None\n flipwn = wn[0] > wn[-1]\n if flipwn:\n data = data[:, ::-1]\n wn = wn[::-1]\n breaks = find_wn_ranges(wn, np.array(breaks))\n cuts = np.concatenate(([0], breaks.flatten(), [len(wn)])).reshape((-1, 2))\n\n # Straight baseline of each segment. 
shape=(ncuts, 2, nspectra)\n scale = np.zeros(cuts.shape + (len(data),))\n # Overall level in each segment\n dsum = np.zeros(len(cuts))\n slopes = np.zeros(cuts.shape)\n cutpoints = np.zeros(cuts.shape)\n\n def linreg(x, y):\n xm = x.mean()\n ym = y.mean(1)\n rx = x - xm\n s = (rx * (y.T - ym).T).sum(1) / (rx * rx).sum()\n return xm, ym, s\n\n for i in range(len(cuts)):\n cb = cuts[i][0]\n ce = cuts[i][1]\n cwidth = min(endpoints, (ce - cb) // 2)\n\n wb = linreg(wn[cb:cb+cwidth], np.abs(data[:, cb:cb+cwidth]))\n we = linreg(wn[ce-cwidth:ce], np.abs(data[:, ce-cwidth:ce]))\n\n cutpoints[i, :] = [wb[0], we[0]]\n # sc = np.maximum([wb[1], we[1]], 1)\n sc = [wb[1], we[1]]\n scale[i,:,:] = sc\n slopes[i, :] = np.array([wb[2], we[2]]).mean(1)\n # need to handle negative values here!\n # dsum[i] = np.abs(data[:, cb:ce]).sum()\n dsum[i] = np.maximum(data[:, cb:ce], 0).sum()\n # Mean level of all spectra in each of the cut points\n means = scale.mean(2)\n # Make the mean levels identical on both sides of the cuts\n for i in range(len(means)-1):\n mm = min(means[i][1], means[i+1][0])\n ds = (slopes[i+1, 0] + slopes[i, 1]) / mm * (\n wn[cuts[i+1, 0]] - wn[cuts[i, 1]]) * slopefactor\n ds = max(-.5, min(.5, ds))\n means[i][1] = mm * (1 - ds)\n means[i+1][0] = mm * (1 + ds)\n # print('means', means)\n scale = (scale.T / means.T).T\n weights = dsum / dsum.mean()\n scale = scale / ((scale.min(1).T * weights).mean(1))\n # scale = scale / ((scale.mean(1).T * weights).mean(1))\n\n for i in range(len(cuts)):\n cb = cuts[i][0]\n ce = cuts[i][1]\n data[:, cb:ce] /= np.linspace(scale[i][0], scale[i][1], ce-cb, axis=-1)\n if i:\n pce = cuts[i-1][1]\n data[:, pce:cb] = np.linspace(data[:, pce-1], data[:, cb], cb-pce+1,\n endpoint=False, axis=-1)[:,1:]\n scale = scale.reshape((-1, len(data))).T\n if flipwn:\n data = data[:,::-1]\n return data, scale", "def normalize(image, ww=1500, wl=-400):\n\n low, high = wl - ww / 2, wl + ww / 2\n image = np.clip(image, low, high)\n image = (image - low) / ww\n return image", "def _to_reduced_space(self, value: np.ndarray) -> np.ndarray:\n sigma = self.sigma.value\n if self.bound_transform is not None:\n value = self.bound_transform.backward(value)\n distribval = value if self.exponent is None else np.log(value) / np.log(self.exponent)\n reduced = distribval / sigma\n return reduced.ravel() # type: ignore", "def clearLineshape(self):\n self.x = np.arange(self.start,self.stop,round(self.step,4))\n self.lineshape = np.zeros(len(self.x))", "def normalized(first):\n if isinstance(first,FreeCAD.Vector):\n l=length(first)\n return FreeCAD.Vector(first.x/l, first.y/l, first.z/l)", "def _normalize_measure(value, maximum=1.0, center=0.0):\n if isiterable(value):\n value = np.asarray(value)\n if isiterable(center):\n center = np.asarray(center)\n if isiterable(maximum):\n maximum = np.asarray(maximum)\n return np.divide(value - center, maximum - center)", "def denormalize(self, x):\n raise NotImplementedError", "def normalize_wrt_x(self):\n\n x_min = min(self.x)\n x_max = max(self.x)\n y_min = min(self.y)\n\n x_range = x_max - x_min\n\n x = np.array(self.x)\n y = np.array(self.y)\n x -= x_min\n y -= y_min\n x = x / float(x_range)\n y = y / float(x_range)\n\n self.x = x.tolist()\n self.y = y.tolist()", "def dfluxes(wavelength, s, line1, line2, lowlow= 25, lowhigh=15, highlow=15, highhigh = 25, \n lmin=0, lmax=0, fmin=0, fmax=0,\n broad1=2.355, broad2=2.355, sus_line1=True, sus_line2=True,\n plot=True, verbose=True, plot_sus = False, fcal = True, \n fit_continuum = True, median_kernel=35, 
warnings = True ): # Broad is FWHM for Gaussian sigma= 1, \n # Setup wavelength limits\n if lmin == 0 :\n lmin = line1-65. # By default, +-65 A with respect to line\n if lmax == 0 :\n lmax = line2+65.\n \n # Extract subrange to fit\n w_spec = []\n f_spec = []\n w_spec.extend((wavelength[i]) for i in range(len(wavelength)) if (wavelength[i] > lmin and wavelength[i] < lmax) ) \n f_spec.extend((s[i]) for i in range(len(wavelength)) if (wavelength[i] > lmin and wavelength[i] < lmax) ) \n \n \n if np.nanmedian(f_spec) == np.nan: print(\" NO HAY DATOS.... todo son NANs!\")\n\n \n # Setup min and max flux values in subrange to fit\n if fmin == 0 :\n fmin = np.nanmin(f_spec) \n if fmax == 0 :\n fmax = np.nanmax(f_spec) \n \n\n # We have to find some \"guess numbers\" for the Gaussian\n # Now guess_centre is line\n guess_centre1 = line1\n guess_centre2 = line2 \n guess_centre = (guess_centre1+guess_centre2)/2. \n # Define continuum regions: [-lowlow, -lowhigh] and [highlow,highhigh] in Angstroms with respect to guess_centre\n \n\n w_cont=[]\n f_cont=[]\n w_cont.extend((w_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre-lowlow and w_spec[i] < guess_centre-lowhigh) or (w_spec[i] > guess_centre+highlow and w_spec[i] < guess_centre+highhigh) ) \n f_cont.extend((f_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre-lowlow and w_spec[i] < guess_centre-lowhigh) or (w_spec[i] > guess_centre+highlow and w_spec[i] < guess_centre+highhigh) ) \n\n if fit_continuum:\n # Linear Fit to continuum \n f_cont_filtered=sig.medfilt(f_cont,np.int(median_kernel))\n try: \n mm,bb = np.polyfit(w_cont, f_cont_filtered, 1)\n except Exception:\n bb = np.nanmedian(f_cont_filtered)\n mm = 0.\n if warnings: \n print(\" WARNING: Impossible to get the continuum!\")\n print(\" Scaling the continuum to the median value\") \n continuum = mm*np.array(w_spec)+bb \n c_cont = mm*np.array(w_cont)+bb \n\n else: \n # Median value in each continuum range # NEW 15 Sep 2019\n w_cont_low = []\n f_cont_low = []\n w_cont_low.extend((w_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre-lowlow and w_spec[i] < guess_centre-lowhigh) ) \n f_cont_low.extend((f_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre-lowlow and w_spec[i] < guess_centre-lowhigh) ) \n median_w_cont_low = np.nanmedian(w_cont_low)\n median_f_cont_low = np.nanmedian(f_cont_low)\n w_cont_high = []\n f_cont_high = []\n w_cont_high.extend((w_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre+highlow and w_spec[i] < guess_centre+highhigh) ) \n f_cont_high.extend((f_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre+highlow and w_spec[i] < guess_centre+highhigh) ) \n median_w_cont_high = np.nanmedian(w_cont_high)\n median_f_cont_high = np.nanmedian(f_cont_high) \n \n b = (median_f_cont_low-median_f_cont_high)/(median_w_cont_low-median_w_cont_high)\n a = median_f_cont_low- b * median_w_cont_low\n \n continuum = a + b*np.array(w_spec)\n c_cont = b*np.array(w_cont)+ a \n \n # rms continuum\n rms_cont = np.nansum([ np.abs(f_cont[i] - c_cont[i]) for i in range(len(w_cont)) ]) / len(c_cont)\n\n # Search for index here w_spec(index) closest to line\n min_w = np.abs(np.array(w_spec)-line1)\n mini = np.nanmin(min_w)\n guess_peak1 = f_spec[min_w.tolist().index(mini)] - continuum[min_w.tolist().index(mini)]\n min_w = np.abs(np.array(w_spec)-line2)\n mini = np.nanmin(min_w)\n guess_peak2 = f_spec[min_w.tolist().index(mini)] - continuum[min_w.tolist().index(mini)]\n\n # Search for beginning/end of emission 
line, choosing line +-10 \n # 28th Feb 2019: Check central value between low_limit and high_limit\n\n # LOW limit\n low_limit=0\n w_fit = []\n f_fit = []\n w_fit.extend((w_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre1-15 and w_spec[i] < guess_centre1)) \n f_fit.extend((f_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre1-15 and w_spec[i] < guess_centre1)) \n if fit_continuum: \n c_fit=mm*np.array(w_fit)+bb \n else: \n c_fit=b*np.array(w_fit)+a\n \n\n fs=[]\n ws=[]\n for ii in range(len(w_fit)-1,1,-1):\n if f_fit[ii]/c_fit[ii] < 1.05 and f_fit[ii-1]/c_fit[ii-1] < 1.05 and low_limit == 0: low_limit = w_fit[ii]\n# if f_fit[ii]/c_fit[ii] < 1.05 and low_limit == 0: low_limit = w_fit[ii]\n fs.append(f_fit[ii]/c_fit[ii])\n ws.append(w_fit[ii])\n if low_limit == 0: \n sorted_by_flux=np.argsort(fs)\n low_limit = ws[sorted_by_flux[0]]\n \n # HIGH LIMIT \n high_limit=0\n w_fit = []\n f_fit = []\n w_fit.extend((w_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre2 and w_spec[i] < guess_centre2+15)) \n f_fit.extend((f_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre2 and w_spec[i] < guess_centre2+15)) \n if fit_continuum: \n c_fit=mm*np.array(w_fit)+bb \n else: \n c_fit=b*np.array(w_fit)+a\n \n fs=[]\n ws=[]\n for ii in range(len(w_fit)-1):\n if f_fit[ii]/c_fit[ii] < 1.05 and f_fit[ii+1]/c_fit[ii+1] < 1.05 and high_limit == 0: high_limit = w_fit[ii]\n# if f_fit[ii]/c_fit[ii] < 1.05 and high_limit == 0: high_limit = w_fit[ii]\n fs.append(f_fit[ii]/c_fit[ii])\n ws.append(w_fit[ii])\n if high_limit == 0: \n sorted_by_flux=np.argsort(fs)\n high_limit = ws[sorted_by_flux[0]] \n \n # Fit a Gaussian to data - continuum \n p0 = [guess_centre1, guess_peak1, broad1/2.355, guess_centre2, guess_peak2, broad2/2.355] # broad is the Gaussian sigma, 1.0 for emission lines\n try:\n fit, pcov = curve_fit(dgauss, w_spec, f_spec-continuum, p0=p0, maxfev=10000) # If this fails, increase maxfev...\n fit_error = np.sqrt(np.diag(pcov))\n\n\n # New 28th Feb 2019: Check central value between low_limit and high_limit\n # Better: between guess_centre - broad, guess_centre + broad\n # If not, redo fit fixing central value to the peak (it does not work... 
just fix FWHM= (high_limit-low_limit)/2.5 )\n\n if verbose != False: print(\" ----------------------------------------------------------------------------------------\")\n if fit[0] < guess_centre1 - broad1 or fit[0] > guess_centre1 + broad1 or fit[3] < guess_centre2 - broad2 or fit[3] > guess_centre2 + broad2:\n if warnings: \n if fit[0] < guess_centre1 - broad1 or fit[0] > guess_centre1 + broad1: \n print(\" Fitted center wavelength\", fit[0],\"is NOT in the expected range [\",guess_centre1 - broad1,\",\",guess_centre1 + broad1,\"]\")\n else:\n print(\" Fitted center wavelength\", fit[0],\"is in the expected range [\",guess_centre1 - broad1,\",\",guess_centre1 + broad1,\"]\")\n if fit[3] < guess_centre2 - broad2 or fit[3] > guess_centre2 + broad2: \n print(\" Fitted center wavelength\", fit[3],\"is NOT in the expected range [\",guess_centre2 - broad2,\",\",guess_centre2 + broad2,\"]\")\n else:\n print(\" Fitted center wavelength\", fit[3],\"is in the expected range [\",guess_centre2 - broad2,\",\",guess_centre2 + broad2,\"]\")\n print(\" Fit failed!\")\n \n fit[0]=guess_centre1\n fit_error[0] = 0.000001\n fit[1]=guess_peak1\n fit_error[1] = 0.000001\n fit[2] = broad1/2.355\n fit_error[2] = 0.000001 \n fit[3]=guess_centre2\n fit_error[3] = 0.000001\n fit[4]=guess_peak2\n fit_error[4] = 0.000001\n fit[5] = broad2/2.355\n fit_error[5] = 0.000001\n else:\n if warnings: print(\" Fitted center wavelength\", fit[0],\"is in the expected range [\",guess_centre1 - broad1,\",\",guess_centre1 + broad1,\"]\")\n if warnings: print(\" Fitted center wavelength\", fit[3],\"is in the expected range [\",guess_centre2 - broad2,\",\",guess_centre2 + broad2,\"]\")\n \n\n if warnings: \n print(\" Fit parameters = \", fit[0], fit[1], fit[2]) \n print(\" \", fit[3], fit[4], fit[5])\n if fit[2] == broad1/2.355 and warnings == True : \n print(\" WARNING: Fit in\",fit[0],\"failed! Using given centre wavelengths (cw), peaks at (cv) & sigmas=broad/2.355 given.\") # CHECK THIS \n\n gaussian_fit = dgauss(w_spec, fit[0], fit[1], fit[2],fit[3], fit[4], fit[5])\n \n gaussian_1 = gauss(w_spec, fit[0], fit[1], fit[2])\n gaussian_2 = gauss(w_spec, fit[3], fit[4], fit[5])\n \n\n # Estimate rms of the Gaussian fit in range [low_limit, high_limit]\n residuals = f_spec-gaussian_fit-continuum\n rms_fit = np.nansum([ ((residuals[i]**2)/(len(residuals)-2))**0.5 for i in range(len(w_spec)) if (w_spec[i] >= low_limit and w_spec[i] <= high_limit) ]) \n \n # Fluxes, FWHM and Eq. 
Width calculations # CHECK THIS , not well done for dfluxes !!!\n \n gaussian_flux_1 = gauss_flux(fit[1],fit[2])\n gaussian_flux_2 = gauss_flux(fit[4],fit[5]) \n gaussian_flux = gaussian_flux_1+ gaussian_flux_2 \n if warnings: \n print(\" Gaussian flux = \", gaussian_flux_1, \" + \",gaussian_flux_2,\" = \",gaussian_flux)\n print(\" Gaussian ratio = \", gaussian_flux_1/gaussian_flux_2)\n \n error1 = np.abs(gauss_flux(fit[1]+fit_error[1],fit[2]) - gaussian_flux)\n error2 = np.abs(gauss_flux(fit[1],fit[2]+fit_error[2]) - gaussian_flux)\n gaussian_flux_error = 1 / ( 1/error1**2 + 1/error2**2 )**0.5\n \n fwhm=fit[2]*2.355\n fwhm_error = fit_error[2] *2.355\n fwhm_vel = fwhm / fit[0] * C \n fwhm_vel_error = fwhm_error / fit[0] * C \n \n gaussian_ew = gaussian_flux/np.nanmedian(f_cont)\n gaussian_ew_error = gaussian_ew * gaussian_flux_error/gaussian_flux \n \n # Integrated flux\n # IRAF: flux = sum ((I(i)-C(i)) * (w(i2) - w(i1)) / (i2 - i2) \n flux = np.nansum([ (f_spec[i]-continuum[i])*(w_spec[i+1]-w_spec[i]) for i in range(len(w_spec)) if (w_spec[i] >= low_limit and w_spec[i] <= high_limit) ]) \n flux_error = rms_cont * (high_limit - low_limit)\n wave_resolution = (wavelength[-1]-wavelength[0])/len(wavelength)\n ew = wave_resolution * np.nansum ([ (1 - f_spec[i]/continuum[i]) for i in range(len(w_spec)) if (w_spec[i] >= low_limit and w_spec[i] <= high_limit) ]) \n ew_error = np.abs(ew*flux_error/flux) \n gauss_to_integrated = gaussian_flux/flux * 100.\n \n # Plotting \n if plot :\n plt.figure(figsize=(10, 4))\n #Plot input spectrum\n plt.plot(np.array(w_spec),np.array(f_spec), \"blue\", lw=2, alpha = 0.7)\n plt.minorticks_on() \n plt.xlabel(\"Wavelength [$\\mathrm{\\AA}$]\")\n if fcal:\n plt.ylabel(\"Flux [ erg cm$^{-2}$ s$^{-1}$ $\\mathrm{\\AA}^{-1}$ ]\")\n else:\n plt.ylabel(\"Flux [ counts ]\")\n plt.xlim((line1+line2)/2-40,(line1+line2)/2+40)\n plt.ylim(fmin,fmax)\n \n # Vertical line at guess_centre\n plt.axvline(x=guess_centre1, color='r', linestyle='-', alpha=0.5)\n plt.axvline(x=guess_centre2, color='r', linestyle='-', alpha=0.5)\n\n # Horizontal line at y = 0\n plt.axhline(y=0, color='k', linestyle=':', alpha=0.5) \n # Dashed green regions for continuum, defined by [lowlow, lowhigh] and [highlow,highhigh]\n plt.axvspan(guess_centre+highlow, guess_centre+highhigh, facecolor='g', alpha=0.15,zorder=3)\n plt.axvspan(guess_centre-lowlow, guess_centre-lowhigh, facecolor='g', alpha=0.15,zorder=3)\n # Plot linear fit for continuum\n plt.plot(w_spec, continuum,\"g--\")\n # Plot Gaussian fit \n plt.plot(w_spec, gaussian_fit+continuum, 'r-', alpha=0.8) \n # Vertical line at Gaussian center\n plt.axvline(x=fit[0], color='k', linestyle='-', alpha=0.5)\n plt.axvline(x=fit[3], color='k', linestyle='-', alpha=0.5)\n # Plot Gaussians + cont\n plt.plot(w_spec, gaussian_fit+continuum, 'r-', alpha=0.5, lw=3) \n plt.plot(w_spec, gaussian_1+continuum, color=\"navy\",linestyle='--', alpha=0.8)\n plt.plot(w_spec, gaussian_2+continuum, color=\"#1f77b4\",linestyle='--', alpha=0.8)\n plt.plot(w_spec, np.array(f_spec)-(gaussian_fit), 'orange', alpha=0.4, linewidth=5) \n\n # Vertical lines to emission line\n plt.axvline(x= low_limit, color='k', linestyle=':', alpha=0.5)\n plt.axvline(x= high_limit, color='k', linestyle=':', alpha=0.5) \n plt.title('Double Gaussian Fit') # Fit: x0=%.2f y0=%.2e sigma=%.2f flux=%.2e rms=%.3e' % (fit[0], fit[1], fit[2], gaussian_flux, rms_fit))\n plt.show()\n plt.close()\n \n # Plot residuals\n# plt.figure(figsize=(10, 1))\n# plt.xlabel(\"Wavelength [$\\mathrm{\\AA}$]\")\n# 
plt.ylabel(\"RMS\")\n# plt.xlim((line1+line2)/2-40,(line1+line2)/2+40)\n# plt.axvline(x=fit[0], color='k', linestyle='-', alpha=0.5)\n# plt.axvline(x=fit[3], color='k', linestyle='-', alpha=0.5)\n# plt.plot(w_spec, residuals, 'k')\n# plt.minorticks_on()\n# plt.show()\n# plt.close()\n\n \n # Printing results\n if verbose :\n #print \"\\n> WARNING !!! CAREFUL WITH THE VALUES PROVIDED BELOW, THIS TASK NEEDS TO BE UPDATED!\\n\"\n print(\"\\n> Gauss and continuum fitting + integrated flux calculations:\\n\")\n print(\" rms continuum = %.3e erg/cm/s/A \" % (rms_cont)) \n print(\" Gaussian Fit parameters: x0 = ( %.2f +- %.2f ) A \" % (fit[0], fit_error[0]))\n print(\" y0 = ( %.3f +- %.3f ) 1E-16 erg/cm2/s/A\" % (fit[1]/1E-16, fit_error[1]/1E-16 ))\n print(\" sigma = ( %.3f +- %.3f ) A\" % (fit[2], fit_error[2])) \n print(\" rms fit = %.3e erg/cm2/s/A\" % (rms_fit))\n print(\" Gaussian Flux = ( %.2f +- %.2f ) 1E-16 erg/s/cm2 (error = %.1f per cent)\" % (gaussian_flux/1E-16, gaussian_flux_error/1E-16, gaussian_flux_error/gaussian_flux*100))\n print(\" FWHM = ( %.3f +- %.3f ) A = ( %.1f +- %.1f ) km/s \" % (fwhm, fwhm_error, fwhm_vel, fwhm_vel_error))\n print(\" Eq. Width = ( %.1f +- %.1f ) A\" % (-gaussian_ew, gaussian_ew_error)) \n print(\"\\n Integrated flux = ( %.2f +- %.2f ) 1E-16 erg/s/cm2 (error = %.1f per cent) \" % ( flux/1E-16, flux_error/1E-16, flux_error/flux *100)) \n print(\" Eq. Width = ( %.1f +- %.1f ) A\" % (ew, ew_error))\n print(\" Gauss/Integrated = %.2f per cent \" % gauss_to_integrated)\n \n \n # New 22 Jan 2019: sustract Gaussian fit\n index=0\n s_s=np.zeros_like(s)\n sustract_this = np.zeros_like(gaussian_fit)\n if sus_line1:\n sustract_this = sustract_this + gaussian_1\n if sus_line2:\n sustract_this = sustract_this + gaussian_2 \n \n \n for wave in range(len(wavelength)):\n s_s[wave]=s[wave]\n if wavelength[wave] == w_spec[0] : \n s_s[wave] = f_spec[0]-sustract_this[0]\n index=1\n if wavelength[wave] > w_spec[0] and wavelength[wave] <= w_spec[-1]:\n s_s[wave] = f_spec[index]-sustract_this[index]\n index=index+1\n if plot_sus: \n plt.figure(figsize=(10, 4))\n plt.plot(wavelength,s, \"r\")\n plt.plot(wavelength,s_s, \"c\")\n plt.minorticks_on() \n plt.xlabel(\"Wavelength [$\\mathrm{\\AA}$]\")\n if fcal:\n plt.ylabel(\"Flux [ erg cm$^{-2}$ s$^{-1}$ $\\mathrm{\\AA}^{-1}$ ]\")\n else:\n plt.ylabel(\"Flux [ counts ]\")\n plt.xlim(lmin,lmax)\n plt.ylim(fmin,fmax)\n plt.show()\n plt.close()\n \n # This gaussian_flux in 3 is gaussian 1 + gaussian 2, given in 15, 16, respectively\n # 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16\n resultado = [rms_cont, fit[0], fit_error[0], gaussian_flux, gaussian_flux_error, fwhm, fwhm_error, flux, flux_error, ew, ew_error, s_s, fit[3], fit[4],fit[5], gaussian_flux_1, gaussian_flux_2 ]\n return resultado \n except Exception:\n if verbose: print(\" Double Gaussian fit failed!\")\n resultado = [0, line1, 0, 0, 0, 0, 0, 0, 0, 0, 0, s, 0, 0, 0, 0, 0 ] # line was identified at lambda=line but Gaussian fit failed\n\n # NOTA: PUEDE DEVOLVER EL FLUJO INTEGRADO AUNQUE FALLE EL AJUSTE GAUSSIANO...\n\n # Plotting \n if plot :\n plt.figure(figsize=(10, 4))\n plt.plot(np.array(w_spec),np.array(f_spec), \"b\", lw=3, alpha = 0.5)\n plt.minorticks_on() \n plt.xlabel(\"Wavelength [$\\mathrm{\\AA}$]\")\n if fcal:\n plt.ylabel(\"Flux [ erg cm$^{-2}$ s$^{-1}$ $\\mathrm{\\AA}^{-1}$ ]\")\n else:\n plt.ylabel(\"Flux [ counts ]\") \n plt.xlim(lmin,lmax)\n plt.ylim(fmin,fmax)\n \n # Vertical line at guess_centre\n plt.axvline(x=guess_centre, color='r', linestyle='-', 
alpha=0.5)\n # Horizontal line at y = 0\n plt.axhline(y=0, color='k', linestyle=':', alpha=0.5) \n # Dashed green regions for continuum, defined by [lowlow, lowhigh] and [highlow,highhigh]\n plt.axvspan(guess_centre+highlow, guess_centre+highhigh, facecolor='g', alpha=0.15,zorder=3)\n plt.axvspan(guess_centre-lowlow, guess_centre-lowhigh, facecolor='g', alpha=0.15,zorder=3)\n # Plot linear fit for continuum\n plt.plot(w_spec, continuum,\"g--\")\n # Plot Gaussian fit \n# plt.plot(w_spec, gaussian_fit+continuum, 'r-', alpha=0.8) \n # Vertical line at Gaussian center\n# plt.axvline(x=fit[0], color='k', linestyle='-', alpha=0.5)\n # Vertical lines to emission line\n plt.axvline(x= low_limit, color='k', linestyle=':', alpha=0.5)\n plt.axvline(x= high_limit, color='k', linestyle=':', alpha=0.5) \n # Plot residuals\n# plt.plot(w_spec, residuals, 'k')\n plt.title(\"No Gaussian fit obtained...\")\n plt.show()\n\n\n return resultado", "def remove_point(x): # 'lin' 'log'\n dif = np.diff(x) / x[1:]\n idx = np.argmin(dif)\n xn = np.delete(x, idx + 1)\n if idx+2 == len(x):\n logger.debug(' Remove right point {} {:.8e} [dx= {:.8e}] '.format(idx+1, x[idx + 1], dif[idx]))\n else:\n logger.debug(' Remove point {} {:.8e} [dx= {:.8e}] next: {:.8e}'.format(idx+1, x[idx + 1], dif[idx], x[idx + 2])) \n return xn", "def vanishing_line(n, focal):\n return (n[0], n[1], n[2] * focal)", "def normalise(self):\n s = self._sum()\n if s != 0:\n for element, value in self.focals.items():\n self.focals[element] /= s", "def normalized(self):\n len = self.length\n return Vector(self.x / len, self.y / len)", "def normalize_series(series):\n return (series - series.mean()) / (series.max() - series.min())", "def update(self, line, autoscale=False):\n # compute knl as function of s\n values = {p: np.zeros(self.S.size) for p in self.on_y_unique}\n orders = {p: order(p) for p in self.on_y_unique}\n Smax = line.get_length()\n for name, el, s0, s1 in iter_elements(line):\n if hasattr(el, \"knl\"):\n if 0 <= s0 <= Smax:\n mask = (self.S >= s0) & (self.S < s1)\n else:\n # handle wrap around\n mask = (self.S >= s0 % Smax) | (self.S < s1 % Smax)\n for knl, n in orders.items():\n if n <= el.order:\n values[knl][mask] += el.knl[n]\n\n # plot\n s = self.factor_for(\"s\")\n changed = []\n for i, ppp in enumerate(self.on_y):\n for j, pp in enumerate(ppp):\n for k, p in enumerate(pp):\n art = self.artists[i][j][k]\n y = self.factor_for(p) * values[p]\n if self.filled:\n art.get_paths()[0].vertices[1 : 1 + y.size, 1] = y\n else:\n art.set_data((s * self.S, y))\n changed.append(art)\n\n if autoscale:\n ax = self.axis(i, j)\n if self.filled: # At present, relim does not support collection instances.\n ax.update_datalim(\n mpl.transforms.Bbox.union(\n [a.get_datalim(ax.transData) for a in self.artists[i][j]]\n )\n )\n else:\n ax.relim()\n ax.autoscale()\n ax.set(xlim=(s * np.min(self.S), s * np.max(self.S)))\n\n return changed", "def normalized(self):\n d = self.magnitude()\n if d:\n return type(self)(self.x / d, self.y / d)\n return self.copy()", "def uniformize(self):\n\n self.len = len(self.x)\n\n if self.len > 1:\n # comput length of the shape:\n shape_length, scale = self.euclidian_length()\n\n # find new points:\n new_shape = Stroke()\n new_shape.x = []\n new_shape.y = []\n step = shape_length / float(self.len)\n biggest_smoller_point = 0\n new_shape.append(self.x[0], self.y[0])\n for i in 1 + np.array(range(len(self.x) - 1)):\n try:\n while i * step > scale[biggest_smoller_point]:\n biggest_smoller_point += 1\n\n biggest_smoller_point -= 
1\n x0 = self.x[biggest_smoller_point]\n y0 = self.y[biggest_smoller_point]\n x1 = self.x[biggest_smoller_point + 1]\n y1 = self.y[biggest_smoller_point + 1]\n diff = float(i * step - scale[biggest_smoller_point])\n dist = float(scale[biggest_smoller_point + 1] - scale[biggest_smoller_point])\n new_x = x0 + diff * (x1 - x0) / dist\n new_y = y0 + diff * (y1 - y0) / dist\n new_shape.append(new_x, new_y)\n\n except IndexError:\n print i * step\n print biggest_smoller_point\n print scale\n # new_shape.append(self.x[-1], self.y[-1])\n\n\n self.x = new_shape.x\n self.y = new_shape.y\n self.len = new_shape.len", "def fix_straight_lines(self):\r\n\r\n # Creates a vertical 1x5 kernel and applies binary closing based on that kernel\r\n vertical_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (1, 5))\r\n self.thresh_invert = cv2.morphologyEx(self.thresh_invert, cv2.MORPH_CLOSE, vertical_kernel, iterations=9)\r\n\r\n # Creates a horizontal 5x1 kernel and applies binary closing based on that kernel\r\n horizontal_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 1))\r\n self.thresh_invert = cv2.morphologyEx(self.thresh_invert, cv2.MORPH_CLOSE, horizontal_kernel, iterations=4)", "def detrend(x):\n\n t = x['t']\n f = x['f']\n t0 = np.mean(x['t'])\n time_since_transit = t - t0\n\n # select out just the continuum points\n continuum = x['continuum']==1\n\n pfit = np.polyfit(\n time_since_transit[continuum], f[continuum], poly_degree\n )\n\n fldt = f.copy()\n fldt -= np.polyval(pfit,time_since_transit)\n return fldt", "def msub(trace):\n \n return(trace - np.mean(trace))", "def normalized(self) -> 'NormalizedFXParam':\r\n ...", "def trend_reduce(sample_data):\n data = np.asarray(sample_data)*1e9\n width = 400\n slope = 0.070\n dataLength = len(data)\n Time = np.arange(0, dataLength)\n\n r1 = np.arange(dataLength, 0, -1)*slope\n r2 = -1*(slope*width/np.pi)*np.sin(np.arange(0,width+1)*(np.pi/width))\n r3 = np.arange(0, dataLength)*slope\n CompareLong = np.array(r1)\n CompareLong = np.append(CompareLong, r2)\n CompareLong = np.append(CompareLong, r3)\n\n upperBoundary = np.amax(data)+0*data\n Compare = []\n for i in range(0,len(CompareLong)-dataLength-1):\n Compare = CompareLong[i:-1]\n Compare = Compare[0:dataLength]\n Compare = Compare + np.amax(data-Compare)\n upperBoundary = np.minimum(upperBoundary, Compare)\n\n FindUpperPoints = abs(upperBoundary - data)<(slope/4)\n upperLine = interp1d(Time[np.where(FindUpperPoints)], upperBoundary[np.where(FindUpperPoints)], kind='quadratic', fill_value='extrapolate')\n \n \n\n lowerBoundary = np.amin(data)+0*data\n Compare = []\n for i in range(0,len(CompareLong)-dataLength-1):\n Compare = -CompareLong[i:-1]\n Compare = Compare[0:dataLength]\n Compare = Compare - np.amax(Compare-data)\n lowerBoundary = np.maximum(lowerBoundary, Compare)\n\n FindLowerPoints = abs(lowerBoundary - data)<(slope/4)\n\n lowerLine = interp1d(Time[np.where(FindLowerPoints)], lowerBoundary[np.where(FindLowerPoints)], kind='quadratic', fill_value='extrapolate')\n\n middleLine = (upperLine(Time)+lowerLine(Time))/2\n middle = (upperBoundary+lowerBoundary)/2\n\n '''\n fig_t0 = plt.figure()\n plt_t0 = fig_t0.add_subplot(111)\n plt_t0.plot(Time, data)\n plt_t0.scatter(Time[np.where(FindUpperPoints)], upperBoundary[np.where(FindUpperPoints)], c='r')\n plt_t0.plot(Time, upperLine(Time))\n plt_t0.plot(Time, lowerBoundary, c='r')\n plt_t0.plot(Time, upperBoundary, c='r')\n plt_t0.scatter(Time[np.where(FindLowerPoints)], lowerBoundary[np.where(FindLowerPoints)], c='r')\n plt_t0.plot(Time, 
lowerLine(Time))\n plt_t0.plot(Time, middleLine)\n plt_t0.plot(Time, middle)\n\n plt.close()\n '''\n return (sample_data - middle/1e9)", "def normalize(self,x,xmin,xmax):\n return (x-xmin)/(xmax-xmin)", "def normalise(self, spectrum):\n\n return spectrum", "def normalise(self, spectrum):\n\n return spectrum", "def continuum_normalized_region_around_line(cls, wi, fi, blue, red,\n band=None, degree=1):\n w = np.asarray(wi)\n flux = np.atleast_2d(fi)\n # index is true in the region where we fit the polynomial\n indcont = (((w >= blue[0]) & (w <= blue[1])) |\n ((w >= red[0]) & (w <= red[1]))\n )\n # index of the region we want to return\n if band is None:\n band = blue[0], red[1]\n\n indrange = (w > band[0]) & (w < band[1])\n wnew = w[indrange]\n wcont = w[indcont]\n\n # make a flux array of shape\n # (number of spectra, number of points in indrange)\n f = np.zeros((flux.shape[0], indrange.sum()))\n for i in range(flux.shape[0]):\n # fit polynomial of second order to the continuum region\n linecoeff = np.polyfit(wcont, flux[i, indcont], degree)\n # divide the flux by the polynomial and put the result in our new\n # flux array\n f[i, :] = flux[i, indrange] / np.polyval(linecoeff, wnew)\n return wnew, np.squeeze(f)", "def _normalize_detectors(self, data, index):\n vol = self.cell_volumes(index)\n for i, s in enumerate(data):\n data[i] = data[i] / vol\n full_index = list(range(len(self.axes)))\n other_axes = [item for item in full_index if item not in index]\n for i in other_axes:\n v = self.axes[i].total_volume\n data /= v\n return data", "def forward(self, x):\n\n x, _ = equiangular_calculator(x, self.ratio)\n x = x.permute(0, 3, 1, 2)\n x = F.interpolate(x, scale_factor=(self.kernel_size, self.kernel_size), mode=\"nearest\")\n x = reformat(x)\n return x", "def normalize(self):\n return Vector(self.args + []) / self.magnitude()", "def get_flux(self):\n # iterate through points from top to bottom\n points = sorted(self.graph,key=lambda n: -n.z)\n \n for n in points:\n if n.z >= self.sealevel:\n n.flux += 1\n zmin = n.z\n zmin2 = 1e99\n min_n = None\n for n2 in self.graph[n]:\n if n2.z < zmin:\n zmin = n2.z\n min_n = n2\n if n2.z < zmin2:\n zmin2 = n2.z\n if min_n != None:\n min_n.flux += n.flux\n else:\n n.flux = 0\n n.geo = 'lake'\n n.z = zmin2 + 1e-3\n self.normalize_flux()", "def strip(self, value):\n return value*self._scale + self._offset", "def splitLine(line):\n # Find a point where our line changes direction\n l = np.copy(line)\n change = l[2:] - l[:-2]\n # Create breaks where derivative equals 0\n break_indicies = np.unique(np.where(change == 0)[0])\n line_segments = []\n while break_indicies.size > 0:\n i = break_indicies[0]\n\n # Add the beginning of the line to our list\n new_line= l[0:i+1]\n line_segments.append(new_line)\n\n # The rest of the line becomes our new line\n l = l[i+1:]\n\n # Recalculate the change vector\n change = l[2:] - l[:-2]\n break_indicies = np.unique(np.where(change == 0)[0])\n\n line_segments.append(l)\n return line_segments", "def normalize_data(mjd, flux, ignore):\n\n t = mjd - np.min(mjd)\n y_err = np.sqrt(flux) \n y = flux[t > ignore]\n y_err = y_err[t > ignore]\n t = t[t > ignore]\n y_err /= np.max(y)\n y /= np.max(y)\n\n sorted_tups = sorted(list(zip(t, y, y_err)), key=lambda x: x[0])\n t = np.array([tup[0] for tup in sorted_tups])\n y = np.array([tup[1] for tup in sorted_tups])\n y_err = np.array([tup[2] for tup in sorted_tups])\n\n return t, y, y_err", "def _normalize_(x: np.array) -> np.array:\n if x.max() != 0:\n x = x / x.max()\n return np.clip(x, 
0, 1)# ensure that no values are >1\n else:\n raise ZeroDivisionError('Image Normalization')", "def normdos(line, E_fermi):\n\tls = line.split()\n\tif len(ls) == 3:\n\t\tls[0] = float(ls[0])-E_fermi\n\t\tline = \" {: 7.3f} {} {}\\n\".format(ls[0], ls[1], ls[2])\n\treturn line", "def _normalize(image):\n return tf.multiply(tf.subtract(image, 0.5), 2.0)", "def sized_normal(self, t, size):\n p = self.lerp(t)\n if self.da < 0:\n v = self.c - p\n else:\n v = p - self.c\n return Line(p, size * v.normalized())", "def detrend(x, nmed):\n ntap = int(np.ceil(nmed / 2))\n xf = np.r_[np.zeros(ntap) + x[0], x, np.zeros(ntap) + x[-1]]\n\n import scipy.signal\n\n xf = scipy.signal.medfilt(xf, nmed)[ntap:-ntap]\n return x - xf", "def normalization_brain(img, mask):\n zone1 = img[mask != 0]\n imge = img.copy()\n imge[mask != 0] = (zone1 - zone1.min()) / (zone1.max() - zone1.min())\n imge[mask == 0] = 0\n return imge", "def _preprocess_state(self, state):\r\n if np.isinf(self.observation_space.low).any() or np.isinf(self.observation_space.high).any(): return state\r\n return (state - self.observation_space.low) /(self.observation_space.high)", "def remove_line(self, origin):\n current_tile = self.board[origin[0]][origin[1]]\n\n if current_tile.is_dot:\n temp = current_tile.next\n current_tile.next = None\n current_tile = temp\n\n # Remove color of all non dot tiles in line.\n while current_tile and current_tile.color and not current_tile.is_dot:\n temp = current_tile.next\n current_tile.color = None\n current_tile.next = None\n current_tile = temp", "def normalize_emission(self):\n self._e /= self._e.sum(0)", "def dehom(x):\n return x[..., :-1] / x[..., -1:]", "def preprocess_frame(self, frame):\n # Greyscale frame\n img = np.mean(frame,-1)\n\n # Remove black bar at the bottom\n cropped_img = img[:-12, :]\n\n # Normalize Pixel Values\n normalized_frame = cropped_img/255.0\n\n return normalized_frame", "def RecursiveLowPassFast(signal, coeff, self):\n # Creates running mean value of the input\n ml = scipy.signal.lfilter([1 - coeff['a'], 0], [1, -coeff['a']], signal) \n # Plot Running threshold value at the current plot\n self.p1.plot(self.t, ml, pen=pg.mkPen(color=(246, 178, 255), width=3))\n\n # Creates running square deviation from the mean\n vl = scipy.signal.lfilter([1 - coeff['a'], 0], [1, -coeff['a']], np.square(signal - ml))\n # Creates \"threshold line\". If current value < sl[i] -> i belongs to event. 
\n sl = ml - coeff['S'] * np.sqrt(vl)\n self.p1.plot(self.t, sl, pen=pg.mkPen(color=(173, 27, 183), width=3))\n # Finds the length of the initial signal\n Ni = len(signal)\n # Finds those points where signal less than \"threshold line\"\n points = np.array(np.where(signal<=sl)[0])\n to_pop=np.array([]) # Empty supplementary array for finding adjacent points \n # For loop for finding adjacent points \n for i in range(1,len(points)):\n if points[i] - points[i - 1] == 1:\n to_pop=np.append(to_pop, i)\n # Points contain only border points of events\n points = np.delete(points, to_pop)\n # Empty list for Event location storage\n RoughEventLocations = []\n NumberOfEvents=0 #Number of events\n\n # For Loop for finding separating edges of different events and satisfying Event length limits\n for i in points:\n if NumberOfEvents is not 0:\n if i >= RoughEventLocations[NumberOfEvents-1][0] and i <= RoughEventLocations[NumberOfEvents-1][1]:\n continue\n NumberOfEvents += 1\n start = i\n El = ml[i] - coeff['E'] * np.sqrt(vl[i])\n Mm = ml[i]\n Vv = vl[i]\n duration = 0\n while signal[i + 1] < El and i < (Ni - 2) and duration < coeff['eventlengthLimit']:\n duration += 1\n i += 1\n if duration >= coeff['eventlengthLimit'] or i > (Ni - 10):\n NumberOfEvents -= 1\n else:\n k = start\n while signal[k] < Mm and k > 1:\n k -= 1\n start = k - 1\n k2 = i + 1\n while signal[k2] > Mm:\n k2 -= 1\n endp = k2\n if start<0:\n start=0\n RoughEventLocations.append((start, endp, ml[start], vl[start]))\n\n return np.array(RoughEventLocations)", "def normalise(x):\n return (x - jnp.min(x)) / (jnp.max(x) - jnp.min(x))", "def forward(self) -> Vec:\n return (self.emitters[0][1] - self.emitters[0][0]).norm()", "def simplify_line_vw(points, small_area=100):\r\n while len(points) > 3:\r\n \r\n # For each coordinate that forms the apex of a two-segment\r\n # triangle, find the area of that triangle and put it into a list\r\n # along with the index, ordered from smallest to largest.\r\n \r\n popped, preserved = set(), set()\r\n \r\n triples = zip(points[:-2], points[1:-1], points[2:])\r\n triangles = [Polygon((p1, p2, p3)) for (p1, p2, p3) in triples]\r\n areas = [(triangle.area, index) for (index, triangle) in enumerate(triangles)]\r\n \r\n # Reduce any segments that makes a triangle whose area is below\r\n # the minimum threshold, starting with the smallest and working up.\r\n # Mark segments to be preserved until the next iteration.\r\n\r\n for (area, index) in sorted(areas):\r\n if area > small_area:\r\n # nothing more can be removed on this iteration\r\n break\r\n \r\n if (index + 1) in preserved:\r\n # current index is too close to a previously-preserved one\r\n continue\r\n \r\n preserved.add(index)\r\n popped.add(index + 1)\r\n preserved.add(index + 2)\r\n \r\n if not popped:\r\n # nothing was removed so we are done\r\n break\r\n \r\n # reduce the line, then try again\r\n points = [point for (index, point) in enumerate(points) if index not in popped]\r\n \r\n return list(points)", "def renormalize(flux, ivar):\n\n # axis=1 corresponds to the rebinned spectral axis\n # Finding the weighted mean both for normalization and for the rms\n mean = np.average(flux, axis=1, weights=ivar)[:, None]\n rms = np.sqrt(np.average((flux - mean) ** 2, axis=1, weights=ivar))[:, None]\n\n # Normalize by subtracting the weighted mean and dividing by the rms\n # as prescribed in the original QuasarNet paper.\n return (flux - mean) / rms", "def undo_normalise(img):\n\treturn img + CONFIG.MEAN_PIXEL", "def normalize_conv(self, x):\n\n x = 
self.normalize_global(x)\n \n if self.right_context is None and self.left_context is None:\n return x\n\n if self.left_context is None:\n left_context = x.shape[0]\n else:\n left_context = self.left_context\n\n if self.right_context is None:\n right_context = x.shape[0]\n else:\n right_context = self.right_context\n\n total_context = left_context + right_context + 1\n\n if x.shape[0] <= min(right_context, left_context)+1:\n # if context is larger than the signal we still return global normalization\n return x\n\n v1 = np.ones((x.shape[0],1), dtype=float_cpu())\n h = np.ones((total_context,1), dtype=float_cpu())\n \n counts = convolve2d(v1, h)[right_context:right_context+x.shape[0]]\n m_x = convolve2d(x, h)[right_context:right_context+x.shape[0]]\n m_x /= counts\n\n if self.norm_var:\n m2_x = convolve2d(x*x, h)[right_context:right_context+x.shape[0]]\n m2_x /= counts\n s2_x = m2_x - m_x**2\n s2_x[s2_x<1e-5] = 1e-5\n s_x = np.sqrt(s2_x)\n\n\n if self.norm_mean:\n x -= m_x\n\n if self.norm_var:\n x /= s_x\n\n return x", "def find_direction_vector(line):\n pt1, pt2 = line\n pt1 = np.array(pt1).reshape(2,)\n pt2 = np.array(pt2).reshape(2,)\n direct = pt2 - pt1\n direct_norm = normalize(direct)\n return direct_norm", "def detrend(ovar):\n\t\n\tovar1=anomaly(ovar)\n\t\n\tt1=c1=xr.DataArray(np.arange(len(ovar1.time)),dims='time',coords={'time': ovar1.time})\n\tslope=covmat(t1,ovar1)/np.std(t1)**2\n\t\n\tovar1 -= slope*t1 # remove linear trend\n\tovar2=anomaly(ovar1)\n\t\n\treturn ovar2", "def find_centers(line_complex):\n # There is a line where the flux is at a minimum, i.e., the second\n # derivative is positive.\n diff2 = numpy.diff(numpy.sign(numpy.diff(line_complex)))\n zero_crossings = numpy.where(diff2 > 0.)[0]\n return zero_crossings + 1", "def normalized(self):\n length = self.length\n if length != 0:\n return self/length\n return Vec2d(self)", "def test__normalize_clip(self):\n # Setup\n data = pd.Series([-0.43, 0.1234, 1.5, -1.31])\n\n transformer = Mock()\n transformer.clip = True\n\n # Run\n result = CategoricalTransformer._normalize(transformer, data)\n\n # Asserts\n expect = pd.Series([0.0, 0.1234, 1.0, 0.0], dtype=float)\n\n pd.testing.assert_series_equal(result, expect)", "def _normalize(x):\n tol = 1e-10\n dims = x.shape\n\n x = x.flatten()\n inverse = (np.sum(x**2) + tol) ** -.5\n x = x * inverse\n x = np.reshape(x, dims)\n\n return x", "def flux(self, u):\n flu = np.zeros((3,2), dtype=np.float64)\n flu[0,0] = u[1]\n flu[1,0] = u[0] * (u[1]/u[0])**2 + 0.5 * 9.81*u[0]**2\n flu[2,0] = u[1] * u[2]/u[0] #FIXME attenzione che c'è il punto controllare se sono scalari o vettori'\n flu[0,1] = u[2]\n flu[1,1] = u[2] * u[1]/u[0]\n flu[2,1] = u[0] * (u[2]/u[0])**2 + 0.5 * 9.81*u[0]**2\n return flu", "def normalize(x):\n return (x - math_ops.reduce_min(x)) / (math_ops.reduce_max(x) - math_ops.reduce_min(x))", "def normalize(self, slice, bottom=99, down=1):\n b = np.percentile(slice, bottom)\n t = np.percentile(slice, down)\n slice = np.clip(slice, t, b)\n\n image_nonzero = slice[np.nonzero(slice)]\n if np.std(slice) == 0 or np.std(image_nonzero) == 0:\n return slice\n else:\n tmp = (slice - np.mean(image_nonzero)) / np.std(image_nonzero)\n # since the range of intensities is between 0 and 5000 ,\n # the min in the normalized slice corresponds to 0 intensity in unnormalized slice\n # the min is replaced with -9 just to keep track of 0 intensities\n # so that we can discard those intensities afterwards when sampling random patches\n tmp[tmp == tmp.min()] = -9\n return tmp", "def 
_normalize_image(self, img: np.ndarray) -> np.ndarray:\n i2 = img.astype(float) - self.bg\n i2 /= i2.max()\n return i2", "def clip_normalize(w):\n w_clip = np.clip(w, 0, np.inf)\n return w_clip / np.sum(w_clip, axis=0)" ]
[ "0.6464081", "0.59551424", "0.58137214", "0.5795742", "0.5787084", "0.57819617", "0.57134414", "0.5706386", "0.56656975", "0.5642102", "0.56365347", "0.55645335", "0.5524223", "0.5506707", "0.54946476", "0.5492588", "0.54654", "0.5452259", "0.5443912", "0.54263026", "0.54149455", "0.5409852", "0.5383635", "0.5371455", "0.53653175", "0.53363985", "0.5329418", "0.5317573", "0.52985865", "0.5280354", "0.5270475", "0.5264396", "0.5257081", "0.52532375", "0.52531624", "0.5251324", "0.52454793", "0.52450985", "0.5223436", "0.52213395", "0.5221271", "0.5215196", "0.5213176", "0.52041304", "0.52031827", "0.5200007", "0.5186073", "0.5178678", "0.51784986", "0.51616454", "0.516028", "0.5160136", "0.5132437", "0.5129637", "0.51239365", "0.5108667", "0.5102317", "0.5092036", "0.50786394", "0.50784034", "0.5077344", "0.50683945", "0.50683945", "0.50647026", "0.50590795", "0.5054566", "0.50457317", "0.5044743", "0.50272703", "0.502665", "0.50248206", "0.5020672", "0.50188404", "0.50155014", "0.5013812", "0.50135744", "0.5008186", "0.5003838", "0.4991367", "0.4989071", "0.4985826", "0.49843183", "0.4980287", "0.49777007", "0.49718082", "0.49662784", "0.4964245", "0.49607554", "0.49598357", "0.49458945", "0.49416006", "0.4938381", "0.4937179", "0.49338245", "0.49328154", "0.49317652", "0.49310547", "0.4930412", "0.4929237", "0.49266475" ]
0.51007783
57
any comment in the input file
def description(self): return self._hdr
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_comments(self):\n fp = FilePath(self.mktemp())\n fp.setContent('something\\n#commented\\ncool')\n self.assertEqual(list(inventoryReader(fp.path)), ['something', 'cool'])", "def skip_comments(filepointer):\n\tcomments = []\n\tdata = '#'\n\ttry:\n\t\tpos = filepointer.tell()\n\texcept:\n\t\tprint(\"Could not read file.\")\n\t\treturn None\t\n\t\n\twhile data[0] == '#':\n\t\tdata = filepointer.readline()\n\t\tif not data:\n\t\t\traise Exception(\"Unexpected end of file while reading comments.\")\n\n\t\tif data[0] == '#':\n\t\t\tcomments.append(data)\n\t\t\tpos = filepointer.tell()\n\t\telse:\n\t\t\tfilepointer.seek(pos)\n\treturn comments", "def print_comments():\n with open('a_cpp_file.cpp', 'r') as file:\n data = file.read()\n to_print = ''\n should_print = False\n for i, char in enumerate(data):\n if i > 1:\n if data[i-1] == '*' and data[i-2] == '/':\n should_print = True\n if char == '*' and data[i+1] == '/' and should_print:\n should_print = False\n print(to_print)\n to_print = ''\n if should_print:\n to_print += char\n should_print = False\n for i, char in enumerate(data):\n if i > 1:\n if data[i-1] == '/' and data[i-2] == '/':\n should_print = True\n if char == '\\n' and should_print:\n should_print = False\n print(to_print)\n to_print = ''\n if should_print:\n to_print += char", "def _readComments(self): \n self.NSCOML = nappy.utils.text_parser.readItemFromLine(self.file.readline(), int)\n self._readSpecialComments()\n self.NNCOML = nappy.utils.text_parser.readItemFromLine(self.file.readline(), int)\n self._readNormalComments()", "def comment():", "def parse_code_comment(self, filepath):\n raise NotImplementedError('Not Implemented')", "def test_req_file_parse_comment_start_of_line(tmpdir):\n with open(tmpdir.join(\"req1.txt\"), \"w\") as fp:\n fp.write(\"# Comment \")\n\n finder = PackageFinder([], [], session=PipSession())\n reqs = list(parse_requirements(tmpdir.join(\"req1.txt\"), finder,\n session=PipSession()))\n\n assert not reqs", "def file_read(self):\r\n with open(self.source_text_path, 'r') as myfile:\r\n data = myfile.read() \r\n comments = re.compile(r'''\r\n (//[^\\n]*(?:\\n|$)) # Everything between // and the end of the line/file\r\n | # or \r\n (/\\*.*?\\*/) # Everything between /* and */\r\n |\r\n \\/\\*[\\s\\S]*?\\*\\/|([^:]|^)\\/\\/.*$ # Every comment between /** and */ \r\n ''', re.VERBOSE)\r\n self.modified_source_text = comments.sub('\\n', data) \r\n return self.modified_source_text", "def comments_from_file(file_path):\n comments = []\n analyze = False\n comment_block_begin = False\n with open(file_path, 'r') as config_file:\n lines = config_file.readlines()\n lines = [line.rstrip() for line in lines]\n for line in lines:\n if line.startswith('# THIS MUST PRECEDE DIRECTLY BEFORE LIST OF CONFIG OPTIONS!'):\n analyze = True\n continue\n if line.startswith('# THIS MUST FOLLOW DIRECTLY AFTER LIST OF CONFIG OPTIONS!'):\n break\n if analyze and line.startswith('#'):\n if line.startswith('# BEGIN'):\n comments.append(line)\n comment_block_begin = False\n continue\n if comment_block_begin:\n comments[-1] += line.lstrip('#') if not comments[-1].endswith('/') else line.lstrip('# ')\n continue\n comment_block_begin = True\n comments.append(line.lstrip('# '))\n else: # not comment\n if comment_block_begin:\n comment_block_begin = False\n return comments", "def tokenize_file_keep_comments(infilename):\n# reg expss\n\timport re\n\timport sys\n\timport fileinput\n\timport math\n\n#\n# open and parse input \n#\n\n\ttry:\n\t\tfp = open (infilename, 'r')\n\texcept 
IOError:\n\t\tprint \"Error opening file\"\n\t\traise\n\n\tlines = fp.readlines ()\n\n#\n# put all tokens into tokens and remove comments\n#\n\ttokens = []\n\tfor line in lines:\n\t\ttmp = re.split ('[ \\t\\n]*',line)\n#\t\tprint \"tmp = \", tmp\n\t\tfor tok in tmp:\n\t\t\tif (tok != ''):\n\t\t\t\ttokens.append(tok)\n#\tprint \"tokens = \", tokens\n\n\tfp.close()\n\n\treturn tokens", "def __clean_line_comments(self):\n self.lines = [l for l in self.lines if not l.startswith(\"//\") and len(l) != 0]", "def __clean_line_comments(self):\n self.lines = [l for l in self.lines if not l.startswith(\"//\") and len(l) != 0]", "def comment_used(comment):\n path = USED_PATH\n all_comments = read_from_file_into_lst(path)\n if comment in all_comments:\n return True\n else:\n return False", "def skipComment(self):\r\n\t\tch = self.nextChar()\r\n\t\twhile ch and ch != \"\\n\":\r\n\t\t\tch = self.nextChar()", "def clean_comments(self):\n new_lines = list()\n for line in self.lines:\n if ((not line.startswith(\"//\")) & (not line.isspace()) &\n (not line.startswith(\"/*\") & (not line.startswith(\"*/\")))):\n line = Parser.strip_line(line)\n new_lines.append(line)\n self.lines = new_lines", "def skip(self):\n input = self.source\n startLine = self.line\n\n # Whether this is the first called as happen on start parsing a file (eat leading comments/white space)\n startOfFile = self.cursor is 0\n \n indent = \"\"\n \n while (True):\n if len(input) > self.cursor:\n ch = input[self.cursor]\n else:\n return\n \n self.cursor += 1\n \n if len(input) > self.cursor:\n next = input[self.cursor]\n else:\n next = None\n\n if ch == \"\\n\" and not self.scanNewlines:\n self.line += 1\n indent = \"\"\n \n elif ch == \"/\" and next == \"*\":\n self.cursor += 1\n text = \"/*\"\n inline = startLine == self.line and startLine > 1\n commentStartLine = self.line\n if startLine == self.line and not startOfFile:\n mode = \"inline\"\n elif (self.line-1) > startLine:\n # distance before this comment means it is a comment block for a whole section (multiple lines of code)\n mode = \"section\"\n else:\n # comment for maybe multiple following lines of code, but not that important (no visual white space divider)\n mode = \"block\"\n \n while (True):\n try:\n ch = input[self.cursor]\n self.cursor += 1\n except IndexError:\n raise ParseError(\"Unterminated comment\", self.fileId, self.line)\n \n if ch == \"*\":\n next = input[self.cursor]\n if next == \"/\":\n text += \"*/\"\n self.cursor += 1\n break\n \n elif ch == \"\\n\":\n self.line += 1\n \n text += ch\n \n \n # Filter escaping on slash-star combinations in comment text\n text = text.replace(\"*\\/\", \"*/\")\n \n try:\n self.comments.append(Comment.Comment(text, mode, commentStartLine, indent, self.fileId))\n except Comment.CommentException as commentError:\n Console.error(\"Ignoring comment in %s: %s\", self.fileId, commentError)\n \n \n elif ch == \"/\" and next == \"/\":\n self.cursor += 1\n text = \"//\"\n if startLine == self.line and not startOfFile:\n mode = \"inline\"\n elif (self.line-1) > startLine:\n # distance before this comment means it is a comment block for a whole section (multiple lines of code)\n mode = \"section\"\n else:\n # comment for maybe multiple following lines of code, but not that important (no visual white space divider)\n mode = \"block\"\n \n while (True):\n try:\n ch = input[self.cursor]\n self.cursor += 1\n except IndexError:\n # end of file etc.\n break\n\n if ch == \"\\n\":\n self.line += 1\n break\n \n text += ch\n \n try:\n 
self.comments.append(Comment.Comment(text, mode, self.line-1, \"\", self.fileId))\n except Comment.CommentException:\n Console.error(\"Ignoring comment in %s: %s\", self.fileId, commentError)\n\n # check for whitespace, also for special cases like 0xA0\n elif ch in \"\\xA0 \\t\":\n indent += ch\n\n else:\n self.cursor -= 1\n return", "def _parse_comments(reader):\n regex = r'\\s*(#|\\/{2}).*$'\n regex_inline = r'(:?(?:\\s)*([A-Za-z\\d\\.{}]*)|((?<=\\\").*\\\"),?)(?:\\s)*(((#|(\\/{2})).*)|)$'\n\n pipe = []\n for line in reader:\n if re.search(regex, line):\n if re.search(r'^' + regex, line, re.IGNORECASE): continue\n elif re.search(regex_inline, line):\n pipe.append(re.sub(regex_inline, r'\\1', line))\n else:\n pipe.append(line)\n return \"\\n\".join(pipe)", "def get_code(file):\n\n for line in file:\n\n if not line.strip().startswith('#'):\n\n yield line", "def _is_comment_or_blank(line):\n return re.sub(\"#.*\", \"\", line).rstrip() == \"\"", "def test_remove_single_line_comments_noannotation():\n\n\tinput_ = \"\"\"line1\n\t\t\t\tline2 \n\t\t\t\t//comment\n\t\t\t\tline3 \"\"\"\n\n\texpect = \"\"\"line1\n\t\t\t\tline2 \n\t\t\t\t\n\t\t\t\tline3 \"\"\"\n\n\tassert aunit.remove_single_line_comments(input_) == expect", "def comment(self, content):\n pass", "def _strip_comments(file_contents):\n lines_without_comments = []\n for line in file_contents:\n comment_position = line.find(COMMENT_INDICATOR)\n if comment_position != -1:\n lines_without_comments.append(line[:comment_position])\n else:\n lines_without_comments.append(line)\n return lines_without_comments", "def test_remove_single_line_comments_annotation():\n\n\tinput_ = \"\"\"line1\n\t\t\t\tline2 \n\t\t\t\t//comment\n\t\t\t\t//@Test //comment\n\t\t\t\t//comment\n\t\t\t\tline3 \"\"\"\n\n\texpect = \"\"\"line1\n\t\t\t\tline2 \n\t\t\t\t\n\t\t\t\t//@Test //comment\n\t\t\t\t\n\t\t\t\tline3 \"\"\"\n\n\tassert aunit.remove_single_line_comments(input_) == expect", "def parse(input):\n\t\n\toutput = []\n\t\n\t# Comment delimiter of the docstring\n\tcommentDelim = '\"\"\"'\n\t\n\t# Some regexes\n\ttriggerRe = re.compile(\"^(\\s*)(def .+:|class .+:)\")\n\tcommentStartRe = re.compile('^\\s*(%s)' % commentDelim)\n\tcommentEndRe = re.compile('(%s)\\s*$' % commentDelim)\n\temptyRe = re.compile(\"^\\s*$\")\n\thashLineRe = re.compile(\"^\\s*#.*$\")\n\timportLineRe = re.compile(\"^\\s*(import |from .+ import)\")\n\t\n\t# split input into lines\n\tlines = input.split(\"\\n\")\n\t\n\t# flags, buffers, ...\n\tfileHeadFlag = True\n\ttriggerWordFlag = False\n\tcommentFlag = False\n\tcomment = []\n\ttriggerWs = \"\"\n\ttriggerLines = None\n\t\n\t# process each line\n\tfor line in enumerate(lines):\n\n\t\tmatch = re.search(triggerRe, line[1])\n\t\tif match:\n\t\t\tif triggerWordFlag and triggerLines:\n\t\t\t\toutput.append(\"\\n\".join(triggerLines))\n\t\t\t\n\t\t\ttriggerWordFlag = True\n\t\t\ttriggerWs = match.group(1)\n\t\t\tfileHeadFlag = False\n\t\t\ttriggerLines = [line[1]]\n\t\t\tcontinue\n\n\t\t# file header or active keyword trigger?\n\t\tif fileHeadFlag or triggerWordFlag:\n\t\t\t# comment end of multiline comment found\n\t\t\tif re.search(commentEndRe, line[1]) and commentFlag:\n\t\t\t\tcomment.append( line[1][ : line[1].rfind(commentDelim) ] )\n\t\t\t\toutput.append(makeCommentBlock(comment, triggerWs, (triggerLines is None)))\n\t\t\t\tif triggerLines:\n\t\t\t\t\toutput.append(\"\\n\".join(triggerLines))\n\t\t\t\tcomment = []\n\t\t\t\tcommentFlag = False\n\t\t\t\ttriggerWs = \"\"\n\t\t\t\ttriggerLines = None\n\t\t\t\ttriggerWordFlag = 
False\n\t\t\t\t\n\t\t\t# comment start found\n\t\t\telif re.search(commentStartRe, line[1]):\n\t\n\t\t\t\tif re.search(commentEndRe, line[1][line[1].find(commentDelim)+len(commentDelim) :]):\n\t\t\t\t\t# singleline comment\n\t\t\t\t\tcomment.append(line[1][line[1].find(commentDelim)+len(commentDelim) : line[1].rfind(commentDelim)])\n\t\t\t\t\toutput.append(makeCommentBlock(comment, triggerWs))\n\t\t\t\t\t\n\t\t\t\t\tif triggerLines:\n\t\t\t\t\t\toutput.append(\"\\n\".join(triggerLines))\n\t\t\t\t\t\t\n\t\t\t\t\tcomment = []\n\t\t\t\t\tcommentFlag = False\n\t\t\t\t\ttriggerWs = \"\"\n\t\t\t\t\ttriggerLines = None\n\t\t\t\t\ttriggerWordFlag = False\n\t\t\t\t\t\n\t\t\t\telse:\n\t\t\t\t\t# multiline comment begin\n\t\t\t\t\tcommentFlag = True\n\t\t\t\t\tcomment.append(\n\t\t\t\t\t\tline[1][line[1].find(commentDelim)+len(commentDelim):]\n\t\t\t\t\t)\n\t\n\t\t\t# active multiline comment -> append comment\n\t\t\telif commentFlag:\n\t\t\t\tcomment.append(line[1])\n\t\t\t\n\t\t\t# still searching for comment\n\t\t\telif re.search(emptyRe, line[1]):\n\t\t\t\tif triggerLines:\n\t\t\t\t\ttriggerLines.append(line[1])\n\t\t\t\telse:\n\t\t\t\t\toutput.append(line[1])\n\t\t\t\n\t\t\t# searching for file header\n\t\t\telif fileHeadFlag:\n\t\t\t\tif not (re.search(hashLineRe, line[1]) or re.search(emptyRe, line[1]) or re.search(importLineRe, line[1])):\n\t\t\t\t\t# fileheader over -> disable search\n\t\t\t\t\tfileHeadFlag = False\n\t\t\t\toutput.append(line[1])\n\t\t\t\n\t\t\t# no comment, disable comment search mode\n\t\t\telse:\n\t\t\t\ttriggerWordFlag = False\n\t\t\t\tif triggerLines:\n\t\t\t\t\toutput.append(\"\\n\".join(triggerLines))\n\t\t\t\ttriggerLines = None\n\t\t\t\toutput.append(line[1])\n\t\t\n\t\t# just append the line\n\t\telse:\n\t\t\toutput.append(line[1])\n\t\n\t# return output\n\treturn \"\\n\".join(output)", "def test_req_file_parse_comment_end_of_line_with_url(tmpdir):\n with open(tmpdir.join(\"req1.txt\"), \"w\") as fp:\n fp.write(\"https://example.com/foo.tar.gz # Comment \")\n\n finder = PackageFinder([], [], session=PipSession())\n reqs = list(parse_requirements(tmpdir.join(\"req1.txt\"), finder,\n session=PipSession()))\n\n assert len(reqs) == 1\n assert reqs[0].link.url == \"https://example.com/foo.tar.gz\"", "def do_comment(self, line):\n if line.strip() != \"\":\n self.review.comment(line)\n return\n\n with tempfile.NamedTemporaryFile(suffix='.revu.md') as temp:\n subprocess.call(['vim', temp.name])\n with open(temp.name, 'r') as fd:\n self.review.comment(fd.read())", "def __ingest_c_comment_start(self, line, pos):\n\n if line[pos] == '/' and len(line) > pos + 1:\n if line[pos + 1] == '/':\n return -1\n elif line[pos + 1] == '*':\n self._in_block_comment = True\n return 2\n return 0", "def test_comments(self):\n comment_example = os.path.join(here, 'comment-example.ini')\n manifest = ManifestParser(manifests=(comment_example,))\n self.assertEqual(len(manifest.tests), 8)\n names = [i['name'] for i in manifest.tests]\n self.assertFalse('test_0202_app_launch_apply_update_dirlocked.js' in names)", "def Comment(self, comment):\n self.script.append(\"\")\n for i in comment.split(\"\\n\"):\n self.script.append(\"# \" + i)\n self.script.append(\"\")", "def clean_comment(line):\n if line.startswith(\"#!\"):\n line = line[2:]\n else:\n line = line[1:]\n if line.startswith(\" \"):\n line = line[1:]\n if not line.endswith('\\n'):\n line += '\\n'\n return line", "def _read_until_end_of_comments(self, fileobj):\n offset = fileobj.tell()\n line = fileobj.readline()\n if not line:\n raise 
EOFError(\"Read until EOF\")\n\n line = line.strip()\n if line.startswith(\"#\"):\n return self._read_until_end_of_comments(fileobj)\n\n fileobj.seek(offset)", "def _parse_comment(i, doc):\n\n if doc[i].strip() != \"/**\":\n raise ParseFailure(i, \"Expected beginning of block comment\")\n\n e = i + 1\n while e < len(doc) and doc[e].strip() != \"*/\":\n e += 1\n\n return e + 1, [x.rstrip() for x in doc[i + 1: e]]", "def __ingest_c_block_comments(self, line, position):\n\n pos = position\n while self._in_block_comment and pos < len(line):\n if pos + 1 < len(line) and line[pos] == '*' and line[pos + 1] == '/':\n self._in_block_comment = False\n pos += 2\n pos += 1\n return pos - position", "def process_all(self):\n global multi_comment_line_mode\n multi_comment_line_mode = False\n for line in self.fileToProcess:\n line = line.strip() # creating a strip line, with no whitespace in the beginning and in the end\n # multi_comment_line_mode = False\n # first, we want to filter all the lines which are comments or part of comments\n while line != '':\n ignoring_status,newline = self.shouldTheLineBeIgnored(line)\n if ignoring_status:\n break # we are ignoring the line\n elif (not ignoring_status) and (newline != '') and newline != '$endOfMultiLine':\n line = newline\n continue\n elif not ignoring_status and newline == '$endOfMultiLine':\n break\n else:\n line = self.isThereApartToIgnore(line) #getting the good line\n line = line.strip()\n if line.endswith('$endOfMultiLine'):\n # line = line[:-1]\n line = line[:-15]\n # in this case we don't want to ignore the current line\n # if multi_comment_line_mode:\n # # this comes from the\n list_of_line_strings = re.split('(\\W)', line) # the default of this method is to remove all the white spaces\n list_of_line_strings = list(filter(None, list_of_line_strings))\n global i\n i = 0\n global first_index\n first_index = 0\n global second_index\n second_index = 0\n len_of_list = len(list_of_line_strings)\n while i < len_of_list:\n # first adding the string literals\n if (list_of_line_strings[i] == '\"' and i == 0) or (i>0 and list_of_line_strings[i] == '\"' and\n list_of_line_strings[i-1]!='*'):\n first_index = i\n i = i + 1\n if i == len(list_of_line_strings):\n break\n while list_of_line_strings[i] != '\"':\n i = i + 1\n if i>=len(list_of_line_strings):\n # in case it's the end\n i = first_index\n break\n else:\n continue\n second_index = i\n list_of_line_strings[first_index:second_index + 1] = [\n ''.join(list_of_line_strings[first_index:second_index + 1])]\n i = i + 2\n len_of_list = len(list_of_line_strings)\n else:\n i = i + 1\n j=0\n global skip_mode\n skip_mode = False\n global counter\n counter = 0\n for string in list_of_line_strings:\n if j != len(list_of_line_strings)-1:\n j+=1\n if counter == 1:\n counter = 0\n continue\n if skip_mode and not (string == '*' and list_of_line_strings[j] == '/'):\n continue\n if skip_mode and string == '*' and list_of_line_strings[j] == '/':\n skip_mode = False\n counter = 1\n continue\n if string == \"/\" and (list_of_line_strings[j] == \"/\" ):\n # this is a comment that appeared in the line\n break # in this case, there are no more chars to read because it's a note\n if string == \"/\" and list_of_line_strings[j] == \"*\":\n skip_mode = True\n counter = 1\n continue # entering a skip mode\n if string.strip() == '':\n continue\n self.currStringToProcess = string\n type = self.tokenType()\n self.createToken(type,self.currStringToProcess)\n break", "def _dump_comment(comment: List[str]) -> List[str]:\n return [\"/**\"] 
+ comment + [\"*/\"]", "def test_store_comments(parallel, read_basic):\n text = \"\"\"\n# header comment\na b c\n# comment 2\n# comment 3\n1 2 3\n4 5 6\n\"\"\"\n table = read_basic(text, parallel=parallel, check_meta=True)\n assert_equal(table.meta[\"comments\"], [\"header comment\", \"comment 2\", \"comment 3\"])", "def remove_c_style_comments(fd):\n ret = []\n comment_state = False\n for line in fd:\n while True:\n # seems we have nothing left\n if len(line) < 2:\n break\n # we're still inside a comment\n if comment_state:\n idx = line.find(\"*/\")\n if idx > -1:\n line = line[idx + 2:]\n comment_state = False\n continue\n # comment doesn't seem to end on this line\n break\n # we're not inside any comment\n else:\n idx = line.find(\"/*\")\n if idx > -1:\n line = line[idx + 2:]\n comment_state = True\n continue\n if \"//\" in line:\n line = line.split(\"//\", 1)[0]\n # only now we can actually do our job\n line = line.strip()\n if len(line) > 0:\n ret.append(line)\n break\n return ret", "def _readNormalComments(self):\n self.NCOM = self._readLines(self.NNCOML)\n return self.NCOM", "def _is_comment_line(self):\n pattern = re.compile(r\"^(\\s)*(//)+\")\n return pattern.search(self._line)", "def test_comment(parallel, read_basic):\n table = read_basic(\n \"# comment\\nA B C\\n # another comment\\n1 2 3\\n4 5 6\", parallel=parallel\n )\n expected = Table([[1, 4], [2, 5], [3, 6]], names=(\"A\", \"B\", \"C\"))\n assert_table_equal(table, expected)", "def comment_stripper(iterator):\n for line in iterator:\n if line [:1] == '#':\n continue\n if not line.strip ():\n continue\n yield line", "def test_open_ped_comment_line(self):\n \n self.temp.write('A B 0 0 1 1\\n')\n self.temp.write('#anything can go here\\n')\n self.temp.flush()\n families = open_ped(self.temp.name)\n \n fam = Family('A')\n fam.add_person(Person('A', 'B', '0', '0', '1', '1'))\n \n self.assertEqual(families[0].nodes, fam.nodes)", "def visit_comment(self, node):\n self.body.append('\\n%% %s \\n' % COMMENT_RE_SUB('\\n% ', node.astext()))\n raise nodes.SkipNode", "def test_read_quotes_commented_lines(self):\n\n path = tests.test_util.init_quotefile(self.tempdir, \"quotes4.txt\")\n quotes = api.read_quotes(path)\n self.assertEqual(4, len(quotes))", "def block_comment(self):\n while (\n not (self.peek() == \"*\" and self.peek_next() == \"/\")\n and not self.is_at_end()\n ):\n if self.peek() == \"\\n\":\n self.line += 1\n self.advance()\n\n if self.peek() == \"*\" and self.peek_next() == \"/\":\n self.advance(spaces=2)\n\n return None", "def append_used_comment_to_file(comment):\n path = USED_PATH\n with open(path, 'a+') as fp:\n fp.write(repr(comment).lstrip('\"\\'').rstrip('\"\\'') + '\\n')", "def visit_comment(self, token: tokenize.TokenInfo) -> None:\n if not self._is_first_comment(token):\n return # this is a regular comment, not a shebang\n\n is_shebang = self._is_valid_shebang_line(token)\n self._check_executable_mismatch(token, is_shebang=is_shebang)\n if is_shebang:\n self._check_valid_shebang(token)", "def load_comments(self, pkgfile):\n\n # Note: This has to be called with a Python\n # source file (.py) only!\n\n if not os.path.exists(pkgfile):\n return \"\"\n\n comment = \"\"\n\n try:\n of = open(pkgfile,'rb')\n data = of.read()\n if data:\n # Create code object\n try:\n c = compiler.compile(data,pkgfile,'exec')\n # Get the position of first line of code\n if c:\n lno = c.co_firstlineno\n lnum = 0\n # Read file till this line number\n of.seek(0)\n for line in of:\n comment = \"\".join((comment, line))\n lnum += 1\n if 
lnum==lno or line==\"\\n\": break\n except SyntaxError:\n pass\n except Exception:\n pass\n of.close()\n except (OSError, IOError, TypeError):\n pass\n\n return comment", "def comment_content(c):\n content = str(c)[4:-3]\n return content.strip()", "def file_reading_iterator(filename, comment_char=\"#\", options='r'):\n # actual loop\n with open(filename, options) as f:\n while True:\n line = f.readline()\n if not line:\n break\n # get rid of comment_char\n line = line.partition(comment_char)[0]\n # get rid of white spaces\n line = line.rstrip()\n # return line\n yield line", "def test_comments(self):\n\n comment_str = \"# This is a comment\\n# This is another comment\"\n doc = parser.parse(comment_str)\n\n self.assertEqual(len(doc.children()), 2)", "def tests_comment(self):\n\n for domain in self.domains:\n expected = None\n\n data = f\"# {domain}\"\n actual = File(data).get_converted()\n\n self.assertEqual(expected, actual)", "def comments(self):\n lineno = 0\n novermin = set()\n src = self.__source\n if type(src) == bytes:\n src = src.decode(errors=\"ignore\")\n for line in src.splitlines():\n lineno += 1\n line = line.strip()\n m = RE_COMMENT.match(line)\n if m is not None:\n comment = m.group(2).strip()\n if comment == \"novermin\" or comment == \"novm\":\n # Ignore if it is inside another comment, like: `# test: # novm`\n if m.start(0) < m.start(1) and m.group(0).strip().startswith(\"#\"):\n continue\n # Associate with next line if the comment is \"alone\" on a line, i.e. '#' starts the line.\n novermin.add(lineno + 1 if m.start(1) == 0 else lineno)\n return novermin", "def block_comments(code):\n block = list()\n for line in code:\n if bool(line.strip()): # If line is not empty\n if line.strip()[0] == '!': # If the first character of the string is the start of a comment it adds it\n block.append(identify_comment(line))\n elif bool(line.strip()): # If the first character of the string is not the start of a comment or its not empty it exits\n break\n return block", "def _preprocess_file(file_name):\n raw_content = utils.run_on_main_thread(\n partial(utils.get_file_content, file_name, force_lf_endings=True))\n\n # replace all comments with spaces to not change the position\n # of the rest\n comments = [c for c in _RE_COMMENT.finditer(raw_content)]\n content = list(raw_content)\n for m in comments:\n for i in range(m.start(), m.end()):\n content[i] = ' '\n content = \"\".join(content)\n return raw_content, content", "def tests_ends_with_comment(self):\n\n for domain in self.domains:\n expected = domain\n\n data = f\"{domain} # hello world\"\n actual = File(data).get_converted()\n\n self.assertEqual(expected, actual)", "def make_comment(self, input, start, end, elements):\n return elements[1].text.strip('{}')", "def remove_comments(line):\n hashPos = line.find('#')\n return line[:hashPos] if hashPos >= 0 else line", "def set_file_comment(self, doc, text):\n if self.has_package(doc) and self.has_file(doc):\n if not self.file_comment_set:\n self.file_comment_set = True\n if validations.validate_file_comment(text):\n self.file(doc).comment = str_from_text(text)\n return True\n else:\n raise SPDXValueError('File::Comment')\n else:\n raise CardinalityError('File::Comment')\n else:\n raise OrderError('File::Comment')", "def extract_comments(comments_file, output_filename=direc+\"/comments.txt\"):\r\n if not os.path.exists(output_filename.split(\"/\")[0]):\r\n os.makedirs(output_filename.split(\"/\")[0])\r\n\r\n print(\"Extracting comments from \" + comments_file + \"...\")\r\n comments_dict = 
{}\r\n with open(output_filename, \"w\", encoding=encoding) as f:\r\n current = 0\r\n for event, child in iterparse(comments_file, events=('start', 'end')):\r\n if current > SAMPLE_SIZE:\r\n break\r\n elif len(child.attrib) > 0 and event == \"start\":\r\n if child.attrib['PostId'] not in comments_dict:\r\n comments_dict[child.attrib['PostId']] = []\r\n comments_dict[child.attrib['PostId']].append(child.attrib['Id'])\r\n clean_comment = clean_markdown(child.attrib['Text'])\r\n line = child.attrib['Id'] + \"\\t\" + child.attrib['PostId'] + \"\\t\" + clean_comment + \"\\t\" + child.attrib['Score'] + \"\\n\"\r\n f.write(line)\r\n\r\n current += 1\r\n print_progress(current, SAMPLE_SIZE)\r\n print(\"\\nFinished extracting comments from \" + comments_file + \".\\n\")\r\n return comments_dict", "def test_read_empty_basic_table_with_comments(fast_reader):\n dat = \"\"\"\n # comment 1\n # comment 2\n col1 col2\n \"\"\"\n t = ascii.read(dat, fast_reader=fast_reader)\n assert t.meta[\"comments\"] == [\"comment 1\", \"comment 2\"]\n assert len(t) == 0\n assert t.colnames == [\"col1\", \"col2\"]", "def _count_comment_rows(vcf_path):\n vcf_lines_generator = lines_from_vcf(vcf_path)\n\n comment_lines_count = 0\n for line in vcf_lines_generator:\n if line.startswith('#'):\n comment_lines_count += 1\n else:\n vcf_lines_generator.close() # Don't leave the file handle opened\n # Don't continue reading the VCF once the comments section ended\n break\n\n return comment_lines_count", "def c_comment(self, token: Token):\n if token.value.startswith(\"/**\"):\n self.doc_comments.append(PrefixCppDocComment(token))", "def identify_comment(code_line):\n discard_between = ['\\'', '\\\"']\n counter = 0\n while counter + 1 <= len(code_line): # Studies each position in the line\n if code_line[counter] in discard_between: # If fortran character is being written jumps to end of char\n jump = code_line[counter+1:].find(code_line[counter])\n if jump == -1:\n raise Exception('Fortran character did not finish being declared from position {}: \\n {}'.format(counter, code_line))\n counter += jump + 1\n if code_line[counter] == '!': # If it finds comment declaration it stores it \n return code_line[counter:]\n break\n counter += 1 # Advances counter\n else: # If it reaches the end of the code without finding comment it returns none\n return None", "def uncomment_xml(file, key_word):\n\n f = open(file, \"r+\")\n\n new_text = \"\"\n while True:\n line = f.readline()\n if not line:\n break\n if line.count(key_word) > 0:\n print('%s found in the code, about to uncomment...' 
% key_word)\n new_text += line.replace(\"!-- \", \"\").replace(\"--\", \"\")\n else:\n new_text += line\n f.close()\n f = open(file, \"w+\")\n f.write(new_text)\n print(\"Successfully uncommented!\")\n f.close()", "def _readSpecialComments(self):\n self.SCOM = self._readLines(self.NSCOML)\n return self.SCOM", "def test_end_comment():\n import figleaf\n \n filename = os.path.join(thisdir, 'tst_end_comment.py')\n figleaf.get_lines(open(filename))", "def test_lint_fail_nocomment(self, style):\n with ExpectedException(RuntimeError):\n run_linter_throw(\"path/to/file\",\n \"aabb\\nbbcc\",\n style,\n whitelist=[\"headerblock/filename\"])", "def skip_gutenberg_header(fp):\n for line in fp:\n if line.startswith('*** START OF THIS PROJECT'):\n break", "def skip_gutenberg_header(fp):\n for line in fp:\n if line.startswith(''):\n break", "def ostrip(thefile):\n outlines = []\n with open(thefile, 'r') as f:\n for line in f:\n if line[0] != '%':\n if '%' in line:\n if r'\\%' in line or line[-1] == '%':\n outlines.append(line) # these are not real comments\n else:\n outlines.append(line.split(' %')[0]+'\\n')\n else:\n outlines.append(line)\n return outlines", "def handleCommentLine(sLine, iLine):\r\n\tglobal sEType, sEVar, sEData, iIndent\r\n\r\n\t# Work out the indentation level to operate at.\r\n\t# This is only done once for each comment block.\r\n\tif iIndent < 0:\r\n\t\tiIndent = (len(sLine) - len(sLine.lstrip())) / 4\r\n\r\n\t# If there is no '@' symbol, save as much data as we can from the commentline.\r\n\tif START_SYMBOL not in sLine:\r\n\r\n\t\t# If we are a directive which only accepts single line values then anything extra is a remark.\r\n\t\tif sEType in (PARAM, RETURN, AUTHOR, DATE):\r\n\t\t\thandleExistingData(iIndent)\r\n\t\t\tsEType = REMARK\r\n\t\t\tsEData = \"\"\r\n\r\n\t\t# Get the data from the line and append it if it is exists.\r\n\t\tsData = dataFromLine(sLine)\r\n\t\tif len(sData) > 0:\r\n\t\t\t# If we already have data, insert a breakline.\r\n\t\t\tif sEData:\r\n\t\t\t\tsEData += BREAK + sData\r\n\r\n\t\t\t# Otherwise do not.\r\n\t\t\telse:\r\n\t\t\t\tsEData = sData\r\n\t\t\r\n\t\t# If we have an end comment on this line, exit the comment by returning false.\r\n\t\tif CLOSE_COMMENT in sLine:\r\n\t\t\thandleExistingData(iIndent)\r\n\t\t\tendComment()\r\n\t\t\treturn False\r\n\t\treturn True\r\n\r\n\t# Since the line does contain an '@' symbol, push any existing data.\r\n\thandleExistingData(iIndent)\r\n\r\n\t# If this line contains an '@' symbol then work out what is after it.\r\n\tsEType = sLine.split(START_SYMBOL)[1].split(\" \")[0]\r\n\r\n\t# If the comment data type is BRIEF\r\n\tif sEType == BRIEF:\r\n\t\tsEData = dataFromString(sLine, sLine.find(BRIEF) + len(BRIEF) + 1)\r\n\r\n\telif sEType == PARAM:\r\n\t\tsTemp = dataFromString(sLine, sLine.find(PARAM) + len(PARAM) + 1)\r\n\t\tiChop = sTemp.find(\" \") + 1\r\n\t\tsEData = sTemp[iChop:]\r\n\t\tsEVar = sTemp[:iChop].rstrip()\r\n\r\n\telif sEType == RETURN:\r\n\t\tsEData = dataFromString(sLine, sLine.find(RETURN) + len(RETURN) + 1)\r\n\r\n\telif sEType == DATE:\r\n\t\tsEData = dataFromString(sLine, sLine.find(DATE) + len(DATE) + 1)\r\n\r\n\telif sEType == AUTHOR:\r\n\t\tsEData = dataFromString(sLine, sLine.find(AUTHOR) + len(AUTHOR) + 1)\r\n\r\n\t# If we have an end comment on this line, exit the comment by returning false.\r\n\tif CLOSE_COMMENT in sLine:\r\n\t\thandleExistingData(iIndent)\r\n\t\tendComment()\r\n\t\treturn False\r\n\treturn True", "def loadText(self,inName):\n reComment = re.compile(r'\\s*\\#.*')\n ins = 
file(inName)\n for line in ins:\n #print line,\n #--Strip spaces and comments\n line = reComment.sub('',line)\n line = line.rstrip()\n #--Skip empty/comment lines\n if not line: continue\n #--Parse line\n (libId,srcId,altId) = line.split('\\t')[:3]\n self.libList.append(libId)\n self.libMap[libId] = (srcId,altId)\n #--Done\n ins.close()", "def test_double_comment(self):\n self.compare_tokens(\n \"## papān libbi[belly] (already in gloss, same spelling)\\n\",\n ['COMMENT', 'ID', 'NEWLINE']\n )", "def parser(filename: str):\r\n f = open(filename, 'r')\r\n lines = []\r\n for line in f:\r\n line = line.split(\"//\", 1)[0] # discard comments\r\n line = line.strip()\r\n if line: # discard pure whitespace lines\r\n lines.append(line)\r\n f.close()\r\n return lines", "def strip_comments(line):\n if \"#\" in line:\n return line[:line.find(\"#\")]\n else:\n return line", "def test_parse_multiline_comment(self):\n source_code = dedent(\"\"\"\\\n /**\n * this is a doc comment that stretches over\n * more than one line\n */\n int main()\n {\n return 0;\n }\n \"\"\")\n result = self.parser.parse(source_code.splitlines())\n assert_equal(result, {\n \"int main()\": (\"this is a doc comment that stretches over \"\n \"more than one line\")})", "def commentOutLineMatching(pattern,fileName,maxOccurs=None):\n \n file=open(fileName,mode='r')\n pattern=re.compile(pattern)\n fileText=\"\"\n numMatches=0\n if maxOccurs==None:\n maxOccurs=sys.maxsize\n \n for line in file:\n \n if pattern.match(line) and numMatches<maxOccurs:\n fileText+=\"#\"+line\n numMatches+=1\n else:\n fileText+=line\n file.close()\n file=open(fileName,mode='w')\n file.write(fileText)\n file.close()\n return numMatches", "def parse_comment(comment: Union[Token, PsuedoToken]) -> str:\n # Happens when there is no documentation comment in the source file for the\n # item.\n spelling = comment.spelling\n if spelling is None:\n return \"\"\n\n # Comments from clang start at the '/*' portion, but if the comment itself\n # is indented subsequent lines will have too much indent.\n # Transform::\n #\n # \"/**\\n * hello some comment\\n * on multiple lines\\n */\"\n #\n # into::\n #\n # \"/**\\n * hello some comment\\n * on multiple lines\\n */\"\n indent = \" \" * (comment.extent.start.column - 1)\n indented_comment = indent + spelling\n dedented_comment = textwrap.dedent(indented_comment)\n\n # Notes on the regex here.\n # Option 1 '\\s?\\*/?'\n # This piece will match comment lines that start with '*' or ' *'.\n # This will also match a trailing '*/' for the end of a comment\n #\n # Option 2 '^/\\*+<?'\n # This will match the start of a comment '/*' and consume any\n # subsequent '*'. 
This is also meant to catch '/**<' for trailing comments.\n #\n # Option 3 '\\*+/'\n # Matches any and all '*' up to the end of the comment string.\n contents = re.sub(\n r\"^\\s?\\*/?|^/\\*+<?|\\*+/\",\n lambda x: len(x.group(0)) * \" \",\n dedented_comment,\n flags=re.MULTILINE,\n )\n\n contents = textwrap.dedent(contents)\n\n # there may still be left over newlines so only strip those, but leave any\n # whitespaces.\n contents = contents.strip(\"\\n\")\n\n return contents", "def detecteComments(liste, j, i):\n\n\treturn liste[j][i] == '#' or (i < len(liste[j])-2 and liste[j][i]==\"\\\"\" and liste[j][i+1]==\"\\\"\" and liste[j][i+2]==\"\\\"\")", "def test_line(line):\r\n if not line.strip():\r\n return False # if the last line is blank\r\n if line.startswith(\"#\"):\r\n return False # comment line\r\n if line.startswith(\" #\"):\r\n return False # comment line\r\n return line", "def lines(filename, exclude_imports=True, exclude_comments=True, exclude_tests=True, exclude_globals=True, exclude_blank=True, verbose=False, is_c=False, s=None):\n if s is None:\n s = open(filename, 'rt').read()\n\n L = s.split('\\n')\n \n # Hack to strip out triple and single quote string lines in a heuristic (unreliable) way, which avoids parsing Cython\n if not is_c:\n for i in range(len(L)):\n if L[i].strip().startswith(\"'\") and L[i].strip().endswith(\"'\"):\n L[i] = ''\n i = 0\n while i < len(L):\n found = False\n for triple_quote in ['\"\"\"', \"'''\"]:\n if L[i].strip().startswith(triple_quote):\n L[i] = L[i].strip()[3:]\n for j in range(i, len(L)):\n if triple_quote in L[j]:\n found = True\n L[j] = ''\n if found:\n break\n i = j+1\n if not found:\n i += 1\n else:\n begin_comment = '/*'\n end_comment = '*/'\n i = 0\n while i < len(L):\n found = False\n if begin_comment in L[i]:\n rest = L[i][L[i].index(begin_comment)+len(begin_comment):]\n L[i] = L[i][:L[i].index(begin_comment)]\n if end_comment in rest:\n found = True\n i += 1\n else:\n for j in range(i+1, len(L)):\n if end_comment in L[j]:\n found = True\n L[j] = L[j][L[j].index(end_comment)+len(end_comment):]\n else:\n L[j] = ''\n if found:\n break\n i = j + 1\n if not found:\n i += 1\n\n# util.print_header('Lines before exclude_tests:' + filename, '\\n'.join(L))\n\n # Hack to strip out def test() and other methods in a heuristic (unreliable) way, which avoids parsing Cython\n if exclude_tests:\n # Also exclude makeColorMatrix so that our camera pipe is apples-to-apples comparable with reported lines in Halide paper\n if not is_c:\n methods = 'test run_test_all mandelbrot_gray mandelbrot_color composite_numpy composite_numexpr makeColorMatrix'.split()\n else:\n methods = ['int main', 'void main']\n i = 0\n while i < len(L):\n L_i_strip = L[i].strip()\n if ((not is_c and (any(L_i_strip.startswith('def ' + method) for method in methods) or\n any(L_i_strip.startswith('cdef ' + method) for method in methods))) or\n (is_c and (any(L_i_strip.startswith(method) for method in methods)))):\n L[i] = ''\n for j in range(i+1, len(L)):\n L_j_strip = L[j].strip()\n c_ok = True\n if is_c:\n c_ok = L_j_strip != '{' and L_j_strip != '}'\n if not L[j].startswith(' ') and not L[j].startswith('\\t') and not len(L[j].strip()) == 0 and c_ok:\n break\n else:\n L[j] = ''\n i = j\n elif (L[i].strip().startswith('test(') or L[i].strip().startswith('run_test_all(')) and not is_c:\n L[i] = ''\n i += 1\n else:\n i += 1\n\n# util.print_header('Lines before exclude_imports:' + filename, '\\n'.join(L))\n if exclude_imports:\n if not is_c:\n L = [x for x in L if not 
x.lstrip().startswith('import') and not x.lstrip().startswith('cimport') and not x.startswith('cdef extern')]\n else:\n L = [x for x in L if not x.lstrip().startswith('#include')]\n# util.print_header('Lines before exclude_comments:' + filename, '\\n'.join(L))\n if exclude_comments:\n if not is_c:\n L = [x for x in L if not x.lstrip().startswith('#') and not x.strip() == 'pass']\n else:\n L = [x for x in L if not x.lstrip().startswith('//')]\n# util.print_header('Lines before exclude_globals:' + filename, '\\n'.join(L))\n if exclude_globals and not is_c:\n L = [x for x in L if (x.startswith(' ') or x.startswith('\\t') or x.startswith('def') or x.startswith('cdef')) and (not x.lstrip().startswith('has_'))]\n# util.print_header('Lines before exclude_blank:' + filename, '\\n'.join(L))\n\n if is_c:\n # Also exclude makeColorMatrix so that C camera pipe is apples-to-apples comparable with reported lines in Halide paper\n L = [x for x in L if not x.lstrip().startswith('matrix_3200') and not x.lstrip().startswith('matrix_7000')]\n if exclude_blank:\n L = [x for x in L if not len(x.strip()) == 0]\n\n if verbose:\n util.print_header('Final lines for:' + filename, '\\n'.join(L))\n\n return len(L)", "def extract_comment_py():\n debug(\"extract comment from a python script.\")\n for line in CURRENT_BUFFER[:3]:\n if re.search(r\"coding[:=]\\s*([-\\w.]+)\", line):\n pattern = re.compile(r\"coding[:=]\\s*(?P<encoding>[-\\w.]+)\")\n globals()['ENCODING'] = pattern.search(line).group('encoding')\n debug(\"found encoding: %s\" % globals()['ENCODING'])\n\n lines = list(CURRENT_BUFFER)\n for (i, iline) in enumerate(lines[:10]):\n # find \"\"\" or ''' in the first few lines.\n if '\"\"\"' in iline or \"'''\" in iline:\n # find the end of it.\n breaker = '\"\"\"' if '\"\"\"' in iline else \"'''\"\n for j, jline in enumerate(lines[i+1:]):\n if breaker in jline:\n # found it, format the comment a little bit.\n if j == 0:\n # in the same line, this is a one line comment.\n return [jline[jline.index(breaker)+3:jline.rindex(breaker)]]\n else:\n lines[i] = lines[i][lines[i].index(breaker)+3:]\n lines[i+j+1] = lines[i+j+1][:lines[i+j+1].rindex(breaker)]\n return lines[i:i+j+1]\n else:\n # end of the comment is not found.\n return\n else:\n # comment might start with #\n return extract_comment_sh(python_style=True)", "def tokenize_file(infilename):\n# reg expss\n\timport re\n\timport sys\n\timport fileinput\n\timport math\n\n#\n# open and parse input \n#\n\n\ttry:\n\t\tfp = open (infilename, 'r')\n\texcept IOError:\n\t\tprint \"Error opening file\"\n\t\traise\n\n\tlines = fp.readlines ()\n\n#\n# put all tokens into tokens and remove comments\n#\n\ttokens = []\n\tfor line in lines:\n\t\ttmp = re.split ('[ \\t\\n]*',line)\n#\t\tprint \"tmp = \", tmp\n\t\tfor tok in tmp:\n\t\t\tif (tok != ''):\n\t\t\t\tif (re.compile('[#!][.]*').match(tok)):\n\t\t\t\t\tbreak\n\t\t\t\ttokens.append(tok)\n#\tprint \"tokens = \", tokens\n\n\tfp.close()\n\n\treturn tokens", "def readFiltered(f):\n line = f.readline()\n while line:\n line = line.strip()\n if len(line) != 0:\n if line == \"### NEW EXPERIMENT ###\":\n # print (\"readFiltered: ''\")\n yield \"\"\n elif line[0] != \"#\":\n # print (\"readFiltered: '\",line,\"'\")\n yield line\n line = f.readline()\n # print (\"readFiltered: '\",line,\"'\")\n return line", "def commented(path):\n return 'Conformer ' + re.findall(r'.*test(\\d+)\\.out', path)[0]", "def skiprows(fn):\n for i, line in enumerate(open(fn)):\n if line.startswith('#'):\n continue\n else:\n break\n return i", "def 
get_list_of_comments(path):\n\n # opens comments file\n try:\n return [\n re.sub(\" +\", \" \", comment.strip().rstrip())\n for comment in list(open(path, \"r\"))\n ]\n except Exception as e:\n print(\"Error loading comments file: \", e)\n sys.exit(1)", "def tokenize(fp):\n for line in fp:\n line = line.strip()\n if line[0] == '#':\n continue\n for tok in line.split():\n yield tok", "def process_comment(self, data):\r\n if not self.is_suppress:\r\n return [data]", "def listFromLines(lines):\n reComment = re.compile('#.*')\n temp = [reComment.sub('',x).strip() for x in lines.split('\\n')]\n temp = [x for x in temp if x]\n return temp", "def visit_comment(self, node):\n self.printer.comment(node.xml_value)\n return", "def loadText(self,filePath):\n ins = file(filePath,'r')\n reComment = re.compile(r\"#.*\")\n reSection = re.compile(r'@ +(srcmod|replace)',re.M)\n reReplace = re.compile(r\"(\\w[-\\w ']+)\\s*:\\s*(.+)\")\n reNewIds = re.compile(r\",\\s*\")\n mode = None\n for line in ins:\n line = reComment.sub('',line.strip())\n maSection = reSection.match(line)\n if maSection:\n mode = maSection.group(1)\n elif not line: #--Empty/comment line\n pass\n elif mode == 'srcmod':\n self.srcModName = line\n elif mode == 'replace':\n maReplace = reReplace.match(line)\n if not maReplace: continue\n oldId = maReplace.group(1)\n self.newIds[oldId.lower()] = reNewIds.split(maReplace.group(2))\n ins.close()", "def read_patch_comments(self) -> dict:\n comment: List[str] = []\n patch_comments = {}\n for line in self.spec_content.section(\"%package\"):\n # An empty line clears the comment lines collected so far.\n if not line.strip():\n comment = []\n # Remember a comment line.\n if line.startswith(\"#\"):\n comment.append(line[1:].strip())\n # Associate comments with patches and clear the comments\n # collected.\n if line.lower().startswith(\"patch\"):\n patch_name = Path(line.split(\":\", 1)[1].strip()).name\n patch_comments[patch_name] = comment\n comment = []\n return patch_comments", "def parse(s):\n a = ArffFile()\n a.state = 'comment'\n a.lineno = 1\n for l in s.splitlines():\n a.__parseline(l)\n a.lineno += 1\n return a", "def _writeComments(self):\n self.header.write(wrapLine(\"NSCOML\", self.annotation, self.delimiter, \"%d\\n\" % self.NSCOML))\n self.header.write(wrapLines(\"SCOM\", self.annotation, self.delimiter, \"%s\\n\" * self.NSCOML % tuple(self.SCOM)))\n self.header.write(wrapLine(\"NNCOML\", self.annotation, self.delimiter, \"%d\\n\" % self.NNCOML))\n self.header.write(wrapLines(\"NCOM\", self.annotation, self.delimiter, \"%s\\n\" * self.NNCOML % tuple(self.NCOM)))", "def test_does_not_match_block_comments(self):\n\n comment = dedent(\"\"\"\\\n --[[\n Hello, World!\n --]]\"\"\")\n\n script = rbxmx.ScriptElement(source=comment)\n first_comment = script.get_first_comment()\n\n assert first_comment is None", "def commentaires(file_name_c):\n com = subprocess.run([\"grep\", \"eleves_bis/\" + file_name_c, \"-e\", '/\\*'], stdout=subprocess.PIPE)\n return com.stdout.decode().count(\"\\n\")", "def print_comment_v(text):\n print_comment(text, True)", "def __preprocess(self, infile, outfile):\r\n with open(outfile, \"w\") as _outfile:\r\n _outfile.write(textwrap.dedent(\"\"\"\\\r\n /*\r\n * This file is dynamically generated and ignored by Git.\r\n * DO NOT MAKE CHANGES HERE. 
Instead, go edit its template:\r\n * %s\r\n */\r\n \"\"\" % infile))\r\n _outfile.write(Template(filename=str(infile)).render(env=self.__context()))", "def skip_gutenberg_header(fp):\n for line in fp:\n if line.startswith('*END*THE SMALL PRINT!'):\n break" ]
[ "0.7160076", "0.6916625", "0.6867486", "0.6765558", "0.6749642", "0.6743258", "0.6716404", "0.670146", "0.6650719", "0.65236187", "0.64683205", "0.64683205", "0.64408094", "0.6426981", "0.6329149", "0.63261276", "0.6201037", "0.6192405", "0.61553055", "0.6140866", "0.613756", "0.61210155", "0.6114795", "0.6114537", "0.6101729", "0.6090377", "0.60869724", "0.6061222", "0.6056536", "0.6052817", "0.60502154", "0.6039174", "0.6035846", "0.60346216", "0.6025457", "0.6023761", "0.6014276", "0.60013646", "0.59949076", "0.59935206", "0.5977095", "0.59676135", "0.59630764", "0.5957912", "0.5953696", "0.5951151", "0.59421074", "0.59369737", "0.5936092", "0.5919474", "0.591075", "0.58561575", "0.5842712", "0.58312434", "0.5821185", "0.58052725", "0.5799269", "0.5796829", "0.5791195", "0.5788875", "0.57505906", "0.57489866", "0.57375515", "0.5733421", "0.5727098", "0.572219", "0.5717966", "0.57071257", "0.5704598", "0.57030874", "0.570201", "0.5701192", "0.5694906", "0.5690103", "0.5688152", "0.566977", "0.5668599", "0.5666812", "0.5663585", "0.5662815", "0.5660036", "0.565712", "0.56557876", "0.56543136", "0.5652424", "0.5647587", "0.5631532", "0.5622451", "0.5620777", "0.560656", "0.56000054", "0.5596486", "0.55858076", "0.5585754", "0.5579358", "0.55672693", "0.55596954", "0.55592173", "0.5541149", "0.5539186", "0.55323017" ]
0.0
-1
read the list of lick indices
def _read_lick_list(cls, fname=__default__, comment='#'): with open(fname, 'r') as f: data = {} hdr = [] for line in f: if line[0] != comment: l = line.split() attr = dict( band=(float(l[1]), float(l[2])), blue=(float(l[3]), float(l[4])), red=(float(l[5]), float(l[6])), unit='mag' if int(l[7]) > 0 else 'ew', ) name = l[8] data[name] = attr else: hdr.append(line[1:-1]) return data, hdr
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _read_indices(path):\n paths = sorted(tf.io.gfile.glob('%s-*-of-*_index.json' % path))\n all_indices = []\n for path in paths:\n json_str = epath.Path(path).read_text()\n # parse it back into a proto.\n shard_index = json.loads(json_str)\n all_indices.append(list(shard_index['index']))\n return [os.path.basename(p) for p in paths], all_indices", "def get_raw_indexes(self) -> List[Dict[str, Any]]:\n return self.http.get(self.config.paths.index)", "def index():\n return list()", "def get_all_indexes (self):\r\n\r\n\r\n if self.using_database:\r\n aprint('GET ALL INDEXES')\r\n\r\n value_tuple = (notebookname,)\r\n db_cursor.execute(\"SELECT note_index\"\r\n +\" FROM notes\"\r\n +\" WHERE notebook=?\",value_tuple)\r\n indexes = db_cursor.fetchall()\r\n indexes = {str(index[0]).strip() for index in indexes}\r\n return indexes\r\n\r\n\r\n return self.note_dict.keys()", "def indices(self):\n if self._indices is None:\n i = []\n\n # TODO: this is not right for multi-column keys\n # TODO: new style indexes\n\n global_name = '^DD(%s,0,\"IX\",\"0\")' % self.fileid\n prefix = '^DD(%s,0,\"IX\",' % self.fileid\n while 1:\n global_name = M.mexec('set s0=$query(%s)' % global_name, M.INOUT(\"\"))[0]\n if not global_name or not global_name.startswith(prefix):\n break\n suffix = global_name[len(prefix):-1]\n parts = suffix.split(\",\")\n idx_name = parts[0][1:-1]\n idx_table = parts[1]\n idx_columns = parts[2:]\n index = Index(idx_name, idx_table, idx_columns)\n i.append(index)\n\n # A second list, gives indices for a field\n columns = {}\n for idx in i:\n for c in idx.columns:\n columns[c] = 1\n\n # Now trawl the listed columns in the data dictionary, and load their\n # cross references.\n cr_names = {}\n for c in columns.keys():\n idx_root = M.Globals[\"^DD\"][self.fileid][c][1]\n if not idx_root[0].exists():\n continue\n for cr_id, val in idx_root.keys_with_decendants():\n if float(cr_id) > 0:\n cr_header = idx_root[cr_id][0].value\n parts = cr_header.split(\"^\")\n if len(parts) == 2 and parts[1]: # if more than 2 parts, assume MUMPs trigger\n f = cr_names.get(parts[1], list())\n f.append(c)\n cr_names[parts[1]] = f\n\n # Now, just delete items from the index list if they are not in cr_names\n self._indices = []\n for index in i:\n cr = cr_names.get(index.name)\n if cr:\n # verify columns - lots of errors in real systems\n if len(cr) == len(index.columns):\n invalid = False\n for c in cr:\n if c not in index.columns:\n invalid = True\n continue\n if not invalid:\n self._indices.append(index)\n\n return self._indices", "def get_34index_list(self):\n msk = self.load_mask()\n return [i for (i,v) in enumerate(msk) if v==1]", "def get_index_data():\n indexTickers = ['^DJI', '^RUA', '^GSPC', '^IXIC', '^SZSA', '^XCI', '^MSH']", "def _load_split_indices(self):\n split_file = self.SPLITS.get(self.split)\n indices_file = self._filepath(split_file)\n\n with open(indices_file) as txt_file:\n idx_data = [int(i) for i in txt_file.readline().split()]\n\n return idx_data", "def get_indices(self):\n\n def query(rel): \n return \"\"\"SELECT pg_class.relname, pg_index.indkey\n FROM pg_class, pg_index\n WHERE (pg_index.indexrelid = pg_class.oid)\n AND (pg_index.indrelid = (SELECT pg_class.oid FROM pg_class WHERE pg_class.relname = \\'{}\\'));\n \"\"\".format(rel)\n\n rels = tpch.schema.keys()\n idxs = dict.fromkeys(rels)\n\n with self.tpch_cxn.cursor() as curs:\n for rel in rels:\n curs.execute(query(rel))\n idxs_ = curs.fetchall()\n idxs_ = dict(idxs_) # index -> index keys \n \n # TODO this can be done cleanly in 
query\n # pg_index.indkey is a SQL array of attributes indices in their respective tables\n split=lambda attrs: attrs.split() \n cast=lambda attrs: list(map(lambda attr: int(attr)-1, attrs))\n invertindex=lambda attrs: list(np.array(schema[rel])[attrs])\n\n attrs = idxs_.values() \n attrs = list(map(split, attrs))\n attrs = list(map(cast, attrs))\n attrs = list(map(invertindex, attrs))\n\n idxs_ = {key : attrs[i] for i, key in enumerate(idxs_.keys())}\n idxs[rel] = idxs_\n return idxs", "def get_index_list(self, attached=\"main\"):\n self._check_connection()\n if attached == \"main\":\n request = \"\"\" SELECT name,tbl_name,sql\n FROM (SELECT * FROM sqlite_master UNION ALL SELECT * FROM sqlite_temp_master) AS temptbl\n WHERE type='index' ORDER BY name;\"\"\"\n else:\n request = \"\"\" SELECT name,tbl_name,sql\n FROM (SELECT * FROM %s.sqlite_master) AS temptbl\n WHERE type='index' ORDER BY name;\"\"\" % attached\n select = self._connection.execute(request)\n\n exp = re.compile(\"[(]([a-zA-Z0-9_,]+)[)]\")\n res = []\n for a, b, c in select:\n fi = exp.findall(c)\n if len(fi) != 1:\n raise DBException( # pragma: no cover\n \"Unable to extract index fields from %r\" % c)\n fi = tuple(s.strip() for s in fi[0].split(\",\"))\n res.append((a, b, c, fi))\n select.close()\n #self.LOG (\"number of indices \", len (res))\n select = res\n\n res = []\n if attached == \"main\":\n res = select\n else:\n for el in select:\n res.append((el[0], attached + \".\" + el[1], el[2], el[3]))\n #self.LOG (\"number of indices \", len (res))\n\n if attached == \"main\":\n attach = self.get_attached_database_list()\n for a in attach:\n if a in (\"main\", \"temp\"):\n continue\n r = self.get_index_list(a)\n res.extend(r)\n\n return res", "def _load_image_set_index(self):\n image_index = []\n image_set_file = self.data_dir \\\n + \"/ImageSets/{}.txt\".format(self.mode)\n\n assert os.path.exists(image_set_file), \\\n 'Path does not exist: {}'.format(image_set_file)\n with open(image_set_file, 'r') as f:\n for line in f.readlines():\n image_index.append(line.strip())\n return image_index", "def get_rep_mol_indexes():\n f = open(FILE_WITH_REP_MOL_IDXS, \"r\")\n rd = csv.reader(f)\n mols = rd.next()\n f.close()\n mol_idxs = [int(i) - 1 for i in mols]\n os.unlink(FILE_WITH_REP_MOL_IDXS)\n return mol_idxs", "def _load_image_set_index(self, anno_filepath):\n # Check\n assert os.path.exists(anno_filepath), \\\n 'Path does not exist: {}'.format(anno_filepath)\n # Open and read\n with open(anno_filepath) as f:\n # format: imgidx x1 y1 x2 y2 label_list\n # whre label list look like this: 0 0 0 0 1 0 0 (assume here has six action classes)\n image_index = [x.strip().split()[0] for x in f.readlines()]\n # \n return image_index", "def _load_image_set_index(self):\n image_set_file = os.path.join(self._data_path, self._image_set + '.txt')\n assert os.path.exists(image_set_file), \\\n 'Path does not exist: {}'.format(image_set_file)\n with open(image_set_file) as f:\n image_index = [x.rstrip('\\n') for x in f.readlines()]\n return image_index", "def inidices(self):\n return [Index(name, index) for name, index in self._data['indices'].iteritems()]", "def major_load_list():\n track_event(\"DATA_INDEX_MAJOR_META\")\n uri = \"major-idx/list\"\n js = request_with_retry(uri, None)\n js = js.decode()\n idx = json.loads(js)\n return idx", "def get_indices(self):\r\n return self._indices", "def get_list_index(self):\r\n _debug('simq03b_api.get_list_index')\r\n \r\n s = self.query('LIST:IND?')\r\n return int(s)", "def listIndices(online: bool = 
False) -> list:\n return list(_get_indices(online).keys())", "def get_list_index(self):\r\n s = self.query('LIST:IND?')\r\n return int(s)", "def get_list_index(self):\r\n s = self.query('LIST:IND?')\r\n return int(s)", "def get_unread_indexes(self):\n pass", "def index(self):\n path = self.path.format('index')\n \n with open(path, 'r', newline='') as file:\n l = list(csv.reader(file))\n \n index = [v for _ in l for v in _]\n index = dict((v, i) for (i, v) in enumerate(index))\n \n return index", "def get_pulling_indices(self, weight):\n pass", "def _load_image_set_index(self):\n image_set_file = os.path.join(self._data_path, 'ImageSets', 'Main',\n self._image_set + '.txt')\n assert os.path.exists(image_set_file), \\\n 'Path does not exist: {}'.format(image_set_file)\n with open(image_set_file) as f:\n image_index = [x.strip() for x in f.readlines()]\n return image_index", "def getSFNTResIndices(path):\n\ttry:\n\t\tresref = MyOpenResFile(path)\n\texcept Res.Error:\n\t\treturn []\n\tRes.UseResFile(resref)\n\tnumSFNTs = Res.Count1Resources('sfnt')\n\tRes.CloseResFile(resref)\n\treturn list(range(1, numSFNTs + 1))", "def cloud_index():\n import alltheitems.cloud\n return alltheitems.cloud.index()", "def read_index_data(data_path):\n index_keywords = []\n with open(data_path) as data:\n for line in data:\n index_keywords.append(line.rstrip())\n return index_keywords", "def get_indexes(self, dataset):\n\n indexs = [random.randint(0, len(dataset)) for _ in range(3)]\n return indexs", "def test_indices(self, circuit):\n gate = jet.GateFactory.create(\"H\")\n circuit.append_gate(gate, wire_ids=[0])\n assert list(circuit.indices([0])) == [\"0-1\"]\n assert list(circuit.indices([1, 2, 3])) == [\"1-0\", \"2-0\", \"3-0\"]", "def get_indices(client):\n\n return [key for (key, _) in client.indices.get('*').items()]", "def get_indexes_for_key (self,key):\r\n\r\n if self.using_database:\r\n aprint('GETTING INDEXES FOR KEY')\r\n value_tuple = (notebookname,key,)\r\n db_cursor.execute(\"SELECT note_index\"\r\n +\" FROM keys_to_indexes\"\r\n +\" WHERE notebook=? 
and keyword=?;\",\r\n value_tuple)\r\n\r\n fetched = db_cursor.fetchall()\r\n if fetched:\r\n return {index[0].strip() for index in fetched}\r\n return set()\r\n\r\n\r\n return self.key_dict[str(key)]", "def _load_image_set_index(self):\n image_set_file = os.path.join(self._data_path, 'ImageSets',\n self._image_set + '.txt')\n assert os.path.exists(image_set_file), \\\n 'Path does not exist: {}'.format(image_set_file)\n with open(image_set_file) as f:\n image_index = [x.strip() for x in f.readlines()]\n return image_index", "def print_ids(self):\n ids = [self.data[x][DATA_ID_INDEX] for x in self.index_list]\n print(ids)", "def get_index(path):\n with open(path,'r') as f:\n zz = f.readlines()\n return [index.split(\"\\n\")[0] for index in zz]", "def pypi_indexes() -> IndexesDefinition:\n return {}", "def getidx(self, ind: Union[str, int]) -> List[Any]:\n output = []\n for data in self.data:\n output.append(data[ind])\n return output", "def getIDs():", "def list_indexes(self):\n return AlgoliaUtils_request(self.headers, self.read_hosts, \"GET\", \"/1/indexes/\", self.timeout)", "def _readIdxBuf(self):\n base = self.fres.bufferSection['buf_offs']\n self.idx_buf = self.fres.read(self.idx_fmt,\n pos = self.header['face_offs'] + base,\n count = self.header['idx_cnt'])\n\n for i in range(self.header['idx_cnt']):\n self.idx_buf[i] += self.header['visibility_group']", "def get_custom_indices():\n return [i for i, val in enumerate(all_topics) if val[2] == \"1\"]", "def indices(self):\n _indices = []\n for h in self.miller.indices():\n _indices.append(self.indices_hkl(*h)[0])\n return _indices", "def getAllIndex(ldata, fldata):\n\treturn list(map(lambda e : fldata.index(e), ldata))", "def exercise_indexes():\n print(exercise_indexes.__doc__)\n print(\"The indexes of 'data' are:\", data.index)\n print(data, \"\\n\")\n print(\"Changing the indexes of 'data'\")\n print(data.reindex([2, 0, 1]), \"\\n\")\n print(\"Changing the indexes of 'data' randomly\")\n print(data.reindex(np.random.permutation(data.index)))", "def ReadIndex_binary(indexfile, isPrintWarning = False):#{{{\n# return (indexList, headerinfo, dbfileindexList)\n indexList = []\n indexFileHeaderText = []\n size_indexfile = os.path.getsize(indexfile)\n cntReadByte = 0\n try:\n fpin=open(indexfile, \"rb\")\n vI = array('I')\n vI.fromfile(fpin,1)\n cntReadByte += vI.itemsize\n dumpedtext = fpin.read(vI[0])\n cntReadByte += vI[0]\n\n strs = dumpedtext.split(\"\\n\")\n origdbname = \"\"\n origversion = \"\"\n origext = \"\"\n origprefix = \"\"\n for line in strs:\n if not line or line[0] == \"#\":\n continue\n ss=line.split()\n if ss[0] == \"DEF_DBNAME\":\n if len(ss)>=2:\n origdbname=ss[1]\n elif ss[0] == \"DEF_VERSION\":\n if len(ss)>=2:\n origversion=ss[1]\n elif ss[0] == \"DEF_EXTENSION\":\n if len(ss)>=2:\n origext=ss[1]\n elif ss[0] == \"DEF_PREFIX\":\n if len(ss)>=2:\n origprefix=ss[1]\n if isPrintWarning:\n if origversion == \"\": \n msg = \"{}: Warning! No version info in the index file {}\"\n print(msg.format(sys.argv[0],indexfile), file=sys.stderr)\n elif origversion != version:\n msg = \"{}: Warning! Version conflicts. 
\"\\\n \"Version of the index file {} ({}) \"\\\n \"!= version of the program ({})\"\n print(msg.format(sys.argv[0], indexfile,\n origversion, version), file=sys.stderr)\n\n headerinfo = (origdbname, origversion, origext, origprefix)\n #read in other information\n vI = array('I')\n vI.fromfile(fpin,1)\n cntReadByte += vI.itemsize\n\n dumpedidlist=fpin.read(vI[0])\n cntReadByte += vI[0]\n\n idlist = dumpedidlist.split(\"\\n\")\n vI=array('I')\n vI.fromfile(fpin,1)\n cntReadByte += vI.itemsize\n\n numRecord = vI[0]\n if numRecord != len(idlist):\n msg = \"{}: numID ({}) != numRecord ({}) for indexfile {} \"\n print(msg.format(sys.argv[0], len(idlist),\n numRecord, indexfile), file=sys.stderr)\n\n sizeRecord_I = (array('B').itemsize + array('I').itemsize +\n array('I').itemsize)\n sizeRecord_L = (array('B').itemsize + array('L').itemsize +\n array('I').itemsize)\n sizeRecord = int(mybase.FloatDivision(size_indexfile - cntReadByte, numRecord))\n if abs(sizeRecord - sizeRecord_I) < abs(sizeRecord - sizeRecord_L):\n vIarray=[array('B'), array('I'), array('I')]\n else:\n vIarray=[array('B'), array('L'), array('I')]\n for i in range(3):\n vIarray[i].fromfile(fpin,numRecord)\n\n lastDBFileIndex = vIarray[0][numRecord-1]\n dbfileindexList = list(range(lastDBFileIndex+1))\n\n indexList.append(idlist)\n for i in range(3):\n indexList.append(vIarray[i])\n fpin.close()\n return (indexList, headerinfo, dbfileindexList)\n except IOError:\n msg = \"Failed to read index file {} in function {}\"\n print(msg.format(indexfile, sys._getframe().f_code.co_name), file=sys.stderr)\n return (None, None, None)", "def enumerateIndexes(self):\n return (\n ('UID', 'FieldIndex'),\n ('id', 'FieldIndex'),\n ('Title', 'ZCTextIndex'),\n ('Themes', 'KeywordIndex'),\n ('Description', 'ZCTextIndex'),\n ('Phone', 'ZCTextIndex'),\n ('Email', 'ZCTextIndex'),\n )", "def get_data_idx(self)->list:\n return self.__data_idx", "def get_5index_list(self):\n msk = self.load_mask()\n return [i for (i,v) in enumerate(msk) if v==0]", "def lc_index(*args):\n index = []\n x = check_lc_data(args[0])\n i = 0\n for line in args[0].Data.LCData.lc_data:\n i += 1\n if line != x[i - 1]:\n index.append(0)\n elif line == x[i - 1]:\n index.append(i)\n return index", "def __loadIndex( self ):\n\n assert self.mCreateMode == False, \"asked to read from database opened for writing\"\n\n if self.mMethod == \"uncompressed\":\n self.mDatabaseFile = open( self.mDbname, \"r\" )\n elif self.mMethod == \"dictzip\":\n import dictzip\n self.mDatabaseFile = dictzip.GzipFile( self.mNameDb)\n elif self.mMethod == \"lzo\":\n import lzo\n self.mDatabaseFile = Uncompressor( self.mNameDb, lzo.decompress )\n elif self.mMethod == \"gzip\":\n self.mDatabaseFile = Uncompressor( self.mNameDb, gzip_demangler )\n elif self.mMethod == \"zlib\":\n self.mDatabaseFile = Uncompressor( self.mNameDb, zlib.decompress )\n elif eslf.mMethod == \"bz2\":\n self.mDatabaseFile = bz2.BZ2File( self.mNameDb )\n elif self.mMethod == \"debug\":\n self.mDatabaseFile = Uncompressor( self.mDbname + \".debug\", lambda x: x ) \n\n self.mIndex = {}\n\n for line in open(self.mNameIndex, \"r\"):\n\n if line.startswith(\"#\"): continue\n data = line[:-1].split(\"\\t\")\n\n # index with random access points\n if len(data) > 4:\n (identifier, pos_id, block_size, lsequence) = bytes(data[0]), int(data[1]), int(data[2]), int(data[-1])\n points = map(int, data[3:-1])\n self.mIndex[int(identifier)] = (pos_id, block_size, lsequence, points)\n else:\n (identifier, pos_id, pos_seq, lsequence) = bytes(data[0]), 
int(data[1]), int(data[2]), int(data[-1])\n self.mIndex[int(identifier)] = (pos_id, pos_seq, lsequence) \n \n self.mIsLoaded = True", "def getidx(self, ind: Union[str, int]) -> List[Any]:\n output = []\n for data in self.data:\n output.append(data[ind])\n\n return output", "def get_index_data(dir):\n db = IndexDb(dir)\n result = db.read_from_index()\n return result", "def print_indices(self):\n # Putting the param in the endpoint here because why not\n endpoint = \"/_cat/indices?v\"\n url = self.base_url + endpoint\n r = requests.get(url, headers=self.headers, verify=False)\n r.raise_for_status()\n print(r.text)\n return", "def get_data_as_indices(self, file_name):\n X, Y = [],[]\n org_X, org_Y = [], []\n\n for (words, tags) in read_conll_file(file_name):\n word_indices, word_char_indices = self.get_features(words)\n tag_indices = [self.tag2idx.get(tag) for tag in tags]\n X.append((word_indices,word_char_indices))\n Y.append(tag_indices)\n org_X.append(words)\n org_Y.append(tags)\n return X, Y #, org_X, org_Y - for now don't use", "def load_full_intensifiers():\n return list(pd.read_csv(INTF_ADVS_FULL_SPREADSHEET, index_col=0).index.values)", "def indices(self):\n return self.index.indices", "def get_ids():\n # Filename for SALAMI IA metadata\n metadata_file = os.path.join(\n dpath.SALAMI, 'metadata', 'id_index_internetarchive.csv')\n\n ids = []\n\n with open(metadata_file, \"r\") as rwc_file:\n reader = csv.reader(rwc_file)\n next(reader) #skip header\n for row in reader:\n ids.append(int(row[0]))\n\n return ids", "def InterfaceIndex(self) -> int:", "def InterfaceIndex(self) -> int:", "def get_indices():\n indices = requests.get(\"http://\"+ __srchost__ + \":9200/_stats\").json()['_all']['indices'].keys()\n return indices", "def index(self) -> int:", "def fetchindexed(ad):\n\n # Add the macro to the list of recognized macros.\n ad.AddMacro('.fetchindexed', 3, [ ['','symbol'] ]);\n\n # Define the macro functionality.\n def emitFunction(ad,fp,argument):\n (addr,ixBank,bankName) = ad.Emit_GetAddrAndBank(argument[0]);\n ad.EmitPush(fp,addr,ad.Emit_String(argument[0]['value']),argument[0]['loc']);\n ad.EmitOpcode(fp,ad.InstructionOpcode('+'),'+');\n ad.EmitOpcode(fp,ad.specialInstructions['fetch'] | ixBank,'fetch '+bankName);\n\n ad.EmitFunction['.fetchindexed'] = emitFunction;", "def get_indexes(self, dataset: BaseDataset) -> int:\n\n indexes = [random.randint(0, len(dataset)) for _ in range(3)]\n return indexes", "def _load_image_set_index(self):\n # Example path to image set file:\n # self._data_path + /ImageSets/val.txt\n image_set_file = os.path.join(self._data_path, 'ImageSets',\n self._image_set + '.txt')\n assert os.path.exists(image_set_file), \\\n 'Path does not exist: {}'.format(image_set_file)\n with open(image_set_file) as f:\n image_index = [x.strip() for x in f.readlines()]\n return image_index", "def _load_image_set_index(self):\n # Example path to image set file:\n # self._devkit_path + /VOCdevkit2007/VOC2007/ImageSets/Main/val.txt\n image_set_file = os.path.join(self.cfg.file_path, 'ImageSets', 'Main',\n self.cfg.train_set + '.txt')\n assert os.path.exists(image_set_file), \\\n 'Path does not exist: {}'.format(image_set_file)\n\n with open(image_set_file) as f:\n image_index = []\n for x in f.readlines():\n xdata = x.strip().split(' ')\n if len(xdata) == 1 or xdata[-1] == '1':\n image_index.append(xdata[0])\n # image_index = [x.strip() for x in f.readlines()]\n return image_index", "def hoggar_indices():\n return list(product([0,1], repeat=6))", "def read_idx_2_label():\n 
with open('../Data/imagenet_class_index.json') as f:\n dictionary = json.load(f)\n return dictionary", "def reader(list, index_list):\r\n\tnewlist = []\r\n\tfor i in index_list:\r\n\t\tnewlist.append(list[i])\r\n\treturn newlist", "def indices(online: bool = False) -> dict:\n return _get_indices(online)", "def _load_image_set_index(self):\n image_index = self._load_annotations().keys()\n return image_index", "def get_items_to_index(self):\n\t\treturn []", "def mainIndices(self):\n return self.i1, self.i2", "def index_object(idxs=None):", "def getFeaturesIndices(self, tag, history, in_data=True):\n pass", "def _load_image_set_index(self):\n # Example path to image set file:\n # self._devkit_path + /VOCdevkit2007/VOC2007/ImageSets/Main/val.txt\n image_set_file = os.path.join(self._data_path,\n self._image_set + '.txt')\n assert os.path.exists(image_set_file), 'Path does not exist: {}'.format(image_set_file)\n with open(image_set_file) as f:\n image_index = [x.strip() for x in f.readlines()]\n return image_index", "def getIndices(self):\r\n return self._indices", "def set_list_index(self, n=0):\r\n #We have to be in step mode for being able to set the list index\r\n self.write('SOUR1:LIST:MODE STEP') #Have to be in STEP mode in order to select the index\r\n self.write('SOUR1:LIST:IND '+ str(int(n)) )", "def set_list_index(self, n=0):\r\n #We have to be in step mode for being able to set the list index\r\n self.write('SOUR1:LIST:MODE STEP') #Have to be in STEP mode in order to select the index\r\n self.write('SOUR1:LIST:IND '+ str(int(n)) )", "def get_cached_indices(self, start=None, end=None):\n params = {}\n indices = [\n y[\"sample_identifier\"]\n for y in self.mongo_database.cache.find(\n params, {\"_id\": 0, \"sample_identifier\": 1}\n )[start:end]\n ]\n return np.unique(indices).tolist()", "def get_raw_index(self, uid: str) -> Dict[str, Any]:\n return self.http.get(f'{self.config.paths.index}/{uid}')", "def fetch_all(): \n client, index_name = connection_es()\n res = client.search(index = index_name+\"*\")\n return res", "def vir_indices(self):\n indices = []\n for index,item in enumerate(self):\n if item==0:\n indices.append(index)\n return indices", "def tc_index(*args):\n index = []\n x = check_tc_data(args[0])\n i = 0\n for line in args[0].Data.TCData.tc_data:\n i += 1\n if line != x[i - 1]:\n index.append(0)\n elif line == x[i - 1]:\n index.append(i)\n return index", "def indices(self):\n return range(len(self))", "def cell_list(self):\n lst_of_idx = []\n height = self.__height\n width = self.__width\n for i in range(width):\n for j in range(height):\n lst_of_idx.append((i,j))\n lst_of_idx.append((3,7))\n return lst_of_idx", "def ascii_graph_index():\n return MandelbrotController.invoke(OUTPUT_DIRECTORY)", "def indexes(self):\r\n\r\n\r\n if not self.usesequence:\r\n\r\n if len(self.get_all_indexes()) != len(self.sortedindexes) \\\r\n or self.indexchanged or not self.sortedindexes:\r\n self.indexchanged = False\r\n self.sortedindexes = sorted(self.get_all_indexes(),\r\n key=lambda x_temp: Index(x_temp))\r\n return self.sortedindexes\r\n return self.sortedindexes\r\n else:\r\n if self.indexchanged:\r\n self.sortedindexes = self.default_dict['indexlist'].strings()\r\n return self.sortedindexes\r\n else:\r\n return self.sortedindexes", "def get_all_index_builders(self):\n try:\n conn = psycopg2.connect(\"dbname='{0}'\".format(DATABASE))\n cur = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)\n cur.execute(\"SELECT * FROM host WHERE type = 'Index Builder';\")\n results = 
cur.fetchall()\n cur.close()\n return results\n except Exception as e:\n print(e)", "def get_list_comp_ind(gpu):\n if gpu not in [0, 1, 2, 3, -1]:\n print('Your gpu index is not correct, check again')\n quit()\n data_dir = '/home/sr365/Bruce/cvdata'\n ind_list = []\n for file in os.listdir(data_dir):\n #print(file)\n # Check if this is a comp file\n if not file.endswith('.npy') or (not file[:-4].isdigit()):\n print('This file is {}, does not satisfy requirement, continue'.format(file))\n continue\n ind = int(file[:-4])\n #print('current comp ind is {}'.format(ind))\n ind_list.append(ind)\n #print(ind_list)\n length = len(ind_list)\n print(length)\n # If GPU == -1, return all list values\n if gpu == -1:\n return ind_list\n gpu_specific_list = ind_list[gpu*int(length / 4):(gpu+1)*int(length / 4)]\n print(len(gpu_specific_list))\n return gpu_specific_list", "def _read_index_slice(self, *args, **kwargs): # real signature unknown\n pass", "def get_pixel_indices(self, lats, lons):\n raise NotImplementedError(\"MapBase.get_pixel_indices()\")", "def ret_layer_index(file):\n\t\tnames={}\n\t\tfor i in range(len(file[0])):\n\t\t\tprint(file[0][i][0][0][0])\n\t\t\tnames[file[0][i][0][0][0][0]]=i\n\t\tprint(\"Success layer_index\")\n\t\treturn names", "def _get_indices_1(image_set, num_labels=2, num_protected=2):\r\n indices = [[[] for _ in range(num_protected)] for _ in range(num_labels)]\r\n for _, label, cluster, index in image_set:\r\n indices[label][cluster].append(index)\r\n\r\n return indices", "def get_indexes(self):\n indexes = []\n for c in self.components:\n indexes.extend(c.get_indexes())\n return indexes", "def ReadIndex_text(indexfile, isPrintWarning = False):#{{{\n# return (indexList, headerinfo, dbfileindexList)\n indexList = []\n idList = []\n v1 = array('B') # dbfile index\n v2 = array('L') # offset\n v3 = array('I') # block size\n apd1 = idList.append\n apd2 = v1.append\n apd3 = v2.append\n apd4 = v3.append\n indexFileHeaderText = []\n origdbname=\"\"\n origversion=\"\"\n origext=\"\"\n origprefix=\"\"\n try:\n\n hdl = mybase.ReadLineByBlock(indexfile)\n lines = hdl.readlines()\n while lines != None:\n for line in lines:\n if not line or line[0] == \"#\":\n continue\n strs = line.split()\n if strs[0] == \"DEF_DBNAME\":\n if len(strs)>=2:\n origdbname=strs[1]\n elif strs[0] == \"DEF_VERSION\":\n if len(strs)>=2:\n origversion=strs[1]\n elif strs[0] == \"DEF_EXTENSION\":\n if len(strs)>=2:\n origext=strs[1]\n elif strs[0] == \"DEF_PREFIX\":\n if len(strs)>=2:\n origprefix = strs[1]\n else:\n apd1(strs[0])\n apd2(int(strs[1]))\n apd3(int(strs[2]))\n apd4(int(strs[3]))\n lines = hdl.readlines()\n\n indexList.append(idList)\n indexList.append(v1)\n indexList.append(v2)\n indexList.append(v3)\n\n headerinfo = (origdbname, origversion, origext, origprefix)\n\n numRecord = len(idList)\n lastDBFileIndex = v1[numRecord-1]\n dbfileindexList = list(range(lastDBFileIndex+1))\n\n if isPrintWarning:\n if origversion == \"\":\n msg = \"{}: Warning! No version info in the index file {}\"\n print(msg.format(sys.argv[0],indexfile), file=sys.stderr)\n elif origversion != version:\n msg = \"{}: Warning! Version conflicts. 
\"\\\n \"Version of the index file {} ({}) \"\\\n \"!= version of the program ({})\"\n print(msg.format(sys.argv[0],indexfile,\n origversion, version), file=sys.stderr)\n return (indexList, headerinfo, dbfileindexList)\n except IOError:\n msg = \"Failed to read index file {} in function {}\"\n print(msg.format(indexfile, sys._getframe().f_code.co_name), file=sys.stderr)\n return (None, None, None)", "def test_tile_read_int_index():\n bounds = (\n -11663507.036777973,\n 4715018.0897710975,\n -11663487.927520901,\n 4715037.199028169,\n )\n with rasterio.open(S3_PATH) as src_dst:\n arr, mask = reader.part(src_dst, bounds, 16, 16, indexes=1)\n assert arr.shape == (1, 16, 16)\n assert mask.shape == (16, 16)", "def ids_to_index(self, ids):\n index = (ids[0]*self._div + ids[1])*self.batch_per_file +ids[2]\n return(index)", "def _create_img_id_to_idx(self):\n with h5py.File(self.image_features_path, 'r') as features_file:\n coco_ids = features_file['ids'][()]\n coco_id_to_index = {id: i for i, id in enumerate(coco_ids)}\n return coco_id_to_index", "def geneIds(self):\n\t\treturn self._dataframe.index.tolist()", "def idd_index(self):\n if self._idd_index is None:\n bunchdt, block, data, commdct, idd_index, versiontuple = idfreader1(\n self.idfname, self.iddname, self, commdct=None, block=None\n )\n self._block = block\n self._idd_info = commdct\n self._idd_index = idd_index\n self._idfobjects = bunchdt\n self._model = data\n self._idd_version = versiontuple\n return self._idd_index", "def get_index(self, *args, **dargs):\n pass" ]
[ "0.64467746", "0.6441595", "0.6411123", "0.63643146", "0.6275804", "0.6236173", "0.6204214", "0.6117496", "0.6063966", "0.6062045", "0.60506487", "0.59956235", "0.59590757", "0.59548295", "0.5933144", "0.59210235", "0.59165895", "0.5916158", "0.59155154", "0.588967", "0.588967", "0.58857876", "0.58673733", "0.5852506", "0.5824376", "0.58061713", "0.57989115", "0.5776528", "0.5760583", "0.57564014", "0.57562536", "0.5753244", "0.5736395", "0.56965214", "0.5689962", "0.56724674", "0.5661653", "0.5660726", "0.5658834", "0.565878", "0.5638217", "0.56351405", "0.56250155", "0.5624576", "0.5623928", "0.5623851", "0.56197876", "0.55968964", "0.55948716", "0.5594427", "0.5594327", "0.55931777", "0.559234", "0.557944", "0.5574476", "0.5565033", "0.5559461", "0.5557373", "0.5557373", "0.5555862", "0.5550844", "0.55468225", "0.55451113", "0.5543386", "0.553907", "0.5537786", "0.55318373", "0.55300033", "0.55186105", "0.5513191", "0.5511165", "0.5508972", "0.55075926", "0.5495908", "0.5495764", "0.5487836", "0.548545", "0.548545", "0.54854184", "0.5482094", "0.5481723", "0.5479118", "0.5476585", "0.54750645", "0.54739773", "0.54659235", "0.5465006", "0.5451255", "0.5449106", "0.54399997", "0.5430566", "0.5424927", "0.54215187", "0.54211706", "0.5419474", "0.54019177", "0.5399567", "0.53974366", "0.5395399", "0.5388056", "0.5382585" ]
0.0
-1
Size of the library
def __len__(self): return len(self.content)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __get_size(self):\n\t\treturn 4*self.version + 17", "def get_size(self):", "def getSize(self) -> long:\n ...", "def get_size(self):\n ...", "def getSize(self) -> int:\n ...", "def getSize(self) -> int:\n ...", "def calc_size(self):\r\n pass", "def size(self):\n return self.new_dll.length", "def getSize(self):\n return 1", "def getSize(self):\n return 1", "def __sizeof__(self):\r\n\r\n S = 0 # Full size of the object\r\n if self.loss_list is not None:\r\n for value in self.loss_list:\r\n S += getsizeof(value)\r\n if self.meshsol_list is not None:\r\n for value in self.meshsol_list:\r\n S += getsizeof(value)\r\n if self.loss_index is not None:\r\n for key, value in self.loss_index.items():\r\n S += getsizeof(value) + getsizeof(key)\r\n S += getsizeof(self.logger_name)\r\n if self.axes_dict is not None:\r\n for key, value in self.axes_dict.items():\r\n S += getsizeof(value) + getsizeof(key)\r\n S += getsizeof(self.Pstator)\r\n S += getsizeof(self.Protor)\r\n S += getsizeof(self.Pmagnet)\r\n S += getsizeof(self.Pprox)\r\n S += getsizeof(self.Pjoule)\r\n if self.coeff_dict is not None:\r\n for key, value in self.coeff_dict.items():\r\n S += getsizeof(value) + getsizeof(key)\r\n return S", "def size(self):\n pass", "def size(self):\n pass", "def size(self):\n pass", "def size (self):\n\t\timport struct\n\t\treturn struct.calcsize (self.struct)", "def _size(self):\n raise NotImplementedError", "def size(self):\r\n return self.info().size", "def get_size(self):\n raise NotImplementedError", "def get_size(self):\n return self.__size", "def size(self):\n return self.__size", "def size(self):\r\n return self._size", "def getSize(self):\n assert False", "def size(self) -> int:", "def getSize(language=None):", "def size(self):", "def size(self):\n return self.__size", "def size(self):\n return self._size", "def __get_size(self):\n return self.__size", "def size_bytes(self):\n size_words = self.size_words()\n if size_words is None:\n return None\n return size_words * intnat.sizeof", "def get_size(self):\r\n return self.__size", "def s_size(self) -> Size:\n pass", "def _nbytes(self, deep: bool = False) -> int:\n # for implementations with no useful getsizeof (PyPy)\n objsize = 24\n\n level_nbytes = sum(i.memory_usage(deep=deep) for i in self.levels)\n label_nbytes = sum(i.nbytes for i in self.codes)\n names_nbytes = sum(getsizeof(i, objsize) for i in self.names)\n result = level_nbytes + label_nbytes + names_nbytes\n\n # include our engine hashtable\n result += self._engine.sizeof(deep=deep)\n return result", "def size(self): \r\n pass", "def size(self):\n\t\treturn self._size", "def size(self):\r\n return self.size.data", "def get_size(self):\r\n return self._size", "def getsize(self):\n return self.__size", "def get_insternal_size(self):\n return (\n sys.getsizeof(self.theta) +\n sys.getsizeof(self.num_buckets) +\n sys.getsizeof(self.k) +\n sys.getsizeof(self.fp_size) +\n sys.getsizeof(self.max_iter) +\n sys.getsizeof(self.bucket_size)\n )", "def __sizeof__(self): # real signature unknown; restored from __doc__\n pass", "def size(self):\n return self.__size", "def size(self):\n return self.__size", "def size(self):\n return self.__size", "def size(self):\n return self.__size", "def size(self):\n return self.__size", "def size(self):\n return self.size_number", "def get_size(self):\n return self.__size", "def get_size(self):\n return self.__size", "def get_size(self):\n\t\tpath =os.path.join(self.path, self.init_str)\n\t\ttry:\n\t\t\tself.size = os.path.getsize(path)\n\t\texcept 
:\n\t\t\tself.size = 0", "def get_size(self):\n return self.size", "def get_size(self):\n return self.size", "def get_size(self):\n return self.size", "def size(self):\n size = 0\n size += self.data.size * sys.getsizeof(self.data)\n return size / 1024.0 / 1024.0 / 1024.0", "def get_size(self):\r\n\r\n return self._size", "def getsize(self):\n try :\n return self.size\n except:\n raise ReferenceError", "def size(self):\r\n return self._size", "def size_bytes(self) -> int:\n return pulumi.get(self, \"size_bytes\")", "def pack_size(self) -> int:\n return self._pack_size", "def getSize(self):\n return self.__size", "def size(self):\n return self.size", "def size(self):\n return self.size", "def size(self):\n return self.size", "def size(self) -> int:\n size = self.da.length()\n return size", "def get_size(self):\n return self._size", "def get_size(self):\n return self._size", "def size(self):\r\n return self.__length", "def getSize(self):\n\n return self.size", "def size(self):\n return self._size", "def size(self):\n return self._size", "def size(self):\n return self._size", "def size(self):\n return self._size", "def size(self):\n return self._size", "def size(self):\n return self._size", "def size(self):\n return self._size", "def size(self):\n return self._size", "def size(self):\n return self._size", "def size(self):\n return self._size", "def size(self):\n return self._size", "def size(self):\n return self._size", "def size(self):\n return self._size", "def size(self):\n return self._size", "def size(self):\n return self._size", "def size(self):\n return self._size", "def size(self):\n return self._size", "def size(self):\n return self._size", "def size():\n return int(os.environ['WORLD_SIZE'])", "def size() -> int:\n ...", "def update_size(self):\n return 3 + self.memory_unit_size", "def __len__(self):\n return self.size_", "def size(self):\n raise NotImplementedError", "def size(self):\r\n raise NotImplementedError", "def get_size(self) -> int:\n return self.__size", "def size(self):\n return self.__length", "def __len__(self):\n return int(self.size._value)", "def get_size(self):\n return (\n sys.getsizeof(self.children) +\n sys.getsizeof(self.parent) +\n sys.getsizeof(self.dataset_id) +\n sys.getsizeof(self.k) +\n self.filter.get_size()\n )", "def size(self):\n return self.data.size", "def getSize(self):\r\n return self.size", "def size(self):\n # Your implementation here", "def getsize(self):\n return os.path.getsize(self.path)", "def getSize(self):\n return self.bf.memory()", "async def size(self) -> int:", "def get_size(self):\n return self._data_size" ]
[ "0.80670005", "0.7656748", "0.76485074", "0.74939597", "0.7419034", "0.7419034", "0.74108195", "0.7401268", "0.73442686", "0.7325931", "0.7315922", "0.72330016", "0.72330016", "0.72330016", "0.72193474", "0.7187202", "0.71814", "0.7137367", "0.7112294", "0.70785505", "0.705915", "0.70406914", "0.7039407", "0.70293564", "0.7012447", "0.70078814", "0.6992316", "0.6986908", "0.6984256", "0.6983272", "0.6981556", "0.6946831", "0.69364524", "0.693278", "0.6918581", "0.6904323", "0.69022197", "0.69001335", "0.6896952", "0.6895359", "0.6895359", "0.6895359", "0.6895359", "0.6895359", "0.68916184", "0.6877433", "0.6877433", "0.68614435", "0.68583566", "0.68583566", "0.68583566", "0.68490106", "0.68424946", "0.68382025", "0.6820164", "0.68194675", "0.6813785", "0.6810381", "0.68037236", "0.68037236", "0.68037236", "0.6790706", "0.67899936", "0.67899936", "0.67896074", "0.67881185", "0.67805", "0.67805", "0.67805", "0.67805", "0.67805", "0.67805", "0.67805", "0.67805", "0.67805", "0.67805", "0.67805", "0.67805", "0.67805", "0.67805", "0.67805", "0.67805", "0.67805", "0.67805", "0.6775249", "0.6774947", "0.6774668", "0.6768291", "0.6764591", "0.6760451", "0.6756851", "0.6749277", "0.6735802", "0.673489", "0.67215383", "0.67088807", "0.6705359", "0.67039186", "0.6698051", "0.669453", "0.6688162" ]
0.0
-1
Make this object like a dictionary and load one or multiple filters
def __getitem__(self, name): with self as s: try: f = s._load_filter(name) except TypeError: f = [s._load_filter(k) for k in name] return f
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def filters(self):\n filters = IterDict()\n for key in self.FILTERS:\n filter = IterDict()\n filter_param = ((self.prefix or '') + '-' + key).strip('-')\n\n for value, display in self.fields[key].choices:\n choice = {}\n choice['value'] = value\n choice['display'] = display\n\n # These are raw values so they must come from data, and be\n # coerced to strings\n choice['active'] = str(value) == self.data.get(filter_param, '')\n\n params = copy.copy(self.data)\n # Filter by this current choice\n params[filter_param] = value\n choice['querystring'] = urllib.urlencode(params)\n # remove this filter\n params[filter_param] = ''\n choice['remove'] = urllib.urlencode(params)\n\n filter[value] = choice\n filters[key] = filter\n return filters", "def __init__(self, filters, use_include_order):\n self.filters = filters\n self.use_include_order = use_include_order", "def filters(self):\n return {\n 'dict_merge': do_merge,\n 'list_merge': do_list_merge,\n 'attrs': do_attrs,\n 'merge_mysql_privs': do_merge_mysql_privs,\n 'role': do_role,\n 'reduce': do_reduce,\n 'dict_join': do_dict_join,\n 'get': do_get,\n 'contains': do_contains,\n 'selectattrs': do_selectattrs,\n 'convert_integer': do_convert_integer,\n 'camel': do_camel\n }", "def get_filters(self):", "def filter(self, filter_dict):\n pass", "def _load_filter(self, *args, **kwargs):\n raise NotImplementedError", "def _set_filters(self, options):\n if options.keywords:\n self.filters[\"keywords\"] = string_to_list(options.keywords)\n if options.features:\n self.filters[\"features\"] = string_to_list(options.features)\n if options.authors:\n self.filters[\"authors\"] = string_to_list(options.authors)\n if options.version:\n self.filters[\"version\"] = options.version", "def filter(self, filters):", "def get_filters(self, saving):\n self.filter_entry_dict.clear()\n\n for entry, var in self.filter_entries_list:\n if (entry.get() != \"\") and (var.get() != \"\") and (not saving):\n self.filter_entry_dict[var.get()] = entry.get()\n elif saving and var.get() != \"\":\n self.filter_entry_dict[var.get()] = entry.get()", "def __init__(self) -> None:\r\n self.filters: list[Filter] = []", "def filters(self, value):\n if not isinstance(value, dict):\n raise TypeError(\"input must be a dictionary\")\n\n self._filters = value", "def build_filters(self, filters=None):\n if filters is None:\n filters = {}\n\n grouped = get_grouped_filters(filters)\n branch_filters = get_branch_filter(filters)\n orm_filters = super(StoryResource, self).build_filters(filters)\n orm_filters['grouped'] = grouped\n orm_filters['br_filter'] = branch_filters\n\n if 'content_type__in' in filters:\n orm_filters['content_type__in'] = [CONTENT_HYDRATE[f] for f in filters['content_type__in'].split(',')]\n\n return orm_filters", "def filter(self, *args, **kwargs):\n clone = self._clone()\n for f in args:\n clone.filter_obj.add_filter(f)\n for key, value in kwargs.items():\n clone.filter_obj.add_filter_param(key, value)\n return clone", "def load_filter_dict(reload=True):\n\n # Get location of filter.json\n json_directory = pathlib.Path(__file__).parent\n json_file = pathlib.Path.joinpath(json_directory, \"filters.json\")\n\n # Reload the filters JSON file if present and requested\n if reload and json_file.is_file():\n with open(json_file, \"r\") as fp:\n return json.load(fp)\n\n # Get html from main filter page, ft=4 ensures all filters are present\n hdr = {\n \"User-Agent\": \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) \"\n \"Chrome/23.0.1271.64 Safari/537.11\"\n 
}\n url = \"https://finviz.com/screener.ashx?ft=4\"\n req = urllib.request.Request(url, headers=hdr)\n with urllib.request.urlopen(req) as response:\n html = response.read().decode(\"utf-8\")\n\n # Parse html and locate table we are interested in.\n # Use one of the text values and get the parent table from that\n bs = BeautifulSoup(html, \"html.parser\")\n filters_table = None\n for td in bs.find_all(\"td\"):\n if td.get_text().strip() == \"Exchange\":\n filters_table = td.find_parent(\"table\")\n if filters_table is None:\n raise Exception(\"Could not locate filter parameters\")\n\n # Delete all div tags, we don't need them\n for div in filters_table.find_all(\"div\"):\n div.decompose()\n\n # Populate dict with filtering options and corresponding filter tags\n filter_dict = {}\n td_list = filters_table.find_all(\"td\")\n\n for i in range(0, len(td_list) - 2, 2):\n current_dict = {}\n if td_list[i].get_text().strip() == \"\":\n continue\n\n # Even td elements contain filter name (as shown on web page)\n filter_text = td_list[i].get_text().strip()\n\n # Odd td elements contain the filter tag and options\n selections = td_list[i + 1].find(\"select\")\n filter_name = selections.get(\"data-filter\").strip()\n\n # Store filter options for current filter\n options = selections.find_all(\"option\", {\"value\": True})\n for opt in options:\n # Encoded filter string\n value = opt.get(\"value\").strip()\n\n # String shown in pull-down menu\n text = opt.get_text()\n\n # Filter out unwanted items\n if value is None or \"Elite\" in text:\n continue\n\n # Make filter string and store in dict\n current_dict[text] = f\"{filter_name}_{value}\"\n\n # Store current filter dict\n filter_dict[filter_text] = current_dict\n\n # Save filter dict to finviz directory\n try:\n with open(json_file, \"w\") as fp:\n json.dump(filter_dict, fp)\n except Exception as e:\n print(e)\n print(\"Unable to write to file{}\".format(json_file))\n\n return filter_dict", "def load_filters(self):\n buffer_dict = dict(self.named_buffers())\n n = 0\n\n for k in self.phi_f.keys():\n if type(k) != str:\n self.phi_f[k] = buffer_dict['tensor' + str(n)]\n n += 1\n\n for psi_f in self.psi1_f:\n for sub_k in psi_f.keys():\n if type(sub_k) != str:\n psi_f[sub_k] = buffer_dict['tensor' + str(n)]\n n += 1\n\n for psi_f in self.psi2_f:\n for sub_k in psi_f.keys():\n if type(sub_k) != str:\n psi_f[sub_k] = buffer_dict['tensor' + str(n)]\n n += 1", "def get_default_filters_dict(class_of_filters,measure,**filters):\n\tif \"datadrop__in\" in filters:\n\t\tfilters.pop(\"datadrop__in\")\n\tif class_of_filters==\"short_student\":\n\t\treturnDict= {'All':{},\n\t\t\t'Male':{'upn__gender':\"M\"},\n\t\t\t'Female':{'upn__gender':\"F\"},\n\t\t\t'PP':{'upn__pp':True},\n\t\t\t'NPP':{'upn__pp':False},\n\t\t\t'EAL':{'upn__eal':True},\n\t\t\t'LAC':{'upn__lac':True},\n\t\t\t'FSM Ever':{'upn__fsm_ever':True},\n\t\t\t'NSEN':{'upn__sen':\"N\"},\n\t\t\t'KSEN':{'upn__sen':\"K\"},\n\t\t\t'EHCP':{'upn__sen':\"E\"},\n\t\t\t'All Lower':{'upn__wide_banding':\"L\"},\n\t\t\t'All Middle':{'upn__wide_banding':\"M\"},\n\t\t\t'All Higher':{'upn__wide_banding':\"H\"},\n\t\t\t'No Band':{'upn__wide_banding':\"N\"}\n\t\t\t}\n\telif class_of_filters==\"student\":\n\t\treturnDict= {'All':{},\n\t\t\t'Male':{'upn__gender':\"M\"},\n\t\t\t'Female':{'upn__gender':\"F\"},\n\t\t\t'PP':{'upn__pp':True},\n\t\t\t'NPP':{'upn__pp':False},\n\t\t\t'EAL':{'upn__eal':True},\n\t\t\t'LAC':{'upn__lac':True},\n\t\t\t'FSM 
Ever':{'upn__fsm_ever':True},\n\t\t\t'NSEN':{'upn__sen':\"N\"},\n\t\t\t'KSEN':{'upn__sen':\"K\"},\n\t\t\t'EHCP':{'upn__sen':\"E\"},\n\t\t\t'Lower Extreme':{'upn__narrow_banding':\"Lx\"},\n\t\t\t'Lower':{'upn__narrow_banding':\"L\"},\n\t\t\t'Middle':{'upn__narrow_banding':\"M\"},\n\t\t\t'Middle (Lower)':{'upn__narrow_banding':\"Ml\"},\n\t\t\t'Middle (Higher)':{'upn__narrow_banding':\"Mh\"},\n\t\t\t'Higher':{'upn__narrow_banding':\"H\"},\n\t\t\t'Higher Extreme':{'upn__narrow_banding':\"Hx\"},\n\t\t\t'No Band':{'upn__wide_banding':\"N\"},\n\t\t\t'Low Boys':{'upn__wide_banding':\"L\",'upn__gender':\"M\"},\n\t\t\t'Middle Boys':{'upn__wide_banding':\"M\",'upn__gender':\"M\"},\n\t\t\t'High Boys':{'upn__wide_banding':\"H\",'upn__gender':\"M\"},\n\t\t\t'Low Girls':{'upn__wide_banding':\"L\",'upn__gender':\"F\"},\n\t\t\t'Middle Girls':{'upn__wide_banding':\"M\",'upn__gender':\"F\"},\n\t\t\t'High Girls':{'upn__wide_banding':\"H\",'upn__gender':\"F\"},\n\t\t\t'High Girls':{'upn__wide_banding':\"H\",'upn__gender':\"F\"},\n\t\t\t'Low PP Boys':{'upn__wide_banding':\"L\",'upn__gender':\"M\",'upn__pp':True},\n\t\t\t'Middle PP Boys':{'upn__wide_banding':\"M\",'upn__gender':\"M\",'upn__pp':True},\n\t\t\t'High PP Boys':{'upn__wide_banding':\"H\",'upn__gender':\"M\",'upn__pp':True},\n\t\t\t'Low PP Girls':{'upn__wide_banding':\"L\",'upn__gender':\"F\",'upn__pp':True},\n\t\t\t'Middle PP Girls':{'upn__wide_banding':\"M\",'upn__gender':\"F\",'upn__pp':True},\n\t\t\t'High PP Girls':{'upn__wide_banding':\"H\",'upn__gender':\"F\",'upn__pp':True},\n\t\t\t}\n\telif class_of_filters==\"att8bucket\":\n\t\treturnDict= {'All':{},\n\t\t\t'Maths':{'subject__attainment8bucket':'ma'},\n\t\t\t'English':{'subject__attainment8bucket':'en'},\n\t\t\t'EBacc':{'subject__attainment8bucket':'eb'},\n\t\t\t'Open':{'subject__attainment8bucket':'op'},\n\t\t\t}\n\telif class_of_filters==\"banding\":\n\t\treturnDict= {'All':{},\n\t\t\t'All Lower':{'upn__wide_banding':'L'},\n\t\t\t'Lower Extreme':{'upn__narrow_banding':'Lx'},\n\t\t\t'Lower':{'upn__narrow_banding':'L'},\n\t\t\t'All Middle':{'upn__wide_banding':'M'},\n\t\t\t'Middle (Lower)':{'upn__narrow_banding':'Ml'},\n\t\t\t'Middle (Higher)':{'upn__narrow_banding':'Mh'},\n\t\t\t'All Higher':{'upn__wide_banding':'H'},\n\t\t\t'Higher':{'upn__narrow_banding':'H'},\n\t\t\t'Higher Extreme':{'upn__narrow_banding':'Hx'},\n\t\t\t'No Banding':{'upn__wide_banding':'N'},\n\t\t\t}\n\telif class_of_filters==\"subject_blocks\":\n\t\treturnDict= {'All':{},\n\t\t\t'Core':{'subject__option_subject':False},\n\t\t\t'Option':{'subject__option_subject':True},\n\t\t\t'EBacc':{'subject__ebacc_subject':True},\n\t\t\t'Non-EBacc':{'subject__ebacc_subject':False},\n\t\t\t}\n\telif \"staff\" in class_of_filters:\n\t\tfilters.pop('datadrop',None)\n\t\tfilters.pop('datadrop__name',None)\n\t\tif \"classgroup\" in filters:\n\t\t\tfilters['class_code']=filters['classgroup'].class_code\n\t\t\tfilters.pop('classgroup',None)\n\t\treturnDict={'All':{}}\n\t\tstaff_set=set(classgroup.objects.filter(**filters).exclude(staff=\"---\")\n\t\t\t.values_list('staff').distinct())\n\t\tstaff_list=[]\n\t\tfor st in staff_set:\n\t\t\tfor s in st:\n\t\t\t\tstaff_list.append(s)\n\t\tstaff_list.sort()\n\t\tfor code in staff_list:\n\t\t\tclasses=classgroup.objects.filter(staff=code,**filters).distinct()\n\t\t\tif \"short\" not in class_of_filters:\n\t\t\t\tfor cl in classes:\n\t\t\t\t\treturnDict[code+\" \"+cl.class_code]={\"classgroup\":cl}\n\t\t\treturnDict['All ' +code]={\"classgroup__in\":classes}\n\telse:\n\t\t\"\"\"if not a fixed set of 
filters, populate from objects in db based on\n\t\tclass, code specific to each class removes invalid filters and replaces\n\t\tthem with valid ones where possible\"\"\"\n\t\tif class_of_filters==\"classgroup\" :\n\t\t\tfilters.pop('datadrop',None)\n\t\t\tfilters.pop('datadrop__name',None)\n\t\t\tif \"classgroup\" in filters:\n\t\t\t\tfilters['class_code']=filters['classgroup'].class_code\n\t\t\t\tfilters.pop('classgroup',None)\n\n\t\telif class_of_filters==\"subject\" or class_of_filters==\"faculty\":\n\t\t\tif \"subject\" in filters:\n\t\t\t\tfilters['name']=filters['subject'].name\n\t\t\t\tfilters.pop('subject',None)\n\t\t\telif \"subject__name\" in filters:\n\t\t\t\tfilters['name']=filters['subject__name']\n\t\t\t\tfilters.pop('subject__name',None)\n\t\t\tfilters.pop('datadrop',None)\n\t\t\tfilters.pop('datadrop__name',None)\n\n\t\telif class_of_filters==\"datadrop\":\n\t\t\tif \t\"datadrop__name\" in filters:\n\t\t\t\tfilters['name']=filters['datadrop__name']\n\t\t\t\tfilters.pop('datadrop__name',None)\n\t\t\tif \"datadrop\" in filters:\n\t\t\t\tfilters['id']=filters['datadrop'].id\n\t\t\t\tfilters.pop('datadrop',None)\n\t\t\tif \"subject\" in filters or \"faculty\" in filters:\n\t\t\t\tfilters['cohort__in']=yeargroup.objects.filter(\n\t\t\t\t\tsubject=filters['subject'])\n\t\t\t\tfilters.pop('subject',None)\n\t\t\telif \"subject__name\" in filters:\n\t\t\t\tfilters['cohort__in']=yeargroup.objects.filter(\n\t\t\t\t\tsubject__name__contains=filters['subject__name'])\n\t\t\t\tfilters.pop('subject__name',None)\n\t\t\tif \"classgroup\" in filters:\n\t\t\t\tfilters['cohort']=filters['classgroup'].cohort\n\t\t\t\tfilters.pop('classgroup',None)\n\n\t\telif class_of_filters==\"yeargroup\" :\n\t\t\tif \"subject__name\" in filters and measure==\"progress\":\n\t\t\t\tfilters['subject__in']=subject.objects.filter(\n\t\t\t\t\tname__contains=filters['subject__name'])\n\t\t\t\tfilters.pop('subject__name',None)\n\t\t\tif \"cohort\" in filters and measure==\"progress\":\n\t\t\t\tfilters['cohort']=filters['cohort'].cohort\n\t\t\tfilters.pop('subject',None)\n\n\t\t#get queryset or set of objects from db based on filters\n\t\tif class_of_filters in ['yeargroup','datadrop','subject',\n\t\t'classgroup']:\n\t\t\tqset=apps.get_model('analysis',class_of_filters).\\\n\t\t\t\tobjects.filter(**filters)\n\t\telif class_of_filters==\"faculty\":\n\t\t\tqset=['Maths','English','Science','Humanities','MFL',\n\t\t\t\t'Arts','Technology','IT',None]\n\t\t\tfor sub in subject.objects.filter(**filters):\n\t\t\t\tif sub.faculty not in qset:\n\t\t\t\t\tqset.add(sub.faculty)\n\n\t\t#sorting set for each class\n\t\tif class_of_filters==\"yeargroup\":\n\t\t\tclass_of_filters=\"subject__cohort\"\n\t\t\tqset=qset.order_by('cohort')\n\t\telif class_of_filters==\"datadrop\":\n\t\t\tqset=qset.order_by('cohort','-date')\n\t\telif class_of_filters==\"subject\":\n\t\t\tqset=qset.order_by('name','faculty')\n\t\telif class_of_filters==\"classgroup\":\n\t\t\tqset=qset.order_by('class_code')\n\t\telif class_of_filters==\"faculty\":\n\t\t\tclass_of_filters=\"subject__faculty\"\n\t\t#populate returning dictionary with set/queryset\n\t\treturnDict={}\n\t\treturnDict['All']={}\n\t\tif class_of_filters==\"subject\":\n\t\t\tfor q in qset:\n\t\t\t\treturnDict[q.name]={'subject__name':q.name}\n\t\telse:\n\t\t\tfor q in qset:\n\t\t\t\tif q is None and \"faculty\" in class_of_filters:\n\t\t\t\t\treturnDict[\"Other\"]={class_of_filters:q}\n\t\t\t\telse:\n\t\t\t\t\treturnDict[q.__str__()]={class_of_filters:q}\n\tif measure in avg_headline_measures or 
measure in pct_headline_measures:\n\t\tfor outerkey,dict in returnDict.items():\n\t\t\tdict=clean_filters(dict)\n\treturn returnDict", "def get_filters(self) -> dict:\n return self._filters", "def create_filters(id=None, title=None, category=None, priority=None,\n status=None, place=None, description=None, name=None):\n\n filters = {}\n if id:\n filters['id'] = id\n if title:\n filters['title'] = title\n if category:\n filters['category'] = category\n if priority:\n filters['priority'] = priority\n if status:\n filters['status'] = status\n if place:\n filters['place'] = place\n if description:\n filters['description'] = description\n if name:\n filters['name'] = name\n return filters", "def get(cls, filters: Dict = None):\n if filters is None:\n filters = {}\n\n data = DATABASE_CONNECTION.get(cls.__name__)\n\n for k, v in filters.items():\n data = [row for row in data if row[k] in v]\n\n res = [cls.deserialize(row) for row in data]\n\n return res", "def get_special_filters(filepath):\n filters = {}\n with open(filepath, \"r\") as f:\n reader = csv.DictReader(f, delimiter=';')\n for row in reader:\n function = row[\"Function\"]\n filters.setdefault(function, {})\n filters[function][\"description\"] = row[\"Description\"]\n filters[function][\"parameters\"] = row[\"Parameters\"].split(\",\")\n filters[function][\"example\"] = row[\"Example\"]\n return filters", "def load_all_filters(self, interp=True, lamb=None):\n raise NotImplementedError", "def apply_filters(self, filters):\n self._data = self.model.objects.filter(**filters)", "def from_dict(cls, data):\n return cls(\n filter_id=data[\"Filter\"],\n name=data[\"Name\"],\n admin=data[\"Admin\"],\n action=data[\"Action\"],\n input_port=data[\"Input\"],\n output_port=data[\"Output\"],\n classifiers=data[\"Classifiers\"],\n packet_processing=data[\"Packet Processing\"],\n )", "def filters(self, filters):\n\n self._filters = filters", "def std_filters():\n kwargs = {\n \"sentence_filters\":[punctuation_filter],\n \"word_filters\":[small_word_filter, stopword_filter, stemming_filter]\n }\n return kwargs", "def __init__( self, filters=None, prx=None ):\n\n if filters is None:\n if prx is None:\n\n self._filter_list = rts2_wwwapi.rts2comm().get_filters()\n\n elif type(filters) == list:\n self._filter_list = filters\n\n elif type(filters) == dict:\n raise TypeError(\"Filters are should not be a dict, it probably should be None\")\n # this assumes that the keywords of the dictionary are \n # the fitler names and the value is the filter number. \n\n\n #sort by filter number and reverse look up. 
\n # this doesn't work in python3\n #for key, value in sorted(filters.iteritems(), key=lambda (k,v): (v,k)):\n #self._filter_list.append( key )\n\n elif type(filters) == str or type(filters) == unicode:\n self._filter_list = str(filters).split()\n\n else:\n raise TypeError(\"Unexpected filter type {}, type must be string, unicode, list or dict\".format(type(filters)))", "def create_filter_from_args(self, args: dict) -> Filter:\n keys = set(args.keys())\n filter_args = {}\n\n if \"name\" in keys:\n value = args.get('name')\n if value != \"\":\n filter_args.update({\"text_filter\": args.get('name')})\n if \"product_in\" in keys:\n value = args.get('product_in')\n if value != \"\":\n filter_args.update({\"product_in\": 'true' if value == \"yes\" else 'false'})\n if \"human_in\" in keys:\n value = args.get('human_in')\n if value != \"\":\n filter_args.update({\"human_in\": 'true' if value == \"yes\" else 'false'})\n if \"institutional\" in keys:\n value = args.get('institutional')\n if value != \"\":\n filter_args.update({\"institutional\": 'true' if value == \"yes\" else 'false'})\n if \"format\" in keys:\n value = args.get('format')\n if value != \"\":\n filter_args.update({\"picture_format\": 'true' if value == \"vertical\" else 'false'})\n if \"credit\" in keys:\n value = args.get('credit')\n if value != \"\":\n filter_args.update({\"author_credits\": value})\n if \"limited_use\" in keys:\n value = args.get('limited_use')\n if value != \"\":\n filter_args.update({\"limited_usage\": 'true' if value == \"yes\" else 'false'})\n if \"tags\" in keys:\n value = args.get('tags')\n if value != \"\":\n filter_args.update({\"limited_usage\": value.split(';')})\n\n f = Filter(**filter_args)\n return f", "def get_filters() -> dict:\n if environment is None or not hasattr(environment, 'loader'):\n return {}\n return environment.filters", "def filter_data(self, data):\n for f in self.filters:\n data = getattr(self, f)(data)\n return data", "def filters(self, **kwargs):\n return config.filters(self._host, self._session, **kwargs)", "def get_filter_kwargs(self, *_, **__) -> Dict[str, Any]:", "def load_all_filters(self, interp=True, lamb=None):\n return [self._load_filter(k, interp=interp, lamb=lamb)\n for k in self.content]", "def __init__(self):\n self._sections = {}\n self._filters = []\n self._id = 0", "def load_filter_file(self, file_path): \n self._pop_all_self()\n self.filter_list = []\n self.file_path = file_path \n \n with codecs.open(self.file_path, 'r', encoding='cp1252') as fid: \n for k, line in enumerate(fid):\n line = line.lstrip('\\n\\r ')\n if line.startswith('#'):\n continue \n split_line = [item.strip() for item in line.split('\\t')]\n if k==0:\n # Header\n header = split_line\n else:\n line_dict = dict(zip(header, split_line))\n self[line_dict['variable']] = SingleFilter(line_dict, self.parameter)\n\n # Save attributes\n for item in self.keys():\n setattr(self, item, self[item])\n \n self.header = sorted(header)\n \n if self.filter_type == 'data':\n self.year_list = [y for y in range(self['YEAR_INTERVAL'].value[0], \n self['YEAR_INTERVAL'].value[1]+1)]", "def build_filters(self, filters = None):\n if filters is None:\n filters = {}\n \n orm_filters = super(AreaResource, self).build_filters(filters)\n \n if \"level\" in filters:\n orm_filters[\"layout__level\"] = int(filters[\"level\"])\n \n return orm_filters", "def _get_filters(self, req):\n filters = {}\n properties = {}\n\n for param in req.str_params:\n if param in SUPPORTED_FILTERS:\n filters[param] = req.str_params.get(param)\n if 
param.startswith('property-'):\n _param = param[9:]\n properties[_param] = req.str_params.get(param)\n\n if len(properties) > 0:\n filters['properties'] = properties\n\n return filters", "def filters():\n return {\"reform_vlans\": FilterModule.reform_vlans}", "def _get_filters(self, request, queryset, view): # noqa\n self.opts = queryset.model._meta\n filter_fields = getattr(view, \"filter_fields\", None)\n self.exclude = {}\n self.filters = {}\n\n if filter_fields:\n blacklist = RexList(getattr(view, \"filter_blacklist\", []))\n mapping = self._get_mapping(view)\n\n for fieldname_arg in self.query_params:\n raw_value = self.query_params.get(fieldname_arg)\n if raw_value in [\"''\", '\"\"']:\n raw_value = \"\"\n\n negate = fieldname_arg[-1] == \"!\"\n\n if negate:\n filter_field_name = fieldname_arg[:-1]\n TARGET = self.exclude\n else:\n TARGET = self.filters\n filter_field_name = fieldname_arg\n\n if filter_field_name in self.excluded_query_params:\n continue\n if self.ignore_filter(request, filter_field_name, view):\n continue\n try:\n if filter_field_name in blacklist:\n raise InvalidQueryArgumentError(fieldname_arg)\n parts = None\n if \"__\" in filter_field_name:\n parts = filter_field_name.split(\"__\")\n filter_field_name = parts[0]\n op = parts[-1]\n else:\n op = \"\"\n processor = getattr(\n self,\n \"process_{}\".format(filter_field_name),\n getattr(view, \"drfqs_filter_{}\".format(filter_field_name), None),\n )\n\n if (filter_field_name not in filter_fields) and (not processor):\n self.unknown_arguments.append((fieldname_arg, filter_field_name))\n continue\n # raise InvalidQueryArgumentError(filter_field_name)\n if raw_value is None and not processor:\n continue\n # field is configured in Serializer\n # so we use 'source' attribute\n if filter_field_name in mapping:\n real_field_name = mapping[filter_field_name].source\n # if '.' 
in real_field_name:\n # real_field_name = real_field_name.split('.')[0]\n # field_name = real_field_name.replace('.', '__')\n else:\n real_field_name = filter_field_name\n\n if processor:\n payload = {\n \"field\": filter_field_name,\n \"request\": request,\n \"param\": fieldname_arg,\n \"negate\": negate,\n \"op\": op,\n \"field_name\": real_field_name,\n \"parts\": parts,\n \"value\": raw_value,\n \"real_field_name\": real_field_name,\n }\n _f, _e = processor(dict(self.filters), dict(self.exclude), **payload)\n self.filters.update(**_f)\n self.exclude.update(**_e)\n else:\n if not raw_value:\n continue\n # field_object = opts.get_field(real_field_name)\n value_type = self.field_type(real_field_name)\n if parts:\n f = \"{}__{}\".format(real_field_name, \"__\".join(parts[1:]))\n else:\n f = filter_field_name\n if op in [\"in\", \"contained_by\"]:\n value = raw_value.split(\",\")\n elif op == \"acontains\":\n value = raw_value.split(\",\")\n f = f.replace(\"__acontains\", \"__contains\")\n elif op == \"isnull\":\n value = parse_bool(raw_value)\n elif value_type == bool:\n value = parse_bool(raw_value)\n else:\n value = raw_value\n TARGET[f] = value\n except ValueError:\n raise InvalidQueryValueError(fieldname_arg, raw_value)\n except QueryFilterException:\n raise\n except Exception as e:\n logger.exception(e)\n raise\n return self.filters, self.exclude", "def _get_filters(self, req):\n filters = {}\n for param in req.str_params:\n if param in SUPPORTED_FILTERS or param.startswith('property-'):\n # map filter name or carry through if property-*\n filter_name = SUPPORTED_FILTERS.get(param, param)\n filters[filter_name] = req.str_params.get(param)\n return filters", "def _get_filters(self, req):\n filters = {}\n properties = {}\n\n for param in req.params:\n if param in SUPPORTED_FILTERS:\n filters[param] = req.params.get(param)\n if param.startswith('property-'):\n _param = param[9:]\n properties[_param] = req.params.get(param)\n\n if 'changes-since' in filters:\n isotime = filters['changes-since']\n try:\n filters['changes-since'] = timeutils.parse_isotime(isotime)\n except ValueError:\n raise exc.HTTPBadRequest(_(\"Unrecognized changes-since value\"))\n\n if 'protected' in filters:\n value = self._get_bool(filters['protected'])\n if value is None:\n raise exc.HTTPBadRequest(_(\"protected must be True, or \"\n \"False\"))\n\n filters['protected'] = value\n\n # only allow admins to filter on 'deleted'\n if req.context.is_admin:\n deleted_filter = self._parse_deleted_filter(req)\n if deleted_filter is not None:\n filters['deleted'] = deleted_filter\n elif 'changes-since' not in filters:\n filters['deleted'] = False\n elif 'changes-since' not in filters:\n filters['deleted'] = False\n\n if properties:\n filters['properties'] = properties\n\n return filters", "def __init__(self, filters, event_file_path, device_name):\n super().__init__(device_name=device_name)\n self._filters_dict = {}\n self.event_file_path = event_file_path\n self.load_filters(filters)", "def get_filters(filepath):\n filters = {}\n with open(filepath, \"r\") as f:\n reader = csv.DictReader(f, delimiter=';')\n for row in reader:\n filter_id = row[\"Filter Column\"]\n filters.setdefault(filter_id, {})\n filters[filter_id][\"results\"] = row[\"Result\"].split(\", \")\n filters[filter_id][\"type\"] = row[\"Type\"]\n filters[filter_id][\"description\"] = ''.join(row[\"Description\"])\n return filters", "def __filter(self, obj):\n filtered_keys = ['file_path', \"Data\", \"raw_block_data\", \"Reserved1\", \"raw\"]\n if 
isinstance(obj, list):\n return dict([t for t in obj if t[0] not in filtered_keys])\n elif isinstance(obj, dict):\n return {k: self.__filter(v) for k, v in obj.items()\n if k not in filtered_keys}\n else:\n return dict(obj)", "def filters(self):\n return self.__filters", "def get_params(self):\n outputs = ['sample',\n 'ratio_params',\n 'despike_params',\n 'autorange_params',\n 'bkgcorrect_params']\n\n out = {}\n for o in outputs:\n out[o] = getattr(self, o)\n\n out['filter_params'] = self.filt.params\n out['filter_sequence'] = self.filt.sequence\n out['filter_used'] = self.filt.make_keydict()\n\n return out", "def update_filters(self):\n\n # Update household filter\n household_filter = [True if agent == 'household' else False for agent \\\n in self.source.data['agent_type']]\n self.household_view.filters[0] = BooleanFilter(household_filter)\n\n # Update neighbourhood filter\n neighbourhood_filter = [True if agent == 'neighbourhood' else False for\\\n agent in self.source.data['agent_type']]\n self.neighbourhood_view.filters[0] = BooleanFilter(\n neighbourhood_filter)\n\n # Update school filter\n school_filter = [True if agent == 'school' else False for agent in \\\n self.source.data['agent_type']]\n self.school_view.filters[0] = BooleanFilter(school_filter)", "def load_custom_filters(environment):\n\n # TODO deprecate ipaddr_index and netmask for the better ipnet ones\n filter_list = {\n 'dpkg_arch': filter_dpkg_arch,\n 'storage_size_num': filter_storage_size_num,\n 'ipnet_hostaddr': filter_ipnet_hostaddr,\n 'ipnet_hostmin': filter_ipnet_hostmin,\n 'ipnet_hostmax': filter_ipnet_hostmax,\n 'ipnet_broadcast': filter_ipnet_broadcast,\n 'ipnet_netmask': filter_ipnet_netmask,\n 'ipnet_contains_ip': filter_ipnet_contains_ip,\n 'ipnet_contains_iprange': filter_ipnet_contains_iprange,\n 'ipnet_range_size': filter_ipnet_range_size,\n 'ipaddr_index': filter_ipaddr_index,\n 'netmask': filter_netmask\n }\n\n for name, function in filter_list.items():\n environment.filters[name] = function", "def extract_filters(self):\n self.filters = self.controller.filters\n\n self.extract_core_stats()\n self.extract_abilities()\n # goes through and adds all list-based filters\n for filterType, elements in self.filters.items():\n if type(elements) == list and len(elements) > 0:\n self.extract_filter_list(filterType, elements)", "def update_filters(self, **kwargs):\n self._FILTERS = kwargs", "def _add_filters(self, filters):\n self._env.filters['dateformat'] = dateformat\n self._env.filters.update(filters or {})", "def load_all_filters(self, interp=True, lamb=None):\n with self as s:\n filters = [s._load_filter(fname, interp=interp, lamb=lamb)\n for fname in s.content]\n return(filters)", "def load_filters(self, names, interp=True, lamb=None, filterLib=None):\n with self as s:\n filters = [s._load_filter(fname, interp=interp, lamb=lamb)\n for fname in names]\n return(filters)", "def get_filters(self):\n location_id = self.cleaned_data.get('location_id')\n if (\n location_id\n and user_can_access_location_id(self.domain, self.user, location_id)\n ):\n location_ids = [location_id]\n else:\n location_ids = []\n\n filters = {\n 'location_ids': location_ids,\n 'selected_location_only': self.cleaned_data.get('selected_location_only', False)\n }\n location_status_active = self.cleaned_data.get('location_status_active', None)\n\n if location_status_active is not None:\n filters['is_archived'] = (not location_status_active)\n\n return filters", "def _initialize_data_filter(self):\n df_params = self._loading_params.copy()\n 
df_params[\"filter_negate\"] = True\n df_params[\"filter_upper\"] = True\n self._data_filter = LoadProcessedData(**df_params)", "def build_filters(self, filters = None):\n if filters is None:\n filters = {}\n \n orm_filters = super(UserResource, self).build_filters(filters)\n \n if \"area\" in filters:\n area_id = filters['area']\n area = Area.objects.get(id = area_id)\n \n #checked_in_user_profiles = [user_ctx.user for user_ctx in UserContext.objects.filter(currentArea = area)]\n orm_filters[\"pk__in\"] = [user_ctx.user.pk \n for user_ctx in UserContext.objects.filter(currentArea = area)]\n \n elif \"environment\" in filters:\n environment_id = filters['environment']\n environment = Environment.objects.get(id = environment_id)\n \n #checked_in_user_profiles = [user_ctx.user for user_ctx in UserContext.objects.filter(currentArea = area)]\n orm_filters[\"pk__in\"] = [user_ctx.user.pk \n for user_ctx in UserContext.objects.filter(currentEnvironment = environment)]\n \n return orm_filters", "def __init__(self, source, parameter='', file_path=None):\n super().__init__() \n self.filter_type = 'data'\n self.source = source\n self.parameter = parameter\n self._initate_filter_items()\n if file_path:\n self.load_filter_file(file_path)", "def filter_features(self):\n return {key: {k: v for k, v in value.items() if k in {NAME, TYPE, ACTIVE}} for key, value in self.to_dict().items()}", "def test_categorical_filter_serialization(self):\n result = serializers.FilterSerializer(self.internal_filter).data\n self.assertDictEqual(result, self.external_filter)", "def display_representation(self, filters=None):\n attrs = OrderedDict()\n for field in get_fields(type(self)):\n name = field.field_name\n value = getattr(self, name)\n attrs[name] = value\n \n if filters is None:\n return attrs\n elif isinstance(filters, list):\n filter_attrs = OrderedDict()\n for attr in filters:\n if attr in attrs:\n filter_attrs[attr] = attrs[attr]\n return filter_attrs", "def filters(self):\n return self._filters", "def init_filters(model: Model, settings: Model) -> None:\n filters = [\n {\"name\": \"Project\", \"filtration_type\": \"drop-down\"},\n {\"name\": \"Attachments\", \"filtration_type\": \"numeric\"},\n {\"name\": \"Priority\", \"filtration_type\": \"drop-down\"},\n {\"name\": \"Resolved\", \"filtration_type\": \"date\"},\n {\"name\": \"Labels\", \"filtration_type\": \"string\"},\n {\"name\": \"Created\", \"filtration_type\": \"date\"},\n {\"name\": \"Comments\", \"filtration_type\": \"numeric\"},\n {\"name\": \"Status\", \"filtration_type\": \"drop-down\"},\n {\"name\": \"Key\", \"filtration_type\": \"drop-down\"},\n {\"name\": \"Summary\", \"filtration_type\": \"string\"},\n {\"name\": \"Resolution\", \"filtration_type\": \"drop-down\"},\n {\"name\": \"Description\", \"filtration_type\": \"string\"},\n {\"name\": \"Components\", \"filtration_type\": \"string\"},\n ]\n for filter_ in filters:\n model.objects.create(\n name=filter_[\"name\"],\n filtration_type=filter_[\"filtration_type\"],\n settings=settings,\n )", "def apply_filters(self, new_filters):\n\t\tself.filters = new_filters", "def get_request_filters(self):\n # build the compiled set of all filters\n requested_filters = OrderedDict()\n for filter_name, f in self.filters.items():\n requested_filters[filter_name] = f\n\n # exclusion params\n exclude_name = '%s!' 
% filter_name\n if related(self, exclude_name) in self.data:\n # deepcopy the *base* filter to prevent copying of model & parent\n f_copy = copy.deepcopy(self.base_filters[filter_name])\n f_copy.parent = f.parent\n f_copy.model = f.model\n f_copy.exclude = not f.exclude\n\n requested_filters[exclude_name] = f_copy\n\n return requested_filters", "def load_filters(self, names, interp=True, lamb=None, filterLib=None):\n filters = [self._load_filter(fname, interp=interp, lamb=lamb)\n for fname in names]\n return(filters)", "def build_feature_filter(self):\n if self.features == [\"*\"]:\n random_iso = list(self.data.keys())[0]\n self.features = set()\n for lang_features in self.data.values():\n self.features |= set(lang_features.keys())\n self.features = list(self.features)\n if self.exclusions:\n self.features = [f for f in self.features if f not in self.exclusions]\n self.feature_filter = set(self.features)", "def __init__(self, required={}, excluded={}, exact_match=True, DEBUG=False):\n self.logger = get_logger(name=\"Filter\", DEBUG=DEBUG)\n self.required = None\n self.excluded = None\n if isinstance(required, dict):\n self.required = required\n else:\n self.required = {}\n self.logger.error(msg=\"Required is not a dictionary!\")\n if isinstance(excluded, dict):\n self.excluded = excluded\n else:\n self.excluded = {}\n self.logger.error(msg=\"Excluded is not a dictionary!\")\n self.exact_match = exact_match", "def filters(self):\n return self.England_filter", "def __generateFilter(self, selectionPairs):\n filter = None\n for (selSyntax, argSyntax) in selectionPairs:\n if self._arg.has_key(argSyntax) and self._arg[argSyntax] != '':\n if filter is None:\n filter = {}\n filter[selSyntax] = self._arg[argSyntax]\n \n return filter", "def filters(self):\n\t\treturn self.local_filter", "def process_filters(self, filters, queryset, view):\n return filters", "def _load_filter(self, fname, **kwargs):\n with self as s:\n return LickIndex(fname, s._content[fname])", "def loadFilters(ufo):\n preFilters, postFilters = [], []\n for filterDict in ufo.lib.get(FILTERS_KEY, []):\n namespace = filterDict.get(\"namespace\", \"ufo2ft.filters\")\n try:\n filterClass = getFilterClass(filterDict[\"name\"], namespace)\n except (ImportError, AttributeError):\n from pprint import pformat\n\n logger.exception(\"Failed to load filter: %s\", pformat(filterDict))\n continue\n filterObj = filterClass(\n *filterDict.get(\"args\", []),\n include=filterDict.get(\"include\"),\n exclude=filterDict.get(\"exclude\"),\n pre=filterDict.get(\"pre\", False),\n **filterDict.get(\"kwargs\", {}),\n )\n if filterObj.pre:\n preFilters.append(filterObj)\n else:\n postFilters.append(filterObj)\n return preFilters, postFilters", "def _add_filter(self, type, args):\r\n if isinstance(args, pylastica.filter.abstractfilter.AbstractFilter):\r\n args = args.to_dict()\r\n assert isinstance(args, dict), \"Invalid parameter. 
Must be a dict or instance of implementation of AbstractFilter.\"\r\n var_name = '_' + type\r\n self.__dict__[var_name].append(args)\r\n return self", "def filters(self):\n return {\n 'port_channels': port_channels\n }", "def generateSearchFilters(self, searchDict):\n\n location = Location(searchDict['location'])\n location.setRangeCoordinates(searchDict['searchRange'])\n category = Category.query.get(searchDict['category'])\n filters = {\n \"name\": searchDict['name'],\n \"category\": category,\n \"location\": location,\n \"reviewed\": bool(searchDict['reviewed_filter']),\n \"friends\": bool(searchDict['friends_filter']),\n \"groups\": bool(searchDict['groups_filter'])\n }\n sort = searchDict['sort']\n return filters, sort", "def generateSearchFilters(self, searchDict):\n\n location = Location(searchDict['location'])\n location.setRangeCoordinates(searchDict['searchRange'])\n category = Category.query.get(searchDict['category'])\n filters = {\n \"name\": searchDict['name'],\n \"category\": category,\n \"location\": location,\n \"reviewed\": bool(searchDict['reviewed_filter']),\n \"friends\": bool(searchDict['friends_filter']),\n \"groups\": bool(searchDict['groups_filter'])\n }\n sort = searchDict['sort']\n return filters, sort", "def _init_from_dictionary(self, from_dictionary, template_model=None):\n\n if not isinstance(from_dictionary, dict):\n raise TypeError(\"from_dictionary must be of type dict, %s \\\n provided\" % from_dictionary.__class__.__name__)\n rewrite_map = None\n if template_model is not None:\n\n rewrite_map = template_model.attribute_rewrite_reverse_map()\n\n if not isinstance(template_model, prestans.types.DataCollection):\n raise TypeError(\"template_model should be a prestans model in AttributeFilter \\\n init (from dictionary), %s provided\" % template_model.__class__.__name__)\n\n for key, value in from_dictionary.iteritems():\n\n target_key = key\n\n #:\n #: Minification support\n #:\n if rewrite_map is not None:\n target_key = rewrite_map[key]\n\n #:\n #: Check to see we can work with the value\n #:\n if not isinstance(value, (bool, dict)):\n raise TypeError(\"AttributeFilter input for key %s must be \\\n boolean or dict, %s provided\" % (key, value.__class__.__name__))\n\n #:\n #: Ensure that the key exists in the template model\n #:\n if template_model is not None and not template_model.has_key(target_key):\n\n unwanted_keys = list()\n unwanted_keys.append(target_key)\n raise prestans.exception.AttributeFilterDiffers(unwanted_keys)\n\n #:\n #: Either keep the value of wrap it up with AttributeFilter\n #:\n if isinstance(value, bool):\n setattr(self, target_key, value)\n elif isinstance(value, dict):\n\n sub_map = None\n if template_model is not None:\n\n sub_map = getattr(template_model, target_key)\n\n #: prestans Array support\n if isinstance(sub_map, prestans.types.Array):\n sub_map = sub_map.element_template\n\n setattr(self, target_key, \\\n AttributeFilter(from_dictionary=value, template_model=sub_map))", "def _build_filter(self, **kwargs):\n\n def object_filter(obj):\n for key, value in kwargs.items():\n # we replace dango-like lookup by dots, so attrgetter can do his job\n\n getter = utils.attrgetter(key)\n if hasattr(value, '__call__'):\n # User passed a callable for a custom comparison\n if not value(getter(obj)):\n return False\n else:\n if not getter(obj) == value:\n return False\n return True\n\n return object_filter", "def get_filter(cls, filter, odata=False):\n\n if filter:\n #www.odata.org/libraries\n if odata:\n lst_filter = []\n if 'and' in 
filter:\n tmp_filters = filter.split('and')\n else:\n tmp_filters = [filter, ]\n for tmp_filter in tmp_filters:\n if 'eq' in tmp_filter:\n tmp_filter = tmp_filter.replace('eq', '=')\n elif 'gt' in tmp_filter:\n tmp_filter = tmp_filter.raplace('gt', '>')\n elif 'lt' in tmp_filter:\n tmp_filter = tmp_filter.replace('lt', '>')\n lst_filter.append(tmp_filter.split())\n return lst_filter\n else:\n dict_filter = {}\n for lst_attribut in filter.split(','):\n attribut = lst_attribut.split(':')\n if \"/\" in attribut[1]:\n dict_filter[attribut[0]] = attribut[1].split('/')\n else:\n if attribut[1] == 'false':\n dict_filter[attribut[0]] = False\n elif attribut[1] == 'true':\n dict_filter[attribut[0]] = True\n else:\n dict_filter[attribut[0]] = attribut[1]\n return dict_filter\n return False", "def build_filters(self, filters=None):\n\n if filters is None:\n filters = {}\n\n orm_filters = super(EmployeeResource, self).build_filters(filters)\n\n if 'role' in filters:\n ids = (Employee.by_assignment_role(filters['role'])\n .values_list('id', flat=True))\n orm_filters['pk__in'] = ids\n\n return orm_filters", "def filter_config(resource, first_pub_pair=None, last_pub_pair=None):\n if resource in constants.ALLOWS_FILTER:\n # Process eventual filter parameters:\n if first_pub_pair:\n a_filter = query_client.TemporalFilter(\n query_client.PUBLISHED_FIRST, *first_pub_pair)\n elif last_pub_pair:\n a_filter = query_client.TemporalFilter(\n query_client.PUBLISHED_LAST, *last_pub_pair)\n else: # Default is 'empty' filter\n a_filter = query_client.Filter()\n else:\n a_filter = None\n\n return {'a_filter': a_filter}", "def get_default_filters(self, **resources):\r\n return dict((k, (v, False)) for k, v in resources.items()\r\n if k in self._meta.fields)", "def test_quantitative_filter_serialization(self):\n result = serializers.FilterSerializer(self.internal_filter).data\n self.assertDictEqual(result, self.external_filter)", "def __init__(self, kind=None, filters={}, _app=None, keys_only=False,\n compile=True, cursor=None, _namespace=None):\n if kind is not None:\n datastore_types.ValidateString(kind, 'kind',\n datastore_errors.BadArgumentError)\n\n self.__kind = kind\n self.__orderings = []\n self.__filter_order = {}\n self.update(filters)\n\n self.__app = datastore_types.ResolveAppIdNamespace(_app,\n _namespace).to_encoded()\n self.__keys_only = keys_only\n self.__compile = compile\n self.__cursor = cursor", "def _create_filter_object(form_data: Dict) -> Q:\n filter_object = Q(title__icontains=form_data[\"title\"])\n filter_object &= Q(author__icontains=form_data[\"author\"])\n filter_object &= Q(\n publication_language__icontains=form_data[\"publication_language\"]\n )\n if form_data[\"publication_date_start\"]:\n filter_object &= Q(\n publication_date__gte=form_data[\"publication_date_start\"]\n )\n if form_data[\"publication_date_end\"]:\n filter_object &= Q(publication_date__lte=form_data[\"publication_date_end\"])\n return filter_object", "def test_filter_multiple(self):\n self.es.register_filter(foo=False, bar='baz')\n self.assertFalse(self.es.streamfilter(self.data))\n self.es.filter = {'all': [], 'any': [], 'none': []}\n self.es.register_filter(foo=True, bar='baz')\n self.assertTrue(self.es.streamfilter(self.data))\n # check whether filter functions are different\n f, g = self.es.filter['all']\n c = {'foo': True}\n self.assertNotEqual(f(c), g(c))\n c = {'bar': 'baz'}\n self.assertNotEqual(f(c), g(c))", "def condition_filters(self):\r\n return filters.Filters(self)", "def setFilters(self, filters):\n 
self.__filters = filters", "def __get_data(self, filters):\n if not os.path.exists(CACHE_FILE):\n raise DataNotScrappedError()\n df = pd.read_csv(CACHE_FILE)\n if not filters:\n return list(df.T.to_dict().values())\n\n filtered_df = df[df['name'] == filters][['category', 'name']]\n\n return list(filtered_df.T.to_dict().values())", "def filter(self, filter_dict):\n self.result = [x for x in self.result if all(str(x[y]) == z or (hasattr(x[y], \"__iter__\") and (z in str(x[y]) or any(z in str(d.values) for d in x[y] if isinstance(d, dict)))) for y,z in filter_dict.items())] \n\n return self", "def to_dict(self):\n result = {}\n\n if hasattr(super(EnhancedWatermarkFilter, self), \"to_dict\"):\n result = super(EnhancedWatermarkFilter, self).to_dict()\n for attr, _ in six.iteritems(self.openapi_types):\n value = getattr(self, attr)\n if value is None:\n continue\n if isinstance(value, list):\n if len(value) == 0:\n continue\n result[self.attribute_map.get(attr)] = [y.value if isinstance(y, Enum) else y for y in [x.to_dict() if hasattr(x, \"to_dict\") else x for x in value]]\n elif hasattr(value, \"to_dict\"):\n result[self.attribute_map.get(attr)] = value.to_dict()\n elif isinstance(value, Enum):\n result[self.attribute_map.get(attr)] = value.value\n elif isinstance(value, dict):\n result[self.attribute_map.get(attr)] = {k: (v.to_dict() if hasattr(v, \"to_dict\") else v) for (k, v) in value.items()}\n else:\n result[self.attribute_map.get(attr)] = value\n\n return result", "def __getSelectionFilter(self):\n \n selectionPairs = []\n selectionPairs.append(('field','field'))\n selectionPairs.append(('spw','spw'))\n selectionPairs.append(('polarization','correlation'))\n selectionPairs.append(('baseline','antenna'))\n selectionPairs.append(('time','timerange'))\n selectionPairs.append(('scan','scan'))\n selectionPairs.append(('uvdist','uvrange'))\n selectionPairs.append(('scanintent','intent'))\n selectionPairs.append(('observation','observation'))\n return self.__generateFilter(selectionPairs)", "def prepare_advanced_filter(filter_options: str) -> dict:\n import ast\n import json\n\n if filter_options:\n if os.path.isfile(filter_options):\n with open(filter_options, 'r') as f:\n filter_options = json.load(f)\n # advanced filter do not specify collections!\n if 'collections' in filter_options:\n del filter_options['collections']\n else:\n filter_options = ast.literal_eval(filter_options)\n return filter_options\n return None", "def _create_filter_session(self,table_name):\n # import pdb;pdb.set_trace()\n if not self.HEADER_NAME in session:\n session[self.HEADER_NAME] = {}\n if not table_name in session[self.HEADER_NAME]:\n session[self.HEADER_NAME][table_name] = {self.FILTERS_NAME:{},self.ORDERS_NAME:[]}\n \n return session[self.HEADER_NAME][table_name]", "def __init__ (self, config_yaml):\n configdef = yaml.safe_load(io.StringIO(config_yaml)) \n\n if \"filters\" not in configdef:\n configdef = dict(filters=[configdef])\n\n self._configs = []\n\n for definition in configdef[\"filters\"]:\n config = Bunch( valid_from = None\n , volume_follows = False\n , copy_last_price = False\n , copy_last_volume = False\n , qualifier_include_filters = []\n , qualifier_exclude_filters = []\n , exclude_filters = [] )\n\n if \"filter\" in definition and definition[\"filter\"] != None:\n for exclude_filter in definition[\"filter\"]:\n parts = exclude_filter.split(\",\")\n if parts[0] == \"floor\":\n config.exclude_filters.append(FloorFilter(float(parts[1]), \"price\"))\n elif parts[0] == \"cap\":\n 
config.exclude_filters.append(CapFilter(float(parts[1]), \"price\")) \n elif parts[0] == \"step\":\n config.exclude_filters.append(StepFilter(int(parts[1]), float(parts[2]), float(parts[3]), \"price\"))\n else:\n raise Exception(\"Unknown filter (%s)\" % (parts[0])) \n \n if \"remove\" in definition and definition[\"remove\"] != None:\n for exclude_filter in definition[\"remove\"]:\n config.qualifier_exclude_filters.append(QualifierFilter(exclude_filter))\n \n if \"allow\" in definition and definition[\"allow\"] != None:\n for include_filter in definition[\"allow\"]:\n config.qualifier_include_filters.append(QualifierFilter(include_filter))\n\n if \"volFollows\" in definition: config.volume_follows = definition[\"volFollows\"] \n if \"copyLast\" in definition and definition[\"copyLast\"] != None:\n config.copy_last_price = definition[\"copyLast\"] \n config.copy_last_volume = definition[\"copyLast\"] \n if \"volumeLimit\" in definition and definition[\"volumeLimit\"] != None:\n config.exclude_filters.append(CapFilter(definition[\"volumeLimit\"], \"volume\"))\n if \"validFrom\" in definition and definition[\"validFrom\"] != None:\n valid_from = datetime.datetime.strptime(definition[\"validFrom\"], \"%Y-%m-%d %H:%M:%S\")\n valid_from.replace(tzinfo=pytz.utc)\n config.valid_from = common.Time.tick(valid_from)\n if \"weekTimezone\" in definition and definition[\"weekTimezone\"] != None:\n config.exclude_filters.append(WeekendFilter(definition[\"weekTimezone\"], definition[\"weekEnd\"], definition[\"weekStart\"]))\n\n self._configs.append(config)\n \n self._config_index = 0\n self._config_count = len(self._configs)", "def filter(self, filtered=None, **kwargs):\n \"\"\"whose attributes match the given keyword arguments.\n \"\"\"\n if filtered is None:\n filtered = self._objects\n try:\n key, value = kwargs.popitem()\n except KeyError:\n # We're out of filters, return\n return filtered\n\n def get_match(obj):\n return key in obj and obj.get(key) == value\n\n return self.filter(filtered=filter(get_match, filtered), **kwargs)", "def to_pickle(self, filter=[]):\n if not filter:\n lexicon_dict = {\n 'lemma_dict': self.lemma_dict,\n 'tag_dict': self.tag_dict,\n 'suffix_dict': self.suffix_dict,\n 'word_tag_dict': self.word_tag_dict, }\n else:\n lexicon_dict = {}\n if 'lemma_dict' in filter:\n lexicon_dict['lemma_dict'] = self.lemma_dict\n if 'tag_dict' in filter:\n lexicon_dict['tag_dict'] = self.tag_dict\n if 'suffix_dict' in filter:\n lexicon_dict['suffix_dict'] = self.suffix_dict\n if 'word_tag_dict' in filter:\n lexicon_dict['word_tag_dict'] = self.word_tag_dict\n return pickle.dumps(lexicon_dict)", "def _build_query_filters(self, query: dict, filters: list) -> dict:\n\n for filter_tuple in filters:\n if not isinstance(filter_tuple, tuple) or len(filter_tuple) != 3:\n LOG.error(\"polling_filters tuple %s : invalid format or does not contain 3 elements - skipping this filter\", filter_tuple)\n continue\n if isinstance(filter_tuple[2], list) :\n # If \"value\" is a list of values then create a rule (json object) for each \n # value and use \"OR\" condition.\n condition = {'condition': \"OR\",\n 'rules': []}\n for value in filter_tuple[2]:\n rule = {}\n # Prepend fieldname with \"table.\" string\n rule['field'] = f\"table.{filter_tuple[0]}\"\n rule['operator'] = filter_tuple[1]\n rule['value'] = value\n condition['rules'].append(rule)\n query['rules'].append(condition)\n else:\n # Create a single rule for this tuple\n rule = {}\n field_name = f\"table.{filter_tuple[0]}\"\n rule['field'] = field_name\n 
rule['operator'] = filter_tuple[1]\n rule['value'] = filter_tuple[2]\n query['rules'].append(rule)\n return query" ]
[ "0.69429255", "0.68280834", "0.678945", "0.67390245", "0.6707644", "0.6680645", "0.66653675", "0.664991", "0.66167235", "0.6582854", "0.6568152", "0.6519853", "0.6499183", "0.64964235", "0.64200693", "0.6409539", "0.6404884", "0.63648516", "0.6337536", "0.62964696", "0.62726504", "0.62343895", "0.6232529", "0.62193334", "0.61978686", "0.61801714", "0.61617017", "0.6150349", "0.6150145", "0.61168945", "0.6106384", "0.6105307", "0.6104092", "0.6087968", "0.60667264", "0.6059978", "0.6058376", "0.60390323", "0.6036272", "0.6031177", "0.6027145", "0.5982385", "0.5971585", "0.59682506", "0.595332", "0.59381557", "0.5938138", "0.5921962", "0.59080243", "0.5905373", "0.5883838", "0.5873565", "0.5868219", "0.58611864", "0.58381754", "0.58312505", "0.5798071", "0.57853913", "0.5785204", "0.5779823", "0.5767842", "0.57577276", "0.57461137", "0.57364947", "0.5720399", "0.5703775", "0.5686047", "0.56782293", "0.5671084", "0.56685644", "0.5640796", "0.56247866", "0.5624731", "0.56201774", "0.5619854", "0.5619854", "0.56193", "0.5615682", "0.5613405", "0.56087637", "0.56046367", "0.56030416", "0.5601035", "0.55977553", "0.5567843", "0.55548066", "0.55373377", "0.55310816", "0.54912096", "0.54905564", "0.54874164", "0.5486503", "0.547673", "0.54708916", "0.5460965", "0.5456756", "0.5453817", "0.5450716" ]
0.6210194
24
Load a given filter from the library
def _load_filter(self, fname, **kwargs): with self as s: return LickIndex(fname, s._content[fname])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def loadFilterFromString(spec):\n return _loadPluginFromString(spec, \"ufo2ft.filters\", isValidFilter)", "def _load_filter(self, *args, **kwargs):\n raise NotImplementedError", "def _load_filter(self, fname, interp=True, lamb=None):\n ftab = self.hdf\n if hasattr(fname, 'decode'):\n fnode = ftab.get_node('/filters/' + fname.decode('utf8'))\n else:\n fnode = ftab.get_node('/filters/' + fname)\n flamb = fnode[:]['WAVELENGTH']\n transmit = fnode[:]['THROUGHPUT']\n dtype = 'photon'\n unit = None\n\n attrs = fnode.attrs\n if 'DETECTOR' in attrs:\n dtype = attrs['DETECTOR']\n if 'WAVELENGTH_UNIT' in attrs:\n unit = attrs['WAVELENGTH_UNIT']\n\n fil = UnitFilter(flamb, transmit, name=fnode.name,\n dtype=dtype, unit=unit)\n\n if interp & (lamb is not None):\n fil = fil.reinterp(lamb)\n return fil", "def _load_filter(self, fname, interp=True, lamb=None, *args, **kwargs):\n try:\n fil = UnitFilter.from_ascii(fname, *args, **kwargs)\n except Exception:\n content = self.content\n r = [k for k in content if fname in k]\n\n if len(r) <= 0: # try all lower for filenames (ascii convention)\n r = [k for k in content if fname.lower() in k]\n\n if len(r) > 1:\n print(\"auto correction found multiple choices\")\n print(r)\n raise ValueError('Refine name to one of {0}'.format(r))\n elif len(r) <= 0:\n raise ValueError('Cannot find filter {0}'.format(fname))\n else:\n fil = UnitFilter.from_ascii(r[0], *args, **kwargs)\n if (interp is True) and (lamb is not None):\n return fil.reinterp(lamb)\n else:\n return fil", "def _load_filter(self, fname, **kwargs):\n with self as current_lib:\n return UnitLickIndex(fname, current_lib._content[fname])", "def add_filter(self, filter_):\n assert has_pil, _(\"Cannot add filters without python PIL\")\n self.cache.basename += filter_.basename\n self._filters.append(filter_)", "def load_filter_file(self, filter_path):\n logger.debug(\"Adding filter file {}\", filter_path)\n try:\n with open(filter_path, \"r\") as filter_file:\n try:\n json_filter_data = json.load(filter_file)\n except Exception as err:\n msg = \"Unable to parse filter file {} as a json file. {!r}\".format(\n filter_path, err)\n logger.debug(msg)\n raise errors.ParserError(msg)\n except IOError:\n raise errors.ParserError(\n \"Unable to access filter path '{}'\".format(filter_path))\n\n if \"version\" not in json_filter_data:\n raise errors.ParserError(\n \"Loading filter-file {} failed. Missing 'version' key.\".format(\n filter_path))\n\n if \"filters\" not in json_filter_data:\n raise errors.ParserError(\n \"Loading filter-file {} failed. Missing 'filters' key.\".format(\n filter_path))\n\n if not isinstance(json_filter_data[\"version\"], dict):\n raise errors.ParserError(\n \"Loading filter-file {} failed. \"\n \"Expecting value of 'version' entry to be a dictionary \"\n \"but instead its a {}.\".format(filter_path,\n type(json_filter_data[\"version\"])))\n\n version_info = json_filter_data[\"version\"]\n\n if \"major\" not in version_info:\n raise errors.ParserError(\n \"Loading filter-file {} failed. \"\n \"Missing 'major' key in 'version' value.\".format(filter_path))\n\n if \"minor\" not in version_info:\n raise errors.ParserError(\n \"Loading filter-file {} failed. \"\n \"Missing 'minor' key in 'version' value.\".format(filter_path))\n\n if not isinstance(version_info[\"major\"], int):\n raise errors.ParserError(\n \"Loading filter-file {} failed. 
\"\n \"Expecting int for major version found {} instead.\".format(\n filter_path, type(version_info[\"major\"])))\n\n if not isinstance(version_info[\"minor\"], int):\n raise errors.ParserError(\n \"Loading filter-file {} failed. \"\n \"Expecting int for minor version found {} instead.\".format(\n filter_path, type(version_info[\"minor\"])))\n\n if version_info[\"major\"] != FILTER_JSON_FORMAT_MAJOR_VERSION:\n raise errors.ParserError(\n \"Loading filter-file {} failed. \"\n \"Found unexpected major version in JSON filter file.\".format(\n filter_path))\n\n self._add_filters(json_filter_data[\"filters\"], filter_path)", "def __init__(self, source, parameter='', file_path=None):\n super().__init__() \n self.filter_type = 'data'\n self.source = source\n self.parameter = parameter\n self._initate_filter_items()\n if file_path:\n self.load_filter_file(file_path)", "def load(self, source: Union[str, Any], key: str) -> None: # type: ignore\n self._logger.info(f\"Loading filter policy model from {source} to {key}\")\n if 'torch' in key:\n model = load_torch_model(source,'filter',device=self._config.device)\n else:\n model = load_model(source, key, self._config.use_remote_models)\n self._items[key] = {\n \"model\": model\n }", "def load_filters(self, names, interp=True, lamb=None, filterLib=None):\n filters = [self._load_filter(fname, interp=interp, lamb=lamb)\n for fname in names]\n return(filters)", "def load_filters(self, names, interp=True, lamb=None, filterLib=None):\n with self as s:\n filters = [s._load_filter(fname, interp=interp, lamb=lamb)\n for fname in names]\n return(filters)", "def add_filter(self, filter_):\n assert has_pil, _(\"Cannot add filters without python PIL\")\n self.cache.basename += filter_.basename\n self._filters.append(filter_)", "def load_filter_file(self, file_path): \n self._pop_all_self()\n self.filter_list = []\n self.file_path = file_path \n \n with codecs.open(self.file_path, 'r', encoding='cp1252') as fid: \n for k, line in enumerate(fid):\n line = line.lstrip('\\n\\r ')\n if line.startswith('#'):\n continue \n split_line = [item.strip() for item in line.split('\\t')]\n if k==0:\n # Header\n header = split_line\n else:\n line_dict = dict(zip(header, split_line))\n self[line_dict['variable']] = SingleFilter(line_dict, self.parameter)\n\n # Save attributes\n for item in self.keys():\n setattr(self, item, self[item])\n \n self.header = sorted(header)\n \n if self.filter_type == 'data':\n self.year_list = [y for y in range(self['YEAR_INTERVAL'].value[0], \n self['YEAR_INTERVAL'].value[1]+1)]", "def loadFilters(ufo):\n preFilters, postFilters = [], []\n for filterDict in ufo.lib.get(FILTERS_KEY, []):\n namespace = filterDict.get(\"namespace\", \"ufo2ft.filters\")\n try:\n filterClass = getFilterClass(filterDict[\"name\"], namespace)\n except (ImportError, AttributeError):\n from pprint import pformat\n\n logger.exception(\"Failed to load filter: %s\", pformat(filterDict))\n continue\n filterObj = filterClass(\n *filterDict.get(\"args\", []),\n include=filterDict.get(\"include\"),\n exclude=filterDict.get(\"exclude\"),\n pre=filterDict.get(\"pre\", False),\n **filterDict.get(\"kwargs\", {}),\n )\n if filterObj.pre:\n preFilters.append(filterObj)\n else:\n postFilters.append(filterObj)\n return preFilters, postFilters", "def load_all_filters(self, interp=True, lamb=None):\n raise NotImplementedError", "def __init__(self, filter: ghidra.program.util.ProgramMergeFilter):\n ...", "def load_custom_filters(environment):\n\n # TODO deprecate ipaddr_index and netmask for the 
better ipnet ones\n filter_list = {\n 'dpkg_arch': filter_dpkg_arch,\n 'storage_size_num': filter_storage_size_num,\n 'ipnet_hostaddr': filter_ipnet_hostaddr,\n 'ipnet_hostmin': filter_ipnet_hostmin,\n 'ipnet_hostmax': filter_ipnet_hostmax,\n 'ipnet_broadcast': filter_ipnet_broadcast,\n 'ipnet_netmask': filter_ipnet_netmask,\n 'ipnet_contains_ip': filter_ipnet_contains_ip,\n 'ipnet_contains_iprange': filter_ipnet_contains_iprange,\n 'ipnet_range_size': filter_ipnet_range_size,\n 'ipaddr_index': filter_ipaddr_index,\n 'netmask': filter_netmask\n }\n\n for name, function in filter_list.items():\n environment.filters[name] = function", "def load_filters(self, filters):\n if not filters:\n return\n if not isinstance(filters, list):\n raise errors.ParserError(\"Expecting 'filters' value to be a list \"\n \"but instead its a {}.\".format(type(filters)))\n for filter_path in filters:\n if not os.path.exists(filter_path):\n raise errors.ParserError(\n \"Filter path '{}' doesn't exist\".format(filter_path))\n elif os.path.isdir(filter_path):\n self._load_filter_directory(filter_path)\n else:\n self.load_filter_file(filter_path)", "def __init__(self, source, parameter='', file_path=None):\n super().__init__() \n self.filter_type = 'tolerance'\n self.source = source\n self.parameter = parameter\n self._initate_filter_items()\n if file_path:\n self.load_filter_file(file_path)", "def _load_filter_directory(self, filter_path):\n try:\n logger.debug(\"Adding filters from directory {}\", filter_path)\n for filter_file in os.listdir(filter_path):\n if filter_file.endswith(\".json\"):\n filter_file_path = os.path.join(filter_path, filter_file)\n self.load_filter_file(filter_file_path)\n else:\n logger.debug(\"Skipping file {} missing .json extension\", filter_path)\n except OSError:\n raise errors.ParserError(\n \"Unable to access filter path '{}'\".format(filter_path))", "def testUsingFilterTool(self):\n pass", "def initialize(module_name):\n \n global filter_function\n global debug\n \n # Get the level of debug\n debug = int(rule_manager.get_property(None, module_name, 'debug'))\n\n filter_function = process_filters.initialize_filter(module_name)\n\n return", "def __init__(self, filters, event_file_path, device_name):\n super().__init__(device_name=device_name)\n self._filters_dict = {}\n self.event_file_path = event_file_path\n self.load_filters(filters)", "def add_filter(self, filter):\n self._filters.append(filter.as_dict())", "def LoadSourceFilter(coverable_file_name):\n \n with open(coverable_file_name, \"r\") as cov_file:\n file_list = [line.strip() for line in cov_file.readlines()]\n return SourceFilter(file_list)", "def load_filters(self):\n buffer_dict = dict(self.named_buffers())\n n = 0\n\n for k in self.phi_f.keys():\n if type(k) != str:\n self.phi_f[k] = buffer_dict['tensor' + str(n)]\n n += 1\n\n for psi_f in self.psi1_f:\n for sub_k in psi_f.keys():\n if type(sub_k) != str:\n psi_f[sub_k] = buffer_dict['tensor' + str(n)]\n n += 1\n\n for psi_f in self.psi2_f:\n for sub_k in psi_f.keys():\n if type(sub_k) != str:\n psi_f[sub_k] = buffer_dict['tensor' + str(n)]\n n += 1", "def getFilterClass(filterName, pkg=\"ufo2ft.filters\"):\n # TODO add support for third-party plugin discovery?\n # if filter name is 'Foo Bar', the module should be called 'fooBar'\n filterName = filterName.replace(\" \", \"\")\n moduleName = filterName[0].lower() + filterName[1:]\n module = importlib.import_module(\".\".join([pkg, moduleName]))\n # if filter name is 'Foo Bar', the class should be called 'FooBarFilter'\n 
className = filterName[0].upper() + filterName[1:]\n if not className.endswith(\"Filter\"):\n className += \"Filter\"\n return getattr(module, className)", "def filter(self, filter):\n self._filter = filter", "def load_all_filters(self, interp=True, lamb=None):\n with self as s:\n filters = [s._load_filter(fname, interp=interp, lamb=lamb)\n for fname in s.content]\n return(filters)", "def load_filter():\n if not os.path.isfile(FILTER):\n print('no filter found, creating square grid')\n return []\n with open(FILTER, 'r') as ff:\n reader = csv.reader(ff)\n l = list(reader)\n ar = numpy.asarray(l)\n # ar = numpy.transpose(ar, (0, 1))\n # ar = numpy.flip(ar, 1)\n # ar = numpy.rot90(ar, k=3, axes=(0, 1))\n # ar = numpy.swapaxes(ar, 0, 1)\n f = list(map(list, ar))\n return f", "def _add_filter(self, filter_list, filter_path):\n if \"name\" not in filter_list:\n raise errors.ParserError(\n \"Loading filter-file {} failed. \"\n \"Missing 'name' key in filter object '{!r}'.\".format(\n filter_path, filter_list))\n if \"regex_match\" not in filter_list:\n raise errors.ParserError(\n \"Loading filter-file {} failed. \"\n \"Missing 'regex_match' key in filter object '{!r}'.\".format(\n filter_path, filter_list))\n\n full_filter_name = _get_full_filter_name(filter_list[\"name\"], filter_path)\n if full_filter_name in self._filters_dict:\n raise errors.ParserError(\"Loading filter-file {} failed. \"\n \"Filter named {} already exists.\".format(\n filter_path, full_filter_name))\n\n try:\n self._filters_dict[full_filter_name] = re.compile(\n filter_list[\"regex_match\"])\n logger.debug(\"Added filter {} from filter file {}\", full_filter_name,\n filter_path)\n except re.error as err:\n raise errors.ParserError(\n \"Loading filter-file {} failed. Unable to\"\n \" compile regular expression value '{}'. 
Error {!r}\".format(\n filter_path, filter_list[\"regex_match\"], err))", "def __getitem__(self, name):\n with self as s:\n try:\n f = s._load_filter(name)\n except TypeError:\n f = [s._load_filter(k) for k in name]\n return f", "def __getitem__(self, name):\n with self as s:\n try:\n f = s._load_filter(name)\n except TypeError:\n f = [s._load_filter(k) for k in name]\n return f", "def __getitem__(self, name):\n with self as s:\n try:\n f = s._load_filter(name)\n except TypeError:\n f = [s._load_filter(k) for k in name]\n return f", "def __init__(self, init=None, filter_table=None, filter_name=None,\n filter_type=None, **kwargs):\n super(MiriFilter, self).__init__(init=init, **kwargs)\n\n # Data type is filter.\n self.meta.filetype = 'FILTER'\n \n # Define the filter name and type, if given\n if filter_name is not None:\n self.meta.instrument.filter = filter_name\n if filter_type is not None:\n self.meta.instrument.filter_type = filter_type\n\n if filter_table is not None:\n try:\n self.filter_table = filter_table\n except (ValueError, TypeError) as e:\n strg = \"filter_table must be a numpy record array or list of records.\"\n strg += \"\\n %s\" % str(e)\n raise TypeError(strg)\n \n # Define the wavelength units.\n# units = self.get_data_units('filter_table')\n \n # Cached arrays\n self._wavelength = None\n self._transmission = None\n self._interptransmission = None", "def __init__(self, image, filter_name, cutoff, order = 0):\n self.filter_name = filter_name\n self.image = image\n if filter_name == 'ideal_l':\n self.filter = self.get_ideal_low_pass_filter\n elif filter_name == 'ideal_h':\n self.filter = self.get_ideal_high_pass_filter\n elif filter_name == 'butterworth_l':\n self.filter = self.get_butterworth_low_pass_filter\n elif filter_name == 'butterworth_h':\n self.filter = self.get_butterworth_high_pass_filter\n elif filter_name == 'gaussian_l':\n self.filter = self.get_gaussian_low_pass_filter\n elif filter_name == 'gaussian_h':\n self.filter = self.get_gaussian_high_pass_filter\n\n self.cutoff = cutoff\n self.order = order", "def filter(ctx, fil, filter_host, filter_port):\n if not fil:\n raise ValueError(\"Must specify at least one filtering operaion (of the form '<filter>=<value>'\")\n client = aceclient.FilterClient(host=filter_host, port=filter_port)\n filters = {}\n for f in fil:\n filters.update(parse_tag(f))\n client.update(**filters)", "def loadFile(filterExt):\n basicFilter = \"*.\" + filterExt\n filePath = fileDialog2(fileFilter=basicFilter, dialogStyle=2, fm=1)\n if(filePath != None):\n #openfile = open('/Users/camtton/Desktop/drawing.svg', 'r')\n tokens = getSVGpath(filePath[0])\n return tokens\n else:\n print 'Please select a %s file'%(filterExt)", "def setFilter(self, afilter):\n\n if afilter in (self.FilterU, self.FilterG, self.FilterR, self.FilterI, self.FilterZ, self.FilterY):\n self.filter = afilter\n else:\n raise ValueError(\"No '%s' filter.\" % afilter)", "def register(self, filter_name, filter_func):\n self._filters[filter_name] = filter_func", "def filter(self, filters):", "def load_scans_filter(img_org, filterdata):\n\n # check which filter will be used and apply that one\n filter = filterdata['filtername']\n if filter == 'gaussian':\n sigma = filterdata['parameters'][0]\n smoothed_img = calc_gaussian(img_org, sigma=sigma)\n elif filter == 'median':\n radius = filterdata['parameters'][0]\n smoothed_img = calc_median(img_org, radius=radius)\n elif filter == 'curvatureflow':\n iter = filterdata['parameters'][0]\n timestep = filterdata['parameters'][1]\n 
smoothed_img = calc_curvatureflow(img_org, iteration=iter, step=timestep)\n elif filter == 'anisodiff':\n iter = filterdata['parameters'][0]\n timestep = filterdata['parameters'][1]\n conductance = filterdata['parameters'][2]\n smoothed_img = calc_anisodiff(img_org, iteration=iter, step=timestep, conductance=conductance)\n else:\n print('The filtername does not exist.')\n\n return smoothed_img", "def __init__(self, filters, use_include_order):\n self.filters = filters\n self.use_include_order = use_include_order", "def __init__(self) -> None:\r\n self.filters: list[Filter] = []", "def __init__(self, image, filter_name, cutoff, order = 0):\r\n self.image = image\r\n if filter_name == 'ideal_l':\r\n self.filter = self.get_ideal_low_pass_filter\r\n elif filter_name == 'ideal_h':\r\n self.filter = self.get_ideal_high_pass_filter\r\n elif filter_name == 'butterworth_l':\r\n self.filter = self.get_butterworth_low_pass_filter\r\n elif filter_name == 'butterworth_h':\r\n self.filter = self.get_butterworth_high_pass_filter\r\n elif filter_name == 'gaussian_l':\r\n self.filter = self.get_gaussian_low_pass_filter\r\n elif filter_name == 'gaussian_h':\r\n self.filter = self.get_gaussian_high_pass_filter\r\n\r\n self.cutoff = cutoff\r\n self.order = order\r\n self.filter_name = filter_name", "def load_all_filters(self, interp=True, lamb=None):\n return [self._load_filter(k, interp=interp, lamb=lamb)\n for k in self.content]", "def _initialize_data_filter(self):\n df_params = self._loading_params.copy()\n df_params[\"filter_negate\"] = True\n df_params[\"filter_upper\"] = True\n self._data_filter = LoadProcessedData(**df_params)", "def get_filter(name):\n try:\n return FILTERS[name.upper()]\n except:\n msg = 'Unknown model of filter {}, options are {}'\n raise ValueError(msg.format(name, list(FILTERS.keys())))", "def load_filter(filename):\n # parse config file\n if not os.path.isfile(filename):\n raise IOError('File \"%s\" does not exist' % filename)\n try:\n f = open(filename)\n except IOError:\n raise IOError('Could not open file \"%s\"' % filename)\n\n cfg_items = []\n for (i, line) in enumerate(f):\n try:\n # remove all comments and unnecessary whitespace\n normalizer = shlex.shlex(line)\n normalizer.wordchars += '.-'\n normal_line = ' '.join([t for t in normalizer])\n if normal_line:\n # split up normalized line and build dictionary\n cfg_item = {}\n for part in normal_line.split(','):\n cfg_split = shlex.split(part)\n key = cfg_split.pop(0)\n value = cfg_split\n cfg_item[key] = value\n cfg_items.append(cfg_item)\n except (IndexError, ValueError):\n raise RuntimeError( \\\n 'Could not parse line %i of file \"%s\"' % (i, filename))\n\n # look for global bit settings\n bits_global = None\n factor_bits_global = None\n norm_bits_global = None\n for cfg_item in cfg_items:\n if 'bits_global' in cfg_item:\n if bits_global is None:\n [bits_global] = cfg_item.pop('bits_global')\n bits_global = int(bits_global)\n else:\n raise RuntimeError( \\\n 'bits_global must not be specified more than once')\n if 'factor_bits_global' in cfg_item:\n if factor_bits_global is None:\n [factor_bits_global] = cfg_item.pop('factor_bits_global')\n factor_bits_global = int(factor_bits_global)\n else:\n raise RuntimeError( \\\n 'factor_bits_global must not be specified more than once')\n if 'norm_bits_global' in cfg_item:\n if norm_bits_global is None:\n [norm_bits_global] = cfg_item.pop('norm_bits_global')\n norm_bits_global = int(norm_bits_global)\n else:\n raise RuntimeError( \\\n 'norm_bits_global must not be specified more 
than once')\n\n # remove empty items from cfg_items, only node definitions should be left\n cfg_items = filter(None, cfg_items)\n\n # look for filter nodes\n filter_nodes = {}\n adjacency = {}\n input_node = None\n output_node = None\n for cfg_item in cfg_items:\n # mandatory settings\n try:\n [node] = cfg_item['node']\n except KeyError:\n raise RuntimeError('Node type not specified')\n try:\n [name] = cfg_item['name']\n except KeyError:\n raise RuntimeError('Name not specified')\n # optional settings\n if 'bits' in cfg_item:\n [bits] = map(int, cfg_item['bits'])\n else:\n bits = bits_global\n if 'connect' in cfg_item:\n connect = cfg_item['connect']\n else:\n connect = []\n if 'input' in cfg_item:\n if input_node is None:\n input_node = name\n else:\n raise RuntimeError('More than one input node specified')\n if 'output' in cfg_item:\n if output_node is None:\n output_node = name\n else:\n raise RuntimeError('More than one output node specified')\n\n # make filter node\n if name not in filter_nodes:\n if bits is not None:\n if node == 'Const':\n filter_nodes[name] = Const(bits)\n elif node == 'Add':\n filter_nodes[name] = Add(bits)\n elif node == 'Delay':\n filter_nodes[name] = Delay(bits)\n elif node == 'Multiply':\n if 'factor_bits' in cfg_item:\n [factor_bits] = cfg_item['factor_bits']\n factor_bits = int(factor_bits)\n else:\n factor_bits = factor_bits_global\n if 'norm_bits' in cfg_item:\n [norm_bits] = cfg_item['norm_bits']\n norm_bits = int(norm_bits)\n else:\n norm_bits = norm_bits_global\n if (factor_bits is not None and norm_bits is not None):\n filter_nodes[name] = Multiply(\n bits, factor_bits, norm_bits)\n if 'factor' in cfg_item:\n [factor] = cfg_item['factor']\n factor = float(factor)\n filter_nodes[name].set_factor(factor, norm=True)\n else:\n raise ValueError('Unknown node type: %s' % node)\n else:\n raise RuntimeError('Number of bits for node \"%s\" not specified' \\\n % name)\n adjacency[name] = connect\n else:\n raise RuntimeError('Node \"%s\" already present' % name)\n\n # make filter\n if input_node is None:\n raise RuntimeError('No input node specified')\n elif output_node is None:\n raise RuntimeError('No output node specified')\n else:\n return Filter(filter_nodes, adjacency, input_node, output_node)", "def test_filters_with_extra_extraction(self) -> None:\n\n # pylint: disable=too-many-locals\n\n import hammer_config\n\n tech_dir, tech_dir_base = HammerToolTestHelpers.create_tech_dir(\"dummy28\")\n tech_json_filename = os.path.join(tech_dir, \"dummy28.tech.json\")\n\n def add_named_library(in_dict: Dict[str, Any]) -> Dict[str, Any]:\n out_dict = deepdict(in_dict)\n out_dict[\"libraries\"].append({\n \"name\": \"abcdef\",\n \"milkyway techfile\": \"test/abcdef.tf\"\n })\n return out_dict\n\n HammerToolTestHelpers.write_tech_json(tech_json_filename, add_named_library)\n sys.path.append(tech_dir_base)\n tech = self.get_tech(hammer_tech.HammerTechnology.load_from_dir(\"dummy28\", tech_dir))\n tech.cache_dir = tech_dir\n\n def filter_func(lib: hammer_tech.Library) -> bool:\n return lib.milkyway_techfile is not None\n\n def paths_func(lib: hammer_tech.Library) -> List[str]:\n assert lib.milkyway_techfile is not None\n return [lib.milkyway_techfile]\n\n def extraction_func(lib: hammer_tech.Library, paths: List[str]) -> List[str]:\n assert len(paths) == 1\n if lib.name is None:\n name = \"\"\n else:\n name = str(lib.name)\n return [json.dumps({\"path\": paths[0], \"name\": name}, cls=HammerJSONEncoder, indent=4)]\n\n def sort_func(lib: hammer_tech.Library):\n assert 
lib.milkyway_techfile is not None\n return lib.milkyway_techfile\n\n test_filter = LibraryFilter.new(\"metatest\", \"Test filter that extracts metadata\",\n is_file=True, filter_func=filter_func,\n paths_func=paths_func,\n extraction_func=extraction_func,\n sort_func=sort_func)\n\n database = hammer_config.HammerDatabase()\n tech.set_database(database)\n raw = tech.process_library_filter(pre_filts=[], filt=test_filter,\n must_exist=False,\n output_func=hammer_tech.HammerTechnologyUtils.to_plain_item)\n\n # Disable false positive from pylint\n outputs = list(map(lambda s: json.loads(s), raw)) # pylint: disable=unnecessary-lambda\n self.assertEqual(outputs,\n [\n {\"path\": tech.prepend_dir_path(\"test/abcdef.tf\"), \"name\": \"abcdef\"},\n {\"path\": tech.prepend_dir_path(\"test/coconut\"), \"name\": \"\"},\n {\"path\": tech.prepend_dir_path(\"test/soy\"), \"name\": \"\"}\n ])\n\n # Cleanup\n shutil.rmtree(tech_dir_base)", "def get_filters(self):", "def setFilter(self, type: int, filter: int) -> None:\n ...", "def addAutoSaveFilter(filter):", "def load_filter_dict(reload=True):\n\n # Get location of filter.json\n json_directory = pathlib.Path(__file__).parent\n json_file = pathlib.Path.joinpath(json_directory, \"filters.json\")\n\n # Reload the filters JSON file if present and requested\n if reload and json_file.is_file():\n with open(json_file, \"r\") as fp:\n return json.load(fp)\n\n # Get html from main filter page, ft=4 ensures all filters are present\n hdr = {\n \"User-Agent\": \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) \"\n \"Chrome/23.0.1271.64 Safari/537.11\"\n }\n url = \"https://finviz.com/screener.ashx?ft=4\"\n req = urllib.request.Request(url, headers=hdr)\n with urllib.request.urlopen(req) as response:\n html = response.read().decode(\"utf-8\")\n\n # Parse html and locate table we are interested in.\n # Use one of the text values and get the parent table from that\n bs = BeautifulSoup(html, \"html.parser\")\n filters_table = None\n for td in bs.find_all(\"td\"):\n if td.get_text().strip() == \"Exchange\":\n filters_table = td.find_parent(\"table\")\n if filters_table is None:\n raise Exception(\"Could not locate filter parameters\")\n\n # Delete all div tags, we don't need them\n for div in filters_table.find_all(\"div\"):\n div.decompose()\n\n # Populate dict with filtering options and corresponding filter tags\n filter_dict = {}\n td_list = filters_table.find_all(\"td\")\n\n for i in range(0, len(td_list) - 2, 2):\n current_dict = {}\n if td_list[i].get_text().strip() == \"\":\n continue\n\n # Even td elements contain filter name (as shown on web page)\n filter_text = td_list[i].get_text().strip()\n\n # Odd td elements contain the filter tag and options\n selections = td_list[i + 1].find(\"select\")\n filter_name = selections.get(\"data-filter\").strip()\n\n # Store filter options for current filter\n options = selections.find_all(\"option\", {\"value\": True})\n for opt in options:\n # Encoded filter string\n value = opt.get(\"value\").strip()\n\n # String shown in pull-down menu\n text = opt.get_text()\n\n # Filter out unwanted items\n if value is None or \"Elite\" in text:\n continue\n\n # Make filter string and store in dict\n current_dict[text] = f\"{filter_name}_{value}\"\n\n # Store current filter dict\n filter_dict[filter_text] = current_dict\n\n # Save filter dict to finviz directory\n try:\n with open(json_file, \"w\") as fp:\n json.dump(filter_dict, fp)\n except Exception as e:\n print(e)\n print(\"Unable to write to 
file{}\".format(json_file))\n\n return filter_dict", "def get_filters() -> dict:\n if environment is None or not hasattr(environment, 'loader'):\n return {}\n return environment.filters", "def add_filter(self, f):\n raise NotImplementedError", "def __init__( self, filters=None, prx=None ):\n\n if filters is None:\n if prx is None:\n\n self._filter_list = rts2_wwwapi.rts2comm().get_filters()\n\n elif type(filters) == list:\n self._filter_list = filters\n\n elif type(filters) == dict:\n raise TypeError(\"Filters are should not be a dict, it probably should be None\")\n # this assumes that the keywords of the dictionary are \n # the fitler names and the value is the filter number. \n\n\n #sort by filter number and reverse look up. \n # this doesn't work in python3\n #for key, value in sorted(filters.iteritems(), key=lambda (k,v): (v,k)):\n #self._filter_list.append( key )\n\n elif type(filters) == str or type(filters) == unicode:\n self._filter_list = str(filters).split()\n\n else:\n raise TypeError(\"Unexpected filter type {}, type must be string, unicode, list or dict\".format(type(filters)))", "def load_transmission_data(filter_id, cache_dir=CACHE_DIR):\n facility, instrument, filter_name = re.split('/|\\.', filter_id)\n transmission_data_loc = os.path.join(cache_dir, facility, instrument,\n '{0}.vot'.format(filter_name))\n\n # When no such filter votable is present\n if not os.path.exists(transmission_data_loc):\n raise IOError(\"No data found in the cache directory ({0}) for the \"\n \"requested filter ID: {1}. Use download_transmission_data() \"\n \"to download it to the cache.\".format(cache_dir, filter_id))\n\n transmission_df = df_from_votable(transmission_data_loc)\n detector_type = detector_type_from_votable(transmission_data_loc)\n\n return transmission_df, detector_type", "def fromfile(cls, f):\n raise NotImplementedError(\"ScalableRedisLocalBloomFilter not support fromfile\")", "def __init__(self, classname=None, jobject=None, options=None):\n if jobject is None:\n jobject = Filter.new_instance(classname)\n self.enforce_type(jobject, \"weka.filters.Filter\")\n super(Filter, self).__init__(jobject=jobject, options=options)", "def addAutoSaveRestoreFilter(filter):", "def unpack(self, filter_file_type=\".dat\", verbose=False):\n\n if hasattr(self, \"phot\"):\n filter_names = np.unique(self.phot[\"filter\"])\n\n self.phot.add_index('filter', unique = True)\n\n\n for filter_name in filter_names:\n\n phot_table = self.phot.loc[\"filter\", filter_name]\n filter_filename = filter_name + filter_file_type\n if verbose: print(filter_filename)\n if verbose: print(phot_table)\n if verbose: print(type(filter_name), type(filter_file_type))\n\n # phot_table.meta = {\"filter_filename\": filter_filename}\n phot_table.meta[\"filter_filename\"] = filter_filename\n if not isinstance(phot_table, Row):\n # if len(np.unique(self.phot.loc[\"filter\", filter_name][\"MJD\"])) > 1:\n indices = phot_table.argsort(\"MJD\")\n # for column_name in phot_table.colnames:\n # phot_table[column_name] = phot_table[column_name][indices]\n sorted_phot_table = Table([phot_table[column_name][indices] for column_name in phot_table.colnames])\n else:\n sorted_phot_table = phot_table\n\n filter_key = np.unique(phot_table[\"filter\"])[0]\n\n if len(np.unique(phot_table[\"filter\"])) > 1 or filter_key != filter_name:\n raise errors.FilterMismatchError(\"There is a more than one filterdata in here! 
or there is a mismatch with filename\")\n path_to_filter = os.path.join(self.filter_directory, phot_table.meta['filter_filename'])\n\n # def load_filter(path, cmap = False, verbose = False):\n #\n if utils.check_file_path(os.path.abspath(path_to_filter)):\n filter_object = FilterClass()\n filter_object.read_filter_file(os.path.abspath(path_to_filter), verbose = verbose)\n filter_object.calculate_AB_zp()\n else:\n warnings.warn(\"Couldn't load the filter\")\n\n self.data_filters[filter_key] = filter_object\n\n self.data[filter_name] = sorted_phot_table\n\n self.filter_names = filter_names\n\n else:\n warnings.warn(\"Doesn't seem to be any data here (empty self.data)\")\n\n pass", "def init(self, *args):\n return _ida_hexrays.udc_filter_t_init(self, *args)", "def module(filter_):\n def decorator(module_fn):\n \"\"\"Decorates a module function.\"\"\"\n _FILTERS_AND_SAMPLERS.append((filter_, module_fn))\n return module_fn\n return decorator", "def addFilenameFilter(call, args=(), kwargs={}, nodeClass='*'):", "def import_data_filter_options(data_url, column_filter, authorization_token):\n # filter_url = data_url\n headers = {'content-type': 'application/json',\n 'Authorization': authorization_token}\n response = requests.get(data_url, headers=headers, verify=False)\n get_json = json.loads(response.content)\n data = get_json\n data_filters = data[column_filter]\n return HttpResponse(data_filters, content_type=\"application/json\")", "def _add_filters(self, filter_list, filter_path):\n if not isinstance(filter_list, list):\n raise errors.ParserError(\n \"Loading filter-file {} failed. \"\n \"Expecting value of 'filter_list' entry to be a list \"\n \"but instead its a {}.\".format(filter_path, type(filter_list)))\n\n for cur_filter in filter_list:\n self._add_filter(cur_filter, filter_path)", "def add_filter(self, name: str, value: any):\n self.filters[name] = value", "def add(self, new_filter: Filter) -> None:\r\n self.filters.append(new_filter)", "def __init__(self, module_plugin_filters=None):\n if module_plugin_filters is None:\n module_plugin_filters = []\n module_plugin_filters = util.return_list(module_plugin_filters)\n self.loaded_modules = set()\n self.processed_filepaths = dict()\n self.module_plugin_filters = module_plugin_filters\n self._log = logging.getLogger(__name__)\n self._error_string = 'pluginmanager unable to import {}\\n'", "def _read_filters(self, path):\n blob = utils.read_blob_file_contents(path)\n try:\n rules = json.loads(blob)\n except ValueError as e:\n msg = _(\n \"An error occurred when reading filters from file \"\n \"%(path)s: %(error)s\"\n ) % {\"path\": path, \"error\": e}\n raise exceptions.CommandError(msg)\n else:\n return rules", "def add_filter(self, f, **kwargs):\n if not isinstance(f, UnitFilter):\n msg = \"Argument of type Filter expected. Got type {0}\"\n raise TypeError(msg.format(type(f)))\n\n if f.wavelength_unit is None:\n msg = \"Filter wavelength must have units for storage.\"\n raise AttributeError(msg)\n\n append = kwargs.pop('append', True)\n\n f.write_to(\"{0:s}\".format(self.source),\n tablename='/filters/{0}'.format(f.name),\n createparents=True, append=append,\n **kwargs)", "def parse_filters(filters_str):\n fltrs = []\n for part in str(filters_str).lower().split(\",\"):\n if part==\"blur\":\n fltrs.append(filters.blur(1))\n elif part==\"distort\":\n fltrs.append(filters.distort(18))\n\n return fltrs", "def get_filters():\n print('Hello! 
Let\\'s explore some US bikeshare data!')", "def filter_array(image: Image, filter_id: str) -> Image:\n \n if filter_id == \"3\":\n image = three_tone(image,\"aqua\",\"blood\",\"lemon\")\n elif filter_id == \"X\":\n image = extreme_contrast(image)\n elif filter_id == \"T\":\n image = sepia_filter(image)\n elif filter_id == \"P\":\n image = posterize(image)\n elif filter_id == \"E\":\n image = detect_edges(image,15)\n elif filter_id == \"V\":\n image = flip_vertical(image)\n elif filter_id == \"H\":\n image = flip_horizontal(image)\n \n return image", "def use_filter(filter_func, url, input):\n output = filter_func(url, input)\n\n if output is None:\n # If the filter does not return a value, it is\n # assumed that the input does not need filtering.\n # In this case, we simply return the input.\n return input\n\n return output", "def load_filter_evaluation(db_path):\n engine = create_engine('sqlite:///' + db_path)\n return pd.read_sql_table(TmFilterEval.__tablename__, engine)", "def prepend_crds_filter(self, filter):\n if filter not in self.filters:\n self.filters = [filter] + self.filters", "def __init__(self, filter,\n double_precision = False):\n filters.check_is_filter(filter)\n (f, i) = filter\n filt_len = f.shape[0]\n assert filt_len == i * 2 + 1 # check it's symmetric\n dtype = (torch.float64 if double_precision else torch.float32)\n # the shape is (out_channels, in_channels, width),\n # where out_channels and in_channels are both 1.\n self.filt = torch.tensor(f, dtype=dtype).view(1, 1, filt_len)\n self.padding = i", "def lazy_loader(self, entry):\n lookup = plugin.get('api_bluray', self).lookup\n\n try:\n with Session() as session:\n title, year = split_title_year(entry['title'])\n movie = lookup(title=title, year=year, session=session)\n entry.update_using_map(self.field_map, movie)\n except LookupError:\n log_once('Bluray lookup failed for %s' % entry['title'], logger, 'WARNING')", "def initialize_filter(self):\n shape = self.filter_size + (self.input_shape[-1], self.channels)\n self.filter = self.filter_initializer(shape)", "def get_data_filter(args):\n diff_data(args, \"filter\")", "def decorator(module_fn):\n _FILTERS_AND_SAMPLERS.append((filter_, module_fn))\n return module_fn", "def setup_ignore_filter(options):\n if not options.ignore:\n return None\n\n # load the filter from the file, giving it access to the logger\n #\n gdict = { 'logger':logger }\n try:\n execfile(options.ignore, gdict)\n except (OSError, IOError), e:\n logger.info('Could not read ignore_filter file: %s' % str(e))\n\n return gdict.get('ignore_filter')", "def LP_filt(filterLength, x):\n b=np.ones(filterLength,)/(filterLength) #Finite Impulse Response (FIR) Moving Average (MA) filter with one second filter length\n a=1\n y = signal.filtfilt(b, a, x)\n return y", "def filter(self, filter_dict):\n pass", "def add_filters(fnames):\n with Database(writable=True) as base:\n for fname in fnames:\n with open(fname, 'r') as f_fname:\n filter_name = f_fname.readline().strip('# \\n\\t')\n filter_type = f_fname.readline().strip('# \\n\\t')\n filter_description = f_fname.readline().strip('# \\n\\t')\n filter_table = np.genfromtxt(fname)\n # The table is transposed to have table[0] containing the\n # wavelength and table[1] containing the transmission.\n filter_table = filter_table.transpose()\n # We convert the wavelength from Å to nm.\n filter_table[0] *= 0.1\n\n print(\"Importing {}... 
({} points)\".format(filter_name,\n filter_table.shape[1]))\n\n new_filter = Filter(filter_name, filter_description, filter_type,\n filter_table)\n\n # We normalise the filter and compute the effective wavelength.\n # If the filter is a pseudo-filter used to compute line fluxes, it\n # should not be normalised.\n if not filter_name.startswith('PSEUDO'):\n new_filter.normalise()\n else:\n new_filter.effective_wavelength = np.mean(\n filter_table[0][filter_table[1] > 0]\n )\n\n base.add_filter(new_filter)", "def set_scanning_filter(self, **kwargs):\n for k, v in kwargs.get(\"filters\", {}).items():\n if k == \"UUIDs\":\n self._filters[k] = Variant(\"as\", v)\n elif k == \"RSSI\":\n self._filters[k] = Variant(\"n\", v)\n elif k == \"Pathloss\":\n self._filters[k] = Variant(\"n\", v)\n elif k == \"Transport\":\n self._filters[k] = Variant(\"s\", v)\n elif k == \"DuplicateData\":\n self._filters[k] = Variant(\"b\", v)\n elif k == \"Discoverable\":\n self._filters[k] = Variant(\"b\", v)\n elif k == \"Pattern\":\n self._filters[k] = Variant(\"s\", v)\n else:\n logger.warning(\"Filter '%s' is not currently supported.\" % k)\n\n if \"Transport\" not in self._filters:\n self._filters[\"Transport\"] = Variant(\"s\", \"le\")", "def __init__(self, type: int, filter: int):\n ...", "def get_module_plugin_filters(self, filter_function=None):\n if filter_function is None:\n return self.module_plugin_filters\n else:\n return filter_function(self.module_plugin_filters)", "def append_crds_filter(self, filter):\n if filter not in self.filters:\n self.filters = self.filters + [filter]", "def _set_filter_type(filter):\n if filter == 'nat':\n return '-N'\n if filter == 'options':\n return '-O'\n if filter == 'filter':\n return '-R'", "def apply_filter(image, filter):\n filteredImage = create_image(image)\n # Apply Filter:\n # FIXME!\n \n return filteredImage", "def __init__(self, filter: Filter, matcher: Matcher, storage_manager: StorageManager):\n super().__init__()\n self.filter = filter\n self.matcher = matcher\n self.storage_manager = storage_manager", "def add_filter(self, label):\n if label not in self.FILTER:\n if \"PASS\" in self.FILTER:\n self.FILTER = [f for f in self.FILTER if f != \"PASS\"]\n self.FILTER.append(label)", "def setFilter(self, c: Cmdr, obj: Any, w: Wrapper, tag: str) -> None:\n # w's type is in (DynamicWindow,QMinibufferWrapper,LeoQtLog,LeoQtTree,\n # QTextEditWrapper,LeoQTextBrowser,LeoQuickSearchWidget,cleoQtUI)\n assert isinstance(obj, QtWidgets.QWidget), obj\n theFilter = qt_events.LeoQtEventFilter(c, w=w, tag=tag)\n obj.installEventFilter(theFilter)\n w.ev_filter = theFilter # Set the official ivar in w.", "def gofilter(self, filt):\n if self.status != \"not connected\":\n m = self.serial\n m.write(\"filter \" + str(filt) + \"\\r\\n\")\n m.read(100)\n result = \"out.monochrom: Moving to filter \" + str(filt)\n return filt\n else:\n pass", "def from_url(cls, url: str, filter: int = None, image_kwargs: dict | None = None):\n filename = get_url(url, progress_bar=True)\n return cls(filename, filter=filter, image_kwargs=image_kwargs)", "def filter(self, filterstring):\n if filterstring not in self.FILTERS:\n raise ValueError(\"{dataset} must be in {d for d in self.DATASETS}\")\n self.query[\"filter\"] = filterstring\n return self", "def parse_filter(value):\n\n if value.endswith('+pol'):\n pol = True\n value = value[:-4]\n else:\n pol = False\n\n if value in ufti_filters:\n return (ufti_filters[value], pol)\n\n else:\n logger.warning('Filter ' + value + ' is not recognised')\n return 
(None, pol)" ]
[ "0.7596461", "0.73965544", "0.70921934", "0.6901158", "0.6694745", "0.6442038", "0.6273887", "0.6259627", "0.62564397", "0.6248318", "0.623958", "0.62291145", "0.6115416", "0.6082468", "0.6026562", "0.59056175", "0.5899553", "0.5848221", "0.58212197", "0.5782896", "0.57552046", "0.5752952", "0.56564146", "0.56331563", "0.55728614", "0.5538719", "0.55260974", "0.5519062", "0.5502095", "0.5500817", "0.5460378", "0.54511577", "0.54511577", "0.54511577", "0.54506004", "0.5443221", "0.54165864", "0.5379771", "0.53740144", "0.53639954", "0.5357821", "0.533804", "0.532377", "0.5311007", "0.5295839", "0.52916557", "0.5287804", "0.5282199", "0.5277942", "0.5265372", "0.5249366", "0.5235815", "0.52264756", "0.5213272", "0.5200184", "0.51746666", "0.51382035", "0.51351434", "0.51333123", "0.51231253", "0.51163775", "0.51133657", "0.51120037", "0.5109653", "0.5107596", "0.5105362", "0.50954485", "0.5060299", "0.5032171", "0.5020665", "0.50042874", "0.49992806", "0.49986795", "0.49953842", "0.49668667", "0.4965068", "0.49515226", "0.49475682", "0.49277323", "0.4896555", "0.48932195", "0.4887694", "0.48744595", "0.485354", "0.48380676", "0.4836589", "0.4830282", "0.48283353", "0.48242655", "0.4822263", "0.4803276", "0.4802775", "0.48013592", "0.4792464", "0.4788132", "0.47844356", "0.47747958", "0.47722235", "0.47666115", "0.47635168" ]
0.6228943
12
Scan for independent loops and set up dictionaries.
def main(self, verbose=0):
    indepdict=self.scan_for_loop(self.indeploop)
    pegdict1 = self.scan_for_loop(self.pegloop1)
    pegdict2 = self.scan_for_loop(self.pegloop2)
    if len(indepdict.keys()) == 0 and len(pegdict1.keys()) == 0 and len(pegdict2.keys()) == 0:
        return dict()
    alldict = dict(indepdict)
    alldict.update(pegdict1)
    alldict.update(pegdict2)
    indepcomb=self.get_combo_list(indepdict, 0)
    pegcomb1=self.get_combo_list(pegdict1, 1)
    pegcomb2=self.get_combo_list(pegdict2, 1)
    allcombs = self.combine_three_combo_lists(indepcomb, pegcomb1, pegcomb2)
    datasets = self.prepare_looped_datasets(alldict, allcombs)
    createdfiles = self.create_input_files(datasets)
    if verbose == 1:
        self.print_list(indepcomb)
        self.print_list(pegcomb1)
        self.print_list(pegcomb2)
        self.print_list(allcombs)
        for datakey in datasets:
            self.print_list(datasets[datakey])
    return createdfiles
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def prepare_looped_datasets(self, alldict, allcombs):\n datasets_dict=dict()\n numcombs = len(allcombs)\n combct = 0\n while combct < numcombs:\n newdata = list(self.baseinput.data)\n loopedlines = dict()\n loopedlines = self.prepare_looped_lines(alldict, allcombs[combct])\n for lvalidx in loopedlines.keys():\n newdata[lvalidx] = loopedlines[lvalidx]\n datasets_dict[combct] = newdata\n combct = combct + 1\n return datasets_dict", "def iterate():\n # States are of the form (coordinates, word so far, used spots)\n # Load the initial states into the stack\n global theStack\n for r,layer in enumerate(honeycomb):\n for e,el in enumerate(layer):\n theStack.append( ((e,r), [el],set([(e,r)])) )\n \n while (len(theStack) != 0):\n #pop the next run\n (e,r),soFar,used=theStack[-1]\n theStack=theStack[:-1]\n #run it!\n step((e,r),soFar,used)", "def find_loop_nest_with_map(kernel: LoopKernel) -> Mapping[str, AbstractSet[str]]:\n result = {}\n\n from loopy.kernel.data import ConcurrentTag, IlpBaseTag\n\n all_nonpar_inames = {\n iname for iname in kernel.all_inames()\n if not kernel.iname_tags_of_type(iname,\n (ConcurrentTag, IlpBaseTag))}\n\n iname_to_insns = kernel.iname_to_insns()\n\n for iname in all_nonpar_inames:\n result[iname] = {other_iname\n for insn in iname_to_insns[iname]\n for other_iname in kernel.insn_inames(insn) & all_nonpar_inames}\n\n return result", "def multiple_eval_for_loops_v2():", "def multiple_eval_for_loops_v1():", "def initialize_sets(self):\n for block in self.blocks:\n # Insert phi nodes from SSA stage into the assignments of the block\n for phi in block.phis:\n block.gen.setdefault(phi, []).insert(0, phi)\n\n # Update the kill set with the variables that are assigned to in\n # the block\n block.kill = set(block.gen)\n block.output = set(block.gen)\n #for entry in block.bound:\n # block.i_kill |= self.assmts[entry].bit\n\n for assmts in self.assmts.itervalues():\n self.entry_point.i_gen |= assmts.bit\n self.entry_point.i_output = self.entry_point.i_gen", "def __calculate_iterations(self):\n iterables = {}\n \n def get_type(type_options):\n key = self._config_dict[type_options].keys()[0]\n data = self._config_dict[type_options][key]\n \n if type(data) == dict:\n iterables[key] = [data]\n else:\n iterables[key] = data\n \n for config_type in self.__iterables:\n get_type(config_type)\n \n self.__iterables = [dict(zip(iterables, v)) for v in product(*iterables.values())] # Calculates the cartesian product of all the lists to iterate to generate permutations.\n self.__iterables_counter = 0", "def find_loop_nest_around_map(kernel: LoopKernel) -> Mapping[str, AbstractSet[str]]:\n result: Dict[str, Set[str]] = {}\n\n all_inames = kernel.all_inames()\n\n iname_to_insns = kernel.iname_to_insns()\n\n # examine pairs of all inames--O(n**2), I know.\n from loopy.kernel.data import IlpBaseTag\n for inner_iname in all_inames:\n result[inner_iname] = set()\n for outer_iname in all_inames:\n if inner_iname == outer_iname:\n continue\n\n if kernel.iname_tags_of_type(outer_iname, IlpBaseTag):\n # ILP tags are special because they are parallel tags\n # and therefore 'in principle' nest around everything.\n # But they're realized by the scheduler as a loop\n # at the innermost level, so we'll cut them some\n # slack here.\n continue\n\n if iname_to_insns[inner_iname] < iname_to_insns[outer_iname]:\n result[inner_iname].add(outer_iname)\n\n for dom in kernel.domains:\n for outer_iname in dom.get_var_names(isl.dim_type.param):\n if outer_iname not in all_inames:\n continue\n\n for inner_iname 
in dom.get_var_names(isl.dim_type.set):\n result[inner_iname].add(outer_iname)\n\n return result", "def NM08_model_loop(root, run_dict, res_dict, dual_list, perm_tup, machine,\n decimate=100, i=1, verbose=False):\n if machine == 'laptop':\n fz_file_pat = '/home/chet/gmt/data/NZ/wells/feedzones/' \\\n 'NM08_feedzones_?.csv'\n T_file = '/home/chet/data/mrp_data/Steve_Sewell_MRP_PhD_Data/' \\\n 'Natural_State_Temperatures/NM08_profile_pyfehm_comma.txt'\n excel_file = '/home/chet/data/mrp_data/well_data/flow_rates/' \\\n 'July_2017_final/Merc_Ngatamariki.xlsx'\n elif machine == 'server':\n fz_file_pat = '/Users/home/hoppche/data/merc_data/wells/' \\\n 'NM08_feedzones_?.csv'\n T_file = '/Users/home/hoppche/data/merc_data/temps/' \\\n 'NM08_profile_pyfehm_comma.txt'\n excel_file = '/Users/home/hoppche/data/merc_data/flows/' \\\n 'Merc_Ngatamariki.xlsx'\n # Make the directory for this object\n print('Making grid')\n # Extract just floats and exponent from perms\n work_dir = '{}/run_{}'.format(root, i)\n dat = make_NM08_grid(work_dir=work_dir, log_base=3, max_range=15)\n print('Assigning reservoir parameters')\n dat = reservoir_params(dat, temp_file=T_file, reservoir_dict=res_dict,\n show=False)\n print('Defining well nodes')\n dat = define_well_nodes(\n dat, well_file_pattern=fz_file_pat,\n well_name='NM08', type='injection', surf_loc=[1500., 1500.])\n print('Running initial condition')\n dat = run_initial_conditions(dat)\n dat = set_well_boundary(\n dat, excel_file=excel_file, sheet_name='NM08 Stimulation',\n well_name='NM08', dates=[datetime(2012, 6, 7), datetime(2012, 7, 12)],\n t_step='day', decimate=decimate, debug=0)\n dat = set_stress(dat)\n dat = set_dual(dat, zonelist=['tahorakuri'], dual_list=dual_list)\n if perm_tup:\n dat = set_permmodel(dat, zonelist=['tahorakuri'], index=perm_tup[0],\n permmodel_dict=perm_tup[1])\n model_run(dat, run_dict, verbose=verbose)\n return", "def createAllDictionaries(self):\r\n self.makeSentenceLengths()\r\n self.makeWords()\r\n self.makeStems()\r\n self.makeGerund()\r\n self.makeWordLengths()", "def algorithm_loop(self):", "def organise_scans(self):\n self.wh_to_th = {}\n self.th_to_wh = {}\n\n wh_to_th_metrics = []\n th_to_wh_metrics = []\n wh_to_th_params = {}\n th_to_wh_params = {}\n wh_to_th_minim_info = {}\n th_to_wh_minim_info = {}\n wh_to_th_minim_info['time'] = []\n wh_to_th_minim_info['iterations'] = []\n wh_to_th_minim_info['funcevals'] = []\n wh_to_th_minim_info['status'] = []\n th_to_wh_minim_info['time'] = []\n th_to_wh_minim_info['iterations'] = []\n th_to_wh_minim_info['funcevals'] = []\n th_to_wh_minim_info['status'] = []\n\n for injparam in sorted(self.data_sets.keys()):\n injlabels = self.labels[injparam].dict\n for injkey in self.data_sets[injparam].keys():\n h0_metric_val = self.data_sets[injparam][injkey][\n 'h0_fit_to_toy_%s_asimov'\n %(injlabels['data_name'])]['metric_val']\n h1_metric_val = self.data_sets[injparam][injkey][\n 'h1_fit_to_toy_%s_asimov'\n %(injlabels['data_name'])]['metric_val']\n if h1_metric_val > h0_metric_val:\n bestfit = 'h0'\n altfit = 'h1'\n else:\n bestfit = 'h1'\n altfit = 'h0'\n\n wh_to_th_fit = self.data_sets[injparam][injkey][\n '%s_fit_to_%s_fid'%(altfit, bestfit)]['fid_asimov']\n th_to_wh_fit = self.data_sets[injparam][injkey][\n '%s_fit_to_%s_fid'%(bestfit, altfit)]['fid_asimov']\n\n wh_to_th_metrics.append(wh_to_th_fit['metric_val'])\n th_to_wh_metrics.append(th_to_wh_fit['metric_val'])\n\n for systkey in wh_to_th_fit['params'].keys():\n if systkey not in wh_to_th_params.keys():\n 
wh_to_th_params[systkey] = []\n wh_to_th_params[systkey].append(\n wh_to_th_fit['params'][systkey]\n )\n for systkey in th_to_wh_fit['params'].keys():\n if systkey not in th_to_wh_params.keys():\n th_to_wh_params[systkey] = []\n th_to_wh_params[systkey].append(\n th_to_wh_fit['params'][systkey]\n )\n\n wh_to_th_minim_info['time'].append(\n self.minimiser_info[injparam][injkey][\n '%s_fit_to_%s_fid'%(altfit, bestfit)\n ]['fid_asimov']['minimizer_time'])\n wh_to_th_minim_info['iterations'].append(\n self.minimiser_info[injparam][injkey][\n '%s_fit_to_%s_fid'%(altfit, bestfit)\n ]['fid_asimov']['minimizer_metadata']['nit'])\n wh_to_th_minim_info['funcevals'].append(\n self.minimiser_info[injparam][injkey][\n '%s_fit_to_%s_fid'%(altfit, bestfit)\n ]['fid_asimov']['minimizer_metadata']['nfev'])\n wh_to_th_minim_info['status'].append(\n self.minimiser_info[injparam][injkey][\n '%s_fit_to_%s_fid'%(altfit, bestfit)\n ]['fid_asimov']['minimizer_metadata']['status'])\n \n th_to_wh_minim_info['time'].append(\n self.minimiser_info[injparam][injkey][\n '%s_fit_to_%s_fid'%(bestfit, altfit)\n ]['fid_asimov']['minimizer_time'])\n th_to_wh_minim_info['iterations'].append(\n self.minimiser_info[injparam][injkey][\n '%s_fit_to_%s_fid'%(bestfit, altfit)\n ]['fid_asimov']['minimizer_metadata']['nit'])\n th_to_wh_minim_info['funcevals'].append(\n self.minimiser_info[injparam][injkey][\n '%s_fit_to_%s_fid'%(bestfit, altfit)\n ]['fid_asimov']['minimizer_metadata']['nfev'])\n th_to_wh_minim_info['status'].append(\n self.minimiser_info[injparam][injkey][\n '%s_fit_to_%s_fid'%(bestfit, altfit)\n ]['fid_asimov']['minimizer_metadata']['status'])\n\n wh_to_th_params['bestfit'] = bestfit\n wh_to_th_params['altfit'] = altfit\n th_to_wh_params['bestfit'] = bestfit\n th_to_wh_params['altfit'] = altfit\n\n self.wh_to_th['metrics'] = wh_to_th_metrics\n self.th_to_wh['metrics'] = th_to_wh_metrics\n self.wh_to_th['params'] = wh_to_th_params\n self.th_to_wh['params'] = th_to_wh_params\n self.wh_to_th['minim_info'] = wh_to_th_minim_info\n self.th_to_wh['minim_info'] = th_to_wh_minim_info", "def initialize(self): \n \n \n fixed_counts = {}\n partial_counts = {}\n \n self.ctx.static = []\n self.ctx.partial = {}\n self.ctx.select = []\n \n self.ctx.compositions = []\n self.ctx.sites_refactored = {}\n \n # We collect the sites into their composition and calculate the theoretical number of occupied sites\n for site in self.ctx.structure:\n if self.__is_partial(site):\n self.ctx.sites_refactored.setdefault(site.species_and_occu, [])\n if site.species_and_occu not in self.ctx.compositions:\n self.ctx.compositions.append(site.species_and_occu)\n \n self.ctx.partial.setdefault(site.species_and_occu, [[]])\n self.ctx.partial.get(site.species_and_occu)[0].append(site)\n \n partial_counts.setdefault(site.species_and_occu, [[0, 0] for s in site.species_and_occu])\n \n for i, element in enumerate(site.species_and_occu):\n partial_counts[site.species_and_occu][i][0] += site.species_and_occu.get(element)\n partial_counts[site.species_and_occu][i][1] += site.species_and_occu.get(element)\n else:\n self.ctx.static.append(PeriodicSite(site.specie, site.coords, site.lattice, True, True))\n fixed_counts.setdefault(site.specie, 0)\n fixed_counts[site.specie] += 1\n \n # If all sites are static, then no need to do anything.\n if len(self.ctx.static) == len(self.ctx.structure):\n self.ctx.do_break = 0\n self.out('structures.%s' % self.inputs.structure.uuid, self.inputs.structure)\n return\n \n # We compile the number of occupied site for each partial 
composition while not going over the theoretical number\n for comp in partial_counts:\n self.ctx.rs.shuffle(self.ctx.partial.get(comp)[0])\n for i, sp in enumerate(comp):\n partial_counts[comp][i][0] = np.floor(partial_counts[comp][i][0])\n \n # Calculation of the departure from the composition. \n error = {\n el: self.ctx.structure.composition.get(el) - fixed_counts.get(el, 0)\n for el in self.ctx.structure.composition\n }\n\n for comp in partial_counts:\n for i, sp in enumerate(comp):\n error[sp] -= partial_counts.get(comp)[i][0]\n\n # Adding ions to sites with the highest departure from theoretical number as long as the error\n # is greater than 0.5.\n for element in error:\n while error[element] > 0.5:\n if error[element] > 0:\n max_error = (None, 0)\n for i, comp in enumerate(partial_counts):\n if element in comp:\n for j, sp in enumerate(comp):\n if sp == element:\n err = (partial_counts.get(comp)[j][1] - partial_counts.get(comp)[j][0]) ** 2\n if err > max_error[1]:\n max_error = ((comp, j), err)\n partial_counts.get(max_error[0][0])[max_error[0][1]][0] += 1\n error[element] -= 1\n \n self.ctx.configurations = tuple()\n self.ctx.configuration_hashes = tuple()\n self.ctx.configuration_steps = tuple()\n self.ctx.configuration_energies = tuple()\n \n for comp in partial_counts:\n # For each site, calculate log10 of the multinomial factor,\n # it will be used to scale the probability of each site to \n # be used for a swap.\n n = 0\n for i in range(len(self.ctx.partial.get(comp)[-1])):\n n += np.log10(i + 1)\n \n for i, sp in enumerate(comp):\n for j in range(int(partial_counts.get(comp)[i][0])):\n n -= np.log10(j + 1)\n \n for _ in range(int(partial_counts.get(comp)[i][0])):\n site = self.ctx.partial.get(comp)[-1].pop(0)\n self.ctx.partial.get(comp).insert(0, PeriodicSite(Specie(sp, self.ctx.charges.get(sp.value, 0)), \n site.coords, site.lattice, True, True))\n self.ctx.sites_refactored.get(comp).append(sp)\n leftovers = self.ctx.partial.get(comp).pop()\n \n for j in range(len(leftovers)):\n n -= np.log10(j + 1)\n \n for site in leftovers:\n self.ctx.partial.get(comp).insert(0, PeriodicSite(self.ctx.vacancy, \n site.coords, site.lattice, True, True))\n self.ctx.sites_refactored.get(comp).append(self.ctx.vacancy.element)\n\n for _ in range(np.ceil(n).astype(int)):\n self.ctx.select.append(comp)\n \n for sites_refactored in self.ctx.sites_refactored.values():\n self.ctx.rs.shuffle(sites_refactored)\n \n self.ctx.idxes = [idx for idx in range(len(self.ctx.select))]\n self.ctx.sites = self.ctx.partial\n del self.ctx.partial\n \n self.ctx.partial_refactored = []\n # (site #, element) -> particle #\n self.ctx.indices = {}\n i = 0\n \n for site in self.ctx.structure:\n if self.__is_partial(site):\n for element in site.species_and_occu.keys():\n self.ctx.indices[(i, element)] = len(self.ctx.partial_refactored)\n self.ctx.partial_refactored.append(PeriodicSite(Specie(element, self.ctx.charges.get(element.value)), site.coords, site.lattice, True, True))\n i += 1\n \n self.ctx.all_indices = set(range(len(self.ctx.partial_refactored)))\n structure = Structure.from_sites(self.ctx.partial_refactored)\n self.ctx.ewald = EwaldSummation(structure)\n\n self.ctx.energy = self.__ewald(self.ctx.sites_refactored) * np.ones(1)\n self.ctx.tested = np.empty(0, dtype=np.float)\n self.ctx.accepted = np.empty(0, dtype=np.float)\n\n if self.inputs.verbose:\n self.report('Starting structure: E = %f' % self.ctx.energy[-1])", "def interpret_specs(self,details,return_stubs=False):\n\n\t\t#---this loop interpreter 
allows for a loop key at any point over specs in list or dict\n\t\t#---trim a copy of the specs so all loop keys are terminal\n\t\tdetails_trim = deepcopy(details)\n\t\t#---get all paths to a loop\n\t\tnonterm_paths = list([tuple(j) for j in set([tuple(i[:i.index('loop')+1]) \n\t\t\tfor i,j in catalog(details_trim) if 'loop' in i[:-1]])])\n\t\t#---some loops end in a list instead of a sub-dictionary\n\t\tnonterm_paths_list = list([tuple(j) for j in set([tuple(i[:i.index('loop')+1]) \n\t\t\tfor i,j in catalog(details_trim) if i[-1]=='loop'])])\n\t\t#---for each non-terminal path we save everything below and replace it with a key\n\t\tnonterms = []\n\t\tfor path in nonterm_paths:\n\t\t\tbase = deepcopy(delve(details_trim,*path[:-1]))\n\t\t\tnonterms.append(base['loop'])\n\t\t\tpivot = delve(details_trim,*path[:-1])\n\t\t\tpivot['loop'] = base['loop'].keys()\n\t\t#---hypothesize over the reduced specifications dictionary\n\t\tsweeps = [{'route':i[:-1],'values':j} for i,j in catalog(details_trim) if 'loop' in i]\n\t\t#---! note that you cannot have loops within loops (yet?) but this would be the right place for it\n\t\tif sweeps == []: new_calcs = [deepcopy(details)]\n\t\telse: new_calcs = hypothesis(sweeps,default=details_trim)\n\t\tnew_calcs_stubs = deepcopy(new_calcs)\n\t\t#---replace non-terminal loop paths with their downstream dictionaries\n\t\tfor ii,i in enumerate(nonterms):\n\t\t\tfor nc in new_calcs:\n\t\t\t\tdownkey = delve(nc,*nonterm_paths[ii][:-1])\n\t\t\t\tupkey = nonterm_paths[ii][-2]\n\t\t\t\tpoint = delve(nc,*nonterm_paths[ii][:-2])\n\t\t\t\tpoint[upkey] = nonterms[ii][downkey]\n\t\t#---loops over lists (instead of dictionaries) carry along the entire loop which most be removed\n\t\tfor ii,i in enumerate(nonterm_paths_list):\n\t\t\tfor nc in new_calcs: \n\t\t\t\t#---! this section is supposed to excise the redundant \"loop\" list if it still exists\n\t\t\t\t#---! 
however the PPI project had calculation metadata that didn't require it so we just try\n\t\t\t\ttry:\n\t\t\t\t\tpivot = delve(nc,*i[:-2]) if len(i)>2 else nc\n\t\t\t\t\tval = delve(nc,*i[:-1])[i[-2]]\n\t\t\t\t\tpivot[i[-2]] = val\n\t\t\t\texcept: pass\n\t\treturn new_calcs if not return_stubs else (new_calcs,new_calcs_stubs)", "def iterate_results(results, extract_fn):\n outputs = {}\n for environment, environment_results in results.items():\n if environment not in outputs:\n outputs[environment] = {}\n for experimental_setting, setting_results in environment_results.items():\n outputs[environment][experimental_setting] = []\n for config, seeds_results in setting_results.items():\n for seed, actual_results in seeds_results.items():\n output = extract_fn(actual_results)\n outputs[environment][experimental_setting].append(output)\n outputs[environment][experimental_setting] = np.array(outputs[environment][experimental_setting])\n return outputs", "def __staticLoopBoundScanning(\n self, stmts, tile_level, outer_loop_inames, loop_info_table\n ):\n\n # initialize all returned variables\n scan_stmts = []\n lbound_info_seq = []\n int_vars = []\n\n # generate the lower and upper values of each inter-tile loop\n val_table = {}\n for iname in outer_loop_inames:\n _, _, _, st_exp, _ = loop_info_table[iname]\n lval = ast.IdentExp(self.__getTileIterName(iname, tile_level))\n t = ast.BinOpExp(\n ast.IdentExp(self.__getTileSizeName(iname, tile_level)),\n ast.ParenthExp(st_exp.replicate()),\n ast.BinOpExp.SUB,\n )\n uval = ast.BinOpExp(lval.replicate(), ast.ParenthExp(t), ast.BinOpExp.ADD)\n val_table[iname] = (lval, uval)\n\n # iterate over each statement to determine loop bounds that are affine functions\n # of outer loop iterators\n lb_exps_table = {}\n ub_exps_table = {}\n for stmt in stmts:\n\n # skip all non loop statements\n if not isinstance(stmt, ast.ForStmt):\n lbound_info_seq.append(None)\n continue\n\n # extract this loop structure\n id, lb_exp, ub_exp, st_exp, lbody = self.ast_util.getForLoopInfo(stmt)\n\n # see if the loop bound expressions are bound/free of outer loop iterators\n lb_inames = filter(\n lambda i: self.ast_util.containIdentName(lb_exp, i), outer_loop_inames\n )\n ub_inames = filter(\n lambda i: self.ast_util.containIdentName(ub_exp, i), outer_loop_inames\n )\n\n # skip loops with bound expressions that are free of outer loop iterators\n if not lb_inames and not ub_inames:\n lbound_info_seq.append(None)\n continue\n\n # check if this loop runs only once\n is_one_time_loop = str(lb_exp) == str(ub_exp)\n\n # generate booleans to indicate the needs of prolog, epilog, and orio.main.tiled loop\n if is_one_time_loop:\n need_tiled_loop = False\n need_prolog = False\n need_epilog = False\n else:\n need_tiled_loop = True\n need_prolog = len(lb_inames) > 0\n need_epilog = len(ub_inames) > 0\n\n # generate new variable names for both the new lower and upper loop bounds\n if need_tiled_loop:\n lb_name, ub_name = self.__getLoopBoundNames()\n int_vars.extend([lb_name, ub_name])\n else:\n lb_name = \"\"\n ub_name = \"\"\n\n # append information about the new loop bounds\n lbinfo = (lb_name, ub_name, need_prolog, need_epilog, need_tiled_loop)\n lbound_info_seq.append(lbinfo)\n\n # skip generating loop-bound scanning code (if it's a one-time loop)\n if not need_tiled_loop:\n continue\n\n # determine the value of the new lower loop bound\n if str(lb_exp) in lb_exps_table:\n lb_var = lb_exps_table[str(lb_exp)]\n a = ast.BinOpExp(\n ast.IdentExp(lb_name), lb_var.replicate(), ast.BinOpExp.EQ_ASGN\n 
)\n else:\n if need_prolog:\n t = self.__findMinMaxVal(\n \"max\", lb_exp.replicate(), lb_inames, val_table\n )\n a = ast.BinOpExp(\n ast.IdentExp(lb_name), t.replicate(), ast.BinOpExp.EQ_ASGN\n )\n else:\n a = ast.BinOpExp(\n ast.IdentExp(lb_name), lb_exp.replicate(), ast.BinOpExp.EQ_ASGN\n )\n lb_exps_table[str(lb_exp)] = ast.IdentExp(lb_name)\n scan_stmts.append(ast.ExpStmt(a))\n\n # determine the value of the new upper loop bound\n if str(ub_exp) in ub_exps_table:\n ub_var = ub_exps_table[str(ub_exp)]\n a = ast.BinOpExp(\n ast.IdentExp(ub_name), ub_var.replicate(), ast.BinOpExp.EQ_ASGN\n )\n else:\n if need_epilog:\n t = self.__findMinMaxVal(\n \"min\", ub_exp.replicate(), ub_inames, val_table\n )\n a = ast.BinOpExp(\n ast.IdentExp(ub_name), t.replicate(), ast.BinOpExp.EQ_ASGN\n )\n else:\n a = ast.BinOpExp(\n ast.IdentExp(ub_name), ub_exp.replicate(), ast.BinOpExp.EQ_ASGN\n )\n ub_exps_table[str(ub_exp)] = ast.IdentExp(ub_name)\n scan_stmts.append(ast.ExpStmt(a))\n\n # return all necessary information\n return (scan_stmts, lbound_info_seq, int_vars)", "def multi_run(replications: int, iters: List, n: int):\n global call_count\n kwargs = {\n # 'alpha': 0.75,\n # 'rho': 'VaR',\n 'alpha': 0.75,\n 'rho': 'CVaR',\n 'x0': 2,\n 'n0': n,\n 'mu_1': -15,\n 'mu_2': 10,\n 'sigma_1': 4,\n 'sigma_2': 2\n }\n\n out_dict = {\n 'SA': dict(),\n 'SA_SAA': dict(),\n 'NM': dict(),\n 'NM_SAA': dict(),\n 'LBFGS': dict(),\n 'LBFGS_SAA': dict(),\n 'EI': dict(),\n 'EI_SAA': dict()\n }\n total_calls = dict()\n for key in out_dict.keys():\n total_calls[key] = dict()\n for it_count in iters:\n kwargs['iter_count'] = it_count\n for key in out_dict.keys():\n out_dict[key][it_count] = dict()\n total_calls[key][it_count] = 0\n i = 0\n while i < replications:\n try:\n out_dict['SA'][it_count][i] = SA_run(seed=i, **kwargs)\n total_calls['SA'][it_count] += call_count\n call_count = 0\n out_dict['SA_SAA'][it_count][i] = SA_run(seed=i, **kwargs, SAA_seed=i)\n total_calls['SA_SAA'][it_count] += call_count\n call_count = 0\n out_dict['NM'][it_count][i] = NM_run(seed=i, **kwargs)\n total_calls['NM'][it_count] += call_count\n call_count = 0\n out_dict['NM_SAA'][it_count][i] = NM_run(seed=i, **kwargs, SAA_seed=i)\n total_calls['NM_SAA'][it_count] += call_count\n call_count = 0\n out_dict['LBFGS'][it_count][i] = LBFGS_run(seed=i, **kwargs)\n total_calls['LBFGS'][it_count] += call_count\n call_count = 0\n out_dict['LBFGS_SAA'][it_count][i] = LBFGS_run(seed=i, **kwargs, SAA_seed=i)\n total_calls['LBFGS_SAA'][it_count] += call_count\n call_count = 0\n out_dict['EI'][it_count][i] = EI_run(seed=i, **kwargs)\n total_calls['EI'][it_count] += call_count\n call_count = 0\n out_dict['EI_SAA'][it_count][i] = EI_run(seed=i, **kwargs, SAA_seed=i)\n total_calls['EI_SAA'][it_count] += call_count\n call_count = 0\n i += 1\n except:\n continue\n np.save('call_counts_cvar_%d.npy' % n, total_calls)\n evaluate(out_dict, n)", "def initialize(self):\n self.assmts = {}\n\n offset = 0\n for entry in self.entries:\n assmts = AssignmentList()\n assmts.bit = 1 << offset\n assmts.mask = assmts.bit\n self.assmts[entry] = assmts\n offset += 1\n\n for block in self.blocks:\n block.stats = block.phis.values() + block.stats\n for stat in block.stats:\n if isinstance(stat, (PhiNode, NameAssignment)):\n stat.bit = 1 << offset\n assmts = self.assmts[stat.entry]\n assmts.stats.append(stat)\n assmts.mask |= stat.bit\n offset += 1\n\n for block in self.blocks:\n for entry, stat in block.gen.items():\n assmts = self.assmts[entry]\n if stat is Uninitialized:\n 
block.i_gen |= assmts.bit\n else:\n block.i_gen |= stat.bit\n block.i_kill |= assmts.mask\n block.i_output = block.i_gen\n for entry in block.bound:\n block.i_kill |= self.assmts[entry].bit\n\n for assmts in self.assmts.itervalues():\n self.entry_point.i_gen |= assmts.bit\n self.entry_point.i_output = self.entry_point.i_gen", "def create_dicts():\n load_data_for_dict('data/atis/train/seq.in', 'data/atis/voc/vocabulary.json')\n load_data_for_dict('data/atis/valid/seq.in', 'data/atis/voc/vocabulary.json')\n load_data_for_dict('data/atis/test/seq.in', 'data/atis/voc/vocabulary.json') \n load_data_for_dict('data/atis/train/seq.out', 'data/atis/voc/slot_vocabulary.json')", "def _init_dictionaries(self):\n\t\t# Dictionary contatining all actionPotential\n\t\tself.actionPotentials = {}\n\t\t# Dictionary containing all cells id.\n\t\t# Cells id are used by neuron to communicate synapses between different cells in different hosts. Ids (gids) can be any integer, they just need to be unique.\n\t\tself.cellsId = {}\n\t\t# Dictionary containing all cells\n\t\tself.cells = {}\n\n\t\tself._nMuscles = len(self._infoMuscles)\n\t\tfor muscle,muscAfferentDelay in self._infoMuscles:\n\t\t\t# Create sub-dictionaries for all DoF\n\t\t\tself.actionPotentials[muscle]={}\n\t\t\tself.cellsId[muscle]={}\n\t\t\tself.cells[muscle]={}\n\t\t\tfor cellInfo in self._infoCommonCellsInMuscles:\n\t\t\t\t# add lists containing cell ids/cells/ap\n\t\t\t\tcellClass = cellInfo[0]\n\t\t\t\tcellName = cellInfo[1]\n\t\t\t\tself.cellsId[muscle][cellName]=[]\n\t\t\t\tself.cells[muscle][cellName]=[]\n\t\t\t\tif (cellClass==\"Motoneuron\" or cellClass==\"IntFireMn\") and self.recordMotoneurons:\n\t\t\t\t\tself.actionPotentials[muscle][cellName]=[]\n\t\t\t\telif cellClass==\"AfferentFiber\" and self.recordAfferents:\n\t\t\t\t\tself.actionPotentials[muscle][cellName]=[]\n\t\t\t\telif cellClass==\"IntFire\" and self.recordIntFire:\n\t\t\t\t\tself.actionPotentials[muscle][cellName]=[]\n\n\t\t# Add special cells (specifc for some muscles or not muscle related)\n\t\tfor cellInfo in self._infoSpecialCells:\n\t\t\tgroupOrMuscle = cellInfo[0]\n\t\t\tcellClass = cellInfo[1]\n\t\t\tcellName = cellInfo[2]\n\t\t\tif not groupOrMuscle in self.cellsId.keys():\n\t\t\t\tself.actionPotentials[groupOrMuscle]={}\n\t\t\t\tself.cellsId[groupOrMuscle]={}\n\t\t\t\tself.cells[groupOrMuscle]={}\n\n\t\t\tself.cellsId[groupOrMuscle][cellName]=[]\n\t\t\tself.cells[groupOrMuscle][cellName]=[]\n\t\t\tif (cellClass==\"Motoneuron\" or cellClass==\"IntFireMn\") and self.recordMotoneurons:\n\t\t\t\tself.actionPotentials[groupOrMuscle][cellName]=[]\n\t\t\telif cellClass==\"AfferentFiber\" and self.recordAfferents:\n\t\t\t\tself.actionPotentials[groupOrMuscle][cellName]=[]\n\t\t\telif cellClass==\"IntFire\" and self.recordIntFire:\n\t\t\t\tself.actionPotentials[groupOrMuscle][cellName]=[]", "def _project_loops(self):\n\n self._create_projection_datasets()\n self._get_sho_chunk_sizes(10)\n\n '''\n Loop over the FORCs\n '''\n for forc_chunk_index in range(self._num_forcs):\n pos_chunk_index = 0\n\n self._current_sho_spec_slice = slice(self.sho_spec_inds_per_forc * self._current_forc,\n self.sho_spec_inds_per_forc * (self._current_forc + 1))\n self._current_met_spec_slice = slice(self.metrics_spec_inds_per_forc * self._current_forc,\n self.metrics_spec_inds_per_forc * (self._current_forc + 1))\n dc_vec = self._get_dc_offset()\n '''\n Loop over positions\n '''\n while self._current_pos_slice.stop < self._end_pos:\n loops_2d, nd_mat_shape_dc_first, order_dc_offset_reverse = 
self._get_projection_data(pos_chunk_index)\n\n # step 8: perform loop unfolding\n projected_loops_2d, loop_metrics_1d = self._project_loop_batch(dc_vec, np.transpose(loops_2d))\n\n # test the reshapes back\n projected_loops_2d = self._reshape_projected_loops_for_h5(projected_loops_2d,\n order_dc_offset_reverse,\n nd_mat_shape_dc_first)\n self.h5_projected_loops[self._current_pos_slice, self._current_sho_spec_slice] = projected_loops_2d\n\n metrics_2d = self._reshape_results_for_h5(loop_metrics_1d, nd_mat_shape_dc_first)\n\n self.h5_loop_metrics[self._current_pos_slice, self._current_met_spec_slice] = metrics_2d\n\n # Reset the position slice\n self._current_pos_slice = slice(None)\n\n pass", "def init_globals():\n global cycles, used_edges, split\n cycles = []\n used_edges = []\n split = []", "def loops(graph = None):\n\tunknown_structs = []\n\tcompound_structs = []\n\tloops_dict = create_components_dict()\n\tfor subgraph in nx.connected_component_subgraphs(graph):\n\t\tif subgraph.number_of_nodes() < 3:\n\t\t\tunknown_structs.append(subgraph)\n\t\telse:\n\t\t\tif connectivity_threshold(graph = subgraph) > 2 or loop_type(graph= subgraph) == 'NA':\n\t\t\t\tcompound_structs.append(subgraph)\n\t\t\telse:\n\t\t\t\tloops_dict[loop_type(graph= subgraph)].append(subgraph)\n\treturn loops_dict", "def repeated_iteration(self) -> global___Statement.Iteration.RepeatedIteration:", "def initialize(self):\n self.assmts = {}\n\n bit = 1\n for entry in self.entries:\n assmts = AssignmentList()\n assmts.mask = assmts.bit = bit\n self.assmts[entry] = assmts\n bit <<= 1\n\n for block in self.blocks:\n for stat in block.stats:\n if isinstance(stat, NameAssignment):\n stat.bit = bit\n assmts = self.assmts[stat.entry]\n assmts.stats.append(stat)\n assmts.mask |= bit\n bit <<= 1\n\n for block in self.blocks:\n for entry, stat in block.gen.items():\n assmts = self.assmts[entry]\n if stat is Uninitialized:\n block.i_gen |= assmts.bit\n else:\n block.i_gen |= stat.bit\n block.i_kill |= assmts.mask\n block.i_output = block.i_gen\n for entry in block.bounded:\n block.i_kill |= self.assmts[entry].bit\n\n for assmts in self.assmts.values():\n self.entry_point.i_gen |= assmts.bit\n self.entry_point.i_output = self.entry_point.i_gen", "def make_loop(loop_orders, dtypes, loop_tasks, sub, openmp=None):\r\n def loop_over(preloop, code, indices, i):\r\n iterv = 'ITER_%i' % i\r\n update = \"\"\r\n suitable_n = \"1\"\r\n for j, index in enumerate(indices):\r\n var = sub['lv%i' % j]\r\n dtype = dtypes[j]\r\n update += \"%(dtype)s &%(var)s_i = * ( %(var)s_iter + %(iterv)s * %(var)s_jump%(index)s_%(i)s );\\n\" % locals()\r\n if index != 'x':\r\n suitable_n = \"%(var)s_n%(index)s\" % locals()\r\n if openmp:\r\n openmp_elemwise_minsize = theano.config.openmp_elemwise_minsize\r\n forloop = \"\"\"#pragma omp parallel for if( %(suitable_n)s >=%(openmp_elemwise_minsize)s)\\n\"\"\" % locals()\r\n else:\r\n forloop = \"\"\r\n forloop += \"\"\"for (int %(iterv)s = 0; %(iterv)s<%(suitable_n)s; %(iterv)s++)\"\"\" % locals()\r\n return\"\"\"\r\n %(preloop)s\r\n %(forloop)s {\r\n %(update)s\r\n %(code)s\r\n }\r\n \"\"\" % locals()\r\n\r\n preloops = {}\r\n for i, (loop_order, dtype) in enumerate(zip(loop_orders, dtypes)):\r\n for j, index in enumerate(loop_order):\r\n if index != 'x':\r\n preloops.setdefault(j, \"\")\r\n preloops[j] += (\"%%(lv%(i)s)s_iter = (%(dtype)s*)(PyArray_DATA(%%(lv%(i)s)s));\\n\" % locals()) % sub\r\n break\r\n else: # all broadcastable\r\n preloops.setdefault(0, \"\")\r\n preloops[0] += (\"%%(lv%(i)s)s_iter = 
(%(dtype)s*)(PyArray_DATA(%%(lv%(i)s)s));\\n\" % locals()) % sub\r\n\r\n s = \"\"\r\n\r\n for i, (pre_task, task), indices in reversed(zip(xrange(len(loop_tasks) - 1), loop_tasks, zip(*loop_orders))):\r\n s = loop_over(preloops.get(i, \"\") + pre_task, s + task, indices, i)\r\n\r\n s += loop_tasks[-1]\r\n return \"{%s}\" % s", "def tunes_non_cyclic():\n A = Tune(\"A\", key=None, tunetype=\"Reel\", played=None, source_code=\"TC\",\n next_in_set=[\"B\", \"D\"])\n B = Tune(\"B\", key=None, tunetype=\"Reel\", played=None, source_code=\"TC\",\n next_in_set=[\"C\"])\n C = Tune(\"C\", key=None, tunetype=\"Reel\", played=None, source_code=\"TC\")\n D = Tune(\"D\", key=None, tunetype=\"Reel\", played=None, source_code=\"TC\")\n E = Tune(\"E\", key=None, tunetype=\"Reel\", played=None, source_code=\"TC\",\n next_in_set=[\"A\"])\n tunes = {\n \"A\": A,\n \"B\": B,\n \"C\": C,\n \"D\": D,\n \"E\": E\n }\n return tunes", "def make_sol_dict():\n file_names = [\"FORMAT3_Copy of KommuneMTPLforTriangle.xls\",\n \"C Triangulations analysis R2017 GC20161109.xls\",\n \"EVOLUTION 2017 _ M+F - Triangles cat nat brut net.xls\",\n \"Bsp8 _ Dreiecke aus GCNA für CU1.4.1.xls\",\n \"Analysis MTPL MOD.xls\",\n \"Bsp6 _ Dreiecke aus GCNA für CU1.4.1.xls\",\n \"FORMAT6_sinistres.xls\",\n \"FORMAT1_LOSSES-MTPL-OVER-500-GROUP-2005_modified.xls\"]\n solutions_dict = dict()\n raw_dict = dict()\n for file_name in file_names:\n sr_list, file_name = ExcelLoader.load_excel(pdir.RESOURCES_DIR + \"/raw_test_files/\" + file_name)\n dh = DataHolder()\n for sr in sr_list:\n dh.add_sheet(sr.sheet_name, pd.DataFrame(columns=sr.headers, data=sr.row_vals),\n pd.DataFrame(columns=sr.headers, data=sr.xls_types), orig_sheet_name=sr.sheet_name)\n\n dh = SheetPreProcessor.separate_components(dh)\n raw_dict[file_name] = dh.encode()\n dh = HorizontalMerger.horizontal_merge(dh)\n #temp_path = pdir.RESOURCES_DIR + \"/temp/\"\n #dh.write_excel(temp_path + file_name)\n solutions_dict[file_name] = dh\n solutions_dict = MergePararametersOptimizer.make_ind_col_dict(solutions_dict)\n with open(pdir.RESOURCES_DIR + \"/test/merge_solutions.obj\", \"wb\") as temp_file:\n pickle.dump(solutions_dict, temp_file)\n with open(pdir.RESOURCES_DIR + \"/test/raw_test.obj\", \"wb\") as temp_file:\n pickle.dump(raw_dict, temp_file)", "def loop_nonThreaded():\n nonlocal index, total\n nonlocal d_tree\n nonlocal fn_inputReadCallback\n nonlocal fn_analysisCallback\n nonlocal fn_outputWriteCallback\n nonlocal dret_inputSet\n nonlocal dret_analyze\n nonlocal dret_outputSet\n nonlocal str_desc\n\n b_analyzeStatusHist: bool = False\n b_inputStatusHist: bool = False\n b_outputStatusHist: bool = False\n\n if int(self.verbosityLevel) and self.toConsole():\n iterator = tqdm( self.d_inputTree.items(),\n desc = str_desc)\n else:\n iterator = self.d_inputTree.items()\n\n for path, data in iterator:\n dret_inputSet = {}\n dret_analyze = {}\n dret_outputSet = {}\n # Read (is sometimes skipped) / Analyze / Write (also sometimes skipped)\n if fn_inputReadCallback:\n dret_inputSet = inputSet_read(path, data)\n try:\n b_inputStatusHist = b_inputStatusHist or dret_inputSet['status']\n except:\n pass\n if fn_analysisCallback:\n try:\n dret_analyze = analysis_do(path, d_tree[path], index)\n except:\n dret_analyze['status'] = False\n self.dp.qprint(\"Analysis failed\", comms = 'error')\n try:\n b_analyzeStatusHist = b_analyzeStatusHist or dret_analyze['status']\n except:\n pass\n if fn_outputWriteCallback:\n if 'status' in dret_analyze.keys():\n if dret_analyze['status']:\n 
dret_outputSet = outputSet_write(path, d_tree[path])\n try:\n b_outputStatusHist = b_outputStatusHist or dret_outputSet['status']\n except:\n pass\n index += 1\n dret_inputSet['status'] = b_inputStatusHist\n dret_analyze['status'] = b_analyzeStatusHist\n dret_outputSet['status'] = b_outputStatusHist\n tree_removeDeadBranches()", "def _extract_loops(self, pdb, loop_type, mapping, normalize):\n try:\n mlab = matlab.Matlab(self.config['locations']['fr3d_root'])\n [loops, count, err_msg] = mlab.extractLoops(pdb, loop_type, nout=3)\n except Exception as err:\n self.logger.exception(err)\n raise err\n\n if err_msg != '':\n raise core.MatlabFailed(err_msg)\n\n if loops == 0:\n self.logger.warning('No %s in %s', loop_type, pdb)\n loop_id = self._get_fake_loop_id(pdb, loop_type)\n return [mod.LoopInfo(loop_id=loop_id,\n type = 'NA',\n pdb_id=pdb,\n sequential_id='000',\n length=0,\n seq='',\n r_seq='',\n nwc_seq='',\n r_nwc_seq='',\n unit_ids='',\n loop_name='')]\n\n self.logger.info('Found %i %s loops', count, loop_type)\n\n data = []\n for index in xrange(count):\n loop = loops[index].AllLoops_table\n full_id = normalize(loop.full_id)\n loop_id = self._get_loop_id(full_id, pdb, loop_type, mapping)\n loops[index].Filename = loop_id\n\n data.append(mod.LoopInfo(\n loop_id=loop_id,\n type=loop_type,\n pdb_id=pdb,\n sequential_id=loop_id.split(\"_\")[-1],\n length=int(loops[index].NumNT[0][0]),\n seq=loop.seq,\n r_seq=loop.r_seq,\n nwc_seq=loop.nwc,\n r_nwc_seq=loop.r_nwc,\n unit_ids=','.join(full_id),\n loop_name=loop.loop_name))\n\n if self.save_loops:\n self.__save__(loops, self.config['locations']['loops_mat_files'])\n\n return data", "def preLoopFunctions(self):\n\t\treturn", "def _iterate(self):\n\n for atom in self.mol.GetAtoms(): #type: Chem.Atom\n atm_idx = atom.GetIdx()\n shells: List[Set[int]] = [] # List of Shells (set of atomidx (int))\n current_iter_atomts: Set[int] = {atm_idx} # Set of atoms for current iteration, initialized with central-atom\n prev_atms = {atm_idx} # Atoms already in inner shells, used to avoid occurrence in multiple shells\n for i in range(self.shellCount): # type: Chem.Atom\n next_iter_atoms: Set[int] = set()\n # Add neighbours of atoms in previous shell (potential candidates for next shell)\n for j_atm in current_iter_atomts: # type: int\n j_atm_obj: Chem.Atom = self.mol.GetAtomWithIdx(j_atm)\n next_iter_atoms.update([k_atm.GetIdx() for k_atm in j_atm_obj.GetNeighbors()])\n # Add atoms as shell if not in one of the previous shells\n shells.append(next_iter_atoms - prev_atms)\n # Update for next loop\n current_iter_atomts = next_iter_atoms - prev_atms\n prev_atms.update(next_iter_atoms)\n assert len(shells) == self.shellCount\n self._processShells(atm_idx, shells)", "def create(self):\n\n for sample_name, sample_info in self.samples.items():\n if not sample_info[\"use_it\"]:\n continue\n process_name = sample_info[\"process_name_specific\"]\n sample_category = sample_info[\"sample_category\"]\n is_mc = (sample_info[\"type\"] == \"mc\")\n\n logging.info(\"Building dictionaries for sample %s...\" % process_name)\n for charge_selection in self.charge_selections:\n central_or_shift_extensions = [\"\", \"hadd\", \"addBackgrounds\"]\n central_or_shifts_extended = central_or_shift_extensions + self.central_or_shifts\n for central_or_shift_or_dummy in central_or_shifts_extended:\n process_name_extended = [ process_name, \"hadd\" ]\n for process_name_or_dummy in process_name_extended:\n if central_or_shift_or_dummy in [ \"hadd\" ] and process_name_or_dummy in [ \"hadd\" 
]:\n continue\n if central_or_shift_or_dummy != \"central\" and central_or_shift_or_dummy not in central_or_shift_extensions:\n if not is_mc:\n continue\n if not self.accept_central_or_shift(central_or_shift_or_dummy, sample_info):\n continue\n\n key_dir = getKey(process_name_or_dummy, charge_selection, central_or_shift_or_dummy)\n for dir_type in [ DKEY_CFGS, DKEY_HIST, DKEY_LOGS, DKEY_RLES ]:\n initDict(self.dirs, [ key_dir, dir_type ])\n if dir_type in [ DKEY_CFGS, DKEY_LOGS ]:\n self.dirs[key_dir][dir_type] = os.path.join(self.get_dir_type(dir_type), dir_type, self.channel,\n \"_\".join([ charge_selection ]), process_name_or_dummy, central_or_shift_or_dummy)\n else:\n self.dirs[key_dir][dir_type] = os.path.join(self.outputDir, dir_type, self.channel,\n \"_\".join([ charge_selection ]), process_name_or_dummy)\n for subdirectory in [ \"comp_jetToTauFakeRate\", \"makePlots\" ]:\n key_dir = getKey(subdirectory)\n for dir_type in [ DKEY_CFGS, DKEY_HIST, DKEY_LOGS, DKEY_DCRD, DKEY_PLOT ]:\n initDict(self.dirs, [ key_dir, dir_type ])\n if dir_type in [ DKEY_CFGS, DKEY_LOGS, DKEY_DCRD, DKEY_PLOT ]:\n self.dirs[key_dir][dir_type] = os.path.join(self.get_dir_type(dir_type), dir_type, self.channel, subdirectory)\n else:\n self.dirs[key_dir][dir_type] = os.path.join(self.outputDir, dir_type, self.channel, subdirectory)\n for dir_type in [ DKEY_CFGS, DKEY_SCRIPTS, DKEY_HIST, DKEY_LOGS, DKEY_DCRD, DKEY_PLOT, DKEY_HADD_RT ]:\n initDict(self.dirs, [ dir_type ])\n if dir_type in [ DKEY_CFGS, DKEY_SCRIPTS, DKEY_LOGS, DKEY_DCRD, DKEY_PLOT, DKEY_HADD_RT ]:\n self.dirs[dir_type] = os.path.join(self.get_dir_type(dir_type), dir_type, self.channel)\n else:\n self.dirs[dir_type] = os.path.join(self.outputDir, dir_type, self.channel)\n\n numDirectories = 0\n for key in self.dirs.keys():\n if type(self.dirs[key]) == dict:\n numDirectories += len(self.dirs[key])\n else:\n numDirectories += 1\n logging.info(\"Creating directory structure (numDirectories = %i)\" % numDirectories)\n numDirectories_created = 0;\n frac = 1\n for key in self.dirs.keys():\n if type(self.dirs[key]) == dict:\n for dir_type in self.dirs[key].keys():\n create_if_not_exists(self.dirs[key][dir_type])\n numDirectories_created += len(self.dirs[key])\n else:\n create_if_not_exists(self.dirs[key])\n numDirectories_created = numDirectories_created + 1\n while 100*numDirectories_created >= frac*numDirectories:\n logging.info(\" %i%% completed\" % frac)\n frac = frac + 1\n logging.info(\"Done.\")\n\n inputFileLists = {}\n for sample_name, sample_info in self.samples.items():\n if not sample_info[\"use_it\"]:\n continue\n logging.info(\"Checking input files for sample %s\" % sample_info[\"process_name_specific\"])\n inputFileLists[sample_name] = generateInputFileList(sample_info, self.max_files_per_job)\n\n self.inputFileIds = {}\n for sample_name, sample_info in self.samples.items():\n if not sample_info[\"use_it\"]:\n continue\n\n process_name = sample_info[\"process_name_specific\"]\n inputFileList = inputFileLists[sample_name]\n\n logging.info(\"Creating configuration files to run '%s' for sample %s\" % (self.executable_analyze, process_name))\n\n is_mc = (sample_info[\"type\"] == \"mc\")\n sample_category = sample_info[\"sample_category\"]\n\n for charge_selection in self.charge_selections:\n for central_or_shift in self.central_or_shifts:\n \n if central_or_shift != \"central\" and not is_mc:\n continue\n if not self.accept_central_or_shift(central_or_shift, sample_info):\n continue\n\n # build config files for executing analysis code\n 
key_analyze_dir = getKey(process_name, charge_selection, central_or_shift)\n\n for jobId in inputFileList.keys():\n\n analyze_job_tuple = (process_name, charge_selection, central_or_shift, jobId)\n key_analyze_job = getKey(*analyze_job_tuple)\n ntupleFiles = inputFileList[jobId]\n if len(ntupleFiles) == 0:\n logging.warning(\"No input ntuples for %s --> skipping job !!\" % (key_analyze_job))\n continue\n\n cfgFile_modified_path = os.path.join(self.dirs[key_analyze_dir][DKEY_CFGS], \"analyze_%s_%s_%s_%i_cfg.py\" % analyze_job_tuple)\n logFile_path = os.path.join(self.dirs[key_analyze_dir][DKEY_LOGS], \"analyze_%s_%s_%s_%i.log\" % analyze_job_tuple)\n histogramFile_path = os.path.join(self.dirs[key_analyze_dir][DKEY_HIST], \"analyze_%s_%s_%s_%i.root\" % analyze_job_tuple)\n rleOutputFile_path = os.path.join(self.dirs[key_analyze_dir][DKEY_RLES], \"rle_%s_%s_%s_%i.txt\" % analyze_job_tuple) \\\n if self.select_rle_output else \"\"\n\n self.jobOptions_analyze[key_analyze_job] = {\n 'ntupleFiles' : ntupleFiles,\n 'cfgFile_modified' : cfgFile_modified_path,\n 'histogramFile' : histogramFile_path,\n 'logFile' : logFile_path,\n 'chargeSelection' : charge_selection,\n 'jet_minPt' : self.jet_minPt,\n 'jet_maxPt' : self.jet_maxPt,\n 'jet_minAbsEta' : self.jet_minAbsEta,\n 'jet_maxAbsEta' : self.jet_maxAbsEta,\n 'hadTau_selection_tight' : self.hadTau_selection_tight,\n 'hadTauSelection_denominator' : self.hadTau_selection_denominator,\n 'hadTauSelections_numerator' : self.hadTau_selections_numerator,\n 'trigMatchingOptions' : self.trigMatchingOptions,\n 'selEventsFileName_output' : rleOutputFile_path,\n 'absEtaBins' : self.absEtaBins,\n 'decayModes' : self.decayModes,\n 'central_or_shift' : central_or_shift,\n 'central_or_shifts_local' : [],\n 'apply_hlt_filter' : self.hlt_filter,\n }\n self.createCfg_analyze(self.jobOptions_analyze[key_analyze_job], sample_info)\n\n # initialize input and output file names for hadd_stage1\n key_hadd_stage1_dir = getKey(process_name, charge_selection)\n hadd_stage1_job_tuple = (process_name, charge_selection)\n key_hadd_stage1_job = getKey(*hadd_stage1_job_tuple)\n if not key_hadd_stage1_job in self.inputFiles_hadd_stage1:\n self.inputFiles_hadd_stage1[key_hadd_stage1_job] = []\n self.inputFiles_hadd_stage1[key_hadd_stage1_job].append(self.jobOptions_analyze[key_analyze_job]['histogramFile'])\n self.outputFile_hadd_stage1[key_hadd_stage1_job] = os.path.join(self.dirs[key_hadd_stage1_dir][DKEY_HIST],\n \"hadd_stage1_%s_%s.root\" % hadd_stage1_job_tuple)\n\n # initialize input and output file names for hadd_stage2\n key_hadd_stage1_job = getKey(process_name, charge_selection)\n key_hadd_stage2_dir = getKey(\"hadd\", charge_selection)\n key_hadd_stage2_job = getKey(charge_selection)\n if not key_hadd_stage2_job in self.inputFiles_hadd_stage2:\n self.inputFiles_hadd_stage2[key_hadd_stage2_job] = []\n self.inputFiles_hadd_stage2[key_hadd_stage2_job].append(self.outputFile_hadd_stage1[key_hadd_stage1_job])\n self.outputFile_hadd_stage2[key_hadd_stage2_job] = os.path.join(self.dirs[key_hadd_stage2_dir][DKEY_HIST],\n \"hadd_stage2_%s.root\" % charge_selection)\n\n logging.info(\"Creating configuration files for executing 'comp_jetToTauFakeRate'\")\n for charge_selection in self.charge_selections:\n charge_key = \"comp_%s\" % charge_selection\n self.comp_input_files[charge_key] = []\n for trigMatchingOption in self.trigMatchingOptions:\n key_hadd_stage2_job = getKey(charge_selection)\n key_comp_jetToTauFakeRate_dir = getKey(\"comp_jetToTauFakeRate\")\n 
key_comp_jetToTauFakeRate_job = getKey(charge_selection, trigMatchingOption)\n self.jobOptions_comp_jetToTauFakeRate[key_comp_jetToTauFakeRate_job] = {\n 'inputFile' : self.outputFile_hadd_stage2[key_hadd_stage2_job],\n 'cfgFile_modified' : os.path.join(\n self.dirs[DKEY_CFGS], \"comp_jetToTauFakeRate_%s_%s_cfg.py\" % (charge_selection, trigMatchingOption)),\n 'outputFile' : os.path.join(\n self.dirs[DKEY_HIST], \"comp_jetToTauFakeRate_%s_%s.root\" % (charge_selection, trigMatchingOption)),\n 'logFile' : os.path.join(\n self.dirs[DKEY_LOGS], \"comp_jetToTauFakeRate_%s_%s.log\" % (charge_selection, trigMatchingOption)),\n 'looseRegion' : \"jetToTauFakeRate_%s_%s/denominator/\" % (charge_selection, trigMatchingOption),\n 'tightRegion' : \"jetToTauFakeRate_%s_%s/numerator/\" % (charge_selection, trigMatchingOption),\n 'absEtaBins' : self.absEtaBins,\n 'ptBins' : self.ptBins,\n 'decayModes' : self.decayModes,\n 'hadTauSelections' : self.hadTau_selections_numerator,\n 'trigMatchingOption' : trigMatchingOption,\n 'plots_outputFileName' : os.path.join(self.dirs[key_comp_jetToTauFakeRate_dir][DKEY_PLOT], \"comp_jetToTauFakeRate_%s.png\" % trigMatchingOption)\n }\n self.createCfg_comp_jetToTauFakeRate(self.jobOptions_comp_jetToTauFakeRate[key_comp_jetToTauFakeRate_job])\n comp_output = self.jobOptions_comp_jetToTauFakeRate[key_comp_jetToTauFakeRate_job]['outputFile']\n self.targets.append(comp_output)\n self.comp_input_files[charge_key].append(comp_output)\n self.comp_output_files[charge_key] = os.path.join(self.dirs[DKEY_HIST], \"comp_jetToTauFakeRate_%s.root\" % charge_selection)\n\n logging.info(\"Creating configuration files to run 'makePlots'\")\n for charge_selection in self.charge_selections:\n key_hadd_stage2_job = getKey(charge_selection)\n key_makePlots_dir = getKey(\"makePlots\")\n key_makePlots_job = getKey(charge_selection) \n self.jobOptions_make_plots[key_makePlots_job] = {\n 'executable' : self.executable_make_plots,\n 'inputFile' : self.outputFile_hadd_stage2[key_hadd_stage2_job],\n 'cfgFile_modified' : os.path.join(\n self.dirs[key_makePlots_dir][DKEY_CFGS], \"makePlots_%s_cfg.py\" % self.channel),\n 'outputFile' : os.path.join(\n self.dirs[key_makePlots_dir][DKEY_PLOT], \"makePlots_%s.png\" % self.channel),\n 'histogramDir' : \"jetToTauFakeRate_%s\" % charge_selection,\n 'label' : None,\n 'make_plots_backgrounds' : self.make_plots_backgrounds\n }\n self.createCfg_makePlots(self.jobOptions_make_plots[key_makePlots_job])\n for trigMatchingOption in self.trigMatchingOptions:\n self.cfgFile_make_plots = self.cfgFile_make_plots_denominator\n for absEtaBin in [ \"absEtaLt1_5\", \"absEta1_5to9_9\" ]:\n key_hadd_stage2_job = getKey(charge_selection)\n key_makePlots_job = getKey(charge_selection, trigMatchingOption, absEtaBin, \"denominator\") \n self.jobOptions_make_plots[key_makePlots_job] = {\n 'executable' : self.executable_make_plots,\n 'inputFile' : self.outputFile_hadd_stage2[key_hadd_stage2_job],\n 'cfgFile_modified' : os.path.join(\n self.dirs[key_makePlots_dir][DKEY_CFGS], \"makePlots_%s_%s_%s_denominator_%s_cfg.py\" % \\\n (self.channel, charge_selection, trigMatchingOption, absEtaBin)),\n 'outputFile' : os.path.join(\n self.dirs[key_makePlots_dir][DKEY_PLOT], \"makePlots_%s_%s_%s_denominator_%s.png\" % (self.channel, charge_selection, trigMatchingOption, absEtaBin)),\n 'histogramDir' : \"jetToTauFakeRate_%s_%s/denominator/%s\" % (charge_selection, trigMatchingOption, absEtaBin),\n 'label' : None,\n 'make_plots_backgrounds' : self.make_plots_backgrounds\n }\n 
self.createCfg_makePlots(self.jobOptions_make_plots[key_makePlots_job])\n for hadTau_selection_numerator in self.hadTau_selections_numerator:\n key_hadd_stage2_job = getKey(charge_selection)\n key_makePlots_job = getKey(charge_selection, trigMatchingOption, absEtaBin, \"numerator\", hadTau_selection_numerator)\n self.jobOptions_make_plots[key_makePlots_job] = {\n 'executable' : self.executable_make_plots,\n 'inputFile' : self.outputFile_hadd_stage2[key_hadd_stage2_job],\n 'cfgFile_modified' : os.path.join(\n self.dirs[key_makePlots_dir][DKEY_CFGS], \"makePlots_%s_%s_%s_numerator_%s_%s_cfg.py\" % \\\n (self.channel, charge_selection, trigMatchingOption, hadTau_selection_numerator, absEtaBin)),\n 'outputFile' : os.path.join(\n self.dirs[key_makePlots_dir][DKEY_PLOT], \"makePlots_%s_%s_%s_numerator_%s_%s.png\" % \\\n (self.channel, charge_selection, trigMatchingOption, hadTau_selection_numerator, absEtaBin)),\n 'histogramDir' : \"jetToTauFakeRate_%s_%s/numerator/%s/%s\" % (charge_selection, trigMatchingOption, hadTau_selection_numerator, absEtaBin),\n 'label' : None,\n 'make_plots_backgrounds' : self.make_plots_backgrounds\n }\n self.createCfg_makePlots(self.jobOptions_make_plots[key_makePlots_job])\n\n self.sbatchFile_analyze = os.path.join(self.dirs[DKEY_SCRIPTS], \"sbatch_analyze_%s.py\" % self.channel)\n self.sbatchFile_comp_jetToTauFakeRate = os.path.join(self.dirs[DKEY_SCRIPTS], \"sbatch_comp_jetToTauFakeRate.py\")\n if self.is_sbatch:\n logging.info(\"Creating script for submitting '%s' jobs to batch system\" % self.executable_analyze)\n self.createScript_sbatch_analyze(self.executable_analyze, self.sbatchFile_analyze, self.jobOptions_analyze)\n logging.info(\"Creating script for submitting '%s' jobs to batch system\" % self.executable_comp_jetToTauFakeRate)\n self.createScript_sbatch(self.executable_comp_jetToTauFakeRate, self.sbatchFile_comp_jetToTauFakeRate, self.jobOptions_comp_jetToTauFakeRate)\n\n lines_makefile = []\n self.addToMakefile_analyze(lines_makefile)\n self.addToMakefile_hadd_stage1(lines_makefile)\n self.addToMakefile_hadd_stage2(lines_makefile, make_dependency = \"phony_hadd_stage1\", max_mem = '4096M')\n self.addToMakefile_comp_jetToTauFakeRate(lines_makefile)\n self.addToMakefile_comp_hadd(lines_makefile)\n self.addToMakefile_make_plots(lines_makefile)\n self.createMakefile(lines_makefile)\n\n logging.info(\"Done.\")\n\n return self.num_jobs", "def make_loop_careduce(loop_orders, dtypes, loop_tasks, sub):\r\n\r\n def loop_over(preloop, code, indices, i):\r\n iterv = 'ITER_%i' % i\r\n update = \"\"\r\n suitable_n = \"1\"\r\n for j, index in enumerate(indices):\r\n var = sub['lv%i' % j]\r\n update += \"%(var)s_iter += %(var)s_jump%(index)s_%(i)s;\\n\" % locals()\r\n if index != 'x':\r\n suitable_n = \"%(var)s_n%(index)s\" % locals()\r\n return \"\"\"\r\n %(preloop)s\r\n for (int %(iterv)s = %(suitable_n)s; %(iterv)s; %(iterv)s--) {\r\n %(code)s\r\n %(update)s\r\n }\r\n \"\"\" % locals()\r\n\r\n preloops = {}\r\n for i, (loop_order, dtype) in enumerate(zip(loop_orders, dtypes)):\r\n for j, index in enumerate(loop_order):\r\n if index != 'x':\r\n preloops.setdefault(j, \"\")\r\n preloops[j] += (\"%%(lv%(i)s)s_iter = (%(dtype)s*)(PyArray_DATA(%%(lv%(i)s)s));\\n\" % locals()) % sub\r\n break\r\n else: # all broadcastable\r\n preloops.setdefault(0, \"\")\r\n preloops[0] += (\"%%(lv%(i)s)s_iter = (%(dtype)s*)(PyArray_DATA(%%(lv%(i)s)s));\\n\" % locals()) % sub\r\n\r\n if len(loop_tasks) == 1:\r\n s = preloops.get(0, \"\")\r\n else:\r\n s = \"\"\r\n for i, (pre_task, task), 
indices in reversed(zip(xrange(len(loop_tasks) - 1), loop_tasks, zip(*loop_orders))):\r\n s = loop_over(preloops.get(i, \"\") + pre_task, s + task, indices, i)\r\n\r\n s += loop_tasks[-1]\r\n return \"{%s}\" % s", "def _init_dict(self):\n dict_ord = self.MIN_VALID\n\n for da in self.train_das:\n for dai in da:\n if dai.name not in self.dict_slot:\n self.dict_slot[dai.name] = dict_ord\n dict_ord += 1\n if dai.value not in self.dict_value:\n self.dict_value[dai.value] = dict_ord\n dict_ord += 1\n\n for tree in self.train_trees:\n for t_lemma, formeme in tree.nodes:\n if t_lemma not in self.dict_t_lemma:\n self.dict_t_lemma[t_lemma] = dict_ord\n dict_ord += 1\n if formeme not in self.dict_formeme:\n self.dict_formeme[formeme] = dict_ord\n dict_ord += 1\n\n self.dict_size = dict_ord", "def createDictionary(dataset):\r\n for columnNumber in range(2, dataset.shape[1]):\r\n print(\"manipulating \", dataset.at[0, columnNumber])\r\n manipulateData(columnNumber, dataset)\r\n return Dictionary", "def main_dictionary():\n for lyric in lyrics:\n for line in lyric.split(\"\\n\"):\n dictionary(line.split(\" \"))", "def createDictBase(self):\n #allFiles = glob.glob(self.path + \"/*\"+ self.filetype)\n #data = pd.read_excel(allFiles[0])\n#================================================================================================================== \n# self.list_files = self.Files_to_import()\n# data=pd.read_excel(self.path +'/'+self.list_files[0]) # importing the first excel sheet from the first/zero time point\n self.list_files = self.Files_to_import()\n try:\n tim = pd.read_excel(self.path +'/timePoints' + self.filetype) # importin the time points from a shhet called time_points\n time = np.array(tim['time']) # assigning variable time conataing an array with the timepoints\n self.nr_files = len(time)\n except:\n time = np.array(list(range(self.nr_files))) \n \n data=pd.read_excel(self.path +'/'+self.list_files[0])\n \n data=np.array(data) # converts it to array, so we can manipualte the data easier\n #python wants for some reason first to create the dictionary with at least on value before we can run it in a loop. THat is why we have litle redundancy, since the next part is allmost the same.\n for i in range(len(data)): # the numbers of rows. Goes through the rows\n for ii in range(len(data[i])): # the numbers of columns. For every row goes through the columns\n cell_id=str(i)+str(ii) # we create a variable that has a value cell_id= rowNUm colNUm, for example x= '34' means row 3 column 4\n dat=[] # a list that will contain the first value of the cell. It will be cleaned every time the loop runs the newxt value\n dat.append(data[i][ii]) # we put the value of the well to the list\n self.dbase[cell_id]=dat # the list is put to the table. For example dabse['cell_id']= some OD value \n \n # then we go through the rest of the excell time points and collect them\n for i in range(1,len(time)): \n if self.list_files[i] != 0:\n \n #data = pd.read_excel(allFiles[i])\n data=pd.read_excel(self.path +'/'+ self.list_files[i]) \n data=np.array(data)\n for i in range(len(data)): # the numbers of rows. Goes through the rows\n for ii in range(len(data[i])): # the numbers of columns. 
For every row goes through the columns\n cell_id=str(i)+str(ii) # we create a variable that has a value cell_id= rowNUm colNUm, for example x= '34' means row 3 column 4\n \n tempVar=self.dbase[cell_id] # here we use a method of exchanging variables to be able to uppdate the cloumn corresponding to the cell_id\n tempVar.append(data[i][ii]) # add the new data to the copy\n self.dbase[cell_id] = tempVar # uppdate the original dictionary\n else:\n pass\n self.dbase['time'] = time # at theend we add a column that takes care of the time_points \n return self.dbase", "def iter_fun(self):\n\n run_id = self._run_id\n etopo_dir = driver_home\n topodir = driver_home\n\n # load input info\n if self._input_info == None:\n scn_fname = os.path.join(self._run_home,'scenario_pts.txt') \n scn = np.loadtxt(scn_fname)\n scn_list = scn.tolist()\n else:\n scn_list = self._input_info\n \n # total number of runs\n M = len(scn_list)\n N = 8*M + 2 # 8*M runs plus two empty bathymetry runs\n\n if run_id == N:\n raise StopIteration()\n\n else:\n \n #=========================\n # set coarse and fine grids\n #\n t_shelf = 0. # time approaching continental slope\n t_harbor = 0. # time approaching harbor\n\n if ((run_id >= 0) and (run_id < 4*M)) or (run_id == 8*M):\n #------------------\n # setrun for coarse\n #\n grid = 'coarse'\n \n self._rundata.amrdata.amr_levels_max = 4\n # coarse grid run = 10\"\n # dx = 30', 5', 1', 10\"\n self._rundata.amrdata.refinement_ratios_x = [6, 5, 6]\n self._rundata.amrdata.refinement_ratios_y = [6, 5, 6]\n self._rundata.amrdata.refinement_ratios_t = [6, 5, 6]\n\n\n # add topography (coarse)\n topofiles = self._rundata.topo_data.topofiles\n # for topography, append lines of the form\n # [topotype, minlevel, maxlevel, t1, t2, fname]\n topofiles = []\n\n topofiles.append([3, 1, 4, 0., 1.e10, \\\n os.path.join(etopo_dir, 'etopo1_-130_-124_38_45_1min.asc')])\n topofiles.append([-3, 3, 4, 0., 1.e10, \\\n os.path.join(topodir, 'cc-1sec.asc')])\n\n # add regions\n regions = self._rundata.regiondata.regions \n # between shelf and CC \n regions = []\n regions.append(\\\n [2, 3, t_shelf, 1e9, -125, -124.05, 40.5, 43]) \n regions.append(\\\n [3, 4, t_harbor, 1e9, -124.26, -124.14, 41.67, 41.79])\n regions.append(\\\n [4, 4, t_harbor, 1e9, -124.218,-124.17, 41.7345, 41.77])\n\n # == fgmax.data values ==\n fgmax_files = self._rundata.fgmax_data.fgmax_files\n fgmax_files = []\n \n # for fixed grids append to this list names of any fgmax input files\n fgmax1_fname = os.path.join(driver_home,'fgmax1_coarse.txt')\n fgmax2_fname = os.path.join(driver_home,'fgmax2_coarse.txt')\n fgmax3_fname = os.path.join(driver_home,'fgmax3_coarse.txt')\n\n fgmax_files.append(fgmax1_fname) \n fgmax_files.append(fgmax2_fname) \n fgmax_files.append(fgmax3_fname) \n \n self._rundata.fgmax_data.num_fgmax_val = 2\n \n \n elif ((run_id >= 4*M) and (run_id < 8*M)) or (run_id == 8*M+1):\n #----------------\n # setrun for fine\n #\n grid = 'fine'\n \n self._rundata.amrdata.amr_levels_max = 6\n\n ## fine grid run = 2/3\"\n ## dx = 30', 5', 1', 10\", 2\", 2/3\"\n self._rundata.amrdata.refinement_ratios_x = [6, 5, 6, 5, 3]\n self._rundata.amrdata.refinement_ratios_y = [6, 5, 6, 5, 3]\n self._rundata.amrdata.refinement_ratios_t = [6, 5, 6, 5, 3]\n\n regions = self._rundata.regiondata.regions \n regions = []\n # between shelf and CC\n regions.append(\\\n [2, 4, t_shelf, 1e9, -125, -124.05, 40.5, 43]) \n regions.append(\\\n [4, 5, t_harbor, 1e9, -124.26, -124.14, 41.67, 41.79])\n regions.append(\\\n [6, 6, t_harbor, 1e9, 
-124.218,-124.17, 41.7345, 41.77])\n\n # add topography (fine)\n topofiles = self._rundata.topo_data.topofiles\n # for topography, append lines of the form\n # [topotype, minlevel, maxlevel, t1, t2, fname]\n topofiles = []\n\n topofiles.append([3, 1, 6, 0., 1.e10, \\\n os.path.join(etopo_dir, 'etopo1_-130_-124_38_45_1min.asc')])\n topofiles.append([-3, 4, 6, 0., 1.e10, \\\n os.path.join(topodir, 'cc-1sec.asc')])\n topofiles.append([3, 6, 6, 0., 1.e10, \\\n os.path.join(topodir,'cc-1_3sec-c_pierless.asc')])\n \n # == fgmax.data values ==\n fgmax_files = self._rundata.fgmax_data.fgmax_files\n fgmax_files = []\n \n # for fixed grids append to this list names of any fgmax input files\n fgmax1_fname = os.path.join(driver_home,'fgmax1_fine.txt')\n fgmax2_fname = os.path.join(driver_home,'fgmax2_fine.txt')\n fgmax3_fname = os.path.join(driver_home,'fgmax3_fine.txt')\n\n fgmax_files.append(fgmax1_fname) \n fgmax_files.append(fgmax2_fname) \n fgmax_files.append(fgmax3_fname) \n \n self._rundata.fgmax_data.num_fgmax_val = 2\n \n\n\n #\n # set desired magnitude\n #\n if ((run_id >= 0) and (run_id < M)) \\\n or ((run_id >= 4*M) and (run_id < 5*M)):\n self.KL_Mw_desired = 8.6\n elif ((run_id >= M) and (run_id < 2*M)) \\\n or ((run_id >= 5*M) and (run_id < 6*M)):\n self.KL_Mw_desired = 8.8\n elif ((run_id >= 2*M) and (run_id < 3*M)) \\\n or ((run_id >= 6*M) and (run_id < 7*M)):\n self.KL_Mw_desired = 9.0\n elif ((run_id >= 3*M) and (run_id < 4*M)) \\\n or ((run_id >= 7*M) and (run_id < 8*M)):\n self.KL_Mw_desired = 9.2\n \n #\n # set slip distribution\n #\n run_id_mod = run_id - 100*(run_id/100)\n m = scn_list[run_id_mod]\n self.set_KL_slip(m)\n \n if run_id < 8*M:\n dir_grid_Mw = '../geoclaw_output/' + str(grid) + '_' + str(self.KL_Mw_desired)\n self._rundir = os.path.join(dir_grid_Mw, 'run_' + str(run_id_mod))\n else:\n # empty runs to obtain bathymetry\n \n dir_grid_Mw = '../geoclaw_output/' + str(grid) + '_B0'\n self._rundir = dir_grid_Mw\n self.KL_Mw_desired = 0.0\n self.set_KL_slip([0.]*len(m)) # set output\n self._rundata.clawdata.output_times = [1.0, 3.0]\n \n self._run_id += 1\n \n return self", "def Init(self, *args):\n return _BRepAlgo.BRepAlgo_Loop_Init(self, *args)", "def buildRunDictMain(self, ori_images):\n self.run_dict[\"Of\"] = {\n \"Run\": not self.of_exist,\n \"Progress\": ori_images,\n \"Text\": \"Running optical flow\",\n }\n self.run_dict[\"Back_Of\"] = {\n \"Run\": not self.back_of_exist,\n \"Progress\": ori_images,\n \"Text\": \"Running back optical flow\",\n }\n self.run_dict[\"Depth\"] = {\n \"Run\": not self.depth_exist,\n \"Progress\": ori_images,\n \"Text\": \"Running depth estimation\",\n }\n self.run_dict[\"Speed\"] = {\n \"Run\": True,\n \"Progress\": ori_images,\n \"Text\": \"Running speed estimation\",\n }\n self.run_dict[\"Optimization\"] = {\n \"Run\": self.ui.c_optimize.isChecked(),\n \"Progress\": ori_images * 9,\n \"Text\": \"Running parameter optimization\",\n }\n\n self.run_dict[\"Of_Vid\"] = {\n \"Run\": self.ui.c_of.isChecked(),\n \"Progress\": ori_images,\n \"Text\": \"Creating optical flow video\",\n }\n self.run_dict[\"Back_Of_Vid\"] = {\n \"Run\": self.ui.c_back_of.isChecked(),\n \"Progress\": ori_images,\n \"Text\": \"Creating backward optical flow video\",\n }\n self.run_dict[\"Depth_Vid\"] = {\n \"Run\": self.ui.c_depth.isChecked(),\n \"Progress\": ori_images,\n \"Text\": \"Creating depth estimation video\",\n }\n\n self.run_dict[\"Speed_Plot\"] = {\n \"Run\": self.ui.c_speed_plot.isChecked(),\n \"Progress\": ori_images,\n \"Text\": \"Creating plot for 
speed values\",\n }\n self.run_dict[\"Crash_Plot\"] = {\n \"Run\": self.ui.c_crash_plot.isChecked(),\n \"Progress\": ori_images,\n \"Text\": \"Creating plot for time to crash\",\n }\n self.run_dict[\"Error_Plot\"] = {\n \"Run\": self.ui.c_error_plot.isChecked() and self.gt_exist,\n \"Progress\": ori_images,\n \"Text\": \"Creating plot for speed error\",\n }\n\n self.run_dict[\"Speed_Plot_Video\"] = {\n \"Run\": self.ui.c_speed_plot_video.isChecked(),\n \"Progress\": ori_images,\n \"Text\": \"Creating speed plot video\",\n }\n self.run_dict[\"Error_Plot_Video\"] = {\n \"Run\": self.ui.c_error_plot_video.isChecked() and self.gt_exist,\n \"Progress\": ori_images,\n \"Text\": \"Creating error plot video\",\n }\n self.run_dict[\"Crash_Plot_Video\"] = {\n \"Run\": self.ui.c_crash_plot_video.isChecked(),\n \"Progress\": ori_images,\n \"Text\": \"Creating time to crash plot video\",\n }\n\n self.run_dict[\"Super_Pixel_Video\"] = {\n \"Run\": self.ui.combo_superpixel.currentIndex() != 0\n and self.ui.c_super_pixel_video.isChecked(),\n \"Progress\": ori_images,\n \"Text\": \"Creating super pixel video\",\n }\n self.run_dict[\"Super_Pixel_Label\"] = {\n \"Run\": self.create_super_pixel_label,\n \"Progress\": ori_images,\n \"Text\": \"Creating {0} superpixel labels\".format(self.super_pixel_method),\n }\n\n self.run_dict[\"Object_Detection\"] = {\n \"Run\": (\n self.ui.c_object_detection.isChecked()\n or self.ui.c_crash_plot.isChecked()\n )\n and not self.object_detection_dir_exist,\n \"Progress\": ori_images,\n \"Text\": \"Running Object Detection\",\n }\n\n self.addAllProgressBar()\n self.buildParamsDict()\n self.saveUser()\n self.startCalcThread()", "def do_ns_loop():\n global print_prefix\n global cur_config_ind\n\n if rank == 0:\n nD = 3\n if movement_args['2D']:\n nD = 2\n if energy_io.tell() == 0:\n if movement_args['do_velocities']:\n nExtraDOF = 0\n else:\n nExtraDOF = n_atoms*nD\n energy_io.write(\"%d %d %d %s %d\\n\" % (ns_args['n_walkers'], ns_args['n_cull'], nExtraDOF, movement_args['MC_cell_flat_V_prior'], n_atoms) )\n\n ## print print_prefix, \": random state \", np.random.get_state()\n ## if rank == 0:\n ## print print_prefix, \": common random state \", common_random_state\n\n if ns_args['debug'] >= 10 and size <= 1:\n for at in walkers:\n at.info['n_walks'] = 0\n\n for at in walkers:\n at.info['KEmax']=KEmax \n if movement_args['MC_cell_P'] > 0:\n print rank, \": initial enthalpy \", at.info['ns_energy'], \" PE \", eval_energy_PE(at), \" KE \", eval_energy_KE(at), \" PV \", eval_energy_PV(at), \" mu \", eval_energy_mu(at), \" vol \", at.get_volume()\n else:\n print rank, \": initial enthalpy \", at.info['ns_energy'], \" PE \", eval_energy_PE(at), \" KE \", eval_energy_KE(at), \" mu \", eval_energy_mu(at), \" vol \",at.get_volume()\n\n # stats for purpose of adjusting step size\n walk_stats_adjust={}\n # stats for purpose of monitoring acceptance rates\n walk_stats_monitor={}\n zero_stats(walk_stats_adjust, movement_args)\n zero_stats(walk_stats_monitor, movement_args)\n\n initial_time = time.time()\n prev_time = initial_time\n step_size_setting_duration = 0.0\n total_step_size_setting_duration = 0.0\n\n Emax_of_step = None\n Emax_save = []\n i_ns_step_save = []\n traj_walker_list = []\n E_dump_list = []\n E_dump_list_times = []\n\n verbose=False\n\n # to avoid errors of unassigned values, if in case of a restart the final number of iter is the same as the satring, stop.\n if start_first_iter == ns_args['n_iter']:\n print \"WARNING: Increase the n_iter_times_fraction_killed variable 
in the input if you want NS cycles to be performed.\"\n exit_error(\"starting iteration and the total number of required iterations are the same,hence no NS cycles will be performed\\n\",11)\n\n last_log_X_n = 0.0\n i_range_mod_n_cull = np.array(range(ns_args['n_cull']))\n i_range_plus_1_mod_n_cull = np.mod(np.array(range(ns_args['n_cull']))+1, ns_args['n_cull'])\n log_X_n_term = np.log(ns_args['n_walkers']-i_range_mod_n_cull) - np.log(ns_args['n_walkers']+1-i_range_mod_n_cull)\n log_X_n_term_cumsum = np.cumsum(log_X_n_term)\n log_X_n_term_cumsum_modified = log_X_n_term_cumsum - np.log(ns_args['n_walkers']+1-i_range_plus_1_mod_n_cull)\n log_X_n_term_sum = log_X_n_term_cumsum[-1]\n if ns_args['converge_down_to_T'] > 0:\n converge_down_to_beta = 1.0/(ns_args['kB']*ns_args['converge_down_to_T'])\n log_Z_term_max = None\n\n prev_snapshot_iter = None\n pprev_snapshot_iter = None\n last_snapshot_time = time.time()\n\n # for estimating current temperature from d log Omega / d E\n if ns_args['T_estimate_finite_diff_lag'] > 0:\n log_alpha = np.log(float(ns_args['n_walkers']+1-ns_args['n_cull'])/float(ns_args['n_walkers']+1))\n Emax_history=collections.deque(maxlen=ns_args['T_estimate_finite_diff_lag'])\n\n if ns_analyzers is not None:\n for (ns_analyzer, ns_analyzer_interval) in ns_analyzers:\n ns_analyzer.analyze(walkers, -1, \"NS_loop start\")\n\n # START MAIN LOOP\n i_ns_step = start_first_iter\n while ns_args['n_iter'] < 0 or i_ns_step < ns_args['n_iter']:\n print_prefix=\"%d NS %d\" % (rank, i_ns_step)\n\n if ns_args['debug'] >= 4 and ns_args['track_configs']:\n for at in walkers:\n print print_prefix, \"INFO: 10 config_ind \", at.info['config_ind'], \" from \", at.info['from_config_ind'], \" at \", at.info['config_ind_time']\n\n if movement_args['adjust_step_interval'] < 0:\n zero_stats(walk_stats_adjust, movement_args)\n if movement_args['monitor_step_interval'] < 0:\n zero_stats(walk_stats_monitor, movement_args)\n\n if ns_args['debug'] >= 20:\n print print_prefix, \"%30s\" % \": LOOP_TE START 00 \",i_ns_step, [ \"%.10f\" % eval_energy(at) for at in walkers ]\n print print_prefix, \"%30s\" % \": LOOP_PE START 01 \",i_ns_step, [ \"%.10f\" % eval_energy(at, do_KE=False) for at in walkers ]\n print print_prefix, \"%30s\" % \": LOOP_X START 02 \",i_ns_step, [ \"%.10f\" % at.positions[0,0] for at in walkers ]\n\n # get list of highest energy configs\n (Emax, Vmax, cull_rank, cull_ind) = max_energy(walkers, n_cull)\n Emax_next = Emax[-1]\n if rank == 0 and Emax_of_step is not None and Emax[0] > Emax_of_step:\n print print_prefix, \": WARNING: energy above Emax \", Emax_of_step, \" bad energies: \", Emax[np.where(Emax > Emax_of_step)], cull_rank[np.where(Emax > Emax_of_step)], cull_ind[np.where(Emax > Emax_of_step)]\n # comm.barrier()\n # exit_error(\"Energy above Emax\\n\", 5)\n\n if rank == 0 and (i_ns_step > start_first_iter and Emax_next >= Emax_of_step):\n print \"WARNING: Emax not decreasing \",Emax_of_step, Emax_next\n Emax_of_step=Emax_next\n\n if ns_args['min_Emax'] is not None and Emax_of_step < ns_args['min_Emax']:\n if rank == 0:\n # if the termination was set by a minimum energy, and it is reached, stop.\n print \"Leaving loop because Emax=\",Emax_of_step,\" < min_Emax =\",ns_args['min_Emax']\n i_ns_step += 1 # add one so outside loop when one is subtracted to get real last iteration it's still correct\n break\n\n if rank == 0:\n cur_time=time.time()\n output_this_iter = (cur_time > prev_time+60 or i_ns_step == 0 or i_ns_step == ns_args['n_iter'] or (ns_args['n_iter'] > 0 and 
i_ns_step % max(int(ns_args['n_iter']/1000),1) == 0))\n else:\n output_this_iter = False\n\n if ns_args['converge_down_to_T'] > 0:\n # see ns_analyse.py calc_log_a() for math\n log_a = log_X_n_term_sum*i_ns_step + log_X_n_term_cumsum_modified\n #DEBUG if rank == 0:\n #DEBUG for ii in range(len(log_a)):\n #DEBUG print i_ns_step, \"log_a, beta, Es, beta*Es \", log_a[ii], beta, Emax[ii], beta*Emax[ii]\n log_Z_term_max = max(log_Z_term_max, np.amax(log_a-converge_down_to_beta*Emax))\n log_Z_term_last = log_a[-1]-converge_down_to_beta*Emax[-1]\n if output_this_iter:\n print \"log_Z_term max \", log_Z_term_max, \"last \", log_Z_term_last, \"diff \", log_Z_term_max-log_Z_term_last\n if log_Z_term_last < log_Z_term_max - 10.0:\n if rank == 0:\n print print_prefix, \"Leaving loop because Z(%f) is converged\" % ns_args['converge_down_to_T']\n i_ns_step += 1 # add one so outside loop when one is subtracted to get real last iteration it's still correct\n break\n\n if ns_args['T_estimate_finite_diff_lag'] > 0:\n Emax_history.append(Emax_of_step)\n if output_this_iter:\n if ns_args['T_estimate_finite_diff_lag'] > 0 and len(Emax_history) > 1:\n beta_estimate = (len(Emax_history)-1)*log_alpha/(Emax_history[-1]-Emax_history[0])\n T_estimate = 1.0/(ns_args['kB']*beta_estimate)\n else:\n T_estimate = -1\n print i_ns_step, \"Emax_of_step \", Emax_of_step, \"T_estimate \", T_estimate, \" loop time \", cur_time-prev_time-step_size_setting_duration,\" time spent setting step sizes: \",step_size_setting_duration\n prev_time = cur_time\n step_size_setting_duration = 0.0\n\n entries_for_this_rank = np.where(cull_rank == rank)[0]\n cull_list = cull_ind[entries_for_this_rank]\n if rank == 0 and ns_args['debug'] >= 4 and len(cull_ind[entries_for_this_rank]) > 0:\n print print_prefix, \"INFO: 20 cull \", cull_ind[entries_for_this_rank], \" on \",rank\n\n # record Emax walkers energies\n if rank == 0:\n for (E, V) in izip(Emax, Vmax):\n energy_io.write(\"%d %.60f %.60f\\n\" % (i_ns_step, E, V))\n energy_io.flush()\n\n ## Save the energies and corresponding iteration numbers in a list then print them out only when printing a snapshot\n #Emax_save.extend(Emax)\n #i_ns_step_save.extend(n_cull*[i_ns_step])\n ## if it is time to print (i.e. 
at the same iteration when a snapshot is written, or at every iter if no snapshots - for smooth restarts)\n #if ns_args['snapshot_interval'] < 0 or i_ns_step % ns_args['snapshot_interval'] == ns_args['snapshot_interval']-1:\n # for istep,E in zip(i_ns_step_save,Emax_save):\n # energy_io.write(\"%d %.60f\\n\" % (istep, E))\n # energy_io.flush()\n # #empty the save lists, so they are ready for the next bunch of saved energies\n # Emax_save[:]=[]\n # i_ns_step_save[:]=[]\n\n # record Emax walkers configurations\n if cull_list is not None:\n for (i, global_n_offset) in zip(cull_list, entries_for_this_rank):\n if ns_args['debug'] >= 10 and size <= 1:\n print print_prefix, \"walker killed at age \",walkers[i].info['n_walks']\n # store culled config in list to be written (when snapshot_interval has passed) every traj_interval steps\n global_n = i_ns_step*n_cull + global_n_offset\n if ns_args['traj_interval'] > 0 and global_n % ns_args['traj_interval'] == ns_args['traj_interval']-1:\n walker_copy = walkers[i].copy()\n walker_copy.info['volume'] = walker_copy.get_volume()\n walker_copy.info['ns_P'] = movement_args['MC_cell_P']\n walker_copy.info['iter'] = i_ns_step\n walker_copy.info['config_n_global'] = global_n\n if walker_copy.has('masses') and walker_copy.has('momenta'):\n walker_copy.info['ns_KE'] = walker_copy.get_kinetic_energy()\n\n traj_walker_list.append(walker_copy)\n\n # if tracking all configs, save this one that has been culled\n if track_traj_io is not None:\n at = walkers[i].copy()\n at.info['culled'] = True\n ase.io.write(track_traj_io, at, format=ns_args['config_file_format'])\n\n if ns_args['E_dump_interval'] > 0 and i_ns_step % ns_args['E_dump_interval'] == 0: # ns_args['E_dump_interval']-1:\n if walkers[0].has('masses') and walkers[0].has('momenta'):\n E_dump_list.append([ w.info['ns_energy'] - w.get_kinetic_energy() for w in walkers])\n else:\n E_dump_list.append([ w.info['ns_energy'] for w in walkers])\n E_dump_list_times.append(i_ns_step)\n\n # print the recorded Emax walkers configurations to output file\n if (ns_args['snapshot_interval'] < 0 or i_ns_step % ns_args['snapshot_interval'] == ns_args['snapshot_interval']-1 or\n (ns_args['snapshot_seq_pairs'] and i_ns_step > 0 and i_ns_step%ns_args['snapshot_interval'] == 0) ) :\n if ns_args['traj_interval'] > 0:\n for at in traj_walker_list:\n ase.io.write(traj_io, at, format=ns_args['config_file_format'])\n traj_io.flush()\n traj_walker_list=[]\n if ns_args['E_dump_interval'] > 0:\n if comm is not None:\n E_dump_list_all = np.array(comm.allgather(E_dump_list))\n else:\n E_dump_list_all = np.array(E_dump_list)\n if rank == 0:\n for i in range(E_dump_list_all.shape[1]):\n E_dump_io.write(\"step %d\\n\" % E_dump_list_times[i])\n if len(E_dump_list_all.shape) == 3:\n np.savetxt(E_dump_io, E_dump_list_all[:,i,:])\n else:\n np.savetxt(E_dump_io, E_dump_list_all[i,:])\n E_dump_io.flush()\n E_dump_list = []\n E_dump_list_all = None\n E_dump_list_times = []\n\n # calculate how many will be culled on each rank\n n_cull_of_rank = np.array([ sum(cull_rank == r) for r in range(size) ])\n\n # label configs to be culled\n status = np.empty( (size, n_walkers), np.object_)\n status[:,:] = ''\n for r in range(size):\n status[r,cull_ind[np.where(cull_rank == r)[0]]] = 'c_t'\n\n if ns_args['debug'] >= 10:\n initial_PE_loc = [ eval_energy(at, do_KE=False) for at in walkers ]\n initial_E_loc = [ eval_energy(at) for at in walkers ]\n if comm is not None:\n initial_PE = np.array(comm.allgather(initial_PE_loc)).flatten()\n initial_E = 
np.array(comm.allgather(initial_E_loc)).flatten()\n else:\n initial_PE = np.array(initial_PE_loc)\n initial_E = np.array(initial_E_loc)\n initial_changed = initial_PE[np.where(status.flatten() == 'c_t')]\n initial_unchanged = initial_PE[np.where(status.flatten() == '')]\n\n if ns_args['debug'] >= 30:\n for r in range(len(status)):\n print print_prefix, \": initial status \", r, [ s for s in status[r,:] ]\n\n # find load balance by cloning on top of excess maxima\n recv_ind=[]\n recv_rank=[]\n send_ind=[]\n send_rank=[]\n cull_inds_to_remove=[]\n\n if n_cull > 1: # send/recv for fixing load balance\n # CHECK FOR RANDOMNESS ISSUES AND WHICH NODES ARE USED FOR CLONES\n for r in range(size):\n # maybe remote_r should be chosen completely randomly, rather than close to task of extra culled configs\n for dr in np.array(zip(np.array(range(1,size)), -np.array(range(1,size)))).flatten():\n if n_cull_of_rank[r] <= max_n_cull_per_task: # not too many that need to be culled on this rank\n break\n # this rank has too many to cull, must receive replacement from another node\n remote_r = (r+dr) % size\n if n_cull_of_rank[remote_r] < max_n_cull_per_task: # send from r+dr to r\n n_transfer = min(n_cull_of_rank[r]-max_n_cull_per_task, max_n_cull_per_task-n_cull_of_rank[remote_r])\n recv_rank.extend([r]*n_transfer)\n send_rank.extend([remote_r]*n_transfer)\n local_ind = np.where(status[r,:] == 'c_t')[0][0:n_transfer]\n recv_ind.extend(local_ind)\n remote_ind = np.where(status[remote_r,:] == '')[0][0:n_transfer]\n send_ind.extend(remote_ind)\n status[r,local_ind] = 'c_s'\n status[remote_r,remote_ind] = 'c_t_a'\n n_cull_of_rank[r] -= n_transfer\n n_cull_of_rank[remote_r] += n_transfer\n\n\n # save local random state, and switch to common one\n rng.switch_to_common()\n\n # select clones\n for r in range(size):\n list_clone_target = np.where(status[r,:] == 'c_t')[0]\n # assign clones\n n_remaining_clones = len(list_clone_target)\n while n_remaining_clones > 0:\n remote_r = rng.int_uniform(0,size)\n n_avail_remote = sum(status[remote_r,:] == '')\n if n_avail_remote > 0: # something is available on remote_r\n # send from random avail walker on remote_r to clone_target on r\n n_transfer = min(n_remaining_clones, n_avail_remote)\n\n # set ranks\n send_rank.extend([remote_r]*n_transfer)\n recv_rank.extend([r]*n_transfer)\n\n # set indices\n r_is = []\n for ii in range(n_transfer):\n r_i = rng.int_uniform(0, n_walkers)\n while status[remote_r,r_i] != '':\n r_i = rng.int_uniform(0, n_walkers)\n # now r_i should be something with status ''\n status[remote_r,r_i] = 'c_s'\n r_is.append(r_i)\n send_ind.extend(r_is)\n\n status[r,list_clone_target[0:n_transfer]] = 'c_t_a'\n recv_ind.extend(list_clone_target[0:n_transfer])\n\n if n_transfer < len(list_clone_target):\n list_clone_target = list_clone_target[n_transfer:]\n n_remaining_clones -= n_transfer\n\n if ns_args['debug'] >= 20:\n print print_prefix, \"%30s\" % \": LOOP_TE POST_LOC_CLONE 15 \",i_ns_step, [ \"%.10f\" % eval_energy(at) for at in walkers ]\n print print_prefix, \"%30s\" % \": LOOP_PE POST_LOC_CLONE 16 \",i_ns_step, [ \"%.10f\" % eval_energy(at, do_KE=False) for at in walkers ]\n print print_prefix, \"%30s\" % \": LOOP_X POST_LOC_CLONE 17 \",i_ns_step, [ \"%.10f\" % at.positions[0,0] for at in walkers ]\n\n # make into numpy arrays so that mathematical operations will work\n send_rank = np.array(send_rank)\n send_ind = np.array(send_ind)\n recv_rank = np.array(recv_rank)\n recv_ind = np.array(recv_ind)\n\n if ns_args['debug'] >= 10:\n if rank == 0:\n for i in 
range(len(send_rank)):\n print print_prefix, \"send from \",send_rank[i],send_ind[i],\" to \",recv_rank[i], recv_ind[i]\n\n # save new common state, and restore to local state\n rng.switch_to_local()\n\n if n_cull == 1:\n if send_rank[0] == recv_rank[0] and send_rank[0] == rank: # local copy\n walkers[recv_ind[0]].set_positions(walkers[send_ind[0]].get_positions())\n walkers[recv_ind[0]].set_cell(walkers[send_ind[0]].get_cell())\n if movement_args['do_velocities']:\n walkers[recv_ind[0]].set_velocities(walkers[send_ind[0]].get_velocities())\n if movement_args['do_GMC']:\n walkers[recv_ind[0]].arrays['GMC_direction'][:,:] = walkers[send_ind[0]].arrays['GMC_direction']\n if ns_args['n_extra_data'] > 0:\n walkers[recv_ind[0]].arrays['ns_extra_data'][...] = walkers[send_ind[0]].arrays['ns_extra_data']\n if ns_args['swap_atomic_numbers']:\n walkers[recv_ind[0]].set_atomic_numbers(walkers[send_ind[0]].get_atomic_numbers())\n if movement_args['do_velocities']:\n walkers[recv_ind[0]].set_masses(walkers[send_ind[0]].get_masses())\n if ns_args['track_configs']:\n walkers[recv_ind[0]].info['config_ind'] = walkers[send_ind[0]].info['config_ind']\n walkers[recv_ind[0]].info['from_config_ind'] = walkers[send_ind[0]].info['from_config_ind']\n walkers[recv_ind[0]].info['config_ind_time'] = walkers[send_ind[0]].info['config_ind_time']\n walkers[recv_ind[0]].info['ns_energy'] = eval_energy(walkers[recv_ind[0]])\n if ns_args['debug'] >= 10 and size <= 1:\n walkers[recv_ind[0]].info['n_walks'] = 0\n else: # need send/recv\n n_send = 3*(n_atoms + 3)\n if movement_args['do_velocities']:\n n_send += 3*n_atoms\n if movement_args['do_GMC']:\n n_send += 3*n_atoms\n if ns_args['n_extra_data'] > 0:\n n_send += ns_args['n_extra_data']*n_atoms\n if ns_args['swap_atomic_numbers']:\n n_send += n_atoms # Z\n if movement_args['do_velocities']:\n n_send += n_atoms # mass\n if ns_args['track_configs']:\n n_send += 3\n buf = np.zeros ( n_send )\n if send_rank[0] == rank: # only one config is sent/received\n buf_o = 0\n buf[buf_o:buf_o+3*n_atoms] = walkers[send_ind[0]].get_positions().reshape( (3*n_atoms) ); buf_o += 3*n_atoms\n buf[buf_o:buf_o+3*3] = walkers[send_ind[0]].get_cell().reshape( (3*3) ); buf_o += 3*3\n if movement_args['do_velocities']:\n buf[buf_o:buf_o+3*n_atoms] = walkers[send_ind[0]].get_velocities().reshape( (3*n_atoms) ); buf_o += 3*n_atoms\n if movement_args['do_GMC']:\n buf[buf_o:buf_o+3*n_atoms] = walkers[send_ind[0]].arrays['GMC_direction'].reshape( (3*n_atoms) ); buf_o += 3*n_atoms\n if ns_args['n_extra_data'] > 0:\n buf[buf_o:buf_o+ns_args['n_extra_data']*n_atoms] = walkers[send_ind[0]].arrays['ns_extra_data'].reshape( (ns_args['n_extra_data']*n_atoms) ); buf_o += ns_args['n_extra_data']*n_atoms\n if ns_args['swap_atomic_numbers']:\n buf[buf_o:buf_o+n_atoms] = walkers[send_ind[0]].get_atomic_numbers(); buf_o += n_atoms\n if movement_args['do_velocities']:\n buf[buf_o:buf_o+n_atoms] = walkers[send_ind[0]].get_masses(); buf_o += n_atoms\n if ns_args['track_configs']:\n buf[buf_o] = walkers[send_ind[0]].info['config_ind']; buf_o += 1\n buf[buf_o] = walkers[send_ind[0]].info['from_config_ind']; buf_o += 1\n buf[buf_o] = walkers[send_ind[0]].info['config_ind_time']; buf_o += 1\n comm.Send([buf, MPI.DOUBLE], dest=recv_rank[0], tag=100)\n elif recv_rank[0] == rank:\n comm.Recv([buf, MPI.DOUBLE], source=send_rank[0], tag=100)\n buf_o = 0\n walkers[recv_ind[0]].set_positions(buf[buf_o:buf_o+3*n_atoms].reshape( (n_atoms, 3) )); buf_o += 3*n_atoms\n walkers[recv_ind[0]].set_cell(buf[buf_o:buf_o+3*3].reshape( (3, 
3) )); buf_o += 3*3\n if movement_args['do_velocities']:\n walkers[recv_ind[0]].set_velocities(buf[buf_o:buf_o+3*n_atoms].reshape( (n_atoms, 3) )); buf_o += 3*n_atoms\n if movement_args['do_GMC']:\n walkers[recv_ind[0]].arrays['GMC_direction'][:,:] = buf[buf_o:buf_o+3*n_atoms].reshape( (n_atoms, 3) ); buf_o += 3*n_atoms\n if ns_args['n_extra_data'] > 0:\n walkers[recv_ind[0]].arrays['ns_extra_data'][...] = buf[buf_o:buf_o+3*n_atoms].reshape( walkers[recv_ind[0]].arrays['ns_extra_data'].shape ); buf_o += ns_args['n_extra_data']*n_atoms\n if ns_args['swap_atomic_numbers']:\n walkers[recv_ind[0]].set_atomic_numbers(buf[buf_o:buf_o+n_atoms].astype(int)); buf_o += n_atoms\n if movement_args['do_velocities']:\n walkers[recv_ind[0]].set_masses(buf[buf_o:buf_o+n_atoms]); buf_o += n_atoms\n if ns_args['track_configs']:\n walkers[recv_ind[0]].info['config_ind'] = int(buf[buf_o]); buf_o += 1\n walkers[recv_ind[0]].info['from_config_ind'] = int(buf[buf_o]); buf_o += 1\n walkers[recv_ind[0]].info['config_ind_time'] = int(buf[buf_o]); buf_o += 1\n walkers[recv_ind[0]].info['ns_energy'] = eval_energy(walkers[recv_ind[0]])\n\n else: # complicated construction of sending/receiving buffers\n # figure out how much is sent per config\n n_data_per_config = 1+3*(n_atoms + 3)\n if movement_args['do_velocities']:\n n_data_per_config += 3*n_atoms\n if movement_args['do_GMC']:\n n_data_per_config += 3*n_atoms\n if ns_args['n_extra_data'] > 0:\n n_data_per_config += ns_args['n_extra_data']*n_atoms\n if ns_args['swap_atomic_numbers']:\n n_send += n_atoms # Z\n if movement_args['do_velocities']:\n n_send += n_atoms # mass\n if ns_args['track_configs']:\n n_data_per_config += 3\n\n # figure out send counts\n send_count = [0] * size\n for i in np.where(send_rank == rank)[0]:\n r_recv = recv_rank[i]\n send_count[r_recv] += n_data_per_config\n\n # figure out send displacements\n send_displ = [0] * size\n send_displ[0] = 0\n for i in range(1,size):\n send_displ[i] = send_displ[i-1] + send_count[i-1]\n\n # create empty buffer for sending\n send_count_tot = sum(send_count)\n send_data = np.zeros(send_count_tot)\n\n # copy data to be sent to buffer\n send_displ_t = list(send_displ)\n for i in np.where(send_rank == rank)[0]:\n r_recv = recv_rank[i]\n i_send = send_ind[i]\n\n data_o = send_displ_t[r_recv]\n send_data[data_o] = walkers[i_send].info['ns_energy']; data_o += 1\n send_data[data_o:data_o+3*n_atoms] = walkers[i_send].get_positions().reshape( (3*n_atoms) ); data_o += 3*n_atoms\n send_data[data_o:data_o+3*3] = walkers[i_send].get_cell().reshape( (3*3) ); data_o += 3*3\n if movement_args['do_velocities']:\n send_data[data_o:data_o+3*n_atoms] = walkers[i_send].get_velocities().reshape( (3*n_atoms) ); data_o += 3*n_atoms\n if movement_args['do_GMC']:\n send_data[data_o:data_o+3*n_atoms] = walkers[i_send].arrays['GMC_direction'].reshape( (3*n_atoms) ); data_o += 3*n_atoms\n if ns_args['n_extra_data'] > 0:\n send_data[data_o:data_o+ns_args['n_extra_data']*n_atoms] = walkers[i_send].arrays['ns_extra_data'].reshape( (ns_args['n_extra_data']*n_atoms) ); data_o += ns_args['n_extra_data']*n_atoms\n if ns_args['swap_atomic_numbers']:\n send_data[data_o:data_o+n_atoms] = walkers[i_send].get_atomic_numbers(); data_o += n_atoms\n if movement_args['do_velocities']:\n send_data[data_o:data_o+n_atoms] = walkers[i_send].get_masses(); data_o += n_atoms\n if ns_args['track_configs']:\n send_data[data_o] = walkers[i_send].info['config_ind']; data_o += 1\n send_data[data_o] = walkers[i_send].info['from_config_ind']; data_o += 1\n 
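# Illustrative standalone sketch (hypothetical payload size; mpi4py and numpy
# only) of the pattern this packing loop feeds into: one contiguous slice per
# destination rank, plus per-rank counts and exclusive-prefix-sum
# displacements, handed to Alltoallv on both the send and the receive side.
import numpy as np
from mpi4py import MPI
toy_comm = MPI.COMM_WORLD
toy_size = toy_comm.Get_size()
toy_payload = 7                                   # assumed floats per config
toy_counts = [toy_payload] * toy_size             # here: one config for every rank
toy_displs = [0] * toy_size
for toy_r in range(1, toy_size):
    toy_displs[toy_r] = toy_displs[toy_r - 1] + toy_counts[toy_r - 1]
toy_send = np.arange(toy_size * toy_payload, dtype=float)
toy_recv = np.empty(toy_size * toy_payload)
toy_comm.Alltoallv([toy_send, toy_counts, toy_displs, MPI.DOUBLE],
                   [toy_recv, toy_counts, toy_displs, MPI.DOUBLE])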
send_data[data_o] = walkers[i_send].info['config_ind_time']; data_o += 1\n send_displ_t[r_recv] = data_o\n\n # figure out recv counts\n recv_count = [0] * size\n for i in np.where(recv_rank == rank)[0]:\n r_send = send_rank[i]\n recv_count[r_send] += n_data_per_config\n\n # figure out recv displacements\n recv_displ = [0] * size\n recv_displ[0] = 0\n for i in range(1,size):\n recv_displ[i] = recv_displ[i-1] + recv_count[i-1]\n\n # create empty buffer for receiving\n recv_count_tot = sum(recv_count)\n recv_data = np.zeros(recv_count_tot)\n\n # do communications\n if comm is not None:\n send_buf = [send_data, send_count, send_displ, MPI.DOUBLE]\n recv_buf = (recv_data, recv_count, recv_displ, MPI.DOUBLE)\n comm.Alltoallv(send_buf, recv_buf)\n else:\n recv_data = send_data.copy()\n\n # copy data from recv buffer to walkers\n recv_displ_t = list(recv_displ)\n for i in np.where(recv_rank == rank)[0]:\n r_send = send_rank[i]\n i_recv = recv_ind[i]\n\n data_o = recv_displ_t[r_send]\n walkers[i_recv].info['ns_energy'] = recv_data[data_o]; data_o += 1\n walkers[i_recv].set_positions( recv_data[data_o:data_o+3*n_atoms].reshape( (n_atoms, 3) )); data_o += 3*n_atoms\n walkers[i_recv].set_cell( recv_data[data_o:data_o+3*3].reshape( (3, 3) )); data_o += 3*3\n if movement_args['do_velocities']:\n walkers[i_recv].set_velocities( recv_data[data_o:data_o+3*n_atoms].reshape( (n_atoms, 3) )); data_o += 3*n_atoms\n if movement_args['do_GMC']:\n walkers[i_recv].arrays['GMC_direction'][:,:] = recv_data[data_o:data_o+3*n_atoms].reshape( (n_atoms, 3) ); data_o += 3*n_atoms\n if ns_args['n_extra_data'] > 0:\n walkers[i_recv].arrays['ns_extra_data'][...] = recv_data[data_o:data_o+ns_args['n_extra_data']*n_atoms].reshape( walkers[i_recv].arrays['ns_extra_data'].shape ); data_o += ns_args['n_extra_data']*n_atoms\n if ns_args['swap_atomic_numbers']:\n walkers[i_recv].set_atomic_numbers(recv_data[data_o:data_o+n_atoms].astype(int)); data_o += n_atoms\n if movement_args['do_velocities']:\n walkers[i_recv].set_masses(recv_data[data_o:data_o+n_atoms]); data_o += n_masses\n if ns_args['track_configs']:\n walkers[i_recv].info['config_ind'] = int(recv_data[data_o]); data_o += 1\n walkers[i_recv].info['from_config_ind'] = int(recv_data[data_o]); data_o += 1\n walkers[i_recv].info['config_ind_time'] = int(recv_data[data_o]); data_o += 1\n recv_displ_t[r_send] = data_o\n\n if ns_args['debug'] >= 20:\n print print_prefix, \"%30s\" % \": LOOP_TE POST_CLONE 20 \",i_ns_step, [ \"%.10f\" % eval_energy(at) for at in walkers ]\n print print_prefix, \"%30s\" % \": LOOP_PE POST_CLONE 21 \",i_ns_step, [ \"%.10f\" % eval_energy(at, do_KE=False) for at in walkers ]\n print print_prefix, \"%30s\" % \": LOOP_X POST_CLONE 22 \",i_ns_step, [ \"%.10f\" % at.positions[0,0] for at in walkers ]\n\n if ns_args['track_configs']:\n # loop over _all_ clone targets and increment cur_config_ind, setting appropriate configs' new config_ind as needed \n for r in range(size):\n clone_walk_ind = np.where(status[r,:] == 'c_t_a')[0]\n for i_at in clone_walk_ind:\n if r == rank:\n walkers[i_at].info['from_config_ind'] = walkers[i_at].info['config_ind']\n walkers[i_at].info['config_ind'] = cur_config_ind\n walkers[i_at].info['config_ind_time'] = i_ns_step\n cur_config_ind += 1\n # move cloned walkers\n\n if (i_ns_step == start_first_iter and movement_args['full_auto_step_sizes']):\n # set initial step sizes. 
Performed here since this is the first time all the arrays are in place\n conf_pre=walkers[0].copy()\n conf_pre.set_calculator(walkers[0].get_calculator())\n move_args_pre=deepcopy(movement_args)\n walk_stats_pre=walk_single_walker(conf_pre, move_args_pre, Emax_of_step, KEmax)\n delta_step_size_setting_duration = full_auto_set_stepsizes(walkers, walk_stats_pre, movement_args, comm, Emax_of_step, KEmax, size)\n total_step_size_setting_duration += delta_step_size_setting_duration\n step_size_setting_duration += delta_step_size_setting_duration\n del(walk_stats_pre)\n del(move_args_pre)\n del(conf_pre)\n\n # walk clone targets\n if ns_args['debug'] >= 4:\n for i in np.where(status[rank,:] == 'c_s')[0]:\n print print_prefix, \"INFO: 30 clone source \", rank, i\n clone_walk_ind = np.where(status[rank,:] == 'c_t_a')[0]\n for i_at in clone_walk_ind:\n if ns_args['debug'] >= 4:\n print print_prefix, \"INFO: 40 WALK clone_target \", rank, i_at\n walk_stats = walk_single_walker(walkers[i_at], movement_args, Emax_of_step, KEmax)\n walkers[i_at].info['last_walked_iter_clone'] = i_ns_step\n # if tracking all configs, save this one that has been walked\n if track_traj_io is not None:\n walkers[i_at].info['iter'] = i_ns_step\n ase.io.write(track_traj_io, walkers[i_at], format=ns_args['config_file_format'])\n #print \"WALK on rank \", rank, \"at iteration \", i_ns_step, \" walker \", i_at \n if ns_args['debug'] >= 10 and size <= 1:\n walkers[i_at].info['n_walks'] += movement_args['n_model_calls']\n accumulate_stats(walk_stats_adjust, walk_stats)\n accumulate_stats(walk_stats_monitor, walk_stats)\n\n if ns_args['debug'] >= 20:\n print print_prefix, \"%30s\" % \": LOOP_TE POST_CLONE_WALK 25 \",i_ns_step, [ \"%.10f\" % eval_energy(at) for at in walkers ]\n print print_prefix, \"%30s\" % \": LOOP_PE POST_CLONE_WALK 26 \",i_ns_step, [ \"%.10f\" % eval_energy(at, do_KE=False) for at in walkers ]\n print print_prefix, \"%30s\" % \": LOOP_X POST_CLONE_WALK 27 \",i_ns_step, [ \"%.10f\" % at.positions[0,0] for at in walkers ]\n\n # check that everything that should have been changed has, and things that shouldn't have, haven't\n if ns_args['debug'] >= 10:\n final_PE_loc = [ eval_energy(at, do_KE=False) for at in walkers ]\n final_E_loc = [ eval_energy(at) for at in walkers ]\n if comm is not None:\n final_PE = np.array(comm.allgather(final_PE_loc)).flatten()\n final_E = np.array(comm.allgather(final_E_loc)).flatten()\n else:\n final_PE = final_PE_loc\n final_E = final_E_loc\n if rank == 0:\n final_status = status.flatten()\n for e in initial_unchanged:\n if e not in final_PE:\n print \"initial_PE \", initial_PE\n print \"final_PE \", final_PE\n print \"initial_E \", initial_E\n print \"final_E \", final_E\n print \"final_status \", final_status\n print \"WARNING: energy that should have been unchanged \", e,\" missing from final energies\"\n for e in initial_changed:\n if e in final_PE:\n print \"initial_PE \", initial_PE\n print \"final_PE \", final_PE\n print \"initial_E \", initial_E\n print \"final_E \", final_E\n print \"final_status \", final_status\n print \"WARNING: energy that should have been changed \", e,\" still there in final energies\"\n\n\n # walk extras\n if not ns_args['no_extra_walks_at_all']:\n for ii in range(max_n_cull_per_task - len(clone_walk_ind)+n_extra_walk_per_task):\n r_i = rng.int_uniform(0, n_walkers)\n # WARNING: this may select walkers for extra walks multiple times, yet never re-walk ones that were walked as clone targets\n while status[rank,r_i] != '' and status[rank,r_i] != 
'c_s':\n r_i = rng.int_uniform(0, n_walkers)\n if ns_args['debug'] >= 4:\n print print_prefix, \"INFO: 50 WALK extra \",rank, r_i\n walk_stats = walk_single_walker(walkers[r_i], movement_args, Emax_of_step, KEmax)\n walkers[r_i].info['last_walked_iter_extra'] = i_ns_step\n # if tracking all configs, save this one that has been walked\n if track_traj_io is not None:\n walkers[i_at].info['iter'] = i_ns_step\n ase.io.write(track_traj_io, walkers[i_at], format=ns_args['config_file_format'])\n #print \"WALK EXTRA on rank \", rank, \"at iteration \", i_ns_step, \" walker \", r_i\n if ns_args['debug'] >= 10 and size <= 1:\n walkers[r_i].info['n_walks'] += movement_args['n_steps']\n accumulate_stats(walk_stats_adjust, walk_stats)\n accumulate_stats(walk_stats_monitor, walk_stats)\n\n monitored_this_step=False\n if movement_args['monitor_step_interval'] != 0 and i_ns_step % abs(movement_args['monitor_step_interval']) == abs(movement_args['monitor_step_interval'])-1:\n adjust_step_sizes(walk_stats_monitor, movement_args, comm, monitor_only=True)\n zero_stats(walk_stats_monitor, movement_args)\n monitored_this_step=True\n\n if movement_args['adjust_step_interval'] != 0 and i_ns_step % abs(movement_args['adjust_step_interval']) == abs(movement_args['adjust_step_interval'])-1:\n\n if (not movement_args['full_auto_step_sizes']):\n adjust_step_sizes(walk_stats_adjust, movement_args, comm, do_print_rate=(not monitored_this_step))\n else:\n delta_step_size_setting_duration = full_auto_set_stepsizes(walkers, walk_stats_adjust, movement_args, comm, Emax_of_step, KEmax, size)\n total_step_size_setting_duration += delta_step_size_setting_duration\n step_size_setting_duration += delta_step_size_setting_duration\n zero_stats(walk_stats_adjust, movement_args)\n\n if ns_args['debug'] >= 20:\n print print_prefix, \"%30s\" % \": LOOP_TE END 30 \",i_ns_step, [ \"%.10f\" % eval_energy(at) for at in walkers ]\n print print_prefix, \"%30s\" % \": LOOP_PE END 31 \",i_ns_step, [ \"%.10f\" % eval_energy(at,do_KE=False) for at in walkers ]\n print print_prefix, \"%30s\" % \": LOOP_X END 32 \",i_ns_step, [ \"%.10f\" % at.positions[0,0] for at in walkers ]\n\n if ns_args['debug'] >= 30:\n for r in range(len(status)):\n print print_prefix, \": final status \", r, [ s for s in status[r,:] ]\n\n if (rank == 0) and ((ns_args['snapshot_interval'] > 0 and i_ns_step > 0 and i_ns_step % ns_args['snapshot_interval'] == 0) or\n (ns_args['snapshot_seq_pairs'] and i_ns_step > 1 and i_ns_step%ns_args['snapshot_interval'] == 1) or\n (ns_args['snapshot_time'] > 0 and time.time()-last_snapshot_time > ns_args['snapshot_time'])):\n do_snapshot=True\n else:\n do_snapshot=False\n if comm is not None:\n do_snapshot = comm.bcast(do_snapshot, root=0)\n if do_snapshot:\n save_snapshot(i_ns_step)\n last_snapshot_time = time.time()\n clean_prev_snapshot(pprev_snapshot_iter)\n pprev_snapshot_iter = prev_snapshot_iter\n prev_snapshot_iter = i_ns_step\n\n if ns_analyzers is not None:\n for (ns_analyzer, ns_analyzer_interval) in ns_analyzers:\n if ns_analyzer_interval > 0 and (i_ns_step+1)%ns_analyzer_interval == 0:\n ns_analyzer.analyze(walkers, i_ns_step, \"NS_loop %d\" % i_ns_step)\n i_ns_step += 1\n ### END OF MAIN LOOP\n\n # flush remaining traj configs\n for at in traj_walker_list:\n ase.io.write(traj_io, at, format=ns_args['config_file_format'])\n traj_io.flush()\n traj_walker_list=[]\n\n if ns_args['E_dump_interval'] > 0:\n if comm is not None:\n E_dump_list_all = np.array(comm.allgather(E_dump_list))\n else:\n E_dump_list_all = 
np.array(E_dump_list)\n if rank == 0:\n for i in range(E_dump_list_all.shape[1]):\n E_dump_io.write(\"step %d\\n\" % E_dump_list_times[i])\n if len(E_dump_list_all.shape) == 3:\n np.savetxt(E_dump_io, E_dump_list_all[:,i,:])\n else:\n np.savetxt(E_dump_io, E_dump_list_all[i,:])\n E_dump_io.flush()\n\n cur_time = time.time()\n if rank == 0:\n print \"LOOP TIME total \",cur_time-initial_time-total_step_size_setting_duration, \" per iter \", (cur_time-initial_time-total_step_size_setting_duration)/(i_ns_step+1)\n print \"TIME SPENT SETTING STEP SIZES total \",total_step_size_setting_duration\n\n return i_ns_step-1", "def initpridict(cls):\n for i in range(len(clslist)):\n instcls = clslist[i]\n prilist = cls.pristage(instcls)\n configlist = cls.getConfigStages()\n tmpdict = dict()\n for j in range(len(configlist)):\n tmpdict.update(dict({configlist[j]: prilist[j]}))\n pridict.update(dict({instcls: tmpdict}))", "def create_dicts(self):\n \n # remove this string from filename to make output file names more manageable\n pre_output1 = self.file1.replace(\"_Guys121919_CGH_1100_Jul11\", '')\n pre_output2 = self.file2.replace(\"_Guys121919_CGH_1100_Jul11\", '')\n \n # Build the output file name.\n # if prefix is present add it\n if self.out_file_prefix is not None:\n # concatenate prefix, filenames and dyes into output filename file1_file1_dye_file2_file2_dye.txt\n self.outputfilename = self.out_file_prefix+pre_output1.replace(\".txt\", '') + \"_\" + self.file1_dye + \"_\" + pre_output2.replace(\".txt\", '') + \"_\" + self.file2_dye + \".txt\"\n # if no prefix don't add it!\n else:\n # concatenate filenames and dyes into output filename file1_file1_dye_file2_file2_dye.txt\n self.outputfilename = pre_output1.replace(\".txt\", '') + \"_\" + self.file1_dye + \"_\" + pre_output2.replace(\".txt\", '') + \"_\" + self.file2_dye + \".txt\"\n\n # add temp to end of file name to create a temporary output filename\n self.tempoutputfilename = self.outputfilename.replace(\".txt\", '') + \"temp.txt\"\n\n # open temp output file\n self.tempoutputfile = open(self.outputfolder + self.tempoutputfilename, 'w')\n\n \n # open FE files\n file1_open = open(self.chosenfolder + self.file1, 'r')\n file2_open = open(self.chosenfolder + self.file2, 'r')\n\n # open file1 and create a dict of the features.\n for linenumber, line in enumerate(file1_open):\n if linenumber >= 10:\n splitline = line.split('\\t')\n self.file1_dict[int(splitline[1])] = line\n # get n of rows in file1 (take the linenumber of the last line)\n self.file1_len = linenumber\n\n # repeat for features in second file but first writing the feparam and stats to temp file - when pairing with control this ensures the \"header\" comes from the test (file2) not control (file1), NB NEITHER ARE ACCURATE!!!!\n for linenumber, line in enumerate(file2_open):\n if linenumber < 10:\n self.tempoutputfile.write(line)\n # then add all features to a dictionary, with the unique feature number as a key\n if linenumber >= 10:\n splitline = line.split('\\t')\n self.file2_dict[int(splitline[1])] = line\n # get n of rows in file2\n self.file2_len = linenumber\n\n # close files\n file1_open.close()\n file2_open.close()", "def init() -> None:\n init_dict()\n parse_file(\"alphabet.txt\", letters)\n parse_file(\"numbers.txt\", numbers)\n parse_file(\"symbols.txt\", symbols)", "def creating_dict(i, states):\n # base case\n if i == 5:\n # no more edges - recursion ends here\n return {'barcode': []}\n\n # iterative case\n else:\n # this is a tree structure where the node contains timepoint 
information and barcode information\n # and three edges link to other nodes that represent lineages in three differnet states\n updated_dict = {'t{}'.format(i): {state: creating_dict(i + 1, states) for state in states}}\n updated_dict['t{}'.format(i)].update({'barcode': []})\n return updated_dict", "def find_loop_insn_dep_map(\n kernel: LoopKernel,\n loop_nest_with_map: Mapping[str, AbstractSet[str]],\n loop_nest_around_map: Mapping[str, AbstractSet[str]]\n ) -> Mapping[str, AbstractSet[str]]:\n\n result: Dict[str, Set[str]] = {}\n\n from loopy.kernel.data import ConcurrentTag, IlpBaseTag\n for insn in kernel.instructions:\n for iname in kernel.insn_inames(insn):\n if kernel.iname_tags_of_type(iname, ConcurrentTag):\n continue\n\n iname_dep = result.setdefault(iname, set())\n\n for dep_insn_id in insn.depends_on:\n if dep_insn_id in iname_dep:\n # already depending, nothing to check\n continue\n\n dep_insn = kernel.id_to_insn[dep_insn_id]\n dep_insn_inames = dep_insn.within_inames\n\n if iname in dep_insn_inames:\n # Nothing to be learned, dependency is in loop over iname\n # already.\n continue\n\n # To make sure dep_insn belongs outside of iname, we must prove\n # that all inames that dep_insn will be executed in nest\n # outside of the loop over *iname*. (i.e. nested around, or\n # before).\n\n may_add_to_loop_dep_map = True\n for dep_insn_iname in dep_insn_inames:\n if dep_insn_iname in loop_nest_around_map[iname]:\n # dep_insn_iname is guaranteed to nest outside of iname\n # -> safe.\n continue\n\n if kernel.iname_tags_of_type(dep_insn_iname,\n (ConcurrentTag, IlpBaseTag)):\n # Parallel tags don't really nest, so we'll disregard\n # them here.\n continue\n\n if dep_insn_iname not in loop_nest_with_map.get(iname, []):\n # dep_insn_iname does not nest with iname, so its nest\n # must occur outside.\n continue\n\n may_add_to_loop_dep_map = False\n break\n\n if not may_add_to_loop_dep_map:\n continue\n\n logger.debug(\"{knl}: loop dependency map: iname '{iname}' \"\n \"depends on '{dep_insn}' via '{insn}'\"\n .format(\n knl=kernel.name,\n iname=iname,\n dep_insn=dep_insn_id,\n insn=insn.id))\n\n iname_dep.add(dep_insn_id)\n\n return result", "def main():\n print(\"\")\n print(\"##################################################\")\n print(\"\")\n the_loop = True\n while the_loop:\n set_holder = choose_set()\n quantity_holder = choose_quantity()\n generate_packs(set_holder, quantity_holder)", "def buildDict(self, words):\n for word in words:\n self.word_set.add(word)\n for candidate in self.candidates(word):\n self.neighbors[candidate] += 1", "def postLoopFunctions(self):\n\t\treturn", "def getNetworksFromLoops(loops, genes, fout, pdis=2000, gap=1, cpu=1):\n ks = [key for key in loops.keys() if key in genes]\n print(\"Merging anchors and annotating loops through networks.\")\n ds = Parallel(n_jobs=cpu, backend=\"multiprocessing\")(delayed(getNet)(\n chrom,\n loops[chrom],\n genes[chrom],\n pdis=pdis,\n gap=gap,\n ) for chrom in tqdm(ks))\n anchors, anots, nets, targets = {}, {}, {}, {}\n for d in ds:\n for k, v in d[0].items():\n anchors[k] = v\n for k, v in d[1].items():\n anots[k] = v\n for k, v in d[2].items():\n nets[k] = v\n for k, v in d[3].items():\n targets[k] = v\n #output results\n #anchors\n anchors = pd.DataFrame(anchors).T\n anchors.to_csv(fout + \"_mergedAnchors.txt\", sep=\"\\t\", index_label=\"anchor\")\n with open(fout + \"_mergedAnchors.bed\", \"w\") as fo:\n for t in anchors.itertuples():\n line = [t[1], t[2], t[3], t[0]]\n fo.write(\"\\t\".join(list(map(str, line))) + 
\"\\n\")\n #annotations\n anots = pd.DataFrame(anots).T\n anots.to_csv(fout + \"_loop2anchors.txt\", sep=\"\\t\", index_label=\"loopId\")\n #networks\n with open(fout + \"_ep_net.sif\", \"w\") as fo:\n for s, es in nets.items():\n es = list(es)\n ta = s.split(\"|\")[-1]\n for e in es:\n tb = e.split(\"|\")[-1]\n t = [ta, tb]\n t.sort()\n t = \"-\".join(t)\n line = [s, t, e]\n fo.write(\"\\t\".join(line) + \"\\n\")\n with open(fout + \"_targets.txt\", \"w\") as fo:\n ks = list(targets.keys())\n ks.sort()\n line = [\n \"Promoter\", \"PromoterTarget\", \"directEnhancer\", \"indirectEnhancer\",\n \"directPromoter\", \"indirectPromoter\", \"directEnhancerHub\",\n \"indirectEnhancerHub\"\n ]\n fo.write(\"\\t\".join(line) + \"\\n\")\n for k in ks:\n line = [\n k, targets[k][\"targetGene\"],\n \",\".join(targets[k][\"directEnhancer\"]),\n \",\".join(targets[k][\"indirectEnhancer\"]),\n \",\".join(targets[k][\"directPromoter\"]),\n \",\".join(targets[k][\"indirectPromoter\"]),\n targets[k][\"directEnhancerHub\"],\n targets[k][\"indirectEnhancerHub\"]\n ]\n fo.write(\"\\t\".join(line) + \"\\n\")", "def next_minibatch_feed_dict(self, placeholders):\n while True:\n if self.iter % 4 == 0:\n # gene-gene relation\n self.current_edge_type_idx = self.edge_type2idx[0, 0, 0]\n elif self.iter % 4 == 1:\n # gene-drug relation\n self.current_edge_type_idx = self.edge_type2idx[0, 1, 0]\n elif self.iter % 4 == 2:\n # drug-gene relation\n self.current_edge_type_idx = self.edge_type2idx[1, 0, 0]\n else:\n # random side effect relation\n if len(self.freebatch_edge_types) > 0:\n self.current_edge_type_idx = np.random.choice(self.freebatch_edge_types)\n else:\n self.current_edge_type_idx = self.edge_type2idx[0, 0, 0]\n self.iter = 0\n\n i, j, k = self.idx2edge_type[self.current_edge_type_idx]\n if self.batch_num[self.current_edge_type_idx] * self.batch_size \\\n <= len(self.train_edges[i,j][k]) - self.batch_size + 1:\n break\n else:\n if self.iter % 4 in [0, 1, 2]:\n self.batch_num[self.current_edge_type_idx] = 0\n else:\n self.freebatch_edge_types.remove(self.current_edge_type_idx)\n\n self.iter += 1\n start = self.batch_num[self.current_edge_type_idx] * self.batch_size\n self.batch_num[self.current_edge_type_idx] += 1\n batch_edges = self.train_edges[i,j][k][start: start + self.batch_size]\n return self.batch_feed_dict(batch_edges, self.current_edge_type_idx, placeholders)", "def _preprocess(self):\n for f in self._variables:\n self._path.joinpath(f).mkdir(parents=True, exist_ok=True)\n\n for i in tqdm(range(self._size)):\n linear, w = self._get_spectrograms(i)\n self._store_entry(i, linear, w)", "def iterate_data(dataset,iter_no=5,pixel_mask=None,plot_clear=True,algo=\"FordRollett\",unit_weights=False):\n import overlap\n start_gain = array.ones(len(dataset))\n if unit_weights is True:\n weights = array.ones_like(dataset)\n else:\n weights = 1.0/dataset.var\n # Use weights as the mask\n if pixel_mask is not None:\n weights = weights*pixel_mask\n if algo == \"FordRollett\":\n gain,first_ave,ar,esds,k = overlap.find_gain_fr(dataset,weights,start_gain,pixel_mask=pixel_mask)\n else:\n raise ValueError(\"No such algorithm: %s\" % algo)\n chisquared,residual_map = overlap.get_statistics_fr(gain,first_ave,dataset,dataset.var,pixel_mask)\n old_result = first_ave #store for later\n chisq_history = [chisquared]\n k_history = [k]\n if iter_no > 0: \n no_iters = iter_no\n else:\n no_iters = abs(iter_no)\n for cycle_no in range(no_iters+1):\n esdflag = (cycle_no == no_iters) # need esds as well, and flags the last cycle\n print 
'Cycle %d' % cycle_no\n if cycle_no > 3 and iter_no < 0:\n esdflag = (esdflag or (abs(chisq_history[-2]-chisq_history[-1]))<0.005)\n if algo == \"FordRollett\":\n gain,interim_result,ar,esds,k = overlap.find_gain_fr(dataset,weights,gain,arminus1=ar,pixel_mask=pixel_mask,errors=esdflag)\n chisquared,residual_map = overlap.get_statistics_fr(gain,interim_result,dataset,dataset.var,pixel_mask)\n chisq_history.append(chisquared)\n k_history.append(k)\n if esdflag is True:\n break\n print 'Chisquared: ' + `chisq_history`\n print 'K: ' + `k_history`\n print 'Total cycles: %d' % cycle_no\n print 'Maximum shift/error: %f' % max(ar/esds)\n return gain,dataset,interim_result,residual_map,chisq_history,esds,first_ave,weights", "def setup(self): \n self.suburbs_dict = dict()\n self.raw_proIds_dict = dict()\n self.propertyIds_dict = dict()\n self.valuations = dict()", "def test_mapping(self, mapping: AbstractionMapping,\n allow_multi_iteration=False) -> Dict[Tuple[SerializedType, SerializedType], Any]:\n self.curr_mapping: AbstractionMapping = mapping\n\n icd = InterchangeDataset(mapping, self, collate_fn=self.collate_fn)\n icd_dataloader = icd.get_dataloader(batch_size=self.batch_size, shuffle=False)\n\n results = {} # this accumulates all results\n # dict of Tuple[keys] -> dict, usually\n iteration = 0\n while True:\n # print(f\"======== iteration {iteration} ========\")\n new_realizations: RealizationMapping = {}\n total_new_realizations = 0\n num_interventions = 0\n results_total = 0\n log.debug(f'Running DL with outer iteration {iteration}, DL size {len(icd_dataloader)}')\n if iteration >= 2:\n assert allow_multi_iteration, f'Loop will run more than 2 total iterations. This should happen only' \\\n f'if intervening on multiple high level nodes simultaneously'\n for batch in tqdm(icd_dataloader):\n # batch is a list of dicts, each dict contains a batched low and high intervention\n for minibatch in batch:\n minibatch = {k: v.to(self.device) \\\n if isinstance(v, (torch.Tensor, Intervention, GraphInput)) else v\n for k, v in minibatch.items()}\n\n low_intervention = minibatch[\"low_intervention\"]\n high_intervention = minibatch[\"high_intervention\"]\n\n actual_batch_size = low_intervention.get_batch_size()\n num_interventions += actual_batch_size\n\n high_base_res, high_ivn_res = self.high_model.intervene_all_nodes(high_intervention)\n low_base_res, low_ivn_res = self.low_model.intervene_all_nodes(low_intervention)\n\n if not high_intervention.is_empty():\n new_results_ct = self._add_results(\n results, high_intervention, low_intervention,\n high_base_res, high_ivn_res, low_base_res, low_ivn_res,\n actual_batch_size\n )\n results_total += new_results_ct\n\n realizations, num_new_realizations = \\\n self._create_new_realizations(\n icd, high_intervention, low_intervention,\n high_ivn_res, low_ivn_res, actual_batch_size)\n\n total_new_realizations += num_new_realizations\n merge_realization_mappings(new_realizations, realizations)\n\n log.debug(f'end of iter {iteration}; {total_new_realizations} new realizations')\n if iteration == 0:\n icd.did_empty_interventions = True\n iteration += 1\n if total_new_realizations == 0:\n # if intervening on only 1 HL var, then will terminate after 2 loops\n break\n else:\n icd.update_realizations(new_realizations)\n\n # potentially useful to not run out of cuda memory\n del icd\n del icd_dataloader\n\n log.info(f'looked at {results_total} total results')\n\n return results", "def generate_dictionary(self, sess, dict_type=\"S2T\"):\n avg1, avg2 = 
self.calc_avg_dist(sess)\n s2t_dico = self.get_candidates(sess, avg1, avg2)\n print(\"Completed generating S2T dictionary of size \" + str(len(s2t_dico)))\n if dict_type == \"S2T\":\n map_src_ind = np.asarray([s2t_dico[x][0] for x in range(len(s2t_dico))])\n tra_tgt_ind = np.asarray([s2t_dico[x][1] for x in range(len(s2t_dico))])\n return [map_src_ind, tra_tgt_ind]\n if dict_type == \"S2T&T2S\":\n # This case we are running Target 2 Source mappings\n t2s_dico = self.get_candidates(sess, avg2, avg1, swap_score=True)\n print(\"Completed generating T2S dictionary of size \" + str(len(t2s_dico)))\n t2s_dico = np.concatenate([t2s_dico[:, 1:], t2s_dico[:, :1]], 1)\n # Find the common pairs between S2T and T2S\n s2t_candi = set([(a, b) for a, b in s2t_dico])\n t2s_candi = set([(a, b) for a, b in t2s_dico])\n final_pairs = s2t_candi & t2s_candi\n dico = np.asarray(list([[a, b] for (a, b) in final_pairs]))\n print(\"Completed generating final dictionary of size \" + str(len(final_pairs)))\n return dico", "def simple(tmpdir):\n flowcells = [1, 2, 3, 4, 5, 6, 7, 8]\n lanes = [1, 2, 3]\n reads = [1, 2]\n\n _simple = {\"files\": [], \"data\": []}\n i = 0\n\n for read in reads:\n for flowcell in flowcells:\n for lane in lanes:\n content = _full_content()[i]\n file_path = create_file(tmpdir, flowcell, lane, read, content)\n\n _simple[\"files\"].append(file_path)\n\n data = create_file_data(file_path, flowcell, lane, read)\n _simple[\"data\"].append(data)\n i += 1\n\n return _simple", "def main():\n \n #opening all shakespeare files\n #infile1 = open('shakespeare1.txt',\"r\")\n linesToIterate = []\n \n with open('shakespeare1.txt',\"r\") as file:\n text = \"\"\n for line in file:\n text = text + line\n linesToIterate.append( text )\n with open('shakespeare2.txt',\"r\") as file:\n text = \"\"\n for line in file:\n text = text + line\n linesToIterate.append( text )\n with open('shakespeare3.txt',\"r\") as file:\n text = \"\"\n for line in file:\n text = text + line\n linesToIterate.append( text )\n with open('shakespeare4.txt',\"r\") as file:\n text = \"\"\n for line in file:\n text = text + line\n linesToIterate.append( text )\n with open('shakespeare5.txt',\"r\") as file:\n text = \"\"\n for line in file:\n text = text + line\n linesToIterate.append( text )\n with open('shakespeare6.txt',\"r\") as file:\n text = \"\"\n for line in file:\n text = text + line\n linesToIterate.append( text )\n with open('shakespeare7.txt',\"r\") as file:\n text = \"\"\n for line in file:\n text = text + line\n linesToIterate.append( text )\n with open('shakespeare8.txt',\"r\") as file:\n text = \"\"\n for line in file:\n text = text + line\n linesToIterate.append( text )\n\n \n wordlist = [\"hate\", \"love\", \"death\", \"night\", \"sleep\", \"time\",\n \"henry\", \"hamlet\", \"you\", \"my\", \"blood\", \"poison\", \n \"macbeth\", \"king\", \"heart\", \"honest\"]\n\n #print(linesToIterate)\n #starting timer\n start = time.time()\n\n dictOfWords = dictOfItems(linesToIterate,wordlist)\n \n #stopping timer\n stop_time = time.time() - start\n\n print(\"time: \")\n print(\"%s seconds\" % stop_time)\n print(\"\\n\")\n\n printResults(dictOfWords)", "def precalc_all(REPS):\n for sigma in [0.25, 1.5]:\n print('-'*60)\n\n N_RANGE = arange(5,105,5)\n\n filename = f'categorical_K2_C{C}_sigma{sigma:.2f}_M{M}'.replace('.','_')\n with Timer(f'{filename} ({REPS} repetitions)'):\n run_precalc(filename, [(n,n,n) for n in N_RANGE], C, sigma, M, REPS)\n\n filename = f'categorical_LOO_C{C}_sigma{sigma:.2f}_M{M}'.replace('.','_')\n with 
Timer(f'{filename} ({REPS} repetitions)'):\n run_precalc(filename, [(n,1,n) for n in N_RANGE], C, sigma, M, REPS)", "def dynamic_loop(loop_dict, cur_loop, loop_tmp, loop_result):\n max_loop_num = len(loop_dict) - 1\n for num in list(loop_dict.values())[cur_loop]:\n loop_tmp.append(num)\n if cur_loop == max_loop_num:\n loop_result.append([*loop_tmp])\n else:\n dynamic_loop(loop_dict, cur_loop+1, loop_tmp, loop_result)\n loop_tmp.pop()\n return loop_result", "def env_loop(environment):\n def scan(vars, vals):\n \"\"\"\n scans variables in a frame\n \"\"\"\n if isNull(vars):\n return env_loop(enclosing_env(environment)) # 5-4: env -> environment\n elif var == car(vars):\n return set_car(vals, val) #4-15\n else:\n return scan(cdr(vars), cdr(vals)) # 4-15\n if environment is the_empty_environment:\n raise UnboundLocalError(\"lookup_variable\")\n frame = first_frame(environment)\n return scan(frame_variables(frame), frame_values(frame)) # 4-15", "def run_std(self):\n print \"Initialising grid\"\n self.initialise_grid(50, 100, 3)\n \n self.initialise_shadow_map()\n \n self.num_iterations = 500\n self.jump_length = 1\n \n self.pd_s = 0.6\n self.pd_ns = 0.4\n \n self.avcount = np.zeros(self.num_iterations + 1)\n \n \n before = time.time()\n self.main_loop()\n after = time.time()\n \n time_taken = after - before\n \n print \"Took %f seconds\", time_taken", "def __init__(self, n):\n self._dictOut = {}\n self._dictIn = {}\n for i in range(n):\n self._dictOut[i] = []\n self._dictIn[i] = []", "def __next__(self) -> dict:\n batches = {}\n terminations = 0\n for iterator in self.iterators:\n \n try:\n data, target = next(iterator)\n batches[data.location] = (data, target)\n\n except (TypeError, AttributeError) as e:\n logging.warning(f\"Dangling pointer detected! Skipping operation... 
Error: {e}\")\n \n except StopIteration:\n terminations += 1\n\n # Every cached iterator has been iterated through completely\n if terminations == len(self.iterators):\n raise StopIteration\n\n return batches", "def set_global_definitions(self):\n # TODO: Investigate how this could be combined with the creation of\n # self.configfiles in reffile_setup()\n\n self.global_subarray_definitions = {}\n self.global_readout_patterns = {}\n self.global_subarray_definition_files = {}\n self.global_readout_pattern_files = {}\n\n self.global_crosstalk_files = {}\n self.global_filtpupilcombo_files = {}\n self.global_filter_position_files = {}\n self.global_flux_cal_files = {}\n self.global_psf_wing_threshold_file = {}\n self.global_psfpath = {}\n # self.global_filter_throughput_files = {} ?\n\n for instrument in 'niriss fgs nircam miri nirspec'.split():\n if instrument.lower() == 'niriss':\n readout_pattern_file = 'niriss_readout_pattern.txt'\n subarray_def_file = 'niriss_subarrays.list'\n crosstalk_file = 'niriss_xtalk_zeros.txt'\n filtpupilcombo_file = 'niriss_dual_wheel_list.txt'\n filter_position_file = 'niriss_filter_and_pupil_wheel_positions.txt'\n flux_cal_file = 'niriss_zeropoints.list'\n psf_wing_threshold_file = 'niriss_psf_wing_rate_thresholds.txt'\n psfpath = os.path.join(self.datadir, 'niriss/gridded_psf_library')\n elif instrument.lower() == 'fgs':\n readout_pattern_file = 'guider_readout_pattern.txt'\n subarray_def_file = 'guider_subarrays.list'\n crosstalk_file = 'guider_xtalk_zeros.txt'\n filtpupilcombo_file = 'guider_filter_dummy.list'\n filter_position_file = 'dummy.txt'\n flux_cal_file = 'guider_zeropoints.list'\n psf_wing_threshold_file = 'fgs_psf_wing_rate_thresholds.txt'\n psfpath = os.path.join(self.datadir, 'fgs/gridded_psf_library')\n elif instrument.lower() == 'nircam':\n readout_pattern_file = 'nircam_read_pattern_definitions.list'\n subarray_def_file = 'NIRCam_subarray_definitions.list'\n crosstalk_file = 'xtalk20150303g0.errorcut.txt'\n filtpupilcombo_file = 'nircam_filter_pupil_pairings.list'\n filter_position_file = 'nircam_filter_and_pupil_wheel_positions.txt'\n flux_cal_file = 'NIRCam_zeropoints.list'\n psf_wing_threshold_file = 'nircam_psf_wing_rate_thresholds.txt'\n psfpath = os.path.join(self.datadir, 'nircam/gridded_psf_library')\n else:\n readout_pattern_file = 'N/A'\n subarray_def_file = 'N/A'\n crosstalk_file = 'N/A'\n filtpupilcombo_file = 'N/A'\n filter_position_file = 'N/A'\n flux_cal_file = 'N/A'\n psf_wing_threshold_file = 'N/A'\n psfpath = 'N/A'\n if instrument in 'niriss fgs nircam'.split():\n self.global_subarray_definitions[instrument] = self.get_subarray_defs(filename=os.path.join(self.modpath, 'config', subarray_def_file))\n self.global_readout_patterns[instrument] = self.get_readpattern_defs(filename=os.path.join(self.modpath, 'config', readout_pattern_file))\n self.global_subarray_definition_files[instrument] = os.path.join(self.modpath, 'config', subarray_def_file)\n self.global_readout_pattern_files[instrument] = os.path.join(self.modpath, 'config', readout_pattern_file)\n self.global_crosstalk_files[instrument] = os.path.join(self.modpath, 'config', crosstalk_file)\n self.global_filtpupilcombo_files[instrument] = os.path.join(self.modpath, 'config', filtpupilcombo_file)\n self.global_filter_position_files[instrument] = os.path.join(self.modpath, 'config', filter_position_file)\n self.global_flux_cal_files[instrument] = os.path.join(self.modpath, 'config', flux_cal_file)\n self.global_psf_wing_threshold_file[instrument] = 
os.path.join(self.modpath, 'config', psf_wing_threshold_file)\n self.global_psfpath[instrument] = psfpath", "def set_interaction(self):\n\n dirInd = \"/Users/asedeki/Drive/environement/Quasi1D/quasi1d/data/inddata\" # My laptop\n N = self.N\n array = np.load(f\"{dirInd}/array_index_n{N}.npy\")\n\n Temps = np.loadtxt(self.g_file, usecols=[\n 1], unpack=True, dtype=np.double)\n if self._temperatures.size == 0:\n self.temperatures = np.unique(Temps)\n\n self.g = {}\n for i in [1, 2, 3]:\n self.g[i] = {}\n G_i = np.loadtxt(self.g_file, usecols=[\n 4+i], unpack=True, dtype=np.double)\n for Ti in self.temperatures:\n GT = G_i[np.where(Temps == Ti)]\n self.g[i][Ti] = np.zeros([N, N, N], dtype=np.double)\n self.set_g(self.g[i][Ti], array, GT)", "def create(self) -> dict:\n variable_values = {}\n for simulation in self.simulation_list:\n name_array = []\n name_matrix = []\n list_name_col = []\n name_col = []\n name_row = []\n\n for line in self.filecontents(simulation):\n do_not_store_this_line = 0\n\n # checks if line contains name of subsequent array\n if line[0:12] == \" ! Variable \" and line[-2:] == \"#\\n\":\n name_array = (line[12:].split(\" \"))[0]\n name_matrix = []\n list_name_col = []\n name_col = []\n do_not_store_this_line = 1\n\n # checks if line contains name of columns (and also matrix)\n if name_array != []:\n if line.split(\"(\")[0] == \" \" + name_array:\n # line defines name of matrix if its equal to array name followed by \"(\"\n list_name_col = line.split(\",\")\n name_matrix = line.split(\")\")[0].split(\":\")[-1].strip(\"\\\"\")\n # defines name of matrix equal to the entry before the first \")\" then after the last \":\"\n do_not_store_this_line = 1 # matrix name line doesn't contain usable values\n\n # row name is just first entry of a line\n name_row = line.split(\",\")[0].strip()\n\n # foreach cell in a line, assigns its value to a dictionary with keys for the\n # array, matrix, col, and row names\n for i, cell in enumerate(line.split(\",\")):\n if name_array != [] and name_matrix != [] and name_row != [] and list_name_col != []:\n name_col = list_name_col[i].strip()\n if (do_not_store_this_line == 0 and i != 0 and name_col != \"\"):\n key = (simulation, name_array, name_matrix, name_row, name_col)\n variable_values[key] = float(cell.strip())\n\n return variable_values", "def load_iteration_dict(is_self_training):\n if len(ds.UNLABELED_DICT) > 0:\n\n temp_pos_dict = {}\n temp_neg_dict = {}\n temp_neu_dict = {}\n\n for key in ds.UNLABELED_DICT.keys():\n tweet = ds.UNLABELED_DICT.get(key)\n nl, is_success = predict(tweet, is_self_training)\n if is_success:\n if nl == 2.0:\n temp_pos_dict[key] = tweet\n if nl == -2.0:\n temp_neg_dict[key] = tweet\n if nl == 0.0:\n temp_neu_dict[key] = tweet\n else:\n temp_pos_dict = {}\n temp_neg_dict = {}\n temp_neu_dict = {}\n\n ds.POS_DICT_SELF = temp_pos_dict\n ds.NEG_DICT_SELF = temp_neg_dict\n ds.NEU_DICT_SELF = temp_neu_dict\n\n return", "def buildDict(self, dict):\n self.all_words = set(dict)\n self.wc_dict = collections.defaultdict(int)\n for w in dict:\n for wc in self.get_wildcards(w):\n self.wc_dict[wc] += 1", "def mix_iterator(self):\n self.job = OrderedDict()\n for list_i in self.grid_iterator():\n # Pick the values to be used in this run\n for (k, i) in zip(self.table.keys(), list_i):\n self.job[k] = self.table[k][i]\n # Do the string replace operations on the values themselves\n self.expand_values()\n yield self.job", "def task_3():\n threshold = [0.86, 0.87, 0.88, 0.89]\n for t in threshold: \n # Create a list to store the 
number of iteration that DE converge \n # @ given threshold for p1(5, 40), p2(10, 20), p3(20, 10), p4(40, 5) \n iter_p1 = []\n iter_p2 = [] \n iter_p3 = [] \n iter_p4 = []\n \n # Create a list to store the cost at the end of the DE \n # p1(5, 40), p2(10, 20), p3(20, 10), p4(40, 5)\n cost_p1 = []\n cost_p2 = []\n cost_p3 = []\n cost_p4 = [] \n \n # Run the experiment and record the result for the given threshold\n experiment(t)\n \n # Loop over experiment and record the number of iteration of each set of param\n # for 30 times\n for i in range(30):\n record = experiment(t) # replace the argument with the testing threshold\n iter_p1.append(record[0][0])\n iter_p2.append(record[1][0])\n iter_p3.append(record[2][0])\n iter_p4.append(record[3][0])\n \n cost_p1.append(record[0][1])\n cost_p2.append(record[1][1])\n cost_p3.append(record[2][1])\n cost_p4.append(record[3][1])\n \n # Convert the result into a dictionary then transform it to a pandas DataFrame\n iteration_dict = {\"iteration(5,40)\":iter_p1, \"iteration(10,20)\":iter_p2, \n \"iteration(20,10)\": iter_p3, \"iteration(40,5)\": iter_p4}\n cost_dict = {\"cost(5,40)\": cost_p1, \"cost(10,20)\": cost_p2, \n \"cost(20,10)\": cost_p3, \"cost(40,5)\":cost_p4}\n df_iteration = pd.DataFrame.from_dict(iteration_dict) \n df_iteration.to_csv(\"iteration_\" + str(t) + \".csv\")\n \n df_cost = pd.DataFrame.from_dict(cost_dict)\n df_cost.to_csv(\"cost_\" + str(t) + \".csv\")", "def setup(self):\n self.score = 0\n self.lives = 3\n self.state = GameStates.RUNNING\n self.focus_word = None\n \n self.star_list = set()\n self.word_list = set()\n\n for _ in range(5):\n self.create_word()\n for _ in range(25):\n self.create_star()", "def _function(self):\n\n\n\n def calculate_weights():\n \"\"\"\n calculate a weight inversely proportional to the expected to duration of the two steps in the\n script\n\n Returns: weights as a dictionary for the two steps\n\n \"\"\"\n weights = {}\n\n\n # estimate run time of step 1 (fast sweep)\n f_range = sweeper_script.settings['stop'] - sweeper_script.settings['start']\n N_samples = sweeper_script.settings['samplecount']\n df = f_range / N_samples\n\n t = N_samples / df\n\n weights['quick scan'] = t\n\n # estimate run time of step 2 (high res sweep)\n df = self.settings['high_res_df']\n N_samples = self.settings['high_res_N']\n\n t = N_samples / df\n\n weights['high res scan'] = t\n\n\n total_time = sum([v for k, v in weights.iteritems()])\n\n weights = {k: v/total_time for k, v in weights.iteritems()}\n\n print('weights',weights)\n\n return weights\n\n def run_scan(name):\n self.current_subscript = name\n sweeper_script.start()\n while self.current_subscript is name:\n time.sleep(0.1)\n\n def calc_new_range():\n\n\n df = self.settings['high_res_df']\n N = self.settings['high_res_N']\n\n r = sweeper_script.data[-1]['r']\n freq = sweeper_script.data[-1]['frequency']\n freq = freq[np.isfinite(r)]\n r = r[np.isfinite(r)]\n\n fo = freq[np.argmax(r)]\n\n f_start, f_end = fo - N/2 *df, fo + N/2 *df\n\n\n # make sure that we convert back to native python types (numpy file types don't pass the Parameter validation)\n return float(f_start), float(f_end), int(N)\n\n\n sweeper_script = self.scripts['zi sweep']\n #save initial settings, so that we can rest at the end of the script\n initial_settings = deepcopy(sweeper_script.settings)\n self.weights = calculate_weights()\n\n # take the signal from the subscript and route it to a function that takes care of it\n sweeper_script.updateProgress.connect(self._receive_signal)\n\n 
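# Illustrative standalone sketch (hypothetical data, numpy only) of the
# coarse-to-fine recentring that calc_new_range above performs: take the
# frequency of the peak response from the quick scan, then build a window of
# N points spaced df around it for the high-resolution sweep.
import numpy as np
toy_freq = np.linspace(1.0e6, 2.0e6, 201)               # assumed coarse sweep grid (Hz)
toy_r = np.exp(-((toy_freq - 1.37e6) / 2.0e4) ** 2)     # assumed response with one peak
toy_ok = np.isfinite(toy_r)
toy_fo = toy_freq[toy_ok][np.argmax(toy_r[toy_ok])]     # peak frequency of the quick scan
toy_df, toy_N = 1.0e3, 50                               # assumed high-res spacing and count
toy_start, toy_stop = toy_fo - toy_N / 2 * toy_df, toy_fo + toy_N / 2 * toy_df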
print('====== start quick scan ============')\n\n run_scan('quick scan')\n\n print('====== calculate new scan range ====')\n f_start, f_stop, N = calc_new_range()\n\n print('f_start, f_stop, N', f_start, f_stop, N)\n\n print('====== update sweeper ==============')\n sweeper_script.update({\n 'start' : f_start,\n 'stop' : f_stop,\n 'samplecount' : N\n })\n\n print('====== start high res scan =========')\n # print(sweeper_script.sweeper.finished())\n # print(sweeper_script.sweeper.progress())\n\n run_scan('high res scan')\n\n sweeper_script.updateProgress.disconnect()\n self.data = sweeper_script.data[-1]\n\n self._recording = False\n\n if self.settings['save']:\n self.save()\n\n # set the sweeper script back to initial settings\n sweeper_script.update(initial_settings)\n # make sure that progess is set 1o 100 because we check that in the old_gui\n self.updateProgress.emit(100)", "def Repeater(algorithm, runs, nationtxt, schemeIn):\n\n scores = {}\n\n # Make sure appropriate range is used for scores\n\n scoreRange = range(0, 10000)\n\n # score range has to be between these two numbers\n for i in scoreRange:\n scores.update({i : 0})\n\n #~ print \"Running \" + str(algorithm)[0:-18] + \"> \" + str(runs) + \" times...\\n\"\n\n\n minScore = 10**40\n\n\n scheme = schemeIn\n avg = (scheme[0] + scheme[1] + scheme[2] + scheme[3] + scheme[4] + scheme[5] + scheme[6]) / 7.\n p0 = (scheme[0] - avg)**2\n p1 = (scheme[1] - avg)**2\n p2 = (scheme[2] - avg)**2\n p3 = (scheme[3] - avg)**2\n p4 = (scheme[4] - avg)**2\n p5 = (scheme[5] - avg)**2\n p6 = (scheme[6] - avg)**2\n var = (p0 + p1 + p2 + p3 + p4 + p5 + p6) / 7.\n sDev = var**0.5\n\n\n q0 = scheme[1] - scheme[0]\n q1 = scheme[2] - scheme[1]\n q2 = scheme[3] - scheme[2]\n q3 = scheme[4] - scheme[3]\n q4 = scheme[5] - scheme[4]\n q5 = scheme[6] - scheme[5]\n\n for i in range(runs):\n nation = algorithm(nationtxt)\n\n score = randScoreFunction(nation, scheme)\n scores[score] += 1\n\n # keep track of best scores and nation\n if score < minScore:\n minScore = score\n bestNation = nation\n\n maxFreq = 0\n\n scoreCount = 0\n\n for score in scores:\n if scores[score] > maxFreq:\n maxFreq = scores[score]\n maxFreqScore = score\n if score == minScore:\n minScoreFreq = scores[score]\n if scores[score] >= 1:\n scoreCount += 1\n\n\n usedTrans = []\n fivePlus = 0\n fivePlusNoDuplicate = 0\n\n one = 0\n two = 0\n three = 0\n four = 0\n five = 0\n six = 0\n seven = 0\n\n for province in bestNation:\n\n if bestNation[province][1] == 1:\n one += 1\n if bestNation[province][1] == 2:\n two += 1\n if bestNation[province][1] == 3:\n three += 1\n if bestNation[province][1] == 4:\n four += 1\n if bestNation[province][1] == 5:\n five += 1\n if bestNation[province][1] == 6:\n six += 1\n if bestNation[province][1] == 7:\n seven += 1\n\n\n if five > 0 or six > 0 or seven > 0:\n fivePlus += 1\n if scheme[3] != scheme[4]:\n fivePlusNoDuplicate += 1\n\n usedTrans.append([one, two, three, four, five, six, seven])\n\n\n return minScore, minScoreFreq, scheme, fivePlus, fivePlusNoDuplicate, usedTrans, scoreCount, sDev, q0, q1, q2, q3, q4, q5, avg", "def main() -> None:\n\n task_results = {}\n for task in (Task.SINGLE_SEQUENCE, Task.MULTI_SEQUENCE):\n task_results[task] = []\n for category in CO3D_CATEGORIES[: (20 if task == Task.SINGLE_SEQUENCE else 10)]:\n for single_sequence_id in (\n (0, 1) if task == Task.SINGLE_SEQUENCE else (None,)\n ):\n category_result = evaluate_dbir_for_category(\n category, task=task, single_sequence_id=single_sequence_id\n )\n print(\"\")\n print(\n 
f\"Results for task={task}; category={category};\"\n + (\n f\" sequence={single_sequence_id}:\"\n if single_sequence_id is not None\n else \":\"\n )\n )\n pretty_print_nvs_metrics(category_result)\n print(\"\")\n\n task_results[task].append(category_result)\n _print_aggregate_results(task, task_results)\n\n for task in task_results:\n _print_aggregate_results(task, task_results)", "def rescan(self):\n self.__artists = {}\n self.__artists_by_name = {}\n self.__albums = {}\n self.__tracks = {}\n self.__playlists = {}\n self.__populate_library()", "def __init__(self, mdp, discount = 0.9, iterations = 100):\n self.mdp = mdp\n self.discount = discount\n self.iterations = iterations\n self.values = util.Counter() # A Counter is a dict with default 0\n\n # Write value iteration code here\n \"*** YOUR CODE HERE ***\"\n start = 0\n\n while start < iterations:\n #substitute dictionary\n substitute = util.Counter()\n index = 0\n biggestQ = 0\n\n #loop through all states\n for state in self.mdp.getStates():\n\n\n #if the state is terminal, do not loop through\n if self.mdp.isTerminal(state) == False:\n actionArr = self.mdp.getPossibleActions(state)\n\n #copute Q(s,a) for first action\n biggestQ = self.computeQValueFromValues(state, actionArr[0])\n\n #loop through all actions to find maximum Q(state, action)\n for action in actionArr:\n possibleQ = self.computeQValueFromValues(state, action)\n if (possibleQ > biggestQ):\n biggestQ = possibleQ\n #self.values[state] = biggestQ\n substitute[state] = biggestQ\n #self.values[state] = biggestQ\n index = index + 1\n #self.values[state] = biggestQ\n\n #if all states have been checked transfer values to actual dictionary\n if index == len(self.mdp.getStates()):\n for key in substitute.keys():\n self.values[key] = substitute[key]\n\n\n start = start + 1", "def READ_DEFINE_AND_PROCESS_EVERYTHING(basedir, in__dir):\n\n import sys, ast, multiprocessing\n import pandas as pd\n import numpy as np\n\n # Output dictionanry\n dc = {}\n \n # Read command line arguments\n try:\n y_var = dc['y_var'] = str(sys.argv[1]) \n y_area = dc['y_area'] = str(sys.argv[2])\n experiment = dc['experiment'] = str(sys.argv[3])\n X_source = dc['X_source'] = str(sys.argv[4]) \n except:\n y_var = dc['y_var'] = 'T2M'\n y_area = dc['y_area'] = 'scandi'\n experiment = dc['experiment'] = 'CONTROL'\n X_source = dc['X_source'] = 'ERA-20C'\n \n # Define details \n if(experiment=='CUTFIRSTYRS'): \n yr1 = dc['yr1'] = '1945' \n else:\n yr1 = dc['yr1'] = '1915' #'1900' \n \n yr2 = dc['yr2'] = '2010'\n \n \n rstate = dc['rstate'] = 70 \n n_folds = dc['n_folds'] = 5\n p_smpl = dc['p_smpl'] = 0.50\n p_feat = dc['p_feat'] = 0.33\n n_smpls = dc['n_smpls'] = 1000\n tst_len = dc['tst_len'] = 25\n\n ncomps_sst = dc['ncomps_sst'] = 5\n ncomps_snc = dc['ncomps_snc'] = 3\n ncomps_gpt = dc['ncomps_gpt'] = 3\n\n if(experiment=='NO_LAGS'):\n lags_sst = dc['lags_sst'] = (1,)\n lags_snc = dc['lags_snc'] = (1,)\n lags_gpt = dc['lags_gpt'] = (1,)\n else:\n lags_sst = dc['lags_sst'] = (1,2,3,4,5)\n lags_snc = dc['lags_snc'] = (1,2)\n lags_gpt = dc['lags_gpt'] = (1,2)\n \n \n \n n_jobs = dc['n_jobs'] = np.min([28, int(0.9*(multiprocessing.cpu_count()))])\n seasons = dc['seasons'] = ('DJF', 'MAM' ,'JJA', 'SON')\n\n # Define training and test periods\n all_yrs = dc['all_yrs'] = list(np.arange(int(yr1),int(yr2)+1))\n tst_yrs = dc['tst_yrs'] = all_yrs[-tst_len:] \n trn_yrs = dc['trn_yrs'] = list(np.array(all_yrs)[~np.isin(all_yrs,tst_yrs)])\n\n # Define a skeleton for naming output files \n basename = dc['basename'] = 
'fittings_'+experiment+'_HadCRUT4-'+y_var+ \\\n '_nsmpls'+str(n_smpls)+'_ntestyrs'+str(tst_len)+ \\\n '_'+X_source+'-SST'+str(ncomps_sst)+'-'+str(lags_sst[-1])+ \\\n '_'+X_source+'-GPT'+str(ncomps_gpt)+'-'+str(lags_gpt[-1])+ \\\n '_'+yr1+'-'+yr2+'_'+y_area\n\n # Variables, form: 'name_of_variable': ['domain', n_comps, lags, year_range]\n X_var_definitions = dc['X_var_definitions'] = {\n 'SST': ['global', ncomps_sst, lags_sst, trn_yrs, all_yrs, X_source],\n 'GPT': ['norhem', ncomps_gpt, lags_gpt, trn_yrs, all_yrs, X_source],\n #'SNC': ['norhem', ncomps_snc, lags_snc, trn_yrs, all_yrs, X_source],\n }\n \n # Optional variables from https://climexp.knmi.nl/, form: 'name_of_index': [lags]\n X_clxp_definitions = dc['X_clxp_definitions'] = {\n #'M1i':(1,), 'M2i':(1,), 'M3i':(1,), \n #'M4i':(1,), 'M5i':(1,), 'M6i':(1,),\n #'NAO':(1,), 'NINO12':(1,), 'NINO3':(1,), 'NINO34':(1,), 'NINO4',:(1,),\n #'AMO1':(1,), 'AMO2':(1,), 'PDO1':(1,), 'PDO2':(1,), 'SOI',:(1,),\n }\n\n # Read and preprocess the predictand data using xarray etc.\n y_eur, Y, cl, tr = dc['y_eur'], dc['Y'], dc['Y_clim'], dc['Y_trend'] = \\\n read_manipulate_Y_data(y_var, in__dir, {}, {}, all_yrs, all_yrs, y_area)\n\n # Read and preprocess the raw predictor data using xarray etc.\n X_vars, cl, tr = dc['X_vars'], dc['X_clim'], dc['X_trnd'] = \\\n read_manipulate_X_data(in__dir, X_var_definitions, {}, {})\n\n\n if(experiment=='INCLPERSIS'): \n include_persistence=True\n else:\n include_persistence=False\n\n # Compress raw data with PCA, apply lagging, and create a Pandas dataframe \n X,p,ei,er = dc['X'], dc['X_PCAs'], dc['X_EIGs'], dc['X_ERRs'] = prepare_X_array(Y, \n y_var, X_vars, {}, X_var_definitions, X_clxp_definitions, include_persistence=include_persistence)\n\n if(experiment=='FOLLAND'):\n # Folland et al. 2012, Hall et al. 
2017\n for i,vrb in enumerate(X.columns):\n if((vrb[0:4] == 'SST1')|(vrb[0:4] == 'sst1')):\n X[vrb] = StandardScaler().fit_transform(X[vrb][:,np.newaxis])\n X[vrb][ np.abs(X[vrb]) < 1 ] = 0\n X[vrb][ X[vrb] < -1 ] = -1\n X[vrb][ X[vrb] > 1.75 ] = 0\n X[vrb][ X[vrb] > 1 ] = 1\n print(X[vrb])\n\n # Extract variable names\n vrbl_names = dc['vrbl_names'] = X.columns\n \n return dc", "def mystery1(input_val):\n global counter\n for index in range(input_val):\n for dummy_index in range(5):\n counter += 1", "def construct(self):\n\n newSet = {}\n current_index = 0\n\n for key_1, value_1 in self._sets[self._currentSet].items():\n current_index += 1\n for key_2,value_2 in list(self._sets[self._currentSet].items())[current_index:]:\n # join the 2 tuples\n join = key_1 + key_2\n # remove duplicates\n join = tuple(set(join))\n # get combinations\n combined = tuple(combinations(join, self._currentSet+1))\n # sort combination\n combined = tuple(sorted(combined[0]))\n\n # append new combination to dict\n if len(combined) != 0 :\n newSet[combined] = 0\n\n self._currentSet += 1\n # append the new itemset in the sets dict \n self._sets[self._currentSet] = newSet", "def fill_map(self):\n\n sim = Pong(max_steps=None)\n s = sim.empty_state()\n s[DEFAULT_DIMS] = DEFUALT_VALUES\n\n # Optimization issues:\n next_state = self.next_state\n next_reward = self.next_reward\n d = self.d\n\n # Make the terminal state a self-loop\n next_state[self.n] = self.n\n\n t0 = clock()\n for i in range(0, self.n, 1000000):\n for j in range(i, min(i + 1000000, self.n)):\n s[TRAIN_DIMS] = d.index_to_state(j)\n for a in c.ACTIONS:\n sim.fast_set_and_step(s, c.A_STAY, a)\n if sim.hit == \"r\":\n next_reward[j, a] = 1\n next_state[j, a] = -1\n elif sim.miss == \"r\":\n next_reward[j, a] = -1\n next_state[j, a] = -1\n else:\n next_state[j, a] = d.state_to_index(sim.s[TRAIN_DIMS])\n print(i, clock() - t0)", "def declare_variables(self):\n\n\t\tvar_prefixes = ['W_in', 'W_rnn', 'b_rnn', 'W_out', 'b_out']\n\t\tself.var_dict = {}\n\n\t\twith tf.variable_scope('network'):\n\t\t\tfor p in var_prefixes:\n\t\t\t\tself.var_dict[p] = tf.get_variable(p, initializer=par[p+'_init'])", "def __init__(self):\n\n # names of atoms that make up relevant segements of each chain\n self.chains = {'a': {'C': 'C1', 'C1': 'C2', 'C2': 'C3', 'C3': 'C4', 'C4': 'C5', 'H': 'H1', 'H1': 'H2',\n 'H2': 'H3', 'H3': 'H4', 'H4': 'H5'},\n 'b': {'C45': 'C1', 'C44': 'C2', 'C43': 'C3', 'C42': 'C4', 'C41': 'C5', 'H81': 'H1', 'H80': 'H2',\n 'H79': 'H3', 'H78': 'H4', 'H77': 'H5'}\n }\n\n self.nchains = len(list(self.chains.keys()))\n\n self.chain_numbers = {'a': 0, 'b': 1} # used to number chains\n\n # self.initial_types = {'C1': 'c2', 'C2': 'ce', 'C3': 'ce', 'C4': 'c2', 'H1': 'ha', 'H2': 'ha', 'H3': 'ha',\n # 'H4': 'ha', 'H5': 'ha'}\n\n # all indices numbered from 0. D1, D2, ... correspond to dummies attached to C1, C2, ... 
respectively\n self.indices = {'a': {'C1': 0, 'C2': 1, 'C3': 2, 'C4': 3, 'C5': 4, 'H1': 52, 'H2': 53, 'H3': 54, 'H4': 55,\n 'H5': 56, 'D1': 136, 'D2': 137, 'D3': 138, 'D4': 139},\n 'b': {'C1': 49, 'C2': 48, 'C3': 47, 'C4': 46, 'C5': 45, 'H1': 133, 'H2': 132, 'H3': 131,\n 'H4': 130, 'H5': 129, 'D1': 140, 'D2': 141, 'D3': 142, 'D4': 143}\n }\n\n self.dummy_connectivity = {'a': {'C': 'D1', 'C1': 'D2', 'C2': 'D3', 'C3': 'D4'},\n 'b': {'C45': 'D1', 'C44': 'D2', 'C43': 'D3', 'C42': 'D4'}}\n\n self.hydrogen_connectivity = {'C': ['H1', 'H2'], 'C1': ['H3'], 'C2': ['H4'], 'C3': ['H5'],\n 'C45': ['H1', 'H2'], 'C44': ['H3'], 'C43': ['H4'], 'C42': ['H5']}\n\n self.dummy_mass = 1.008 # mass of hydrogen\n\n # write these in order of priority\n # for efficiency, don't repeat things. For example self.carbons['C1']: self.carbons['C2'] is the same as\n # self.carbons['C2']: self.carbons['C1']. Otherwise, computational expense goes up and a new reaction has\n # to be defined below.\n self.carbons = {'C1': ['C', 'C45'], 'C2': ['C1', 'C44'], 'C3': ['C2', 'C43'], 'C4': ['C3', 'C42']}\n self.bonds_with = [[self.carbons['C1'], self.carbons['C2']]]\n\n # define which improper dihedrals to remove -- written in same order as .itp file!!!\n # note that the order of the atoms may be different for each chain\n # NOTE: C3 not tested\n self.impropers = {'a': {'C1': ['H2', 'C1', 'H1', 'C2'], 'C2': ['C1', 'C3', 'C2', 'H3'],\n 'C3': ['C4', 'C2', 'C3', 'H4'], 'C4': ['C5', 'C3', 'C4', 'H5']},\n 'b': {'C1': ['C2', 'H2', 'C1', 'H1'], 'C2': ['C1', 'C3', 'C2', 'H3'],\n 'C3': ['C4', 'C2', 'C3', 'H4'], 'C4': ['C5', 'C3', 'C4', 'H5']}}", "def general_simulation_data(self):\n iterations = {}\n nstates = {}\n natoms = {}\n for phase in self.phases:\n positions = self.ncfiles[phase].variables['positions']\n iterations[phase], nstates[phase], natoms[phase], spatial = positions.shape\n\n leniter = max(len('Iterations'), *[len(str(i)) for i in iterations.values()]) + 2\n lenstates = max(len('States'), *[len(str(i)) for i in nstates.values()]) + 2\n lennatoms = max(len('Num Atoms'), *[len(str(i)) for i in natoms.values()]) + 2\n lenleftcol = max(len('Phase'), *[len(phase) for phase in self.phases]) + 2\n\n lines = []\n headstring = ''\n headstring += ('{:^' + '{}'.format(lenleftcol) + '}').format('Phase') + '|'\n headstring += ('{:^' + '{}'.format(leniter) + '}').format('Iterations') + '|'\n headstring += ('{:^' + '{}'.format(lenstates) + '}').format('States') + '|'\n headstring += ('{:^' + '{}'.format(lennatoms) + '}').format('Num Atoms')\n lines.append(headstring)\n lenline = len(headstring)\n topdiv = '=' * lenline\n lines.append(topdiv)\n for phase in self.phases:\n phasestring = ''\n phasestring += ('{:^' + '{}'.format(lenleftcol) + '}').format(phase) + '|'\n phasestring += ('{:^' + '{}'.format(leniter) + '}').format(iterations[phase]) + '|'\n phasestring += ('{:^' + '{}'.format(lenstates) + '}').format(nstates[phase]) + '|'\n phasestring += ('{:^' + '{}'.format(lennatoms) + '}').format(natoms[phase])\n lines.append(phasestring)\n lines.append('-' * lenline)\n\n for line in lines:\n print(line)\n self.iterations = iterations\n self._general_run = True", "def _init_results(self) -> None:\n pt_bond_dimensions = {}\n for site, pt in enumerate(self._process_tensors):\n if pt is not None:\n pt_bond_dimensions[site] = pt.get_bond_dimensions()\n\n self._results = {\n 'time':[],\n 'norm': [],\n 'bond_dimensions': [],\n 'dynamics': {},\n 'pt_bond_dimensions': pt_bond_dimensions,\n }\n for sites in self._dynamics_sites:\n 
self._results['dynamics'][sites] = Dynamics(name=f\"site{sites}\")", "def main():\n dataset_idx = [11]\n network_idx = [0]\n reshape_input = [False]\n output_idxs = [0, 1]\n lrs = [0, 1, 2]\n dataset_ft_idx = [0,1,2,3]\n counter_exp = 0\n freeze = [0]\n percentages = [12]\n for dts in range(len(dataset_idx)):\n for nt in range(len(network_idx)):\n for opt in output_idxs:\n for dft in dataset_ft_idx:\n for pr in percentages:\n for rsi in range(len(reshape_input)):\n for fr in freeze:\n for lr in lrs:\n config = configuration(dataset_idx=dataset_idx[dts],\n network_idx=network_idx[nt],\n output_idx=opt,\n usage_modus_idx=5,\n dataset_fine_tuning_idx=dft,\n reshape_input=reshape_input[rsi],\n learning_rates_idx=lr,\n name_counter=counter_exp,\n freeze=fr,\n percentage_idx=pr,\n fully_convolutional=False)\n\n setup_experiment_logger(logging_level=logging.DEBUG,\n filename=config['folder_exp'] + \"logger.txt\")\n\n logging.info('Finished')\n\n modus = Modus_Selecter(config)\n\n # Starting process\n modus.net_modus()\n counter_exp += 1\n\n\n return", "def _mapping(self, pdb_id, loop_type, normalizer):\n\n mapping = {}\n with self.session() as session:\n query = self.query(session, pdb_id).filter_by(type=loop_type)\n for result in query:\n unit_ids = normalizer(result.unit_ids)\n if unit_ids in mapping:\n self.logger.error(\"Loop %s duplicates %s\",\n result.loop_id, mapping[unit_ids])\n continue\n mapping[unit_ids] = result.loop_id\n return mapping", "def computePValues(options,whole_mapped_data,mapped_data_per_size_per_register,phase,cycle):\n min_reads_mapped_to_a_phased_register=3\n min_reads_in_a_window=10\n chromosome_hits=[]\n for chromosome in sorted(mapped_data_per_size_per_register):\n chromosome_hits.append(chromosome)\n fhr=open(options.output_directory_per_run+\"/\"+options.input_filename+\"_\"+str(phase)+\"_\"+str(cycle)+\"_\"+chromosome+\".regionsOfInterest\",\"r\")\n fhw=open(options.output_directory_per_run+\"/\"+options.input_filename+\"_\"+str(phase)+\"_\"+str(cycle)+\"_\"+chromosome+\".regionsOfInterest.concentrated\",\"w\")\n for line in fhr:\n register,start,end=line.strip().split()\n register=int(register)\n start=int(start)\n end=int(end)\n \n begin=start\n #print(chromosome,register,start,end)\n sys.stdout.flush()\n while begin+(phase*min_reads_mapped_to_a_phased_register) <= end+1:\n finish=begin+(phase*cycle)-1\n \n k=0\n for i in range(begin,finish+1):\n #print(chromosome,register,i,phase,start,end)\n try:\n k+=mapped_data_per_size_per_register[chromosome][register][i]\n except KeyError:\n pass\n #print(\"Next\")\n if k<min_reads_mapped_to_a_phased_register: \n begin+=phase\n continue\n \n num_all_reads=0\n for i in range(begin,finish+1):\n try:\n num_all_reads+=whole_mapped_data[chromosome][i]\n except KeyError:\n pass\n if num_all_reads<min_reads_in_a_window:\n begin+=phase\n continue\n \n n=0\n \"\"\"print(\"reached here\")\n sys.stdout.flush()\"\"\"\n # register_i is an iterator different from register\n for register_i in sorted(mapped_data_per_size_per_register[chromosome]):\n for i in range(begin,finish+1):\n try:\n n+=mapped_data_per_size_per_register[chromosome][register_i][i]\n except KeyError:\n pass\n \"\"\"if chromosome==\"Chr1\":\n print(str(n)+\" \"+str(num_all_reads)+\"\\n\")\"\"\"\n if n/num_all_reads<0.3:\n begin+=phase\n continue\n m=cycle*2\n pvalue=0\n for x in range(k,m+1):\n numerator=nCr((phase-1)*m,n-x)*nCr(m,x)\n pvalue+=numerator\n denominator=nCr(phase*m,n)\n pvalue=pvalue/denominator\n 
#print(chromosome,begin,finish,k,n,m,num_all_reads,pvalue,n/num_all_reads)\n if pvalue>=options.pvalue_cutoff:\n begin+=phase\n continue\n stuffs_to_be_printed_to_file=[register,begin,finish,k,n,m,num_all_reads,n/num_all_reads,pvalue]\n fhw.write(\"\\t\".join(map(str,stuffs_to_be_printed_to_file))+\"\\n\")\n sys.stdout.flush()\n begin+=phase", "def do_setup(self, ants): \n log_filter = LogFilter()\n getLogger().addFilter(log_filter)\n\n self.hills = []\n self.directions = []\n\n self.seen = [] #areas that have been seen, use this to avoid repetition\n self.unseen = []\n self.stepped_on = []\n\n self.intent = {}\n self.lc = {} #center of mass for a location\n self.i = {} #number of iterations for an ant\n\n for row in range(ants.rows):\n for col in range(ants.cols):\n self.unseen.append((row, col))\n self.intent[(row,col)] = Intent.GATHER\n\n self.lc[(row,col)] = (-1.0,-1.0) #set up center of mass\n self.i[(row,col)] = -1", "def outer_loop_lp(self, profile, missed_winners):\r\n\r\n # Initialize\r\n stats = self.Stats()\r\n\r\n wmg = profile.getWmg()\r\n known_winners = set()\r\n I = list(wmg.keys())\r\n\r\n G = nx.DiGraph()\r\n G.add_nodes_from(I)\r\n\r\n E = nx.DiGraph()\r\n E.add_nodes_from(I)\r\n for cand1, cand2 in itertools.permutations(wmg.keys(), 2):\r\n if wmg[cand1][cand2] > 0:\r\n E.add_edge(cand1, cand2, weight=wmg[cand1][cand2])\r\n\r\n # print(wmg)\r\n # self.output_graph(E)\r\n\r\n # Add any bridge edges from any tier in E\r\n # These are guaranteed to never be in a cycle, so will always be in the final graph after RP procedure\r\n Gc = G.copy()\r\n Gc.add_edges_from(E.edges())\r\n scc = [list(g.edges()) for g in nx.strongly_connected_component_subgraphs(Gc, copy=True) if len(g.edges()) != 0]\r\n bridges = set(Gc.edges()) - set(itertools.chain(*scc))\r\n G.add_edges_from(bridges)\r\n E.remove_edges_from(bridges)\r\n\r\n stats.num_initial_bridges = len(bridges)\r\n\r\n\r\n # Each node contains (G, E, T, P)\r\n # P is path, where each item is of form (G, E, K, a)\r\n # root = Node(value=(self.edges2string(G.edges(), I), self.edges2string(E.edges(), I)))\r\n root = Node(value=(G, E, [], []))\r\n stackNode = []\r\n stackNode.append(root)\r\n\r\n hashtable = set()\r\n\r\n END = self.BEGIN + self.TIMEOUT\r\n\r\n self.missed_winners = set(missed_winners)\r\n\r\n self.data = {}\r\n for w in missed_winners:\r\n self.data[w] = []\r\n\r\n while stackNode:\r\n # Pop new node to explore\r\n node = stackNode.pop()\r\n (G, E, T, P) = node.value\r\n\r\n if time.perf_counter() > END:\r\n print(\"TIMEOUT\")\r\n return sorted(known_winners), stats\r\n\r\n # Check hash\r\n hash_state = self.edges2string(G.edges(), I) + self.edges2string(E.edges(), I) + self.edges2string(T, I)\r\n if hash_state in hashtable:\r\n stats.num_hashes += 1\r\n if self.debug_mode == 3:\r\n print(\"hashed in outer hashtable\")\r\n continue\r\n hashtable.add(hash_state)\r\n\r\n stats.num_nodes += 1\r\n\r\n if self.debug_mode == 3:\r\n print(\"Popped new node: \")\r\n print(\"G:\", sorted(G.edges()))\r\n print(\"E:\", sorted(E.edges()))\r\n print(\"T:\", sorted(T))\r\n\r\n # Flag for whether expanding the current tier required finding max children\r\n f_found_max_children = 0\r\n\r\n # Continue performing RP on this state as long as tie-breaking order doesn't matter\r\n while len(E.edges()) != 0 or len(T) != 0:\r\n if self.stop_conditions(G, E, T, P, I, known_winners, stats) != -1:\r\n # Stop condition hit\r\n break\r\n\r\n if len(T) == 0:\r\n # Get a new tier\r\n (max_weight, max_edge) = max([(d['weight'], (u, v)) for (u, 
v, d) in E.edges(data=True)])\r\n T = [(u, v) for (u, v, d) in E.edges(data=True) if d['weight'] == max_weight]\r\n E.remove_edges_from(T)\r\n\r\n if self.debug_mode == 3:\r\n print(\"New tier =\", T)\r\n\r\n if len(T) == 1:\r\n # Tier only has one edge, just add it\r\n if self.debug_mode == 3:\r\n print(\"Only 1 edge in tier\")\r\n\r\n if nx.has_path(G, max_edge[1], max_edge[0]) is False:\r\n E.add_edges_from(T)\r\n P.append((self.edges2string(G.edges(), I), self.edges2string(E.edges(), I), known_winners.copy(), max_edge))\r\n E.remove_edges_from(T)\r\n G.add_edges_from(T)\r\n continue\r\n\r\n\r\n # Perform reductions every step:\r\n\r\n # Compute \"bridge edges\" which are not in any cycle\r\n Gc = G.copy()\r\n Gc.add_edges_from(T)\r\n scc = [list(g.edges()) for g in nx.strongly_connected_component_subgraphs(Gc, copy=True) if len(g.edges()) != 0]\r\n bridges = set(Gc.edges()) - set(itertools.chain(*scc))\r\n G.add_edges_from(bridges)\r\n T = list(set(T) - bridges)\r\n\r\n G_tc = nx.transitive_closure(G)\r\n\r\n # Remove \"inconsistent edges\" that cannot be added to G without causing cycle\r\n reverse_G = nx.DiGraph.reverse(G_tc)\r\n T = list(set(T) - set(reverse_G.edges()))\r\n\r\n # Remove \"redundant edges\": if there is already path from e[0] to e[1], can immediately add e\r\n redundant_edges = set()\r\n for e in T:\r\n if G_tc.has_edge(e[0], e[1]):\r\n redundant_edges.add(e)\r\n G.add_edges_from([e])\r\n stats.num_redundant_edges += len(redundant_edges)\r\n T = list(set(T) - redundant_edges)\r\n\r\n if len(T) == 0:\r\n # No need to find further children, as tier is now empty\r\n if self.debug_mode == 3:\r\n print(\"Tier empty\")\r\n continue\r\n\r\n # Used to break ties\r\n index = 0\r\n\r\n # Add each edge to stack by priority\r\n children = dict()\r\n T = sorted(T)\r\n for e in T:\r\n if not G_tc.has_edge(e[1], e[0]):\r\n f_found_max_children = 1\r\n\r\n Gc = G.copy()\r\n Gc.add_edges_from([e])\r\n Ec = E.copy()\r\n Tc = copy.deepcopy(T)\r\n Tc.remove(e)\r\n Pc = copy.deepcopy(P)\r\n\r\n EUT = E.copy()\r\n EUT.add_edges_from(T)\r\n Pc.append((self.edges2string(G.edges(), I), self.edges2string(EUT.edges(), I), known_winners.copy(), e))\r\n child_node = Node(value=(Gc,Ec,Tc,Pc))\r\n\r\n # LPwinners\r\n G_in_degree = Gc.in_degree(I)\r\n potential_winners = set([x[0] for x in G_in_degree if x[1] == 0])\r\n priority = len(potential_winners - known_winners)\r\n\r\n children[child_node] = (priority, index)\r\n index = index + 1\r\n\r\n if self.debug_mode == 3:\r\n print(\"added edge\", e)\r\n\r\n children_items = sorted(children.items(), key=lambda x: (x[1][0], x[1][1]))\r\n sorted_children = [key for key, value in children_items]\r\n stackNode += sorted_children\r\n break\r\n\r\n if len(E.edges()) == 0 and f_found_max_children == 0:\r\n # E is empty\r\n if self.debug_mode >= 2:\r\n print(\"E is empty\")\r\n self.add_winners(G, P, I, known_winners, stats)\r\n\r\n return sorted(known_winners), stats, self.data", "def __getLoopBoundScanningStmts(\n self, stmts, tile_level, outer_loop_inames, loop_info_table\n ):\n\n # (optimization) generate code that determines the loop bounds of full tiles at compile time\n if self.affine_lbound_exps:\n return self.__staticLoopBoundScanning(\n stmts, tile_level, outer_loop_inames, loop_info_table\n )\n\n # initialize all returned variables\n scan_stmts = []\n lbound_info_seq = []\n int_vars = []\n\n # iterate over each statement to find loop bounds that are functions of outer loop iterators\n min_int = ast.NumLitExp(-2147483648, ast.NumLitExp.INT)\n 
max_int = ast.NumLitExp(2147483647, ast.NumLitExp.INT)\n lb_exps_table = {}\n ub_exps_table = {}\n pre_scan_stmts = []\n post_scan_stmts = []\n scan_loops = SimpleLoops()\n for stmt in stmts:\n\n # skip all non loop statements\n if not isinstance(stmt, ast.ForStmt):\n lbound_info_seq.append(None)\n continue\n\n # extract this loop structure\n id, lb_exp, ub_exp, st_exp, lbody = self.ast_util.getForLoopInfo(stmt)\n\n # see if the loop bound expressions are bound/free of outer loop iterators\n lb_inames = filter(\n lambda i: self.ast_util.containIdentName(lb_exp, i), outer_loop_inames\n )\n ub_inames = filter(\n lambda i: self.ast_util.containIdentName(ub_exp, i), outer_loop_inames\n )\n\n # skip loops with bound expressions that are free of outer loop iterators\n if not lb_inames and not ub_inames:\n lbound_info_seq.append(None)\n continue\n\n # check if this loop runs only once\n is_one_time_loop = str(lb_exp) == str(ub_exp)\n\n # generate booleans to indicate the needs of prolog, epilog, and orio.main.tiled loop\n if is_one_time_loop:\n need_tiled_loop = False\n need_prolog = False\n need_epilog = False\n else:\n need_tiled_loop = True\n need_prolog = len(lb_inames) > 0\n need_epilog = len(ub_inames) > 0\n\n # generate new variable names for both the new lower and upper loop bounds\n if need_tiled_loop:\n lb_name, ub_name = self.__getLoopBoundNames()\n int_vars.extend([lb_name, ub_name])\n else:\n lb_name = \"\"\n ub_name = \"\"\n\n # append information about the new loop bounds\n lbinfo = (lb_name, ub_name, need_prolog, need_epilog, need_tiled_loop)\n lbound_info_seq.append(lbinfo)\n\n # skip generating loop-bound scanning code (if it's a one-time loop)\n if not need_tiled_loop:\n continue\n\n # generate loop-bound scanning code for the prolog\n if str(lb_exp) in lb_exps_table:\n lb_var = lb_exps_table[str(lb_exp)]\n a = ast.BinOpExp(\n ast.IdentExp(lb_name), lb_var.replicate(), ast.BinOpExp.EQ_ASGN\n )\n post_scan_stmts.append(ast.ExpStmt(a))\n else:\n if need_prolog:\n a = ast.BinOpExp(\n ast.IdentExp(lb_name), min_int.replicate(), ast.BinOpExp.EQ_ASGN\n )\n pre_scan_stmts.append(ast.ExpStmt(a))\n a = ast.BinOpExp(\n ast.IdentExp(lb_name),\n ast.FunCallExp(\n ast.IdentExp(\"max\"),\n [ast.IdentExp(lb_name), lb_exp.replicate()],\n ),\n ast.BinOpExp.EQ_ASGN,\n )\n scan_loops.insertLoop(lb_inames, ast.ExpStmt(a))\n else:\n a = ast.BinOpExp(\n ast.IdentExp(lb_name), lb_exp.replicate(), ast.BinOpExp.EQ_ASGN\n )\n pre_scan_stmts.append(ast.ExpStmt(a))\n lb_exps_table[str(lb_exp)] = ast.IdentExp(lb_name)\n\n # generate loop-bound scaning code for the epilog\n if str(ub_exp) in ub_exps_table:\n ub_var = ub_exps_table[str(ub_exp)]\n a = ast.BinOpExp(\n ast.IdentExp(ub_name), ub_var.replicate(), ast.BinOpExp.EQ_ASGN\n )\n post_scan_stmts.append(ast.ExpStmt(a))\n else:\n if need_epilog:\n a = ast.BinOpExp(\n ast.IdentExp(ub_name), max_int.replicate(), ast.BinOpExp.EQ_ASGN\n )\n pre_scan_stmts.append(ast.ExpStmt(a))\n a = ast.BinOpExp(\n ast.IdentExp(ub_name),\n ast.FunCallExp(\n ast.IdentExp(\"min\"),\n [ast.IdentExp(ub_name), ub_exp.replicate()],\n ),\n ast.BinOpExp.EQ_ASGN,\n )\n scan_loops.insertLoop(ub_inames, ast.ExpStmt(a))\n else:\n a = ast.BinOpExp(\n ast.IdentExp(ub_name), ub_exp.replicate(), ast.BinOpExp.EQ_ASGN\n )\n pre_scan_stmts.append(ast.ExpStmt(a))\n ub_exps_table[str(ub_exp)] = ast.IdentExp(ub_name)\n\n # build a new loop information tabe for generating the loop-bound scanning code\n n_loop_info_table = {}\n for iname, linfo in loop_info_table.items():\n _, _, _, st_exp, _ = 
linfo\n n_loop_info_table[iname] = (\n self.__getTileSizeName(iname, tile_level),\n self.__getTileIterName(iname, tile_level),\n st_exp,\n )\n\n # convert the \"SimpleLoop\" abstractions into loop ASTs\n scan_loop_stmts = scan_loops.convertToASTs(tile_level, n_loop_info_table)\n\n # merge all scanning statements\n scan_stmts = pre_scan_stmts + scan_loop_stmts + post_scan_stmts\n\n # return all necessary information\n return (scan_stmts, lbound_info_seq, int_vars)", "def get_dict_Y(ar_iterations, forecast_cycle, output_k):\n dict_Y = {}\n for i in range(ar_iterations + 1):\n dict_Y[i] = get_idx_forecast(\n idx_start=0,\n ar_iteration=i,\n forecast_cycle=forecast_cycle,\n output_k=output_k,\n )\n return dict_Y", "def _identifyModels(self):\n self._models = {}\n metals = np.unique(self.dat[::, 0])\n ys = np.unique(self.dat[::, 1])\n ls = np.unique(self.dat[::, 2])\n masses = np.unique(self.dat[::, 3])\n nl = pyaC.NestedLoop([len(metals), len(ys), len(ls), len(masses)])\n for i in nl:\n indi = np.where(\n np.logical_and(\n self.dat[::, 0] == metals[i[0]],\n np.logical_and(\n self.dat[::, 1] == ys[i[1]],\n np.logical_and(\n self.dat[::, 2] == ls[i[2]], self.dat[::, 3] == masses[i[3]]\n ),\n ),\n )\n )[0]\n if len(indi) > 0:\n self._models[\n (metals[i[0]], ys[i[1]], ls[i[2]], masses[i[3]])\n ] = indi.copy()", "def main():\n\n #for row_start in np.arange(0, 128):\n # MLC_IDSAT_algorithm_rv1(14, 33, 16, 2, 'ULVT', 1.8, 2.0, 128, [row_start], [71, 55, 46, 35], [0.002, 0.01, 0.04, 0.2], 4, [], '', ['../Data/chip14/MLC_programming_Chip14_Col33_2msPULSE_VG1p8_VD2p0_VAsource_VBdrain_01', '../Data/chip14/MLC_programming_Chip14_Col33_10msPULSE_VG1p8_VD2p0_VAsource_VBdrain_02', '../Data/chip14/MLC_programming_Chip14_Col33_40msPULSE_VG1p8_VD2p0_VAsource_VBdrain_03', '../Data/chip14/MLC_programming_Chip14_Col33_200msPULSE_VG1p8_VD2p0_VAsource_VBdrain_04'], '../Plots/chip14/', 'VG1p8_VD2p0', '_cycle01020304_row'+str(row_start).zfill(3), Imin=12, Imax=136)\n\n Marcov_Chain_MLE(14, 33, 16, 2, 'ULVT', 1.8, 2.0, 128, range(0, 128), [71, 55, 46, 35], [0.002, 0.01, 0.04, 0.2], 4, ['../Data/chip14/MLC_programming_Chip14_Col33_2msPULSE_VG1p8_VD2p0_VAsource_VBdrain_01', '../Data/chip14/MLC_programming_Chip14_Col33_10msPULSE_VG1p8_VD2p0_VAsource_VBdrain_02', '../Data/chip14/MLC_programming_Chip14_Col33_40msPULSE_VG1p8_VD2p0_VAsource_VBdrain_03', '../Data/chip14/MLC_programming_Chip14_Col33_200msPULSE_VG1p8_VD2p0_VAsource_VBdrain_04'], '../Plots/chip14/', 'VG1p8_VD2p0', '', 160.0, [90.0, 70.0, 50.0, 30.0], 0.0, 0.5)\n plt.show()\n #MLC_IDSAT_algorithm_rv1(14, 33, 16, 2, 'ULVT', 1.8, 2.0, 128, range(0, 128), [71, 55, 46, 35], [0.002, 0.01, 0.04, 0.2], 4, [], '', ['../Data/chip14/MLC_programming_Chip14_Col33_2msPULSE_VG1p8_VD2p0_VAsource_VBdrain_01', '../Data/chip14/MLC_programming_Chip14_Col33_10msPULSE_VG1p8_VD2p0_VAsource_VBdrain_02', '../Data/chip14/MLC_programming_Chip14_Col33_40msPULSE_VG1p8_VD2p0_VAsource_VBdrain_03', '../Data/chip14/MLC_programming_Chip14_Col33_200msPULSE_VG1p8_VD2p0_VAsource_VBdrain_04'], '../Plots/chip14/', 'VG1p8_VD2p0', '_cycle01020304_all')\n\n #IDS_VGS(14, 33, 16, 2, 'ULVT', 128, ['../Data/chip14/Fresh_Chip14_Col33_Ids_Vgs_VAsource_VBdrain', '../Data/chip14/MLC_Chip14_Col33_2msPULSE_VG1p8_VD2p0_Ids_Vgs_VAsource_VBdrain_01', '../Data/chip14/MLC_Chip14_Col33_10msPULSE_VG1p8_VD2p0_Ids_Vgs_VAsource_VBdrain_02', '../Data/chip14/MLC_Chip14_Col33_40msPULSE_VG1p8_VD2p0_Ids_Vgs_VAsource_VBdrain_03', '../Data/chip14/MLC_Chip14_Col33_200msPULSE_VG1p8_VD2p0_Ids_Vgs_VAsource_VBdrain_04'], ['b', 'y', 
'r', 'k', 'g'], '../Plots/chip14/', 'Fresh_vs_MLC01020304_VG1p8_VD2p0_IDS-VGS_VaS-VbD_', range(0, 128), 'Fresh vs MLC-1-2-3-4 (VG=1.8, VD=2.0)\\nMLC-{1, 2, 3, 4}: {2ms, 10ms, 40ms, 200ms} WL pulses, IDSAT threshold = {90, 70, 50, 30}uA, forward' , 150, ['fresh', 'MLC-01', 'MLC-02', 'MLC-03', 'MLC-04']) \n #IDS_VGS(14, 33, 16, 2, 'ULVT', 128, ['../Data/chip14/Fresh_Chip14_Col33_Ids_Vgs_VAdrain_VBsource', '../Data/chip14/MLC_Chip14_Col33_2msPULSE_VG1p8_VD2p0_Ids_Vgs_VAdrain_VBsource_01', '../Data/chip14/MLC_Chip14_Col33_10msPULSE_VG1p8_VD2p0_Ids_Vgs_VAdrain_VBsource_02', '../Data/chip14/MLC_Chip14_Col33_40msPULSE_VG1p8_VD2p0_Ids_Vgs_VAdrain_VBsource_03', '../Data/chip14/MLC_Chip14_Col33_200msPULSE_VG1p8_VD2p0_Ids_Vgs_VAdrain_VBsource_04'], ['b', 'y', 'r', 'k', 'g'], '../Plots/chip14/', 'Fresh_vs_MLC01020304_VG1p8_VD2p0_IDS-VGS_VaD-VbS_', range(0, 128), 'Fresh vs MLC-1-2-3-4 (VG=1.8, VD=2.0)\\nMLC-{1, 2, 3, 4}: {2ms, 10ms, 40ms, 200ms} WL pulses, IDSAT threshold = {90, 70, 50, 30}uA, reversed', 150, ['fresh', 'MLC-01', 'MLC-02', 'MLC-03', 'MLC-04']) \n\n #hist_IDS_VGS(0, 14, 33, 16, 2, 'ULVT', 128, ['../Data/chip14/Fresh_Chip14_Col33_Ids_Vgs_VAdrain_VBsource', '../Data/chip14/MLC_Chip14_Col33_2msPULSE_VG1p8_VD2p0_Ids_Vgs_VAdrain_VBsource_01', '../Data/chip14/MLC_Chip14_Col33_10msPULSE_VG1p8_VD2p0_Ids_Vgs_VAdrain_VBsource_02', '../Data/chip14/MLC_Chip14_Col33_40msPULSE_VG1p8_VD2p0_Ids_Vgs_VAdrain_VBsource_03', '../Data/chip14/MLC_Chip14_Col33_200msPULSE_VG1p8_VD2p0_Ids_Vgs_VAdrain_VBsource_04'], ['b', 'y', 'r', 'k', 'g'], '../Plots/chip14/', 'Hist-IDSAT_MLC-rv1-01020304_reverse-read_', range(0, 128), 'MLC programming {2ms, 10ms, 40ms, 200ms} pulses, VGS=1.8, VDS=2.0 for level=1-2-3-4\\nhistogram of read-IDSAT (VGS=VDS=0.8V)', 0, 150, 0, 150, 1000)\n #\n #t_label = []\n #for t in np.arange(0, 0.002*(71) + 0.0001, 0.002):\n # t_label.append(str(t))\n #\n ##MLC_IDSAT_algorithm_rv1(14, 33, 16, 2, 'ULVT', 1.8, 2.0, 128, [21], [71], [0.002], 1, np.arange(0, 0.002*(71)+0.0001, 0.002), t_label, ['../Data/chip14/MLC_programming_Chip14_Col33_2msPULSE_VG1p8_VD2p0_VAsource_VBdrain_01'], '../Plots/chip14/', 'VG1p8_VD2p0', '_rv1_cycle01_row-21', Imin=82, Imax=142)\n\n #for row_start in np.arange(0, 128):\n # MLC_IDSAT_algorithm_rv1(14, 33, 16, 2, 'ULVT', 1.8, 2.0, 128, [row_start], [71], [0.002], 1, np.arange(0, 0.002*(71)+0.0001, 0.002), t_label, ['../Data/chip14/MLC_programming_Chip14_Col33_2msPULSE_VG1p8_VD2p0_VAsource_VBdrain_01'], '../Plots/chip14/', 'VG1p8_VD2p0', '_rv1_cycle01_row_'+str(row_start).zfill(3), Imin=80, Imax=142)\n\n #MLC_IDSAT_algorithm_rv1(14, 33, 16, 2, 'ULVT', 1.8, 2.0, 128, range(0, 128), [71], [0.002], 1, np.arange(0, 0.002*(71)+0.0001, 0.002), t_label, ['../Data/chip14/MLC_programming_Chip14_Col33_2msPULSE_VG1p8_VD2p0_VAsource_VBdrain_01'], '../Plots/chip14/', 'VG1p8_VD2p0', '_rv1_cycle01', Imin=80, Imax=142)\n\n #MLC_IDSAT_characterization(11, 30, 16, 2, 'ULVT', 1.8, 1.7, 32, range(0, 16) , [40], [0.01], 1, [0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.5], ['0', '0.05', '0.1', '0.15', '0.2', '0.25', '0.3', '0.35', '0.4', 'recover'], ['../Data/chip11/Chip11_Col30_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01'], '../Plots/chip11/', 'VG1p8_VD1p7', '_cycle01', 50, 160, 1)\n #MLC_IDSAT_characterization(11, 30, 16, 2, 'ULVT', 1.8, 2.0, 32, range(16, 32), [40], [0.01], 1, [0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.5], ['0', '0.05', '0.1', '0.15', '0.2', '0.25', '0.3', '0.35', '0.4', 'recover'], 
['../Data/chip11/Chip11_Col30_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01'], '../Plots/chip11/', 'VG1p8_VD2p0', '_cycle01', 20, 160, 1)\n\n #MLC_IDSAT_characterization(11, 30, 16, 2, 'ULVT', 1.8, 1.7, 32, range(0, 16) , [40, 80], [0.01, 0.01], 2, [0, 0.2, 0.4, 0.8, 1.0, 1.2, 1.4, 1.6, 1.7], ['0', '0.2', '0.4', '0.4', '0.6', '0.8', '1.0', '1.2', 'recover'], ['../Data/chip11/Chip11_Col30_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col30_HCI_80x10ms_stress_VG_ConstPulse_VAsource_VBdrain_02'], '../Plots/chip11/', 'VG1p8_VD1p7', '_cycle0102', 50, 160, 1)\n #MLC_IDSAT_characterization(11, 30, 16, 2, 'ULVT', 1.8, 2.0, 32, range(16, 32), [40, 80], [0.01, 0.01], 2, [0, 0.2, 0.4, 0.8, 1.0, 1.2, 1.4, 1.6, 1.7], ['0', '0.2', '0.4', '0.4', '0.6', '0.8', '1.0', '1.2', 'recover'], ['../Data/chip11/Chip11_Col30_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col30_HCI_80x10ms_stress_VG_ConstPulse_VAsource_VBdrain_02'], '../Plots/chip11/', 'VG1p8_VD2p0', '_cycle0102', 20, 160, 1)\n\n #MLC_IDSAT_characterization(11, 30, 16, 2, 'ULVT', 1.8, 1.7, 32, range(0, 16) , [40, 80, 240], [0.01, 0.01, 0.01], 3, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6'], ['../Data/chip11/Chip11_Col30_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col30_HCI_80x10ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col30_HCI_240x10ms_stress_VG_ConstPulse_VAsource_VBdrain_03'], '../Plots/chip11/', 'VG1p8_VD1p7', '_cycle010203', 50, 160, 1)\n #MLC_IDSAT_characterization(11, 30, 16, 2, 'ULVT', 1.8, 2.0, 32, range(16, 32), [40, 80, 240], [0.01, 0.01, 0.01], 3, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6'], ['../Data/chip11/Chip11_Col30_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col30_HCI_80x10ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col30_HCI_240x10ms_stress_VG_ConstPulse_VAsource_VBdrain_03'], '../Plots/chip11/', 'VG1p8_VD2p0', '_cycle010203', 20, 160, 1)\n\n #MLC_IDSAT_characterization(11, 30, 16, 2, 'ULVT', 1.8, 1.7, 32, range(0, 16) , [40, 80, 240, 180], [0.01, 0.01, 0.01, 0.04], 4, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4, 4.8, 12], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6', '3.6', '10.8'], ['../Data/chip11/Chip11_Col30_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col30_HCI_80x10ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col30_HCI_240x10ms_stress_VG_ConstPulse_VAsource_VBdrain_03', '../Data/chip11/Chip11_Col30_HCI_180x40ms_stress_VG_ConstPulse_VAsource_VBdrain_04'], '../Plots/chip11/', 'VG1p8_VD1p7', '_cycle01020304', 40, 160, 1)\n #MLC_IDSAT_characterization(11, 30, 16, 2, 'ULVT', 1.8, 2.0, 32, range(16, 32), [40, 80, 240, 180], [0.01, 0.01, 0.01, 0.04], 4, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4, 4.8, 12], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6', '3.6', '10.8'], ['../Data/chip11/Chip11_Col30_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col30_HCI_80x10ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col30_HCI_240x10ms_stress_VG_ConstPulse_VAsource_VBdrain_03', '../Data/chip11/Chip11_Col30_HCI_180x40ms_stress_VG_ConstPulse_VAsource_VBdrain_04'], '../Plots/chip11/', 'VG1p8_VD2p0', '_cycle01020304', 10, 160, 1)\n\n #MLC_IDSAT_characterization(11, 33, 16, 2, 'ULVT', 1.5, 1.7, 128, 
range(0, 32) , [40], [0.01], 1, [0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.5], ['0', '0.05', '0.1', '0.15', '0.2', '0.25', '0.3', '0.35', '0.4', 'recover'], ['../Data/chip11/Chip11_Col33_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01'], '../Plots/chip11/', 'VG1p5_VD1p7', '_cycle01', 50, 160, 1)\n #MLC_IDSAT_characterization(11, 33, 16, 2, 'ULVT', 1.8, 1.7, 128, range(32, 64) , [40], [0.01], 1, [0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.5], ['0', '0.05', '0.1', '0.15', '0.2', '0.25', '0.3', '0.35', '0.4', 'recover'], ['../Data/chip11/Chip11_Col33_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01'], '../Plots/chip11/', 'VG1p8_VD1p7', '_cycle01', 50, 160, 1)\n #MLC_IDSAT_characterization(11, 33, 16, 2, 'ULVT', 1.5, 2.0, 128, range(64, 96) , [40], [0.01], 1, [0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.5], ['0', '0.05', '0.1', '0.15', '0.2', '0.25', '0.3', '0.35', '0.4', 'recover'], ['../Data/chip11/Chip11_Col33_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01'], '../Plots/chip11/', 'VG1p5_VD2p0', '_cycle01', 20, 160, 1)\n #MLC_IDSAT_characterization(11, 33, 16, 2, 'ULVT', 1.8, 2.0, 128, range(96, 128), [40], [0.01], 1, [0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.5], ['0', '0.05', '0.1', '0.15', '0.2', '0.25', '0.3', '0.35', '0.4', 'recover'], ['../Data/chip11/Chip11_Col33_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01'], '../Plots/chip11/', 'VG1p8_VD2p0', '_cycle01', 20, 160, 1)\n\n #MLC_IDSAT_characterization(11, 33, 16, 2, 'ULVT', 1.5, 1.7, 128, range(0, 32) , [40, 20], [0.01, 0.04], 2, [0, 0.2, 0.4, 0.8, 1.0, 1.2, 1.4, 1.6, 1.7], ['0', '0.2', '0.4', '0.4', '0.6', '0.8', '1.0', '1.2', 'recover'], ['../Data/chip11/Chip11_Col33_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col33_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02'], '../Plots/chip11/', 'VG1p5_VD1p7', '_cycle0102', 50, 160, 1)\n #MLC_IDSAT_characterization(11, 33, 16, 2, 'ULVT', 1.8, 1.7, 128, range(32, 64) , [40, 20], [0.01, 0.04], 2, [0, 0.2, 0.4, 0.8, 1.0, 1.2, 1.4, 1.6, 1.7], ['0', '0.2', '0.4', '0.4', '0.6', '0.8', '1.0', '1.2', 'recover'], ['../Data/chip11/Chip11_Col33_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col33_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02'], '../Plots/chip11/', 'VG1p8_VD1p7', '_cycle0102', 50, 160, 1)\n #MLC_IDSAT_characterization(11, 33, 16, 2, 'ULVT', 1.5, 2.0, 128, range(64, 96) , [40, 20], [0.01, 0.04], 2, [0, 0.2, 0.4, 0.8, 1.0, 1.2, 1.4, 1.6, 1.7], ['0', '0.2', '0.4', '0.4', '0.6', '0.8', '1.0', '1.2', 'recover'], ['../Data/chip11/Chip11_Col33_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col33_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02'], '../Plots/chip11/', 'VG1p5_VD2p0', '_cycle0102', 20, 160, 1)\n #MLC_IDSAT_characterization(11, 33, 16, 2, 'ULVT', 1.8, 2.0, 128, range(96, 128), [40, 20], [0.01, 0.04], 2, [0, 0.2, 0.4, 0.8, 1.0, 1.2, 1.4, 1.6, 1.7], ['0', '0.2', '0.4', '0.4', '0.6', '0.8', '1.0', '1.2', 'recover'], ['../Data/chip11/Chip11_Col33_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col33_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02'], '../Plots/chip11/', 'VG1p8_VD2p0', '_cycle0102', 20, 160, 1)\n\n #MLC_IDSAT_characterization(11, 33, 16, 2, 'ULVT', 1.5, 1.7, 128, range(0, 32) , [40, 20, 12], [0.01, 0.04, 0.2], 3, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6'], 
['../Data/chip11/Chip11_Col33_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col33_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col33_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03'], '../Plots/chip11/', 'VG1p5_VD1p7', '_cycle010203', 50, 160, 1)\n #MLC_IDSAT_characterization(11, 33, 16, 2, 'ULVT', 1.8, 1.7, 128, range(32, 64) , [40, 20, 12], [0.01, 0.04, 0.2], 3, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6'], ['../Data/chip11/Chip11_Col33_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col33_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col33_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03'], '../Plots/chip11/', 'VG1p8_VD1p7', '_cycle010203', 50, 160, 1)\n #MLC_IDSAT_characterization(11, 33, 16, 2, 'ULVT', 1.5, 2.0, 128, range(64, 96) , [40, 20, 12], [0.01, 0.04, 0.2], 3, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6'], ['../Data/chip11/Chip11_Col33_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col33_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col33_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03'], '../Plots/chip11/', 'VG1p5_VD2p0', '_cycle010203', 20, 160, 1)\n #MLC_IDSAT_characterization(11, 33, 16, 2, 'ULVT', 1.8, 2.0, 128, range(96, 128), [40, 20, 12], [0.01, 0.04, 0.2], 3, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6'], ['../Data/chip11/Chip11_Col33_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col33_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col33_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03'], '../Plots/chip11/', 'VG1p8_VD2p0', '_cycle010203', 20, 160, 1)\n\n #MLC_IDSAT_characterization(11, 33, 16, 2, 'ULVT', 1.5, 1.7, 128, range(0, 32) , [40, 20, 12, 36], [0.01, 0.04, 0.2, 0.2], 4, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4, 4.8, 12], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6', '3.6', '10.8'], ['../Data/chip11/Chip11_Col33_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col33_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col33_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03', '../Data/chip11/Chip11_Col33_HCI_36x200ms_stress_VG_ConstPulse_VAsource_VBdrain_04'], '../Plots/chip11/', 'VG1p5_VD1p7', '_cycle01020304', 40, 160, 1)\n #MLC_IDSAT_characterization(11, 33, 16, 2, 'ULVT', 1.8, 1.7, 128, range(32, 64) , [40, 20, 12, 36], [0.01, 0.04, 0.2, 0.2], 4, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4, 4.8, 12], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6', '3.6', '10.8'], ['../Data/chip11/Chip11_Col33_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col33_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col33_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03', '../Data/chip11/Chip11_Col33_HCI_36x200ms_stress_VG_ConstPulse_VAsource_VBdrain_04'], '../Plots/chip11/', 'VG1p8_VD1p7', '_cycle01020304', 40, 160, 1)\n #MLC_IDSAT_characterization(11, 33, 16, 2, 'ULVT', 1.5, 2.0, 128, range(64, 96) , [40, 20, 12, 36], [0.01, 0.04, 0.2, 0.2], 4, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4, 4.8, 12], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6', '3.6', '10.8'], 
['../Data/chip11/Chip11_Col33_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col33_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col33_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03', '../Data/chip11/Chip11_Col33_HCI_36x200ms_stress_VG_ConstPulse_VAsource_VBdrain_04'], '../Plots/chip11/', 'VG1p5_VD2p0', '_cycle01020304', 10, 160, 1)\n #MLC_IDSAT_characterization(11, 33, 16, 2, 'ULVT', 1.8, 2.0, 128, range(96, 128), [40, 20, 12, 36], [0.01, 0.04, 0.2, 0.2], 4, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4, 4.8, 12], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6', '3.6', '10.8'], ['../Data/chip11/Chip11_Col33_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col33_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col33_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03', '../Data/chip11/Chip11_Col33_HCI_36x200ms_stress_VG_ConstPulse_VAsource_VBdrain_04'], '../Plots/chip11/', 'VG1p8_VD2p0', '_cycle01020304', 10, 160, 1)\n\n #MLC_IDSAT_characterization(11, 18, 36, 2, 'ULVT', 1.8, 2.0, 32, range(0, 16) , [40], [0.01], 1, [0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.5], ['0', '0.05', '0.1', '0.15', '0.2', '0.25', '0.3', '0.35', '0.4', 'recover'], ['../Data/chip11/Chip11_Col18_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01'], '../Plots/chip11/', 'VG1p8_VD2p0', '_cycle01', 50, 125, 1)\n #MLC_IDSAT_characterization(11, 18, 36, 2, 'ULVT', 1.8, 2.4, 32, range(16, 32), [40], [0.01], 1, [0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.5], ['0', '0.05', '0.1', '0.15', '0.2', '0.25', '0.3', '0.35', '0.4', 'recover'], ['../Data/chip11/Chip11_Col18_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01'], '../Plots/chip11/', 'VG1p8_VD2p4', '_cycle01', 20, 125, 1)\n\n #MLC_IDSAT_characterization(11, 18, 36, 2, 'ULVT', 1.8, 2.0, 32, range(0, 16) , [40, 80], [0.01, 0.01], 2, [0, 0.2, 0.4, 0.8, 1.0, 1.2, 1.4, 1.6, 1.7], ['0', '0.2', '0.4', '0.4', '0.6', '0.8', '1.0', '1.2', 'recover'], ['../Data/chip11/Chip11_Col18_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col18_HCI_80x10ms_stress_VG_ConstPulse_VAsource_VBdrain_02'], '../Plots/chip11/', 'VG1p8_VD2p0', '_cycle0102', 50, 125, 1)\n #MLC_IDSAT_characterization(11, 18, 36, 2, 'ULVT', 1.8, 2.4, 32, range(16, 32), [40, 80], [0.01, 0.01], 2, [0, 0.2, 0.4, 0.8, 1.0, 1.2, 1.4, 1.6, 1.7], ['0', '0.2', '0.4', '0.4', '0.6', '0.8', '1.0', '1.2', 'recover'], ['../Data/chip11/Chip11_Col18_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col18_HCI_80x10ms_stress_VG_ConstPulse_VAsource_VBdrain_02'], '../Plots/chip11/', 'VG1p8_VD2p4', '_cycle0102', 20, 125, 1)\n\n #MLC_IDSAT_characterization(11, 18, 36, 2, 'ULVT', 1.8, 2.0, 32, range(0, 16) , [40, 80, 240], [0.01, 0.01, 0.01], 3, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6'], ['../Data/chip11/Chip11_Col18_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col18_HCI_80x10ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col18_HCI_240x10ms_stress_VG_ConstPulse_VAsource_VBdrain_03'], '../Plots/chip11/', 'VG1p8_VD2p0', '_cycle010203', 50, 125, 1)\n #MLC_IDSAT_characterization(11, 18, 36, 2, 'ULVT', 1.8, 2.4, 32, range(16, 32), [40, 80, 240], [0.01, 0.01, 0.01], 3, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6'], 
['../Data/chip11/Chip11_Col18_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col18_HCI_80x10ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col18_HCI_240x10ms_stress_VG_ConstPulse_VAsource_VBdrain_03'], '../Plots/chip11/', 'VG1p8_VD2p4', '_cycle010203', 20, 125, 1)\n\n #MLC_IDSAT_characterization(11, 18, 36, 2, 'ULVT', 1.8, 2.0, 32, range(0, 16) , [40, 80, 240, 180], [0.01, 0.01, 0.01, 0.04], 4, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4, 4.8, 12], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6', '3.6', '10.8'], ['../Data/chip11/Chip11_Col18_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col18_HCI_80x10ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col18_HCI_240x10ms_stress_VG_ConstPulse_VAsource_VBdrain_03', '../Data/chip11/Chip11_Col18_HCI_180x40ms_stress_VG_ConstPulse_VAsource_VBdrain_04'], '../Plots/chip11/', 'VG1p8_VD2p0', '_cycle01020304', 40, 125, 1)\n #MLC_IDSAT_characterization(11, 18, 36, 2, 'ULVT', 1.8, 2.4, 32, range(16, 32), [40, 80, 240, 180], [0.01, 0.01, 0.01, 0.04], 4, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4, 4.8, 12], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6', '3.6', '10.8'], ['../Data/chip11/Chip11_Col18_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col18_HCI_80x10ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col18_HCI_240x10ms_stress_VG_ConstPulse_VAsource_VBdrain_03', '../Data/chip11/Chip11_Col18_HCI_180x40ms_stress_VG_ConstPulse_VAsource_VBdrain_04'], '../Plots/chip11/', 'VG1p8_VD2p4', '_cycle01020304', 10, 125, 1)\n\n #MLC_IDSAT_characterization(11, 24, 20, 2, 'ULVT', 1.8, 1.8, 32, range(0, 16) , [40], [0.01], 1, [0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.5], ['0', '0.05', '0.1', '0.15', '0.2', '0.25', '0.3', '0.35', '0.4', 'recover'], ['../Data/chip11/Chip11_Col24_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01'], '../Plots/chip11/', 'VG1p8_VD1p8', '_cycle01', 50, 150, 1)\n #MLC_IDSAT_characterization(11, 24, 20, 2, 'ULVT', 1.8, 2.2, 32, range(16, 32), [40], [0.01], 1, [0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.5], ['0', '0.05', '0.1', '0.15', '0.2', '0.25', '0.3', '0.35', '0.4', 'recover'], ['../Data/chip11/Chip11_Col24_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01'], '../Plots/chip11/', 'VG1p8_VD2p2', '_cycle01', 15, 150, 1)\n\n #MLC_IDSAT_characterization(11, 24, 20, 2, 'ULVT', 1.8, 1.8, 32, range(0, 16) , [40, 80], [0.01, 0.01], 2, [0, 0.2, 0.4, 0.8, 1.0, 1.2, 1.4, 1.6, 1.7], ['0', '0.2', '0.4', '0.4', '0.6', '0.8', '1.0', '1.2', 'recover'], ['../Data/chip11/Chip11_Col24_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col24_HCI_80x10ms_stress_VG_ConstPulse_VAsource_VBdrain_02'], '../Plots/chip11/', 'VG1p8_VD1p8', '_cycle0102', 50, 150, 1)\n #MLC_IDSAT_characterization(11, 24, 20, 2, 'ULVT', 1.8, 2.2, 32, range(16, 32), [40, 80], [0.01, 0.01], 2, [0, 0.2, 0.4, 0.8, 1.0, 1.2, 1.4, 1.6, 1.7], ['0', '0.2', '0.4', '0.4', '0.6', '0.8', '1.0', '1.2', 'recover'], ['../Data/chip11/Chip11_Col24_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col24_HCI_80x10ms_stress_VG_ConstPulse_VAsource_VBdrain_02'], '../Plots/chip11/', 'VG1p8_VD2p2', '_cycle0102', 15, 150, 1)\n\n #MLC_IDSAT_characterization(11, 24, 20, 2, 'ULVT', 1.8, 1.8, 32, range(0, 16) , [40, 80, 240], [0.01, 0.01, 0.01], 3, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6'], 
['../Data/chip11/Chip11_Col24_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col24_HCI_80x10ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col24_HCI_240x10ms_stress_VG_ConstPulse_VAsource_VBdrain_03'], '../Plots/chip11/', 'VG1p8_VD1p8', '_cycle010203', 50, 150, 1)\n #MLC_IDSAT_characterization(11, 24, 20, 2, 'ULVT', 1.8, 2.2, 32, range(16, 32), [40, 80, 240], [0.01, 0.01, 0.01], 3, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6'], ['../Data/chip11/Chip11_Col24_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col24_HCI_80x10ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col24_HCI_240x10ms_stress_VG_ConstPulse_VAsource_VBdrain_03'], '../Plots/chip11/', 'VG1p8_VD2p2', '_cycle010203', 15, 150, 1)\n\n #MLC_IDSAT_characterization(11, 24, 20, 2, 'ULVT', 1.8, 1.8, 32, range(0, 16) , [40, 80, 240, 180], [0.01, 0.01, 0.01, 0.04], 4, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4, 4.8, 12], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6', '3.6', '10.8'], ['../Data/chip11/Chip11_Col24_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col24_HCI_80x10ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col24_HCI_240x10ms_stress_VG_ConstPulse_VAsource_VBdrain_03', '../Data/chip11/Chip11_Col24_HCI_180x40ms_stress_VG_ConstPulse_VAsource_VBdrain_04'], '../Plots/chip11/', 'VG1p8_VD1p8', '_cycle01020304', 40, 150, 1)\n #MLC_IDSAT_characterization(11, 24, 20, 2, 'ULVT', 1.8, 2.2, 32, range(16, 32), [40, 80, 240, 180], [0.01, 0.01, 0.01, 0.04], 4, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4, 4.8, 12], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6', '3.6', '10.8'], ['../Data/chip11/Chip11_Col24_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col24_HCI_80x10ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col24_HCI_240x10ms_stress_VG_ConstPulse_VAsource_VBdrain_03', '../Data/chip11/Chip11_Col24_HCI_180x40ms_stress_VG_ConstPulse_VAsource_VBdrain_04'], '../Plots/chip11/', 'VG1p8_VD2p2', '_cycle01020304', 5, 150, 1)\n\n\n #MLC_IDSAT_characterization(11, 27, 20, 2, 'ULVT', 1.5, 1.8, 128, range(0, 32) , [40], [0.01], 1, [0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.5], ['0', '0.05', '0.1', '0.15', '0.2', '0.25', '0.3', '0.35', '0.4', 'recover'], ['../Data/chip11/Chip11_Col27_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01'], '../Plots/chip11/', 'VG1p5_VD1p8', '_cycle01', 50, 150, 1)\n #MLC_IDSAT_characterization(11, 27, 20, 2, 'ULVT', 1.8, 1.8, 128, range(32, 64) , [40], [0.01], 1, [0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.5], ['0', '0.05', '0.1', '0.15', '0.2', '0.25', '0.3', '0.35', '0.4', 'recover'], ['../Data/chip11/Chip11_Col27_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01'], '../Plots/chip11/', 'VG1p8_VD1p8', '_cycle01', 50, 150, 1)\n #MLC_IDSAT_characterization(11, 27, 20, 2, 'ULVT', 1.5, 2.2, 128, range(64, 96) , [40], [0.01], 1, [0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.5], ['0', '0.05', '0.1', '0.15', '0.2', '0.25', '0.3', '0.35', '0.4', 'recover'], ['../Data/chip11/Chip11_Col27_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01'], '../Plots/chip11/', 'VG1p5_VD2p2', '_cycle01', 15, 150, 1)\n #MLC_IDSAT_characterization(11, 27, 20, 2, 'ULVT', 1.8, 2.2, 128, range(96, 128), [40], [0.01], 1, [0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.5], ['0', '0.05', '0.1', '0.15', '0.2', '0.25', '0.3', '0.35', 
'0.4', 'recover'], ['../Data/chip11/Chip11_Col27_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01'], '../Plots/chip11/', 'VG1p8_VD2p2', '_cycle01', 15, 150, 1)\n\n #MLC_IDSAT_characterization(11, 27, 20, 2, 'ULVT', 1.5, 1.8, 128, range(0, 32) , [40, 20], [0.01, 0.04], 2, [0, 0.2, 0.4, 0.8, 1.0, 1.2, 1.4, 1.6, 1.7], ['0', '0.2', '0.4', '0.4', '0.6', '0.8', '1.0', '1.2', 'recover'], ['../Data/chip11/Chip11_Col27_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col27_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02'], '../Plots/chip11/', 'VG1p5_VD1p8', '_cycle0102', 50, 150, 1)\n #MLC_IDSAT_characterization(11, 27, 20, 2, 'ULVT', 1.8, 1.8, 128, range(32, 64) , [40, 20], [0.01, 0.04], 2, [0, 0.2, 0.4, 0.8, 1.0, 1.2, 1.4, 1.6, 1.7], ['0', '0.2', '0.4', '0.4', '0.6', '0.8', '1.0', '1.2', 'recover'], ['../Data/chip11/Chip11_Col27_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col27_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02'], '../Plots/chip11/', 'VG1p8_VD1p8', '_cycle0102', 50, 150, 1)\n #MLC_IDSAT_characterization(11, 27, 20, 2, 'ULVT', 1.5, 2.2, 128, range(64, 96) , [40, 20], [0.01, 0.04], 2, [0, 0.2, 0.4, 0.8, 1.0, 1.2, 1.4, 1.6, 1.7], ['0', '0.2', '0.4', '0.4', '0.6', '0.8', '1.0', '1.2', 'recover'], ['../Data/chip11/Chip11_Col27_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col27_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02'], '../Plots/chip11/', 'VG1p5_VD2p2', '_cycle0102', 15, 150, 1)\n #MLC_IDSAT_characterization(11, 27, 20, 2, 'ULVT', 1.8, 2.2, 128, range(96, 128), [40, 20], [0.01, 0.04], 2, [0, 0.2, 0.4, 0.8, 1.0, 1.2, 1.4, 1.6, 1.7], ['0', '0.2', '0.4', '0.4', '0.6', '0.8', '1.0', '1.2', 'recover'], ['../Data/chip11/Chip11_Col27_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col27_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02'], '../Plots/chip11/', 'VG1p8_VD2p2', '_cycle0102', 15, 150, 1)\n\n #MLC_IDSAT_characterization(11, 27, 20, 2, 'ULVT', 1.5, 1.8, 128, range(0, 32) , [40, 20, 12], [0.01, 0.04, 0.2], 3, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6'], ['../Data/chip11/Chip11_Col27_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col27_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col27_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03'], '../Plots/chip11/', 'VG1p5_VD1p8', '_cycle010203', 50, 150, 1)\n #MLC_IDSAT_characterization(11, 27, 20, 2, 'ULVT', 1.8, 1.8, 128, range(32, 64) , [40, 20, 12], [0.01, 0.04, 0.2], 3, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6'], ['../Data/chip11/Chip11_Col27_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col27_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col27_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03'], '../Plots/chip11/', 'VG1p8_VD1p8', '_cycle010203', 50, 150, 1)\n #MLC_IDSAT_characterization(11, 27, 20, 2, 'ULVT', 1.5, 2.2, 128, range(64, 96) , [40, 20, 12], [0.01, 0.04, 0.2], 3, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6'], ['../Data/chip11/Chip11_Col27_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col27_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', 
'../Data/chip11/Chip11_Col27_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03'], '../Plots/chip11/', 'VG1p5_VD2p2', '_cycle010203', 15, 150, 1)\n #MLC_IDSAT_characterization(11, 27, 20, 2, 'ULVT', 1.8, 2.2, 128, range(96, 128), [40, 20, 12], [0.01, 0.04, 0.2], 3, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6'], ['../Data/chip11/Chip11_Col27_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col27_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col27_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03'], '../Plots/chip11/', 'VG1p8_VD2p2', '_cycle010203', 15, 150, 1)\n\n #MLC_IDSAT_characterization(11, 27, 20, 2, 'ULVT', 1.5, 1.8, 128, range(0, 32) , [40, 20, 12, 36], [0.01, 0.04, 0.2, 0.2], 4, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4, 4.8, 12], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6', '3.6', '10.8'], ['../Data/chip11/Chip11_Col27_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col27_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col27_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03', '../Data/chip11/Chip11_Col27_HCI_36x200ms_stress_VG_ConstPulse_VAsource_VBdrain_04'], '../Plots/chip11/', 'VG1p5_VD1p8', '_cycle01020304', 40, 150, 1)\n #MLC_IDSAT_characterization(11, 27, 20, 2, 'ULVT', 1.8, 1.8, 128, range(32, 64) , [40, 20, 12, 36], [0.01, 0.04, 0.2, 0.2], 4, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4, 4.8, 12], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6', '3.6', '10.8'], ['../Data/chip11/Chip11_Col27_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col27_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col27_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03', '../Data/chip11/Chip11_Col27_HCI_36x200ms_stress_VG_ConstPulse_VAsource_VBdrain_04'], '../Plots/chip11/', 'VG1p8_VD1p8', '_cycle01020304', 20, 150, 1)\n #MLC_IDSAT_characterization(11, 27, 20, 2, 'ULVT', 1.5, 2.2, 128, range(64, 96) , [40, 20, 12, 36], [0.01, 0.04, 0.2, 0.2], 4, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4, 4.8, 12], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6', '3.6', '10.8'], ['../Data/chip11/Chip11_Col27_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col27_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col27_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03', '../Data/chip11/Chip11_Col27_HCI_36x200ms_stress_VG_ConstPulse_VAsource_VBdrain_04'], '../Plots/chip11/', 'VG1p5_VD2p2', '_cycle01020304', 5, 150, 1)\n #MLC_IDSAT_characterization(11, 27, 20, 2, 'ULVT', 1.8, 2.2, 128, range(96, 128), [40, 20, 12, 36], [0.01, 0.04, 0.2, 0.2], 4, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4, 4.8, 12], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6', '3.6', '10.8'], ['../Data/chip11/Chip11_Col27_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col27_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col27_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03', '../Data/chip11/Chip11_Col27_HCI_36x200ms_stress_VG_ConstPulse_VAsource_VBdrain_04'], '../Plots/chip11/', 'VG1p8_VD2p2', '_cycle01020304', 5, 150, 1)\n\n #MLC_IDSAT_characterization(11, 28, 20, 2, 'LVT', 1.5, 1.8, 128, range(0, 32) , [40], [0.01], 1, [0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.5], ['0', '0.05', '0.1', '0.15', '0.2', 
'0.25', '0.3', '0.35', '0.4', 'recover'], ['../Data/chip11/Chip11_Col28_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01'], '../Plots/chip11/', 'VG1p5_VD1p8', '_cycle01', 50, 150, 1)\n #MLC_IDSAT_characterization(11, 28, 20, 2, 'LVT', 1.8, 1.8, 128, range(32, 64) , [40], [0.01], 1, [0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.5], ['0', '0.05', '0.1', '0.15', '0.2', '0.25', '0.3', '0.35', '0.4', 'recover'], ['../Data/chip11/Chip11_Col28_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01'], '../Plots/chip11/', 'VG1p8_VD1p8', '_cycle01', 50, 150, 1)\n #MLC_IDSAT_characterization(11, 28, 20, 2, 'LVT', 1.5, 2.2, 128, range(64, 96) , [40], [0.01], 1, [0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.5], ['0', '0.05', '0.1', '0.15', '0.2', '0.25', '0.3', '0.35', '0.4', 'recover'], ['../Data/chip11/Chip11_Col28_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01'], '../Plots/chip11/', 'VG1p5_VD2p2', '_cycle01', 15, 150, 1)\n #MLC_IDSAT_characterization(11, 28, 20, 2, 'LVT', 1.8, 2.2, 128, range(96, 128), [40], [0.01], 1, [0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.5], ['0', '0.05', '0.1', '0.15', '0.2', '0.25', '0.3', '0.35', '0.4', 'recover'], ['../Data/chip11/Chip11_Col28_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01'], '../Plots/chip11/', 'VG1p8_VD2p2', '_cycle01', 15, 150, 1)\n\n #MLC_IDSAT_characterization(11, 28, 20, 2, 'LVT', 1.5, 1.8, 128, range(0, 32) , [40, 20], [0.01, 0.04], 2, [0, 0.2, 0.4, 0.8, 1.0, 1.2, 1.4, 1.6, 1.7], ['0', '0.2', '0.4', '0.4', '0.6', '0.8', '1.0', '1.2', 'recover'], ['../Data/chip11/Chip11_Col28_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col28_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02'], '../Plots/chip11/', 'VG1p5_VD1p8', '_cycle0102', 50, 150, 1)\n #MLC_IDSAT_characterization(11, 28, 20, 2, 'LVT', 1.8, 1.8, 128, range(32, 64) , [40, 20], [0.01, 0.04], 2, [0, 0.2, 0.4, 0.8, 1.0, 1.2, 1.4, 1.6, 1.7], ['0', '0.2', '0.4', '0.4', '0.6', '0.8', '1.0', '1.2', 'recover'], ['../Data/chip11/Chip11_Col28_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col28_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02'], '../Plots/chip11/', 'VG1p8_VD1p8', '_cycle0102', 50, 150, 1)\n #MLC_IDSAT_characterization(11, 28, 20, 2, 'LVT', 1.5, 2.2, 128, range(64, 96) , [40, 20], [0.01, 0.04], 2, [0, 0.2, 0.4, 0.8, 1.0, 1.2, 1.4, 1.6, 1.7], ['0', '0.2', '0.4', '0.4', '0.6', '0.8', '1.0', '1.2', 'recover'], ['../Data/chip11/Chip11_Col28_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col28_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02'], '../Plots/chip11/', 'VG1p5_VD2p2', '_cycle0102', 15, 150, 1)\n #MLC_IDSAT_characterization(11, 28, 20, 2, 'LVT', 1.8, 2.2, 128, range(96, 128), [40, 20], [0.01, 0.04], 2, [0, 0.2, 0.4, 0.8, 1.0, 1.2, 1.4, 1.6, 1.7], ['0', '0.2', '0.4', '0.4', '0.6', '0.8', '1.0', '1.2', 'recover'], ['../Data/chip11/Chip11_Col28_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col28_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02'], '../Plots/chip11/', 'VG1p8_VD2p2', '_cycle0102', 15, 150, 1)\n\n #MLC_IDSAT_characterization(11, 28, 20, 2, 'LVT', 1.5, 1.8, 128, range(0, 32) , [40, 20, 12], [0.01, 0.04, 0.2], 3, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6'], ['../Data/chip11/Chip11_Col28_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col28_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', 
'../Data/chip11/Chip11_Col28_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03'], '../Plots/chip11/', 'VG1p5_VD1p8', '_cycle010203', 50, 150, 1)\n #MLC_IDSAT_characterization(11, 28, 20, 2, 'LVT', 1.8, 1.8, 128, range(32, 64) , [40, 20, 12], [0.01, 0.04, 0.2], 3, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6'], ['../Data/chip11/Chip11_Col28_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col28_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col28_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03'], '../Plots/chip11/', 'VG1p8_VD1p8', '_cycle010203', 50, 150, 1)\n #MLC_IDSAT_characterization(11, 28, 20, 2, 'LVT', 1.5, 2.2, 128, range(64, 96) , [40, 20, 12], [0.01, 0.04, 0.2], 3, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6'], ['../Data/chip11/Chip11_Col28_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col28_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col28_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03'], '../Plots/chip11/', 'VG1p5_VD2p2', '_cycle010203', 15, 150, 1)\n #MLC_IDSAT_characterization(11, 28, 20, 2, 'LVT', 1.8, 2.2, 128, range(96, 128), [40, 20, 12], [0.01, 0.04, 0.2], 3, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6'], ['../Data/chip11/Chip11_Col28_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col28_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col28_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03'], '../Plots/chip11/', 'VG1p8_VD2p2', '_cycle010203', 15, 150, 1)\n\n #MLC_IDSAT_characterization(11, 28, 20, 2, 'LVT', 1.5, 1.8, 128, range(0, 32) , [40, 20, 12, 36], [0.01, 0.04, 0.2, 0.2], 4, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4, 4.8, 12], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6', '3.6', '10.8'], ['../Data/chip11/Chip11_Col28_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col28_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col28_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03', '../Data/chip11/Chip11_Col28_HCI_36x200ms_stress_VG_ConstPulse_VAsource_VBdrain_04'], '../Plots/chip11/', 'VG1p5_VD1p8', '_cycle01020304', 40, 150, 1)\n #MLC_IDSAT_characterization(11, 28, 20, 2, 'LVT', 1.8, 1.8, 128, range(32, 64) , [40, 20, 12, 36], [0.01, 0.04, 0.2, 0.2], 4, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4, 4.8, 12], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6', '3.6', '10.8'], ['../Data/chip11/Chip11_Col28_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col28_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col28_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03', '../Data/chip11/Chip11_Col28_HCI_36x200ms_stress_VG_ConstPulse_VAsource_VBdrain_04'], '../Plots/chip11/', 'VG1p8_VD1p8', '_cycle01020304', 20, 150, 1)\n #MLC_IDSAT_characterization(11, 28, 20, 2, 'LVT', 1.5, 2.2, 128, range(64, 96) , [40, 20, 12, 36], [0.01, 0.04, 0.2, 0.2], 4, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4, 4.8, 12], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6', '3.6', '10.8'], ['../Data/chip11/Chip11_Col28_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col28_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', 
'../Data/chip11/Chip11_Col28_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03', '../Data/chip11/Chip11_Col28_HCI_36x200ms_stress_VG_ConstPulse_VAsource_VBdrain_04'], '../Plots/chip11/', 'VG1p5_VD2p2', '_cycle01020304', 5, 150, 1)\n #MLC_IDSAT_characterization(11, 28, 20, 2, 'LVT', 1.8, 2.2, 128, range(96, 128), [40, 20, 12, 36], [0.01, 0.04, 0.2, 0.2], 4, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4, 4.8, 12], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6', '3.6', '10.8'], ['../Data/chip11/Chip11_Col28_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col28_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col28_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03', '../Data/chip11/Chip11_Col28_HCI_36x200ms_stress_VG_ConstPulse_VAsource_VBdrain_04'], '../Plots/chip11/', 'VG1p8_VD2p2', '_cycle01020304', 5, 150, 1)\n\n\n # (L, Nfin, VT_flavor, Nrow, Imax)\n col_list = [(36, 1, 'ULVT', 32 , 60 ), (36, 1, 'LVT', 32 , 50 ), (36, 1, 'SVT', 32 , 45 ),\n (36, 1, 'ULVT', 128, 60 ), (36, 1, 'LVT', 128, 50 ), (36, 1, 'SVT', 128, 45 ),\n (20, 1, 'ULVT', 32 , 75 ), (20, 1, 'LVT', 32 , 60 ), (20, 1, 'SVT', 32 , 50 ),\n (20, 1, 'ULVT', 128, 75 ), (20, 1, 'LVT', 128, 60 ), (20, 1, 'SVT', 128, 50 ),\n (16, 1, 'ULVT', 32 , 80 ), (16, 1, 'LVT', 32 , 65 ), (16, 1, 'SVT', 32 , 60 ),\n (16, 1, 'ULVT', 128, 80 ), (16, 1, 'LVT', 128, 65 ), (16, 1, 'SVT', 128, 60 ),\n (36, 2, 'ULVT', 32 , 115), (36, 2, 'LVT', 32 , 95 ), (36, 2, 'SVT', 32 , 85 ),\n (36, 2, 'ULVT', 128, 115), (36, 2, 'LVT', 128, 95 ), (36, 2, 'SVT', 128, 85 ), \n (20, 2, 'ULVT', 32 , 135), (20, 2, 'LVT', 32 , 115), (20, 2, 'SVT', 32 , 100),\n (20, 2, 'ULVT', 128, 135), (20, 2, 'LVT', 128, 120), (20, 2, 'SVT', 128, 100),\n (16, 2, 'ULVT', 32 , 150), (16, 2, 'LVT', 32 , 125), (16, 2, 'SVT', 32 , 115),\n (16, 2, 'ULVT', 128, 150), (16, 2, 'LVT', 128, 125), (16, 2, 'SVT', 128, 115)]\n\n #MLC_IDSAT_algorithm_rv1(11, 21, 36, 2, 'ULVT', 1.8, 2.4, 128, range(0, 128), [135+20], [0.2], 1, np.arange(0, 0.01*16+0.0001, 0.01), '', ['../Data/chip11/MLC_programming_Chip11_Col21_2msPULSE_VG1p8_VD2p4_VAsource_VBdrain_01'], '../Plots/chip11/', 'VG1p8_VD2p4', '_rv1_cycle01_EfficientPython')\n\n #MLC_IDSAT_algorithm_rv1(12, 21, 36, 2, 'ULVT', '0.9-1.2-1.5-1.8', 2.4, 128, range(0, 128), [59+16, 72+40, 80+31, 68+23], [0.2, 0.2, 0.2, 0.2], 4, [0, 15, 15.1, 37.5, 37.6, 59.8, 59.9, 78.1], ['0', '15', '', '37.4', '', '59.6', '', '77.8'], ['../Data/chip12/MLC_programming_Chip12_Col21_200msPULSE_VG0p9_VD2p4_VAsource_VBdrain_01', '../Data/chip12/MLC_programming_Chip12_Col21_200msPULSE_VG1p2_VD2p4_VAsource_VBdrain_02', '../Data/chip12/MLC_programming_Chip12_Col21_200msPULSE_VG1p5_VD2p4_VAsource_VBdrain_03', '../Data/chip12/MLC_programming_Chip12_Col21_200msPULSE_VG1p8_VD2p4_VAsource_VBdrain_04'], '../Plots/chip12/', 'VG-0p9-1p2-1p5-1p8_VD2p4', '_rv1_cycle01020304')\n\n t_ratio_lst = [(0, 0.17), (0.16, 0.34), (0.33, 0.505), (0.495, 0.67), (0.66, 0.84), (0.83, 1)]\n\n #t_label = []\n #for t in np.arange(0, 0.2*(59+16) + 0.0001, 0.2):\n # t_label.append(str(t))\n #MLC_IDSAT_algorithm_rv1(12, 21, 36, 2, 'ULVT', 0.9, 2.4, 128, range(0, 128), [59+16], [0.2], 1, np.arange(0, 0.2*(59+16)+0.0001, 0.2), t_label, ['../Data/chip12/MLC_programming_Chip12_Col21_200msPULSE_VG0p9_VD2p4_VAsource_VBdrain_01'], '../Plots/chip12/', 'VG0p9_VD2p4', '_rv1_cycle01')\n #for row_start in np.arange(0, 128, 8):\n # MLC_IDSAT_algorithm_rv1(12, 21, 36, 2, 'ULVT', 0.9, 2.4, 128, range(row_start, row_start+8), [59+16], [0.2], 1, 
np.arange(0, 0.2*(59+16)+0.0001, 0.2), t_label, ['../Data/chip12/MLC_programming_Chip12_Col21_200msPULSE_VG0p9_VD2p4_VAsource_VBdrain_01'], '../Plots/chip12/', 'VG0p9_VD2p4', '_rv1_cycle01_row'+str(row_start)+'_to_'+str(row_start+7))\n # segment=0\n # for t_ratio in t_ratio_lst:\n # MLC_IDSAT_algorithm_rv1(12, 21, 36, 2, 'ULVT', 0.9, 2.4, 128, range(row_start, row_start+8), [59+16], [0.2], 1, np.arange(0, 0.2*(59+16)+0.0001, 0.2), t_label, ['../Data/chip12/MLC_programming_Chip12_Col21_200msPULSE_VG0p9_VD2p4_VAsource_VBdrain_01'], '../Plots/chip12/', 'VG0p9_VD2p4', '_rv1_cycle01_row'+str(row_start)+'_to_'+str(row_start+7)+'_'+str(segment), [t_ratio[0]*0.2*(59+16), t_ratio[1]*0.2*(59+16)])\n # segment += 1\n\n #t_label = []\n #for t in np.arange(0, 0.2*(72+40) + 0.0001, 0.2):\n # t_label.append(str(0.2*(59+16) + t))\n #MLC_IDSAT_algorithm_rv1(12, 21, 36, 2, 'ULVT', 1.2, 2.4, 128, range(0, 128), [72+40], [0.2], 1, np.arange(0, 0.2*(72+40)+0.0001, 0.2), t_label, ['../Data/chip12/MLC_programming_Chip12_Col21_200msPULSE_VG1p2_VD2p4_VAsource_VBdrain_02'], '../Plots/chip12/', 'VG1p2_VD2p4', '_rv1_cycle02')\n #for row_start in np.arange(0, 128, 8):\n # MLC_IDSAT_algorithm_rv1(12, 21, 36, 2, 'ULVT', 1.2, 2.4, 128, range(row_start, row_start+8), [72+40], [0.2], 1, np.arange(0, 0.2*(72+40)+0.0001, 0.2), t_label, ['../Data/chip12/MLC_programming_Chip12_Col21_200msPULSE_VG1p2_VD2p4_VAsource_VBdrain_02'], '../Plots/chip12/', 'VG1p2_VD2p4', '_rv1_cycle02_row'+str(row_start)+'_to_'+str(row_start+7))\n # segment=0\n # for t_ratio in t_ratio_lst:\n # MLC_IDSAT_algorithm_rv1(12, 21, 36, 2, 'ULVT', 1.2, 2.4, 128, range(row_start, row_start+8), [72+40], [0.2], 1, np.arange(0, 0.2*(72+40)+0.0001, 0.2), t_label, ['../Data/chip12/MLC_programming_Chip12_Col21_200msPULSE_VG1p2_VD2p4_VAsource_VBdrain_02'], '../Plots/chip12/', 'VG1p2_VD2p4', '_rv1_cycle02_row'+str(row_start)+'_to_'+str(row_start+7)+'_'+str(segment), [t_ratio[0]*0.2*(72+40), t_ratio[1]*0.2*(72+40)])\n # segment += 1\n\n\n #t_label = []\n #for t in np.arange(0, 0.2*(80+31) + 0.0001, 0.2):\n # t_label.append(str(0.2*(59+16) + 0.2*(72+40) + t))\n ##MLC_IDSAT_algorithm_rv1(12, 21, 36, 2, 'ULVT', 1.5, 2.4, 128, range(0, 128), [80+31], [0.2], 1, np.arange(0, 0.2*(80+31)+0.0001, 0.2), t_label, ['../Data/chip12/MLC_programming_Chip12_Col21_200msPULSE_VG1p5_VD2p4_VAsource_VBdrain_03'], '../Plots/chip12/', 'VG1p5_VD2p4', '_rv1_cycle03')\n #for row_start in np.arange(0, 128, 8):\n # MLC_IDSAT_algorithm_rv1(12, 21, 36, 2, 'ULVT', 1.5, 2.4, 128, range(row_start, row_start+8), [80+31], [0.2], 1, np.arange(0, 0.2*(80+31)+0.0001, 0.2), t_label, ['../Data/chip12/MLC_programming_Chip12_Col21_200msPULSE_VG1p5_VD2p4_VAsource_VBdrain_03'], '../Plots/chip12/', 'VG1p5_VD2p4', '_rv1_cycle03_row'+str(row_start)+'_to_'+str(row_start+7))\n # segment=0\n # for t_ratio in t_ratio_lst:\n # MLC_IDSAT_algorithm_rv1(12, 21, 36, 2, 'ULVT', 1.5, 2.4, 128, range(row_start, row_start+8), [80+31], [0.2], 1, np.arange(0, 0.2*(80+31)+0.0001, 0.2), t_label, ['../Data/chip12/MLC_programming_Chip12_Col21_200msPULSE_VG1p5_VD2p4_VAsource_VBdrain_03'], '../Plots/chip12/', 'VG1p5_VD2p4', '_rv1_cycle03_row'+str(row_start)+'_to_'+str(row_start+7)+'_'+str(segment), [t_ratio[0]*0.2*(80+31), t_ratio[1]*0.2*(80+31)])\n # segment += 1\n\n\n #t_label = []\n #for t in np.arange(0, 0.2*(68+23) + 0.0001, 0.2):\n # t_label.append(str(0.2*(59+16) + 0.2*(72+40) + 0.2*(80+31) + t))\n #MLC_IDSAT_algorithm_rv1(12, 21, 36, 2, 'ULVT', 1.8, 2.4, 128, range(0, 128), [68+23], [0.2], 1, np.arange(0, 0.2*(68+23)+0.0001, 
0.2), t_label, ['../Data/chip12/MLC_programming_Chip12_Col21_200msPULSE_VG1p8_VD2p4_VAsource_VBdrain_04'], '../Plots/chip12/', 'VG1p8_VD2p4', '_rv1_cycle04')\n #for row_start in np.arange(0, 128, 8):\n # MLC_IDSAT_algorithm_rv1(12, 21, 36, 2, 'ULVT', 1.8, 2.4, 128, range(row_start, row_start+8), [68+23], [0.2], 1, np.arange(0, 0.2*(68+23)+0.0001, 0.2), t_label, ['../Data/chip12/MLC_programming_Chip12_Col21_200msPULSE_VG1p8_VD2p4_VAsource_VBdrain_04'], '../Plots/chip12/', 'VG1p8_VD2p4', '_rv1_cycle04_row'+str(row_start)+'_to_'+str(row_start+7))\n # segment=0\n # for t_ratio in t_ratio_lst:\n # MLC_IDSAT_algorithm_rv1(12, 21, 36, 2, 'ULVT', 1.8, 2.4, 128, range(row_start, row_start+8), [68+23], [0.2], 1, np.arange(0, 0.2*(68+23)+0.0001, 0.2), t_label, ['../Data/chip12/MLC_programming_Chip12_Col21_200msPULSE_VG1p8_VD2p4_VAsource_VBdrain_04'], '../Plots/chip12/', 'VG1p8_VD2p4', '_rv1_cycle04_row'+str(row_start)+'_to_'+str(row_start+7)+'_'+str(segment), [t_ratio[0]*0.2*(68+23), t_ratio[1]*0.2*(68+23)])\n # segment += 1\n\n\n #MLC_IDSAT_characterization(10, 18, 36, 2, 'ULVT', 1.8, 2.0, 32, range(0, 16) , [40, 20, 12], [0.01, 0.04, 0.2], 3, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6'], ['../Data/chip10/Chip10_Col18_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip10/Chip10_Col18_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip10/Chip10_Col18_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03'], '../Plots/chip10/', 'VG1p8_VD2p0', '_cycle010203', 38, 112)\n #MLC_IDSAT_characterization(10, 18, 36, 2, 'ULVT', 1.8, 2.4, 32, range(16, 32), [40, 20, 12], [0.01, 0.04, 0.2], 3, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6'], ['../Data/chip10/Chip10_Col18_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip10/Chip10_Col18_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip10/Chip10_Col18_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03'], '../Plots/chip10/', 'VG1p8_VD2p4', '_cycle010203', 16, 110)\n\n #MLC_IDSAT_characterization(10, 24, 20, 2, 'ULVT', 1.8, 1.8, 32, range(0, 16) , [40, 20, 12], [0.01, 0.04, 0.2], 3, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6'], ['../Data/chip10/Chip10_Col24_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip10/Chip10_Col24_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip10/Chip10_Col24_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03'], '../Plots/chip10/', 'VG1p8_VD1p8', '_cycle010203', 44, 133)\n #MLC_IDSAT_characterization(10, 24, 20, 2, 'ULVT', 1.8, 2.2, 32, range(16, 32), [40, 20, 12], [0.01, 0.04, 0.2], 3, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6'], ['../Data/chip10/Chip10_Col24_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip10/Chip10_Col24_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip10/Chip10_Col24_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03'], '../Plots/chip10/', 'VG1p8_VD2p2', '_cycle010203', 14, 133)\n\n #MLC_IDSAT_characterization(10, 30, 16, 2, 'ULVT', 1.8, 1.7, 32, range(0, 16) , [40, 20, 12], [0.01, 0.04, 0.2], 3, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6'], ['../Data/chip10/Chip10_Col30_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', 
'../Data/chip10/Chip10_Col30_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip10/Chip10_Col30_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03'], '../Plots/chip10/', 'VG1p8_VD1p7', '_cycle010203', 50, 135)\n #MLC_IDSAT_characterization(10, 30, 16, 2, 'ULVT', 1.8, 2.0, 32, range(16, 32), [40, 20, 12], [0.01, 0.04, 0.2], 3, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6'], ['../Data/chip10/Chip10_Col30_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip10/Chip10_Col30_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip10/Chip10_Col30_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03'], '../Plots/chip10/', 'VG1p8_VD2p0', '_cycle010203', 20, 140)\n\n\n\n #MLC_IDSAT_characterization(10, 18, 36, 2, 'ULVT', 1.8, 2.0, 32, range(0, 16) , [40], [0.01], 1, [0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.5], ['0', '0.05', '0.1', '0.15', '0.2', '0.25', '0.3', '0.35', '0.4', 'recover'], ['../Data/chip10/Chip10_Col18_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01'], '../Plots/chip10/', 'VG1p8_VD2p0', '_cycle01', 38, 112)\n #MLC_IDSAT_characterization(10, 18, 36, 2, 'ULVT', 1.8, 2.4, 32, range(16, 32), [40], [0.01], 1, [0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.5], ['0', '0.05', '0.1', '0.15', '0.2', '0.25', '0.3', '0.35', '0.4', 'recover'], ['../Data/chip10/Chip10_Col18_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01'], '../Plots/chip10/', 'VG1p8_VD2p4', '_cycle01', 16, 110)\n\n #MLC_IDSAT_characterization(10, 24, 20, 2, 'ULVT', 1.8, 1.8, 32, range(0, 16) , [40], [0.01], 1, [0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.5], ['0', '0.05', '0.1', '0.15', '0.2', '0.25', '0.3', '0.35', '0.4', 'recover'], ['../Data/chip10/Chip10_Col24_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01'], '../Plots/chip10/', 'VG1p8_VD1p8', '_cycle01', 44, 133)\n #MLC_IDSAT_characterization(10, 24, 20, 2, 'ULVT', 1.8, 2.2, 32, range(16, 32), [40], [0.01], 1, [0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.5], ['0', '0.05', '0.1', '0.15', '0.2', '0.25', '0.3', '0.35', '0.4', 'recover'], ['../Data/chip10/Chip10_Col24_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01'], '../Plots/chip10/', 'VG1p8_VD2p2', '_cycle01', 14, 133)\n\n #MLC_IDSAT_characterization(10, 30, 16, 2, 'ULVT', 1.8, 1.7, 32, range(0, 16) , [40], [0.01], 1, [0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.5], ['0', '0.05', '0.1', '0.15', '0.2', '0.25', '0.3', '0.35', '0.4', 'recover'], ['../Data/chip10/Chip10_Col30_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01'], '../Plots/chip10/', 'VG1p8_VD1p7', '_cycle01', 50, 135)\n #MLC_IDSAT_characterization(10, 30, 16, 2, 'ULVT', 1.8, 2.0, 32, range(16, 32), [40], [0.01], 1, [0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.5], ['0', '0.05', '0.1', '0.15', '0.2', '0.25', '0.3', '0.35', '0.4', 'recover'], ['../Data/chip10/Chip10_Col30_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01'], '../Plots/chip10/', 'VG1p8_VD2p0', '_cycle01', 20, 140)\n\n\n\n #MLC_IDSAT_characterization(10, 18, 36, 2, 'ULVT', 1.8, 2.0, 32, range(0, 16) , [40, 20], [0.01, 0.04], 2, [0, 0.2, 0.4, 0.8, 1.0, 1.2, 1.4, 1.6, 1.7], ['0', '0.2', '0.4', '0.4', '0.6', '0.8', '1.0', '1.2', 'recover'], ['../Data/chip10/Chip10_Col18_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip10/Chip10_Col18_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02'], '../Plots/chip10/', 'VG1p8_VD2p0', '_cycle0102', 38, 112)\n #MLC_IDSAT_characterization(10, 18, 36, 2, 'ULVT', 1.8, 2.4, 32, range(16, 
32), [40, 20], [0.01, 0.04], 2, [0, 0.2, 0.4, 0.8, 1.0, 1.2, 1.4, 1.6, 1.7], ['0', '0.2', '0.4', '0.4', '0.6', '0.8', '1.0', '1.2', 'recover'], ['../Data/chip10/Chip10_Col18_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip10/Chip10_Col18_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02'], '../Plots/chip10/', 'VG1p8_VD2p4', '_cycle0102', 16, 110)\n # \n #MLC_IDSAT_characterization(10, 24, 20, 2, 'ULVT', 1.8, 1.8, 32, range(0, 16) , [40, 20], [0.01, 0.04], 2, [0, 0.2, 0.4, 0.8, 1.0, 1.2, 1.4, 1.6, 1.7], ['0', '0.2', '0.4', '0.4', '0.6', '0.8', '1.0', '1.2', 'recover'], ['../Data/chip10/Chip10_Col24_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip10/Chip10_Col24_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02'], '../Plots/chip10/', 'VG1p8_VD1p8', '_cycle0102', 44, 133)\n #MLC_IDSAT_characterization(10, 24, 20, 2, 'ULVT', 1.8, 2.2, 32, range(16, 32), [40, 20], [0.01, 0.04], 2, [0, 0.2, 0.4, 0.8, 1.0, 1.2, 1.4, 1.6, 1.7], ['0', '0.2', '0.4', '0.4', '0.6', '0.8', '1.0', '1.2', 'recover'], ['../Data/chip10/Chip10_Col24_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip10/Chip10_Col24_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02'], '../Plots/chip10/', 'VG1p8_VD2p2', '_cycle0102', 14, 133)\n # \n #MLC_IDSAT_characterization(10, 30, 16, 2, 'ULVT', 1.8, 1.7, 32, range(0, 16) , [40, 20], [0.01, 0.04], 2, [0, 0.2, 0.4, 0.8, 1.0, 1.2, 1.4, 1.6, 1.7], ['0', '0.2', '0.4', '0.4', '0.6', '0.8', '1.0', '1.2', 'recover'], ['../Data/chip10/Chip10_Col30_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip10/Chip10_Col30_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02'], '../Plots/chip10/', 'VG1p8_VD1p7', '_cycle0102', 50, 135)\n #MLC_IDSAT_characterization(10, 30, 16, 2, 'ULVT', 1.8, 2.0, 32, range(16, 32), [40, 20], [0.01, 0.04], 2, [0, 0.2, 0.4, 0.8, 1.0, 1.2, 1.4, 1.6, 1.7], ['0', '0.2', '0.4', '0.4', '0.6', '0.8', '1.0', '1.2', 'recover'], ['../Data/chip10/Chip10_Col30_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip10/Chip10_Col30_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02'], '../Plots/chip10/', 'VG1p8_VD2p0', '_cycle0102', 20, 140)", "def _parse_initsol(self) :\n\t\tlogging.debug(\"Parsing initsol initial solution\")\n\n\t\t# Init initsol as an empty dict\n\t\tself.initsol = {}\n\n\t\tfor varname in ['alpha','beta','g','h'] : \n\t\t\tself._parse_var_initsol(varname)", "def train(self, iterable):\n for ngram in generate_ngrams(iterable, self.n + 1):\n self.markov_dict.setdefault(ngram[: self.n], Counter()).update([ngram[self.n]])\n self.prob_dict.update([ngram[: self.n]])", "def __call__(self, num_runs=1, seed=None):\n\n # Use the seed provided.\n numpy.random.seed(seed)\n\n # A list of DictionaryLearningRandomDataSample instances\n results = []\n\n for i in nanshe.util.iters.irange(num_runs):\n # Where the result will be stored\n each_result = DictionaryLearningRandomDataSample()\n\n # Generates a numpy array that has a shape of self.frame_shape with\n # a fixed number of randomly selected (equally likely) non-zero\n # entries\n each_result.points = NumpyRandomArrayDiscreteUniformDistributionGenerator(\n self.frame_shape)(self.num_objects).astype(float)\n\n # Creates a point generator that selects from the non-zero points\n # generated for activation to create groups\n # as an index array (tuple of 1D numpy.ndarrays)\n selected_points = each_result.points.nonzero()\n # convert to a single numpy.ndarrays\n selected_points = numpy.array(selected_points)\n # simpler, lightweight way 
of doing zip(*selected_points)\n selected_points = selected_points.T\n selected_points = selected_points.tolist()\n point_groups_gen = MappingDiscreteGeometricDistributionGenerator(\n *selected_points\n )\n\n # Using a mean group size and the number of groups creates point\n # groups (these should in someway relate to the basis images)\n point_groups = point_groups_gen(\n 1.0 / float(self.mean_group_size), self.num_groups)\n\n # Will store the essential frames that indicate which points will\n # be active in each frame\n each_result.centroid_activation_frames = []\n for each_point_group in point_groups:\n # Get an index array\n each_point_group_index_array = nanshe.util.iters.list_indices_to_index_array(\n each_point_group\n )\n\n # Create an empty activation frame\n each_centroid_activation_frame = numpy.zeros(self.frame_shape)\n\n # Set the active points to be randomly distributed\n each_centroid_activation_frame_points_shape = each_centroid_activation_frame[each_point_group_index_array].shape\n\n # Set the active points to be randomly distributed\n each_centroid_activation_frame[each_point_group_index_array] = numpy.random.random(\n each_centroid_activation_frame_points_shape\n )\n\n # Rescale the active points\n each_centroid_activation_frame[each_point_group_index_array] *= self.object_intensity_range\n\n # Translate the active points\n each_centroid_activation_frame[each_point_group_index_array] += self.object_min_intensity\n\n # add to the stack of centroid activations\n each_result.centroid_activation_frames.append(\n each_centroid_activation_frame\n )\n\n # convert to numpy array\n each_result.centroid_activation_frames = numpy.array(\n each_result.centroid_activation_frames\n )\n\n # Holds the frames without noise\n each_result.noiseless_frames = []\n\n # Takes each centroid activation frame and creates objects that dim\n # over time\n for each_centroid_activation_frame in each_result.centroid_activation_frames:\n # Determines how much to spread each active point\n # (self.object_spread is like the average spread)\n sigma = 2 * self.object_spread * numpy.random.random()\n for each_frame_num in nanshe.util.iters.irange(self.num_frames):\n # Determines a linear rescaling of each image (where they\n # slowly become dimmer)\n rescale = float(\n self.num_frames - each_frame_num\n ) / float(self.num_frames)\n # Convolves each frame to generate a frame with objects\n # (uses the same spread for each simply dims over time)\n each_matrix_convolved = scipy.ndimage.filters.gaussian_filter(\n rescale * each_centroid_activation_frame, sigma\n )\n # Adds to the stack of frames\n each_result.noiseless_frames.append(each_matrix_convolved)\n\n # Converts the form of the noiseless frames\n each_result.noiseless_frames = numpy.array(\n each_result.noiseless_frames\n )\n\n # Creates frames that contain some background noise from a normal\n # distribution\n each_result.frames = each_result.noiseless_frames.copy()\n each_result.frames += numpy.random.normal(\n scale=self.background_noise_intensity,\n size=each_result.frames.shape\n )\n\n # Append to our list of results\n results.append(each_result)\n\n\n return(results)", "def __init__(self):\n\t\tappionScript.AppionScript.__init__(self)\n\t\tself.rundata = {}\n\t\t### extra appionLoop functions:\n\t\tself._addDefaultParams()\n\t\tself.setFunctionResultKeys()\n\t\tself._setRunAndParameters()\n\t\t#self.specialCreateOutputDirs()\n\t\tself._initializeDoneDict()\n\t\tself.result_dirs={}", "def loop_threaded():\n nonlocal index, total\n nonlocal d_tree\n 
nonlocal fn_inputReadCallback\n nonlocal fn_analysisCallback\n nonlocal fn_outputWriteCallback\n nonlocal dret_inputSet\n nonlocal dret_analyze\n nonlocal dret_outputSet\n nonlocal str_desc\n\n def thread_createOnFunction(path, data, str_namePrefix, fn_thread):\n \"\"\"\n Simply create a thread function and return it.\n \"\"\"\n nonlocal index\n ta = threading.Thread(\n name = '%s-%04d.%d' % (str_namePrefix, index, self.numThreads),\n target = fn_thread,\n args = (path, data, index),\n kwargs = kwargs\n )\n return ta\n\n def threadsInBatches_run(l_threadAnalysis):\n \"\"\"\n Run threads in batches of self.numThreads\n and also handle any remaining threads.\n \"\"\"\n index = 1\n if self.numThreads > total:\n self.numThreads = total\n threadFullLoops = int(total / self.numThreads)\n threadRem = total % self.numThreads\n alreadyRunCount = thread_batch(\n l_threadAnalysis,\n threadFullLoops,\n self.numThreads,\n 0)\n nextRunCount = thread_batch(\n l_threadAnalysis,\n 1,\n threadRem,\n alreadyRunCount)\n\n if int(self.verbosityLevel) and self.toConsole():\n iterator = tqdm( self.d_inputTree.items(),\n desc = str_desc)\n else:\n iterator = self.d_inputTree.items()\n\n # Read\n if fn_inputReadCallback:\n index = 1\n for path, data in iterator:\n dret_inputSet = inputSet_read(path, data)\n # filesRead += dret_inputSet['filesRead']\n index += 1\n\n # Analyze\n if fn_analysisCallback:\n index = 1\n l_threadAnalysis = []\n for path, data in iterator:\n l_threadAnalysis.append(thread_createOnFunction(\n path, data,\n 'analysisThread',\n # t_analyze\n analysis_do\n )\n )\n index += 1\n\n # And now batch them in groups\n threadsInBatches_run(l_threadAnalysis)\n tree_removeDeadBranches()\n # Write\n if fn_outputWriteCallback:\n index = 1\n for path, data in iterator:\n dret_outputSet = outputSet_write(path, d_tree[path])\n # filesSaved += dret_outputSet['filesSaved']\n index += 1" ]
[ "0.60553885", "0.5839073", "0.5820277", "0.5816718", "0.5751431", "0.5579861", "0.5559903", "0.55534524", "0.5551435", "0.55467737", "0.5465791", "0.5446664", "0.54339755", "0.54255664", "0.53983235", "0.5374087", "0.53734773", "0.53571624", "0.53484", "0.53369623", "0.5333001", "0.5323921", "0.5288923", "0.5288657", "0.52676123", "0.52584344", "0.52571315", "0.52535546", "0.52184725", "0.5215378", "0.5214238", "0.521088", "0.5203182", "0.5181803", "0.5177167", "0.5173176", "0.5167987", "0.51630276", "0.5161928", "0.51595587", "0.5153712", "0.51487863", "0.51479554", "0.51358813", "0.5134178", "0.51163954", "0.5116108", "0.5114937", "0.5103928", "0.5103767", "0.50997704", "0.5097505", "0.5090106", "0.5079526", "0.5077028", "0.50704163", "0.5058106", "0.50497156", "0.5034236", "0.50290525", "0.50279963", "0.50072855", "0.49941716", "0.49924064", "0.4983311", "0.49734977", "0.49730268", "0.4972533", "0.49682283", "0.49658036", "0.49631009", "0.49629766", "0.49564442", "0.49525204", "0.4948458", "0.4947409", "0.49470308", "0.49403656", "0.49344608", "0.4933616", "0.49317417", "0.49207753", "0.49200222", "0.49096614", "0.4903864", "0.49027637", "0.4900452", "0.4897537", "0.48949727", "0.48935872", "0.48787582", "0.48784703", "0.48733237", "0.48697686", "0.4868856", "0.4867483", "0.48637223", "0.48636097", "0.4861814", "0.48563644" ]
0.59351087
1
Prepare a combination list of looping indices.
def get_combo_list(self, loopdict, pegged=0):
    combolist=list()
    flatlists=list()
    loopkeys = list(loopdict.keys())
    loopkeys.sort()
    if pegged == 0:
        for loopkey in loopkeys:
            numloop = len(loopdict[loopkey]['looplist'])
            loopct=0
            flatlist=list()
            while loopct < numloop:
                flatlist.append(str(loopkey) + '-' + str(loopct))
                loopct = loopct + 1
            flatlists.append(flatlist)
        import itertools
        prod_list = itertools.product(*flatlists)
        stopiter = 0
        while not stopiter:
            try:
                mycomb = prod_list.next()
            except StopIteration:
                stopiter = 1
            if stopiter == 0:
                combolist.append(list(mycomb))
    elif pegged == 1:
        if len(loopkeys) == 0:
            return combolist #Empty list
        numloop = len(loopdict[loopkeys[0]]['looplist']) #all same len
        numct=0
        while numct < numloop:
            flatlist=list()
            for loopkey in loopkeys:
                flatlist.append(str(loopkey) + '-' + str(numct))
            numct = numct + 1
            combolist.append(flatlist)
    #print "TTM DEBUG: ", flatlists
    return combolist
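A minimal sketch of what the pegged == 0 branch produces, using a hypothetical two-key loopdict; only the 'looplist' entry is assumed, since that is all the method reads (note the method itself still uses the Python 2 iterator call prod_list.next()):

    import itertools

    # Hypothetical inputs: each loop key carries a 'looplist' of candidate values.
    loopdict = {
        'pressure':    {'looplist': [1, 2, 3]},
        'temperature': {'looplist': [300, 400]},
    }

    # pegged == 0 builds the full cross product of '<key>-<position>' index strings.
    flatlists = []
    for loopkey in sorted(loopdict):
        numloop = len(loopdict[loopkey]['looplist'])
        flatlists.append(['%s-%d' % (loopkey, ct) for ct in range(numloop)])

    combolist = [list(combo) for combo in itertools.product(*flatlists)]
    print(combolist[:2])
    # [['pressure-0', 'temperature-0'], ['pressure-0', 'temperature-1']]

With pegged == 1 the method instead zips the lists position by position, so every key must carry a looplist of the same length.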
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_index_iterator(indexes, length):\n return combinations(indexes, length)", "def construct_indices(after_pooling):\n our_indices = np.zeros_like(after_pooling, dtype=np.int64)\n batch_num, channel_num, row_num, col_num = after_pooling.shape\n for batch_id in range(batch_num):\n for channel_id in range(channel_num):\n for row_id in range(row_num):\n for col_id in range(col_num):\n our_indices[batch_id, channel_id, row_id, col_id] = col_num * 2 * 2 * row_id + 2 * col_id\n return torch.from_numpy(our_indices)", "def create_combinations(rnd_sol_1, start_id):\n\n store_all_combinations = []\n rnd_sol_1 = rnd_sol_1[1:]\n route_index = range(len(rnd_sol_1)-1)\n list_of_n = list(combinations(route_index, 2))\n\n for swap1, swap2 in list_of_n:\n x_swap = rnd_sol_1[:]\n x_swap[swap1], x_swap[swap2] = x_swap[swap2], x_swap[swap1]\n store_all_combinations.append([start_id] + x_swap)\n \n return store_all_combinations", "def hoggar_indices():\n return list(product([0,1], repeat=6))", "def add_self_loops_to_indexlist(indices):\r\n max_ind = np.max(indices)\r\n self_loops = np.arange(max_ind+1,dtype=np.int)\r\n self_loops = np.concatenate([np.expand_dims(self_loops,axis=-1),np.expand_dims(self_loops,axis=-1)],axis=-1)\r\n added_loops = np.concatenate([indices,self_loops],axis=0)\r\n clean_index = np.unique(added_loops,axis=0)\r\n index_order = np.argsort(clean_index[:,0])\r\n out_indices = clean_index[index_order]\r\n return out_indices", "def make_indices(dimensions):\n\n level = len(dimensions)\n\n if level == 1:\n return range(dimensions[0])\n\n indices = [[]]\n\n while level:\n\n _indices = []\n\n for j in range(dimensions[level - 1]):\n\n _indices += [[j] + i for i in indices]\n\n indices = _indices\n\n level -= 1\n\n try:\n return [tuple(i) for i in indices]\n except TypeError:\n return indices", "def _create_two_group_jackknife_indexes(x0, x1, is_paired):\n\n if is_paired and len(x0) == len(x1):\n out = list(zip([j for j in create_jackknife_indexes(x0)],\n [i for i in create_jackknife_indexes(x1)]\n )\n )\n else:\n jackknife_c = list(zip([j for j in create_jackknife_indexes(x0)],\n [i for i in create_repeated_indexes(x1)]\n )\n )\n\n jackknife_t = list(zip([i for i in create_repeated_indexes(x0)],\n [j for j in create_jackknife_indexes(x1)]\n )\n )\n out = jackknife_c + jackknife_t\n del jackknife_c\n del jackknife_t\n\n return out", "def expander(i_start, j_end, length):\n\n # create list of paired i indices\n i_s = list(np.arange(i_start, i_start + length))\n\n # create list of corresponding j indices:\n j_s = list(np.arange(j_end, j_end + length))\n\n # form list of tuple pairs\n pairs = list(zip(i_s, j_s))\n\n return pairs", "def create_iterables(self):\n iterables = [[0, 1], [0, 1], [0, 1],\n [0, 1], [0, 1], [0, 1],\n [0, 1], [0, 1], [0, 1],\n [0, 1], [0, 1]]\n self.states = []\n for t in itertools.product(*iterables):\n self.states.append(t)", "def master_ndindex(self): # itermaster_indices(self):\n return itertools_product(\n *[range(*r) for r in self.location]\n ) # TODO check", "def __get_all_combinations(self, list_of_items):\r\n return [itertools.combinations(list_of_items, index+1)\r\n for index in range(len(list_of_items))]", "def make_indexed_list(x_iter):\n return [\"{} {}\".format(bracket_pad_num(i + 1, len(x_iter)), x_iter[i]) for i in range(len(x_iter))]", "def crearIndices(self):\n l = self.encontrarCaracteres()\n i=0\n for c in l:\n self.indices[c] = i\n i+=1", "def play(self, indices) :\n cards = [self.cards[i] for i in indices]\n combination = Combination(cards)\n\n 
return combination", "def _build_iteration_indexes(data_len, num_iterations,\n verbose=False, random_generator=None,\n use_epochs=False):\n if use_epochs:\n iterations_per_epoch = arange(data_len)\n if random_generator:\n random_generator.shuffle(iterations_per_epoch)\n iterations = tile(iterations_per_epoch, num_iterations)\n else:\n iterations = arange(num_iterations) % data_len\n if random_generator:\n random_generator.shuffle(iterations)\n if verbose:\n return _wrap_index__in_verbose(iterations)\n else:\n return iterations", "def _generate_combinations(self, param_idx, params):\n\n if param_idx == len(self.grid) - 1:\n # last parameter, just return list of values for this parameter\n return [[value] for value in self.grid[params[param_idx]]]\n else:\n subcombinations = self._generate_combinations(param_idx + 1, params) # returns list of param combinations\n result = []\n\n # iterate over all values of current parameter\n for value in self.grid[params[param_idx]]:\n for subcombination in subcombinations:\n result.append([value] + subcombination)\n\n return result", "def indices_grid(frame_len, frame_step, num_frames):\n indices = np.tile(np.arange(0, frame_len), (num_frames, 1)) + \\\n np.tile(np.arange(0, num_frames * frame_step, frame_step), (frame_len, 1)).T\n indices = np.array(indices, dtype=np.int32)\n return indices", "def part_2():\n return itertools.permutations(range(5, 10))", "def part_1():\n return itertools.permutations(range(5))", "def neighbours(assignment): \n for index_1, index_2 in itertools.combinations(range(len(assignment)), 2):\n new_assign = list(assignment)\n new_assign[index_1], new_assign[index_2] = new_assign[index_2], new_assign[index_1]\n yield tuple(new_assign)", "def _pair_indices(self):\n indices_src = []\n indices_dst = []\n for i in range(self.walk_len):\n for j in range(max(i - self.l, 0), i):\n indices_src.append(i)\n indices_dst.append(j)\n for j in range(i + 1, min(i + self.r + 1, self.walk_len)):\n indices_src.append(i)\n indices_dst.append(j)\n return indices_src, indices_dst", "def genSubset2(L):\n import itertools\n result = []\n for i in range(len(L) + 1):\n result += list(itertools.combinations(L, i))\n return result", "def create_repeated_indexes(data):\n from numpy import arange\n\n index_range = arange(0, len(data))\n return (index_range for i in index_range)", "def make_combinations(items):\n\n def inner(items, r):\n \"\"\"\n recursively yields partitioned remainders of original partition lists\n \"\"\"\n items = set(items)\n if not len(items):\n yield ()\n return\n first = next(iter(items))\n remainder = items.difference((first, ))\n for combination in combinations(remainder, r-1):\n first_subset = (first, ) + combination\n for partition in inner(remainder.difference(combination), r):\n yield (first_subset, ) + partition\n\n def outter(items, r):\n \"\"\"\n combines partition lists\n \"\"\"\n items = set(items)\n for i in range(len(items), -1, -r):\n if i == 0:\n for partition in inner(items, r):\n yield partition\n elif i != r:\n for combination in combinations(items, i):\n for partition in inner(items.difference(combination), r):\n yield partition + (combination, )\n\n # step through length of origin combination partitions to ensure full list\n for i in range(1, len(items)):\n gen = outter(items, i)\n for row in gen:\n yield row", "def _combinations(n_features, n_args, interaction_only):\n comb = combinations if interaction_only else combinations_w_r\n return comb(range(n_features), n_args)", "def get_interbreeding_indexes(index_count, 
including_same = False, cross = False, deep = 0) -> 'list':\n indexes = []\n correct = 1 if not including_same else 0\n for i in range(index_count - correct):\n index_from = deep\n index_to = i + deep + correct\n\n indexes.append([\n index_from,\n index_to\n ])\n\n if cross and index_from != index_to:\n indexes.append([\n index_to,\n index_from\n ])\n\n if index_count - correct > 1:\n indexes += get_interbreeding_indexes(index_count - 1, including_same, cross, deep + 1)\n\n return indexes", "def combinations(arrays):\n return np.array(np.meshgrid(*arrays)).T.reshape(-1, len(arrays))", "def create_all_llrs_combinations(self):\n binary_combinations = np.array(list(itertools.product(range(2), repeat=self.rate_inverse)))\n bpsk_mapped = (-1) ** binary_combinations\n self.all_llrs_combinations_mat = np.fliplr(np.flipud(bpsk_mapped)).copy()\n self.all_llrs_combinations = torch.Tensor(self.all_llrs_combinations_mat).T", "def swp_combo_iter(self) -> Iterable[Tuple[Any, ...]]:\n return itertools.product(*(self._sweep_params[var] for var in self._swp_var_list))", "def makeIndexMap(self):\n\t\tn = self.numRects\n\t\thalfList = [[(j,n-1-i+j) for j in range(i+1)] for i in range(n)]\n\t\tfullList = halfList + [[(j[1],j[0]) for j in i] for i in halfList[n-2::-1]]\n\t\treturn fullList", "def generate_permutation(self, idxs):\n ret = _CAPI_DGLNDArrayPartitionGeneratePermutation(\n self._partition, F.zerocopy_to_dgl_ndarray(idxs)\n )\n return F.zerocopy_from_dgl_ndarray(ret(0)), F.zerocopy_from_dgl_ndarray(\n ret(1)\n )", "def combinations( l1, l2 ):\n for i in range(len(l1)):\n yield zip( l1,l2)\n l1.insert(0,l1.pop())", "def create_jackknife_indexes(data):\n from numpy import arange, delete\n\n index_range = arange(0, len(data))\n return (delete(index_range, i) for i in index_range)", "def cartesianproduct(lists):\r\n return reduce(appendEs2Sequences,lists,[])", "def join_indices(\n self, *inds, dirs=None, return_transposed_shape_data=False\n ):\n # Format index_batches to be a list of lists of indices.\n if isinstance(inds[0], Iterable):\n index_batches = list(map(list, inds))\n else:\n index_batches = [list(inds)]\n # Remove empty batches.\n index_batches = [b for b in index_batches if len(b) > 0]\n\n if dirs is None:\n warnings.warn(\n \"In join_indices, dirs was not given and is thus generated to \"\n \"be [1,...,1].\"\n )\n dirs = [1] * len(index_batches)\n else:\n if not isinstance(dirs, Iterable):\n dirs = [dirs]\n assert len(dirs) == len(index_batches)\n\n if not index_batches:\n # Nothing to be done. However, join_indices should always return an\n # array independent of the original, so we take a view.\n if return_transposed_shape_data:\n return (\n self.view(),\n self.shape.copy(),\n self.qhape.copy(),\n self.dirs.copy(),\n )\n else:\n return self.view()\n\n # Group dirs together with index_batches so that they get sorted\n # together.\n index_batches_with_dirs = [\n b + [d] for b, d in zip(index_batches, dirs)\n ]\n\n # Create the permutation for transposing the tensor. At the same time\n # transpose and sort index_batches.\n # We create trivial one-index batches for all the indices that are not\n # going to be joined, so that all indices are in some batch. 
Then we\n # sort the batches by the first index in each one.\n joined = set(sum(index_batches, []))\n not_joined = [[i] for i in range(len(self.shape)) if i not in joined]\n all_batches = not_joined + index_batches_with_dirs\n all_batches.sort(key=opr.itemgetter(0))\n # The batches are now in right order, and we just have to turn this\n # into a permutation of the indices.\n # The a[:-1] conditional statement leaves out the dirs when creating\n # the permutation.\n perm = sum((a[:-1] if len(a) > 1 else a for a in all_batches), [])\n # Filter out the trivial batches we added a few lines above.\n index_batches_with_dirs = [b for b in all_batches if len(b) > 1]\n # Separate the dirs and the batches now that sorting is done.\n dirs = [b[-1] for b in index_batches_with_dirs]\n index_batches = [b[:-1] for b in index_batches_with_dirs]\n # Sort the indices inside each batch according to the permutation perm.\n index_batches = [list(map(perm.index, b)) for b in index_batches]\n res = self.transpose(perm)\n\n if return_transposed_shape_data:\n transposed_shape = res.shape.copy()\n transposed_qhape = res.qhape.copy()\n transposed_dirs = res.dirs.copy()\n\n # For each batch that consists of a single index, we only need to flip\n # its direction to match what's in dirs. Do that, and then remove those\n # batches from index_batches.\n # We traverse index_batches in reverse order so that removing elements\n # from the end doesn't mess up the loop.\n for i, b in reversed(tuple(enumerate(index_batches))):\n if len(b) == 1:\n if res.dirs[b[0]] != dirs[i]:\n res = res.flip_dir(b[0])\n del dirs[i]\n del index_batches[i]\n\n if not index_batches:\n # If no indices are left, there is no need to join anything.\n if return_transposed_shape_data:\n return res, transposed_shape, transposed_qhape, transposed_dirs\n else:\n return res\n\n # Find out the remaining, new indices after the joining.\n cumulant = 0\n new_inds = []\n for b in index_batches:\n new_inds.append(b[0] - cumulant)\n cumulant += len(b) - 1\n\n # Reverse index_batches and dirs for the future so that we first\n # process the indices at the end.\n index_batches.reverse()\n dirs.reverse()\n\n # For every non-zero block in res, reshape the block and add it to the\n # right key in new_sects. However, every item in new_sects will consist\n # of several blocks that need to be concatenated. Because of this,\n # new_sects[k] is a list of lists [k_part1, k_part2, ..., k_partn,\n # reshaped_block], where k_parts are the qnums of the indices that were\n # joined. 
Thus by later sorting these lists we get them in the right\n # order for concatenation.\n new_sects = {}\n # Go through every valid index instead of every key in sects, because\n # blocks of zeros may be concatenated with other blocks.\n valid_ks = (\n qcomb\n for qcomb in itt.product(*res.qhape)\n if res.is_valid_key(qcomb)\n )\n del_slcs = [slice(b[1], b[-1] + 1) for b in index_batches]\n get_slcs = [slice(b[0], b[-1] + 1) for b in index_batches]\n dir_batches = [[res.dirs[i] for i in batch] for batch in index_batches]\n for k in valid_ks:\n v = res[k]\n new_k = list(k)\n new_shp = list(v.shape)\n k_parts = []\n for b, dir_b, dir_new, del_slc, get_slc in zip(\n index_batches, dir_batches, dirs, del_slcs, get_slcs\n ):\n k_part = k[get_slc]\n k_parts.append(k_part)\n k_part = map(opr.mul, k_part, dir_b)\n new_qnum = self._qod_func(sum(k_part) * dir_new)\n new_k[b[0]] = new_qnum\n del new_k[del_slc]\n new_shp[b[0]] = fct.reduce(opr.mul, v.shape[get_slc])\n del new_shp[del_slc]\n k_parts.reverse()\n new_k = tuple(new_k)\n l = new_sects.setdefault(new_k, [])\n l.append(k_parts + [v.reshape(new_shp)])\n\n # Concatenator is a helper function that recursively concatenates the\n # pieces together. It is called once for every index in a batch.\n def concatenator(l, i=0):\n if i == len(l[0]) - 2:\n l = [el[-1] for el in l]\n else:\n l = [tuple(g) for k, g in itt.groupby(l, opr.itemgetter(i))]\n l = tuple(map(lambda k: concatenator(k, i=i + 1), l))\n return np.concatenate(l, new_inds[i])\n\n for k, v in new_sects.items():\n # These are the new blocks, just need to concatenate.\n v.sort()\n new_sects[k] = concatenator(v)\n res.sects = new_sects\n\n # Compute the new shape, qhape and dir.\n for new_d, batch in zip(dirs, index_batches):\n product_of_tuple = lambda l: fct.reduce(opr.mul, l)\n cart_prod_of_dims = itt.product(\n *tuple(res.shape[i] for i in batch)\n )\n new_dim = list(map(product_of_tuple, cart_prod_of_dims))\n\n qhps = ([q * res.dirs[i] for q in res.qhape[i]] for i in batch)\n cartesian_product_of_qims = itt.product(*tuple(qhps))\n new_qim = map(sum, cartesian_product_of_qims)\n new_qim = (q * new_d for q in new_qim)\n new_qim = list(map(self._qod_func, new_qim))\n\n # Still need to concatenate.\n # Sort by new_qim.\n if new_qim:\n new_qim, new_dim = zip(*sorted(zip(new_qim, new_dim)))\n new_qim, new_dim = list(new_qim), list(new_dim)\n n = 0\n q = new_qim[n]\n i = 1\n while i < len(new_qim):\n if new_qim[i] == q:\n new_dim[n] += new_dim[i]\n del new_qim[i]\n del new_dim[i]\n else:\n n = i\n q = new_qim[n]\n i += 1\n\n res.shape[batch[0]] = new_dim\n del res.shape[batch[1] : batch[0] + len(batch)]\n res.qhape[batch[0]] = new_qim\n del res.qhape[batch[1] : batch[0] + len(batch)]\n res.dirs[batch[0]] = new_d\n del res.dirs[batch[1] : batch[0] + len(batch)]\n\n if return_transposed_shape_data:\n return res, transposed_shape, transposed_qhape, transposed_dirs\n else:\n return res", "def indices(shape):\n iterables = [range(v) for v in shape]\n return product(*iterables)", "def indices(shape):\n iterables = [range(v) for v in shape]\n return product(*iterables)", "def __combination(orgset, k):\n if k == 1:\n for i in orgset:\n yield (i,)\n elif k > 1:\n for i, x in enumerate(orgset):\n # iterates though to near the end\n for s in __combination(orgset[i + 1 :], k - 1):\n yield (x,) + s", "def gen_tuples_for_loops(range_len: int, limit: int) -> list:\n ranges = [(n * range_len, (n + 1) * range_len) for n in range(limit // range_len)]\n if limit % range_len > 0:\n ranges.append((range_len * 
(limit // range_len), limit))\n return ranges", "def generate_direction_masks(cols):\n return list(product('01', repeat=cols))", "def build_amg_index_sets(L_sizes):\n neqns = L_sizes[0][0]\n velocityDOF=[]\n for start in range(1,3):\n velocityDOF.append(np.arange(start=start,\n stop=1+neqns,\n step=3,\n dtype='i'))\n velocityDOF_full=np.vstack(velocityDOF).transpose().flatten()\n velocity_u_DOF = []\n velocity_u_DOF.append(np.arange(start=0,\n stop=2*neqns//3,\n step=2,\n dtype='i'))\n velocity_u_DOF_full = np.vstack(velocity_u_DOF).transpose().flatten()\n velocity_v_DOF = []\n velocity_v_DOF.append(np.arange(start=1,\n stop=1+2*neqns//3,\n step=2,\n dtype='i'))\n velocity_v_DOF_full = np.vstack(velocity_v_DOF).transpose().flatten()\n isvelocity = PETSc.IS()\n isvelocity.createGeneral(velocityDOF_full)\n isu = PETSc.IS()\n isu.createGeneral(velocity_u_DOF_full)\n isv = PETSc.IS()\n isv.createGeneral(velocity_v_DOF_full)\n return [isvelocity, isu, isv]", "def crange(*args):\r\n result = [[]]\r\n for arg in args:\r\n result = [x + [y] for x in result for y in range(arg)]\r\n return result", "def pattern_list(k):\r\n p_list=[]\r\n for i in list(itertools.product('ACGT', repeat=k)):\r\n x = ''.join(i)\r\n p_list.append(x)\r\n return p_list", "def pick_loop_indices(self, loop_index, layer_index=None, entity_index=None):\n return (loop_index, layer_index)", "def combinations(iterable, r):\n pool = tuple(iterable)\n n = len(pool)\n if r > n:\n return\n indices = list(range(r))\n yield tuple(pool[i] for i in indices)\n while True:\n for i in reversed(range(r)):\n if indices[i] != i + n - r:\n break\n else:\n return\n indices[i] += 1\n for j in range(i+1, r):\n indices[j] = indices[j-1] + 1\n yield tuple(pool[i] for i in indices)", "def representative_combos(list_1: list[str], list_2: list[str]) -> list[tuple[str, str]]:\n all_selected_combinations: list[tuple[str, str]] = []\n for i in range(max(len(list_1), len(list_2))):\n all_selected_combinations.append((list_1[i % len(list_1)], list_2[i % len(list_2)]))\n return all_selected_combinations", "def _generate_iterator(self) -> Iterable:\n params: List[Tensor] = []\n for angle_range in self._ranges:\n lin_space: Tensor = linspace(angle_range[0], angle_range[1], steps=self._num_steps)\n params.append(lin_space)\n power: int\n dims: int\n for i in range(0, self._num_params):\n power = len(self._ranges) - 1 - i\n dims = i\n params[i] = params[i].repeat_interleave(self._num_steps ** power)\n params[i] = params[i].broadcast_to((self._num_steps ** dims, self._num_steps ** (power + 1))).flatten()\n return zip(*params)", "def gen_input_permutation():\n return [(arch, src, dst) for arch in architecture.ARCH_ACCEPTED for src in PRODUCT_TYPE for dst in PRODUCT_TYPE]", "def seq_along(along_with: Iterable[Any], base0_: bool = None) -> ArrayLikeType:\n base0_ = get_option(\"index.base.0\", base0_)\n return Array(range(len(along_with))) + int(not base0_)", "def CreateIndicesForBasisFilter(indices):\n indices = np.array(indices, dtype = \"uint32\")\n for ind, it in enumerate(indices):\n indices[ind] = ind * it\n return indices", "def base_idx_neighbor_idx_simplices(n_base, n_neighbors=5, n_dim=2):\n combinations = np.array(list(itertools.combinations(np.arange(1,\n n_neighbors),\n n_dim-1))).astype(int)\n base_indices = np.repeat(np.arange(n_base), len(combinations))\n all_simplices = np.vstack([base_indices,\n np.tile(combinations, (n_base, 1)).T]).T\n #print('simplices', os.getpid(), len(all_simplices), flush=True)\n return all_simplices", "def cands(inputs):\n 
# The below could probably be simplified a bit....\n return map(''.join, list(itertools.chain.from_iterable([ map (list, (itertools.permutations(inputs, x))) for x in range(4, len(inputs)+1)])))", "def get_all_possible_os_pairings(indices_list):\n pairs = []\n itr = 0\n\n for links in indices_list:\n\n for item in links:\n for i in range(itr,len(links)):\n\n if item == links[i]:\n continue\n else:\n pair = item, links[i]\n pairs.append(pair)\n return pairs", "def enumerate_list(seq):\n return zip(xrange(len(seq)), seq)", "def CombinationMethods(nums, elements_number):\n res = list(c(nums, elements_number))\n return res, Combination(len(nums), elements_number)", "def get_combinations(self):\n all_steps = self.do_steps()\n self.option = [k for k, v in all_steps.items()]\n result = itertools.product(*(v for k, v in all_steps.items()))\n return result", "def get_indices(max_passes: int) -> Iterable[Tuple[int, int]]:\n base_indices = (0, max_passes)\n pw_indices = (max_passes, max_passes * 2)\n ip_indices = (max_passes * 2, max_passes * 3)\n strand_indices = (max_passes * 3, max_passes * 4)\n ccs_indices = (max_passes * 4, max_passes * 4 + 1)\n sn_indices = (max_passes * 4 + 1, max_passes * 4 + 5)\n return base_indices, pw_indices, ip_indices, strand_indices, ccs_indices, sn_indices", "def _make_index_list(self, used_sample_id_list, num_id_repeats=1):\n if used_sample_id_list is None:\n self.index_list = [i for i in range(self.data.shape[0])]\n\n else:\n self.index_list = [i for i in range(self.data.shape[0])\n if self.data[i][DATA_ID_INDEX] in used_sample_id_list\n ]\n\n if len(self.index_list) != len(used_sample_id_list):\n warnings.warn(\"Not all images found. \\\n Found: {}, requested: {}\".format(len(self.index_list),\n len(used_sample_id_list))\n )\n\n # for small datasets,\n # the ids can be repeated to get a reasonable batch size working\n self.index_list = self.index_list*num_id_repeats", "def indicated_combinations(n, k):\n # - This singleton list is mutated and yielded, in order not to waste too\n # much memory.\n # - * is safe as integers are immutable\n # - I'm using integers so it's easier to skim when debugging\n indicator = [0] * n\n for combination in combinations(range(n), k):\n for i in combination:\n indicator[i] = 1\n yield indicator\n for i in combination:\n indicator[i] = 0", "def available_combinations(self):\n result = []\n\n for i in range(3):\n for j in range(3):\n if self.board[i][j] == 0:\n result.append((i, j))\n\n return result", "def make_range_temp_K_prot(DB_version, DB_type, i1, i2):\n\n for index in range(i1, i2):\n print(index)\n make_temp_K_prot(DB_version, DB_type, index)", "def get_array_index_permutations(param):\n indices = list()\n\n try:\n for d in reversed(param.get(\"dimensions\")):\n i = list()\n for x in range(0, d.get(\"len\")):\n i.append(x)\n indices.append(i)\n\n array_dereferences = list(itertools.product(*indices))\n return array_dereferences\n\n except TypeError:\n return list()", "def genAllPerm(nx, compk, base):\n nbase = len(base)\n basePerm = [list(item) for item in uperm(base)]\n nperm = len(basePerm)\n perm = []\n for indp in range(nperm):\n perm.append(np.zeros(nx))\n for indb in range(nbase):\n perm[indp][np.where(compk == indb)[0]] = basePerm[indp][indb]\n return perm, nperm", "def make_bindings(lumpy, iterator):\n seq = [Binding(lumpy, Index(lumpy, k), make_thing(lumpy, v))\n for k, v in iterator]\n return seq", "def compute_combinations(items: List[Union[List[Any], Tuple]], n: int) -> List[List[Any]]:\n return [chunks[i:i + n] for 
chunks in items for i in range(len(chunks) - (n - 1))]", "def swap_indices(a_list):\n new_list = []\n for i in range(len(a_list[0])):\n new_list.append([a_list[j][i] for j in range(len(a_list))])\n return new_list", "def selections(xs):\n for i, x in enumerate(xs):\n yield (x, xs[:i] + xs[i + 1:])", "def powerset(xs):\n cards = list(reversed(xrange(len(xs)))) + [len(xs)]\n return list(chain.from_iterable(combinations(xs, n) for n in cards))", "def get_combo(un_lit):\n\n done_lit = []\n li_count = len(un_lit)\n\n for letter in un_lit: # for each letter in the provided\n placeholder = 0\n for num in range(li_count) # for each pos in list\n if letter.index == placeholder:\n temp_lit = \n\n elif letter.index > placeholder:\n \n elif letter.index < placeholder:\n\n done_lit.append(temp_lit)\n placeholder += 1", "def generate_index(self):\n begin_o, end_o, begin_a, end_a = 0, 0, 0, 0\n for obs_space, act_space in zip(self.env.observation_space, self.env.action_space):\n end_o = end_o + obs_space.shape[0]\n if isinstance(act_space, Box):\n end_a = act_space.shape[0]\n else:\n end_a = act_space.n\n range_o = (begin_o, end_o)\n range_a = (begin_a, end_a)\n self.observation_index.append(range_o)\n self.action_index.append(range_a)\n begin_o = end_o\n begin_a = end_a", "def mainIndices(self):\n return self.i1, self.i2", "def corner_combinations(zdim: int):\n return combinations(range(zdim), 2)", "def generarCombinaciones(self):\n combi = [list(x) for x in itertools.combinations(self.ResultConsultaLibre, 2)]\n self.CombiConsultaLibre=combi\n #print(self.CombiConsultaLibre)", "def tc_gen(n):\r\n comb = (list(tuple) for tuple in itertools.product([True,False], repeat=n))\r\n return list(comb)", "def create_board(size) -> list:\n return list(itertools.product([i for i in range(size)], repeat=2))", "def gen(length):\n return itertools.product(LABELS,repeat=length)", "def coset_reps(self):\n from .all import SL2Z\n N = self.level()\n if N == 1: # P1List isn't very happy working modulo 1\n yield SL2Z([1,0,0,1])\n else:\n for z in sage.modular.modsym.p1list.P1List(N):\n yield SL2Z(lift_to_sl2z(z[0], z[1], N))", "def generatePossibleEncounters(self):\n possibleEncounters = [i for i in combinations(self.freeMonomers,r=2)]\n self.possibleEncounters = np.array(possibleEncounters)", "def generate_pairs(self, _list_d):\n\n length = len(_list_d)\n result_list = {}\n\n for i in range(length):\n for j in xrange(i+1,length):\n l = len(result_list)\n result_list[l] = ((i, _list_d[i]),(j, _list_d[j]))\n\n return result_list", "def cycles(p: List[int]) -> List[Set[int]]:\n validate_permutation(p)\n\n todo = list(range(len(p)))\n cycles = []\n\n while todo:\n start = todo.pop(0)\n\n cycle = (start,)\n position = p[start]\n\n while position != start:\n todo.remove(position)\n cycle += (position, )\n position = p[position]\n\n cycles.append(cycle)\n\n return cycles", "def index(l_: List[int], i: Tuple[int, ...]) -> Tuple[int, ...]:\n return tuple([l_[x] for x in i])", "def section_4_9():\n from itertools import permutations\n from itertools import combinations\n from itertools import combinations_with_replacement\n\n items = ['a', 'b', 'c']\n\n def test1():\n for p in permutations(items):\n print(p)\n\n def test2():\n for p in combinations(items, 3):\n print(p)\n print()\n for p in combinations(items, 2):\n print(p)\n print()\n for p in combinations(items, 1):\n print(p)\n print()\n for p in combinations_with_replacement(items, 3):\n print(p)", "def vertex_cyclic_swap(nvars, l, i):\n if i == 0 or not l:\n return l\n ll = 
[]\n F = l[0].parent()\n for v in l:\n assert not v[-i:]\n ll.append(F(tuple(v[-i:]) + tuple(v[:-i])))\n for v in ll: v.set_immutable()\n return tuple(ll)", "def _build_ID_sets(self):\n raise NotImplementedError", "def generate_combinations(k: int, n: int):\n result = list()\n for i in range(1, k + 1):\n for bits in itertools.combinations(range(n), i):\n s = [0] * n\n for bit in bits:\n s[bit] = 1\n result.append(s)\n\n return pd.DataFrame(result)", "def generate_pairs(number: int) -> List[List[int]]:\n return [\n [top, inner]\n for top in range(number + 1)\n for inner in range(top, number + 1)\n ]", "def gen_k_ary_ind_from_cliques(k: int, E: Iterable[Edge]) -> FrozenSet[Edge]:\n result = set()\n for i in E:\n result.update(map(Edge, itertools.permutations(i, k)))\n return frozenset(result)", "def get_triplet_composition(seq):\n out = [] \n for i in range(len(seq)):\n \n if i+3 > len(seq):\n break\n out.append(seq[i:i+3])\n return out", "def _repack(linear, n=3):\n return list(zip(*[iter(linear)] * n))", "def _compute_indices(self):\n self.indices = np.arange(len(self.im_filenames))\n np.random.shuffle(self.indices)", "def alt_bases_from_indices(alt_allele_indices, alternate_bases):\n alleles = [alternate_bases[i] for i in alt_allele_indices]\n # Avoiding '/' to support use in file paths.\n return '-'.join(alleles)", "def scatter_counts_to_indices(input: torch.LongTensor) -> torch.LongTensor:\n return torch.repeat_interleave(torch.arange(input.size(0), device=input.device), input).long()", "def combinations(*comb, **kw):\n return _fixture_functions.combinations(*comb, **kw)", "def mapping(index: Union[int, List[int]]) -> Union[int, List[int]]:\n if isinstance(index, int):\n return indexMapping[index]\n else:\n mappedList = []\n for item in index:\n mappedList.append(indexMapping[item])\n return mappedList", "def generate_grid_search_trials(flat_params, nb_trials):\n trials = list(itertools.product(*flat_params))\n if nb_trials:\n trials = trials[0:nb_trials]\n return trials", "def AllCombinations(data, comblength):\n return [c for c in itertools.combinations(data, comblength)]", "def oracle(indices):\n return [all_states[inds] for inds in indices]", "def oracle(indices):\n return [all_states[inds] for inds in indices]", "def combinarink(list, k):\n global visited\n global indexes\n visited = [0 for x in range(0, len(list) + 1)] # init with 0\n indexes = [x for x in range(0, len(list) + 1)] # init indexes with 0...n-1\n output = combinari(1, len(list), k, list, [])\n print (output)", "def generateCombos(vars,constants):\n # SUPER NOT GENERALIZED---TOO LATE AT NIGHT FOR ME TO DO RECURSIVE ALGORITHMS\n assert len(vars) == 2 and len(constants) == 2\n combs = []\n for c1 in constants:\n for c2 in constants:\n combs.append(Grounding([(vars[0], c1), (vars[1], c2)]))\n return combs" ]
[ "0.6275713", "0.6154315", "0.6077849", "0.6049278", "0.5934963", "0.5888981", "0.5846226", "0.5782302", "0.57822573", "0.5774816", "0.5757438", "0.57275736", "0.56466776", "0.56323755", "0.56219804", "0.55682474", "0.5567099", "0.5565119", "0.55425763", "0.54685265", "0.5451253", "0.5448279", "0.5433632", "0.54202455", "0.5411361", "0.5399441", "0.53852165", "0.53823936", "0.5360601", "0.5348983", "0.53289795", "0.5315647", "0.531157", "0.53089136", "0.5304817", "0.5302938", "0.5302938", "0.5289594", "0.5288042", "0.52839655", "0.5283756", "0.52822685", "0.52821314", "0.5264406", "0.5248046", "0.52469116", "0.5230753", "0.52299577", "0.5222358", "0.5212725", "0.52115744", "0.5211414", "0.52005893", "0.5200155", "0.5180936", "0.51791227", "0.51663804", "0.51557815", "0.5146254", "0.5115966", "0.5111501", "0.51077205", "0.510274", "0.5099792", "0.5082893", "0.5081218", "0.5045176", "0.5041842", "0.5031329", "0.50190514", "0.50111425", "0.50106055", "0.5008347", "0.500133", "0.49996778", "0.4998749", "0.49974737", "0.49906215", "0.49904156", "0.4982831", "0.49783695", "0.49756548", "0.49677452", "0.49676543", "0.4963131", "0.49626628", "0.49601597", "0.4955674", "0.49479714", "0.49474964", "0.49281973", "0.49258283", "0.49252108", "0.49246103", "0.49239996", "0.4916922", "0.49101958", "0.49101958", "0.49059618", "0.4904312" ]
0.51132363
60
Combine two pegged lists and one independent list.
def combine_three_combo_lists(self, indeplist, peglist1, peglist2):
    templist=list()
    threelist=list()
    templist = self.combine_combo_lists(indeplist, peglist1)
    threelist = self.combine_combo_lists(templist, peglist2)
    return threelist
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def merge_lists(l1, l2):\n return [ *l1, *l2 ]", "def _merge_lists(cls, li1, li2):\n if not li1:\n return li2[:]\n elif not li2:\n return li1[:]\n else:\n li = li1[:]\n for el in li2:\n if el not in li:\n li.append(el)\n return li", "def listExpend(input_list_1, input_list_2):\r\n output_list = []\r\n for element_1, element_2 in zip(input_list_1, input_list_2):\r\n output_list += [element_1]*element_2\r\n return output_list", "def merge(*, list1 : Union[List[Any], ConduitVariable], list2 : Union[List[Any], ConduitVariable]) -> List[Any]:\n return [*list1, *list2]", "def combinelists(oldlst, newlst):\n combined = oldlst\n if newlst not in oldlst:\n combined.append(newlst)\n return combined", "def concat_lists(list1, list2):\n\n # return list1 + list2\n for item in list2:\n list1.append(item)\n\n return list1", "def listops_union(list_a,list_b):\r\n\r\n retlist = list_a[:]\r\n for item in list_b: \r\n if item not in list_a:\r\n retlist.append(item)\r\n\r\n # ensure that a duplicated item in list_a is only listed once\r\n return listops_uniq(retlist)", "def add_lists(first, second):\n\n copy = []\n for (i, j) in zip(first, second):\n i += j\n copy.append(i)\n\n return copy", "def concat(a: list[int], b: list[int]) -> list[int]:\n result: list[int] = list()\n\n for x in a:\n result.append(x)\n for y in b: \n result.append(y)\n return result", "def merge(list1, list2):\n holding = list1.to_list()\n [holding.append(i) for i in list2.to_list()]\n # for i in list2.to_list():\n # holding.append(i)\n holding = sorted(holding)\n\n output = LinkedList(Node(holding[0]))\n for i in holding[1:]:\n output.append(i)\n return output", "def _merge_lists(list1, list2):\n for v2 in reversed(list2):\n if isinstance(v2, Descriptor):\n if v2 in list1:\n v1 = list1.pop(list1.index(v2))\n list1.insert(0, v1.merge(v2))\n else:\n list1.insert(0, v2)\n elif isinstance(v2, list):\n raise CekitError(\"Cannot merge list of lists\")\n else:\n if v2 not in list1:\n list1.insert(0, v2)\n\n return list1", "def interleave(list1, list2):\r\n result = [] #Create an empty list which later we use it to add our result in it.\r\n extra = [] #Create an empty list which later we use it to sort out the extra cards.\r\n if len(list2) > len(list1):\r\n new_list = zip(list2, list1)\r\n for idx in range(len(list1),len(list2)):\r\n extra.append(list2[idx])\r\n else:\r\n new_list = zip(list1, list2)\r\n for idx in range(len(list2),len(list1)):\r\n extra.append(list1[idx])\r\n for item1, item2 in new_list:\r\n result.append(item1)\r\n result.append(item2)\r\n for item in extra:\r\n result.append(item)\r\n return result", "def merger(self, *lists):\n\t\tself.merged=[]\n\t\tfor i in range(len(lists[0][0])):\n\t\t\tself.temp=[]\n\t\t\tfor j in range(len(lists[0])):\n\t\t\t\tself.temp.append(lists[0][j][i])\n\t\t\tself.merged.append(self.temp)\n\t\treturn self.merged", "def merge(list_1, list_2):\n l1, l2 = len(list_1), len(list_2) # Store the length of each list\n merged_output = [None for i in range(l1 + l2)]\n i, j = 0, 0\n # Compare each element of the two lists till one of them is exhausted\n while i < l1 and j < l2:\n if list_1[i] <= list_2[j]:\n merged_output[i + j] = list_1[i]\n i += 1\n else:\n merged_output[i + j] = list_2[j]\n j += 1\n\n # Check if list_1 is exhausted, add remaining element to the output\n for j in range(j, l2):\n merged_output[i + j] = list_2[j]\n\n # Check if list_2 is exhausted, add remaining element to the output\n for i in range(i, l1):\n merged_output[i + j] = list_1[i]\n\n # print(merged_output)\n return 
merged_output", "def combinations( l1, l2 ):\n for i in range(len(l1)):\n yield zip( l1,l2)\n l1.insert(0,l1.pop())", "def union(llist_1 : LinkedList, llist_2 : LinkedList) -> LinkedList:\n # Convert to set to remove repeated entries in each list\n lset_1 = list_to_set(llist_1)\n lset_2 = list_to_set(llist_2)\n \n # Combine the two sets to create a union\n union_list = LinkedList()\n list_of_added = []\n for item in lset_1:\n union_list.append(item)\n list_of_added.append(item)\n\n for item in lset_2:\n if item not in list_of_added:\n union_list.append(item)\n\n return union_list", "def merge_lists(lists1, lists2):\n merged_lists = []\n for list1, list2 in zip(lists1, lists2):\n merged_lists.append(list1 + list2)\n return merged_lists", "def merge(items1, items2):\r\n # TODO: Repeat until one list is empty\r\n # TODO: Find minimum item in both lists and append it to new list\r\n # TODO: Append remaining items in non-empty list to new list\r", "def concat(x: list[int], y: list[int]) -> list[int]:\n con = list()\n i: int = 0\n count: int = 0\n while len(y) > i: \n con.append(y[i])\n i += 1\n while len(x) > count: \n con.append(x[count])\n count += 1 \n return con", "def add_two_lists(list1, list2):\n return list(map(lambda m: m[0] + m[1], list(zip(list1, list2))))", "def merge(list1, list2): \r\n if len(list1) == 0 or len(list2) == 0:\r\n new_list = [item for item in list1]\r\n new_list.extend(list2)\r\n return new_list\r\n else:\r\n if list1[0] <= list2[0]:\r\n new_list = list([list1[0]])\r\n new_list.extend(merge(list1[1:], list2))\r\n return new_list\r\n else:\r\n new_list = list([list2[0]])\r\n new_list.extend(merge(list1, list2[1:]))\r\n return new_list", "def merge_lists(list_1, list_2):\n if len(list_1) == 0:\n return list_2\n if len(list_2) == 0:\n return list_1\n\n new_list = []\n length = len(list_1) + len(list_2)\n while len(new_list) < length:\n if len(list_1) == 0:\n new_list = new_list + list_2\n elif len(list_2) == 0:\n new_list = new_list + list_1\n\n elif list_1[0] < list_2[0]:\n new_list.append(list_1[0])\n list_1.remove(list_1[0])\n elif list_1[0] >= list_2[0]:\n new_list.append(list_2[0])\n list_2.remove(list_2[0])\n return new_list", "def merge(left_list, right_list):\n if not len(left_list) or not len(right_list):\n return left_list or right_list\n\n result = []\n i, j = 0, 0\n left_trips_dict = {trip.trip_id: trip for trip in left_list}\n right_trips_dict = {trip.trip_id: trip for trip in right_list}\n while (len(result) < len(left_list) + len(right_list)):\n ranked_two_trips_ids = fixtures.rank_trips([left_list[i],right_list[j]])\n # if ids[0] belogs to left, ad the trip of id[0] to result and inc the left\n if ranked_two_trips_ids[0] in left_trips_dict.keys():\n result.append(left_trips_dict[ranked_two_trips_ids[0]])\n i+= 1\n else:\n result.append(right_trips_dict[ranked_two_trips_ids[0]])\n j+= 1\n if i == len(left_list) or j == len(right_list):\n result.extend(left_list[i:] or right_list[j:])\n break \n return result", "def merge(list1: list, list2: list) -> list:\n output = []\n i, j = 0, 0\n while i < len(list1) and j < len(list2):\n if list1[i][1] <= list2[j][1]:\n output += [list1[i]]\n i += 1\n else:\n output += [list2[j]]\n j += 1\n return output + list1[i:] + list2[j:]", "def merge(list1, list2):\n merged = []\n if len(list1) < 1 or len(list2) <1:\n return list1 + list2\n else:\n ind_1 = 0\n ind_2 = 0\n while ind_1 < len(list1) and ind_2 < len(list2):\n #some appends to lists\n if list1[ind_1] < list2[ind_2]:\n merged.append(list1[ind_1])\n ind_1 += 1\n 
elif list2[ind_2] < list1[ind_1]:\n merged.append(list2[ind_2])\n ind_2 += 1\n elif list1[ind_1] == list2[ind_2]:\n merged.append(list1[ind_1])\n merged.append(list2[ind_2])\n ind_1 += 1\n ind_2 += 1\n #if reach end of one list, copy the remainder of the other\n if ind_1 >= len(list1) and ind_2 < len(list2):\n merged += list2[ind_2:]\n ind_2 = len(list2)\n elif ind_2 >= len(list2) and ind_1 < len(list1):\n merged += list1[ind_1:]\n ind_1 = len(list1)\n return merged", "def merge(x, y):\n try:\n from itertools import izip\n except ImportError:\n izip = zip\n from numpy import concatenate\n return (concatenate((a, b)) for a, b in izip(x, y))", "def union(list_a: list, list_b: list) -> list:\n if list_a is None:\n list_a = [None]\n if list_b is None:\n list_b = [None]\n return list(set(list_a) | set(list_b))", "def merge ( list1, list2 ):\n new_list = []\n while len(list1)>0 and len(list2)>0:\n if list1[0] < list2[0]:\n new_list.append (list1[0])\n del list1[0]\n else:\n new_list.append (list2[0])\n del list2[0]\n return new_list + list1 + list2", "def left_join_list_two():\n return[\n ['wrath', 'anger', None],\n ['fond', 'enamored', 'averse'],\n ['guide', 'usher', 'jam'],\n ['outfit', 'garb', None],\n ['diligent', 'employed', 'idle'],\n ]", "def merge(list1: list, list2: list) -> list:\r\n result = []\r\n i = 0\r\n j = 0\r\n # Iterate through each element and append the smaller element of each list to the resulting list.\r\n while i < len(list1) and j < len(list2):\r\n if list1[i] < list2[j]:\r\n result.append(list1[i])\r\n i += 1\r\n else:\r\n result.append(list2[j])\r\n j += 1\r\n\r\n # Append the remaining lists to the resulting list.\r\n result.extend(list1[i:])\r\n result.extend(list2[j:])\r\n return result", "def merge_lists_w_ordering(a: List[Any], b: List[Any]) -> List[Any]:\n overlap = set(a).intersection(b)\n\n result = []\n\n current, other = iter(a), iter(b)\n\n while True:\n for element in current:\n if element in overlap:\n overlap.discard(element)\n other, current = current, other\n break\n\n result.append(element)\n else:\n result.extend(other)\n break\n\n return result", "def __listunion(self, c1, c2):\n s1 = {}\n for delta in c1:\n s1[delta] = 1\n\n\tc = c1[:]\n\tfor delta in c2:\n if not s1.has_key(delta):\n\t\tc.append(delta)\n\n\treturn c", "def add_lists(a,b):\r\n\r\n for i in range(len(a)):\r\n a[i] += b[i]\r\n return a", "def merge_two_sorted_lists(lst1, lst2):\n\n dummy_head = tail = ListNode() # head and tail start pointing to the same dummy node, then tail converges\n while lst1 and lst2:\n if lst1.data < lst2.data:\n tail.next = lst1 # the FIRST tail.next node is where the actual merge begins\n lst1 = lst1.next\n else:\n tail.next = lst2\n lst2 = lst2.next\n tail = tail.next\n # append the remaining nodes of list 1 or list 2\n tail.next = lst1 or lst2 # when one list becomes None, the 'or' returns the remaining nodes of the other\n return dummy_head.next # dummy_head.next is the node appended with the FIRST tail.next statement", "def merge(*args):\n return reduce(list.__add__, args, list())", "def join(self: \"_List[_List[T]]\") -> \"_List[T]\":\n return ListMonad(*[element for lists in self for element in lists])", "def concat_list(in_list):\n return list(itertools.chain(*in_list))", "def merge(lst1, lst2):\n if not lst1 or not lst2:\n return lst1 + lst2\n elif lst1[0] < lst2[0]:\n return [lst1[0]] + merge(lst1[1:], lst2)\n else:\n return [lst2[0]] + merge(lst1, lst2[1:])", "def concatenate_lists(*layers, **kwargs):\n ...", "def list_update(l1, l2):\n return 
filter(lambda e : e not in l2, l1) + list(l2)", "def interleave_lists(before, after):\n if len(before) != len(after):\n print \"Error: arrays must of same length in interleave_lists\"\n return None\n else:\n output = before + after\n output[::2] = before\n output[1::2] = after\n return output", "def merge_lists(a_lst, b_lst):\n\n i = 0\n j = 0\n merged_list = []\n while i < len(a_lst) and j < len(b_lst):\n \n if a_lst[i] < b_lst[j]:\n merged_list.append(a_lst[i])\n i += 1\n else:\n merged_list.append(b_lst[j])\n j += 1\n if i < len(a_lst):\n merged_list.extend(a_lst[i:])\n if j < len(b_lst):\n merged_list.extend(b_lst[j:])\n return merged_list", "def extend(*, list1 : Union[List[Any], ConduitVariable], list2 : Union[List[Any], ConduitVariable]) -> None:\n list1.extend(list2)", "def _combine(self, results_list):\n pass", "def merge(a, b):\n result = []\n\n # Append smallest values to result until either list is exhausted\n i = j = 0\n while i < len(a) and j < len(b):\n if compare(a[i], b[j]) < 0:\n result.append(a[i])\n i += 1\n else:\n result.append(b[j])\n j += 1\n\n # Append all remaining values from the unexhausted list\n if i < len(a):\n result.extend(a[i:])\n else:\n result.extend(b[j:])\n\n return result", "def lcombine( v1, v2, k1, k2 ):\n return [ x*k1 + y*k2 for (x,y) in izip(v1,v2) ]", "def merge(list1, list2): \n result = []\n copy1, copy2 = list1[:], list2[:]\n \n while min(copy1, copy2):\n if copy1[0] < copy2[0]:\n result.append(copy1[0])\n copy1.pop(0)\n else:\n result.append(copy2[0])\n copy2.pop(0)\n \n if copy1:\n result += copy1\n elif copy2:\n result += copy2\n \n return result", "def add_lists (list1, list2):\n \n output = []\n for item1, item2 in zip(list1, list2):\n output.append(item1 + item2)\n return output", "def add(a: list, b: list) -> list:\n temp = zip(a, b)\n print(list(temp))", "def merge_many(*lsts):\n if not lsts:\n return []\n elif len(lsts) == 1:\n return lsts[0][:]\n elif len(lsts) == 2:\n return merge(*lsts)\n else:\n left = lsts[len(lsts) // 2:]\n right = lsts[:len(lsts) // 2]\n\n return merge(merge_many(*left), merge_many(*right))", "def merge(list1, list2):\n res = []\n index_i, index_j = 0, 0\n while index_i < len(list1) and index_j < len(list2):\n if list1[index_i] <= list2[index_j]:\n res.append(list1[index_i])\n index_i += 1\n else:\n res.append(list2[index_j])\n index_j += 1\n res += list1[index_i:]\n res += list2[index_j:]\n return res", "def listify(lhs, rhs):\n if not lhs:\n return rhs\n if not rhs:\n return lhs\n if is_listing(lhs) and is_listing(rhs):\n return lhs + rhs\n if is_listing(lhs):\n return lhs + [rhs]\n if is_listing(rhs):\n return [lhs] + rhs\n return [lhs, rhs]", "def _merge_partition_lists(partition_lists):\n dst = list(partition_lists[0])\n for src in partition_lists[1:]:\n if len(src) != len(dst):\n raise ValueError(\"All ragged inputs must have the same ragged_rank.\")\n for i in range(len(dst)):\n # pylint: disable=protected-access\n dst[i] = dst[i]._merge_precomputed_encodings(src[i])\n return dst", "def concatenateList(list1, list2):\n outputList = []\n\n ## list1\n # if it's an empty list\n if len(list1) == 0:\n outputList.append(list1)\n # if it's already a list of list (from previous __add__)\n elif isinstance(list1[0], list):\n for i in range(len(list1)):\n outputList.append(list1[i])\n # first use of __add__, not a list of list\n else:\n outputList.append(list1)\n\n ## list2\n # if it's an empty list\n if len(list2) == 0:\n outputList.append(list2)\n # if it's already a list of list (from previous __add__)\n elif 
isinstance(list2[0], list):\n for i in range(len(list2)):\n outputList.append(list2[i])\n # first use of __add__, not a list of list\n else:\n outputList.append(list2)\n\n return outputList", "def _add_list_values(a, b):\n new_list = []\n for i in range(len(a)):\n new_list.append(a[i] + b[i])\n return new_list", "def merge(list1, list2):\n result_list = []\n list1_length = len(list1)\n list2_length = len(list2)\n list1_index = 0\n list2_index = 0\n while list1_index < list1_length and list2_index < list2_length:\n if list1[list1_index] <= list2[list2_index]:\n result_list.append(list1[list1_index])\n list1_index = list1_index + 1\n else:\n result_list.append(list2[list2_index])\n list2_index = list2_index + 1\n \n if list1_index < list1_length:\n result_list.extend(list1[list1_index:])\n if list2_index < list2_length:\n result_list.extend(list2[list2_index:])\n \n return result_list", "def merge(first_list, second_list):\r\n result_list = []\r\n\r\n def check_for_group():\r\n \"\"\"Inner function,so that it has access to merges' local variables,\r\n that checks for groups\"\"\"\r\n if first_list[0][0] == second_list[0][0]:\r\n try:\r\n result = first_list[0][0], str(int(first_list[0][1]) + int(second_list[0][1]))\r\n except ValueError:\r\n result = first_list[0][0], str(float(first_list[0][1]) + float(second_list[0][1]))\r\n result_list.append(result)\r\n first_list.remove(first_list[0])\r\n second_list.remove(second_list[0])\r\n return True\r\n return False\r\n\r\n while first_list and second_list:\r\n if first_list[0] > second_list[0]:\r\n if not check_for_group():\r\n result_list.append(second_list[0])\r\n second_list.remove(second_list[0])\r\n else:\r\n if not check_for_group():\r\n result_list.append(first_list[0])\r\n first_list.remove(first_list[0])\r\n empty_lists(first_list, second_list, result_list)\r\n return result_list", "def longzip(a, b):\n aiter = iter(a)\n biter = iter(b)\n try:\n for item1 in aiter:\n yield item1, next(biter)\n except StopIteration:\n for item1 in aiter:\n yield item1, None\n else:\n for item2 in biter:\n yield None, item2", "def merge_list(list1, list2):\n\n current1 = list1.head\n current2 = list2.head\n\n if current1 == None and current2 == None:\n raise Exception(\"lists are empty\")\n\n if not current1:\n list1.head = list2.head\n return list1.head\n\n if not current2:\n return list1.head\n\n temp = current2.next\n\n while current1.next and current2.next:\n current1.next, current2.next = current2, current1.next\n current1 = current2.next\n current2, temp = temp, temp.next\n\n if not current1.next:\n current1.next = current2\n return list1.head\n\n if not current2.next:\n current1.next, current2.next = current2, current1.next\n return list1.head", "def merge(list_a, list_b):\n new_list = []\n i = 0\n j = 0\n while (i < len(list_a) and j < len(list_b)):\n if(list_a[i] < list_b[j]):\n new_list.append(list_a[i])\n i += 1\n else:\n new_list.append(list_b[j])\n j += 1\n new_list += list_a[i:]\n new_list += list_b[j:]\n\n return new_list", "def mergeKLists(self, lists: 'List[ListNode]') -> 'ListNode':\n if not lists:\n return None\n nodes = []\n for head in lists:\n while head:\n nodes.append(head.val)\n head = head.next\n\n nodes.sort()\n\n newList = ListNode(0)\n curr = newList\n for node in nodes:\n curr.next = ListNode(node)\n curr = curr.next\n return newList.next", "def merge_down(lists):\r\n lst1 = transpose(lists)\r\n lst2 = merge_AllRight(lst1)\r\n lst3 = transpose(lst2)\r\n\r\n lists = lst3\r\n\r\n return lists", "def _extend(cls, li1, li2):\n 
return li1 + li2", "def append(x, ys):\n return list(ys) + [x]", "def zipLists(list1, list2):\n list1_current = list1.head\n list2_current = list2.head\n\n while list1_current and list2_current:\n list1_next = list1_current.next\n list2_next = list2_current.next\n\n list1_current.next = list2_current\n list2_current.next = list1_next\n\n last_list1_current = list1_current.next\n\n list1_current = list1_next\n list2_current = list2_next\n \n if not list1_current and list2_current:\n last_list1_current.next = list2_current\n\n return list1", "def union(list1, list2):\n new_list = list1\n for literal in list2:\n negate_literal = copy.deepcopy(literal)\n negate_literal.negate = not negate_literal.negate\n if negate_literal in list1:\n new_list.remove(negate_literal)\n continue\n if literal not in list1:\n new_list.append(literal)\n return new_list", "def merge(l1, l2):\n\n #Reverse the lists\n l1 = list(reversed(l1))\n l2 = list(reversed(l2))\n\n ret = []\n\n while True:\n # If either list is empty, reverse the other one and append it to the end\n if not l1:\n ret.extend(reversed(l2))\n return ret\n if not l2:\n ret.extend(reversed(l1))\n return ret\n\n # Append the lowest last element of the two lists\n ret.append(l1.pop() if l1[-1] < l2[-1] else l2.pop())", "def merge(l1, l2):\n i = j = 0\n output = []\n\n while i < len(l1) and j < len(l2):\n if l1[i] <= l2[j]:\n output.append(l1[i])\n i += 1\n else:\n output.append(l2[j])\n j += 1\n\n output.extend(l1[i:] + l2[j:])\n\n return output", "def union(self, other: list) -> 'List':\n if not isinstance(other, list):\n raise ValueError('The comparing element is not a list')\n\n return List(self + other).unique()", "def merge(list_of_lists):\n merged = list()\n for l in list_of_lists:\n merged.extend(l)\n return merged", "def _merge_two_sorted_list(sorted_list_head, sorted_list_tail):\n sorted_list_result = list()\n head_index = 0\n tail_index = 0\n len_head = len(sorted_list_head)\n len_tail = len(sorted_list_tail)\n\n while head_index < len_head and tail_index < len_tail:\n print(sorted_list_head, ' : ', sorted_list_tail)\n if sorted_list_head[head_index] < sorted_list_tail[tail_index]:\n sorted_list_result.append(sorted_list_head[head_index])\n head_index += 1\n elif sorted_list_head[head_index] > sorted_list_tail[tail_index]:\n sorted_list_result.append(sorted_list_tail[tail_index])\n tail_index += 1\n elif sorted_list_head[head_index] == sorted_list_tail[tail_index]:\n sorted_list_result.append(sorted_list_head[head_index])\n sorted_list_result.append(sorted_list_tail[tail_index])\n head_index += 1\n tail_index += 1\n\n if head_index < len_head:\n sorted_list_result.extend(sorted_list_head[head_index:])\n elif tail_index < len_tail:\n sorted_list_result.extend(sorted_list_tail[tail_index:])\n\n return sorted_list_result", "def merge(list1, list2):\n answer = []\n assert answer == sorted(answer)\n\n idx1 = 0\n idx2 = 0\n while (idx1 < len(list1)) and (idx2 < len(list2)):\n if list1[idx1] < list2[idx2]:\n answer.append(list1[idx1])\n idx1 += 1\n elif list1[idx1] > list2[idx2]:\n answer.append(list2[idx2])\n idx2 += 1\n else:\n answer.append(list1[idx1])\n answer.append(list2[idx2])\n idx1 += 1\n idx2 += 1\n assert answer == sorted(answer)\n\n answer.extend(list1[idx1:])\n answer.extend(list2[idx2:])\n\n assert answer == sorted(answer)\n return answer", "def union(p1: Iterator[Posting], p2: Iterator[Posting]) -> Iterator[Posting]:\n raise NotImplementedError(\"You need to implement this as part of the assignment.\")", "def append_list(list_a, 
list_b):\n for i, a in enumerate(list_a):\n list_b[i].append(a)\n\n return list_b", "def zipLists(list1, list2):\n\n # return = [*transform* *iteration* *filter* ] \n # transform -> sub[item]\n # iteration -> for item in range\n # filter -> (loop) for sub in [list1, list2]\n\n # lst1 = [1, 2, 3]\n # lst2 = ['a', 'b', 'c']\n\n return [sub[item] for item in range(len(list2))\n for sub in [list1, list2]]\n # [ [0] of list1, [0] of list2 ] -> [ 1, 'a' ]\n # [ [1] of list1, [1] of list2 ] -> [ 1, 'a', 2, 'b' ]\n # [ [2] of list1, [2] of list2 ] -> [1, 'a', 2, 'b', 3]\n # [ 1, 'a' ]", "def merge(lst1, lst2):\n\n results = []\n i = 0\n j = 0\n\n while i <= len(lst1) - 1 and j <= len(lst2) - 1:\n\n if lst1[i] < lst2[j]:\n results.append(lst1[i])\n i += 1\n else:\n results.append(lst2[j])\n j += 1\n\n if i == len(lst1):\n results.extend(lst2[j:])\n else:\n results.extend(lst1[i:])\n\n return results", "def union(a, b):\r\n return list(set(a) | set(b))", "def merge(f_list):\n f0, f1 = f_list\n n = 2 * len(f0)\n f = [0] * n\n for i in range(n // 2):\n f[2 * i + 0] = f0[i]\n f[2 * i + 1] = f1[i]\n return f", "def merge_ordered_list(in_list1: list, in_list2: list) -> list:\n _list1 = in_list1.copy()\n _list2 = in_list2.copy()\n _output_list = []\n idx_2 = 0\n for element in _list1:\n while idx_2 < len(_list2) and element > _list2[idx_2]:\n _output_list.append(_list2[idx_2])\n idx_2 += 1\n _output_list.append(element)\n while idx_2 < len(_list2):\n _output_list.append(_list2[idx_2])\n idx_2 += 1\n return _output_list", "def _merge(S1, S2, mylist):\n i = 0\n j = 0\n while i + j < len(mylist):\n if j == len(S2) or (i < len(S1) and S1[i] < S2[j]):\n mylist[i+j] = S1[i] # Copy ith element of S1 as next item of mylist\n i += 1\n else:\n mylist[i+j] = S2[j] # Copy jth element of S2 as next item of mylist\n j += 1", "def right_merge(self,list_to_merge):\n self.items = self.items + list_to_merge\n return self.items", "def concat(iterables):\n return itertools.chain.from_iterable(iterables)", "def merge(l1, l2):\n # Edge cases, where nothing is to be done.\n if l1 is None and l2 is None: return l1\n if l1 is None: return l2\n if l2 is None: return l1\n\n # Vars to hold,\n # head -> a dummy head to keep a reference to the start of the merged\n # list.\n # _iter -> to move through the merged list.\n head = ListNode(float('-inf'))\n _iter = head\n\n # As long as both the lists are not exhausted,\n while l1 and l2:\n\n # Make the next of _iter as the smaller node.\n if l1.val <= l2.val:\n _iter.next = l1\n l1 = l1.next\n else:\n _iter.next = l2\n l2 = l2.next\n # Move _iter forward.\n _iter = _iter.next\n\n # If either of the lists remain, add them to the end,\n # Note: at-least one of the lists would be exhausted by now,\n # and the remaining one is sorted in itself, which is why this works.\n if not l1: _iter.next = l2\n if not l2: _iter.next = l1\n\n # Return a reference to the start of the merged list.\n return head.next", "def cartesianproduct(lists):\r\n return reduce(appendEs2Sequences,lists,[])", "def concat(list_of_lists):\n return reduce(lambda a,b : a+b,list_of_lists,[])", "def merge_lists(src, new):\n l_min, l_max = (src, new) if len(src) < len(new) else (new, src)\n\n l_min.extend(None for i in range(len(l_min), len(l_max)))\n\n for i, val in enumerate(new):\n if isinstance(val, dict) and isinstance(src[i], dict):\n new[i] = merge_dicts(src[i], val)\n elif isinstance(val, list) and isinstance(src[i], list):\n new[i] = merge_lists(src[i], val)\n elif val is not None:\n new[i] = val\n else:\n new[i] = 
src[i]\n\n return new", "def mergeWith(self, others):", "def union(a, b):\n return list(set(a) | set(b))", "def union(a, b):\n return list(set(a) | set(b))", "def data_list_wdl_merge(data_list1:list, data_list2:list) -> list:\n list_size = len(data_list1)\n merged_data_list = []\n for i in range(list_size):\n merged_data_list.append(pd.concat([data_list1[i],data_list2[i]]))\n return merged_data_list", "def merge(left, right):\n aList = []\n lt = 0\n rt = 0\n\n #Repeatedly move the smallest of left and right to the new list\n while lt < len(left) and rt < len(right):\n if left[lt] < right[rt]:\n aList.append(left[lt])\n lt += 1\n else:\n aList.append(right[rt])\n rt += 1\n\n #There will only be elements left in one of the original two lists.\n\n #Append the remains of left (lt..end) on to the new list.\n while lt < len(left):\n aList.append(left[lt])\n lt += 1\n \n #Append the remains of right (rt..end) on to the new list.\n while rt < len(right):\n aList.append(right[rt])\n rt += 1\n\n return aList", "def merge_in(list_a: list, list_b: list):\n end_a = 0\n\n while list_a[end_a] is not None:\n end_a += 1\n end_a -= 1\n\n assert (end_a + len(list_b) < len(list_a))\n\n a_index = end_a\n b_index = len(list_b) - 1\n\n for k in range(len(list_a) - 1, -1, -1):\n if b_index < 0 or (a_index >= 0 and list_a[a_index] > list_b[b_index]):\n list_a[k] = list_a[a_index]\n a_index -= 1\n else:\n list_a[k] = list_b[b_index]\n b_index -= 1", "def combine_list(lines):\n results = []\n for l in lines:\n results.extend(l)\n return list(filter(_remove, results))", "def concat_map(f, xs):\n return concat(imap(f, xs))", "def merge_two(l, r):\n new = []\n i1, i2 = 0, 0\n while i1 != len(l) and i2 != len(r):\n if l[i1] < r[i2]:\n new.append(l[i1])\n i1 += 1\n else:\n new.append(r[i2])\n i2 += 1\n\n new.extend(l[i1:])\n new.extend(r[i2:])\n return new", "def concat(seqs):\n return itertools.chain.from_iterable(seqs)", "def merge(a: List[int], b: List[int]) -> List[int]:\n merged = []\n i = j = 0\n alen = len(a)\n blen = len(b)\n while i < alen or j < blen:\n aval = a[i] if i < alen else float(\"inf\")\n bval = b[j] if j < blen else float(\"inf\")\n if aval <= bval:\n merged.append(a[i])\n i += 1\n else:\n merged.append(b[j])\n j += 1\n return merged", "def __cross(self,A, B):\n return [s+t for s in A for t in B]", "def add(a, b):\n if len(a) < len(b):\n aa = itertools.chain(reversed(a), itertools.repeat(0))\n bb = reversed(b)\n elif len(b) < len(a):\n bb = itertools.chain(reversed(b), itertools.repeat(0))\n aa = reversed(a)\n else:\n aa = reversed(a)\n bb = reversed(b)\n cc = _add_stream(aa, bb)\n return list(reversed(cc))", "def combine_permutations(p1, p2):\n p = tuple(map(p2.__getitem__, p1))\n return p" ]
[ "0.7555634", "0.7349701", "0.7179665", "0.70421684", "0.6992282", "0.6892206", "0.6813232", "0.68128705", "0.6796102", "0.67734545", "0.6721897", "0.6630253", "0.6618824", "0.65558565", "0.6551609", "0.6527701", "0.64997417", "0.64883775", "0.64660096", "0.6453299", "0.64290607", "0.6422815", "0.64205945", "0.6400244", "0.6398063", "0.63936293", "0.6393221", "0.63896066", "0.63751405", "0.6374449", "0.6362629", "0.6318776", "0.62958264", "0.6278376", "0.6260569", "0.62572", "0.6250747", "0.62502885", "0.62411785", "0.6238223", "0.6230779", "0.6228906", "0.621899", "0.62130725", "0.62124", "0.62052274", "0.6202787", "0.62013984", "0.6192614", "0.6173675", "0.61661565", "0.61656964", "0.61622775", "0.61610323", "0.6136995", "0.6133588", "0.61253214", "0.61238056", "0.6112951", "0.6111794", "0.6111555", "0.61108565", "0.60989696", "0.609707", "0.6081639", "0.603142", "0.6030242", "0.60242873", "0.6021901", "0.60192126", "0.6000737", "0.59922934", "0.5990992", "0.5985272", "0.59841347", "0.59740233", "0.596008", "0.59490466", "0.59427506", "0.5935471", "0.59298885", "0.59252244", "0.59214705", "0.5916134", "0.5909039", "0.5900972", "0.5895617", "0.5883234", "0.5883234", "0.58722407", "0.585533", "0.58163816", "0.58131105", "0.58117247", "0.580924", "0.58080435", "0.58057815", "0.5790498", "0.5781957", "0.5779495" ]
0.6404212
23
Prepare looped lines from looping dictionary.
def prepare_looped_lines(self, alldict, comblist):
    loopline_dict=dict()
    for stridx in comblist:
        lidx = int(stridx.split('-')[0])
        loopidx = int(stridx.split('-')[1])
        loopline_dict[lidx] = alldict[lidx]['prepend'] + alldict[lidx]['looplist'][loopidx].strip() + alldict[lidx]['append'] + '\n'
    return loopline_dict
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def prepare_looped_datasets(self, alldict, allcombs):\n datasets_dict=dict()\n numcombs = len(allcombs)\n combct = 0\n while combct < numcombs:\n newdata = list(self.baseinput.data)\n loopedlines = dict()\n loopedlines = self.prepare_looped_lines(alldict, allcombs[combct])\n for lvalidx in loopedlines.keys():\n newdata[lvalidx] = loopedlines[lvalidx]\n datasets_dict[combct] = newdata\n combct = combct + 1\n return datasets_dict", "def lines():\n line_dict = {}\n #\n line_dict['ArI'] = 2**0\n line_dict['HgI'] = 2**1\n line_dict['KrI'] = 2**2\n line_dict['NeI'] = 2**3\n line_dict['XeI'] = 2**4\n line_dict['CdI'] = 2**5\n line_dict['ZnI'] = 2**6\n line_dict['HeI'] = 2**7\n line_dict['OH_R24000'] = 2**8\n line_dict['OH_triplespec'] = 2**9\n line_dict['CuI'] = 2**10\n line_dict['ArII'] = 2**11\n line_dict['OH_XSHOOTER'] = 2**12\n line_dict['OH_GNIRS'] = 2**13\n line_dict['OH_NIRES'] = 2**14\n line_dict['ThAr_XSHOOTER_VIS'] = 2**15\n line_dict['OH_GMOS'] = 2**16\n line_dict['OH_MODS'] = 2**17\n line_dict['ThAr_MagE'] = 2**18 # R=4100\n line_dict['OH_FIRE_Echelle'] = 2**19 # R=6000\n line_dict['Ar_IR_GNIRS'] = 2**20 # R=6000\n line_dict['FeI'] = 2**21\n line_dict['FeII'] = 2**22\n line_dict['UNKNWN'] = 2**23\n line_dict['Ar_IR_MOSFIRE'] = 2 ** 24\n line_dict['Ne_IR_MOSFIRE'] = 2 ** 25\n line_dict['OH_MOSFIRE_Y'] = 2 ** 26\n line_dict['OH_MOSFIRE_J'] = 2 ** 27\n line_dict['OH_MOSFIRE_H'] = 2 ** 28\n line_dict['OH_MOSFIRE_K'] = 2 ** 29\n line_dict['ThAr_XSHOOTER_UVB'] = 2**30\n #\n return line_dict", "def set_dict(self, lines):\n for line in lines:\n line = line.rstrip()\n split_line = line.split(\"\\t\")\n old_gene_id = split_line[0]\n new_gene_id = split_line[2]\n conv_dict = self.conversion_dict\n conv_dict[old_gene_id] = new_gene_id\n self.conversion_dict = conv_dict", "def _clean_up_loop_dict(loop_dict):\n \n # Remove the 'data_header' tag if it exists\n # since it is a list of dataframes\n # Then re-attach each of them one at a time\n if u'data_header' in loop_dict.keys():\n header_df_list = loop_dict.pop(u'data_header')\n \n if isinstance(header_df_list, list):\n for df in enumerate(header_df_list):\n loop_dict[u'data_header_'+str(df[0]+1)] = df[1]\n else:\n loop_dict[u'data_header_1'] = header_df_list\n \n return loop_dict", "def main_dictionary():\n for lyric in lyrics:\n for line in lyric.split(\"\\n\"):\n dictionary(line.split(\" \"))", "def prepare_lines_data(self):\n for l_hd in self.hour_data:\n if not self.node_from or not self.node_to:\n print('ERROR! line %i-%i has no node(s)' % (self.node_from_code, self.node_to_code))\n if l_hd.state and self.node_from.get_node_hour_state(l_hd.hour) \\\n and self.node_to.get_node_hour_state(l_hd.hour):\n if not self.type:\n node_start = self.node_from_code\n node_finish = self.node_to_code\n base_coeff = 0\n k_pu = 0\n else:\n node_start = self.node_to_code\n node_finish = self.node_from_code\n base_coeff = self.node_to.voltage_class / self.node_from.voltage_class\n k_pu = math.sqrt(math.pow(self.kt_re, 2) + math.pow(self.kt_im, 2))\n lag = math.atan(self.kt_im / self.kt_re) if self.kt_re else 0\n\n self.eq_db_lines_data.append((\n l_hd.hour, node_start, node_finish, self.parallel_num, self.type,\n max(self.node_from.voltage_class, self.node_to.voltage_class), base_coeff,\n l_hd.r, l_hd.x, l_hd.g, -l_hd.b, k_pu, lag, -l_hd.b_from, -l_hd.b_to\n ))", "def fill_line(self, dct):\n return self._line_format % self.list_values(dct)", "def process_lines(self):\n\n for line in self.all_lines:\n container_re = re.compile(r'(.*?) 
bags')\n bags_re = re.compile(r'(?:(\\d+)|no other) (.*?) bags*')\n container_name = re.match(container_re, line).group(1)\n bags = re.findall(bags_re, line)\n self.all_bags[container_name] = bags", "def process(self):\n first_line = self.setup[\"first_line\"]\n last_line = self.setup[\"last_line\"]\n\n self.logger.info(\"Using lines %s - %s\", first_line, last_line)\n\n path_temp = \"{}_\".format(self.path)\n\n with open(self.path, \"r\") as src, open(path_temp, \"w\") as dest:\n lines = src.r..\n copy_lines = lines[first_line-1:last_line]\n dest.write(\"\".join(copy_lines))\n\n os.rename(path_temp, self.path)", "def refresh_lines(self):\n for line_data in self._data_lines:\n line = BasketLine.from_dict(self, line_data)\n pricing_context = PricingContext(shop=self.shop, customer=self.customer, supplier=line.supplier)\n line.cache_info(pricing_context)\n self._add_or_replace_line(line)", "def get_unidecode_lines(self, lines_dict):\n for line in lines_dict:\n line['parnter_name'] = unicode(line['partner_name'])\n return lines_dict", "def data_preprocessing():\n lineid_content = get_lineid_content()\n print('Read movie_lines.txt file complete...')\n convos = get_convos()\n print('Read movie_conversations.txt file complete...')\n print('Building dataset')\n get_data(lineid_content, convos)", "def _loopPreparation(self, stimNumber):\n self.nbFrames=10000 #TO DO --> better place for this line of code\n\n self.stimName= self.experimentName+'_S%(number)03d' % {\"number\": stimNumber} #%02d return a 2 char string : 1-->01\n (self.tiffWriterList, self.textFile) = filesInit( self.savePath,\n self.stimName,\n self.nbFrames,\n self.maxFrames)\n if self.seqMode == \"rgbMode\":\n self._rgbSequenceInit()\n elif self.seqMode == 'rbMode':\n self._rbSequenceInit()\n self.arduinoSync()", "def __init__(self, lines):\n self.tiles = {}\n self.parse(lines)\n self.find_neighbors()\n self.find_corners()\n self.build_grid_top()\n self.build_grid_left()\n self.fill_grid()\n self.stitch_image()", "def modify_body(lines, PE_dims, var_map): \n loop_bodies = []\n # Locate the user statements\n for line_id in range(len(lines)):\n line = lines[line_id]\n if line.find('hls_pipeline') != -1:\n # extract the loop body\n body_start = line_id\n r_minus_l = -1\n nxt_line_id = line_id + 1 \n while nxt_line_id < len(lines):\n nxt_line = lines[nxt_line_id]\n if nxt_line.find('}') != -1:\n r_minus_l += 1\n if nxt_line.find('{') != -1:\n r_minus_l -= 1\n if r_minus_l == 0:\n body_end = nxt_line_id - 1\n break\n nxt_line_id += 1\n loop_body = lines[body_start : body_end + 1]\n #print(loop_body)\n loop_bodies.append({'pos': [body_start, body_end], 'lines': loop_body})\n \n # Modidy the loop bodies\n #for body in loop_bodies:\n body_offset = 0\n for idx in range(len(loop_bodies)):\n body = loop_bodies[idx]\n body_lines = body['lines'] \n group_names = []\n has_data_trans = True\n data_trans_info = extract_data_trans_info(body_lines, PE_dims)\n # Remove the in transfer\n while has_data_trans:\n has_data_trans = False\n for line_id in range(len(body_lines)):\n line = body_lines[line_id]\n if line.find('read_channel_intel') != -1:\n has_data_trans = True\n # Locate the read block and the write block\n block_start, block_end = locate_data_trans_block(line_id, body_lines)\n m = re.search(r'\\((.+?)\\)', line) \n fifo_name = m.group(1)\n group_name = fifo_name.split('_')[1]\n group_names.append(group_name)\n break\n if has_data_trans:\n body_lines = body_lines[:block_start] + body_lines[block_end + 1:]\n # Remove the out transfer\n 
has_data_trans = True\n while has_data_trans:\n has_data_trans = False\n for line_id in range(len(body_lines)):\n line = body_lines[line_id]\n if line.find('write_channel_intel') != -1:\n m = re.search(r'\\((.+?)\\)', line)\n fifo_name = m.group(1).split(',')[0]\n group_name = fifo_name.split('_')[1]\n if group_name in group_names:\n has_data_trans = True\n block_start, block_end = locate_data_trans_block(line_id, body_lines)\n if has_data_trans:\n body_lines = body_lines[:block_start] + body_lines[block_end + 1:]\n #print(body_lines)\n # Wrap the body with space loops\n for dim_idx in range(len(PE_dims)):\n dim = PE_dims[dim_idx] \n line = f'#pragma unroll\\nfor (int s{dim_idx} = 0; s{dim_idx} < {dim}; s{dim_idx}++) {{\\n'\n body_lines.insert(dim_idx, line) \n for dim in PE_dims:\n body_lines.append('}\\n')\n\n # Modify the index\n body_lines = modify_index(body_lines, var_map, PE_dims)\n #print(body_lines)\n\n # Insert the data transfer stmts\n body_lines = insert_data_trans(body_lines, data_trans_info, PE_dims)\n #loop_bodies[idx]['lines'] = body_lines\n\n # Replace the loop bodies\n body_pos = body['pos'] \n lines = lines[: body_offset + body_pos[0]] \\\n + body_lines \\\n + lines[body_offset + body_pos[1] + 1 :] \n body_offset += len(body_lines) - (body_pos[1] - body_pos[0] + 1)\n\n return lines", "def update_lines(self):\n self._checkfigure()\n for ld in self.lines:\n line = ld['line']\n\n color = ld['color']\n line.set_color(color)\n\n lw = ld['linewidth']\n hlf = ld['highlight factor']\n highlight = hlf if ld['highlighted'] else 1.0\n lw = lw*highlight\n line.set_linewidth(lw)\n\n for vline in ld['vlines']:\n vline.set_color(color)\n vline.set_linestyle('--')\n vline.set_linewidth(lw)\n\n for hline in ld['vlines']:\n hline.set_color(color)\n hline.set_linestyle('--')\n hline.set_linewidth(lw)", "def reset_lines(chat_lines):\n for line in xrange(24):\n chat_lines[line].setText(chat_lines[line+1].getText())\n chat_lines[line].setTextColor(chat_lines[line+1].getTextColor())\n chat_lines[24].setText(\"\")", "def _preprocess(self, stream):\n unfinished = ' <unfinished ...>'\n resumed = '<... [^ ]+ resumed> (.*)$'\n in_progressed = {}\n\n for line in stream:\n pid, timestamp, rest = line.rstrip().split(None, 2)\n\n # Save any lines that are unfinished.\n # Line must *end* with the string unfinished.\n i = rest.rfind(unfinished)\n if i != -1 and i == len(rest) - len(unfinished):\n partial_line = rest[:i]\n in_progressed[pid] = (timestamp, partial_line)\n continue\n\n # Resume lines. 
Line must *start* with resumed string.\n match = re.search(resumed, line)\n if match:\n resumed_line = match.groups()[0]\n timestamp, partial_line = in_progressed.pop(pid)\n line = '{} {} {}{}'.format(\n pid, timestamp, partial_line, resumed_line)\n\n yield line", "def clean_line_generator_v2(df_pkl=None, fn='untitled'):", "def preprocess(args, id2info, mapping):\n polyline_spans = []\n keys = list(id2info.keys())\n assert 'AV' in keys\n assert 'AGENT' in keys\n keys.remove('AV')\n keys.remove('AGENT')\n keys = ['AGENT', 'AV'] + keys\n vectors = []\n two_seconds = mapping['two_seconds']\n mapping['trajs'] = []\n mapping['agents'] = []\n for id in keys:\n polyline = {}\n\n info = id2info[id]\n start = len(vectors)\n if args.no_agents:\n if id != 'AV' and id != 'AGENT':\n break\n\n agent = []\n for i, line in enumerate(info):\n if larger(line[TIMESTAMP], two_seconds):\n break\n agent.append((line[X], line[Y]))\n\n if args.visualize:\n traj = np.zeros([args.hidden_size])\n for i, line in enumerate(info):\n if larger(line[TIMESTAMP], two_seconds):\n traj = traj[:i * 2].copy()\n break\n traj[i * 2], traj[i * 2 + 1] = line[X], line[Y]\n if i == len(info) - 1:\n traj = traj[:(i + 1) * 2].copy()\n traj = traj.reshape((-1, 2))\n mapping['trajs'].append(traj)\n\n for i, line in enumerate(info):\n if larger(line[TIMESTAMP], two_seconds):\n break\n x, y = line[X], line[Y]\n if i > 0:\n # print(x-line_pre[X], y-line_pre[Y])\n vector = [line_pre[X], line_pre[Y], x, y, line[TIMESTAMP], line[OBJECT_TYPE] == 'AV',\n line[OBJECT_TYPE] == 'AGENT', line[OBJECT_TYPE] == 'OTHERS', len(polyline_spans), i]\n vectors.append(get_pad_vector(vector))\n line_pre = line\n\n end = len(vectors)\n if end - start == 0:\n assert id != 'AV' and id != 'AGENT'\n else:\n mapping['agents'].append(np.array(agent))\n\n polyline_spans.append([start, end])\n\n assert_(len(mapping['agents']) == len(polyline_spans))\n\n assert len(vectors) <= max_vector_num\n\n t = len(vectors)\n mapping['map_start_polyline_idx'] = len(polyline_spans)\n if args.use_map:\n vectors, polyline_spans = get_sub_map(args, mapping['cent_x'], mapping['cent_y'], mapping['city_name'],\n vectors=vectors,\n polyline_spans=polyline_spans, mapping=mapping)\n\n # logging('len(vectors)', t, len(vectors), prob=0.01)\n\n matrix = np.array(vectors)\n # matrix = np.array(vectors, dtype=float)\n # del vectors\n\n # matrix = torch.zeros([len(vectors), args.hidden_size])\n # for i, vector in enumerate(vectors):\n # for j, each in enumerate(vector):\n # matrix[i][j].fill_(each)\n\n labels = []\n info = id2info['AGENT']\n info = info[mapping['agent_pred_index']:]\n if not args.do_test:\n if 'set_predict' in args.other_params:\n pass\n else:\n assert len(info) == 30\n for line in info:\n labels.append(line[X])\n labels.append(line[Y])\n\n if 'set_predict' in args.other_params:\n if 'test' in args.data_dir[0]:\n labels = [0.0 for _ in range(60)]\n\n if 'goals_2D' in args.other_params:\n point_label = np.array(labels[-2:])\n mapping['goals_2D_labels'] = np.argmin(get_dis(mapping['goals_2D'], point_label))\n\n if 'lane_scoring' in args.other_params:\n stage_one_label = 0\n polygons = mapping['polygons']\n min_dis = 10000.0\n for i, polygon in enumerate(polygons):\n temp = np.min(get_dis(polygon, point_label))\n if temp < min_dis:\n min_dis = temp\n stage_one_label = i\n\n mapping['stage_one_label'] = stage_one_label\n\n mapping.update(dict(\n matrix=matrix,\n labels=np.array(labels).reshape([30, 2]),\n polyline_spans=[slice(each[0], each[1]) for each in polyline_spans],\n 
labels_is_valid=np.ones(args.future_frame_num, dtype=np.int64),\n eval_time=30,\n ))\n\n return mapping", "def _line_wrapper( self, diffs ):\n\n\t\t# pull from/to data and flags from mdiff iterator\n\t\tfor fromdata, todata, flag in diffs:\n\t\t\t# check for context separators and pass them through\n\t\t\tif flag is None:\n\t\t\t\tyield fromdata, todata, flag\n\t\t\t\tcontinue\n\t\t\t( fromline, fromtext ), ( toline, totext ) = fromdata, todata\n\t\t\t# for each from/to line split it at the wrap column to form\n\t\t\t# list of text lines.\n\t\t\tfromlist, tolist = [], []\n\t\t\tself._split_line( fromlist, fromline, fromtext )\n\t\t\tself._split_line( tolist, toline, totext )\n\t\t\t# yield from/to line in pairs inserting blank lines as\n\t\t\t# necessary when one side has more wrapped lines\n\t\t\twhile fromlist or tolist:\n\t\t\t\tif fromlist:\n\t\t\t\t\tfromdata = fromlist.pop( 0 )\n\t\t\t\telse:\n\t\t\t\t\tfromdata = ( '', ' ' )\n\t\t\t\tif tolist:\n\t\t\t\t\ttodata = tolist.pop( 0 )\n\t\t\t\telse:\n\t\t\t\t\ttodata = ( '', ' ' )\n\t\t\t\tyield fromdata, todata, flag", "def set_initial_values(self):\n #Stores each line of the text file in a list\n self.text = []\n \n #Scrolling distance\n self.scroll = 0\n\n #Zooming level (font size) \n self.zoom = 12\n\n #Factor by which is decrement self.zoom\n self.factor = 0\n\n #Number of tabs spaces before a line\n self.indent = 0\n\n #Flag to only set up pango descriptions only once \n self.set_pc = 1\n\n #list of indetation level of all lines\n self.tab_index = []\n\n #Total line count\n self.line_count = 0\n\n #line number of line rendered off top of window \n self.min_text = 0\n #line number of line rendered off bottom of window \n self.max_text = 50\n\n #y position for cairo for the text at the top\n self.min_cairo = 20\n\n #y position for text at bottom\n self.max_cairo = 20\n\n #x positiong for indented text\n self.tab_cairo = 20", "def initialize(lines, dim):\n start_gen = defaultdict(int)\n for i, line in enumerate(lines):\n for j, letter in enumerate(line):\n if letter == \"#\":\n start_gen[(i, j) + (0,) * (dim - 2)] = 1\n return start_gen", "def _fill_template_text(\n self,\n template: Dict[Text, Any],\n template_vars: Dict[Text, Any]\n ) -> Dict[Text, Any]:\n line_text_keys = [\"text\", \"altText\", \"label\", \"uri\"]\n try:\n for key in line_text_keys:\n if key in template:\n template[key] = template[key].format(**template_vars)\n except KeyError as e:\n logger.exception(\n \"Failed to fill line template '{}'. \"\n \"Tried to replace '{}' but could not find \"\n \"a value for it. There is no slot with this \"\n \"name nor did you pass the value explicitly \"\n \"when calling the template. Return template \"\n \"without filling the template. 
\"\n \"\".format(template, e.args[0]))\n return template", "def postprocess_ini_section_items(items: Union[Mapping, Iterable]) -> Generator:\n splitter_re = re.compile('[\\n\\r\\t]+')\n if isinstance(items, Mapping):\n items = items.items()\n for k, v in items:\n if v.startswith('\\n'):\n v = splitter_re.split(v[1:])\n v = [vv.strip() for vv in v if vv.strip()]\n v = [vv for vv in v if not vv.startswith('#')] # remove commented lines\n yield k, v", "def process(raw):\n entry = { }\n cooked = [ ]\n\n for line in raw:\n line = line.strip()\n if len(line) == 0 or line[0]==\"#\" :\n continue\n parts = line.split(';')\n if len(parts) == 3:\n entry[\"description\"] = parts[0].strip() #adding key and values to the dict\n entry[\"long\"] = parts[1].strip()\n entry[\"lat\"] = parts[2].strip()\n cooked.append(entry) #add this dict entry into the array\n entry = { }\n continue\n else:\n raise ValueError(\"Trouble wiht line: '{}'\\n\".format(line))\n \n return cooked #returning an array of dicts", "def _finalize_strokes(self, strokes, lines=None):\n for i, offsets in tqdm(enumerate(strokes)):\n if lines and not lines[i]:\n print(\"Empty line? Stroke:\")\n print(offsets[:10])\n continue\n\n offsets[:, :2] *= 1.5\n curr_strokes = drawing.offsets_to_coords(offsets)\n curr_strokes = drawing.denoise(curr_strokes)\n curr_strokes[:, :2] = drawing.align(curr_strokes[:, :2])\n\n # Normalize\n curr_strokes[:, 1] -= np.min(curr_strokes[:, 1])\n max_y = np.max(curr_strokes[:, 1])\n if max_y:\n curr_strokes[:, :2] /= max_y\n else:\n warnings.warn(f\"max y is zero {curr_strokes}\")\n\n # Convert end points to start points\n #curr_strokes = eos_to_sos(curr_strokes)\n\n yield curr_strokes", "def make_loop_careduce(loop_orders, dtypes, loop_tasks, sub):\r\n\r\n def loop_over(preloop, code, indices, i):\r\n iterv = 'ITER_%i' % i\r\n update = \"\"\r\n suitable_n = \"1\"\r\n for j, index in enumerate(indices):\r\n var = sub['lv%i' % j]\r\n update += \"%(var)s_iter += %(var)s_jump%(index)s_%(i)s;\\n\" % locals()\r\n if index != 'x':\r\n suitable_n = \"%(var)s_n%(index)s\" % locals()\r\n return \"\"\"\r\n %(preloop)s\r\n for (int %(iterv)s = %(suitable_n)s; %(iterv)s; %(iterv)s--) {\r\n %(code)s\r\n %(update)s\r\n }\r\n \"\"\" % locals()\r\n\r\n preloops = {}\r\n for i, (loop_order, dtype) in enumerate(zip(loop_orders, dtypes)):\r\n for j, index in enumerate(loop_order):\r\n if index != 'x':\r\n preloops.setdefault(j, \"\")\r\n preloops[j] += (\"%%(lv%(i)s)s_iter = (%(dtype)s*)(PyArray_DATA(%%(lv%(i)s)s));\\n\" % locals()) % sub\r\n break\r\n else: # all broadcastable\r\n preloops.setdefault(0, \"\")\r\n preloops[0] += (\"%%(lv%(i)s)s_iter = (%(dtype)s*)(PyArray_DATA(%%(lv%(i)s)s));\\n\" % locals()) % sub\r\n\r\n if len(loop_tasks) == 1:\r\n s = preloops.get(0, \"\")\r\n else:\r\n s = \"\"\r\n for i, (pre_task, task), indices in reversed(zip(xrange(len(loop_tasks) - 1), loop_tasks, zip(*loop_orders))):\r\n s = loop_over(preloops.get(i, \"\") + pre_task, s + task, indices, i)\r\n\r\n s += loop_tasks[-1]\r\n return \"{%s}\" % s", "def _prepare_invoice_lines(self, exchange_line, order_line):\n invoice_type = {\n 'sale.order.line': {\n 'higher': 'out_invoice', 'lower': 'out_refund',\n 'type': 'sale', 'field': 'exchange_sale_line_id'\n },\n 'purchase.order.line': {\n 'higher': 'in_invoice', 'lower': 'in_refund',\n 'type': 'purchase', 'field': 'exchange_purchase_line_id'\n },\n }\n product = exchange_line.exchange_product_id or exchange_line.product_id\n data = {\n 'invoice_type': False,\n 'values': {\n 'product_id': 
product.id,\n 'quantity': exchange_line.quantity,\n 'name': 'Exchange for [%s]' % exchange_line.product_id.display_name,\n }\n }\n if exchange_line.exchange_product_id or \\\n exchange_line.price_subtotal > order_line.price_subtotal:\n data['invoice_type'] = invoice_type[order_line._name]['higher']\n elif exchange_line.price_subtotal < order_line.price_subtotal:\n data['invoice_type'] = invoice_type[order_line._name]['lower']\n else:\n return {}\n data[invoice_type[order_line._name]['type']] = order_line.order_id\n data['values'][invoice_type[order_line._name]['field']] = order_line.id\n data['values']['price_unit'] = exchange_line.price_unit\n # TODO i think we should take the different between prices NOT the all price\n # abs(exchange_line.price_unit - order_line.price_unit)\n return data", "def set_all_lines_to_initial_positions(self):\n self.lines[1] = [None for _ in range(self.lines[0].__len__())]\n for line_no in range(0, self.lines[0].__len__()):\n self[line_no].move_to(\n self.get_center() + self.lines_initial_positions[line_no]\n )\n return self", "def preprocess(self, code, line_no=1):\n\n debug(\"PARSING PRAGMAS\", self)\n\n new_code = {}\n\n for m_begin in self.leaderPragmaRE().finditer(code):\n loop_code=''\n pos = m_begin.start()\n pos_end = m_begin.end()\n pragma_line = m_begin.group()\n # Extract the loops\n\n m_end = self.__trailer_pragma_re.search(code, pos)\n if m_end:\n loop_code = code[pos_end:m_end.start()]\n else:\n err('Did not find expected #pragma orio loop end', doexit=True)\n debug('Found Orio pragma in position %d: %s. Loop:%s' % (pos, pragma_line, loop_code), self)\n self.annotation_counter += 1\n\n # parse the loop\n line_no = code[:pos_end + 1].count('\\n')\n\n # stmts = parser.getParser(line_no).parse(loop_code,tracking=1,debug=1)\n stmts = parser.getParser(line_no).parse(loop_code)\n debug(\"Parsed pragma-annotated loop:\\n %s\" % stmts, self, level=4)\n\n # Extract information needed for generating the annotation\n loop_info = LoopInfoVisitor()\n loop_info.visit(stmts[0])\n debug(\"LOOP info: bounds={}, maxnest={}, vars={}\".format(\n repr(loop_info.loop_bounds), loop_info.maxnest, repr(loop_info.vars)),self)\n \"\"\"Example loop_info:\n #pragma orio loop begin \"C = C * beta\". 
Loop:\n for (i = 0; i < n1; i++) {\n for (j = 0; j < n2; j++) {\n C[ldc * i + j] *= beta;\n }\n }\n ('LOOP inFo:', {'i': ('0', 'n1'), 'j': ('0', 'n2')}, 2, set(['n1', 'n2']))\n \"\"\"\n\n # Process the AST to generate the annotation and the associated tuning spec\n # This also updates the tuning parameters self.tspec_params\n ann = self._generate_annotation(stmts[0],loop_info)\n new_code[(pos, pos_end)] = ann\n\n\n # Insert the annotation in the place of the #pragma orio begin loop\n annotated_code = ''\n prev = 0\n for (pos, pos_end), ann in sorted(new_code.items()):\n annotated_code += code[prev:pos] + ann\n prev = pos_end + 1\n annotated_code += code[prev:]\n annotated_code = annotated_code.replace('#pragma orio loop end', '/*@ end @*/')\n #debug('Annotated code:\\n{}'.format(annotated_code),self)\n\n # Generate the tuning spec for this file\n tuning_spec = self._generate_tuning_spec()\n debug('Tuning spec:\\n{}'.format(tuning_spec), self)\n return annotated_code", "def _initialize_attributes(self, string_as_file):\n for row in string_as_file:\n first = row[0]\n second = row[1]\n third = row[3]\n match first:\n case 'quadrat':\n self.quadrat = { 'id': second, 'comment': third }\n case 'waypoint':\n self.waypoint = { 'name': second, 'comment': third }", "def _init_outliners(self):\n\n for outliner in self._outliners.values():\n outliner.refresh()", "def setUp(self):\r\n self.map_f = map_lines.split('\\n')\r\n self.dm_f = dm_lines.split('\\n')\r\n self.too_many_colors_map_f = too_many_colors_map_lines.split('\\n')", "def Flowline_CSV(filename, nlines=None, has_width=False, flip_order=True):\n \n f = open(filename,'r')\n \n header = f.readline() #header line\n hdr = header.strip('\\r\\n')\n keys = hdr.split(',') #get names of variables\n #keys[-1] = keys[-1].strip('\\r\\n')\n \n data = {k : [] for k in keys} #end of line has hidden characters, so 'point_m' does not get read\n #data['Line number'] = []\n data['Length_ID'] = collections.OrderedDict() #new dictionary that counts how many points (i.e. lines of file) are in each flowline. 
Must be ordered for later iteration!\n #if nlines is not None:\n # data['Lineslist'] = [[] for k in range(nlines)] \n data['Lineslist'] = [] #initialize as empty list\n \n lines = f.readlines()\n f.close()\n \n temp = []\n j = 0\n for i,l in enumerate(lines):\n linstrip = l.strip('\\r\\n')\n parts = linstrip.split(',')\n \n #data['Line-number'].append(parts[0])\n #data['x-coord'].append(parts[1])\n #data['y-coord'].append(parts[2])\n \n x_coord = float(parts[1])\n y_coord = float(parts[2])\n \n if parts[0] not in data['Length_ID'].keys(): #finding out where lines separate \n temp = []\n data['Lineslist'].append(temp) #initialize new empty array that can be modified in-place later\n data['Length_ID'][parts[0]] = 1\n j+=1 \n else:\n data['Length_ID'][parts[0]] += 1\n #if xbounds[0]<x_coord<xbounds[1]: #taking out values outside of map area\n # if ybounds[0]<y_coord<ybounds[1]: \n \n if has_width:\n width = float(parts[3])\n temp.append((x_coord, y_coord, width))\n else:\n temp.append((x_coord, y_coord))\n \n data['Lineslist'][j-1] = np.array(temp) #need to modify an existing array rather than append to keep correct indexing\n\n #data['Lineslist'][j] = np.array(temp) \n \n if nlines is None:\n nlines = len(data['Length_ID'].keys())\n \n if flip_order: \n centrelines_list = [np.array(data['Lineslist'][j])[::-1] for j in range(nlines)] #making arrays, reversed to start at terminus rather than peak\n else:\n centrelines_list = [np.array(data['Lineslist'][j]) for j in range(nlines)] # arrays already start at terminus\n\n \n return centrelines_list", "def createLineItems(self):\n text_data = [\"Test name\", \"Measured result\", \"Expected\",\n \"Units\", \"Status\"]\n\n d = []\n font_size = 9\n \n centered = ParagraphStyle(name=\"centered\", alignment=TA_CENTER)\n \n for text in text_data:\n ptext = \"<font size=%s><b>%s</b></font>\" % (font_size, text)\n p = Paragraph(ptext, centered)\n d.append(p)\n \n data = [d]\n \n line_num = 1\n \n formatted_line_data = []\n \n for x in range(10):\n line_data = [\"TX output power\", \"20\", \n \"20\", \"dBm\", \"Pass\"]\n \n for item in line_data:\n ptext = \"<font size=%s>%s</font>\" % (font_size-1, item)\n p = Paragraph(ptext, centered)\n formatted_line_data.append(p)\n data.append(formatted_line_data)\n formatted_line_data = []\n line_num += 1\n \n table = Table(data, colWidths=[200, 100, 100, 100, 60])\n \n self.story.append(table)", "def buildFromPattern(self,p):\n ## reformalute the pattern with complete values: start:stop:step for each dimension (2D)\n ## split with ',' : then by : if just ':' NoneNoneNone, \n\n for index,lDicts,lFields in p:\n# print index, lFields\n self._lLabellingInstruction.append((index,lDicts,lFields))", "def holepolylinedictarraycopy(holepolylinedict):\r\n holepolylinearraydict={}\r\n for e in holepolylinedict: #对通孔图层多段线字典进行遍历,将里面的多段线向上阵列\r\n holepolylinedataset=[]\r\n for row in range(0,globalconfig.Y_ARRAY_NUM): \r\n holepolylinedataset.extend(datasetjustcopy(holepolylinedict[e], 1, 0, globalconfig.Y_LENGTH/globalconfig.CENTER_RATIO*row))\r\n holepolylinearraydict[e]=holepolylinedataset\r\n return holepolylinearraydict", "def preparing(fasta_list, pdb_dict):\n for item1 in fasta_list:\n matchObj = re.search( '^(.*)_([a-zA-Z0-9])$', item1[0])\n fasta1= item1[1]\n if matchObj:\n original_name1= matchObj.group(1)\n original_structure1=pdb_dict[original_name1]\n chain_1= matchObj.group(2) \n yield fasta1, [original_structure1, chain_1]", "def setup_lines(self):\n self.center_lines()\n self.space_lines()", "def 
render_buffer(self, lines):\n for line in lines:\n try:\n yield self.render_line(line.body)\n except KeyError:\n self.log.exception(\"Rendering exception\")", "def init_line_list():\n # Get str lengths from defs\n len_line = defs.str_len()['ion']\n len_src = defs.str_len()['Source']\n # Load sources to check\n sources = arcl_io.load_source_table()\n src_files = sources['File'].data\n if len(src_files[0]) > len_src:\n raise ValueError(\"Source filename now exceeds table. Should fix source name\")\n dummy_src = str('#')*len_src\n # Arc Line name\n dummy_line = str('#')*len_line\n #\n\n # Dict for Table\n idict = OrderedDict()\n idict['ion'] = dummy_line\n idict['wave'] = 0.\n idict['NIST'] = 0\n idict['Instr'] = 0 # Flag for instrument\n idict['amplitude'] = 0\n idict['Source'] = dummy_src\n\n # Table\n tkeys = idict.keys()\n lst = [[idict[tkey]] for tkey in tkeys]\n init_tbl = Table(lst, names=tkeys)\n\n # Return\n return init_tbl", "def reformat_data(inp):\n lines = iter(inp)\n\n # First, find the start of the map records, which we assume for the moment\n # is the line that starts with 'CASEID'.\n for line in lines:\n if line.startswith('CASEID'):\n break\n else:\n print(\"CASEID record not found\")\n return\n\n while True:\n specline = line.strip()\n values = []\n for line in lines:\n if line[0].strip():\n # Start of next block.\n break\n values.append(line.strip())\n yield reformat_block(specline, values)\n if not line[0].strip() or line.startswith('-------------'):\n # End of data, because either we've seen the line introducing\n # the footer, or 'line' still has the last line in it, that\n # didn't have a non-blank first character.\n break", "def lineBuilders() :\n return dict(_lineBuilders)", "def _generate_template(dictionary):\n task_dict = dict(dictionary)\n lines = []\n for key in sorted(TaskInfo._READ_ONLY_FIELDS):\n if key not in task_dict:\n continue\n\n value = TaskInfo._dpop(task_dict, key)\n lines.extend([\n \"# {}:\".format(key),\n \"# {}\".format(\"\\n#\".join(value.splitlines())),\n \"\",\n ])\n\n for key in sorted(task_dict.keys()):\n lines.extend([\n \"{}:\".format(key),\n str(task_dict[key]),\n \"\",\n ])\n\n return \"\\n\".join(lines)", "def get_loops(self, obj):\n for k, v in obj.items():\n self._loop_name.append(k)\n self._loop_name_aux.append(k)\n in_k, in_v = list(v.items())[-1]\n n = 0\n while not isinstance(in_v, str):\n n = n + 1\n k = '+' * n + ' ' + in_k\n self._loop_name.append(in_k)\n self._loop_name_aux.append(k)\n in_k, in_v = list(in_v.items())[-1]\n if (n > self._max_level):\n self._max_level = n", "def prepare_data(self, lines: List[str]) -> List[str]:\n if self.is_tokenized:\n if self.parser == \"spacy\":\n lines = [l.split() for l in lines]\n elif self.parser == \"udpipe\":\n lines = [[l.split()] for l in lines]\n\n return lines", "def mix_iterator(self):\n self.job = OrderedDict()\n for list_i in self.grid_iterator():\n # Pick the values to be used in this run\n for (k, i) in zip(self.table.keys(), list_i):\n self.job[k] = self.table[k][i]\n # Do the string replace operations on the values themselves\n self.expand_values()\n yield self.job", "def process_lines(self, lines):\n line_index = 0\n n_lines = len(lines)\n while line_index < n_lines:\n if lines[line_index].startswith(\"HIERARCHY\"):\n line_index = self._read_skeleton(lines, line_index, n_lines)\n if lines[line_index].startswith(\"MOTION\"):\n self._read_frametime(lines, line_index+2)\n line_index = self._read_frames(lines, line_index+3, n_lines)\n else:\n line_index += 1", "def 
preprocess_map(map_dict):\n\n for city_name in map_dict:\n ways = map_dict[city_name]['way']\n nodes = map_dict[city_name]['node']\n polylines = []\n polylines_dict = {}\n for way in ways:\n polyline = []\n points = way['nd']\n points = [nodes[int(point['@ref'])] for point in points]\n point_pre = None\n for i, point in enumerate(points):\n if i > 0:\n vector = [float(point_pre['@x']), float(point_pre['@y']), float(point['@x']), float(point['@y'])]\n polyline.append(vector)\n point_pre = point\n\n if len(polyline) > 0:\n index_x = round_value(float(point_pre['@x']))\n index_y = round_value(float(point_pre['@y']))\n if index_x not in polylines_dict:\n polylines_dict[index_x] = []\n polylines_dict[index_x].append(polyline)\n polylines.append(polyline)\n\n map_dict[city_name]['polylines'] = polylines\n map_dict[city_name]['polylines_dict'] = polylines_dict", "def _build_list(self, lines, assays):\n # keep place in assays sequence\n assay_iter = iter(assays)\n for line in lines:\n # assume building new assays\n build = True\n for assay in assay_iter:\n if assay.line_id == line.pk:\n # when assay sequence overlaps line sequence, assays win\n yield assay\n # don't build assays once existing assays found\n build = False\n continue\n elif build:\n yield self._build_assay(line)\n # put current assay back into iteration\n assay_iter = chain([assay], assay_iter)\n break\n else:\n # assay sequence exhausted\n if build:\n yield self._build_assay(line)", "def preprocess_ini_section_items(items: Union[Mapping, Iterable]) -> Generator:\n if isinstance(items, Mapping):\n items = items.items()\n for k, v in items:\n if isinstance(v, list):\n v = '\\n\\t' + '\\n\\t'.join(v)\n yield k, v", "def expand_values(self):\n for k, v in self.job.items():\n foundkey = re.search(self.key_pattern, v)\n # This is done iteratively so that it doesn't matter what order\n # lines appear in a bake parameter file\n while foundkey:\n v = v.replace(\n foundkey.group(0),\n self.job[foundkey.group(0)])\n foundkey = re.search(self.key_pattern, v)\n self.job[k] = v", "def polylinedictarraycopy(d):#d——原始图层多段线字典 \r\n dictlist=[]\r\n ratiolist=[] #放缩率列表\r\n rationumaccumulationlist=[] #放缩率数量累加列表\r\n \r\n eachrationum=globalconfig.X_ARRAY_NUM//globalconfig.RATIO_NUM\r\n leftrationum=globalconfig.X_ARRAY_NUM%globalconfig.RATIO_NUM\r\n \r\n eachrationumlist=[eachrationum]*globalconfig.RATIO_NUM #各个放缩率对应数量的列表\r\n \r\n for i in range((globalconfig.RATIO_NUM-1)//2-(leftrationum-1)//2,(globalconfig.RATIO_NUM-1)//2-(leftrationum-1)//2+leftrationum):\r\n eachrationumlist[i]=eachrationumlist[i]+1 #将整除后的余值加入到靠中间放缩率的方案中。\r\n \r\n rationumaccumulationlist.append(0) \r\n \r\n for i in range(1,globalconfig.RATIO_NUM): #计算放缩率数量累加列表\r\n rationumaccumulationlist.append(rationumaccumulationlist[i-1]+eachrationumlist[i-1])\r\n \r\n for i in range(0,globalconfig.RATIO_NUM): #计算放缩率列表\r\n ratiolist.append((globalconfig.CENTER_RATIO-((globalconfig.RATIO_NUM+1)//2-1)*globalconfig.RATIO_DIFF)+i*globalconfig.RATIO_DIFF) \r\n \r\n for i in range(0,globalconfig.RATIO_NUM): #每种放缩率\r\n for j in range(0,eachrationumlist[i]): #每种放缩率对应数量\r\n newdict={}\r\n for e in d: #将字典中值即每一图层对应的多段线列表进行复制并移动到指定位置\r\n newdict[e]=polylinedatasetarraycopy(d[e],ratiolist[i],globalconfig.CUTLINE_X_OFFSET+globalconfig.X_BLANK+(rationumaccumulationlist[i]+j+0.5)*globalconfig.X_LENGTH/globalconfig.CENTER_RATIO,globalconfig.CUTLINE_Y_OFFSET+globalconfig.Y_BLANK+0.5*globalconfig.Y_LENGTH/globalconfig.CENTER_RATIO,e,len(dictlist)) \r\n 
#newdict.append([e,polylinedatasetarraycopy(d[e],ratiolist[i],globalconfig.CUTLINE_X_OFFSET+globalconfig.X_BLANK+(rationumaccumulationlist[i]+j+0.5)*globalconfig.X_LENGTH/globalconfig.CENTER_RATIO,globalconfig.CUTLINE_Y_OFFSET+globalconfig.Y_BLANK+0.5*globalconfig.Y_LENGTH/globalconfig.CENTER_RATIO,e,len(dictlist))])\r\n dictlist.append(newdict) \r\n return (dictlist,ratiolist,eachrationumlist)", "def _recipe_details_generator(self, converted_content, overview_recipe):\n def get_cooking_shop_strings(lines):\n ret = []\n buf = None\n is_recipe_step_area = False\n for l in lines:\n if re.search(\"軒目\", l.strip()) or re.match(r\"^[①-⑳*].*『.*』\", l.strip()) or re.match(r\"^[①-⑳*].*「.*」\", l.strip()):\n if buf:\n ret.append(buf)\n buf = l.strip()\n continue\n\n if re.search(\"^(料理|万能調味料)\", l.strip()):\n is_recipe_step_area = False\n\n if re.search(\"^材料\", l.strip()):\n title, materials = re.search(\"(材料)(.*)\", l.strip()).groups()\n # buf += \"\\n\" + \"\\n\".join(l.strip().split(None, 1))\n buf += \"\\n\" + title + \"\\n\" + materials.strip()\n continue\n\n if re.search(\"^作り方\", l.strip()):\n is_recipe_step_area = True\n title, recipe_steps = re.search(\"(作り方)(.*)\", l.strip()).groups()\n # buf += \"\\n\" + \"\\n\".join(l.strip().split(None, 1))\n buf += \"\\n\" + title + \"\\n\" + recipe_steps.strip()\n continue\n \n if buf:\n if is_recipe_step_area:\n if re.match(r\"^[①-⑳*]\", l.strip()):\n buf += \"\\n\" + l.strip()\n else:\n buf += l.strip()\n else:\n buf += \"\\n\" + l.strip()\n if buf:\n ret.append(buf)\n\n return ret\n \n \n for ii, l in enumerate(converted_content.splitlines()):\n if ii == 1:\n overview_recipe.cooking_name_sub = l.strip()\n continue\n \n if -1 < l.find(\"初回放送\"):\n overview_recipe.program_date = dateutil.parser.parse(\"/\".join(re.search(r\"(\\d+)\\D+(\\d+)\\D+(\\d+)\\D+\", l).groups()))\n break\n\n cooking_shop_strings = get_cooking_shop_strings(converted_content.splitlines())\n\n logger.debug(\"-\" * 20)\n logger.debug(cooking_shop_strings)\n for shop_string in cooking_shop_strings:\n recipe_shop = None\n recipe = None\n is_material_area = False\n is_recipe_step_area = False\n for l in shop_string.splitlines():\n if len(l.strip()) == 0:\n continue\n \n if is_material_area == False and is_recipe_step_area == False:\n if re.search(\"軒目\", l.strip()) or re.match(r\"^[①-⑳*].*『.*』\", l.strip()) or re.match(r\"^[①-⑳*].*「.*」\", l.strip()):\n recipe_shop = copy.deepcopy(overview_recipe)\n recipe = None\n \n m = re.search(r\"「(.*)」\", l)\n if m:\n recipe_shop.cooking_name_sub += \"/\" + m.group(1)\n else:\n m2 = re.search(r\"『(.*)』\", l)\n if m2:\n recipe_shop.cooking_name_sub += \"/\" + m2.group(1)\n \n continue\n \n if re.search(\"^(料理|万能調味料)\", l.strip()):\n is_material_area = False\n is_recipe_step_area = False\n if recipe:\n yield recipe\n\n if recipe_shop:\n recipe = copy.deepcopy(recipe_shop)\n else:\n recipe = copy.deepcopy(overview_recipe)\n \n if -1 < l.find(\":\"):\n recipe.cooking_name = l.split(\":\")[1].strip()\n elif -1 < l.find(\":\"):\n recipe.cooking_name = l.split(\":\")[1].strip()\n elif re.search(r\"^(料理|万能調味料)[①-⑳]\", l.strip()):\n # https://www.nhk.or.jp/program/manpuku/recipe/dg0_200115.pdf\n # 料理①カルパッチョ\n recipe.cooking_name = l.strip()[3:].strip()\n else:\n recipe.cooking_name = l.split(None, 1)[1].strip()\n continue\n \n if re.search(\"^材料\", l.strip()):\n is_material_area = True\n is_recipe_step_area = False\n if l.strip() == \"材料\":\n continue\n \n if re.search(\"^作り方\", l.strip()):\n is_material_area = False\n is_recipe_step_area = True\n if 
l.strip() == \"作り方\":\n pass\n else:\n l = l.replace(\"作り方\", \"\", 1)\n # recipeがNoneの場合はエラーとして検出したい\n recipe.recipe_steps.append(RecipeText(l.strip()))\n continue\n \n \n if is_material_area:\n for material in l.strip().split(\"、\"):\n material = material.strip()\n if len(material):\n if material.startswith(\"(\"):\n recipe.materials.append(RecipeText(material))\n else:\n recipe.materials.append(RecipeText(material.replace(\"(\", \": \").replace(\")\", \"\")))\n \n if is_recipe_step_area:\n recipe.recipe_steps.append(RecipeText(l.strip()))\n if recipe:\n yield recipe", "def clean_line_generator_v1(df_pkl=None):\n\tdata_df = get_df(df_pkl)\n\t\n\twith open('/home/sus118/rdoc_w2v/data/one-abstract-per-line.txt', 'w') as f:\n\t\tcount = 0\n\t\tfor abst in iter(data_df.content):\n\t\t\tif not abst:\n\t\t\t\tcontinue\n\t\t\tabst = clean_sent(abst)\n\t\t\tf.write(abst+'\\n')\n\t\t\tcount+=1\n\t\t\tif count%10000==0:\n\t\t\t\tprint(f'{count} done')", "def represent_loopdict(self, data, flow_style=False):\n ## self is the yaml.Representer object\n\n # Prepare a dict without loopvars or attributes\n dnorm = data.copy()\n for loop in data.loops:\n for k in loop[0]:\n dnorm.pop(k, None) # None because of possible duplicates\n for att in loop[1]:\n dnorm.pop(k+att, None)\n origloops = dnorm.pop('=loops=', None)\n # Output the reduced dict\n node, value = prepare_mappingnode(self, dnorm)\n # Output loops\n loops_key = self.represent_data('=loops=')\n lseq = []\n for loop in data.loops:\n cols = list(loop[0]) # New copy to avoid YAML references\n lmap = {'$cols' : cols }\n for att in loop[1]:\n attlist = []\n for k in loop[0]:\n attlist.append(data.get(k+att))\n lmap['+'+att] = attlist\n valseq = []\n for i in range(len(data[loop[0][0]])):\n for k in loop[0]:\n valseq.append(data[k][i])\n lmap['~vals'] = ColumnSequence(valseq, len(loop[0]))\n lseq.append(lmap)\n if origloops:\n lseq.append({'=loops=.orig': origloops})\n value.append((loops_key, self.represent_data(lseq)))\n return node", "def _fill_template(\n self,\n template: Dict[Text, Any],\n template_vars: Dict[Text, Any]\n ) -> Dict[Text, Any]:\n\n line_object_keys = [\"quickReply\", \"items\", \"action\", \"template\", \"actions\"]\n\n if type(template) == list:\n for item in template:\n self._fill_template(item, template_vars)\n else:\n self._fill_template_text(template, template_vars)\n for key in line_object_keys:\n if key in template:\n self._fill_template(template[key], template_vars)\n\n return template", "def _init():\n line.set_data([], [])\n return line,", "def _prepare(self):\n for n in range(4):\n self._code += str(random.randint(1, 9))", "def init(self) -> None:\n self.started = False\n self.lines = []\n self.text = ''\n self.graphics = ''\n self.ids = {}\n self.first_line_added = False\n\n self.used_fonts = set()\n self.current_line_used_fonts = set()\n self.current_height = 0\n self.lines = []\n\n line_width = self.width - (self.indent if self.is_first_line else 0)\n self.current_line = PDFTextLine(\n self.fonts, line_width, self.text_align, self.line_height\n )\n\n self.last_indent = 0\n self.last_state = self.last_factor = self.last_fill = None\n self.last_color = self.last_stroke_width = None\n\n self.y_ = 0", "def prepare_map(self):\n for y, row in enumerate(self.contents):\n for x, tile in enumerate(row):\n bm = self.get_tile(tile)\n self.image[\n y * TILE_SIZE : (y + 1) * TILE_SIZE,\n x * TILE_SIZE : (x + 1) * TILE_SIZE,\n ] = bm", "def _create_examples_modified(self, lines, set_type):\n examples = []\n \n for (i, line) in 
enumerate(lines):\n a_label = int(line[\"label\"])\n q_type = line[\"type\"]\n if a_label == 0 and q_type != \"qLookup\":\n #print(\"discontinue\")\n continue\n sentence_number = 0\n premise_text = line[\"premise\"]\n modified_premise_text = re.sub(self.stage_name_pattern,\"\",premise_text)\n modified_premise_text = re.sub(self.w_patterns,\"\",modified_premise_text)\n hypothesis_text = line[\"hypothesis\"]\n hypothesis_text = re.sub(self.w_patterns,\"\",hypothesis_text)\n \n\n sentences = modified_premise_text.split('.')\n\n for j, sentence in enumerate(sentences):\n guid = \"\" + str(sentence_number) + \"\\t\" + str(i) + \"\\t\" + str(len(sentences)) + \"\\t\" + str(a_label)\n text_a = sentence\n text_b = hypothesis_text\n label = a_label\n sentence_number += 1\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n #print(\"16th sentence::\",sentences[16])\n\n return examples", "def process(raw):\n #global weekNum\n field = None\n entry = {}\n cooked = []\n number = -1\n\n for line in raw:\n log.debug(\"Line: {}\".format(line))\n line = line.strip()\n if len(line) == 0 or line[0] == \"#\":#if # is the first character, skip\n log.debug(\"Skipping\")\n continue\n parts = line.split(':')#split lines to before and after \":\"\n if len(parts) == 1 and field:#adds additional content to whatever the previously used field is\n entry[field] = entry[field] + line + \" \" \n continue\n if len(parts) == 2:#if there are 2 parts, the field is the first part and the content is the second part\n field = parts[0]\n content = parts[1]\n else:#if none of the above are correct there is an issue\n raise ValueError(\"Trouble with line: '{}'\\n\".format(line) +\n \"Split into |{}|\".format(\"|\".join(parts)))\n\n if field == \"begin\":#checking if this is the line with the start date\n try:#begin only triggers once (at least it should only trigger once)\n base = arrow.get(content, \"MM/DD/YYYY\")#get the date as an object named \"base\", will need to use this to determine start date and current week, arrow must have a \"current date\"?\n # base is the \"week 1\" date, DD = 1, DD + 7 = 2, DD + 14 = 3, DD + 21 = 4, etc\n #now i will make variables for the start date of each week, or find a way to take the difference between 2 dates\n #end = base#arrow.get(base, \"MM/DD/YYYY\")\n #end = end.shift(weeks=+10)\n #today = arrow.now()\n #today.format(\"MM/DD/YYYY\")\n #if today == base:\n # weekNum = 1\n #number = -1\n \"\"\"weeks = [base, base.shift(days=+7), base.shift(days=+14), base.shift(days=+21), base.shift(days=+28), base.shift(days=+35), base.shift(days=+42), base.shift(days=+49), base.shift(days=+56), base.shift(days=+63), base.shift(days=+70)]\n today = arrow.now()\n for i in range(0,9):\n if weeks[i] <= today <= weeks[i+1]:\n number = i+1\n if today > weeks[10]:\n number = 10\n elif today < weeks[0]:\n number = 0\n #base = arrow.format(\"MM/DD/YYYY\")\n else:\n raise ValueError(\"Big error calculating week\")\n #for index in range(1,70):\n # base = base.shift(days=+1)\n # if today == base:\n # weekNum = weekNum + (index % 7)\n # break \n base = base.format(\"MM/DD/YYYY\")\"\"\"\n except:\n raise ValueError(\"Unable to parse date {}\".format(content))#date is incorrectly formatted, should be MM/DD/YYYY\n #now I need to check if either of these weeks is the current week\n# for r in arrow.Arrow.span_range('day',\n elif field == \"week\":#this is the week number\n if entry:\n cooked.append(entry)\n entry = {}#make entry empty again\n #if content == currentWeekNum:\n 
#print(\"Content: \" + content)\n #print(\"Week Number: \" + currentWeekNum + \"\\n\")\n #print(\"Is Current Week?\" + currentWeekBool + \"\\n\")\n # currentWeekBool = True\n entry['topic'] = \"\"#these are all \"classes\" in the HTML document\n entry['project'] = \"\"\n entry['week'] = content#put the week number into the \"week\" field in the html document\n #entry['isCurrentWeek'] = currentWeekBool\n #currentWeekBool = False\n #if content == weekNum:\n # entry['bool'] = True\n #else:\n # entry['bool'] = True\n \"\"\"if \n if content == currentWeekNum:\n entry['isCurrentWeek'] = True\n else:\n entry['isCurrentWeek'] = False\"\"\"\n\n elif field == 'topic' or field == 'project':#from if len == 2, set the entry for the field to the content in the html doc\n entry[field] = content\n\n else:\n raise ValueError(\"Syntax error in line: {}\".format(line))\n #entryn = entry + \"\\n\"\n\t#cookedn = cooked + \"\\n\"\n\t#fieldn = field + \"\\n\"\n\t#print(\"Entry: \" + entryn)\n #print(\"Cooked: \" + cookedn)\n #print(\"Field: \" + fiieldn)\n if entry:#appends whatever added stuff to the whole docuemnt\n cooked.append(entry)\n\t#returns formatted document after it has been looped throughi\n #number = getWeekNum(raw)\n weeks = [base, base.shift(days=+7), base.shift(days=+14), base.shift(days=+21), base.shift(days=+28), base.shift(days=+35), base.shift(days=+42), base.shift(days=+49), base.shift(days=+56), base.shift(days=+63), base.shift(days=+70)]\n today = arrow.now()\n for i in range(0,9):\n if weeks[i] <= today <= weeks[i+1]:\n number = i+1\n return [cooked, i+1]\n if today < weeks[0]:\n number = 0\n else:\n number = 10\n return [cooked, number]", "def prepare_file(lines):\n return \" \".join(line.strip() for line in lines)", "def generate_from_keyed_source(self, keyed_source, key):\n\n for item in keyed_source:\n line = item[key]\n self.item_lookup[line] = item\n poem = self.add_line(line)\n if poem:\n yield tuple(self.item_lookup[k] for k in poem)", "def __construct_bill_lineitems(bill_lineitems: List[BillLineitem]) -> List[Dict]:\n lines = []\n\n for line in bill_lineitems:\n line = {\n 'Description': line.description,\n 'DetailType': 'AccountBasedExpenseLineDetail',\n 'Amount': line.amount,\n 'AccountBasedExpenseLineDetail': {\n 'AccountRef': {\n 'value': line.account_id\n },\n 'CustomerRef': {\n 'value': line.customer_id\n },\n 'ClassRef': {\n 'value': line.class_id\n }\n }\n }\n lines.append(line)\n\n return lines", "def AutoSplitlines(self):\n\t\ttry:\n\t\t\tends_with_cr = self.content.endswith('\\n')\n\t\t\tself.lines = self.content.splitlines()\n\t\t\tyield\n\t\tfinally:\n\t\t\tself.content = '\\n'.join(self.lines)\n\t\t\tif ends_with_cr:\n\t\t\t\tself.content += '\\n'", "def new_loop_packet(self, event):\n for obs_type in self.corrections:\n try:\n event.packet[obs_type] = eval(self.corrections[obs_type], None, event.packet)\n except (TypeError, NameError):\n pass\n except ValueError, e:\n syslog.syslog(syslog.LOG_ERR, \"engine: StdCalibration loop error %s\" % e)", "def groupLines(self, parts):\n\t\tline = []\n\t\thasDifference = False\n\t\treplaceline = lambda line: list((p[0], self.color_linedifferent if hasDifference and p[1] == self.color_normal else p[1]) for p in line)\n\t\tfor part in parts:\n\t\t\tif part[1] != self.color_normal:\n\t\t\t\thasDifference = True\n\t\t\tif not len(part[0]): continue\n\t\t\tline += [part]\n\t\t\tif part[0][-1] == \"\\n\":\n\t\t\t\tyield replaceline(line)\n\t\t\t\tline = []\n\t\t\t\thasDifference = False\n\t\tyield replaceline(line)", "def 
createDict(self):\n data = d.Dictionary.dictionary\n while True:\n filtered = [line.strip() for line in data if len(line) == self.wordLen]\n if len(filtered) == 0:\n self.setNewLen()\n else:\n break\n return filtered", "def linestyle_generator(colors=_colors, lines=_lines,\n markers=_markers, hollow_styles=_marker_types):\n\n # If both lines and markers are empty or None, do nothing\n is_nothing = False\n if not lines and not markers:\n is_nothing = True\n\n if colors:\n color_cycle = itertools.cycle(colors)\n else: # default line color is almost_black\n color_cycle = itertools.cycle([almost_black])\n\n if lines:\n line_cycle = itertools.cycle(lines)\n else: # empty list or None supplied, disable line connection\n line_cycle = itertools.cycle([''])\n\n if markers and hollow_styles: # solid and hollow markers\n full_markers = itertools.product(markers, hollow_styles)\n elif markers and not hollow_styles: # all solid markers\n full_markers = itertools.product(markers, [None])\n else: # no markers\n full_markers = itertools.product(['None'], [None])\n marker_cycle = itertools.cycle(full_markers)\n\n while True:\n if not is_nothing:\n # Use next() instead of .next to work with both Python 2 & 3\n color = next(color_cycle)\n linestyle = next(line_cycle)\n marker, hollow = next(marker_cycle)\n if hollow is None: # only filled markers\n mew = 1\n mec = color\n mfc = color\n elif hollow: # make hollow markers\n mew = 1\n mec = color\n mfc = 'None'\n else: # otherwise, make filled markers\n mew = 1\n mec = color\n mfc = color\n yield {'color': color, 'linestyle': linestyle,\n 'marker': marker, 'mew': mew, 'mec': mec, 'mfc': mfc}\n else:\n yield {}", "def __init__(self, lines):\n self.table = OrderedDict()\n\n # Load bp file, put lines in table, where the key is the key\n # and the value is the list of values on that bp line\n for line in lines:\n elements = line.split(';')\n self.table[elements[0]] = elements[1:]", "def line_replacer(config,change_this_line,key):\n for arg in config['HyperParameter'][key]: \n pattern=r'{}[ ]*=.*,'.format(arg)\n replace_value=config['HyperParameter'][key][arg][counter]\n if type(replace_value) is str:\n replace_value=\"'\"+replace_value+\"'\"\n change_this_line=re.sub(pattern,\"{}= {},\".format(arg,replace_value),change_this_line)\n return change_this_line", "def format(self, data):\r\n for name, value in sorted(data.items()):\r\n line = '{name} = {value}\\n'.format(\r\n name=name,\r\n value=value,\r\n )\r\n yield line", "def createAllDictionaries(self):\r\n self.makeSentenceLengths()\r\n self.makeWords()\r\n self.makeStems()\r\n self.makeGerund()\r\n self.makeWordLengths()", "def get_id2line(self):\n id2line = {}\n id_index = 0\n text_index = 4\n with open(self.movie_lines_filepath, 'r', encoding='iso-8859-1') as f:\n for line in f:\n items = line.split(self.DELIM)\n if len(items) == 5:\n line_id = items[id_index]\n dialog_text = items[text_index].strip()\n dialog_text = clean_text(dialog_text)\n id2line[line_id] = dialog_text\n return id2line", "def preprocess(\n self, data: List[Dict[str, Any]]\n ) -> Generator[Dict[str, Any], None, None]:\n raise NotImplementedError", "def prepare(self):\n\n for i in range(len(self.__corpora)):\n string = self.__corpora[i]\n string = sub(r'[\\n\\t]| {2,}', ' ', string.lower())\n string = sub(r'[^{0}]'.format(self.alphabet + ' '), '', string)\n\n if self.lang == 'uk':\n string = sub(r'[ґ]', 'г', string)\n\n elif self.lang == 'ru':\n string = sub(r'[ё]', 'е', string)\n\n self.__corpora[i] = string", "def 
_get_raw_extract_iter(self) -> Iterator[Dict[str, Any]]:\n row = self._alchemy_extractor.extract()\n while row:\n yield row\n row = self._alchemy_extractor.extract()", "def __fill_lip_lines(self, outer, inner):\n outer_curve = zip(outer[0], outer[1])\n inner_curve = zip(inner[0], inner[1])\n count = len(inner[0]) - 1\n last_inner = [inner[0][count], inner[1][count]]\n for o_point, i_point in itertools.zip_longest(\n outer_curve, inner_curve, fillvalue=last_inner\n ):\n line = scipy.interpolate.interp1d(\n [o_point[0], i_point[0]], [o_point[1], i_point[1]], 'linear')\n xpoints = list(np.arange(o_point[0], i_point[0], 1))\n self.lip_x.extend(xpoints)\n self.lip_y.extend([int(point) for point in line(xpoints)])\n return", "def parse(self, lines):\n # Keep count of the current line number.\n i = 0\n # list tables and content\n tables = dict()\n attr_param = list()\n\n skipped_lines = list() # DEBUG\n\n # Loop through all lines.\n for i in range(0, len(lines)):\n line_stripped = lineNormalise(lines[i])\n skip = True\n\n for keyword in self.target_keywords:\n\n # Look for keywords at the beginning of the line.\n if line_stripped.startswith(keyword):\n # print(\"{} : {}\".format(i, line_stripped)) # DEBUG\n skip = False\n\n # Found one, do parse\n expression = re.search(r'(\\w+) (\\w+)', line_stripped)\n if keyword is self.target_keywords[0]: # class/table\n # get table name\n table_name = expression.group(2)\n\n # add it in tables if not already in\n # tables (classes) may be at differant place in a PlantUML file\n if table_name not in tables:\n tables[table_name] = list()\n # print(\"Table : «{}» ajoutee\".format(expression.group(2))) # DEBUG\n print(\"{} : +table «{}»\".format(i, table_name)) # DEBUG\n\n elif keyword is self.target_keywords[1]: # primary key\n # import pdb; pdb.set_trace()\n # get related table\n attr_param = (re.sub(r'(pyk\\()|\\)|,|\\n', r' ', line_stripped).strip().split())\n tables[table_name].extend(attr_param)\n print(\"{} :\\t«{}» +{}\".format(i, table_name, attr_param)) # DEBUG\n\n elif keyword is self.target_keywords[2]: # foreign key\n # get related table\n attr_param = (re.sub(r'(fnk\\()|\\)|,|\\n', r' ', line_stripped).strip().split())\n tables[table_name].extend(attr_param)\n print(\"{} :\\t«{}» +{}\".format(i, table_name, attr_param)) # DEBUG\n\n\n elif keyword is self. target_keywords[3]: # primary foreign key\n # get related table\n attr_param = (re.sub(r'(pfk\\()|\\)|,|\\n', r' ', line_stripped).strip().split())\n tables[table_name].extend(attr_param)\n print(\"{} :\\t«{}» +{}\".format(i, table_name, attr_param)) # DEBUG\n\n else: # attribute\n # print(line_stripped) # DEBUG\n print(\"{} : \\t«{}» Attribute? 
{}\".format(i, line_stripped)) # DEBUG\n\n if skip:\n skipped_lines.append(i)\n\n print(\"\\nNumbers of tables : {}\\n\".format(len(tables)))\n pp = pprint.PrettyPrinter(indent=4, compact=True)\n print(\"Scraped data:\")\n pp.pprint(tables) # DEBUG\n print(\"\\nSkipped lines: {}\\n\".format(skipped_lines)) # DEBUG", "def repair_lines(self) -> None:\n if len(self.lines) == 0:\n self.create_lines()\n else:\n for line in self.lines:\n connection = line.connection\n start_component = self.components[connection.start_entity]\n end_component = self.components[connection.end_entity]\n start_pin_location = (\n start_component.location\n + start_component.pin_locations[connection.start_pin]\n )\n end_pin_location = (\n end_component.location\n + end_component.pin_locations[connection.end_pin]\n )\n\n # If the line can be straight we do that\n if (\n start_pin_location.x == end_pin_location.x\n or start_pin_location.y == end_pin_location.y\n ):\n line.locations = [start_pin_location, end_pin_location]\n\n if not (\n start_pin_location == line.locations[0]\n and end_pin_location == line.locations[-1]\n ):\n # Change locations of lines when components move\n if len(line.locations) < 4:\n # Add a bend if the line was previously straight\n x_midpoint = (start_pin_location.x + end_pin_location.x) / 2\n bend_start = Point(x_midpoint, start_pin_location.y)\n bend_end = Point(x_midpoint, end_pin_location.y)\n bends = [bend_start, bend_end]\n line.locations = [start_pin_location, *bends, end_pin_location]\n else:\n # Otherwise, just change the y of the existing points to match\n line.locations[0] = start_pin_location\n line.locations[1].y = start_pin_location.y\n line.locations[-2].y = end_pin_location.y\n line.locations[-1] = end_pin_location", "def prepare_data():\n user_name = os.environ.get('USER')\n traintest_corpus = ResumeCorpus('/Users/' + user_name + '/Documents/Data')\n random.shuffle(traintest_corpus.resumes)\n\n for resume in traintest_corpus.resumes:\n try:\n review_text = pre_processing(resume[0])\n review_text = \" \".join(review_text)\n data_dict['data'].append(review_text)\n data_dict['label'].append(resume[1])\n except:\n pass", "def _preprocess(self):\n for f in self._variables:\n self._path.joinpath(f).mkdir(parents=True, exist_ok=True)\n\n for i in tqdm(range(self._size)):\n linear, w = self._get_spectrograms(i)\n self._store_entry(i, linear, w)", "def genLoopPackets(self):\n\n for p in self.get_observations():\n ts = int(time.time() + 0.5)\n packet = pywws2weewx(p, ts,\n self._last_rain_loop, self._last_rain_ts_loop,\n self.max_rain_rate)\n self._last_rain_loop = packet['rainTotal']\n self._last_rain_ts_loop = ts\n if packet['status'] != self._last_status:\n log.info('station status %s (%s)' % \n (decode_status(packet['status']), packet['status']))\n self._last_status = packet['status']\n yield packet", "def task1_mapper():\n line_count = 0\n for line in sys.stdin:\n # Clean input and split it\n lines = line.strip().split(\",\")\n line_count += 1\n # Check that the line is of the correct format and filtering the HEADER record \n # If line is malformed, we ignore the line and continue to the next line\n if line_count == 1:\n continue\n else:\n if len(lines) != 12:\n continue\n \n category = lines[3].strip()\n videoid = lines[0].strip()\n country = lines[11].strip()\n k_key = category+','+videoid\n\n print(\"{}\\t{}\".format(k_key, country))", "def prepare_typerec_sample(self, line, for_training=True):\n sample = {}\n\n sample['text'] = line['text']\n #sample['text_tokenized'] = None # 
set by add_tokens()\n #sample['text_attention_mask'] = None # set by add_tokens()\n sample['item_name'] = line['item_name']\n #self.add_tokens(sample)\n #sample['text_mention_mask'] = None # set by add_mention_mask()\n #self.add_mention_mask(sample)\n sample['text_and_mention_tokenized'] = None # set by add_text_and_mention()\n sample['text_and_mention_mask'] = None # set by add_text_and_mention()\n self.add_text_and_mention(sample)\n\n if for_training:\n sample['item_id'] = line['item_id']\n sample['item_type'] = line['item_type']\n #sample['item_type_index'] = None # set by add_type_index()\n #self.add_type_index(sample)\n sample['item_type_onehot'] = None # set by add_type_onehot()\n self.add_type_onehot(sample)\n\n return sample", "def replace(lines):\n for index, line in enumerate(lines):\n if not line == '\\n':\n token_line = tokenizer.tokenize_line(line)\n for ind, tok in enumerate(token_line):\n if token_line[ind][1] in replacement_dic.keys() and token_line[ind][1] not in ignore_variable:\n if ind > 1 and token_line[ind-2][1] in import_list:\n continue\n if token_line[ind][0] == token.NAME and token_line[ind+1][1] == '(':\n continue\n token_line[ind][1] = replacement_dic.get(token_line[ind][1])\n\n lines[index] = tokenizer.untokenize_line(token_line)\n return lines", "def _process_line(line, status):\n\n if line.startswith(ADAPTER_LINE_STARTSWITH):\n status.add_block('adapter', 'name', line)\n return\n elif line.startswith(EXIT_LINE_STARTSWITH):\n status.consolidate()\n return\n\n key, value = [el.strip(' \\t\\r\\n') for el in line.split(':', 1)]\n\n if key in KEY_TO_CONTEXT.keys():\n status.add_block(KEY_TO_CONTEXT[key], key, value)\n else:\n status.set_property(key, value)", "def _split_by_keypair(self, osw_dict={}): \n lst = osw_dict\n keypair_dict = []\n for d in lst:\n if d['key'] == 'raw_line':\n keypair_lst = re.split(r',',d['value'])\n \n for k,v in keypair_lst:\n _d = [{'timestamp':d['timestamp'] , \n 'category': d['category'], \n 'sub_category': d['sub_category'], \n 'key': k, \n 'value': v}]\n keypair_dict.extend(_d)\n \n return keypair_dict", "def insert_lines(self, lines, color=defcolor):\n for i in range(len(lines)):\n self.insert_line(lines[i], 0, i*(self.font['height']+1), color)", "def collate_batch(self) -> Dict[str, Any]:\n pass", "def pre_process_data(linelist):\r\n for index in range(len(linelist)):\r\n if not linelist[index]:\r\n linelist[index] = '0'\r\n return linelist", "def _prepare_analytic_line(self, cr, uid, obj_line, context=None):\n return {'name': obj_line.name,\n 'date': obj_line.date,\n 'account_id': obj_line.analytic_account_id.id,\n 'unit_amount': obj_line.quantity,\n 'product_id': obj_line.product_id and obj_line.product_id.id or False,\n 'product_uom_id': obj_line.product_uom_id and obj_line.product_uom_id.id or False,\n 'amount': (obj_line.credit or 0.0) - (obj_line.debit or 0.0),\n 'general_account_id': obj_line.account_id.id,\n 'journal_id': obj_line.journal_id.analytic_journal_id.id,\n 'ref': obj_line.ref,\n 'move_id': obj_line.id,\n 'user_id': uid,\n }", "def prepare_dataset(self, data_raw):\n\n self._logger.debug(f'Preparing dataset ({len(data_raw)} lines)...')\n data = []\n line_count = 0\n sample_count = 0\n sample_count_failed = 0\n\n for line in tqdm(data_raw):\n line_count += 1\n #self._logger.debug(f'Line {line_count}/{len(data_raw)}')\n\n try:\n # TODO Call prepare_sample() here?\n sample = {}\n\n sample['text'] = line['text']\n sample['text_tokenized'] = None # set by add_tokens()\n sample['text_attention_mask'] = None # set by 
add_tokens()\n sample['item_name'] = line['string']\n self.add_tokens(sample)\n sample['text_mention_mask'] = None # set by add_mention_mask()\n self.add_mention_mask(sample)\n\n # Once for correct Wikidata item\n sample['item_id'] = line['correct_id']\n sample['item_pbg'] = self._pbg.get_item_embedding(line['correct_id'])\n sample['item_glove'] = np.empty((1, 900)) # TODO\n sample['answer'] = True\n data.append(sample)\n sample_count += 1\n\n # Once for wrong Wikidata item\n sample['item_id'] = line['wrong_id']\n sample['item_pbg'] = self._pbg.get_item_embedding(line['wrong_id'])\n sample['item_glove'] = np.empty((1, 900)) # TODO\n sample['answer'] = False\n data.append(sample)\n sample_count += 1\n\n except ValueError as e: # skip sample when there is no embedding found\n self._logger.info(str(e))\n sample_count_failed += 1\n continue\n\n self._logger.debug(f'Prepared {sample_count} samples from {line_count} lines (skipped {sample_count_failed} failed)')\n\n return data", "def dynamic_loop(loop_dict, cur_loop, loop_tmp, loop_result):\n max_loop_num = len(loop_dict) - 1\n for num in list(loop_dict.values())[cur_loop]:\n loop_tmp.append(num)\n if cur_loop == max_loop_num:\n loop_result.append([*loop_tmp])\n else:\n dynamic_loop(loop_dict, cur_loop+1, loop_tmp, loop_result)\n loop_tmp.pop()\n return loop_result", "def process(self):\n parser = csv.reader(self.reader,delimiter=self.delimiter_DIC[self.delim])\n firstRec = True\n for fields in parser:\n if firstRec:\n fieldNames = fields\n firstRec = False\n else:\n self.dicts.append({})\n for i,f in enumerate(fields):\n try:\n self.dicts[-1][fieldNames[i]] = f\n except:\n import pdb\n pdb.set_trace()\n if self.eng is \"spectrumMill\":\n for i,row in enumerate(self.dicts):\n fileSM = row[self.engine[self.eng][0]]\n acNoSM = row[self.engine[self.eng][1]]\n masSM=row[self.engine[self.eng][2]]\n chrgeSM=row[self.engine[self.eng][3]]\n preAmSM=row[self.engine[self.eng][4]].replace('(','').replace(')','')\n pepSM=row[self.engine[self.eng][5]]\n nAmSM=row[self.engine[self.eng][6]].replace('(','').replace(')','')\n modSM=row[self.engine[self.eng][7]].split('\\s')+row[self.engine[self.eng][8]].split('\\s')\n modLis = [mod.strip() for mod in modSM if mod!=' ']\n modSM = ';'.join(modLis)\n scoreSM=row[self.engine[self.eng][9]]\n descrimentSM=row[self.engine[self.eng][10]]\n if modSM !='':\n modPepInHupaFormat=self.modTermDic.spectrumMill(preAmSM,pepSM,nAmSM,modSM,self.eng)\n parsedData=acNoSM+'\\t'+masSM+'\\t'+chrgeSM+'\\t'+modPepInHupaFormat+'\\t'+scoreSM+'\\n'\n data = self.mapCaller(parsedData,self.eng)\n #print >>self.writer,data\n else:\n parsedData=acNoSM+'\\t'+masSM+'\\t'+chrgeSM+'\\t'+preAmSM+'.'+pepSM+'.'+nAmSM+'\\t'+'-'+'\\t'+scoreSM+'\\n'\n data = self.mapCaller(parsedData,self.eng)\n #print >>self.writer,data\n\n if self.eng is \"mascot\":\n \"\"\"\n In Mascot, under every gi (protein) corresponding peptide information will be there\n \"\"\"\n giFound=True\n for i,row in enumerate(self.dicts):\n if row[self.engine[self.eng][0]]!='':\n giAsKey = row[self.engine[self.eng][0]]\n giFound=False\n if giFound==False:\n massM=row[self.engine[self.eng][1]]\n chargeM=row[self.engine[self.eng][2]]\n preAmM=row[self.engine[self.eng][3]]\n pepM=row[self.engine[self.eng][4]]\n nAmM = row[self.engine[self.eng][5]]\n modM=row[self.engine[self.eng][6]]\n modSiteM=row[self.engine[self.eng][7]]\n scoreM=row[self.engine[self.eng][8]]\n evalM=row[self.engine[self.eng][9]]\n if modM !='':\n \"\"\"\n modificationFormat from modification.py creates a MASTER_UNIMOD 
dictionary \n Where all modifications of unimod would be available. \n At same time formatMod function in modificationFormat class would convert modification format \"\"\"\n modPepInHupaFormat=self.modTermDic.mascot(preAmM,pepM,nAmM,modSiteM,modM,self.eng)\n parsedData=giAsKey+'\\t'+massM+'\\t'+chargeM+'\\t'+modPepInHupaFormat+'\\t'+scoreM+'\\n'\n data=self.mapCaller(parsedData,self.eng)\n #print >>self.writer,data\n else:\n \n parsedData=giAsKey+'\\t'+massM+'\\t'+chargeM+'\\t'+preAmM+'.'+pepM+'.'+nAmM+'\\t'+'-'+'\\t'+scoreM+'\\n'\n data=self.mapCaller(parsedData,self.eng)\n #print >>self.writer,data\n\n if self.eng is \"inspect\":\n \"\"\"\n InSpect does not have mass information in TSV file\n So we need to fetch it from spectrum file (this is not yet done)\n \"\"\"\n for i,row in enumerate(self.dicts):\n data = row[self.engine[self.eng][0]]+'\\t'+row[self.engine[self.eng][1]]+'\\t'+row[self.engine[self.eng][2]]+'\\t'+row[self.engine[self.eng][3]]+'\\t'+row[self.engine[self.eng][4]]+'\\t'+row[self.engine[self.eng][5]]+'\\n'\n #return data\n #data = self.mapCaller(data)\n #self.writer.write(data)\n\n if self.eng is \"omssa\":\n #OMSSA csv doesnot contain start and last residue of the peptide, instead contains position. So start and last residue need to fetch from protein sequence\n for i,row in enumerate(self.dicts):\n giO = row[self.engine[self.eng][6]]\n massO= row[self.engine[self.eng][5]]\n chargeO=row[self.engine[self.eng][8]]\n preAmO = row[self.engine[self.eng][1]]#position in protein\n pepO = row[self.engine[self.eng][2]]\n nextAmO = row[self.engine[self.eng][3]] #position in protein\n scoreO= row[self.engine[self.eng][4]]\n modO=row[self.engine[self.eng][7]]\n if modO !='':\n #parsedData=giO+'#'+massO+'#'+chargeO+'#'+preAmO+'.'+pepO+'.'+nextAmO+'#'+modO+'#'+scoreO\n modPepInHupaFormat=self.modTermDic.omssa(preAmO,pepO,nextAmO,modO,self.eng)\n parsedData=giO+'\\t'+massO+'\\t'+chargeO+'\\t'+modPepInHupaFormat+'\\t'+scoreO+'\\n'\n self.mapCaller(parsedData,self.eng)\n else:\n parsedData=giO+'\\t'+massO+'\\t'+chargeO+'\\t'+preAmO+'.'+pepO+'.'+nextAmO+'\\t'+'-'+'\\t'+scoreO+'\\n'\n self.mapCaller(parsedData,self.eng)", "def _iterate_lines(cls, text) -> typing.Generator[str, None, None]:\n for line in text.split('\\n'):\n yield line, line.lstrip().startswith(cls._CHECKBOX)", "def code_input(dict_, i):\n for key in dict_:\n dict_[key] = i + dict_[key]\n return dict_" ]
[ "0.62352186", "0.5535244", "0.540953", "0.538531", "0.53788745", "0.5370846", "0.53634304", "0.5115916", "0.510933", "0.50691956", "0.5020476", "0.5014774", "0.50135577", "0.50128126", "0.5011278", "0.5003016", "0.49997228", "0.49952468", "0.49902463", "0.49880245", "0.49821338", "0.49678752", "0.4942249", "0.493176", "0.49271667", "0.4912079", "0.49110472", "0.49006313", "0.48940745", "0.48726735", "0.4859783", "0.48503965", "0.48493028", "0.48420706", "0.48391375", "0.48275712", "0.48268968", "0.48100066", "0.48085713", "0.48081422", "0.4805739", "0.48027074", "0.47940847", "0.47902605", "0.47747433", "0.47692537", "0.4765568", "0.4759024", "0.47537756", "0.47522584", "0.47500113", "0.47469053", "0.47382826", "0.47288793", "0.472311", "0.47226742", "0.4722418", "0.47202533", "0.4716946", "0.47159928", "0.4713898", "0.46922985", "0.46845403", "0.4679855", "0.46797508", "0.46726245", "0.46706736", "0.46666366", "0.46664524", "0.4659358", "0.46561256", "0.46548653", "0.46510002", "0.46498612", "0.4646639", "0.46406955", "0.46396023", "0.4633603", "0.46280223", "0.46187645", "0.46132192", "0.4608189", "0.4603608", "0.45970538", "0.45869657", "0.45867544", "0.45838037", "0.45835653", "0.45810688", "0.45777783", "0.45749372", "0.4573967", "0.45714584", "0.45699087", "0.45669797", "0.45648247", "0.45648098", "0.45645612", "0.45614603", "0.4561317" ]
0.7311217
0
Prepare looped datasets from looping lines.
def prepare_looped_datasets(self, alldict, allcombs):
    datasets_dict = dict()
    numcombs = len(allcombs)
    combct = 0
    while combct < numcombs:
        newdata = list(self.baseinput.data)
        loopedlines = dict()
        loopedlines = self.prepare_looped_lines(alldict, allcombs[combct])
        for lvalidx in loopedlines.keys():
            newdata[lvalidx] = loopedlines[lvalidx]
        datasets_dict[combct] = newdata
        combct = combct + 1
    return datasets_dict
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def data_preprocessing():\n lineid_content = get_lineid_content()\n print('Read movie_lines.txt file complete...')\n convos = get_convos()\n print('Read movie_conversations.txt file complete...')\n print('Building dataset')\n get_data(lineid_content, convos)", "def _prepare_sets(self):\n\n ds_images, ds_labels = self._load_images_labels()\n\n ds_images_2 = ds_images.take(self.val_count)\n ds_labels_2 = ds_labels.take(self.val_count)\n ds_images_1 = ds_images.skip(self.val_count)\n ds_labels_1 = ds_labels.skip(self.val_count)\n\n ds_1 = (ds_images_1, ds_labels_1)\n ds_2 = (ds_images_2, ds_labels_2)\n\n return ds_1, ds_2", "def create_data_generators(shuffle=True, novelty_type='normal', item_to_include='None',\n scale_level=1):\n\n total_noi_i = 10 # Number of processed images from one environemnt i\n noe = 1 # Numer of environments\n n_p = 32 # Patch size, patch --> n_p x n_p\n\n novelty = novelty_type\n datasets = []\n\n for i in range(noe):\n\n # Load only images of the environment which includes images of the stated novel item.\n if item_to_include is not None and novelty == 'novel_item':\n dataset_env_i = PolycraftDatasetWithSpecificItem(\n nov_type=novelty, noi=total_noi_i, env_idx=i, p_size=n_p, scale_factor=scale_level,\n item_name=item_to_include)\n datasets.append(dataset_env_i)\n # We only process the one environment with the item (maybe change this\n # if we have more than one environement per novel_item!?)\n break\n\n # No specific item given which should be included.\n else:\n dataset_env_i = PolycraftDatasetNoSpecificItem(\n nov_type=novelty, noi=total_noi_i, env_idx=i, p_size=n_p, scale_factor=scale_level)\n datasets.append(dataset_env_i)\n\n final_dataset = ConcatDataset(datasets)\n\n total_noi = len(final_dataset) # Total number of processed images from all datasets\n\n if(total_noi < 7):\n print('Number of samples too small for splitting dataset in training-/valid-/test set.')\n\n train_noi = int(0.7 * total_noi) # Number of images used for training (70 %)\n valid_noi = int(0.15 * total_noi) # Number of images used for validation (15 %)\n test_noi = total_noi - train_noi - valid_noi # Number of images used for testing (15 %)\n train_dataset, valid_dataset, test_dataset = torch.utils.data.random_split(\n final_dataset, [train_noi, valid_noi, test_noi])\n\n train_loader = DataLoader(train_dataset, batch_size=1, shuffle=True)\n valid_loader = DataLoader(valid_dataset, batch_size=1, shuffle=True)\n test_loader = DataLoader(test_dataset, batch_size=1, shuffle=True)\n\n return train_loader, valid_loader, test_loader", "def prepare_dataset(self, data_raw):\n\n self._logger.debug(f'Preparing dataset ({len(data_raw)} lines)...')\n data = []\n line_count = 0\n sample_count = 0\n sample_count_failed = 0\n\n for line in tqdm(data_raw):\n line_count += 1\n #self._logger.debug(f'Line {line_count}/{len(data_raw)}')\n\n try:\n # TODO Call prepare_sample() here?\n sample = {}\n\n sample['text'] = line['text']\n sample['text_tokenized'] = None # set by add_tokens()\n sample['text_attention_mask'] = None # set by add_tokens()\n sample['item_name'] = line['string']\n self.add_tokens(sample)\n sample['text_mention_mask'] = None # set by add_mention_mask()\n self.add_mention_mask(sample)\n\n # Once for correct Wikidata item\n sample['item_id'] = line['correct_id']\n sample['item_pbg'] = self._pbg.get_item_embedding(line['correct_id'])\n sample['item_glove'] = np.empty((1, 900)) # TODO\n sample['answer'] = True\n data.append(sample)\n sample_count += 1\n\n # Once for wrong Wikidata item\n 
sample['item_id'] = line['wrong_id']\n sample['item_pbg'] = self._pbg.get_item_embedding(line['wrong_id'])\n sample['item_glove'] = np.empty((1, 900)) # TODO\n sample['answer'] = False\n data.append(sample)\n sample_count += 1\n\n except ValueError as e: # skip sample when there is no embedding found\n self._logger.info(str(e))\n sample_count_failed += 1\n continue\n\n self._logger.debug(f'Prepared {sample_count} samples from {line_count} lines (skipped {sample_count_failed} failed)')\n\n return data", "def prepare_dataset(self, xs: List[str], ys: List[str], batch_size: int = None):\n\n if batch_size is None:\n batch_size = self.cM.batch_size\n\n examples = [data.Example.fromlist([x, y], self.data_fields) for x, y in zip(xs, ys)]\n\n dataset = data.Dataset(examples, fields=self.data_fields)\n\n iterator = data.BucketIterator(dataset, batch_size=batch_size, shuffle=False)\n\n return iterator", "def prepare_lines_data(self):\n for l_hd in self.hour_data:\n if not self.node_from or not self.node_to:\n print('ERROR! line %i-%i has no node(s)' % (self.node_from_code, self.node_to_code))\n if l_hd.state and self.node_from.get_node_hour_state(l_hd.hour) \\\n and self.node_to.get_node_hour_state(l_hd.hour):\n if not self.type:\n node_start = self.node_from_code\n node_finish = self.node_to_code\n base_coeff = 0\n k_pu = 0\n else:\n node_start = self.node_to_code\n node_finish = self.node_from_code\n base_coeff = self.node_to.voltage_class / self.node_from.voltage_class\n k_pu = math.sqrt(math.pow(self.kt_re, 2) + math.pow(self.kt_im, 2))\n lag = math.atan(self.kt_im / self.kt_re) if self.kt_re else 0\n\n self.eq_db_lines_data.append((\n l_hd.hour, node_start, node_finish, self.parallel_num, self.type,\n max(self.node_from.voltage_class, self.node_to.voltage_class), base_coeff,\n l_hd.r, l_hd.x, l_hd.g, -l_hd.b, k_pu, lag, -l_hd.b_from, -l_hd.b_to\n ))", "def _make_data(self):\n pdf_datasets_all = make_pdf_datasets(self.pdf_list, self.xlims, self.ylims, self.tlims, self.dims, 9)\n self.pdf_dataset = np.concatenate(pdf_datasets_all, axis = 0)\n self.PDE_dataset = make_PDE_dataset(self.num_collocation, self.xlims, self.ylims, self.tlims, self.dims)\n self.BC_dataset = make_BC_dataset(self.num_BC, self.xlims, self.ylims, self.tlims, self.dims)", "def set_batch_data():\r\n if not os.path.exists(filepath):\r\n download_data()\r\n for n in range(0,6):\r\n d = read(filepath + flist[n])\r\n metadata = read(filepath + flist[-1])\r\n ndata = metadata['num_cases_per_batch']\r\n ndim = metadata['num_vis']\r\n\r\n data, trts = {}, {}\r\n data['labels'] = metadata['label_names']\r\n data['ntraindata'] = metadata['num_cases_per_batch'] * (len(flist) - 2)\r\n data['ntestdata'] = metadata['num_cases_per_batch']\r\n data['ndim'] = metadata['num_vis']\r\n trts['x'], trts['y'] = d['data'], d['labels']\r\n trtsflag = ['train', 'train', 'train', 'train', 'train', 'test']\r\n\r\n data['flag'] = trtsflag[n]\r\n data[trtsflag[n]] = trts\r\n save_pkl(data, savename=flist[n]+'.pkl')", "def _build_datasets(self):\n self._build_datasets_sis3302()\n self._build_datasets_sis3305()", "def separate(self):\n print(\"start dataset separating\")\n sum = 0\n for i in tqdm(range(len(self.itemlen))):\n il = self.itemlen[i]\n if il < 3:\n sum += il\n continue\n rarr = list(range(sum, sum+il))\n random.shuffle(rarr)\n self.train.append({\n 'input': self.input[rarr[0]],\n 'label': self.label[i]\n })\n self.val.append({\n 'input': self.input[rarr[1]],\n 'label': self.label[i]\n })\n for j in range(2, len(rarr)):\n self.test.append({\n 
'input': self.input[rarr[j]],\n 'label': self.label[i]\n })\n sum += il", "def build_data_set(self):\n if not self.assert_data_correct():\n self.download_all_data()\n self.unpack_rename_data()\n self.split_data_characters()\n self.clean_data_fragments()\n self.create_font_data()\n if not self.assert_train_augmented():\n self.augment_train_data()\n if not self.assert_style_data_correct():\n self.download_style_data()\n self.unpack_rename_data()", "def generate_datasets(self) -> (tf.data.Dataset, tf.data.Dataset):\n self.obtain_meta_data_frame_for_available_lightcurves()\n positive_example_paths = self.meta_data_frame[self.meta_data_frame['disposition'] == 'PC']['lightcurve_path']\n print(f'{len(positive_example_paths)} positive examples.')\n negative_example_paths = self.meta_data_frame[self.meta_data_frame['disposition'] != 'PC']['lightcurve_path']\n print(f'{len(negative_example_paths)} negative examples.')\n positive_datasets = self.get_training_and_validation_datasets_for_file_paths(positive_example_paths)\n positive_training_dataset, positive_validation_dataset = positive_datasets\n negative_datasets = self.get_training_and_validation_datasets_for_file_paths(negative_example_paths)\n negative_training_dataset, negative_validation_dataset = negative_datasets\n training_dataset = self.get_ratio_enforced_dataset(positive_training_dataset, negative_training_dataset,\n positive_to_negative_data_ratio=1)\n validation_dataset = positive_validation_dataset.concatenate(negative_validation_dataset)\n if self.trial_directory is not None:\n self.log_dataset_file_names(training_dataset, dataset_name='training')\n self.log_dataset_file_names(validation_dataset, dataset_name='validation')\n training_dataset = training_dataset.shuffle(buffer_size=len(list(training_dataset)))\n training_preprocessor = lambda file_path: tuple(tf.py_function(self.training_preprocessing,\n [file_path], [tf.float32, tf.float32]))\n training_dataset = training_dataset.map(training_preprocessor, num_parallel_calls=16)\n training_dataset = training_dataset.padded_batch(self.batch_size, padded_shapes=([None, 2], [None])).prefetch(\n buffer_size=tf.data.experimental.AUTOTUNE)\n validation_preprocessor = lambda file_path: tuple(tf.py_function(self.evaluation_preprocessing,\n [file_path], [tf.float32, tf.float32]))\n validation_dataset = validation_dataset.map(validation_preprocessor, num_parallel_calls=4)\n validation_dataset = validation_dataset.padded_batch(1, padded_shapes=([None, 2], [None])).prefetch(\n buffer_size=tf.data.experimental.AUTOTUNE)\n return training_dataset, validation_dataset", "def prepare(self):\n if self.opts['verbose']:\n print(\"Preparing dataset (one-time operation)...\")\n # Create paths files and load them back in\n self._build_ID_sets()\n self._create_ID_files()\n self._load_ID_files()\n if self.opts['verbose']:\n print(\"... 
done with preparing the dataset.\")", "def dataset_read(self):\n # while self.running:\n # grab current data_list and own it locally per cycle\n # to avoid mid-parse changes\n self.local_data_list = self.data_list\n\n # set a random duration for reading from random line\n # before choosing another from current set\n dataset_read_dur = (random.randrange(3000, 13000) / 1000) * self.glob_speed\n\n # prepare start line to read\n starting_line = self.line_to_read()\n\n # sorts out durations\n if self.debug_choose:\n print('B1 dataset line read duration = ', dataset_read_dur)\n end_time = self.end_time_calc(dataset_read_dur)\n\n # determine if read is to be looped or sequential\n looped = self.is_loop()\n\n while time.time() < end_time:\n # calc baudrate and cycle clock for speed of line read\n baudrate = self.baudrate()\n\n # if looped\n if looped > 0:\n loop_end = time.time() + looped\n\n # reset the start read point\n line_to_read = starting_line\n\n # for each loop\n while time.time() < loop_end:\n active_line = self.local_data_list[line_to_read]\n self.parse_active_line(active_line)\n line_to_read += 1\n if self.debug_read:\n print(f'******** line to read LOOPING {line_to_read}')\n # print(f'config data = {config.x_ds}, {config.y_ds}, {config.z_ds}')\n\n # pause for 10th of baudrate, while parse_active_line slides\n time.sleep(baudrate/10)\n else:\n # if no loop\n active_line = self.local_data_list[starting_line]\n self.parse_active_line(active_line)\n starting_line += 1\n if self.debug_read:\n print(f'******** line to read NO LOOP {starting_line}')\n # print(f'config data = {config.x_ds}, {config.y_ds}, {config.z_ds}')\n\n # pause for 10th of baudrate, while parse_active_line slides\n time.sleep(baudrate/10)", "def prepare_data(self):\n try:\n self.train_dataset = self.datasets['train']\n self.val_dataset = self.datasets['val']\n try:\n self.test_dataset = self.datasets['test']\n except:\n pass\n except Exception as e:\n print('Data was not succesfully prepared:', e)", "def prepareDataBatches(self, traindata, trainlabel):\n index = np.random.permutation(len(traindata))\n traindata = traindata[index]\n trainlabel = trainlabel[index]\n split_no = int(len(traindata) / self.batchSize)\n return zip(np.split(traindata[:split_no*self.batchSize], split_no), np.split(trainlabel[:split_no*self.batchSize], split_no))", "def prepare_dataset(fpath):\n raise NotImplementedError", "def prepare_nfold_datasets(self): # i.e. 
split into different train/ground-truth(test) dataset\n for alpha in range(1, self.ALPHAs+1):\n if alpha != self.ALPHAs:\n gt_years = np.array2string(self.tl_model.years[(alpha-1)*self.PSI : alpha*self.PSI], separator='-')\n else:\n gt_years = np.array2string(self.tl_model.years[(alpha-1)*self.PSI : alpha*self.PSI+self.runoff_years], separator='-')\n new_cluster_dir = str(Path(self.tl_model.cluster_dir) / f'alpha_{alpha}_GT-{gt_years}')\n os.makedirs(new_cluster_dir, exist_ok=True)\n\n new_prepared_data_dir = str(Path(self.tl_model.prepared_data_dir) / f'alpha_{alpha}')\n os.makedirs(new_prepared_data_dir, exist_ok=True)\n \n if utils.find(f'*alpha_{alpha}_preprocessed.pkl', new_prepared_data_dir) and utils.find(f'*alpha_{alpha}_standardized_stacked_arr.pkl', new_prepared_data_dir):\n pass\n else:\n if not utils.find(f'*target*alpha_{alpha}_preprocessed.pkl', new_prepared_data_dir):\n print(f\"=> No input datasets pre-processed for alpha of {alpha}\")\n prepare.cut_target_dataset(self, alpha, new_prepared_data_dir)\n\n if not utils.find(f'*rf*alpha_{alpha}_preprocessed.pkl', new_prepared_data_dir):\n print(f\"=> No rainfall datasets pre-processed for alpha of {alpha}\")\n prepare.cut_rf_dataset(self, alpha, new_prepared_data_dir)\n \n print(f'Preprocessed pickles for alpha split {alpha} can be found @:\\n{new_prepared_data_dir}')", "def _fillBatches(self):\n\n batchRE = r\"\"\"\n B\n (?P<observebatch>\\d+?)\n (?P<startend>[SE])\n (?P<sequence>\\d+?)\n _SR\n (?:_(?P<extraInjections>\\d+?|\\w+?))?\n $\n \"\"\"\n batchRE = re.compile(batchRE, re.VERBOSE)\n # We canot infer batches unless we have runorder\n if 'Run Order' in self.sampleMetadata.keys():\n currentBatch = 0\n # Loop over samples in run order\n for index, row in self.sampleMetadata.sort_values(by='Run Order').iterrows():\n nameComponents = batchRE.search(row['Sample File Name'])\n if nameComponents:\n # Batch start\n if nameComponents.group('startend') == 'S':\n # New batch - increment batch no\n if nameComponents.group('sequence') == '1':\n currentBatch = currentBatch + 1\n\n # Don't include the dilution series or blanks\n if not ((row['AssayRole'] == AssayRole.LinearityReference) or (row['SampleType'] == SampleType.ProceduralBlank)):\n self.sampleMetadata.loc[index, 'Batch'] = currentBatch\n self.sampleMetadata.loc[index, 'Correction Batch'] = currentBatch\n\n else:\n warnings.warn('Unable to infer batches without run order, skipping.')\n return", "def generate_data(self):\n\n column_num = 1\n src_path = self.src_paths_after_pre_process\n target_path = self.tgt_paths_after_pre_process\n\n src_ds = load_textline_dataset([src_path], column_num)\n\n src_ds = src_ds[0]\n\n input_pipeline_func = self.get_input_pipeline(for_export=False)\n\n src_ds = src_ds.map(\n input_pipeline_func, num_parallel_calls=self.num_parallel_calls)\n\n src_size_ds = src_ds.map(\n lambda x: compute_sen_lens(x, padding_token=utils.PAD_IDX),\n num_parallel_calls=self.num_parallel_calls)\n\n src_ds = src_ds.map(\n self.exclude_padding, num_parallel_calls=self.num_parallel_calls)\n\n if self.infer_without_label:\n data_set = tf.data.Dataset.zip((src_ds, src_size_ds))\n\n else:\n tgt = load_textline_dataset([target_path], column_num)\n tgt = tgt[0]\n tgt_out_ds = tgt.map(lambda x: x + ' ' + self.END_TOKEN)\n tgt_in_ds = tgt.map(lambda x: self.START_TOKEN + ' ' + x)\n\n tgt_in_ds = tgt_in_ds.map(\n lambda batch: self.text_pipeline_func(batch, self.max_dec_len, self.\n text_vocab_file_path),\n num_parallel_calls=self.num_parallel_calls)\n\n tgt_in_size_ds = 
tgt_in_ds.map(\n lambda x: compute_sen_lens(x, padding_token=utils.PAD_IDX),\n num_parallel_calls=self.num_parallel_calls)\n\n tgt_in_ds = tgt_in_ds.map(\n self.exclude_padding, num_parallel_calls=self.num_parallel_calls)\n\n inp_ds = tf.data.Dataset.zip(\n (src_ds, src_size_ds, tgt_in_ds, tgt_in_size_ds))\n\n if self.use_label_vocab:\n target_vocab_file_path = self.label_vocab_file_paths[0]\n else:\n target_vocab_file_path = self.text_vocab_file_path\n tgt_out_ds = tgt_out_ds.map(\n lambda batch: self.text_pipeline_func(batch, self.max_dec_len,\n target_vocab_file_path),\n num_parallel_calls=self.num_parallel_calls)\n\n tgt_out_ds = tgt_out_ds.map(\n self.exclude_padding, num_parallel_calls=self.num_parallel_calls)\n data_set = tf.data.Dataset.zip((inp_ds, tgt_out_ds))\n\n vocab_dict = load_vocab_dict(self.text_vocab_file_path)\n vocab_size = len(vocab_dict)\n label_vocab_dict = load_vocab_dict(self.label_vocab_file_paths[0])\n label_vocab_size = len(label_vocab_dict)\n data_size = get_file_len(self.src_paths_after_pre_process)\n self.config['data']['vocab_size'] = vocab_size\n self.config['data']['label_vocab_size'] = label_vocab_size\n self.config['data']['{}_data_size'.format(self.mode)] = data_size\n\n return data_set", "def prepare_typerec_dataset(self, data_raw):\n\n self._logger.info(f'Preparing Wikidata-TypeRec dataset ({len(data_raw)} lines)...')\n data = []\n line_count = 0\n sample_count = 0\n sample_count_failed = 0\n\n for line in tqdm(data_raw):\n line_count += 1\n\n try:\n sample = self.prepare_typerec_sample(line)\n data.append(sample)\n sample_count += 1\n except Exception as e:\n self._logger.info(str(e))\n sample_count_failed += 1\n\n self._logger.info(f'Prepared {sample_count} samples from {line_count} lines (skipped {sample_count_failed} failed)')\n\n return data", "def recreate_and_prepare_datasets_for_training(self, datasets: List[str], width: int, height: int,\n use_fixed_canvas: bool,\n stroke_thicknesses_for_generated_symbols: List[int],\n staff_line_spacing: int,\n staff_line_vertical_offsets: List[int],\n random_position_on_canvas: bool) -> None:\n self.__delete_dataset_directory()\n self.__download_and_extract_datasets(datasets, width, height, use_fixed_canvas, staff_line_spacing,\n staff_line_vertical_offsets, stroke_thicknesses_for_generated_symbols,\n random_position_on_canvas)", "def __data_generation(self, rows):\n samples = np.zeros((rows, self.image_width, self.image_height, self.image_depth))\n targets = np.zeros((rows, self.image_width, self.image_height, self.num_classes))\n for j in range(rows):\n for row1, row2 in zip(self.reader1, self.reader2):\n array_row1 = np.array(row1, dtype=np.float)\n samples[j,:,:,:] = preprocess_feature(array_row1,\n self.image_width, self.image_height, self.image_depth)\n try:\n next(self.reader1)\n except StopIteration:\n print(\"CSV iteration end for feature. Calling 'break'.\")\n break\n\n array_row2 = np.array(row2, dtype=np.int)\n targets[j,:,:,:] = preprocess_label(array_row2,\n self.image_width, self.image_height, self.num_classes)\n try:\n next(self.reader2)\n except StopIteration:\n print(\"CSV iteration end for label. 
Calling 'break'.\")\n break\n\n return samples, targets", "def InitDataset(self):\n train_txt = 'ImageSets/Main/train.txt'\n val_txt = 'ImageSets/Main/val.txt'\n annotations = \"Annotations\"\n jpegimages = \"JPEGImages\"\n images_path = train_txt if (self.is_train) else val_txt \n images_path = readTxt(os.path.join(self.path, images_path))\n images_path.pop(-1)\n # rawdata format: [path_2_image, path_2_xml]\n rawData = list()\n for each in images_path:\n xml = os.path.join(self.path, annotations, each + '.xml')\n jpeg = os.path.join(self.path, jpegimages, each + '.jpg')\n rawData.append([jpeg, xml])\n return rawData", "def _create_examples(self, lines, set_type):\n # Parallelizing a bit batch computation because it is quite slow...\n #lines = lines[:500]\n step = 18 # 17 sentences per input sequence\n #encoded_dict = self.tokenizer.encode('[CLS] ' + ' [SEP] [CLS] '.join(lines) + ' [SEP]')\n #tokens = np.array(encoded_dict.tokens)\n #ids = np.array(encoded_dict.ids)\n \n n = len(lines)\n \n def f(i, sequence):\n guid = \"%s-%s\" % (set_type, i)\n text_a = self.pad_to_max_length([2] + self.mask_tokens(sequence) + [3])\n text_b = [0 if item==0 else 1 for item in text_a]\n label = self.pad_to_max_length([2] + sequence + [3])\n label = [label[i] if item==4 else -100 for i, item in enumerate(text_a)] # for loss computation, only taking into account MASK tokens with id==4\n example = InputExample(guid=guid,text_a=text_a,text_b=text_b,label=label)\n return example\n \n def g(i, line):\n sequence = self.tokenizer.encode(' '.join(line)).ids\n return f(i, sequence)\n \n # Splitting data for memory issues...\n indexes = list(range(0, n, step))\n m = len(indexes)\n n_splits = self.n_splits\n splits = [indexes[i*m//n_splits: m*(i+1)//n_splits] for i in range(n_splits)]\n for index_split, split in enumerate(splits):\n print(f\"Computing split {index_split+1} / {n_splits}... 
Split size: {len(split)}\")\n examples = Parallel(n_jobs=-1)(delayed(g)(index+split[0], lines[i:i + step]) for index, i in tqdm(enumerate(split)))\n self.save_object(os.path.join(self.dataset_dir, f'{self.dataset_name}{set_type}_examples_split-{index_split}.pkl'), examples)\n # Merging\n #examples = [self.load_object(os.path.join(self.dataset_dir, f'{self.dataset_name}{set_type}_examples_split-{index_split}.pkl')) for index_split in range(n_splits)]\n #examples = [item for l in examples for item in l]\n #self.save_object(os.path.join(self.dataset_dir, f'{self.dataset_name}{set_type}_examples.pkl'), examples)\n \n examples_paths = [os.path.join(self.dataset_dir, f'{self.dataset_name}{set_type}_examples_split-{index_split}.pkl') for index_split in range(n_splits)]\n \n return examples_paths", "def _proc_dataset(d):\n # merge 2dseq complex frame group if present\n if d.is_complex and d.type == '2dseq':\n d = FrameGroupMerger().merge(d, 'FG_COMPLEX')\n\n # prepare the data array\n if d.is_svs:\n data = _prep_data_svs(d)\n elif d.is_mrsi:\n data = _prep_data_mrsi(d)\n else:\n data = d.data\n\n # get properties\n properties = d.to_dict()\n\n # some Bruker datasets do not have affine property\n if d.type == 'fid': if not 'affine' in properties: properties.update({'affine':np.identity(4)})\n \n yield data, properties", "def _build_datasets_sis3302(self):\n bc_arr = np.where(self._active_brdch[\"SIS 3302\"])\n\n for board, channel in zip(bc_arr[0], bc_arr[1]):\n brd = board + 1\n ch = channel + 1\n slot = self.get_slot(brd, \"SIS 3302\")\n\n for cname in self._active_config:\n # create main dataset\n dset_name = f\"{cname} [Slot {slot}: SIS 3302 ch {ch}]\"\n shape = (self._sn_size, self._nt)\n data = np.empty(shape=shape, dtype=np.int16)\n self.create_dataset(dset_name, data=data)\n\n # create header dataset\n hdset_name = f\"{dset_name} headers\"\n shape = (self._sn_size,)\n dtype = np.dtype(\n [\n (\"Shot number\", np.int32),\n (\"Scale\", np.float32),\n (\"Offset\", np.float32),\n (\"Min\", np.uint16),\n (\"Max\", np.uint16),\n (\"Clipped\", np.int8),\n ]\n )\n dheader = np.empty(shape=shape, dtype=dtype)\n dheader[\"Shot number\"] = np.arange(\n 1, shape[0] + 1, 1, dtype=dheader[\"Shot number\"].dtype\n )\n dheader[\"Scale\"] = 7.7241166e-5\n dheader[\"Offset\"] = -2.531\n dheader[\"Min\"] = data.min(axis=1)\n dheader[\"Max\"] = data.max(axis=1)\n dheader[\"Clipped\"] = 0\n self.create_dataset(hdset_name, data=dheader)", "def _create_projection_datasets(self):\n # First grab the spectroscopic indices and values and position indices\n self._sho_spec_inds = self.h5_main.h5_spec_inds\n self._sho_spec_vals = self.h5_main.h5_spec_vals\n self._sho_pos_inds = self.h5_main.h5_pos_inds\n\n fit_dim_ind = self.h5_main.spec_dim_labels.index(self._fit_dim_name)\n\n self._fit_spec_index = fit_dim_ind\n self._fit_offset_index = 1 + fit_dim_ind\n\n # Calculate the number of loops per position\n cycle_start_inds = np.argwhere(self._sho_spec_inds[fit_dim_ind, :] == 0).flatten()\n tot_cycles = cycle_start_inds.size\n\n # Make the results group\n self._h5_group = create_results_group(self.h5_main, 'Loop_Fit')\n write_simple_attrs(self._h5_group, {'projection_method': 'pycroscopy BE loop model'})\n\n # Write datasets\n self.h5_projected_loops = create_empty_dataset(self.h5_main, np.float32, 'Projected_Loops',\n h5_group=self._h5_group)\n\n h5_loop_met_spec_inds, h5_loop_met_spec_vals = write_reduced_spec_dsets(self._h5_group, self._sho_spec_inds,\n self._sho_spec_vals, self._fit_dim_name,\n 
basename='Loop_Metrics')\n\n self.h5_loop_metrics = write_main_dataset(self._h5_group, (self.h5_main.shape[0], tot_cycles), 'Loop_Metrics',\n 'Metrics', 'compound', None, None, dtype=loop_metrics32,\n h5_pos_inds=self.h5_main.h5_pos_inds,\n h5_pos_vals=self.h5_main.h5_pos_vals,\n h5_spec_inds=h5_loop_met_spec_inds,\n h5_spec_vals=h5_loop_met_spec_vals)\n\n # Copy region reference:\n copy_region_refs(self.h5_main, self.h5_projected_loops)\n copy_region_refs(self.h5_main, self.h5_loop_metrics)\n\n self.h5_main.file.flush()\n self._met_spec_inds = self.h5_loop_metrics.h5_spec_inds\n\n return", "def data_process(self):\n logging.info('Processing the data and split files')\n lines = Utility.file_len(self.fname)\n self.lines_to_be, self.split_files = Utility.split_files(self.fname, lines,\n cpu_count().real)", "def preprocess(self):\n \n file_name_list = os.listdir(self.image_dir)\n random.seed(1234)\n random.shuffle(file_name_list)\n \n for i,d in enumerate(self.domains):\n self.attr2idx[d]=i \n\n for i, file_name in enumerate(file_name_list):\n if (file_name.startswith('X_')):\n continue\n \n parts = file_name.split(\"-\")\n label = int(parts[0])\n if label not in self.domains:\n continue\n img_name = file_name\n\n count=self.get_sample_count(label)\n if count<self.valid_set_size:\n # create holdout set on the fly\n utils.copy_file(self.image_dir,self.valid_set_dir,img_name)\n else:\n self.dataset.append([img_name, self.attr2idx[label]])\n \n self.increment_sample_count(label)\n\n print(\"Sample count per domain: \"+str(self.sample_count)+\" (including holdout set, holdout size per domain is: \"+str(self.valid_set_size)+\")\")\n print('Finished preprocessing the dataset...')", "def setup(self):\n in_dataset, out_dataset = self.get_datasets()\n \n out_dataset[0].create_dataset(in_dataset[0])\n out_dataset[1].create_dataset(in_dataset[1])\n\n in_pData, out_pData = self.get_plugin_datasets()\n\n in_pData[0].plugin_data_setup( 'SINOGRAM',multiple)\n out_pData[0].plugin_data_setup( 'PROJECTION','multiple')\n\n in_pData[1].plugin_data_setup( 'PROJECTION',)\n out_pData[1].plugin_data_setup( 'PROJECTION','multiple')", "def prepare_dataset(self, xFold_step, xFold_type):\n\n eval_samples_per_xfold = int(round((self.__train_size + self.__eval_size)/xFold_type))\n\n start_index = int(xFold_step*eval_samples_per_xfold)\n end_index = int(start_index + eval_samples_per_xfold)\n\n if end_index < len(self.__read_in_labels[-self.__test_size:]):\n end_index = len(self.__read_in_labels[-self.__test_size:])\n\n dataset = {\n \"x_train\": np.concatenate((self.__read_in_images[:start_index], self.__read_in_images[end_index:]), axis=0),\n \"y_train\": np.concatenate((self.__read_in_labels[:start_index], self.__read_in_labels[end_index:]), axis=0),\n\n \"x_eval\": self.__read_in_images[start_index:end_index],\n \"y_eval\": self.__read_in_labels[start_index:end_index],\n\n \"x_test\": self.__read_in_images[-self.__test_size:],\n \"y_test\": self.__read_in_labels[-self.__test_size:],\n }\n\n return dataset", "def _prepare_data(self, dataset: DatasetH, reweighter=None) -> List[Tuple[lgb.Dataset, str]]:\n ds_l = []\n assert \"train\" in dataset.segments\n for key in [\"train\", \"valid\"]:\n if key in dataset.segments:\n df = dataset.prepare(key, col_set=[\"feature\", \"label\"], data_key=DataHandlerLP.DK_L)\n if df.empty:\n raise ValueError(\"Empty data from dataset, please check your dataset config.\")\n x, y = df[\"feature\"], df[\"label\"]\n\n # Lightgbm need 1D array as its label\n if y.values.ndim == 2 and 
y.values.shape[1] == 1:\n y = np.squeeze(y.values)\n else:\n raise ValueError(\"LightGBM doesn't support multi-label training\")\n\n if reweighter is None:\n w = None\n elif isinstance(reweighter, Reweighter):\n w = reweighter.reweight(df)\n else:\n raise ValueError(\"Unsupported reweighter type.\")\n ds_l.append((lgb.Dataset(x.values, label=y, weight=w), key))\n return ds_l", "def prepare_data(self, *args, **kwargs):\n # get paths to train and test splits\n _split_paths = [os.path.join(self.path_to_data, split)\n for split in os.listdir(self.path_to_data)]\n\n # for each split [train, test]\n for _path in _split_paths:\n _img_classes = os.listdir(_path) # get subfolders representing each class\n self.splits[os.path.basename(_path)] = []\n\n # get the images in pairs with its corresponding class\n for _class in _img_classes:\n _data = self.get_img_text_pair(os.path.join(_path, _class))\n\n if os.path.basename(_path) == 'train':\n self.weights[self.encode_label(_class)] = len(_data)\n self.splits[os.path.basename(_path)].extend(_data)", "def refresh_train_dataset(self):\n inputs_id, inputs_starts, inputs_paths, inputs_ends, inputs_label = self.build_data(self.reader, self.train_items, self.option.max_path_length)\n self.train_dataset = CodeDataset(inputs_id, inputs_starts, inputs_paths, inputs_ends, inputs_label)", "def prepare_dataset(self, stock):\n dataset = pd.DataFrame(yf.download(stock))\n dataset_train = dataset[0:len(dataset)-30] # Training dataset will be the whole dataset minus the test dataset\n training_set = dataset_train.iloc[:, 1:2].values # Use the open values only\n\n # Feature Scaling\n sc = MinMaxScaler(feature_range=(0, 1)) # Scales all values so that they are in the range (0, 1)\n training_set_scaled = sc.fit_transform(training_set)\n\n # Create an input data structure with a timestep\n x_train = []\n y_train = []\n for i in range(self.timestep, len(dataset)-30):\n x_train.append(training_set_scaled[i - self.timestep:i, 0])\n y_train.append(training_set_scaled[i, 0])\n x_train, y_train = np.array(x_train), np.array(y_train)\n\n # Reshaping\n x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], 1))\n\n return x_train, y_train", "def _create_examples_split(self, lines, set_type):\n examples = []\n \n for (i, line) in enumerate(lines):\n a_label = int(line[\"label\"])\n q_type = line[\"type\"]\n if a_label == 0 and q_type != \"qLookup\":\n #print(\"discontinue\")\n continue\n sentence_number = 0\n premise_text = line[\"premise\"]\n the_id = int(line[\"id\"])\n modified_premise_text = re.sub(self.stage_name_pattern,\"\",premise_text)\n modified_premise_text = re.sub(self.w_patterns,\"\",modified_premise_text)\n hypothesis_text = line[\"hypothesis\"]\n hypothesis_text = re.sub(self.w_patterns,\"\",hypothesis_text)\n \n\n sentences = modified_premise_text.split('.')\n\n for j, sentence in enumerate(sentences):\n guid = \"\" + str(sentence_number) + \"\\t\" + str(i) + \"\\t\" + str(len(sentences)) + \"\\t\" + str(a_label)\n text_a = sentence\n text_b = hypothesis_text\n label = a_label\n sentence_number += 1\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n #print(\"16th sentence::\",sentences[16])\n\n return examples", "def _build_datasets_sis3305(self):\n bc_arr = np.where(self._active_brdch[\"SIS 3305\"])\n\n for board, channel in zip(bc_arr[0], bc_arr[1]):\n brd = board + 1\n ch = channel + 1\n slot = self.get_slot(brd, \"SIS 3305\")\n if 1 <= ch <= 4:\n fpga_str = \"FPGA 1\"\n else:\n fpga_str = \"FPGA 2\"\n ch = ch - 
4\n\n for cname in self._active_config:\n # create main dataset\n dset_name = f\"{cname} [Slot {slot}: SIS 3305 {fpga_str} ch {ch}]\"\n shape = (self._sn_size, self._nt)\n data = np.empty(shape=shape, dtype=np.int16)\n self.create_dataset(dset_name, data=data)\n\n # create header dataset\n hdset_name = f\"{dset_name} headers\"\n shape = (self._sn_size,)\n dtype = np.dtype(\n [\n (\"Shot number\", np.int32),\n (\"Scale\", np.float32),\n (\"Offset\", np.float32),\n (\"Min\", np.uint16),\n (\"Max\", np.uint16),\n (\"Clipped\", np.int8),\n ]\n )\n dheader = np.empty(shape=shape, dtype=dtype)\n dheader[\"Shot number\"] = np.arange(\n 1, shape[0] + 1, 1, dtype=dheader[\"Shot number\"].dtype\n )\n dheader[\"Scale\"] = 0.0019550342\n dheader[\"Offset\"] = -1.0\n dheader[\"Min\"] = data.min(axis=1)\n dheader[\"Max\"] = data.max(axis=1)\n dheader[\"Clipped\"] = 0\n self.create_dataset(hdset_name, data=dheader)", "def _reset_dimensional_data(self, dataset):\n # local reference to input data\n raw = dataset.get_source_data('prep')\n\n nfids = raw.shape[-2]\n \n nfids = int(nfids/self.set.fids_to_average)\n \n data_shape = list(raw.shape)\n data_shape[-2] = nfids\n\n self.frequency_shift = np.zeros([nfids])\n self.phase_0 = np.zeros([nfids])\n self.measure_time = np.arange(nfids)\n\n self.data = np.zeros(data_shape, dtype=raw.dtype)\n if self.chain is not None:\n self.chain.reset_results_arrays()", "def preprocess(self):\n lines = [line.rstrip() for line in open(self.attr_path, 'r')]\n all_attr_names = lines[1].split()\n for i, attr_name in enumerate(all_attr_names):\n self.attr2idx[attr_name] = i\n self.idx2attr[i] = attr_name\n\n lines = lines[2:]\n random.seed(1234)\n random.shuffle(lines)\n for i, line in enumerate(lines):\n split = line.split()\n filename = split[0]\n values = split[1:]\n\n label = []\n for attr_name in self.selected_attrs:\n idx = self.attr2idx[attr_name]\n label.append(values[idx] == '1')\n\n if (i+1) < 4:\n self.test_dataset.append([filename, label])\n else:\n self.train_dataset.append([filename, label])", "def collect_data(self):\n self.lines = []\n\n while True:\n self._process_serial_data()", "def next_simple_dataset(dataset, batch_size: int, datatype):\n while True:\n x_batch = []\n y_batch = []\n for i in range(batch_size):\n try:\n x, y, data_unit, index = create_xy(dataset, datatype)\n # x = normalize(x)\n x_batch.append(x)\n y_batch.append(y)\n except StopIteration:\n break\n x_batch, y_batch = np.array(x_batch), np.array(y_batch)\n if datatype != DataType.test:\n x_batch = SEQ_CVXTZ.augment_images(x_batch).astype(\"float32\")\n x_batch = np.array([normalize(x) for x in x_batch])\n # org_shape = x_batch.shape\n # org_width = x_batch.shape[1]\n # corner = int((org_width - ROI_IMAGE_SIZE) // 2)\n # print(f\"0: org_shape:{org_shape} x_batch:{x_batch.shape} corner:{corner}\")\n # x_batch = x_batch[:, corner:(org_width - corner), corner:(org_width - corner), :]\n # resized_x_batch = []\n # for x in x_batch:\n # img = Image.fromarray(np.uint8(x))\n # img = img.resize((IMAGE_SIZE, IMAGE_SIZE), Image.LANCZOS)\n # resized_x_batch.append(normalize(np.array(img)))\n # print(f\"1: org_shape:{org_shape} corner:{corner} x_batch:{x_batch.shape}\")\n # yield np.array(resized_x_batch), y_batch\n yield np.array(x_batch), y_batch", "def prepare_data(self):\n import subprocess\n # Download coco data set into dir specified by config then /data/coco\n subprocess.call([f\"{get_original_cwd()}/bin/fetch_dataset.sh\", f\"{self.dir}/data/coco\", f\"{get_original_cwd()}\"])\n # 
subprocess.call([f\"bin/fetch_dataset.sh\", f\"{self.dir}/data/coco\"])\n task = \"instances\" if self.instance else \"person_keypoints\"\n register_coco_instances(\"train\", {}, f\"{self.dir}/data/coco/{task}_train2014.json\",\n f\"{self.dir}/data/coco/train2014\")\n register_coco_instances(\"val\", {}, f\"{self.dir}/data/coco/{task}_minival2014.json\",\n f\"{self.dir}/data/coco/val2014\")\n register_coco_instances(\"test\", {}, f\"{self.dir}/data/coco/{task}_valminusminival2014.json\",\n f\"{self.dir}/data/coco/val2014\")", "def setup(self):\n in_dataset, out_dataset = self.get_datasets()", "def setup(self):\n in_dataset, out_dataset = self.get_datasets()", "def build_training_data():\r\n for i in range(len(FILE_NAMES)):\r\n input_text = read_file(FILE_NAMES[i])\r\n list_of_word_lines = limiting_sentence_length(input_text)\r\n data = create_training_data_file(list_of_word_lines, LANGUAGE[i])\r\n write_training_data(data, LANGUAGE[i])\r\n merge_training_data()", "def prepare_data(self, data_blocks, y_blocks=None, restart=False):\n\n begin = True\n fake_y = False\n # prepare list of blocks\n if type(data_blocks) is not list:\n data_blocks = [data_blocks]\n if not y_blocks:\n fake_y = True\n y_blocks = [np.zeros(len(block)) for block in data_blocks]\n elif type(y_blocks) is not list:\n y_blocks = [y_blocks]\n\n if self.preprocessors is not None:\n nrows = 0\n if type(self.preprocessors) is not list:\n self.preprocessors = [self.preprocessors]\n if len(self.preprocessors) != len(data_blocks):\n self.logger.error(\n 'You need same size preprocessors for your datasets.')\n sys.exit()\n\n for pc, block, y in zip(self.preprocessors, data_blocks, y_blocks):\n if len(block) == 0:\n # empty data block\n pc._FEATURE_NAMES = []\n pc._FEATURE_SIZE = 0\n pc._SAMPLE_SIZE = 0\n continue\n if begin:\n output_x, output_y = pc.run(block, y, restart=restart)\n nrows = output_x.shape[0]\n begin = False\n else:\n cur_output_x, cur_output_y = pc.run(\n block, y, restart=restart)\n if cur_output_x.shape[0] != nrows:\n self.logger.error(\n 'Preprocessor {:s} does not align with previous data block dimensions'.format(pc.__name__))\n sys.exit(0)\n else:\n output_x = np.c_[output_x, cur_output_x]\n output_y = np.c_[output_y, cur_output_y]\n else:\n self.logger.warn(\n 'No preprocessor is found in this classifier, data blocks are directly concatenated.')\n output_x = data_blocks[0]\n output_y = y_blocks[0]\n\n for block, y in zip(data_blocks[1:], y_blocks[1:]):\n output_x = np.c_[output_x, block]\n output_y = np.c_[output_y, y]\n\n if fake_y:\n return output_x, None\n else:\n return output_x, output_y", "def _load_split_data(self, dataset_path):\n for i, prefix in enumerate(['train', 'dev', 'test']):\n filename = os.path.join(dataset_path, '{}.txt'.format(prefix))\n knowledge, src, tgt = self._load_multi_data(filename)\n self.group_text_data[0].append(knowledge)\n self.group_text_data[1].append(src)\n self.group_text_data[2].append(tgt)", "def PrepareDataset(data, \\\n BATCH_SIZE = 64, \\\n seq_len = seq_len_, \\\n pred_len = pred_len_, \\\n train_propotion = 0.7, \\\n valid_propotion = 0.15, \\\n masking = True, \\\n mask_ones_proportion = 0.8):\n time_len = data.shape[0]\n #speed_matrix = speed_matrix.clip(0, 100) #limit the values to 0-100\n \n max_data = data.max().max()\n #speed_matrix = speed_matrix / max_speed\n \n data_sequences, data_labels, data_pats = [], [], []\n for p in data['patient_id'].unique():\n pat_len = len(data[data['patient_id']==p])\n if (pat_len>(seq_len+pred_len)):\n #for i in range(time_len 
- seq_len - pred_len):\n for i in range(pat_len - seq_len - pred_len):\n data_sequences.append(data.drop(['SepsisLabel'], axis=1)[data['patient_id']==p].iloc[i:i+seq_len].values)\n #data_labels.append(data['SepsisLabel'][data['pat_id']==p].iloc[i+seq_len:i+seq_len+pred_len].values)\n data_labels.append(data['SepsisLabel'][data['patient_id']==p].iloc[i+seq_len+pred_len:i+seq_len+pred_len+1].values)\n data_pats.append(p)\n \n #print(i)\n data_sequences, data_labels, data_pats = np.asarray(data_sequences), np.asarray(data_labels), np.asarray(data_pats)\n #print(data_sequences.shape)\n #(951, 48, 42)\n if masking:\n print('Split Speed finished. Start to generate Mask, Delta, Last_observed_X ...')\n np.random.seed(1024)\n #Mask = np.random.choice([0,1], size=(data_sequences.shape), p = [1 - mask_ones_proportion, mask_ones_proportion])\n #speed_sequences = np.multiply(speed_sequences, Mask)\n Mask = data_sequences\n if opt.mask:\n Mask[Mask!=0]=1\n else:\n Mask[Mask!=0]=0\n \n # temporal information\n interval = 1 # 5 minutes\n S = np.zeros_like(data_sequences) # time stamps\n for i in range(S.shape[1]):\n S[:,i,:] = interval * i\n \n #print(S)\n Delta = np.zeros_like(data_sequences) # time intervals\n for i in range(1, S.shape[1]):\n Delta[:,i,:] = S[:,i,:] - S[:,i-1,:]\n\n missing_index = np.where(Mask == 0)\n\n X_last_obsv = np.copy(data_sequences)\n for idx in range(missing_index[0].shape[0]):\n i = missing_index[0][idx] \n j = missing_index[1][idx]\n k = missing_index[2][idx]\n if j != 0 and j != (seq_len-1):\n Delta[i,j+1,k] = Delta[i,j+1,k] + Delta[i,j,k]\n if j != 0:\n X_last_obsv[i,j,k] = X_last_obsv[i,j-1,k] # last observation\n \n #this should be column wise\n Delta = Delta / Delta.max() # normalize\n \n # shuffle and split the dataset to training and testing datasets\n print('Generate Mask, Delta, Last_observed_X finished. 
Start to shuffle and split dataset ...')\n sample_size = data_sequences.shape[0]\n index = np.arange(sample_size, dtype = int)\n np.random.seed(1024)\n np.random.shuffle(index)\n \n #patients = data['pat_id'].unique():\n #pat_sample_size=len(patients)\n #first split patients\n #train_pat_index = int(np.floor(pat_sample_size * train_propotion))\n #valid_pat_index = int(np.floor(pat_sample_size * ( train_propotion + valid_propotion)))\n \n #patients[:train_pat_index]\n #patients[train_pat_index:valid_pat_index]\n #patients[valid_pat_index:]\n \n #train_index=[]\n #for p in patients[:train_pat_index]:\n #item= np.where(data_pats==p)\n #train_index.append(item)\n \n #valid_index=[]\n #for p in patients[train_pat_index:valid_pat_index]:\n #item= np.where(data_pats==p)\n #valid_index.append(item)\n \n #test_index=[]\n #for p in patients[valid_pat_index:]:\n #item= np.where(data_pats==p)\n #test_index.append(item)\n \n \n data_sequences = data_sequences[index]\n data_labels = data_labels[index]\n \n\n if masking:\n X_last_obsv = X_last_obsv[index]\n Mask = Mask[index]\n Delta = Delta[index]\n data_sequences = np.expand_dims(data_sequences, axis=1)\n X_last_obsv = np.expand_dims(X_last_obsv, axis=1)\n Mask = np.expand_dims(Mask, axis=1)\n Delta = np.expand_dims(Delta, axis=1)\n dataset_agger = np.concatenate((data_sequences, X_last_obsv, Mask, Delta), axis = 1)\n \n train_index = int(np.floor(sample_size * train_propotion))\n valid_index = int(np.floor(sample_size * ( train_propotion + valid_propotion)))\n \n if masking:\n train_data, train_label = dataset_agger[:train_index], data_labels[:train_index]\n valid_data, valid_label = dataset_agger[train_index:valid_index], data_labels[train_index:valid_index]\n test_data, test_label = dataset_agger[valid_index:], data_labels[valid_index:]\n else:\n train_data, train_label = data_sequences[:train_index], data_labels[:train_index]\n valid_data, valid_label = data_sequences[train_index:valid_index], data_labels[train_index:valid_index]\n test_data, test_label = data_sequences[valid_index:], data_labels[valid_index:]\n \n train_data, train_label = torch.Tensor(train_data), torch.Tensor(train_label)\n valid_data, valid_label = torch.Tensor(valid_data), torch.Tensor(valid_label)\n test_data, test_label = torch.Tensor(test_data), torch.Tensor(test_label)\n \n train_dataset = utils.TensorDataset(train_data, train_label)\n valid_dataset = utils.TensorDataset(valid_data, valid_label)\n test_dataset = utils.TensorDataset(test_data, test_label)\n \n train_dataloader = utils.DataLoader(train_dataset, batch_size = BATCH_SIZE, shuffle=True, drop_last = True)\n valid_dataloader = utils.DataLoader(valid_dataset, batch_size = BATCH_SIZE, shuffle=True, drop_last = True)\n test_dataloader = utils.DataLoader(test_dataset, batch_size = BATCH_SIZE, shuffle=True, drop_last = True)\n \n X_mean = np.mean(data_sequences, axis = 0)\n \n print('Finished')\n \n return train_dataloader, valid_dataloader, test_dataloader, max_data, X_mean", "def iterate_dataset_configs(options):\n for dataset in options[consts.DATASET]:\n config = options.copy()\n config[consts.DATASET] = dataset\n yield config", "def build_dataset(self):\n print(\"reading data of images currently , please wait......\")\n x_train, y_train, _ = get_images(self.train_directory)\n x_test, y_test, _ = get_images(self.test_directory)\n x_train, y_train = image_subset(self.num_classes, x_train, y_train)\n x_test, y_test = image_subset(self.num_classes, x_test, y_test)\n x_train = x_train.astype('float32')\n x_test = 
x_test.astype('float32')\n self.x_train = x_train / 255\n self.x_test = x_test / 255\n self.y_train = utils.to_categorical(y_train, self.num_classes)\n self.y_test = utils.to_categorical(y_test, self.num_classes)", "def prepare_data():\n user_name = os.environ.get('USER')\n traintest_corpus = ResumeCorpus('/Users/' + user_name + '/Documents/Data')\n random.shuffle(traintest_corpus.resumes)\n\n for resume in traintest_corpus.resumes:\n try:\n review_text = pre_processing(resume[0])\n review_text = \" \".join(review_text)\n data_dict['data'].append(review_text)\n data_dict['label'].append(resume[1])\n except:\n pass", "def prepare_process(self, dataset):\n if dataset is not None:\n pass", "def prepare_data(self, lines: List[str]) -> List[str]:\n if self.is_tokenized:\n if self.parser == \"spacy\":\n lines = [l.split() for l in lines]\n elif self.parser == \"udpipe\":\n lines = [[l.split()] for l in lines]\n\n return lines", "def prepare_test_data(args):\n image_dir = args.test_image_dir\n\n files = os.listdir(image_dir)\n files = [f for f in files if f.lower().endswith('.png')]\n\n img_ids = list(range(len(files)))\n img_files = []\n img_heights = []\n img_widths = []\n \n for f in files:\n img_path = os.path.join(image_dir, f)\n img_files.append(img_path)\n img = cv2.imread(img_path)\n img_heights.append(img.shape[0]) \n img_widths.append(img.shape[1]) \n\n print(\"Building the testing dataset...\")\n dataset = DataSet(img_ids, img_files, img_heights, img_widths)\n print(\"Dataset built.\")\n return dataset", "def make_source_dataset(self, current_host_index, num_hosts):\n pass", "def initSets(self):\n data_frame = pd.read_csv(self.train_file, header=None)\n data_frame = data_frame.drop(columns=self.drop_cols)\n features = data_frame.iloc[:, :-1].values\n labels = data_frame.iloc[:, -1].values\n if self.test_file is None:\n self.train_feat, self.test_feat, self.train_labl, self.test_labl = train_test_split(features, labels, test_size=self.test_size)\n else:\n data_frame = pd.read_csv(self.test_file, header=None)\n data_frame = data_frame.drop(columns=self.drop_cols)\n self.train_feat, _, self.train_labl, _ = train_test_split(features, labels, test_size=self.test_size)\n features = data_frame.iloc[:, :-1].values\n labels = data_frame.iloc[:, -1].values\n _, self.test_feat, _, self.test_labl = train_test_split(features, labels, test_size=self.test_size)\n # kfold = KFold(n_splits=3)\n # self.train_index, self.test_index = kfold.split(features,labels)", "def _prepare_data(self):\n #TODO hardcoded values need to change\n print_info(\"Preprocessing the train data...\")\n self._place_dataset(os.path.join(self._hparams[\"temp-data\"], \"train\"),\n self.TRAIN_OUT_PATH)\n\n print_info(\"Preprocessing the test data...\")\n self._place_dataset(os.path.join(self._hparams[\"temp-data\"], \"test\"),\n self.TEST_OUT_PATH)\n\n print_info(\"Preprocessing the validation data...\")\n self._place_dataset(os.path.join(self._hparams[\"temp-data\"], \"val\"),\n self.VAL_OUT_PATH)", "def setup(self):\n in_dataset, out_dataset = self.get_datasets()\n \n out_dataset[0].create_dataset(in_dataset[0])\n\n in_pData, out_pData = self.get_plugin_datasets()\n\n in_pData[0].plugin_data_setup( '',)\n out_pData[0].plugin_data_setup( 'PROJECTION','multiple')", "def prepareDataDS(mdl, X):\n\t\n\tX2 = []\n\n\n\tYPs = mdl.predict(X)\n\t\n\tfor i in tqdm(range(len(X)),desc='Preparing Data for DS'):\n\t\t\n\t X2.append(np.array(YPs[0][i]))\n\n\tX2 = np.array(X2)\n\n\n\treturn X2", "def prepare_data_for_training(args):\n # Form the 
train/test splits and write them to disk\n dataset = data.Dataset(args)\n # get image classes and image counts in each class\n label_map = dataset.get_class_info()\n class_count = len(list(label_map.values()))\n # split the data and store it in log dir\n df_train, df_test = dataset.split_dataset()\n\n # perform dataset augmentations\n image_data = augment.Augmentation(args)\n # get the data gens for training and test images\n train_data_gen, _ = image_data.map_fn_train(df_train)\n test_data_gen, _ = image_data.map_fn_test(df_test)\n\n return train_data_gen, test_data_gen, df_train, df_test, class_count", "def prepare_batches(self, data):\n batches = []\n start, end = 0, 100\n if len(data) > 100:\n while True:\n data_batch = data[start:end]\n if not data_batch:\n break\n temp = end + 100\n start, end = end, temp\n if data_batch:\n batches.append(data_batch)\n else:\n batches.append(data)\n return batches", "def build_dataset(self, data_filename):\n\n # Load all data\n print(\"Loading target-context pairs from {}\".format(data_filename))\n self.data = pd.read_csv(data_filename,\n delimiter=self.delimiter,\n dtype='int32',\n header=None,\n engine='python').values\n\n # Force an adjustment to the node indices\n self.data += self.force_offset\n\n n_total = len(self.data)\n self.split_sizes = [int(n_total * split) for split in self.splits]\n self.split_offset = [0] + self.split_sizes[:-1]\n self.data_index = [0] * self.n_splits", "def init_datasets(self, dataset_names, columns):\n for dataset_name in dataset_names:\n hdf5_dataset_name = self.schema.get(dataset_name)\n if hdf5_dataset_name is None:\n warnings.warn(\"Skipping %s (not in schema)\" % dataset_name)\n else:\n self[dataset_name] = tokio.timeseries.TimeSeries(dataset_name=hdf5_dataset_name,\n start=self.query_start,\n end=self.query_end_plusplus,\n timestep=self.timestep,\n num_columns=len(columns),\n column_names=columns,\n sort_hex=self.sort_hex)", "def prepare(self):\n\n # step 0: load only when not loaded yet\n if TRAINING in self.data \\\n and VALIDATION in self.data:\n return\n\n # step 1: load the file names\n file_list = sorted(glob.glob(self.location+\"*.mhd\"))\n # count the number of data points\n\n # make a stratified validation set\n # note, the seed decides the validation set, but it is deterministic in the names\n random.seed(317070)\n patient_names = [self.patient_name_from_file_name(f) for f in file_list]\n validation_patients = random.sample(patient_names, int(VALIDATION_SET_SIZE*len(patient_names)))\n\n # make the static data empty\n for s in self.datasets:\n self.data[s] = []\n self.labels[s] = []\n self.names[s] = []\n\n # load the filenames and put into the right dataset\n labels_as_dict = defaultdict(list)\n\n with open(paths.LUNA_LABELS_PATH, 'rb') as csvfile:\n reader = csv.reader(csvfile, delimiter=',', quotechar='|')\n next(reader) # skip the header\n for row in reader:\n label = (float(row[1]), float(row[2]), float(row[3]), float(row[4]))\n labels_as_dict[str(row[0])].append(label)\n\n for patient_file in file_list:\n patient_name = self.patient_name_from_file_name(patient_file)\n\n if patient_name in validation_patients:\n s = VALIDATION\n else:\n s = TRAINING\n label = labels_as_dict[str(patient_name)]\n if self.only_positive and not label:\n continue\n self.data[s].append(patient_file)\n \n if self.pick_nodule:\n self.labels[s].append([random.choice(label)]) \n else:\n self.labels[s].append(label)\n \n \n self.names[s].append(patient_name)\n\n # give every patient a unique number\n last_index = -1\n for s 
in self.datasets:\n self.indices[s] = range(last_index+1,last_index+1+len(self.data[s]))\n if len(self.indices[s]) > 0:\n last_index = self.indices[s][-1]\n print s, len(self.indices[s]), \"samples\"", "def setup_datavault(self, x_axis, y_axis):\n\n yield self.dv.cd(['', self.name], True)\n\n # datasets for each laser\n self.dataset = yield self.dv.new(self.name + ' ML', [( 't', 'num')], [('GHz', '', 'num')])", "def preprocess(self):\n print(\"processing content images...\")\n for dir_item in self.selectedContent:\n join_path = Path(self.content_image_dir,dir_item.replace('/','_'))\n if join_path.exists():\n print(\"processing %s\"%dir_item,end='\\r')\n images = join_path.glob('*.%s'%(self.subffix))\n for item in images:\n self.content_dataset.append(item)\n else:\n print(\"%s dir does not exist!\"%dir_item,end='\\r')\n label_index = 0\n print(\"processing style images...\")\n for class_item in self.selectedStyle:\n images = Path(self.style_image_dir).glob('%s/*.%s'%(class_item, self.subffix))\n for item in images:\n self.art_dataset.append([item, label_index])\n label_index += 1\n random.seed(self.random_seed)\n random.shuffle(self.content_dataset)\n random.shuffle(self.art_dataset)\n # self.dataset = images\n print('Finished preprocessing the Art Works dataset, total image number: %d...'%len(self.art_dataset))\n print('Finished preprocessing the Content dataset, total image number: %d...'%len(self.content_dataset))", "def readData(self):\n self._setupArrays()\n\n with open(self.filename) as fh:\n datalines = fh.readlines()[self.NLHEAD:]\n\n datalines = self._checkForBlankLines(datalines)\n\n # Set up loop over unbounded indpendent variable\n m = 0 # Unbounded independent variable mark \n while len(datalines) > 0:\n datalines = self._readData1(datalines, m)\n datalines = self._readData2(datalines, m)\n m = m + 1", "def prepare_data_iterators(self, df, text, label):\n mask = np.random.rand(len(df)) < 0.9\n train = df[mask]\n valid = df[~mask]\n self.fields = {'label': self.label_field, 'text': self.text_field}\n train_ds = pdfds.DataFrameDataset(train, self.fields)\n valid_ds = pdfds.DataFrameDataset(valid, self.fields)\n train_iter = BucketIterator(train_ds, batch_size=16, sort_key=lambda x: len(x.text),\n train=True, sort=True, sort_within_batch=True)\n valid_iter = BucketIterator(valid_ds, batch_size=16, sort_key=lambda x: len(x.text),\n train=True, sort=True, sort_within_batch=True)\n return train_iter, valid_iter", "def create_dataset():\n x_old, y_old = clean_scores_version1()\n\n # delete duplicates\n x_old = np.unique(x_old, axis=0)\n\n file = open('/Users/kira/Desktop/uni/Connect4/agents/agent_supervised_ml/unlabeled2.txt', \"a\")\n\n for row in x_old:\n string = ''\n move_seq = row[row != 0]\n for move in move_seq:\n string = string + str(move)\n for i in range(1, 8):\n file.write(string + str(i) + '\\n')\n\n file.close()", "def concat_dataset(datafiles: list, delimiter=None, max_size=3000):\n dfs = []\n for i, filename in enumerate(datafiles, start=1):\n if allowed_file(filename):\n print(\"--- filename: \" + filename)\n df = pd.read_csv(filename, header=0)\n\n ###### Below is the only part that varies from ruler data prep\n if not 'labels' in df.columns:\n names = [\"UNK\"]*len(df.columns)\n names[-1] = 'label'\n names[-2] = 'text'\n df = pd.read_csv(filename, names=names, header=0)\n df['span_label'] = df['labels'].apply(lambda x: list(map(label_map, x.split(','))))\n ##### end diff\n assert 'text' in df.columns\n # Add field indicating source file\n df[\"file\"] = 
filename\n\n # TODO remove Modeler dependence on field 'label'\n # for now, pass dummy doc-level labels\n df['label'] = np.random.randint(0,2, size=len(df))\n\n # Remove delimiter chars\n if delimiter is not None:\n df['text'].replace(regex=True, inplace=True, to_replace=delimiter, value=r'')\n dfs.append(df)\n\n df_full = pd.concat(dfs).sample(frac=1, random_state=123)\n\n # split the data into labelled and unlabeled\n # TODO right now, we assume everything is labelled\n #labelled = df_full[mask_labelled(df_full.label)]\n labelled = df_full\n #unlabelled = df_full[~mask_labelled(df_full.label)]\n unlabelled = []\n\n # if all the data provided is labelled, \n # set some aside to use for interaction examples (training set)\n if len(unlabelled) == 0:\n msk = np.random.rand(len(df_full)) < 0.5\n unlabelled = df_full[msk]\n labelled = df_full[~msk]\n\n # Make sure we have enough labelled data\n MIN_LABELLED_AMOUNT = 10\n assert len(labelled) >= MIN_LABELLED_AMOUNT, \\\n \"Not enough labelled data. \\\n (Only {} examples detected)\".format(len(labelled))\n\n labelled = labelled[:min(max_size, len(labelled))].reset_index(drop=True)\n fifth = int(len(labelled)/5)\n labelled.at[:fifth*2, 'split'] = 'dev'\n labelled.at[fifth*2:fifth*3, 'split'] = 'valid'\n labelled.at[fifth*3:, 'split'] = 'test'\n\n unlabelled = unlabelled[:min(max_size, len(unlabelled))]\n unlabelled['split'] = 'train'\n\n # reset index\n df_full = pd.concat([labelled, unlabelled])\n df_full = df_full.reset_index(drop=True)\n return df_full", "def construct_data_iterators(\n self,\n train_ds: DataSetDict,\n test_ds: DataSetDict,\n key: KeyArray,\n mdtype: DType,\n ):\n size_device_prefetch = 2 # Set for GPU\n\n self.train_dt_iter = create_input_iter(\n key,\n train_ds,\n self.local_batch_size,\n size_device_prefetch,\n mdtype,\n train=True,\n )\n self.eval_dt_iter = create_input_iter(\n key, # eval: no permutation\n test_ds,\n self.local_batch_size,\n size_device_prefetch,\n mdtype,\n train=False,\n )\n\n self.ishape = train_ds[\"image\"].shape[1:3]\n self.log(\n \"Channels: %d, training signals: %d, testing\"\n \" signals: %d, signal size: %d\"\n % (\n train_ds[\"label\"].shape[-1],\n train_ds[\"label\"].shape[0],\n test_ds[\"label\"].shape[0],\n train_ds[\"label\"].shape[1],\n )\n )", "def prepare_data(self, train_data, **kwargs):\n data_len = len(train_data[\"done\"])\n for index in range(data_len):\n if self.multi_step == 1:\n self.buff.add(train_data[\"cur_state\"][index],\n train_data[\"action\"][index],\n train_data[\"reward\"][index],\n train_data[\"next_state\"][index],\n float(train_data[\"done\"][index])) # Add replay buffer", "def _loopPreparation(self, stimNumber):\n self.nbFrames=10000 #TO DO --> better place for this line of code\n\n self.stimName= self.experimentName+'_S%(number)03d' % {\"number\": stimNumber} #%02d return a 2 char string : 1-->01\n (self.tiffWriterList, self.textFile) = filesInit( self.savePath,\n self.stimName,\n self.nbFrames,\n self.maxFrames)\n if self.seqMode == \"rgbMode\":\n self._rgbSequenceInit()\n elif self.seqMode == 'rbMode':\n self._rbSequenceInit()\n self.arduinoSync()", "def prepare_data(self):\n data = self._get_dataset(self.hparams.dataset_path)\n label_encoder = data[\"label_encoder\"]\n del data[\"label_encoder\"]\n\n click.secho(\"Building inputs and labels.\", fg=\"yellow\")\n datasets = {\n \"train\": defaultdict(list),\n \"valid\": defaultdict(list),\n \"test\": defaultdict(list),\n }\n for dataset_name, dataset in data.items():\n for sample in dataset:\n instance = 
self.build_input(\n self.tokenizer, sample[\"text\"], label_encoder, sample[\"label\"]\n )\n for input_name, input_array in instance.items():\n datasets[dataset_name][input_name].append(input_array)\n\n click.secho(\"Padding inputs and building tensors.\", fg=\"yellow\")\n tensor_datasets = {\"train\": [], \"valid\": [], \"test\": []}\n for dataset_name, dataset in datasets.items():\n dataset = self.pad_dataset(dataset, padding=self.tokenizer.pad_index)\n for input_name in MODEL_INPUTS:\n if input_name == \"labels\":\n tensor = torch.tensor(dataset[input_name], dtype=torch.float32)\n else:\n tensor = torch.tensor(dataset[input_name])\n tensor_datasets[dataset_name].append(tensor)\n\n self.train_dataset = TensorDataset(*tensor_datasets[\"train\"])\n self.valid_dataset = TensorDataset(*tensor_datasets[\"valid\"])\n self.test_dataset = TensorDataset(*tensor_datasets[\"test\"])\n click.secho(\n \"Train dataset (Batch, Candidates, Seq length): {}\".format(\n self.train_dataset.tensors[0].shape\n ),\n fg=\"yellow\",\n )\n click.secho(\n \"Valid dataset (Batch, Candidates, Seq length): {}\".format(\n self.valid_dataset.tensors[0].shape\n ),\n fg=\"yellow\",\n )\n click.secho(\n \"Test dataset (Batch, Candidates, Seq length): {}\".format(\n self.test_dataset.tensors[0].shape\n ),\n fg=\"yellow\",\n )", "def setup(self):\n in_dataset, out_dataset = self.get_datasets()\n \n out_dataset[0].create_dataset(in_dataset[0])\n\n in_pData, out_pData = self.get_plugin_datasets()\n\n in_pData[0].plugin_data_setup( 'PROJECTION',multiple)\n out_pData[0].plugin_data_setup( 'PROJECTION',multiple)", "def prepare_data(qids_raw, conditions_raw, outputs_raw):\n\n qids = []\n conditions = []\n outputs = []\n dictionaries_standardization = []\n for qid_raw, condition_raw, output_raw in zip(qids_raw, conditions_raw, outputs_raw):\n qid, condition, output, dictionary = preprocess_sample(qid_raw, condition_raw, output_raw)\n qids.append(qid)\n conditions.append(condition)\n outputs.append(output)\n dictionaries_standardization.append(dictionary)\n\n return qids, conditions, outputs, dictionaries_standardization", "def prepare_epoch(dataset):\n print(\"[-] Epoch Start\")\n\n i = 0\n for sample in range(len(dataset)):\n if sample <= i + BATCH_SIZE-1:\n continue\n\n batch = []\n for i in range(i, i+BATCH_SIZE):\n batch.append(get_image(dataset[i], OUT_SIZE, CHANNELS))\n\n i += BATCH_SIZE + 1\n\n batch_images = np.array(batch).astype(np.float32)\n yield (batch_images, batch_images)\n print(\"i: {}, s: {}\".format(i, sample))\n\n print(\"[+] Epoch complete\")", "def fill_dataset(self):\n rm, rstd = self.get_rolling_stats()\n\n self.add_rolling_mean(rm)\n self.add_bollinger_bands(rstd)\n self.add_spy_info()\n self.add_beta_and_sharpe()\n self.add_stlouis_data()", "def main():\n\n #for row_start in np.arange(0, 128):\n # MLC_IDSAT_algorithm_rv1(14, 33, 16, 2, 'ULVT', 1.8, 2.0, 128, [row_start], [71, 55, 46, 35], [0.002, 0.01, 0.04, 0.2], 4, [], '', ['../Data/chip14/MLC_programming_Chip14_Col33_2msPULSE_VG1p8_VD2p0_VAsource_VBdrain_01', '../Data/chip14/MLC_programming_Chip14_Col33_10msPULSE_VG1p8_VD2p0_VAsource_VBdrain_02', '../Data/chip14/MLC_programming_Chip14_Col33_40msPULSE_VG1p8_VD2p0_VAsource_VBdrain_03', '../Data/chip14/MLC_programming_Chip14_Col33_200msPULSE_VG1p8_VD2p0_VAsource_VBdrain_04'], '../Plots/chip14/', 'VG1p8_VD2p0', '_cycle01020304_row'+str(row_start).zfill(3), Imin=12, Imax=136)\n\n Marcov_Chain_MLE(14, 33, 16, 2, 'ULVT', 1.8, 2.0, 128, range(0, 128), [71, 55, 46, 35], [0.002, 0.01, 0.04, 0.2], 4, 
['../Data/chip14/MLC_programming_Chip14_Col33_2msPULSE_VG1p8_VD2p0_VAsource_VBdrain_01', '../Data/chip14/MLC_programming_Chip14_Col33_10msPULSE_VG1p8_VD2p0_VAsource_VBdrain_02', '../Data/chip14/MLC_programming_Chip14_Col33_40msPULSE_VG1p8_VD2p0_VAsource_VBdrain_03', '../Data/chip14/MLC_programming_Chip14_Col33_200msPULSE_VG1p8_VD2p0_VAsource_VBdrain_04'], '../Plots/chip14/', 'VG1p8_VD2p0', '', 160.0, [90.0, 70.0, 50.0, 30.0], 0.0, 0.5)\n plt.show()\n #MLC_IDSAT_algorithm_rv1(14, 33, 16, 2, 'ULVT', 1.8, 2.0, 128, range(0, 128), [71, 55, 46, 35], [0.002, 0.01, 0.04, 0.2], 4, [], '', ['../Data/chip14/MLC_programming_Chip14_Col33_2msPULSE_VG1p8_VD2p0_VAsource_VBdrain_01', '../Data/chip14/MLC_programming_Chip14_Col33_10msPULSE_VG1p8_VD2p0_VAsource_VBdrain_02', '../Data/chip14/MLC_programming_Chip14_Col33_40msPULSE_VG1p8_VD2p0_VAsource_VBdrain_03', '../Data/chip14/MLC_programming_Chip14_Col33_200msPULSE_VG1p8_VD2p0_VAsource_VBdrain_04'], '../Plots/chip14/', 'VG1p8_VD2p0', '_cycle01020304_all')\n\n #IDS_VGS(14, 33, 16, 2, 'ULVT', 128, ['../Data/chip14/Fresh_Chip14_Col33_Ids_Vgs_VAsource_VBdrain', '../Data/chip14/MLC_Chip14_Col33_2msPULSE_VG1p8_VD2p0_Ids_Vgs_VAsource_VBdrain_01', '../Data/chip14/MLC_Chip14_Col33_10msPULSE_VG1p8_VD2p0_Ids_Vgs_VAsource_VBdrain_02', '../Data/chip14/MLC_Chip14_Col33_40msPULSE_VG1p8_VD2p0_Ids_Vgs_VAsource_VBdrain_03', '../Data/chip14/MLC_Chip14_Col33_200msPULSE_VG1p8_VD2p0_Ids_Vgs_VAsource_VBdrain_04'], ['b', 'y', 'r', 'k', 'g'], '../Plots/chip14/', 'Fresh_vs_MLC01020304_VG1p8_VD2p0_IDS-VGS_VaS-VbD_', range(0, 128), 'Fresh vs MLC-1-2-3-4 (VG=1.8, VD=2.0)\\nMLC-{1, 2, 3, 4}: {2ms, 10ms, 40ms, 200ms} WL pulses, IDSAT threshold = {90, 70, 50, 30}uA, forward' , 150, ['fresh', 'MLC-01', 'MLC-02', 'MLC-03', 'MLC-04']) \n #IDS_VGS(14, 33, 16, 2, 'ULVT', 128, ['../Data/chip14/Fresh_Chip14_Col33_Ids_Vgs_VAdrain_VBsource', '../Data/chip14/MLC_Chip14_Col33_2msPULSE_VG1p8_VD2p0_Ids_Vgs_VAdrain_VBsource_01', '../Data/chip14/MLC_Chip14_Col33_10msPULSE_VG1p8_VD2p0_Ids_Vgs_VAdrain_VBsource_02', '../Data/chip14/MLC_Chip14_Col33_40msPULSE_VG1p8_VD2p0_Ids_Vgs_VAdrain_VBsource_03', '../Data/chip14/MLC_Chip14_Col33_200msPULSE_VG1p8_VD2p0_Ids_Vgs_VAdrain_VBsource_04'], ['b', 'y', 'r', 'k', 'g'], '../Plots/chip14/', 'Fresh_vs_MLC01020304_VG1p8_VD2p0_IDS-VGS_VaD-VbS_', range(0, 128), 'Fresh vs MLC-1-2-3-4 (VG=1.8, VD=2.0)\\nMLC-{1, 2, 3, 4}: {2ms, 10ms, 40ms, 200ms} WL pulses, IDSAT threshold = {90, 70, 50, 30}uA, reversed', 150, ['fresh', 'MLC-01', 'MLC-02', 'MLC-03', 'MLC-04']) \n\n #hist_IDS_VGS(0, 14, 33, 16, 2, 'ULVT', 128, ['../Data/chip14/Fresh_Chip14_Col33_Ids_Vgs_VAdrain_VBsource', '../Data/chip14/MLC_Chip14_Col33_2msPULSE_VG1p8_VD2p0_Ids_Vgs_VAdrain_VBsource_01', '../Data/chip14/MLC_Chip14_Col33_10msPULSE_VG1p8_VD2p0_Ids_Vgs_VAdrain_VBsource_02', '../Data/chip14/MLC_Chip14_Col33_40msPULSE_VG1p8_VD2p0_Ids_Vgs_VAdrain_VBsource_03', '../Data/chip14/MLC_Chip14_Col33_200msPULSE_VG1p8_VD2p0_Ids_Vgs_VAdrain_VBsource_04'], ['b', 'y', 'r', 'k', 'g'], '../Plots/chip14/', 'Hist-IDSAT_MLC-rv1-01020304_reverse-read_', range(0, 128), 'MLC programming {2ms, 10ms, 40ms, 200ms} pulses, VGS=1.8, VDS=2.0 for level=1-2-3-4\\nhistogram of read-IDSAT (VGS=VDS=0.8V)', 0, 150, 0, 150, 1000)\n #\n #t_label = []\n #for t in np.arange(0, 0.002*(71) + 0.0001, 0.002):\n # t_label.append(str(t))\n #\n ##MLC_IDSAT_algorithm_rv1(14, 33, 16, 2, 'ULVT', 1.8, 2.0, 128, [21], [71], [0.002], 1, np.arange(0, 0.002*(71)+0.0001, 0.002), t_label, 
['../Data/chip14/MLC_programming_Chip14_Col33_2msPULSE_VG1p8_VD2p0_VAsource_VBdrain_01'], '../Plots/chip14/', 'VG1p8_VD2p0', '_rv1_cycle01_row-21', Imin=82, Imax=142)\n\n #for row_start in np.arange(0, 128):\n # MLC_IDSAT_algorithm_rv1(14, 33, 16, 2, 'ULVT', 1.8, 2.0, 128, [row_start], [71], [0.002], 1, np.arange(0, 0.002*(71)+0.0001, 0.002), t_label, ['../Data/chip14/MLC_programming_Chip14_Col33_2msPULSE_VG1p8_VD2p0_VAsource_VBdrain_01'], '../Plots/chip14/', 'VG1p8_VD2p0', '_rv1_cycle01_row_'+str(row_start).zfill(3), Imin=80, Imax=142)\n\n #MLC_IDSAT_algorithm_rv1(14, 33, 16, 2, 'ULVT', 1.8, 2.0, 128, range(0, 128), [71], [0.002], 1, np.arange(0, 0.002*(71)+0.0001, 0.002), t_label, ['../Data/chip14/MLC_programming_Chip14_Col33_2msPULSE_VG1p8_VD2p0_VAsource_VBdrain_01'], '../Plots/chip14/', 'VG1p8_VD2p0', '_rv1_cycle01', Imin=80, Imax=142)\n\n #MLC_IDSAT_characterization(11, 30, 16, 2, 'ULVT', 1.8, 1.7, 32, range(0, 16) , [40], [0.01], 1, [0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.5], ['0', '0.05', '0.1', '0.15', '0.2', '0.25', '0.3', '0.35', '0.4', 'recover'], ['../Data/chip11/Chip11_Col30_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01'], '../Plots/chip11/', 'VG1p8_VD1p7', '_cycle01', 50, 160, 1)\n #MLC_IDSAT_characterization(11, 30, 16, 2, 'ULVT', 1.8, 2.0, 32, range(16, 32), [40], [0.01], 1, [0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.5], ['0', '0.05', '0.1', '0.15', '0.2', '0.25', '0.3', '0.35', '0.4', 'recover'], ['../Data/chip11/Chip11_Col30_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01'], '../Plots/chip11/', 'VG1p8_VD2p0', '_cycle01', 20, 160, 1)\n\n #MLC_IDSAT_characterization(11, 30, 16, 2, 'ULVT', 1.8, 1.7, 32, range(0, 16) , [40, 80], [0.01, 0.01], 2, [0, 0.2, 0.4, 0.8, 1.0, 1.2, 1.4, 1.6, 1.7], ['0', '0.2', '0.4', '0.4', '0.6', '0.8', '1.0', '1.2', 'recover'], ['../Data/chip11/Chip11_Col30_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col30_HCI_80x10ms_stress_VG_ConstPulse_VAsource_VBdrain_02'], '../Plots/chip11/', 'VG1p8_VD1p7', '_cycle0102', 50, 160, 1)\n #MLC_IDSAT_characterization(11, 30, 16, 2, 'ULVT', 1.8, 2.0, 32, range(16, 32), [40, 80], [0.01, 0.01], 2, [0, 0.2, 0.4, 0.8, 1.0, 1.2, 1.4, 1.6, 1.7], ['0', '0.2', '0.4', '0.4', '0.6', '0.8', '1.0', '1.2', 'recover'], ['../Data/chip11/Chip11_Col30_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col30_HCI_80x10ms_stress_VG_ConstPulse_VAsource_VBdrain_02'], '../Plots/chip11/', 'VG1p8_VD2p0', '_cycle0102', 20, 160, 1)\n\n #MLC_IDSAT_characterization(11, 30, 16, 2, 'ULVT', 1.8, 1.7, 32, range(0, 16) , [40, 80, 240], [0.01, 0.01, 0.01], 3, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6'], ['../Data/chip11/Chip11_Col30_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col30_HCI_80x10ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col30_HCI_240x10ms_stress_VG_ConstPulse_VAsource_VBdrain_03'], '../Plots/chip11/', 'VG1p8_VD1p7', '_cycle010203', 50, 160, 1)\n #MLC_IDSAT_characterization(11, 30, 16, 2, 'ULVT', 1.8, 2.0, 32, range(16, 32), [40, 80, 240], [0.01, 0.01, 0.01], 3, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6'], ['../Data/chip11/Chip11_Col30_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col30_HCI_80x10ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col30_HCI_240x10ms_stress_VG_ConstPulse_VAsource_VBdrain_03'], 
'../Plots/chip11/', 'VG1p8_VD2p0', '_cycle010203', 20, 160, 1)\n\n #MLC_IDSAT_characterization(11, 30, 16, 2, 'ULVT', 1.8, 1.7, 32, range(0, 16) , [40, 80, 240, 180], [0.01, 0.01, 0.01, 0.04], 4, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4, 4.8, 12], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6', '3.6', '10.8'], ['../Data/chip11/Chip11_Col30_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col30_HCI_80x10ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col30_HCI_240x10ms_stress_VG_ConstPulse_VAsource_VBdrain_03', '../Data/chip11/Chip11_Col30_HCI_180x40ms_stress_VG_ConstPulse_VAsource_VBdrain_04'], '../Plots/chip11/', 'VG1p8_VD1p7', '_cycle01020304', 40, 160, 1)\n #MLC_IDSAT_characterization(11, 30, 16, 2, 'ULVT', 1.8, 2.0, 32, range(16, 32), [40, 80, 240, 180], [0.01, 0.01, 0.01, 0.04], 4, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4, 4.8, 12], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6', '3.6', '10.8'], ['../Data/chip11/Chip11_Col30_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col30_HCI_80x10ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col30_HCI_240x10ms_stress_VG_ConstPulse_VAsource_VBdrain_03', '../Data/chip11/Chip11_Col30_HCI_180x40ms_stress_VG_ConstPulse_VAsource_VBdrain_04'], '../Plots/chip11/', 'VG1p8_VD2p0', '_cycle01020304', 10, 160, 1)\n\n #MLC_IDSAT_characterization(11, 33, 16, 2, 'ULVT', 1.5, 1.7, 128, range(0, 32) , [40], [0.01], 1, [0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.5], ['0', '0.05', '0.1', '0.15', '0.2', '0.25', '0.3', '0.35', '0.4', 'recover'], ['../Data/chip11/Chip11_Col33_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01'], '../Plots/chip11/', 'VG1p5_VD1p7', '_cycle01', 50, 160, 1)\n #MLC_IDSAT_characterization(11, 33, 16, 2, 'ULVT', 1.8, 1.7, 128, range(32, 64) , [40], [0.01], 1, [0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.5], ['0', '0.05', '0.1', '0.15', '0.2', '0.25', '0.3', '0.35', '0.4', 'recover'], ['../Data/chip11/Chip11_Col33_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01'], '../Plots/chip11/', 'VG1p8_VD1p7', '_cycle01', 50, 160, 1)\n #MLC_IDSAT_characterization(11, 33, 16, 2, 'ULVT', 1.5, 2.0, 128, range(64, 96) , [40], [0.01], 1, [0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.5], ['0', '0.05', '0.1', '0.15', '0.2', '0.25', '0.3', '0.35', '0.4', 'recover'], ['../Data/chip11/Chip11_Col33_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01'], '../Plots/chip11/', 'VG1p5_VD2p0', '_cycle01', 20, 160, 1)\n #MLC_IDSAT_characterization(11, 33, 16, 2, 'ULVT', 1.8, 2.0, 128, range(96, 128), [40], [0.01], 1, [0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.5], ['0', '0.05', '0.1', '0.15', '0.2', '0.25', '0.3', '0.35', '0.4', 'recover'], ['../Data/chip11/Chip11_Col33_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01'], '../Plots/chip11/', 'VG1p8_VD2p0', '_cycle01', 20, 160, 1)\n\n #MLC_IDSAT_characterization(11, 33, 16, 2, 'ULVT', 1.5, 1.7, 128, range(0, 32) , [40, 20], [0.01, 0.04], 2, [0, 0.2, 0.4, 0.8, 1.0, 1.2, 1.4, 1.6, 1.7], ['0', '0.2', '0.4', '0.4', '0.6', '0.8', '1.0', '1.2', 'recover'], ['../Data/chip11/Chip11_Col33_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col33_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02'], '../Plots/chip11/', 'VG1p5_VD1p7', '_cycle0102', 50, 160, 1)\n #MLC_IDSAT_characterization(11, 33, 16, 2, 'ULVT', 1.8, 1.7, 128, range(32, 64) , [40, 20], [0.01, 0.04], 2, [0, 0.2, 0.4, 0.8, 1.0, 1.2, 1.4, 1.6, 1.7], ['0', '0.2', '0.4', '0.4', 
'0.6', '0.8', '1.0', '1.2', 'recover'], ['../Data/chip11/Chip11_Col33_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col33_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02'], '../Plots/chip11/', 'VG1p8_VD1p7', '_cycle0102', 50, 160, 1)\n #MLC_IDSAT_characterization(11, 33, 16, 2, 'ULVT', 1.5, 2.0, 128, range(64, 96) , [40, 20], [0.01, 0.04], 2, [0, 0.2, 0.4, 0.8, 1.0, 1.2, 1.4, 1.6, 1.7], ['0', '0.2', '0.4', '0.4', '0.6', '0.8', '1.0', '1.2', 'recover'], ['../Data/chip11/Chip11_Col33_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col33_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02'], '../Plots/chip11/', 'VG1p5_VD2p0', '_cycle0102', 20, 160, 1)\n #MLC_IDSAT_characterization(11, 33, 16, 2, 'ULVT', 1.8, 2.0, 128, range(96, 128), [40, 20], [0.01, 0.04], 2, [0, 0.2, 0.4, 0.8, 1.0, 1.2, 1.4, 1.6, 1.7], ['0', '0.2', '0.4', '0.4', '0.6', '0.8', '1.0', '1.2', 'recover'], ['../Data/chip11/Chip11_Col33_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col33_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02'], '../Plots/chip11/', 'VG1p8_VD2p0', '_cycle0102', 20, 160, 1)\n\n #MLC_IDSAT_characterization(11, 33, 16, 2, 'ULVT', 1.5, 1.7, 128, range(0, 32) , [40, 20, 12], [0.01, 0.04, 0.2], 3, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6'], ['../Data/chip11/Chip11_Col33_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col33_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col33_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03'], '../Plots/chip11/', 'VG1p5_VD1p7', '_cycle010203', 50, 160, 1)\n #MLC_IDSAT_characterization(11, 33, 16, 2, 'ULVT', 1.8, 1.7, 128, range(32, 64) , [40, 20, 12], [0.01, 0.04, 0.2], 3, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6'], ['../Data/chip11/Chip11_Col33_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col33_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col33_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03'], '../Plots/chip11/', 'VG1p8_VD1p7', '_cycle010203', 50, 160, 1)\n #MLC_IDSAT_characterization(11, 33, 16, 2, 'ULVT', 1.5, 2.0, 128, range(64, 96) , [40, 20, 12], [0.01, 0.04, 0.2], 3, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6'], ['../Data/chip11/Chip11_Col33_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col33_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col33_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03'], '../Plots/chip11/', 'VG1p5_VD2p0', '_cycle010203', 20, 160, 1)\n #MLC_IDSAT_characterization(11, 33, 16, 2, 'ULVT', 1.8, 2.0, 128, range(96, 128), [40, 20, 12], [0.01, 0.04, 0.2], 3, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6'], ['../Data/chip11/Chip11_Col33_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col33_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col33_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03'], '../Plots/chip11/', 'VG1p8_VD2p0', '_cycle010203', 20, 160, 1)\n\n #MLC_IDSAT_characterization(11, 33, 16, 2, 'ULVT', 1.5, 1.7, 128, range(0, 32) , [40, 20, 12, 36], [0.01, 0.04, 0.2, 0.2], 4, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4, 4.8, 12], ['0', 
'0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6', '3.6', '10.8'], ['../Data/chip11/Chip11_Col33_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col33_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col33_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03', '../Data/chip11/Chip11_Col33_HCI_36x200ms_stress_VG_ConstPulse_VAsource_VBdrain_04'], '../Plots/chip11/', 'VG1p5_VD1p7', '_cycle01020304', 40, 160, 1)\n #MLC_IDSAT_characterization(11, 33, 16, 2, 'ULVT', 1.8, 1.7, 128, range(32, 64) , [40, 20, 12, 36], [0.01, 0.04, 0.2, 0.2], 4, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4, 4.8, 12], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6', '3.6', '10.8'], ['../Data/chip11/Chip11_Col33_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col33_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col33_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03', '../Data/chip11/Chip11_Col33_HCI_36x200ms_stress_VG_ConstPulse_VAsource_VBdrain_04'], '../Plots/chip11/', 'VG1p8_VD1p7', '_cycle01020304', 40, 160, 1)\n #MLC_IDSAT_characterization(11, 33, 16, 2, 'ULVT', 1.5, 2.0, 128, range(64, 96) , [40, 20, 12, 36], [0.01, 0.04, 0.2, 0.2], 4, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4, 4.8, 12], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6', '3.6', '10.8'], ['../Data/chip11/Chip11_Col33_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col33_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col33_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03', '../Data/chip11/Chip11_Col33_HCI_36x200ms_stress_VG_ConstPulse_VAsource_VBdrain_04'], '../Plots/chip11/', 'VG1p5_VD2p0', '_cycle01020304', 10, 160, 1)\n #MLC_IDSAT_characterization(11, 33, 16, 2, 'ULVT', 1.8, 2.0, 128, range(96, 128), [40, 20, 12, 36], [0.01, 0.04, 0.2, 0.2], 4, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4, 4.8, 12], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6', '3.6', '10.8'], ['../Data/chip11/Chip11_Col33_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col33_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col33_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03', '../Data/chip11/Chip11_Col33_HCI_36x200ms_stress_VG_ConstPulse_VAsource_VBdrain_04'], '../Plots/chip11/', 'VG1p8_VD2p0', '_cycle01020304', 10, 160, 1)\n\n #MLC_IDSAT_characterization(11, 18, 36, 2, 'ULVT', 1.8, 2.0, 32, range(0, 16) , [40], [0.01], 1, [0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.5], ['0', '0.05', '0.1', '0.15', '0.2', '0.25', '0.3', '0.35', '0.4', 'recover'], ['../Data/chip11/Chip11_Col18_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01'], '../Plots/chip11/', 'VG1p8_VD2p0', '_cycle01', 50, 125, 1)\n #MLC_IDSAT_characterization(11, 18, 36, 2, 'ULVT', 1.8, 2.4, 32, range(16, 32), [40], [0.01], 1, [0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.5], ['0', '0.05', '0.1', '0.15', '0.2', '0.25', '0.3', '0.35', '0.4', 'recover'], ['../Data/chip11/Chip11_Col18_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01'], '../Plots/chip11/', 'VG1p8_VD2p4', '_cycle01', 20, 125, 1)\n\n #MLC_IDSAT_characterization(11, 18, 36, 2, 'ULVT', 1.8, 2.0, 32, range(0, 16) , [40, 80], [0.01, 0.01], 2, [0, 0.2, 0.4, 0.8, 1.0, 1.2, 1.4, 1.6, 1.7], ['0', '0.2', '0.4', '0.4', '0.6', '0.8', '1.0', '1.2', 'recover'], ['../Data/chip11/Chip11_Col18_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', 
'../Data/chip11/Chip11_Col18_HCI_80x10ms_stress_VG_ConstPulse_VAsource_VBdrain_02'], '../Plots/chip11/', 'VG1p8_VD2p0', '_cycle0102', 50, 125, 1)\n #MLC_IDSAT_characterization(11, 18, 36, 2, 'ULVT', 1.8, 2.4, 32, range(16, 32), [40, 80], [0.01, 0.01], 2, [0, 0.2, 0.4, 0.8, 1.0, 1.2, 1.4, 1.6, 1.7], ['0', '0.2', '0.4', '0.4', '0.6', '0.8', '1.0', '1.2', 'recover'], ['../Data/chip11/Chip11_Col18_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col18_HCI_80x10ms_stress_VG_ConstPulse_VAsource_VBdrain_02'], '../Plots/chip11/', 'VG1p8_VD2p4', '_cycle0102', 20, 125, 1)\n\n #MLC_IDSAT_characterization(11, 18, 36, 2, 'ULVT', 1.8, 2.0, 32, range(0, 16) , [40, 80, 240], [0.01, 0.01, 0.01], 3, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6'], ['../Data/chip11/Chip11_Col18_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col18_HCI_80x10ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col18_HCI_240x10ms_stress_VG_ConstPulse_VAsource_VBdrain_03'], '../Plots/chip11/', 'VG1p8_VD2p0', '_cycle010203', 50, 125, 1)\n #MLC_IDSAT_characterization(11, 18, 36, 2, 'ULVT', 1.8, 2.4, 32, range(16, 32), [40, 80, 240], [0.01, 0.01, 0.01], 3, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6'], ['../Data/chip11/Chip11_Col18_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col18_HCI_80x10ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col18_HCI_240x10ms_stress_VG_ConstPulse_VAsource_VBdrain_03'], '../Plots/chip11/', 'VG1p8_VD2p4', '_cycle010203', 20, 125, 1)\n\n #MLC_IDSAT_characterization(11, 18, 36, 2, 'ULVT', 1.8, 2.0, 32, range(0, 16) , [40, 80, 240, 180], [0.01, 0.01, 0.01, 0.04], 4, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4, 4.8, 12], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6', '3.6', '10.8'], ['../Data/chip11/Chip11_Col18_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col18_HCI_80x10ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col18_HCI_240x10ms_stress_VG_ConstPulse_VAsource_VBdrain_03', '../Data/chip11/Chip11_Col18_HCI_180x40ms_stress_VG_ConstPulse_VAsource_VBdrain_04'], '../Plots/chip11/', 'VG1p8_VD2p0', '_cycle01020304', 40, 125, 1)\n #MLC_IDSAT_characterization(11, 18, 36, 2, 'ULVT', 1.8, 2.4, 32, range(16, 32), [40, 80, 240, 180], [0.01, 0.01, 0.01, 0.04], 4, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4, 4.8, 12], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6', '3.6', '10.8'], ['../Data/chip11/Chip11_Col18_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col18_HCI_80x10ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col18_HCI_240x10ms_stress_VG_ConstPulse_VAsource_VBdrain_03', '../Data/chip11/Chip11_Col18_HCI_180x40ms_stress_VG_ConstPulse_VAsource_VBdrain_04'], '../Plots/chip11/', 'VG1p8_VD2p4', '_cycle01020304', 10, 125, 1)\n\n #MLC_IDSAT_characterization(11, 24, 20, 2, 'ULVT', 1.8, 1.8, 32, range(0, 16) , [40], [0.01], 1, [0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.5], ['0', '0.05', '0.1', '0.15', '0.2', '0.25', '0.3', '0.35', '0.4', 'recover'], ['../Data/chip11/Chip11_Col24_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01'], '../Plots/chip11/', 'VG1p8_VD1p8', '_cycle01', 50, 150, 1)\n #MLC_IDSAT_characterization(11, 24, 20, 2, 'ULVT', 1.8, 2.2, 32, range(16, 32), [40], [0.01], 1, [0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 
0.5], ['0', '0.05', '0.1', '0.15', '0.2', '0.25', '0.3', '0.35', '0.4', 'recover'], ['../Data/chip11/Chip11_Col24_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01'], '../Plots/chip11/', 'VG1p8_VD2p2', '_cycle01', 15, 150, 1)\n\n #MLC_IDSAT_characterization(11, 24, 20, 2, 'ULVT', 1.8, 1.8, 32, range(0, 16) , [40, 80], [0.01, 0.01], 2, [0, 0.2, 0.4, 0.8, 1.0, 1.2, 1.4, 1.6, 1.7], ['0', '0.2', '0.4', '0.4', '0.6', '0.8', '1.0', '1.2', 'recover'], ['../Data/chip11/Chip11_Col24_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col24_HCI_80x10ms_stress_VG_ConstPulse_VAsource_VBdrain_02'], '../Plots/chip11/', 'VG1p8_VD1p8', '_cycle0102', 50, 150, 1)\n #MLC_IDSAT_characterization(11, 24, 20, 2, 'ULVT', 1.8, 2.2, 32, range(16, 32), [40, 80], [0.01, 0.01], 2, [0, 0.2, 0.4, 0.8, 1.0, 1.2, 1.4, 1.6, 1.7], ['0', '0.2', '0.4', '0.4', '0.6', '0.8', '1.0', '1.2', 'recover'], ['../Data/chip11/Chip11_Col24_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col24_HCI_80x10ms_stress_VG_ConstPulse_VAsource_VBdrain_02'], '../Plots/chip11/', 'VG1p8_VD2p2', '_cycle0102', 15, 150, 1)\n\n #MLC_IDSAT_characterization(11, 24, 20, 2, 'ULVT', 1.8, 1.8, 32, range(0, 16) , [40, 80, 240], [0.01, 0.01, 0.01], 3, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6'], ['../Data/chip11/Chip11_Col24_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col24_HCI_80x10ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col24_HCI_240x10ms_stress_VG_ConstPulse_VAsource_VBdrain_03'], '../Plots/chip11/', 'VG1p8_VD1p8', '_cycle010203', 50, 150, 1)\n #MLC_IDSAT_characterization(11, 24, 20, 2, 'ULVT', 1.8, 2.2, 32, range(16, 32), [40, 80, 240], [0.01, 0.01, 0.01], 3, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6'], ['../Data/chip11/Chip11_Col24_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col24_HCI_80x10ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col24_HCI_240x10ms_stress_VG_ConstPulse_VAsource_VBdrain_03'], '../Plots/chip11/', 'VG1p8_VD2p2', '_cycle010203', 15, 150, 1)\n\n #MLC_IDSAT_characterization(11, 24, 20, 2, 'ULVT', 1.8, 1.8, 32, range(0, 16) , [40, 80, 240, 180], [0.01, 0.01, 0.01, 0.04], 4, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4, 4.8, 12], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6', '3.6', '10.8'], ['../Data/chip11/Chip11_Col24_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col24_HCI_80x10ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col24_HCI_240x10ms_stress_VG_ConstPulse_VAsource_VBdrain_03', '../Data/chip11/Chip11_Col24_HCI_180x40ms_stress_VG_ConstPulse_VAsource_VBdrain_04'], '../Plots/chip11/', 'VG1p8_VD1p8', '_cycle01020304', 40, 150, 1)\n #MLC_IDSAT_characterization(11, 24, 20, 2, 'ULVT', 1.8, 2.2, 32, range(16, 32), [40, 80, 240, 180], [0.01, 0.01, 0.01, 0.04], 4, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4, 4.8, 12], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6', '3.6', '10.8'], ['../Data/chip11/Chip11_Col24_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col24_HCI_80x10ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col24_HCI_240x10ms_stress_VG_ConstPulse_VAsource_VBdrain_03', '../Data/chip11/Chip11_Col24_HCI_180x40ms_stress_VG_ConstPulse_VAsource_VBdrain_04'], '../Plots/chip11/', 'VG1p8_VD2p2', '_cycle01020304', 
5, 150, 1)\n\n\n #MLC_IDSAT_characterization(11, 27, 20, 2, 'ULVT', 1.5, 1.8, 128, range(0, 32) , [40], [0.01], 1, [0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.5], ['0', '0.05', '0.1', '0.15', '0.2', '0.25', '0.3', '0.35', '0.4', 'recover'], ['../Data/chip11/Chip11_Col27_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01'], '../Plots/chip11/', 'VG1p5_VD1p8', '_cycle01', 50, 150, 1)\n #MLC_IDSAT_characterization(11, 27, 20, 2, 'ULVT', 1.8, 1.8, 128, range(32, 64) , [40], [0.01], 1, [0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.5], ['0', '0.05', '0.1', '0.15', '0.2', '0.25', '0.3', '0.35', '0.4', 'recover'], ['../Data/chip11/Chip11_Col27_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01'], '../Plots/chip11/', 'VG1p8_VD1p8', '_cycle01', 50, 150, 1)\n #MLC_IDSAT_characterization(11, 27, 20, 2, 'ULVT', 1.5, 2.2, 128, range(64, 96) , [40], [0.01], 1, [0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.5], ['0', '0.05', '0.1', '0.15', '0.2', '0.25', '0.3', '0.35', '0.4', 'recover'], ['../Data/chip11/Chip11_Col27_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01'], '../Plots/chip11/', 'VG1p5_VD2p2', '_cycle01', 15, 150, 1)\n #MLC_IDSAT_characterization(11, 27, 20, 2, 'ULVT', 1.8, 2.2, 128, range(96, 128), [40], [0.01], 1, [0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.5], ['0', '0.05', '0.1', '0.15', '0.2', '0.25', '0.3', '0.35', '0.4', 'recover'], ['../Data/chip11/Chip11_Col27_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01'], '../Plots/chip11/', 'VG1p8_VD2p2', '_cycle01', 15, 150, 1)\n\n #MLC_IDSAT_characterization(11, 27, 20, 2, 'ULVT', 1.5, 1.8, 128, range(0, 32) , [40, 20], [0.01, 0.04], 2, [0, 0.2, 0.4, 0.8, 1.0, 1.2, 1.4, 1.6, 1.7], ['0', '0.2', '0.4', '0.4', '0.6', '0.8', '1.0', '1.2', 'recover'], ['../Data/chip11/Chip11_Col27_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col27_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02'], '../Plots/chip11/', 'VG1p5_VD1p8', '_cycle0102', 50, 150, 1)\n #MLC_IDSAT_characterization(11, 27, 20, 2, 'ULVT', 1.8, 1.8, 128, range(32, 64) , [40, 20], [0.01, 0.04], 2, [0, 0.2, 0.4, 0.8, 1.0, 1.2, 1.4, 1.6, 1.7], ['0', '0.2', '0.4', '0.4', '0.6', '0.8', '1.0', '1.2', 'recover'], ['../Data/chip11/Chip11_Col27_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col27_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02'], '../Plots/chip11/', 'VG1p8_VD1p8', '_cycle0102', 50, 150, 1)\n #MLC_IDSAT_characterization(11, 27, 20, 2, 'ULVT', 1.5, 2.2, 128, range(64, 96) , [40, 20], [0.01, 0.04], 2, [0, 0.2, 0.4, 0.8, 1.0, 1.2, 1.4, 1.6, 1.7], ['0', '0.2', '0.4', '0.4', '0.6', '0.8', '1.0', '1.2', 'recover'], ['../Data/chip11/Chip11_Col27_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col27_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02'], '../Plots/chip11/', 'VG1p5_VD2p2', '_cycle0102', 15, 150, 1)\n #MLC_IDSAT_characterization(11, 27, 20, 2, 'ULVT', 1.8, 2.2, 128, range(96, 128), [40, 20], [0.01, 0.04], 2, [0, 0.2, 0.4, 0.8, 1.0, 1.2, 1.4, 1.6, 1.7], ['0', '0.2', '0.4', '0.4', '0.6', '0.8', '1.0', '1.2', 'recover'], ['../Data/chip11/Chip11_Col27_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col27_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02'], '../Plots/chip11/', 'VG1p8_VD2p2', '_cycle0102', 15, 150, 1)\n\n #MLC_IDSAT_characterization(11, 27, 20, 2, 'ULVT', 1.5, 1.8, 128, range(0, 32) , [40, 20, 12], [0.01, 0.04, 0.2], 3, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', 
'2.4', '3.0', '3.6'], ['../Data/chip11/Chip11_Col27_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col27_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col27_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03'], '../Plots/chip11/', 'VG1p5_VD1p8', '_cycle010203', 50, 150, 1)\n #MLC_IDSAT_characterization(11, 27, 20, 2, 'ULVT', 1.8, 1.8, 128, range(32, 64) , [40, 20, 12], [0.01, 0.04, 0.2], 3, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6'], ['../Data/chip11/Chip11_Col27_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col27_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col27_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03'], '../Plots/chip11/', 'VG1p8_VD1p8', '_cycle010203', 50, 150, 1)\n #MLC_IDSAT_characterization(11, 27, 20, 2, 'ULVT', 1.5, 2.2, 128, range(64, 96) , [40, 20, 12], [0.01, 0.04, 0.2], 3, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6'], ['../Data/chip11/Chip11_Col27_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col27_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col27_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03'], '../Plots/chip11/', 'VG1p5_VD2p2', '_cycle010203', 15, 150, 1)\n #MLC_IDSAT_characterization(11, 27, 20, 2, 'ULVT', 1.8, 2.2, 128, range(96, 128), [40, 20, 12], [0.01, 0.04, 0.2], 3, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6'], ['../Data/chip11/Chip11_Col27_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col27_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col27_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03'], '../Plots/chip11/', 'VG1p8_VD2p2', '_cycle010203', 15, 150, 1)\n\n #MLC_IDSAT_characterization(11, 27, 20, 2, 'ULVT', 1.5, 1.8, 128, range(0, 32) , [40, 20, 12, 36], [0.01, 0.04, 0.2, 0.2], 4, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4, 4.8, 12], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6', '3.6', '10.8'], ['../Data/chip11/Chip11_Col27_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col27_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col27_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03', '../Data/chip11/Chip11_Col27_HCI_36x200ms_stress_VG_ConstPulse_VAsource_VBdrain_04'], '../Plots/chip11/', 'VG1p5_VD1p8', '_cycle01020304', 40, 150, 1)\n #MLC_IDSAT_characterization(11, 27, 20, 2, 'ULVT', 1.8, 1.8, 128, range(32, 64) , [40, 20, 12, 36], [0.01, 0.04, 0.2, 0.2], 4, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4, 4.8, 12], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6', '3.6', '10.8'], ['../Data/chip11/Chip11_Col27_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col27_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col27_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03', '../Data/chip11/Chip11_Col27_HCI_36x200ms_stress_VG_ConstPulse_VAsource_VBdrain_04'], '../Plots/chip11/', 'VG1p8_VD1p8', '_cycle01020304', 20, 150, 1)\n #MLC_IDSAT_characterization(11, 27, 20, 2, 'ULVT', 1.5, 2.2, 128, range(64, 96) , [40, 20, 12, 36], [0.01, 0.04, 0.2, 0.2], 4, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4, 4.8, 12], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', 
'3.6', '3.6', '10.8'], ['../Data/chip11/Chip11_Col27_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col27_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col27_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03', '../Data/chip11/Chip11_Col27_HCI_36x200ms_stress_VG_ConstPulse_VAsource_VBdrain_04'], '../Plots/chip11/', 'VG1p5_VD2p2', '_cycle01020304', 5, 150, 1)\n #MLC_IDSAT_characterization(11, 27, 20, 2, 'ULVT', 1.8, 2.2, 128, range(96, 128), [40, 20, 12, 36], [0.01, 0.04, 0.2, 0.2], 4, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4, 4.8, 12], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6', '3.6', '10.8'], ['../Data/chip11/Chip11_Col27_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col27_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col27_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03', '../Data/chip11/Chip11_Col27_HCI_36x200ms_stress_VG_ConstPulse_VAsource_VBdrain_04'], '../Plots/chip11/', 'VG1p8_VD2p2', '_cycle01020304', 5, 150, 1)\n\n #MLC_IDSAT_characterization(11, 28, 20, 2, 'LVT', 1.5, 1.8, 128, range(0, 32) , [40], [0.01], 1, [0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.5], ['0', '0.05', '0.1', '0.15', '0.2', '0.25', '0.3', '0.35', '0.4', 'recover'], ['../Data/chip11/Chip11_Col28_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01'], '../Plots/chip11/', 'VG1p5_VD1p8', '_cycle01', 50, 150, 1)\n #MLC_IDSAT_characterization(11, 28, 20, 2, 'LVT', 1.8, 1.8, 128, range(32, 64) , [40], [0.01], 1, [0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.5], ['0', '0.05', '0.1', '0.15', '0.2', '0.25', '0.3', '0.35', '0.4', 'recover'], ['../Data/chip11/Chip11_Col28_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01'], '../Plots/chip11/', 'VG1p8_VD1p8', '_cycle01', 50, 150, 1)\n #MLC_IDSAT_characterization(11, 28, 20, 2, 'LVT', 1.5, 2.2, 128, range(64, 96) , [40], [0.01], 1, [0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.5], ['0', '0.05', '0.1', '0.15', '0.2', '0.25', '0.3', '0.35', '0.4', 'recover'], ['../Data/chip11/Chip11_Col28_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01'], '../Plots/chip11/', 'VG1p5_VD2p2', '_cycle01', 15, 150, 1)\n #MLC_IDSAT_characterization(11, 28, 20, 2, 'LVT', 1.8, 2.2, 128, range(96, 128), [40], [0.01], 1, [0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.5], ['0', '0.05', '0.1', '0.15', '0.2', '0.25', '0.3', '0.35', '0.4', 'recover'], ['../Data/chip11/Chip11_Col28_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01'], '../Plots/chip11/', 'VG1p8_VD2p2', '_cycle01', 15, 150, 1)\n\n #MLC_IDSAT_characterization(11, 28, 20, 2, 'LVT', 1.5, 1.8, 128, range(0, 32) , [40, 20], [0.01, 0.04], 2, [0, 0.2, 0.4, 0.8, 1.0, 1.2, 1.4, 1.6, 1.7], ['0', '0.2', '0.4', '0.4', '0.6', '0.8', '1.0', '1.2', 'recover'], ['../Data/chip11/Chip11_Col28_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col28_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02'], '../Plots/chip11/', 'VG1p5_VD1p8', '_cycle0102', 50, 150, 1)\n #MLC_IDSAT_characterization(11, 28, 20, 2, 'LVT', 1.8, 1.8, 128, range(32, 64) , [40, 20], [0.01, 0.04], 2, [0, 0.2, 0.4, 0.8, 1.0, 1.2, 1.4, 1.6, 1.7], ['0', '0.2', '0.4', '0.4', '0.6', '0.8', '1.0', '1.2', 'recover'], ['../Data/chip11/Chip11_Col28_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col28_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02'], '../Plots/chip11/', 'VG1p8_VD1p8', '_cycle0102', 50, 150, 1)\n #MLC_IDSAT_characterization(11, 28, 20, 2, 
'LVT', 1.5, 2.2, 128, range(64, 96) , [40, 20], [0.01, 0.04], 2, [0, 0.2, 0.4, 0.8, 1.0, 1.2, 1.4, 1.6, 1.7], ['0', '0.2', '0.4', '0.4', '0.6', '0.8', '1.0', '1.2', 'recover'], ['../Data/chip11/Chip11_Col28_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col28_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02'], '../Plots/chip11/', 'VG1p5_VD2p2', '_cycle0102', 15, 150, 1)\n #MLC_IDSAT_characterization(11, 28, 20, 2, 'LVT', 1.8, 2.2, 128, range(96, 128), [40, 20], [0.01, 0.04], 2, [0, 0.2, 0.4, 0.8, 1.0, 1.2, 1.4, 1.6, 1.7], ['0', '0.2', '0.4', '0.4', '0.6', '0.8', '1.0', '1.2', 'recover'], ['../Data/chip11/Chip11_Col28_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col28_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02'], '../Plots/chip11/', 'VG1p8_VD2p2', '_cycle0102', 15, 150, 1)\n\n #MLC_IDSAT_characterization(11, 28, 20, 2, 'LVT', 1.5, 1.8, 128, range(0, 32) , [40, 20, 12], [0.01, 0.04, 0.2], 3, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6'], ['../Data/chip11/Chip11_Col28_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col28_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col28_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03'], '../Plots/chip11/', 'VG1p5_VD1p8', '_cycle010203', 50, 150, 1)\n #MLC_IDSAT_characterization(11, 28, 20, 2, 'LVT', 1.8, 1.8, 128, range(32, 64) , [40, 20, 12], [0.01, 0.04, 0.2], 3, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6'], ['../Data/chip11/Chip11_Col28_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col28_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col28_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03'], '../Plots/chip11/', 'VG1p8_VD1p8', '_cycle010203', 50, 150, 1)\n #MLC_IDSAT_characterization(11, 28, 20, 2, 'LVT', 1.5, 2.2, 128, range(64, 96) , [40, 20, 12], [0.01, 0.04, 0.2], 3, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6'], ['../Data/chip11/Chip11_Col28_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col28_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col28_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03'], '../Plots/chip11/', 'VG1p5_VD2p2', '_cycle010203', 15, 150, 1)\n #MLC_IDSAT_characterization(11, 28, 20, 2, 'LVT', 1.8, 2.2, 128, range(96, 128), [40, 20, 12], [0.01, 0.04, 0.2], 3, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6'], ['../Data/chip11/Chip11_Col28_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col28_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col28_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03'], '../Plots/chip11/', 'VG1p8_VD2p2', '_cycle010203', 15, 150, 1)\n\n #MLC_IDSAT_characterization(11, 28, 20, 2, 'LVT', 1.5, 1.8, 128, range(0, 32) , [40, 20, 12, 36], [0.01, 0.04, 0.2, 0.2], 4, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4, 4.8, 12], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6', '3.6', '10.8'], ['../Data/chip11/Chip11_Col28_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col28_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', 
'../Data/chip11/Chip11_Col28_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03', '../Data/chip11/Chip11_Col28_HCI_36x200ms_stress_VG_ConstPulse_VAsource_VBdrain_04'], '../Plots/chip11/', 'VG1p5_VD1p8', '_cycle01020304', 40, 150, 1)\n #MLC_IDSAT_characterization(11, 28, 20, 2, 'LVT', 1.8, 1.8, 128, range(32, 64) , [40, 20, 12, 36], [0.01, 0.04, 0.2, 0.2], 4, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4, 4.8, 12], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6', '3.6', '10.8'], ['../Data/chip11/Chip11_Col28_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col28_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col28_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03', '../Data/chip11/Chip11_Col28_HCI_36x200ms_stress_VG_ConstPulse_VAsource_VBdrain_04'], '../Plots/chip11/', 'VG1p8_VD1p8', '_cycle01020304', 20, 150, 1)\n #MLC_IDSAT_characterization(11, 28, 20, 2, 'LVT', 1.5, 2.2, 128, range(64, 96) , [40, 20, 12, 36], [0.01, 0.04, 0.2, 0.2], 4, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4, 4.8, 12], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6', '3.6', '10.8'], ['../Data/chip11/Chip11_Col28_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col28_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col28_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03', '../Data/chip11/Chip11_Col28_HCI_36x200ms_stress_VG_ConstPulse_VAsource_VBdrain_04'], '../Plots/chip11/', 'VG1p5_VD2p2', '_cycle01020304', 5, 150, 1)\n #MLC_IDSAT_characterization(11, 28, 20, 2, 'LVT', 1.8, 2.2, 128, range(96, 128), [40, 20, 12, 36], [0.01, 0.04, 0.2, 0.2], 4, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4, 4.8, 12], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6', '3.6', '10.8'], ['../Data/chip11/Chip11_Col28_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col28_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col28_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03', '../Data/chip11/Chip11_Col28_HCI_36x200ms_stress_VG_ConstPulse_VAsource_VBdrain_04'], '../Plots/chip11/', 'VG1p8_VD2p2', '_cycle01020304', 5, 150, 1)\n\n\n # (L, Nfin, VT_flavor, Nrow, Imax)\n col_list = [(36, 1, 'ULVT', 32 , 60 ), (36, 1, 'LVT', 32 , 50 ), (36, 1, 'SVT', 32 , 45 ),\n (36, 1, 'ULVT', 128, 60 ), (36, 1, 'LVT', 128, 50 ), (36, 1, 'SVT', 128, 45 ),\n (20, 1, 'ULVT', 32 , 75 ), (20, 1, 'LVT', 32 , 60 ), (20, 1, 'SVT', 32 , 50 ),\n (20, 1, 'ULVT', 128, 75 ), (20, 1, 'LVT', 128, 60 ), (20, 1, 'SVT', 128, 50 ),\n (16, 1, 'ULVT', 32 , 80 ), (16, 1, 'LVT', 32 , 65 ), (16, 1, 'SVT', 32 , 60 ),\n (16, 1, 'ULVT', 128, 80 ), (16, 1, 'LVT', 128, 65 ), (16, 1, 'SVT', 128, 60 ),\n (36, 2, 'ULVT', 32 , 115), (36, 2, 'LVT', 32 , 95 ), (36, 2, 'SVT', 32 , 85 ),\n (36, 2, 'ULVT', 128, 115), (36, 2, 'LVT', 128, 95 ), (36, 2, 'SVT', 128, 85 ), \n (20, 2, 'ULVT', 32 , 135), (20, 2, 'LVT', 32 , 115), (20, 2, 'SVT', 32 , 100),\n (20, 2, 'ULVT', 128, 135), (20, 2, 'LVT', 128, 120), (20, 2, 'SVT', 128, 100),\n (16, 2, 'ULVT', 32 , 150), (16, 2, 'LVT', 32 , 125), (16, 2, 'SVT', 32 , 115),\n (16, 2, 'ULVT', 128, 150), (16, 2, 'LVT', 128, 125), (16, 2, 'SVT', 128, 115)]\n\n #MLC_IDSAT_algorithm_rv1(11, 21, 36, 2, 'ULVT', 1.8, 2.4, 128, range(0, 128), [135+20], [0.2], 1, np.arange(0, 0.01*16+0.0001, 0.01), '', ['../Data/chip11/MLC_programming_Chip11_Col21_2msPULSE_VG1p8_VD2p4_VAsource_VBdrain_01'], '../Plots/chip11/', 'VG1p8_VD2p4', 
'_rv1_cycle01_EfficientPython')\n\n #MLC_IDSAT_algorithm_rv1(12, 21, 36, 2, 'ULVT', '0.9-1.2-1.5-1.8', 2.4, 128, range(0, 128), [59+16, 72+40, 80+31, 68+23], [0.2, 0.2, 0.2, 0.2], 4, [0, 15, 15.1, 37.5, 37.6, 59.8, 59.9, 78.1], ['0', '15', '', '37.4', '', '59.6', '', '77.8'], ['../Data/chip12/MLC_programming_Chip12_Col21_200msPULSE_VG0p9_VD2p4_VAsource_VBdrain_01', '../Data/chip12/MLC_programming_Chip12_Col21_200msPULSE_VG1p2_VD2p4_VAsource_VBdrain_02', '../Data/chip12/MLC_programming_Chip12_Col21_200msPULSE_VG1p5_VD2p4_VAsource_VBdrain_03', '../Data/chip12/MLC_programming_Chip12_Col21_200msPULSE_VG1p8_VD2p4_VAsource_VBdrain_04'], '../Plots/chip12/', 'VG-0p9-1p2-1p5-1p8_VD2p4', '_rv1_cycle01020304')\n\n t_ratio_lst = [(0, 0.17), (0.16, 0.34), (0.33, 0.505), (0.495, 0.67), (0.66, 0.84), (0.83, 1)]\n\n #t_label = []\n #for t in np.arange(0, 0.2*(59+16) + 0.0001, 0.2):\n # t_label.append(str(t))\n #MLC_IDSAT_algorithm_rv1(12, 21, 36, 2, 'ULVT', 0.9, 2.4, 128, range(0, 128), [59+16], [0.2], 1, np.arange(0, 0.2*(59+16)+0.0001, 0.2), t_label, ['../Data/chip12/MLC_programming_Chip12_Col21_200msPULSE_VG0p9_VD2p4_VAsource_VBdrain_01'], '../Plots/chip12/', 'VG0p9_VD2p4', '_rv1_cycle01')\n #for row_start in np.arange(0, 128, 8):\n # MLC_IDSAT_algorithm_rv1(12, 21, 36, 2, 'ULVT', 0.9, 2.4, 128, range(row_start, row_start+8), [59+16], [0.2], 1, np.arange(0, 0.2*(59+16)+0.0001, 0.2), t_label, ['../Data/chip12/MLC_programming_Chip12_Col21_200msPULSE_VG0p9_VD2p4_VAsource_VBdrain_01'], '../Plots/chip12/', 'VG0p9_VD2p4', '_rv1_cycle01_row'+str(row_start)+'_to_'+str(row_start+7))\n # segment=0\n # for t_ratio in t_ratio_lst:\n # MLC_IDSAT_algorithm_rv1(12, 21, 36, 2, 'ULVT', 0.9, 2.4, 128, range(row_start, row_start+8), [59+16], [0.2], 1, np.arange(0, 0.2*(59+16)+0.0001, 0.2), t_label, ['../Data/chip12/MLC_programming_Chip12_Col21_200msPULSE_VG0p9_VD2p4_VAsource_VBdrain_01'], '../Plots/chip12/', 'VG0p9_VD2p4', '_rv1_cycle01_row'+str(row_start)+'_to_'+str(row_start+7)+'_'+str(segment), [t_ratio[0]*0.2*(59+16), t_ratio[1]*0.2*(59+16)])\n # segment += 1\n\n #t_label = []\n #for t in np.arange(0, 0.2*(72+40) + 0.0001, 0.2):\n # t_label.append(str(0.2*(59+16) + t))\n #MLC_IDSAT_algorithm_rv1(12, 21, 36, 2, 'ULVT', 1.2, 2.4, 128, range(0, 128), [72+40], [0.2], 1, np.arange(0, 0.2*(72+40)+0.0001, 0.2), t_label, ['../Data/chip12/MLC_programming_Chip12_Col21_200msPULSE_VG1p2_VD2p4_VAsource_VBdrain_02'], '../Plots/chip12/', 'VG1p2_VD2p4', '_rv1_cycle02')\n #for row_start in np.arange(0, 128, 8):\n # MLC_IDSAT_algorithm_rv1(12, 21, 36, 2, 'ULVT', 1.2, 2.4, 128, range(row_start, row_start+8), [72+40], [0.2], 1, np.arange(0, 0.2*(72+40)+0.0001, 0.2), t_label, ['../Data/chip12/MLC_programming_Chip12_Col21_200msPULSE_VG1p2_VD2p4_VAsource_VBdrain_02'], '../Plots/chip12/', 'VG1p2_VD2p4', '_rv1_cycle02_row'+str(row_start)+'_to_'+str(row_start+7))\n # segment=0\n # for t_ratio in t_ratio_lst:\n # MLC_IDSAT_algorithm_rv1(12, 21, 36, 2, 'ULVT', 1.2, 2.4, 128, range(row_start, row_start+8), [72+40], [0.2], 1, np.arange(0, 0.2*(72+40)+0.0001, 0.2), t_label, ['../Data/chip12/MLC_programming_Chip12_Col21_200msPULSE_VG1p2_VD2p4_VAsource_VBdrain_02'], '../Plots/chip12/', 'VG1p2_VD2p4', '_rv1_cycle02_row'+str(row_start)+'_to_'+str(row_start+7)+'_'+str(segment), [t_ratio[0]*0.2*(72+40), t_ratio[1]*0.2*(72+40)])\n # segment += 1\n\n\n #t_label = []\n #for t in np.arange(0, 0.2*(80+31) + 0.0001, 0.2):\n # t_label.append(str(0.2*(59+16) + 0.2*(72+40) + t))\n ##MLC_IDSAT_algorithm_rv1(12, 21, 36, 2, 'ULVT', 1.5, 2.4, 128, range(0, 128), 
[80+31], [0.2], 1, np.arange(0, 0.2*(80+31)+0.0001, 0.2), t_label, ['../Data/chip12/MLC_programming_Chip12_Col21_200msPULSE_VG1p5_VD2p4_VAsource_VBdrain_03'], '../Plots/chip12/', 'VG1p5_VD2p4', '_rv1_cycle03')\n #for row_start in np.arange(0, 128, 8):\n # MLC_IDSAT_algorithm_rv1(12, 21, 36, 2, 'ULVT', 1.5, 2.4, 128, range(row_start, row_start+8), [80+31], [0.2], 1, np.arange(0, 0.2*(80+31)+0.0001, 0.2), t_label, ['../Data/chip12/MLC_programming_Chip12_Col21_200msPULSE_VG1p5_VD2p4_VAsource_VBdrain_03'], '../Plots/chip12/', 'VG1p5_VD2p4', '_rv1_cycle03_row'+str(row_start)+'_to_'+str(row_start+7))\n # segment=0\n # for t_ratio in t_ratio_lst:\n # MLC_IDSAT_algorithm_rv1(12, 21, 36, 2, 'ULVT', 1.5, 2.4, 128, range(row_start, row_start+8), [80+31], [0.2], 1, np.arange(0, 0.2*(80+31)+0.0001, 0.2), t_label, ['../Data/chip12/MLC_programming_Chip12_Col21_200msPULSE_VG1p5_VD2p4_VAsource_VBdrain_03'], '../Plots/chip12/', 'VG1p5_VD2p4', '_rv1_cycle03_row'+str(row_start)+'_to_'+str(row_start+7)+'_'+str(segment), [t_ratio[0]*0.2*(80+31), t_ratio[1]*0.2*(80+31)])\n # segment += 1\n\n\n #t_label = []\n #for t in np.arange(0, 0.2*(68+23) + 0.0001, 0.2):\n # t_label.append(str(0.2*(59+16) + 0.2*(72+40) + 0.2*(80+31) + t))\n #MLC_IDSAT_algorithm_rv1(12, 21, 36, 2, 'ULVT', 1.8, 2.4, 128, range(0, 128), [68+23], [0.2], 1, np.arange(0, 0.2*(68+23)+0.0001, 0.2), t_label, ['../Data/chip12/MLC_programming_Chip12_Col21_200msPULSE_VG1p8_VD2p4_VAsource_VBdrain_04'], '../Plots/chip12/', 'VG1p8_VD2p4', '_rv1_cycle04')\n #for row_start in np.arange(0, 128, 8):\n # MLC_IDSAT_algorithm_rv1(12, 21, 36, 2, 'ULVT', 1.8, 2.4, 128, range(row_start, row_start+8), [68+23], [0.2], 1, np.arange(0, 0.2*(68+23)+0.0001, 0.2), t_label, ['../Data/chip12/MLC_programming_Chip12_Col21_200msPULSE_VG1p8_VD2p4_VAsource_VBdrain_04'], '../Plots/chip12/', 'VG1p8_VD2p4', '_rv1_cycle04_row'+str(row_start)+'_to_'+str(row_start+7))\n # segment=0\n # for t_ratio in t_ratio_lst:\n # MLC_IDSAT_algorithm_rv1(12, 21, 36, 2, 'ULVT', 1.8, 2.4, 128, range(row_start, row_start+8), [68+23], [0.2], 1, np.arange(0, 0.2*(68+23)+0.0001, 0.2), t_label, ['../Data/chip12/MLC_programming_Chip12_Col21_200msPULSE_VG1p8_VD2p4_VAsource_VBdrain_04'], '../Plots/chip12/', 'VG1p8_VD2p4', '_rv1_cycle04_row'+str(row_start)+'_to_'+str(row_start+7)+'_'+str(segment), [t_ratio[0]*0.2*(68+23), t_ratio[1]*0.2*(68+23)])\n # segment += 1\n\n\n #MLC_IDSAT_characterization(10, 18, 36, 2, 'ULVT', 1.8, 2.0, 32, range(0, 16) , [40, 20, 12], [0.01, 0.04, 0.2], 3, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6'], ['../Data/chip10/Chip10_Col18_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip10/Chip10_Col18_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip10/Chip10_Col18_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03'], '../Plots/chip10/', 'VG1p8_VD2p0', '_cycle010203', 38, 112)\n #MLC_IDSAT_characterization(10, 18, 36, 2, 'ULVT', 1.8, 2.4, 32, range(16, 32), [40, 20, 12], [0.01, 0.04, 0.2], 3, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6'], ['../Data/chip10/Chip10_Col18_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip10/Chip10_Col18_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip10/Chip10_Col18_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03'], '../Plots/chip10/', 'VG1p8_VD2p4', '_cycle010203', 16, 110)\n\n #MLC_IDSAT_characterization(10, 24, 20, 2, 'ULVT', 1.8, 1.8, 32, range(0, 16) 
, [40, 20, 12], [0.01, 0.04, 0.2], 3, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6'], ['../Data/chip10/Chip10_Col24_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip10/Chip10_Col24_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip10/Chip10_Col24_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03'], '../Plots/chip10/', 'VG1p8_VD1p8', '_cycle010203', 44, 133)\n #MLC_IDSAT_characterization(10, 24, 20, 2, 'ULVT', 1.8, 2.2, 32, range(16, 32), [40, 20, 12], [0.01, 0.04, 0.2], 3, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6'], ['../Data/chip10/Chip10_Col24_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip10/Chip10_Col24_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip10/Chip10_Col24_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03'], '../Plots/chip10/', 'VG1p8_VD2p2', '_cycle010203', 14, 133)\n\n #MLC_IDSAT_characterization(10, 30, 16, 2, 'ULVT', 1.8, 1.7, 32, range(0, 16) , [40, 20, 12], [0.01, 0.04, 0.2], 3, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6'], ['../Data/chip10/Chip10_Col30_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip10/Chip10_Col30_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip10/Chip10_Col30_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03'], '../Plots/chip10/', 'VG1p8_VD1p7', '_cycle010203', 50, 135)\n #MLC_IDSAT_characterization(10, 30, 16, 2, 'ULVT', 1.8, 2.0, 32, range(16, 32), [40, 20, 12], [0.01, 0.04, 0.2], 3, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6'], ['../Data/chip10/Chip10_Col30_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip10/Chip10_Col30_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip10/Chip10_Col30_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03'], '../Plots/chip10/', 'VG1p8_VD2p0', '_cycle010203', 20, 140)\n\n\n\n #MLC_IDSAT_characterization(10, 18, 36, 2, 'ULVT', 1.8, 2.0, 32, range(0, 16) , [40], [0.01], 1, [0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.5], ['0', '0.05', '0.1', '0.15', '0.2', '0.25', '0.3', '0.35', '0.4', 'recover'], ['../Data/chip10/Chip10_Col18_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01'], '../Plots/chip10/', 'VG1p8_VD2p0', '_cycle01', 38, 112)\n #MLC_IDSAT_characterization(10, 18, 36, 2, 'ULVT', 1.8, 2.4, 32, range(16, 32), [40], [0.01], 1, [0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.5], ['0', '0.05', '0.1', '0.15', '0.2', '0.25', '0.3', '0.35', '0.4', 'recover'], ['../Data/chip10/Chip10_Col18_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01'], '../Plots/chip10/', 'VG1p8_VD2p4', '_cycle01', 16, 110)\n\n #MLC_IDSAT_characterization(10, 24, 20, 2, 'ULVT', 1.8, 1.8, 32, range(0, 16) , [40], [0.01], 1, [0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.5], ['0', '0.05', '0.1', '0.15', '0.2', '0.25', '0.3', '0.35', '0.4', 'recover'], ['../Data/chip10/Chip10_Col24_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01'], '../Plots/chip10/', 'VG1p8_VD1p8', '_cycle01', 44, 133)\n #MLC_IDSAT_characterization(10, 24, 20, 2, 'ULVT', 1.8, 2.2, 32, range(16, 32), [40], [0.01], 1, [0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.5], ['0', '0.05', '0.1', '0.15', '0.2', '0.25', '0.3', '0.35', '0.4', 'recover'], ['../Data/chip10/Chip10_Col24_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01'], '../Plots/chip10/', 
'VG1p8_VD2p2', '_cycle01', 14, 133)\n\n #MLC_IDSAT_characterization(10, 30, 16, 2, 'ULVT', 1.8, 1.7, 32, range(0, 16) , [40], [0.01], 1, [0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.5], ['0', '0.05', '0.1', '0.15', '0.2', '0.25', '0.3', '0.35', '0.4', 'recover'], ['../Data/chip10/Chip10_Col30_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01'], '../Plots/chip10/', 'VG1p8_VD1p7', '_cycle01', 50, 135)\n #MLC_IDSAT_characterization(10, 30, 16, 2, 'ULVT', 1.8, 2.0, 32, range(16, 32), [40], [0.01], 1, [0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.5], ['0', '0.05', '0.1', '0.15', '0.2', '0.25', '0.3', '0.35', '0.4', 'recover'], ['../Data/chip10/Chip10_Col30_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01'], '../Plots/chip10/', 'VG1p8_VD2p0', '_cycle01', 20, 140)\n\n\n\n #MLC_IDSAT_characterization(10, 18, 36, 2, 'ULVT', 1.8, 2.0, 32, range(0, 16) , [40, 20], [0.01, 0.04], 2, [0, 0.2, 0.4, 0.8, 1.0, 1.2, 1.4, 1.6, 1.7], ['0', '0.2', '0.4', '0.4', '0.6', '0.8', '1.0', '1.2', 'recover'], ['../Data/chip10/Chip10_Col18_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip10/Chip10_Col18_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02'], '../Plots/chip10/', 'VG1p8_VD2p0', '_cycle0102', 38, 112)\n #MLC_IDSAT_characterization(10, 18, 36, 2, 'ULVT', 1.8, 2.4, 32, range(16, 32), [40, 20], [0.01, 0.04], 2, [0, 0.2, 0.4, 0.8, 1.0, 1.2, 1.4, 1.6, 1.7], ['0', '0.2', '0.4', '0.4', '0.6', '0.8', '1.0', '1.2', 'recover'], ['../Data/chip10/Chip10_Col18_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip10/Chip10_Col18_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02'], '../Plots/chip10/', 'VG1p8_VD2p4', '_cycle0102', 16, 110)\n # \n #MLC_IDSAT_characterization(10, 24, 20, 2, 'ULVT', 1.8, 1.8, 32, range(0, 16) , [40, 20], [0.01, 0.04], 2, [0, 0.2, 0.4, 0.8, 1.0, 1.2, 1.4, 1.6, 1.7], ['0', '0.2', '0.4', '0.4', '0.6', '0.8', '1.0', '1.2', 'recover'], ['../Data/chip10/Chip10_Col24_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip10/Chip10_Col24_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02'], '../Plots/chip10/', 'VG1p8_VD1p8', '_cycle0102', 44, 133)\n #MLC_IDSAT_characterization(10, 24, 20, 2, 'ULVT', 1.8, 2.2, 32, range(16, 32), [40, 20], [0.01, 0.04], 2, [0, 0.2, 0.4, 0.8, 1.0, 1.2, 1.4, 1.6, 1.7], ['0', '0.2', '0.4', '0.4', '0.6', '0.8', '1.0', '1.2', 'recover'], ['../Data/chip10/Chip10_Col24_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip10/Chip10_Col24_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02'], '../Plots/chip10/', 'VG1p8_VD2p2', '_cycle0102', 14, 133)\n # \n #MLC_IDSAT_characterization(10, 30, 16, 2, 'ULVT', 1.8, 1.7, 32, range(0, 16) , [40, 20], [0.01, 0.04], 2, [0, 0.2, 0.4, 0.8, 1.0, 1.2, 1.4, 1.6, 1.7], ['0', '0.2', '0.4', '0.4', '0.6', '0.8', '1.0', '1.2', 'recover'], ['../Data/chip10/Chip10_Col30_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip10/Chip10_Col30_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02'], '../Plots/chip10/', 'VG1p8_VD1p7', '_cycle0102', 50, 135)\n #MLC_IDSAT_characterization(10, 30, 16, 2, 'ULVT', 1.8, 2.0, 32, range(16, 32), [40, 20], [0.01, 0.04], 2, [0, 0.2, 0.4, 0.8, 1.0, 1.2, 1.4, 1.6, 1.7], ['0', '0.2', '0.4', '0.4', '0.6', '0.8', '1.0', '1.2', 'recover'], ['../Data/chip10/Chip10_Col30_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip10/Chip10_Col30_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02'], '../Plots/chip10/', 'VG1p8_VD2p0', '_cycle0102', 20, 140)", "def main():\n datasets = {}\n for dataset_name in 
tqdm(SOURCE_DATASET_NAMES, desc=\"Processing datasets and fitting base models\"):\n logger.info(f\"processing dataset {dataset_name}\")\n clusters_path: Optional[str] = None\n if dataset_name not in PAIRWISE_ONLY_DATASETS:\n clusters_path = os.path.join(DATA_DIR, dataset_name, dataset_name + \"_clusters.json\")\n train_pairs_path = None\n val_pairs_path = None\n test_pairs_path = None\n else:\n train_pairs_path = os.path.join(DATA_DIR, dataset_name, \"train_pairs.csv\")\n val_pairs_path = os.path.join(DATA_DIR, dataset_name, \"val_pairs.csv\")\n if not os.path.exists(val_pairs_path):\n val_pairs_path = None\n test_pairs_path = os.path.join(DATA_DIR, dataset_name, \"test_pairs.csv\")\n\n logger.info(f\"loading dataset {dataset_name}\")\n anddata = ANDData(\n signatures=os.path.join(DATA_DIR, dataset_name, dataset_name + \"_signatures.json\"),\n papers=os.path.join(DATA_DIR, dataset_name, dataset_name + \"_papers.json\"),\n name=dataset_name,\n mode=\"train\",\n specter_embeddings=os.path.join(DATA_DIR, dataset_name, dataset_name + \"_specter.pickle\"),\n clusters=clusters_path,\n block_type=BLOCK_TYPE,\n train_pairs=train_pairs_path,\n val_pairs=val_pairs_path,\n test_pairs=test_pairs_path,\n train_pairs_size=N_TRAIN_PAIRS_SIZE,\n val_pairs_size=N_VAL_TEST_SIZE,\n test_pairs_size=N_VAL_TEST_SIZE,\n preprocess=True,\n )\n\n logger.info(f\"featurizing {dataset_name}\")\n train, val, test = featurize(\n anddata,\n FEATURIZER_INFO,\n n_jobs=N_JOBS,\n use_cache=True,\n chunk_size=100,\n nameless_featurizer_info=NAMELESS_FEATURIZER_INFO,\n nan_value=NAN_VALUE,\n )\n X_train, y_train, nameless_X_train = train\n X_val, y_val, nameless_X_val = val\n X_test, y_test, nameless_X_test = test\n\n dataset = {}\n dataset[\"anddata\"] = anddata\n dataset[\"X_train\"] = X_train\n dataset[\"y_train\"] = y_train\n dataset[\"X_val\"] = X_val\n dataset[\"y_val\"] = y_val\n dataset[\"X_test\"] = X_test\n dataset[\"y_test\"] = y_test\n dataset[\"nameless_X_train\"] = nameless_X_train\n dataset[\"nameless_X_val\"] = nameless_X_val\n dataset[\"nameless_X_test\"] = nameless_X_test\n dataset[\"name\"] = anddata.name\n datasets[dataset_name] = dataset\n\n anddatas = [\n datasets[dataset_name][\"anddata\"]\n for dataset_name in SOURCE_DATASET_NAMES\n if dataset_name not in PAIRWISE_ONLY_DATASETS\n ]\n\n X_train = np.vstack([datasets[dataset_name][\"X_train\"] for dataset_name in SOURCE_DATASET_NAMES])\n y_train = np.hstack([datasets[dataset_name][\"y_train\"] for dataset_name in SOURCE_DATASET_NAMES])\n X_val = np.vstack(\n [datasets[dataset_name][\"X_val\"] for dataset_name in SOURCE_DATASET_NAMES if dataset_name not in {\"augmented\"}]\n )\n y_val = np.hstack(\n [datasets[dataset_name][\"y_val\"] for dataset_name in SOURCE_DATASET_NAMES if dataset_name not in {\"augmented\"}]\n )\n\n nameless_X_train = np.vstack([datasets[dataset_name][\"nameless_X_train\"] for dataset_name in SOURCE_DATASET_NAMES])\n nameless_X_val = np.vstack(\n [\n datasets[dataset_name][\"nameless_X_val\"]\n for dataset_name in SOURCE_DATASET_NAMES\n if dataset_name not in {\"augmented\"}\n ]\n )\n\n logger.info(\"fitting pairwise\")\n union_classifier = PairwiseModeler(n_iter=N_ITER, monotone_constraints=MONOTONE_CONSTRAINTS)\n union_classifier.fit(X_train, y_train, X_val, y_val)\n\n nameless_union_classifier = None\n if USE_NAMELESS_MODEL:\n logger.info(\"nameless fitting pairwise for \" + str(SOURCE_DATASET_NAMES))\n nameless_union_classifier = PairwiseModeler(\n n_iter=N_ITER,\n monotone_constraints=NAMELESS_MONOTONE_CONSTRAINTS,\n )\n 
nameless_union_classifier.fit(nameless_X_train, y_train, nameless_X_val, y_val)\n logger.info(\"nameless pairwise fit for \" + str(SOURCE_DATASET_NAMES))\n\n logger.info(\"fitting clusterer for\")\n union_clusterer = Clusterer(\n FEATURIZER_INFO,\n union_classifier.classifier,\n cluster_model=FastCluster(),\n search_space=search_space,\n n_jobs=N_JOBS,\n nameless_classifier=nameless_union_classifier.classifier if nameless_union_classifier is not None else None,\n nameless_featurizer_info=NAMELESS_FEATURIZER_INFO if nameless_union_classifier is not None else None,\n )\n union_clusterer.fit(anddatas)\n print(\n \"best clustering parameters:\",\n union_clusterer.best_params,\n )\n\n models = {}\n models[\"clusterer\"] = union_clusterer\n\n with open(\n f\"full_union_model_script_dump_average_{FEATURIZER_VERSION}.pickle\",\n \"wb\",\n ) as _pickle_file:\n pickle.dump(models, _pickle_file)\n logger.info(\"Done.\")", "def _create_examples(self, lines, set_type):\n examples = []\n for (i, line) in enumerate(tqdm(lines)):\n if i == 0:\n continue\n gold_label_id = self.label2id(line[0])\n several_labels = [self.label2id(line[-5]), self.label2id(line[-4]), self.label2id(line[-3]),\n self.label2id(line[-2]), self.label2id(line[-1])]\n\n pair_id = line[-6]\n premise = line[5]\n hypothesis = line[6]\n premise_bp = line[1]\n hypothesis_bp = line[2]\n premise_p = line[3]\n hypothesis_p = line[4]\n # premise_length = len(premise)\n # hypothesis_length = len(hypothesis)\n guid = \"%s-%s\" % (set_type, i)\n\n ex = dict()\n for k in self.data_keys:\n ex[k] = eval(k)\n examples.append(ex)\n logger.info(\" {} examples\".format(len(examples)))\n return examples", "def make_dataset():\n\n\tnumberOfTrials = dataset_params.num_of_samples\n\tnumberOfTrials_train = int(numberOfTrials*0.8)\n\tnumberOfTrials_test = int(numberOfTrials*0.2)\n\n\tprint(\"==================================================\")\n\tprint(\"1. Generating Train images ......\")\n\tprint(\"\\nTrain image per variation\", numberOfTrials_train)\n\tmakeDataset(numberOfTrials_train, \"train\")\n\n\tprint(\"==================================================\")\n\tprint(\"2. 
Generating Test images ......\")\n\tprint(\"\\nTest image per variation\", numberOfTrials_test)\n\tmakeDataset(numberOfTrials_test, \"test\")\n\n\tprint(\"==================================================\")\n\tprint(\"Done!!!\")", "def setup(self, stage: Union[str, None] = None) -> None:\n self.data_splits = {}\n # set up each of the dataset splits\n for key, path in self.paths.items():\n self.data_splits[key] = self.dataset_class(path)", "def make_dataframe(self):\n logging.info('*** Creating the dataframes from the source files ' )\n \n for k in self.datasets_keys:\n #for k in ['igra2' , 'ncar']:\n \n logging.info('*** Creating the dataframe for the dataset: %s ' , k ) \n \n p_levels = self.data[k]['df']['observations_table']['z_coordinate'][:]\n logging.debug(' Loaded the z_coordinate')\n \n z_type = self.data[k]['df']['observations_table']['z_coordinate_type'][:]\n logging.debug(' Loaded the z_coordinate_type')\n \n obs_variable = self.data[k]['df']['observations_table']['observed_variable'][:]\n logging.debug(' Loaded the observed_variable')\n \n obs_values = self.data[k]['df']['observations_table']['observation_value'][:]\n logging.debug(' Loaded the observation_value')\n \n observation_id = self.data[k]['df']['observations_table']['observation_id'][:]\n logging.debug(' Loaded the observation_id')\n \n units = self.data[k]['df']['observations_table']['units'][:].astype(int)\n logging.debug(' Loaded the units') \n \n report_id = self.data[k]['df']['observations_table']['report_id'][:] \n logging.debug(' Loaded the report_id')\n \n date_time = self.data[k]['df']['observations_table']['date_time'][:]\n logging.debug(' Loaded the date_time (deltas)')\n \n lat , lon = self.data[k]['df']['observations_table']['latitude'][:] , self.data[k]['df']['observations_table']['longitude'][:]\n logging.debug(' Loaded the lat,lon ')\n \n \n self.obs_table_columns = list(self.data[k]['df']['observations_table'].keys() )\n \n self.data[k]['df'].close()\n \n \"\"\" Creating a dataframe \"\"\"\n columns = ['date_time', 'z_coordinate' , 'z_coordinate_type', 'observed_variable' , 'observation_value' , 'report_id' , 'observation_id' , 'latitude' , 'longitude', 'units']\n logging.info(' Loaded the data, creating dataframe ')\n \n df = pd.DataFrame( list(zip( date_time, p_levels, z_type, obs_variable , obs_values, report_id, observation_id , lat , lon, units ) ) , columns = columns ) \n \n \n \"\"\" Storing the dataframe \"\"\" ### try using xarrays ??? \n logging.debug('Storing the DF ' ) \n self.data[k]['dataframe'] = df\n \n logging.debug(' PD dataframe created !!! 
')", "def initialize_data(self , station = '', datasets = {} ):\n \n self.datasets = datasets\n self.datasets_keys = datasets.keys()\n self.station = station\n \n data = {} # container for the data of each dataset\n source_configuration = {} # container for the source_configuration of each dataset\n \n\n \n \"\"\" Looping over the datasets \"\"\"\n logging.info('*** Reading and Initializing the data from the netCDF files ')\n \n \n for k,v in datasets.items() :\n logging.info(' Initialising the dataset: *** %s ' , k )\n data[k] = {} \n data['cdm_tables'] = {} \n \n ### alternative with xarray \n #ds = xr.load_dataset(v) \n #observations_table = xr.open_dataset(v , engine = 'h5netcdf' , group = 'observations_table') \n \n ### alternative with netCDF4\n #ds = nc.Dataset(v) \n #data[k]['dateindex'] = ds.variables['dateindex'][0,:] # storing the dateindex \n \n ###for h5py but cant extract date time units !!!\n ds = h5py.File(v , driver=\"core\" ) \n data[k]['df'] = ds # storing the entire file \n try: \n data[k]['source_file'] = ds['source_configuration']['source_file'][0]\n except:\n data[k]['source_file'] = str(v) # temp fix \n \n #data[k]['product_code'] = ds['source_configuration']['product_code'][0] \n #data[k]['recordtimestamp'] = ds['recordtimestamp'].value\n #data[k]['recordindex'] = ds['recordindex'].value \n #ds.close() \n logging.debug('Reading the file with h5py ')\n \n \n # add here appending datasets for the case of ncar_w and ncar_t \n \n \n self.data = data\n self.make_dataframe()\n ds.close()\n \n \"\"\" Reading the header_table, station_configuration, source_configuration \"\"\"\n for k,v in datasets.items() : \n \n #d = xr.open_dataset(v , engine = 'h5netcdf' ) \n #data[k]['recordtimestamp'] = d['recordtimestamp'].values\n #data[k]['recordindex'] = d['recordindex'].values \n \n \n d = xr.open_dataset(v , engine = 'h5netcdf' , group = 'station_configuration') \n data[k]['station_configuration'] = d.to_dataframe() \n #data[k]['station_configuration'] = d ### USELESS ? \n logging.debug('Done with %s station_configuration' , str(k) )\n \n \n d = xr.open_dataset(v , engine = 'h5netcdf' , group = 'header_table') \n logging.debug('Loading the header_table') \n if 'header_table' not in list( self.attributes.keys() ): # saving the attributes to be re-applied at the end\n self.attributes['header_table'] = {}\n for var in d.variables:\n self.attributes['header_table'][var] = {}\n self.attributes['header_table'][var]['description'] = d[var].description\n self.attributes['header_table'][var]['external_table'] = d[var].external_table \n data[k]['header_table'] = d.to_dataframe() \n logging.debug('Done with %s ' , k )\n \n logging.info(\"*** Loading the observations_table (might take time) %s\" , k ) \n d = xr.open_dataset(v , engine = 'h5netcdf' , group = 'observations_table') \n \n if 'observations_table' not in list( self.attributes.keys() ): # saving the attributes to be re-applied at the end\n self.attributes['observations_table'] = {}\n for var in d.variables:\n self.attributes['observations_table'][var] = {}\n self.attributes['observations_table'][var]['description'] = d[var].description\n self.attributes['observations_table'][var]['external_table'] = d[var].external_table\n \n \n logging.info(\"*** Loading the source configuration %s\" , k ) \n try: \n d = xr.open_dataset(v , engine = 'h5netcdf' , group = 'source_configuration')\n d = d.isel(hdrlen=[0])\n data[k]['source_configuration'] = d.to_dataframe() ### USELESS ? 
\n logging.debug('Done with %s source_configuration' , k )\n except: \n data[k]['source_configuration']= pd.DataFrame(np.array( [ [ self.data[k]['source_file'] ] ] ) , columns=['source_file'] ) \n \n if k == 'era5_1': # reading the whole era5_1 feedback (including reanalysis)\n d = xr.open_dataset(v , engine = 'h5netcdf' , group = 'era5fb') \n data[k]['era5fb'] = d.to_dataframe() \n logging.debug('Done with %s era5 feedback ', k )\n \n \"\"\" Reading the CDM tables that do not depend on specific stations or observations (fixed values), for the first file only \"\"\" \n if list(datasets.keys()).index(k) == 0 :\n for t in [ 'crs' , 'observed_variable', 'units' , 'z_coordinate_type' , 'station_type']: \n \n d = xr.open_dataset(v , engine = 'h5netcdf' , group = t) \n #data['cdm_tables'][t] = d.to_dataframe() ### USELESS ?\n data['cdm_tables'][t] = d \n \n d.close() \n ds.close()\n\n \"\"\" Reading the name of the original source file \"\"\"\n source_configuration[k] = {} \n source_configuration[k]['source_file'] = [ c for c in v.split('/') if '.nc' in c][0]\n\n \n \"\"\" Storing the station configurations \"\"\" \n self.source_configuration = source_configuration \n \n \"\"\" Making all date_times \"\"\" \n self.make_all_datetime()\n \n \n \"\"\" feedback columns \"\"\"\n if 'era5_1' in list (self.data.keys() ):\n self.fb_columns = list(self.data['era5_1']['era5fb'].columns ) \n else:\n self.fb_columns = ['empty']", "def data_prepare(raw_datapath, save_path, sample_size=256):\n ## data path\n data_path = raw_datapath\n ## sample size\n data_size = sample_size\n\n ## data lists\n pts = ['100', '104', '108', '113', '117', '122', '201', '207', '212', '217', '222', '231',\n '101', '105', '109', '114', '118', '123', '202', '208', '213', '219', '223', '232',\n '102', '106', '111', '115', '119', '124', '203', '209', '214', '220', '228', '233',\n '103', '107', '112', '116', '121', '200', '205', '210', '215', '221', '230', '234']\n\n ## map the ~19 classes to 5 classes\n ## according to the paper https://arxiv.org/pdf/1805.00794.pdf\n mapping = {'N': 0, 'L': 0, 'R': 0, 'e': 0, 'j': 0, 'B': 0, # N = 0\n 'A': 1, 'a': 1, 'J': 1, 'S': 1, # S = 1\n 'V': 2, 'E': 2, 'r': 2, 'n': 2, # V = 2\n 'F': 3, # F = 3\n '/': 4, 'f': 4, 'Q': 4, '?': 4} # Q = 4\n ignore = ['+', '!', '[', ']', 'x', '~', '|', '\"']\n\n ## we split the each set of the data into size 256( which we can see the ecg pulse, just one pulse)\n def dataSaver(dataset=pts, data_size=data_size):\n input_size = data_size ## default\n\n def dataprocess():\n ecg = np.zeros((1, input_size))\n label = np.zeros((1, 1))\n for num in tqdm(dataset):\n print(num, 'now')\n idx = 0 ## count for the matrixes\n record = wfdb.rdrecord(data_path + num, smooth_frames=True)\n\n ## normalize the data ecg\n signals0 = np.nan_to_num(record.p_signal[:, 0])\n # signals1 = np.nan_to_num(record.p_signal[:, 1])\n min_max_scaler = preprocessing.MinMaxScaler()\n signals0 = min_max_scaler.fit_transform(signals0.reshape(-1, 1))\n # signals1 = min_max_scaler.fit_transform(signals1.reshape(-1, 1))\n signals0 = signals0.reshape(-1)\n # signals1 = signals1.reshape(-1)\n\n ## find peaks # R-peaks\n ## we only use the channel 0\n peaks, _ = find_peaks(signals0, distance=150)\n\n X = np.zeros((len(peaks), input_size))\n Y = np.zeros((len(peaks), 1))\n\n # skip a first peak to have enough range of the sample\n # in the for loop, we look for the annotation\n for peak in tqdm(peaks[1:-1]):\n start, end = peak - input_size // 2, peak + input_size // 2\n start = max([0, start])\n end = 
min([len(signals0), end])\n ann = wfdb.rdann(data_path + num, extension='atr', sampfrom=start, sampto=end,\n return_label_elements=['symbol'])\n symbol = ann.symbol\n count = 0\n if len(symbol) != 1:\n for sym in symbol:\n if sym in ignore:\n count += 1\n continue\n elif sym == 'N':\n continue\n else:\n symbol = sym\n break\n if count > 0 and len(symbol) > 1:\n symbol = '+'\n elif len(symbol) > 1:\n symbol = 'N'\n elif len(symbol) == 0:\n symbol = '+'\n assert len(symbol) <= 1, \"the symbol is not only one.{} len\".format(len(symbol))\n\n if len(symbol) == 1:\n for ss in symbol:\n if ss in ignore:\n continue\n else:\n Y[idx, 0] = mapping[ss]\n sig = signals0[start:end]\n X[idx, :len(sig)] = sig\n idx += 1\n ecg = np.concatenate((ecg, X), axis=0)\n label = np.concatenate((label, Y), axis=0)\n ecg = ecg[1:, :]\n label = label[1:, :]\n ecg = pd.DataFrame(ecg)\n label = pd.DataFrame(label)\n\n return ecg, label\n ecg, label = dataprocess()\n return ecg, label\n\n ecg, label = dataSaver(pts)\n ecg_path = save_path + \"/ecg_signal_{}.csv\".format(data_size)\n label_path = save_path + \"/label_{}.csv\".format(data_size)\n ecg.to_csv(ecg_path, index=None, header=None)\n label.to_csv(label_path, index=None, header=None)\n return ecg, label", "def make_parsed_dataset(self, ctx=None):\n # Shuffle the filenames to ensure better randomization.\n dataset = tf.data.Dataset.list_files(self.file_pattern,\n shuffle=self.is_training)\n\n if ctx and ctx.num_input_pipelines > 1:\n dataset = dataset.shard(ctx.num_input_pipelines, ctx.input_pipeline_id)\n\n if self.is_training:\n dataset = dataset.repeat()\n\n def fetch_dataset(filename):\n return tf.data.TFRecordDataset(filename, buffer_size=IMAGENET_BUFFER_SIZE)\n\n # Read the data from disk in parallel\n dataset = dataset.interleave(\n fetch_dataset, cycle_length=IMAGENET_FETCH_CYCLE_LENGTH,\n num_parallel_calls=tf.data.experimental.AUTOTUNE)\n\n if self.is_training:\n dataset = dataset.shuffle(IMAGENET_SHUFFLE_BUFFER_SIZE)\n\n # Parse, pre-process, and batch the data in parallel\n dataset = dataset.apply(\n tf.data.experimental.map_and_batch(\n self.dataset_parser,\n batch_size=self.batch_size,\n num_parallel_batches=IMAGENET_PREPROCESSING_THREADS,\n drop_remainder=self.is_training))\n\n return dataset", "def create_dataset(dataset,time_step=1):\n dataX,dataY=[],[]\n for i in range(len(dataset)-time_step):\n a=dataset[i:i+time_step]\n dataX.append(a)\n dataY.append(dataset[i+time_step])\n return np.asarray(dataX),np.asarray(dataY)", "def prepare_datasets(target_filename='data'):\n data_cornell = np.array(datasets.readCornellData('__data__/cornell/', max_len=1000000))\n data_opensubs = np.array(datasets.readOpensubsData('__data__/opensubs/', max_len=1000000))\n\n data = np.concatenate([data_cornell, data_opensubs], axis=0)\n del data_cornell, data_opensubs\n\n pd.DataFrame(data, columns=('question', 'answer')).to_feather('__data__/'+target_filename+'.feather')", "def generate_dataset(self):\n\t\timg_set = []\n\t\tqa_set = []\n\t\tfor i in range(self.config.dataset_size):\n\t\t\timg, r = self.generate_image()\n\t\t\tq = self.generate_question()\n\t\t\ta = self.generate_answer(r, q)\n\t\t\timg_sample = {\n\t\t\t\t'id': i,\n\t\t\t\t'image': img.tolist()\n\t\t\t}\n\t\t\timg_set.append(img_sample)\n\t\t\tfor j in range(len(q)):\n\t\t\t\tqa_sample = {\n\t\t\t\t\t'id': i,\n\t\t\t\t\t'question': q[j].tolist(),\n\t\t\t\t\t'answer': a[j].tolist()\n\t\t\t\t}\n\t\t\t\tqa_set.append(qa_sample)\n\t\tprint('Finished creating smaples')\n\t\tdataset = 
{\n\t\t\t'image':\timg_set,\n\t\t\t'qa':\tqa_set\n\t\t}\n\t\twith open(self.path, 'w') as f:\n\t\t\tjson.dump(dataset, f)", "def create_sets():\n global train_x, train_y, val_x, val_y\n\n print('Creating sets')\n\n dataframe = pd.read_csv('LoggerBot.log', names=NAMES).sample(frac=1)\n inputs = dataframe.values[:,:-1].astype(np.float32)\n outputs = dataframe.values[:,-1].astype(np.int32)\n\n train_set_size = int(len(dataframe) * 0.7)\n train_x, train_y = inputs[:train_set_size], outputs[:train_set_size]\n val_x, val_y = inputs[train_set_size:], outputs[train_set_size:]", "def collect_datset(self):\n response = requests.get(self.url)\n lines = response.text.splitlines()\n data = []\n for item in lines:\n item = item.split(\",\")\n data.append(item)\n data.pop(0) # to remove labels from list\n dataset = np.matrix(data)\n return dataset", "def before_each(self, dataset: pydicom.dataset.Dataset) -> None:", "def repeater(data_loader):\n for loader in itertools.repeat(data_loader):\n for data in loader:\n yield data", "def build(self):\n # open json, len 161,260\n at_json = open_json(self.json_names[0])\n link_json = open_json(self.json_names[1])\n # if need preprocessing, do it\n if self.args.img_preprocessing:\n print(\"resize imgs\")\n for i in tqdm(range(len(link_json))):\n image_url = \"image/\" + link_json[i][\"image_url_4x\"].split('/')[-1]\n img = Image.open(image_url)\n img = img.resize((224, 224))\n img.save(image_url)\n\n # create dataset\n itemlen = 0\n previd = 0\n for i in tqdm(range(len(link_json))):\n image_url = link_json[i][\"image_url_4x\"].split('/')[-1]\n uid = image_url.split('-')[0]\n if previd != uid:\n self.label.append(list(at_json[i].values())[2:])\n if i != 0:\n self.itemlen.append(itemlen)\n itemlen = 0\n self.input.append(f\"{self.frontpath}dataset/image/\" + image_url)\n previd = uid\n itemlen += 1\n self.itemlen.append(itemlen)\n self.separate()\n self.dataset = {\n 'train': self.train,\n 'validation': self.val,\n 'test': self.test\n }\n\n print('finished dataset')", "def prepare_train_dataset(name, reso, batch_size=32):\r\n transform = transforms.Compose([\r\n transforms.RandomResizedCrop(size=reso, interpolation=3),\r\n transforms.ColorJitter(brightness=1.5, saturation=1.5, hue=0.2),\r\n transforms.RandomVerticalFlip(),\r\n transforms.ToTensor()\r\n ])\r\n\r\n path = config.datasets[name]\r\n\r\n if name == 'coco':\r\n img_datasets = CocoDataset(root=path['train_imgs'], annFile=path['train_anno'], transform=transform)\r\n dataloder = torch.utils.data.DataLoader(img_datasets, batch_size=batch_size, num_workers=4, shuffle=True, collate_fn=CocoDataset.collate_fn)\r\n elif name == 'voc':\r\n img_datasets = VocDataset(train_list=path['train_imgs'], transform=transform)\r\n dataloder = torch.utils.data.DataLoader(img_datasets, batch_size=batch_size, num_workers=4, shuffle=True, collate_fn=VocDataset.collate_fn)\r\n\r\n return img_datasets, dataloder", "def setup(self, stage: Union[str, None] = None) -> None:\n self.data_splits = {}\n # set up each of the dataset splits\n for key, path in self.paths.items():\n self.data_splits[key] = PointCloudDataset(\n self.dataset_class(path), self._point_cloud_size, self._sample_size\n )", "def preproc_data(base_dir):\n n_users = 182\n user_datas = []\n\n for user in tqdm(range(n_users)): \n #iterate through each trace of user \n user_traces = []\n if (len(str(user)) == 1): \n user_id = '00' + str(user)\n elif (len(str(user)) == 2): \n user_id = '0' + str(user)\n else: \n user_id = str(user)\n dir_name = base_dir + user_id + 
'/Trajectory/'\n for filename in os.listdir(dir_name): \n #load trajectory\n trajectory_raw = pd.read_csv(\n dir_name + filename,\n delimiter = ',', header = None, skiprows = 6,\n names = ['lat','lon', '0', 'altitude', 't_1899', 'date', 'time' ])\n\n #modify time\n traj = trajectory_raw.copy()\n traj.drop(columns = '0', inplace = True)\n\n traj['t_1899'] = (trajectory_raw['t_1899'] - trajectory_raw['t_1899'].values[0]) * 24 * 60 * 60\n traj_newcols = np.array(traj.columns)\n traj_newcols[traj_newcols == 't_1899'] = 'seconds'\n traj.columns = traj_newcols\n\n #get X_vals: \n R_earth = 6.371e6\n deg_to_rad = np.pi / 180\n traj['X'] = R_earth * np.cos(deg_to_rad * traj['lat'].values) * np.cos(deg_to_rad * traj['lon'].values)\n traj['Y'] = R_earth * np.cos(deg_to_rad * traj['lat'].values) * np.sin(deg_to_rad * traj['lon'].values)\n\n #Normalize to first point: \n origin_x = traj['X'].values[0]\n origin_y = traj['Y'].values[0]\n\n traj['X'] = traj['X'] - origin_x\n traj['Y'] = traj['Y'] - origin_y\n\n txy = traj.copy()\n txy = traj[['seconds', 'X', 'Y']]\n txy.columns = ['T', 'X', 'Y']\n if not np.isnan(txy.values).any(): \n user_traces.append(txy.values)\n\n user_datas.append(user_traces)\n return user_datas", "def refresh_test_dataset(self):\n inputs_id, inputs_starts, inputs_paths, inputs_ends, inputs_label = self.build_data(self.reader, self.test_items, self.option.max_path_length)\n self.test_dataset = CodeDataset(inputs_id, inputs_starts, inputs_paths, inputs_ends, inputs_label)" ]
[ "0.6232418", "0.6178191", "0.61240345", "0.6034664", "0.5999491", "0.59856147", "0.5969796", "0.5958377", "0.5950267", "0.5871935", "0.58671767", "0.5818414", "0.5809877", "0.5804624", "0.5801337", "0.5794393", "0.5745656", "0.5720152", "0.5712723", "0.5677284", "0.56412107", "0.5601151", "0.5599085", "0.559302", "0.5569282", "0.55593264", "0.5534732", "0.55347306", "0.55164444", "0.54838455", "0.548135", "0.5470648", "0.54686725", "0.54606855", "0.54577166", "0.54507065", "0.54417354", "0.5433243", "0.5431214", "0.54288137", "0.5427643", "0.54263806", "0.54172087", "0.5417035", "0.5417035", "0.54143727", "0.54127556", "0.5386069", "0.53858936", "0.53836536", "0.5378896", "0.53663325", "0.53647333", "0.536078", "0.53545266", "0.5353649", "0.5343554", "0.5339233", "0.53228813", "0.53036267", "0.5301766", "0.5301609", "0.5301168", "0.5294883", "0.5292591", "0.52925545", "0.52899903", "0.52877647", "0.52838916", "0.52809525", "0.5278759", "0.5277826", "0.52768314", "0.52757466", "0.5274371", "0.526835", "0.52649134", "0.52637476", "0.52618784", "0.52613896", "0.5260462", "0.5257217", "0.5255231", "0.52469116", "0.5246766", "0.5244528", "0.5242875", "0.5237487", "0.5232318", "0.5230981", "0.52266085", "0.5212073", "0.5211414", "0.5208613", "0.52083695", "0.52071863", "0.5200117", "0.5198945", "0.5197578", "0.51956546" ]
0.6497661
0
Create independently looped input files.
def create_input_files(self, datasets_dict):
        ifname = self.keywords['inputfile']
        dirstem = os.path.dirname(ifname)
        basename = os.path.basename(ifname).split('.')[0]
        createdfiles=list()
        if dirstem == "":
            dirstem = os.getcwd()
        dkeys = datasets_dict.keys()
        dkeys.sort()
        dct=1
        for didx in dkeys:
            newfile = MASTFile()
            newfile.data = list(datasets_dict[didx])
            newname="%s/loop_%s_%s.inp" % (dirstem, basename, str(dct).zfill(2))
            newfile.to_file(newname)
            #createdfiles.append(os.path.basename(newname))
            createdfiles.append(newname)
            dct=dct+1
        return createdfiles
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _make_files(self, dir, num_files=10):\n for i in range(num_files):\n self._make_random_file(dir)", "def create_input_files(in_dir, R, I):\n def get_filepath(in_volume, infiles_partition):\n _3d_pos = numeric_to_3d_pos(in_volume.index, infiles_partition, order='F')\n i, j, k = _3d_pos\n out_filename = f'{i}_{j}_{k}.hdf5'\n return os.path.join(in_dir, out_filename)\n\n infiles_partition = get_blocks_shape(R, I)\n infiles_volumes = get_named_volumes(infiles_partition, I)\n for in_volume in infiles_volumes:\n filepath = get_filepath(in_volume, infiles_partition)\n arr = create_random_dask_array(I, distrib='normal', dtype=np.float16)\n save_to_hdf5(arr, filepath, physik_cs=None, key='/data', compression=None)", "def split_start(infiles, outfiles):\n\n # split always runs exactly one job (unlike @subdivide)\n # So it implicitly combines all its inputs before running and generating multiple output\n # @originate generates multiple output so the input for @split is a list...\n infile = infiles[0]\n\n # clean up previous\n for f in outfiles:\n os.unlink(f)\n\n\n #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n #\n # Create more files than the previous invocation\n #\n #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n n_to_produce = len(outfiles) + 1\n for i in range(n_to_produce):\n f = '{}{}.split'.format(tempdir, i)\n open(f, 'a').close()", "def read_input_files(self):\r\n\r\n for input_file in self.list_of_input_files:\r\n input_file.read_header_of_file()\r\n self.list_of_header_objects.extend(input_file.list_of_header_objects)\r\n self.list_of_header_objects_without_ID.extend(input_file.list_of_header_objects_without_ID)\r\n self.list_of_contigs.extend(input_file.list_of_contigs)\r\n\r\n self.list_of_header_objects = list(toolz.unique(self.list_of_header_objects, key=lambda x: x.tag_and_ID))\r\n self.list_of_header_objects_without_ID = list(\r\n toolz.unique(self.list_of_header_objects_without_ID, key=lambda x: x.line))\r\n self.list_of_contigs = list(toolz.unique(self.list_of_contigs, key=lambda x: x.line))\r\n self.list_of_header_objects.extend(self.list_of_header_objects_without_ID)\r\n self.list_of_header_objects.sort(key=lambda x: x.line)\r\n self.list_of_header_objects.extend(self.list_of_contigs)\r\n self.list_of_header_objects.sort(key=lambda x: x.tag, reverse=False)\r\n self.create_body_header_line_for_output()\r\n self.write_header_in_output_file()\r\n\r\n list_of_chrom = list(self.indices.keys())\r\n list_of_chrom.sort(key=lambda x: self.alphanum_key(x))\r\n for chrom in list_of_chrom:\r\n self.list_of_body_objects.clear()\r\n for input_file in self.list_of_input_files:\r\n input_file.read_specific_chrom_body_of_file(chrom)\r\n self.list_of_body_objects.extend(input_file.list_of_body_objects)\r\n\r\n self.adjust_body_records_to_samples()\r\n self.list_of_body_objects = list(toolz.unique(self.list_of_body_objects, key=lambda x: x.line))\r\n self.list_of_body_objects.sort(key=lambda x: self.alphanum_key(x.line))\r\n self.verify_and_merge_body_records()\r\n self.write_specific_chrom_in_output_file()", "def create_inputs_recipe():\n module_name, _ = os.path.splitext(os.path.basename(__file__))\n path = os.path.join(CREATED_INPUTS_PATH_FOR_TESTS, module_name)\n os.makedirs(path, exist_ok=True)\n os.chdir(path)\n os.makedirs(\"inputs/\", exist_ok=True)\n print('Current working directory:\\n {:s}'.format(os.getcwd()))\n\n for filename, _ in input_pars:\n print('Downloading files...')\n basename = filename.split(\"_\")[0] + \".fits\"\n sci_path = 
download_from_archive(basename)\n sci_ad = astrodata.open(sci_path)\n data_label = sci_ad.data_label()\n\n print('Reducing pre-processed data:')\n logutils.config(file_name='log_{}.txt'.format(data_label))\n p = GNIRSLongslit([sci_ad])\n p.prepare(bad_wcs=\"fix\")\n p.addDQ()\n p.addVAR(read_noise=True)\n p.ADUToElectrons()\n p.addVAR(poisson_noise=True)\n # p.flatCorrect()\n p.makeIRAFCompatible()\n\n os.chdir(\"inputs/\")\n processed_ad = p.writeOutputs().pop()\n os.chdir(\"../\")\n print('Wrote pre-processed file to:\\n'\n ' {:s}'.format(processed_ad.filename))", "def make_dummy_files(paths):\n for p in paths:\n make_dummy_file(p)", "def convert_files_parallel(self) -> None:\n file_paths = []\n for file in os.listdir(self.audios_dir):\n if file.endswith(self.input_format):\n file_paths.append(os.path.join(\n self.audios_dir, file))\n with Pool(cpu_count()) as p:\n p.map(self.convert_file, file_paths)", "def _open_files(inputs, mode):\n assert isinstance(inputs, list)\n\n local_open = pf.open\n return [local_open(ffile, mode=mode) for ffile in inputs]", "def stage_input_file(workdir_path, files):\n if not isinstance(files, list):\n files = [files]\n\n for file_dict in files:\n location = urlparse(file_dict['location'])\n if 'basename' in file_dict:\n dest_path = os.path.join(workdir_path, file_dict['basename'])\n else:\n dest_path = os.path.join(workdir_path, os.path.basename(location.path))\n shutil.copy(location.path, dest_path)\n file_dict['path'] = dest_path\n\n for i, secondary_file in enumerate(file_dict.get('secondaryFiles', [])):\n stage_input_file(workdir_path, file_dict['secondaryFiles'][i])", "def prepare_io(filename, input_dataset, output_dataset):\n file_id = filename[1:] if filename.startswith(os.sep) else filename\n file_in = os.path.join(input_dataset.path, 'files', file_id)\n file_out = os.path.join(output_dataset.path, 'files', file_id)\n ensure_path(os.path.dirname(file_out))\n return file_in, file_out", "def io_files(self, iterable, ext=None, func=None):\n for input_path in iterable:\n output_path, temp_file = self.check_output_path(input_path, ext)\n\n try:\n func(input_path, temp_file)\n except Exception as e:\n if self._force_continue is True:\n self.handle_error(e, input_path)\n else:\n raise e\n\n self.overwrite_output_path(input_path, output_path, temp_file)", "def create(self):\n\n for key in self.dirs.keys():\n if type(self.dirs[key]) == dict:\n for dir_type in self.dirs[key].keys():\n create_if_not_exists(self.dirs[key][dir_type])\n else:\n create_if_not_exists(self.dirs[key])\n\n self.inputFileIds = {}\n for sample_name, sample_info in self.samples.items():\n if not sample_info['use_it']:\n continue\n\n process_name = sample_info[\"process_name_specific\"]\n is_mc = (sample_info[\"type\"] == \"mc\")\n\n if not is_mc:\n continue\n\n logging.info(\"Creating configuration files to run '%s' for sample %s\" % (self.executable, process_name))\n\n inputFileList = generateInputFileList(sample_info, self.max_files_per_job)\n key_dir = getKey(process_name)\n\n outputFile = os.path.join(\n self.dirs[key_dir][DKEY_HISTO], \"%s.root\" % process_name\n )\n self.outputFiles[process_name] = {\n 'inputFiles' : [],\n 'outputFile' : outputFile,\n }\n if os.path.isfile(outputFile) and tools_is_file_ok(outputFile, min_file_size = 2000):\n logging.info('File {} already exists --> skipping job'.format(outputFile))\n continue\n\n for jobId in inputFileList.keys():\n\n key_file = getKey(sample_name, jobId)\n\n self.inputFiles[key_file] = inputFileList[jobId]\n if 
len(self.inputFiles[key_file]) == 0:\n logging.warning(\n \"'%s' = %s --> skipping job !!\" % (key_file, self.inputFiles[key_file])\n )\n continue\n\n self.cfgFiles_projection[key_file] = os.path.join(\n self.dirs[key_dir][DKEY_CFGS], \"project_%s_%i_cfg.txt\" % (process_name, jobId)\n )\n self.outputFiles_tmp[key_file] = os.path.join(\n self.dirs[key_dir][DKEY_HISTO_TMP], \"histogram_%i.root\" % jobId\n )\n self.logFiles_projection[key_file] = os.path.join(\n self.dirs[key_dir][DKEY_LOGS], \"project_%s_%i.log\" % (process_name, jobId)\n )\n self.scriptFiles_projection[key_file] = os.path.join(\n self.dirs[key_dir][DKEY_CFGS], \"project_%s_%i_cfg.sh\" % (process_name, jobId)\n )\n projection_module = self.projection_module\n if projection_module == \"count\":\n projection_module = \"countHistogramAll\"\n if sample_name.startswith('/TTTo'):\n projection_module += \"CompTopRwgt\"\n elif sample_info['sample_category'].startswith('ttH'):\n projection_module += \"CompHTXS\"\n elif isSplitByNlheJet(process_name):\n projection_module += \"SplitByLHENjet\"\n elif isSplitByNlheHT(process_name):\n projection_module += \"SplitByLHEHT\"\n elif isSplitByNlheJetHT(process_name, sample_name):\n projection_module += \"SplitByLHENjetHT\"\n self.jobOptions_sbatch[key_file] = {\n 'histName' : process_name,\n 'inputFiles' : self.inputFiles[key_file],\n 'cfgFile_path' : self.cfgFiles_projection[key_file],\n 'outputFile' : self.outputFiles_tmp[key_file],\n 'logFile' : self.logFiles_projection[key_file],\n 'scriptFile' : self.scriptFiles_projection[key_file],\n 'projection_module' : projection_module,\n }\n if self.projection_module != 'puHist':\n self.jobOptions_sbatch[key_file]['ref_genWeight'] = self.ref_genWeights[process_name]\n if process_name not in self.ref_genWeights:\n raise RuntimeError(\"Unable to find reference LHE weight for process %s\" % process_name)\n self.createCfg_project(self.jobOptions_sbatch[key_file])\n self.outputFiles[process_name]['inputFiles'].append(self.outputFiles_tmp[key_file])\n\n if self.is_sbatch:\n logging.info(\"Creating script for submitting '%s' jobs to batch system\" % self.executable)\n self.num_jobs['project'] += self.createScript_sbatch(\n self.executable, self.sbatchFile_projection, self.jobOptions_sbatch\n )\n\n logging.info(\"Creating Makefile\")\n lines_makefile = []\n self.addToMakefile_project(lines_makefile)\n self.addToMakefile_hadd(lines_makefile)\n if self.plot:\n self.addToMakefile_plot(lines_makefile)\n self.addToMakefile_finalHadd(lines_makefile)\n self.createMakefile(lines_makefile)\n logging.info(\"Done\")\n\n return self.num_jobs", "def pre_loop(self):\n\t\tk = Kernel(name=\"misc.mkfile\")\n\t\tk.arguments = [\"--size=1000\", \"--filename=reference.dat\"]\n\t\tk.upload_input_data = ['levenshtein.py']\n\t\treturn k", "def flow_from_files(self, filenames=None, batch_size=32):\n\n if filenames:\n self.filenames = filenames\n\n for i in range(0, len(self.filenames), batch_size):\n yield np.concatenate([np.load(self.path / f) \\\n for f in self.filenames.iloc[i:i+batch_size]])", "def create_test_input_files(input1, input2):\n random.shuffle(input1)\n random.shuffle(input2)\n filename1 = application.join_abs_path(EMPTY_TEST_DIR, 'file-1.gz')\n filename2 = application.join_abs_path(EMPTY_TEST_DIR, 'file-2.gz')\n\n with gzip.open(filename1, 'wb') as file1:\n file1.write('\\n'.join(input1))\n with gzip.open(filename2, 'wb') as file2:\n file2.write('\\n'.join(input2))", "def create_fake_files(self, temp_dir):\n for fake_file in self.processed_fake_file + 
self.non_processed_fake_files:\n temp_fake_file = Path(temp_dir) / Path(fake_file)\n temp_fake_file.mkdir(parents=True, exist_ok=True)\n temp_fake_file.touch(exist_ok=True)", "def generate_input_files(elevation_folder_path, template_input_file_path):\n import pathlib\n json_dict = get_inputs_from_file(template_input_file_path)\n\n path_to_match = pathlib.Path(elevation_folder_path)\n\n for heightfile in path_to_match.glob(\"*.npy\"):\n dot_index = str(heightfile).rfind('.')\n filename_base = str(heightfile)[:dot_index]\n opt_output_filename = filename_base + \".out\"\n opt_input_filename = filename_base + \".json\"\n\n localdict = json_dict.copy()\n\n localdict[\"output_file\"] = opt_output_filename\n localdict[\"elevation_file\"] = str(heightfile)\n\n dump_json_dict(out_dict=localdict, filename=opt_input_filename)", "def convert_files_sequential(self) -> None:\n for file in os.listdir(self.audios_dir):\n if file.endswith(self.input_format):\n self.convert_file(os.path.join(\n self.audios_dir, file), self.output_format)", "def create_input_file(self, polymer_identifier, format, outpath):\n\n\t\tsmiles = self.get_smiles_from_identifier(polymer_identifier)\n\t\t\n\t\tresult = generate_input_files(smiles, format)\n\t\twith open(outpath, 'w+') as f:\n\t\t\tf.write(result)", "def processImages(self):\n for file in os.listdir(self.config[\"tempPath\"]):\n self.logger.debug(\"Calling generateImages for the file: {0}\".format(file))\n self.generateText(file)", "def make(input_filepath, output_filepath) -> None:\n logger = logging.getLogger(__name__)\n logger.info('making final data set from raw data')", "def create_files(self):\n self._do_action_under_lock(self._create_files)", "def initiallize_buffer(self):\n assert os.path.isdir(self.directory)\n #sorting files topologically, files' format is -> data_num.h5 \n files_list = sorted(os.listdir(self.directory + '/' + self.name + '/'), key = lambda x: int(x.split(\"_\")[1].split(\".\")[0]))\n self.files_counter = 0\n if files_list != []: \n for file_name in files_list:\n self.memorize(name = file_name, error = 1)\n self.files_counter += 1\n self.files_tracker = file_name\n else:\n self.files_tracker = 'data_-1.h5'", "def loadInputFiles(self):\n\t\tfor filename in self.input_filename_list:\n\t\t\tfor module in self.modules:\n\t\t\t\tmodule.Add(filename)", "def create_from_files():\n logging.info('\"Create from files\" task started using config file %s', args.config)\n file_dir_path = config['input_dir']\n files = os.listdir(file_dir_path)\n\n for file_name in files:\n filename_without_extension = os.path.splitext(file_name)[0]\n if len(filename_without_extension) > 255:\n message = 'Truncating the filename \"' + filename_without_extension + '\" since it exceeds Drupal\\'s maximum node title length of 255 characters.'\n logging.error(message)\n filename_without_extension = filename_without_extension[:255]\n\n islandora_model = set_model_from_extension(file_name, config)\n\n node_json = {\n 'type': [\n {'target_id': config['content_type'],\n 'target_type': 'node_type'}\n ],\n 'title': [\n {'value': filename_without_extension}\n ],\n 'status': [\n {'value': config['published']}\n ],\n 'field_model': [\n {'target_id': islandora_model,\n 'target_type': 'taxonomy_term'}\n ]\n }\n\n node_headers = {\n 'Content-Type': 'application/json'\n }\n node_endpoint = '/node?_format=json'\n node_response = issue_request(config, 'POST', node_endpoint, node_headers, node_json, None)\n if node_response.status_code == 201:\n node_uri = node_response.headers['location']\n 
print('+ Node for \"' + filename_without_extension + '\" created at ' + node_uri + '.')\n logging.info('Node for \"%s\" created at %s.', filename_without_extension, node_uri)\n if 'output_csv' in config.keys():\n write_to_output_csv(config, '', node_response.text)\n\n file_path = os.path.join(config['input_dir'], file_name)\n media_type = set_media_type(file_path, config)\n media_response_status_code = create_media(config, file_name, node_uri)\n allowed_media_response_codes = [201, 204]\n if media_response_status_code in allowed_media_response_codes:\n print('+ ' + media_type.title() + \" media for \" + filename_without_extension + \" created.\")\n logging.info(\"Media for %s created.\", file_path)\n else:\n logging.error('Node for \"%s\" not created, HTTP response code was %s.', os.path.join(config['input_dir'], file_name), node_response.status_code)", "def split_input(self):\n namenode = self.runner.namenode\n splitter = Splitter(RECORDS_PER_BLOCK)\n results = []\n input_files = []\n for fname in self.inputs:\n input_files.append(RecordFile(fname, namenode))\n\n taskid = 0\n for block in splitter.split(input_files):\n fname = map_input(self.id, taskid)\n taskid += 1\n namenode.create_file(fname)\n\n bytes_written = 0\n for record in block:\n bytes_written += namenode.write_file(fname, bytes_written,\n record)\n\n namenode.close_file(fname)\n results.append(fname)\n self.open_files.append(fname)\n\n for file_ in input_files:\n file_.close()\n\n return results", "def create_input_sample_files(self, input_files: List[Path]) -> pd.DataFrame:\n assemblies = {}\n reads = {}\n sample_names = set()\n data = []\n\n # Initial pass of files to break up into assemblies/reads\n for file in input_files:\n sf = SequenceFile(file)\n sample_name = sf.get_genome_name(exclude_paired_end_indicators=True)\n if sf.is_assembly():\n if sample_name in sample_names:\n if sample_name in assemblies:\n previous_files = [assemblies[sample_name]]\n else:\n previous_files = reads[sample_name]\n raise Exception(f'Duplicate sample with name [{sample_name}]. current_file=[{file}], '\n f'previous_file(s)={previous_files}')\n else:\n sample_names.add(sample_name)\n assemblies[sample_name] = file\n elif sf.is_reads():\n if sample_name in assemblies:\n previous_files = assemblies[sample_name]\n raise Exception(f'Duplicate sample with name [{sample_name}]. current_file=[{file}], '\n f'previous_file(s)={previous_files}')\n elif sample_name in reads:\n if len(reads[sample_name]) != 1:\n raise Exception(f'Invalid number of files for sample with name [{sample_name}]. '\n f'current_file=[{file}], previous_files={reads[sample_name]}')\n else:\n reads[sample_name].append(file)\n else:\n reads[sample_name] = [file]\n\n sample_names.add(sample_name)\n else:\n logger.warning(f'Input file [{file}] with unknown file type (not assembly or reads). 
Ignoring.')\n\n # Now we iterate over samples to insert into an array to create the final dataframe\n for sample in assemblies:\n data.append([sample, assemblies[sample], pd.NA, pd.NA])\n\n # Iterate over reads to insert into array for final dataframe\n for sample in reads:\n if len(reads[sample]) == 1:\n data.append([sample, pd.NA, reads[sample][0], pd.NA])\n elif len(reads[sample]) == 2:\n file1 = SequenceFile(reads[sample][0])\n file2 = SequenceFile(reads[sample][1])\n\n file1_differences = file1.name_differences(file2)\n file2_differences = file2.name_differences(file1)\n\n if len(file1_differences) != 1 or len(file2_differences) != 1:\n raise Exception(\n f'Files [{reads[sample]}] do not have exactly one difference between names, cannot determine'\n f' paired structure.')\n else:\n f1d = file1_differences[0].lower()\n f2d = file2_differences[0].lower()\n\n if f1d == '1' and f2d == '2':\n forward = file1\n reverse = file2\n elif f1d == 'f' and f2d == 'r':\n forward = file1\n reverse = file2\n elif f2d == '1' and f1d == '2':\n reverse = file1\n forward = file2\n elif f1d == 'r' and f2d == 'f':\n reverse = file1\n forward = file2\n else:\n raise Exception(f'Cannot determine pair structure for files [{reads[sample]}]')\n\n data.append([sample, pd.NA, forward.file, reverse.file])\n else:\n raise Exception(f'Invalid number of files for sample [{sample}], files={reads[sample]}')\n\n return pd.DataFrame(data, columns=self.INPUT_SAMPLE_FILE_COLUMNS)", "def start():\r\n\r\n total_files = sum([len(files) for r, d, files in os.walk(abs_source_directory)])\r\n total_files_down = total_files\r\n for i in range(total_files, 0, -1):\r\n if i % 10 == 0:\r\n total_files_down = i\r\n break\r\n current_iteration = 0\r\n last_factor = 0\r\n position = 1\r\n print(\"[{0}] {1}/{2}\".format(\" \" * 10, 0, total_files))\r\n for path, dirs, files in os.walk(abs_source_directory):\r\n for file_name in list(filter(lambda x: x.endswith(\".pdf\"), files)):\r\n file_source_path = os.path.join(path, file_name)\r\n out = re.search(normal_regex, file_source_path)\r\n # Handles normal past-papers\r\n try:\r\n found_groups = out.groups()\r\n write_copy(file_source_path, file_name, matched_groups=found_groups)\r\n except AttributeError:\r\n # Handles music past-papers\r\n if \"Music_\" in file_source_path:\r\n out = re.search(audio_music_regex, file_source_path)\r\n try:\r\n found_groups = out.groups()\r\n write_copy(file_source_path, file_name, music_groups=found_groups)\r\n except AttributeError:\r\n print(f\"CRITICAL ERROR: File not handled: {file_source_path}\")\r\n elif \"Exam Pack list of omitted papers and markschemes\" in file_name:\r\n pass\r\n else:\r\n print(f\"CRITICAL ERROR: File not handled: {file_source_path}\")\r\n current_iteration += 1\r\n if current_iteration == last_factor + total_files_down / 10:\r\n last_factor = current_iteration\r\n print(\"[{0}{1}] {2}/{3}\".format(\"-\" * position, \" \" * (10 - position), current_iteration, total_files))\r\n position += 1\r\n # Handles mp3 files\r\n for file_name in list(filter(lambda x: x.endswith(\".mp3\"), files)):\r\n file_source_path = os.path.join(path, file_name)\r\n out = re.search(audio_music_regex, file_source_path)\r\n try:\r\n found_groups = out.groups()\r\n write_copy(file_source_path, file_name, audio_groups=found_groups)\r\n except AttributeError:\r\n print(f\"CRITICAL ERROR: File not handled: {file_source_path}\")\r\n current_iteration += 1\r\n if current_iteration == last_factor + total_files_down / 10:\r\n last_factor = current_iteration\r\n 
print(\"[{0}{1}] {2}/{3}\".format(\"-\" * position, \" \" * (10 - position), current_iteration, total_files))\r\n position += 1\r\n print(\"[{0}] {1}/{2}\".format(\"-\" * 10, total_files, total_files))", "def open_read_files(answer_files, answers):\r\n \"\"\"And designates each file to a variable in answers\"\"\"\r\n count = 0\r\n s = 0\r\n answer_files2 = []\r\n for file in answer_files[:]: # Used [:] to get all file_names in answer_files\r\n ans = open(file, mode='r')\r\n print(f\"Opening {ans.name}\")\r\n time.sleep(s)\r\n answers[count] = ans\r\n count += 1\r\n if ans.closed == False: # Section for checking if files are closed\r\n print(f\"Closing {ans.name}\")\r\n ans.close()\r\n answer_files2.append(ans.name)\r\n answer_files.remove(ans.name)\r\n time.sleep(s)\r\n return answer_files2, answers", "def gen_datafiles():\n\tnum_reads = 10000\n\tnum_samples = 100\n\tgen_sequences('hg38.fa', num_reads, num_samples, 1, 'hg38_train.txt')\n\tgen_sequences('HIV-1.fasta', num_reads, num_samples, 0, 'HIV-1_train.txt')\n\tgen_sequences('hg38.fa', num_reads, num_samples, 1, 'hg38_test.txt')\n\tgen_sequences('HIV-1.fasta', num_reads, num_samples, 0, 'HIV-1_test.txt')", "def test_create4(self):\n TempfileManager.sequential_files(2)\n fname = TempfileManager.create_tempfile()\n OUTPUT = open(fname, 'w')\n OUTPUT.write('tempfile\\n')\n OUTPUT.close()\n self.assertEqual(len(list(glob.glob(tempdir + '*'))), 1)\n fname = os.path.basename(fname)\n self.assertEqual(fname, 'tmp2')\n #\n TempfileManager.unique_files()\n fname = TempfileManager.create_tempfile()\n OUTPUT = open(fname, 'w')\n OUTPUT.write('tempfile\\n')\n OUTPUT.close()\n self.assertEqual(len(list(glob.glob(tempdir + '*'))), 2)\n fname = os.path.basename(fname)\n self.assertNotEqual(fname, 'tmp3')\n self.assertTrue(fname.startswith('tmp'))", "def _generate_examples(self, files):\n idx = 0\n for filename in files:\n with open(filename) as file:\n for line in file:\n yield idx, {\"text\": line}\n idx += 1", "def write_input_files(pst, pst_path=\".\"):\n par = pst.parameter_data.copy()\n par.index = par.index.str.lower()\n par.loc[:, \"parval1_trans\"] = (par.parval1 * par.scale) + par.offset\n pairs = np.array(list(zip(pst.template_files, pst.input_files)))\n num_tpl = len(pairs)\n chunk_len = 50\n num_chunk_floor = num_tpl // chunk_len\n main_chunks = (\n pairs[: num_chunk_floor * chunk_len].reshape([-1, chunk_len, 2]).tolist()\n ) # the list of files broken down into chunks\n remainder = pairs[num_chunk_floor * chunk_len :].tolist() # remaining files\n chunks = main_chunks + [remainder]\n # procs = []\n # for chunk in chunks:\n # # write_to_template(pst.parameter_data.parval1_trans,os.path.join(pst_path,tpl_file),\n # # os.path.join(pst_path,in_file))\n # p = mp.Process(\n # target=_write_chunk_to_template,\n # args=[chunk, pst.parameter_data.parval1_trans, pst_path],\n # )\n # p.start()\n # procs.append(p)\n # for p in procs:\n # p.join()\n pool = mp.Pool(processes=min(mp.cpu_count(), len(chunks), 60))\n x = [\n pool.apply_async(\n _write_chunk_to_template,\n args=(chunk, par.parval1_trans, pst_path),\n )\n for i, chunk in enumerate(chunks)\n ]\n [xx.get() for xx in x]\n pool.close()\n pool.join()", "def _make_generator_fn(files, batch_size, nclasses=6,\n target_field='hadro_data/n_hadmultmeas',\n mode=None, max_evts=None):\n from mnvtf.hdf5_readers import SimpleCategorialHDF5Reader as HDF5Reader\n\n def example_generator_fn():\n event_count = 0\n show_interval = 100000\n end_file = False\n\n for ifile in cycle(files):\n if not end_file:\n 
logging.info(\"Reading {}\".format(ifile))\n start_idx, stop_idx = 0, batch_size\n reader = HDF5Reader(ifile, target_field=target_field,\n nlabels=nclasses)\n nevents = reader.openf()\n \n while stop_idx <= nevents:\n if max_evts is not None and event_count >= max_evts:\n reader.closef()\n end_file = True\n break\n \n if mode == 'predict' and event_count % show_interval == 0:\n msg='Processing events from {} to {}.'.format(\n event_count, event_count + show_interval)\n logging.info(msg)\n \n yield reader.get_samples(start_idx, stop_idx)\n \n event_count += batch_size\n start_idx += batch_size\n stop_idx += batch_size\n \n reader.closef()\n \n if ifile==files[-1]:\n if mode == 'predict':\n return\n else:\n event_count = 0\n end_file = False\n \n return\n \n return example_generator_fn", "def process_input_files(inputs):\n for ifile in inputs:\n with open(ifile) as fin:\n exec(compile(fin.read(), ifile, 'exec'))", "def _create_ID_files(self):\n for file, IDs in [(self._trn_IDs_file, self._trn_IDs), (self._val_IDs_file,\n self._val_IDs), (self._tst_IDs_file, self._tst_IDs)]:\n with open(file, 'w') as f:\n f.write('\\n'.join('{}###{}###{}'.format(ID[0], ID[1], ID[2]) for ID in IDs))", "def read_files(filenames, gram_size=1):\n assert isinstance(filenames, list), \"filenames argument must be a list\"\n parser = MorParser()\n for fn in filenames:\n for uid, speaker, ngram in generate_chunks(parser.parse(fn), gram_size):\n yield fn, uid, speaker, ngram", "def make_files(dir_in, dir_out):\n try:\n listaFisiere = os.listdir(f\"{dir_in}\")\n except Exception as eroare:\n print(\"Path to input file is invalid, exiting...\")\n quit()\n if not os.path.exists(f\"{dir_out}\"):\n os.mkdir(f\"{dir_out}\")\n paths_out = []\n for numeFisier in listaFisiere:\n numeFisierOutput=\"output_\"+numeFisier\n f=open(f\"{dir_out}/\"+numeFisierOutput,\"w\")\n paths_out.append(f\"{dir_out}/\"+numeFisierOutput)\n f.close()\n for i in range(len(listaFisiere)):\n listaFisiere[i] = dir_in + \"/\" + listaFisiere[i]\n return listaFisiere, paths_out", "def run(self):\n for filepage in self.generator:\n print (filepage)\n filepage.touch()", "def _open_files(path, filenames, barcode, queue):\n if not exists(path):\n mkdir(path)\n\n handles = []\n\n for filename in filenames:\n base, ext = basename(filename).split('.', True)\n handles.append(\n Handle('{}/{}_{}.{}'.format(path, base, barcode, ext), queue,\n f_open=_type_handler[ext.split('.')[-1]]))\n\n return handles", "def main():\n onlyfiles = [f for f in listdir(RAWDATA_PATH) if isfile(join(RAWDATA_PATH, f))]\n for file in onlyfiles:\n create_RCSB_fastas(file)", "def automatic_checking(files):\n for i in range(10):\n fft_checking(files[i])", "async def create_upload_files(files: List[UploadFile] = File(...)):\n\n if len(files) > 3:\n return {\" \": {\"mode\": \"File Limit Exceeded\"}}\n \n filename = \"_temp_files_one/myfilem.wav\"\n res_json = {}\n file_counter = 0\n for upload_file in files:\n \n with open(filename, \"wb\") as file_object:\n \n file_object.write(upload_file.file.read())\n \n res_json[upload_file.filename + str(file_counter)] = predict_many(filename)\n \n os.remove(filename)\n \n return res_json", "def wrt_gau_input(self):\n fp = open(\"myfiles.dat\", \"w\") \n nmol = self.model['nmol']\n prefix = self.config['job_prefix']\n incr = self.config['incr']\n for i in xrange(0, nmol, incr):\n self.wrt_gau_input_once(i)\n print >>fp, \"%sx%s.gjf\" % (prefix, i)\n fp.close()\n \n # extra jobs\n fp = open(\"link.sh\", \"w\")\n print >>fp, \"#! 
/bin/bash\"\n print >>fp, \"# sampling shell input\"\n print >>fp, \"rm linking.gjf\"\n print >>fp, \"myfiles=`more myfiles.dat`\"\n print >>fp, \"\"\"\n for onefile in $myfiles;\n do cat $onefile >> linking.gjf;\n echo -e '\\\\n--Link1--\\\\n' >> linking.gjf;\n done\n \"\"\"\n fp.close()\n return", "def Run():\n file_name = AskForFileName()\n file_content = ReadFileContents(file_name)\n head_list = BuildHeadList(file_content)\n atom_list = BuildAtomList(file_content)\n tail_list = BuildTailList(file_content)\n WriteNewFile(head_list, atom_list, tail_list)", "def _make_input_file_list(binnedfile, num_files):\n outdir_base = os.path.abspath(os.path.dirname(binnedfile))\n outbasename = os.path.basename(binnedfile)\n filelist = \"\"\n for i in range(num_files):\n split_key = \"%06i\" % i\n output_dir = os.path.join(outdir_base, split_key)\n filepath = os.path.join(output_dir,\n outbasename.replace('.fits', '_%s.fits' % split_key))\n filelist += ' %s' % filepath\n return filelist", "def make_files(self):\n return []", "def load_files(self, n=None):\n if not n:\n n = len(self.files)\n\n for _, name in zip(list(range(n)), self.files):\n yield self.load_file(name)", "def main(self, verbose=0):\n indepdict=self.scan_for_loop(self.indeploop)\n pegdict1 = self.scan_for_loop(self.pegloop1)\n pegdict2 = self.scan_for_loop(self.pegloop2)\n if len(indepdict.keys()) == 0 and len(pegdict1.keys()) == 0 and len(pegdict2.keys()) == 0:\n return dict()\n alldict = dict(indepdict)\n alldict.update(pegdict1)\n alldict.update(pegdict2)\n indepcomb=self.get_combo_list(indepdict, 0)\n pegcomb1=self.get_combo_list(pegdict1, 1)\n pegcomb2=self.get_combo_list(pegdict2, 1)\n allcombs = self.combine_three_combo_lists(indepcomb, pegcomb1, pegcomb2)\n datasets = self.prepare_looped_datasets(alldict, allcombs)\n createdfiles = self.create_input_files(datasets)\n if verbose == 1:\n self.print_list(indepcomb)\n self.print_list(pegcomb1)\n self.print_list(pegcomb2)\n self.print_list(allcombs)\n for datakey in datasets:\n self.print_list(datasets[datakey])\n return createdfiles", "def create_input_multiprocess(ids):\n\n threads = 8 \n p = mp.Pool(threads)\n pool_results = p.map(create_input, np.array_split(ids, threads * 2))\n p.close()\n p.join()\n\n ret = pd.concat(pool_results)\n\n pool_results = None\n gc.collect()\n\n return ret", "def _make_files(self):\n if not self.path.is_dir():\n raise FileNotFoundError(f\"Path {self.path} does not exist.\")\n\n # Make the filepaths\n self.file_points = self.path / \"point.dat\"\n self.file_lines = self.path / \"line.dat\"\n self.file_cadastre = self.path / \"cadastre.dat\"\n self.file_portals = self.path / \"portals.dat\"\n\n with open(self.file_points, \"w\") as f:\n # 2 lines ignored\n header = datetime.datetime.now().strftime(\"Generated: %d/%m/%Y %H:%M\\n\")\n f.write(header)\n self.points_dfs = []\n with open(self.file_lines, \"w\") as f:\n # 5 lines ignored\n header = (\n datetime.datetime.now().strftime(\"Generated: %d/%m/%Y %H:%M\\n\")\n + 3 * \"Generated: \\n\"\n + \"Name,Section,source_group,x1,y1,z1,x2,y2,z2,width,vert. 
ext.,-,-,\"\n \"emission_rate[kg/h/km],-,-,-,-\\n\"\n )\n f.write(header)\n with open(self.file_cadastre, \"w\") as f:\n # 1 line ignored\n header = \"x,y,z,dx,dy,dz,emission_rate[kg/h],-,-,-,source_group\\n\"\n f.write(header)\n with open(self.file_portals, \"w\") as f:\n # 2 lines ignored\n header = (\n datetime.datetime.now().strftime(\"Generated: %d/%m/%Y %H:%M\\n\")\n + \"x1,y1,x2,y2,z0,z1,emission_rate[kg/h],-,-,-,source_group\\n\"\n )\n f.write(header)\n\n\n # File to save the source groups values\n self.file_source_groups = self.path / \"source_groups.json\"\n with open(self.file_source_groups, \"w\") as f:\n # reverse the dict (items become keys and vice versa)\n reversed_source_groups = {v: k for k, v in self.source_groups.items()}\n json.dump(reversed_source_groups, f, indent=2)", "def process_files(self):\n for filename in self.temp_directory.iterdir():\n im = Image.open(str(filename))\n scaled = im.resize((640, 480))\n scaled.save(str(filename))", "def open_input_files(self):\n self.dictionaryFile = open(self.dictionaryFile, 'r', encoding=self.encoding)\n\n if self.annotationFile :\n self.annotationFile = open(self.annotationFile, 'r', encoding=self.encoding)\n elif self.annotationFile is None:\n try:\n self.annotationFile = open(os.path.join(self.dictionaryPath, self.dictionaryName + '.ann'), 'r', encoding=self.encoding)\n except FileNotFoundError:\n if self.verbose >= 2:\n sys.stdout.write (\"Warning: annotation file is not found.\\n\")\n\n if self.abbreviationsFile :\n self.abbreviationsFile = open(self.abbreviationsFile, 'r', encoding=self.encoding)\n elif self.abbreviationsFile is None:\n try:\n self.abbreviationsFile = open(os.path.join(self.dictionaryPath, self.dictionaryName + '_abrv.dsl'), 'r', encoding=self.encoding)\n except FileNotFoundError:\n if self.verbose >= 2:\n sys.stdout.write (\"Warning: abbreviations file is not found.\\n\")", "def processSetOfCerFiles(files):\n printHeader()\n \n k = 0\n for f in files:\n k = k + 1\n sz = get_file_size(f)\n with open(f, 'rb') as fb:\n processCerFile(k, fb, sz=sz)", "def packFiles(source, filesPerBlock, dest):\n\tfileCount = 1\n\t\n\ttmpFileName = \"tmp.h5\"\t\n\n\n\toutFile = createBlockFile(tmpFileName)\t\n\tfor dirname, subdirs, files in os.walk(source):\t\n\t print 'Scanning ' + dirname + '...'\t\n\t for f in files:\t\n\t if f.endswith('.h5'):\t\n\t inFile = h5py.File(os.path.join(dirname, f), 'r')\t\n\t outFile.copy(inFile, outFile['songs'], f)\t\n\t inFile.close()\t\n\t fileCount = fileCount + 1\t\n\t if(fileCount > filesPerBlock):\t\n\t outFile.close()\t\n\t upload(tmpFileName, bucket)\t\n\t fileCount = 1\t\n\t outFile = createBlockFile(tmpFileName)\t\n\n \toutFile.close()\n \tif fileCount > 1:\n\t \tupload(tmpFileName, bucket)\n\n\tos.remove(tmpFileName)", "def initialize_files(file_name, ran, file_extension):\r\n \"\"\"Specifiy the exact file name and the number of files --> file_name_(range) e.g file_name=chickens ,ran=16\"\"\"\r\n answer_file_rep = [file_name + str(number) for number in range(1, ran)]\r\n answer_files = [file + \"{}\".format(file_extension) for file in answer_file_rep]\r\n answers = [\"answer\" + str(number) for number in range(1, ran)]\r\n return answer_files, ran, answers", "def main(input_filepath, output_filepath):\n logger = logging.getLogger(__name__)\n logger.info('making final data set from raw data')", "def main(input_filepath, output_filepath):\n logger = logging.getLogger(__name__)\n logger.info('making final data set from raw data')", "def simple(tmpdir):\n flowcells = [1, 2, 3, 4, 
5, 6, 7, 8]\n lanes = [1, 2, 3]\n reads = [1, 2]\n\n _simple = {\"files\": [], \"data\": []}\n i = 0\n\n for read in reads:\n for flowcell in flowcells:\n for lane in lanes:\n content = _full_content()[i]\n file_path = create_file(tmpdir, flowcell, lane, read, content)\n\n _simple[\"files\"].append(file_path)\n\n data = create_file_data(file_path, flowcell, lane, read)\n _simple[\"data\"].append(data)\n i += 1\n\n return _simple", "def _create_filelist(self):\n print \"[--init] creating %s\" % self.file_list\n if self.source_file is not None:\n shutil.copyfile(self.source_file, self.file_list)\n elif self.source_path is not None:\n filenames = get_file_paths(self.source_path)\n if self.shuffle_file:\n random.shuffle(filenames)\n with open(self.file_list, 'w') as fh:\n for fname in filenames:\n fh.write(\"0000\\t\" + fname + \"\\n\")\n else:\n sys.exit(\"[--init] ERROR: \" +\n \"need to define input with --filelist or \" +\n \"--source-directory option, aborting\")\n read_only(self.file_list)", "def create_output_files(self):\n namenode = self.runner.namenode\n for i in range(self.cnt_reducers):\n fname = '%s.%s' % (self.output_dir, reduce_output(self.id, i))\n namenode.create_file(fname)\n self.result_files.append(fname)\n self.open_files.append(fname)\n\n for j in range(self.cnt_mappers):\n fname = map_output(self.id, j, i)\n namenode.create_file(fname)\n self.open_files.append(fname)", "def process(self, args):\n for benchmark_file in args.benchmark_files:\n self.process_individual_file(benchmark_file)\n self.total_files += 1", "def create_input(file_name, N_sensory, dt=1e-4, dur=1.0, start=0.3, stop=0.6, I_max=0.6):\n\n Nt = int(dur / dt)\n t = np.arange(0, dt * Nt, dt)\n\n uids = [\"sensory_\" + str(i) for i in range(N_sensory)]\n\n uids = np.array(uids, dtype = 'S')\n\n I = np.zeros((Nt, N_sensory), dtype=np.float64)\n I[np.logical_and(t > start, t < stop)] = I_max\n\n with h5py.File(file_name, 'w') as f:\n f.create_dataset('I/uids', data=uids)\n f.create_dataset('I/data', (Nt, N_sensory),\n dtype=np.float64,\n data=I)", "def read_input():\n\n filenames = sorted(glob.glob(\"%s/openflow_input/*\" % root_dir))\n\n for filename in filenames:\n log(\"Processing struct file: \" + filename)\n ofinput = process_input_file(filename)\n\n # Populate global state\n for wire_version in ofinput.wire_versions:\n version_name = of_g.of_version_wire2name[wire_version]\n versions[version_name]['classes'].update(copy.deepcopy(ofinput.classes))\n of_g.ordered_classes[wire_version].extend(ofinput.ordered_classes)", "def ingest(self, files):\n for file in files:\n self.files.add(file)", "def _start_input_file_worker(\n q_in: Queue, input_data_path: str, batch_size: int\n) -> None:\n input_data_file = open(input_data_path, \"r\")\n enum_idx = 0\n batch = []\n for line in input_data_file:\n unique_id = str(uuid.uuid1())\n batch.append((enum_idx, unique_id, line))\n # If the batch to send is the size then push to queue and rest batch\n if len(batch) == batch_size:\n q_in.put(batch)\n batch = []\n enum_idx += 1\n if batch:\n q_in.put(batch)", "def _open(args):\n directory = args.directory\n if directory is None:\n directory = os.getcwd()\n\n files = []\n [files.extend(glob(os.path.join(directory, infile)))\n for infile in args.infiles]\n return _open_files(files, args.open_mode)", "def simulation_step(self, iteration, instance):\n k = Kernel(name=\"misc.mkfile\")\n k.arguments = [\"--size=1000000\", \"--filename=asciifile.dat\"]\n\tk.download_output_data = ['asciifile.dat > 
asciifile-{0}.dat'.format(instance)]\n return [k]", "def Make_Sampled_FileList(input_name):\n\tif args.input_type == 'FILE':\n\t\tFileList=[]\n\t\tSampledFileName, SampledExten = os.path.splitext(input_name)\n\t\tSampledName = '%s_smpld%s' % (SampledFileName,SampledExten)\n\t\tFileList.append(SampledName)\n\tif args.input_type == 'FOLDER':\n\t\tFileList = glob.glob('%s/*_smpld*' % args.input_name)\n\treturn FileList", "def set_inputs(subj):\n inputs = []\n for seq in range(1, 5):\n infname = 'v8.%s_%s.Powered.cleanEPI.uncensored.nii.gz' % (subj, seq)\n infile = os.path.join(os.environ['avp'], 'nii', infname)\n inputs.append(infile)\n inputfiles = ' '.join(inputs)\n\n return inputfiles", "def precreate_tempfiles(self, count):\n spy_for = getattr(self, 'spy_for', None)\n\n assert spy_for, (\n '%r must mix in kgb.SpyAgency in order to call this method.'\n % self.__class__)\n\n tmpfiles: List[str] = [\n make_tempfile()\n for i in range(count)\n ]\n\n tmpfiles_iter = iter(tmpfiles)\n\n @spy_for(make_tempfile)\n def _return_next_tempfile(*args, **kwargs) -> str:\n try:\n tmpfile = next(tmpfiles_iter)\n except StopIteration:\n self.fail('Too many calls to make_tempfile(). Expected %s, '\n 'got %s.'\n % (count, count + 1))\n\n content = kwargs.get('content')\n\n if content:\n with open(tmpfile, 'wb') as fp:\n fp.write(content)\n\n return tmpfile\n\n return tmpfiles", "def _init(args, workflows_dir, config_path):\n for file in [\"samples.tsv\", \"config.yaml\"]:\n src = os.path.join(workflows_dir, args.workflow.replace(\"-\", \"_\"), file)\n dest = os.path.join(os.path.dirname(config_path), file)\n\n copy_file = True\n if os.path.exists(dest) and args.force is False:\n choices = {\"yes\": True, \"y\": True, \"no\": False, \"n\": False}\n\n sys.stdout.write(f\"File: {dest} already exists. Do you want to overwrite it? 
(yes/no) \")\n while True:\n choice = input().lower()\n if choice in choices:\n copy_file = choices[choice]\n break\n else:\n print(\"Please respond with yes (y) or no (n).\")\n\n if copy_file:\n shutil.copyfile(src, dest)", "def FSC2(input_dir, num_reps=50, min_sims=100000, max_ecm=20, calc_CI=False, numcores=1, scratch_mb='200', time_scratch=\"01:50:00\", mem=\"200\", print1=False, overwrite=\"None\", fsc2_path=\"/storage/plzen1/home/holcovam/programs/fsc26_linux64/fsc26\"):\n Data_Files = []\n tpl_files = []\n est_files = []\n CI_Data_Files = []\n shlist = []\n\n if input_dir.endswith(\"/\") is False:\n input_dir += \"/\"\n\n for path in os.listdir(input_dir):\n if os.path.isdir(input_dir + path) and path.startswith(\"FSC2input\"):\n samp_name = path.split(\"_\")[1]\n #folder_name = samp_name\n if samp_name + \"_DSFS.obs\" in os.listdir(input_dir + path):\n for i in range(0, num_reps):\n new_file = open(input_dir + path + \"/\" + samp_name + str(i) + \"_DSFS.obs\", 'w')\n with open(input_dir + path + \"/\" + samp_name + \"_DSFS.obs\") as data_file:\n for line in data_file:\n new_file.write(line)\n new_file.close()\n Data_Files.append(input_dir + path + \"/\" + samp_name + str(i) + \"_DSFS.obs\")\n else:\n print(\"Did not find input data file for: \", samp_name)\n if calc_CI == \"True\":\n num_files = 0\n for file in os.listdir(input_dir + path):\n if file.endswith(\"_DSFS.obs\") and file.split(\"_\")[-2].split(\".\")[-1][0:3] == \"rep\" and file != samp_name + \"_DSFS.obs\":\n for i in range(0, num_reps):\n new_file = open(input_dir + path + \"/\" + samp_name + file.split(\"_\")[-2].split(\".\")[-1].split(\"_\")[0]+ \"_\" + str(i) + \"_DSFS.obs\", 'w')\n with open(input_dir + path + \"/\" + file) as data_file:\n for line in data_file:\n new_file.write(line)\n new_file.close()\n CI_Data_Files.append(input_dir + path + \"/\" + samp_name + file.split(\"_\")[-2].split(\".\")[-1].split(\"_\")[0]+ \"_\" + str(i) + \"_DSFS.obs\")\n num_files += 1\n if len(CI_Data_Files) < 1:\n print(\"Did not find bootstrap replicates for: \", samp_name)\n else:\n print(\"Found \", num_files, \" replicate dsfs files for CI calculation for \", samp_name)\n if path.endswith(\".tpl\"):\n tpl_files.append(path)\n est_files.append(path.split(\".\")[0])\n if len(tpl_files) == 0:\n print(\"Did not find any tpl files!! 
Aborting!!\")\n else:\n if calc_CI == \"True\":\n Data_Files = CI_Data_Files\n for file in Data_Files:\n name = file.split(\"_DSFS\")[0]\n samp_name = name.split(\"/\")[-1]\n folder_name = samp_name [0:11]\n for tpl in tpl_files:\n tpl_name = tpl.split(\".tpl\")[0]\n if os.path.isdir(name + \"_\" + tpl_name) is False or overwrite == \"hard\":\n new_tpl = open(name + \"_\" + tpl_name + \".tpl\", 'w')\n new_data = open(name + \"_\" + tpl_name + \"_DSFS.obs\", 'w')\n\n with open(file, 'r') as data:\n for i, line in enumerate(data):\n if i == 1:\n pop_info = line.strip(\"\\n\").strip(\"\\t\").split(\"\\t\")\n pop_num = int(pop_info[0])\n samp_nums = pop_info[-pop_num:]\n new_data.write(line)\n with open(input_dir + tpl, 'r') as template:\n samp_num_lines = pop_num + 4\n for i, line in enumerate(template):\n if i < samp_num_lines:\n new_tpl.write(line)\n elif i == samp_num_lines:\n for num in samp_nums:\n new_tpl.write(num + \"\\n\")\n elif i >= samp_num_lines + len(samp_nums):\n new_tpl.write(line)\n new_est = open(name + \"_\" + tpl_name + \".est\", 'w')\n try:\n with open(input_dir + tpl_name + \".est\") as est:\n for line in est:\n new_est.write(line)\n except FileNotFoundError:\n print(\"Did not find est file for: \", tpl)\n #folder_name = samp_name ''.join(i for i in s if not i.isdigit())\n shname = name + \"_\" + tpl_name + \".sh\"\n shfile5 = open(shname, 'w')\n shfile5.write('#!/bin/bash -e\\n' +\n '#PBS -N '+samp_name+'\\n' +\n '#PBS -l walltime='+str(time_scratch)+'\\n' +\n '#PBS -l select=1:ncpus='+str(numcores)+':mem='+str(mem)+'mb:scratch_local='+str(scratch_mb)+'mb\\n' +\n '#PBS -m abe\\n' +\n '#PBS -j oe\\n\\n' +\n 'module add python-3.4.1-gcc\\n'+\n 'module add python34-modules-gcc\\n'+\n 'trap \\'clean_scratch\\' TERM EXIT\\n'+\n 'if [ ! -d \"$SCRATCHDIR\" ] ; then echo \"Scratch not created!\" 1>&2; exit 1; fi \\n' +\n 'DATADIR=\"/storage/plzen1/home/holcovam/ScanTools\"\\n' +\n 'cp $DATADIR/'+ input_dir + \"FSC2input_\" + folder_name+ \"/\" + samp_name + \"_\" + tpl_name + '* $SCRATCHDIR || exit 1\\n'+\n 'cp '+fsc2_path+' $SCRATCHDIR || exit 1\\n'+\n 'cd $SCRATCHDIR || exit 2\\n' +\n 'echo data loaded at `date`\\n\\n' +\n 'chmod +x fsc26 \\n' +\n #'ls -l \\n' +\n './fsc26 -t ' + samp_name + \"_\" + tpl_name + '.tpl -e ' + samp_name + \"_\" + tpl_name + '.est -n ' + str(min_sims) + ' -u -d -q -L ' + str(max_ecm) + ' -M \\n' + \n 'rm seed.txt \\n'+\n 'rm fsc26\\n'+\n 'rm *DSFS.obs\\n'+\n 'rm *.sh\\n'+\n 'rm *.tpl \\n'+\n 'rm *.est \\n'+\n #'ls -l \\n' +\n 'cp $SCRATCHDIR/*.par $DATADIR/'+ input_dir + \"FSC2input_\" + folder_name+' || exit 1\\n'+\n 'rm *.par \\n'+\n 'cp -r $SCRATCHDIR/* $DATADIR/'+input_dir+' || export CLEAN_SCRATCH=false\\n'+\n 'printf \"\\\\nFinished\\\\n\\\\n\"\\n')\n shfile5.close()\n shlist.append(shname)\n\n############IF PROBLEM WITH EXCESS OF NONCONVERGED CHAINS, COPY /home/majda/alpine/fastsimcoal2/afterWPSG/scripts/notConverged.py here ###################\n\n else:\n print(\"Output for \" + samp_name + \"_\" + tpl_name + \" already exists. Use hard_overwrite = True to overwrite.\")\n return shlist", "def create(number, files, directory, base_directory):\n print(\"Creating {0}... 
\".format(directory), end=\"\")\n\n os.mkdir(directory)\n\n # copy files to the new directory with their new names\n for file in files:\n shutil.copyfile(base_directory + file[0], directory + file[1])\n\n # tailor appropriate files for this project\n filename = \"{0}/Makefile\".format(directory)\n with open(filename) as file:\n modified_text = file.read().replace(\"file1\", \"ex{0}\".format(number))\n with open(filename, \"w\") as file:\n file.write(modified_text)\n\n print(\"done.\")", "def filelist_generator(self):\n for filename in self.filenames:\n yield filename", "def _generate_orca_inputs(\n self, molecules: List[Tuple[np.array, np.array]], current_compdir: str\n ):\n input_files = []\n for idx, molecule in enumerate(molecules):\n # Convert data and generate input files\n atom_types, positions = molecule\n # Convert inputs to Angstrom for input file, since this is the default length unit there\n positions *= spk_units.convert_units(\n self.position_conversion * spk_units.length, \"Angstrom\"\n )\n input_file_name = os.path.join(\n current_compdir, \"{:s}_{:06d}.oinp\".format(self.basename, idx + 1)\n )\n self._write_orca_input(input_file_name, atom_types, positions)\n input_files.append(input_file_name)\n\n return input_files", "def generate_data_files(self, mode='link'):\n\n if self.basedir is None:\n raise ValueError('No base directory set.')\n\n data_folder = self.get_data_folder(mode='absolute')\n\n # compose BIDS data filenames\n filename_stem = f'sub-{self.sub_id}_ses-{self.ses_id}'\n\n for key, files in self.data.items():\n # add '_' prefix for filename concatenation\n if key:\n key = '_' + key\n for i, file in enumerate(files):\n # preserve the suffix\n suffix = file.suffix\n # append split postfix if required\n split = ''\n if len(files) > 1:\n split = f'_split-{i}'\n\n new_filename = filename_stem + key + split + suffix\n destination = data_folder / new_filename\n create_file(file, destination, mode)", "def createAllImageFiles(poly, name) :\n \n for i in range(len(poly.getPaths())):\n fileName = name + \"_\" + str(i) + \".dot\"\n imgName = name + \"_\" + str(i) + \".jpg\"\n \n Command = \"neato -Tjpeg \" + fileName + \" -o \" + imgName\n run(Command, shell=True)", "def create_files(filename_list, encoding):\n for filename in filename_list:\n codecs.open(filename, 'w', encoding).close()", "def test_generator(self, test_path):\n\n img_list = os.scandir(test_path)\n for img_entry in img_list:\n\n img = cv2.imread(img_entry.path, COLOR_TO_OPENCV[self.color_mode])\n if img.shape[-1] == 3:\n orig_shape = img.shape[-2::-1]\n else:\n orig_shape = img.shape[::-1]\n\n\n img = cv2.resize(img, tuple(self.target_size))\n img = img / 255\n if self.color_mode == \"grayscale\":\n img = np.reshape(img, img.shape + (1,))\n img = np.reshape(img, (1,) + img.shape)\n yield img, img_entry, orig_shape", "def readFile(self, files):\n files = np.atleast_1d(files) # allow scalar input\n\n events = list()\n groups = list()\n flashes = list()\n one_sec = list()\n\n ev_id_ctr = 0\n gr_id_ctr = 0\n fl_id_ctr = 0\n\n for _file in files:\n # todo: with...open\n nc = Dataset(_file)\n\n this_ev = _extract_events(nc)\n this_grp = _extract_groups(nc)\n this_fl = _extract_flashes(nc)\n this_one_sec = _extract_one_second(nc, background=False)\n\n nc.close()\n\n # TODO: do we need check for \"empty\" files like w/GLM?\n\n # IDs are not necessarily unique. 
We'll modify them so they are.\n # Similar to what is done with GLM data (glm.py in this package)\n # See there for details, but the gist is get unique values and map\n # TODO: refactor?\n\n this_ev.sort_values('id', inplace=True)\n this_grp.sort_values('id', inplace=True)\n this_fl.sort_values('id', inplace=True)\n\n new_flash_id = np.arange(len(this_fl))\n this_fl.id = new_flash_id\n flash_id_map = dict(zip(this_fl._orig_id.values, new_flash_id))\n\n # Update group parent\n new_id = this_grp.parent_id.map(flash_id_map.get)\n this_grp.parent_id = new_id\n\n # New id for the group:\n new_group_id = np.arange(len(this_grp))\n this_grp.id = new_group_id\n group_id_map = dict(zip(this_grp._orig_id.values, new_group_id))\n\n # Update event parent\n this_ev.parent_id = this_ev.parent_id.map(group_id_map.get)\n\n # New event ID (although I don't think is really necessary)\n new_event_id = np.arange(len(this_ev))\n this_ev.id = new_event_id\n\n # Add in an offset to get unique values across files\n this_ev['id'] += ev_id_ctr\n this_grp['id'] += gr_id_ctr\n this_fl['id'] += fl_id_ctr\n\n # Offset the parent IDs for the children too:\n this_ev['parent_id'] += gr_id_ctr\n this_grp['parent_id'] += fl_id_ctr\n\n # Next, update the counters\n ev_id_ctr = this_ev['id'].iloc[-1]+1\n gr_id_ctr = this_grp['id'].iloc[-1]+1\n fl_id_ctr = this_fl['id'].iloc[-1]+1\n\n # Modify the times to UTC:\n for val in [this_ev, this_grp, this_fl]: # one seconds already converted\n val.time = tai93_to_utc(val.time)\n\n # todo: add option to not sort by time\n # this_event.sort_values('time', inplace=True)\n # this_group.sort_values('time', inplace=True)\n # this_flash.sort_values('time', inplace=True)\n\n # Finally, add \"this\" data\n events.append(this_ev)\n groups.append(this_grp)\n flashes.append(this_fl)\n one_sec.append(this_one_sec)\n\n # Put these as attributes of the class\n self.events = Ltg(pd.concat(events))\n self.groups = Ltg(pd.concat(groups))\n self.flashes = Ltg(pd.concat(flashes))\n self.one_second = Ltg(pd.concat(one_sec))", "def run(self, options):\n time.sleep(int(options.sleepLength))\n print('sleeping for %s' % options.sleepLength)\n for (dirpath, dirnames, filenames) in os.walk(options.inputdir):\n relative_path = dirpath.replace(options.inputdir, \"\").strip(\"/\")\n output_path = os.path.join(options.outputdir, relative_path)\n for dirname in dirnames:\n print('Creating directory... %s' % os.path.join(output_path, dirname))\n os.makedirs(os.path.join(output_path, dirname))\n for name in filenames:\n new_name = options.prefix + name\n str_outpath = os.path.join(output_path, new_name)\n print('Creating new file... 
%s' % str_outpath)\n shutil.copy(os.path.join(dirpath, name), str_outpath)", "def initialize_observation_files(tile_file_list):\n n_tiles = len(tile_file_list)\n if(n_tiles>0):\n for tile_file in tile_file_list:\n target_tile_pack = util.TargetTile(tile_file) \n target_tile_pack.write_results_to_file(tile_file)\n return", "def Make_FileList(input_name):\n\tif args.input_type == 'FILE':\n\t\tFileList=[]\n\t\tFileList.append(input_name)\n\tif args.input_type == 'FOLDER':\n\t\tFileList = glob.glob('%s/*' % input_name)\n\treturn FileList", "def process_files(files_pattern, output, options=None):\n if options is None:\n options = {}\n\n queue = Queue(100)\n\n files = glob.glob(files_pattern, recursive=True)\n total_count = len(files)\n logging.info(\"starting to parse %s files\", total_count)\n\n write_results_process = Process(target=write_results, args=(queue, output, total_count))\n write_results_process.start()\n\n pool = Pool(None, process_file_init, [queue, options])\n pool.map(process_file, files)\n pool.close()\n pool.join()\n queue.put(None)\n write_results_process.join()\n logging.info(\"successfully processed %s files\", queue.get())", "def interactive():\r\n\r\n while True: \r\n fn = rel_path(abspath(input('Enter Photo Name or Folder Name: \\n')))\r\n print()\r\n\r\n if exists(fn):\r\n break\r\n\r\n print('{} is not a valid path \\n'.format(fn))\r\n\r\n # Start Single Mode\r\n if isfile(fn):\r\n main(fn, MIN_PLANT_SIZE)\r\n\r\n # Start Multiple mode\r\n else:\r\n while True:\r\n num = input(\"Enter number of output images: \\n\")\r\n print()\r\n\r\n if num.isdigit():\r\n break\r\n\r\n print('{} is not a valid number\\n'.format(num))\r\n\r\n num = int(num)\r\n\r\n options = []\r\n for i in range(1, num + 1):\r\n options.append(parse_options(input('Enter options for output image #{}: \\n'.format(i))))\r\n print()\r\n \r\n multiple(fn, MIN_PLANT_SIZE, options)", "def __init__(self, root_dir, file_dir, fig_name, n_files=None, **kwargs):\n self.reader = FileReader(root_dir, sub_dirs=[file_dir,], num_files=[n_files,], **kwargs)\n self.fig_name = fig_name\n self.out_dir = '{:s}/{:s}/'.format(root_dir, fig_name)\n if self.reader.comm.rank == 0 and not os.path.exists('{:s}'.format(self.out_dir)):\n os.mkdir('{:s}'.format(self.out_dir))\n self.my_sync = Sync(self.reader.comm)\n\n self.files = self.reader.local_file_lists[file_dir]\n self.idle = self.reader.idle[file_dir]\n self.dist_comm = self.reader.distribution_comms[file_dir]", "def chunk_input(self, input_files, chunksize):\n part_lists = [] # Lists of partial files\n known_nlines = None\n part_suffix = \"\"\n chunk_nlines = chunksize * 2\n\n for input_file in input_files:\n # Count number of lines in the file\n nlines = int(command.execute_with_output(\"wc -l %s\" % input_file)\n .strip().split()[0])\n # Number of lines should be the same in paired files\n if known_nlines is not None:\n msg = \"Mismatched line counts in supposedly paired files: {}\".format(\n input_files)\n assert nlines == known_nlines, msg\n known_nlines = nlines\n\n # Set number of pieces and names\n numparts = (nlines + chunk_nlines - 1) // chunk_nlines\n ndigits = len(str(numparts - 1))\n part_suffix = \"-chunksize-%d-numparts-%d-part-\" % (chunksize, numparts)\n out_prefix_base = os.path.basename(input_file) + part_suffix\n out_prefix = os.path.join(self.chunks_result_dir_local, out_prefix_base)\n\n # Split large file into smaller named pieces\n command.execute(\"split -a %d --numeric-suffixes -l %d %s %s\" %\n (ndigits, chunk_nlines, input_file, out_prefix))\n 
command.execute_with_retries(f\"aws s3 sync --only-show-errors {self.chunks_result_dir_local}/ {self.chunks_result_dir_s3}/ --exclude '*' --include '{out_prefix_base}*'\")\n\n # Get the partial file names\n partial_files = []\n paths = command.execute_with_output(\"ls %s*\" % out_prefix).rstrip().split(\"\\n\")\n for pf in paths:\n partial_files.append(os.path.basename(pf))\n\n # Check that the partial files match our expected chunking pattern\n pattern = \"{:0%dd}\" % ndigits\n expected_partial_files = [(out_prefix_base + pattern.format(i))\n for i in range(numparts)]\n msg = \"something went wrong with chunking: {} != {}\".format(\n partial_files, expected_partial_files)\n assert expected_partial_files == partial_files, msg\n part_lists.append(partial_files)\n\n # Ex: [[\"input_R1.fasta-part-1\", \"input_R2.fasta-part-1\"],\n # [\"input_R1.fasta-part-2\", \"input_R2.fasta-part-2\"],\n # [\"input_R1.fasta-part-3\", \"input_R2.fasta-part-3\"],...]\n input_chunks = [list(part) for part in zip(*part_lists)]\n return part_suffix, input_chunks", "def preprocess_files_second_pass(self, max_files=10):\n print \"reading {} files\".format(max_files)\n file_counter = 0\n for fn in os.listdir(self.output_data_path):\n file_path = self.output_data_path + fn\n if os.path.isfile(file_path) and fn.startswith('user-ct-test-collection') and file_counter < max_files:\n # print(\"loading {}...\".format(file_path))\n # data = pd.read_csv(file_path, sep=\",\")\n # print(\"preprocessing {}...\".format(file_path))\n # data = self.preprocess_data_second(data)\n # save_df(self.output_data_path, data, fn)\n # file_counter += 1\n return", "def loop_run(self, loops):\n self.loop_seek(self.num_loops + loops)", "def get_files(self):\n def _get_files_by_names(files, name_set, postfix):\n ret = []\n for f in files: \n name = osp.basename(f).split(\"_%s\" % postfix)[0]\n if name in name_set:\n ret.append(f)\n return ret\n\n frame1_files = sorted(glob.glob(osp.join(self.root, 'images', \"*_pre_disaster*\")))\n frame2_files = sorted(glob.glob(osp.join(self.root, \"images\", \"*_post_disaster*\")))\n label_files = sorted(glob.glob(osp.join(self.root, \"masks\", \"*_change*\")))\n assert len(frame1_files) == len(frame2_files) == len(label_files), \\\n \"%d, %d, %d\" % (len(frame1_files), len(frame2_files), len(label_files))\n\n file_names = [osp.basename(f).split(\"_pre\")[0] for f in frame1_files]\n file_names = sorted(list(set(file_names)))\n if self.isTrain:\n name_set = train_test_split(file_names, train_size=0.8, random_state=0)[0]\n else: \n name_set = train_test_split(file_names, train_size=0.8, random_state=0)[1]\n self.frame1_files = _get_files_by_names(frame1_files, name_set, 'pre')\n self.frame2_files = _get_files_by_names(frame2_files, name_set, 'post')\n self.label_files = _get_files_by_names(label_files, name_set, 'change')", "def file_package_iter(self):\n files = list()\n futures = list()\n\n amount = 0\n for file in self.file_iterator:\n if amount + self._estimate_file_size(file) > self.max_size:\n if len(files) == 0: # This file is too large for one archive, special handling\n self.pool.wait(futures)\n self._calculate_hash(file)\n yield self._finish_info_package([file])\n continue\n\n self.pool.wait(futures)\n yield self._finish_info_package(files)\n\n files = list()\n amount = 0\n\n amount += file.size\n files.append(file)\n futures.append(self.pool.add_task(self._calculate_hash, file)) # todo calc small files in-thread?\n\n if len(files) > 0:\n yield self._finish_info_package(files)", "def 
test_run(self):\n files = [\n (\"AS1-1.phy_r8s.txt\", \"AS1-1.phy_r8s.txt_2.5.txt\"),\n (\"AS1-3.phy_r8s.txt\", \"AS1-3.phy_r8s.txt_2.5.txt\"),\n (\"AS1-4.phy_r8s.txt\", \"AS1-4.phy_r8s.txt_2.5.txt\"),\n ]\n for file_pair in files:\n input_file = file_pair[0]\n expected_file = file_pair[1]\n infile = self.test_data_path + input_file\n outfile = self.test_data_path + expected_file\n divnum = 2.5\n result = run(infile, divnum)\n\n with open(outfile) as handle:\n expected_result = handle.read()\n self.assertEqual(expected_result, result)", "def make_data_files(gi, files, username, password, galaxyemail, galaxypass, control, test, history_id, filetype, dbkey):\n uploaded_files = []\n ftp = gi.config.get_config()[\"ftp_upload_site\"]\n for file in files:\n nfile = str(file).split('/')\n filename = nfile[len(nfile)-1]\n with open(username + \"/input_\" + filename, \"w\") as dfile:\n cont = subprocess.Popen([\"curl -u \" + username + \":\" + password + \" -k -s \" + file], stdout=subprocess.PIPE, shell=True).communicate()[0]\n dfile.write(cont)\n dfile.close()\n with open(username + \"/input_\" + filename, \"r\") as tfile:\n # Trim file based on selected samples.\n matrix = False\n noheader = False\n samples_a = []\n samples_b = []\n linenr = 0\n if control != \"[]\" or test != \"[]\":\n with open(username + \"/input_A_\" + filename, \"w\") as ndfilea:\n with open(username + \"/input_B_\" + filename, \"w\") as ndfileb:\n for line in tfile:\n if linenr == 0:\n samples_a.append(0)\n samples_b.append(0)\n if \"!\" not in line:\n noheader = True\n if not noheader:\n if \"!Sample_geo_accession\" in line:\n line = line.split('\\t')\n for x in range(0, len(line)):\n if line[x].replace('\\n', '') in control:\n samples_a.append(x)\n if line[x].replace('\\n', '') in test:\n samples_b.append(x)\n else:\n if \"!series_matrix_table_begin\" in line:\n matrix = True\n samples_a.append(0)\n if matrix:\n line = line.split('\\t')\n for p in (p for p,x in enumerate(line) if p in samples_a):\n if \"!series_matrix_table_begin\" not in line[p] and \"!series_matrix_table_end\" not in line[p]:\n ndfilea.write(line[p].replace('\\\"', '').replace('\\n', '') + '\\t')\n for pb in (pb for pb,x in enumerate(line) if pb in samples_b):\n if \"!series_matrix_table_begin\" not in line[pb] and \"!series_matrix_table_end\" not in line[pb]:\n ndfilea.write(line[pb].replace('\\\"', '').replace('\\n', '') + '\\t')\n ndfilea.write('\\n')\n else:\n line.strip()\n else:\n line = line.split('\\t')\n if linenr == 0:\n column = 0\n control = control.split(',')\n test = test.split(',')\n for l in line:\n for c in control:\n if str(c.replace('[', '').replace(']', '').replace('\"', '')) == l.replace('\\n', ''):\n samples_a.append(column)\n for t in test:\n if str(t.replace('[', '').replace(']', '').replace('\"', '')) == l.replace('\\n', ''):\n samples_b.append(column)\n column += 1\n column = 0\n for l in line:\n if column in samples_a:\n ndfilea.write(line[column].replace('\\\"', '').replace('\\n', '') + '\\t')\n if column in samples_b:\n ndfileb.write(line[column].replace('\\\"', '').replace('\\n', '') + '\\t')\n column += 1\n ndfilea.write('\\n')\n ndfileb.write('\\n')\n linenr += 1\n if len(samples_a) > 1:\n check_call([\"lftp -u \" + galaxyemail + \":\" + galaxypass + \" \" + ftp + \" -e \\\"put \" + ndfilea.name + \"; bye\\\"\"], shell=True)\n gi.tools.upload_from_ftp(ndfilea.name.split(\"/\")[-1], history_id, file_type=filetype, dbkey=dbkey)\n uploaded_files.append(ndfilea.name.split(\"/\")[-1])\n if len(samples_b) > 1:\n 
check_call([\"lftp -u \" + galaxyemail + \":\" + galaxypass + \" \" + ftp + \" -e \\\"put \" + ndfileb.name + \"; bye\\\"\"], shell=True)\n gi.tools.upload_from_ftp(ndfileb.name.split(\"/\")[-1], history_id, file_type=filetype, dbkey=dbkey)\n uploaded_files.append(ndfileb.name.split(\"/\")[-1])\n ndfilea.close()\n ndfileb.close()\n call([\"rm\", ndfilea.name])\n call([\"rm\", ndfileb.name])\n else:\n check_call([\"lftp -u \" + galaxyemail + \":\" + galaxypass + \" \" + ftp + \" -e \\\"put \" + tfile.name + \"; bye\\\"\"], shell=True)\n gi.tools.upload_from_ftp(tfile.name.split(\"/\")[-1], history_id, file_type=filetype, dbkey=dbkey)\n uploaded_files.append(tfile.name.split(\"/\")[-1])\n call([\"rm\", dfile.name])\n call([\"rm\", tfile.name])\n hist = gi.histories.show_history(history_id)\n state = hist['state_ids']\n dump = json.dumps(state)\n status = json.loads(dump)\n # Stop process after workflow is done\n while status['running'] or status['queued'] or status['new'] or status['upload']:\n time.sleep(20)\n hist = gi.histories.show_history(history_id)\n state = hist['state_ids']\n dump = json.dumps(state)\n status = json.loads(dump)\n if not status['running'] and not status['queued'] and not status['new'] and not status['upload']:\n for uf in uploaded_files:\n check_call([\"lftp -u \" + galaxyemail + \":\" + galaxypass + \" \" + ftp + \" -e \\\"rm -r \" + uf + \"; bye\\\"\"], shell=True)\n break", "def ReadFilesGenerator(self):\n\n for file in self._file_names:\n file_list = []\n\n # TODO see further into yielding one line at a time\n with open(file, 'r', encoding='mbcs') as sped:\n file_list = sped.read().splitlines()\n\n if not self.isSigned(file_list):\n file_list = self.stripSignature(file_list)\n\n yield file, file_list", "def __main__() :\n try :\n poly = Polyhedre(sys.argv[1])\n \n name = sys.argv[2]\n \n createAllFiles(poly, name)\n\n createAllImageFiles(poly, name)\n \n except FileNotFoundError :\n print(\"Use an existing file\")", "def cluster_files_reader(\n files_pattern, trainer_count, trainer_id, loader=pickle.load\n):\n\n def reader():\n if not callable(loader):\n raise TypeError(\"loader should be callable.\")\n file_list = glob.glob(files_pattern)\n file_list.sort()\n my_file_list = []\n for idx, fn in enumerate(file_list):\n if idx % trainer_count == trainer_id:\n print(\"append file: %s\" % fn)\n my_file_list.append(fn)\n for fn in my_file_list:\n with open(fn, \"r\") as f:\n lines = loader(f)\n for line in lines:\n yield line\n\n return reader", "def generate_all_files():\n for (name, fn) in lang_module.targets.items():\n path = of_g.options.install_dir + '/' + name\n os.system(\"mkdir -p %s\" % os.path.dirname(path))\n with open(path, \"w\") as outfile:\n fn(outfile, os.path.basename(name))\n print(\"Wrote contents for \" + name)", "def create_intermediate_files():\n\n dirs = (DIR_PAGE, DIR_SRGB, DIR_VTI, DIR_TIFF,\n DIR_BACK, DIR_TEXT, DIR_FINAL)\n \n for dir in dirs:\n try:\n os.mkdir(os.path.join(cwd, dir))\n except OSError, e:\n print 'directory (', dir, ') already exists'", "def createFileNames(nFileNames, seqPrefix):\n nameList = []\n nameList = [seqPrefix+str(i)+\".txt\" for i in range(0, nFileNames)]\n return nameList" ]
[ "0.6580419", "0.6510424", "0.6231249", "0.6147335", "0.61276037", "0.60895264", "0.60482085", "0.6024064", "0.59860575", "0.5985176", "0.59534013", "0.58648974", "0.58312774", "0.582473", "0.5797607", "0.57878214", "0.5783304", "0.5764239", "0.5764155", "0.57330567", "0.57255614", "0.5718252", "0.5709521", "0.57032084", "0.5697962", "0.56756985", "0.566487", "0.56636274", "0.5657628", "0.5656732", "0.56532615", "0.56341976", "0.56291026", "0.56227785", "0.56201327", "0.5619428", "0.5598465", "0.55940336", "0.5582068", "0.55791914", "0.5574769", "0.5560993", "0.55513716", "0.5541963", "0.5538633", "0.553857", "0.55278987", "0.55218", "0.5521479", "0.55084246", "0.55032897", "0.55017984", "0.5499934", "0.54984534", "0.549777", "0.5489078", "0.5488862", "0.5488862", "0.54859793", "0.5485081", "0.5483671", "0.5461065", "0.5458162", "0.5456077", "0.5454121", "0.545071", "0.5444137", "0.5443259", "0.5426801", "0.5419762", "0.5416223", "0.5400342", "0.53853863", "0.5385211", "0.5380632", "0.5379968", "0.5376369", "0.53738487", "0.53737116", "0.5361425", "0.53608066", "0.53544563", "0.53528625", "0.53528196", "0.53511333", "0.5350633", "0.535027", "0.5348393", "0.53436047", "0.53356653", "0.5332329", "0.53315866", "0.53304213", "0.532574", "0.53245664", "0.532283", "0.53178877", "0.53165895", "0.53160435", "0.5316001" ]
0.6638289
0
Extract constant names from sybdb.h to use as python constants
def extract_constants(freetds_include="sybdb.h", constants_file="bcp_constants.py"):
    fileno, source_file = mkstemp(suffix=".c", text=True)
    write(fileno, "#include <{}>".format(freetds_include).encode())
    close(fileno)
    fileno, include_directives = mkstemp(suffix=".txt")
    close(fileno)
    if ON_WINDOWS:
        cmd_template = "cl /E {includes} {source} > {output}"
    else:
        cmd_template = "cpp {includes} '{source}' > '{output}'"
    cmd = cmd_template.format(
        output=normpath(include_directives),
        source=normpath(source_file),
        includes=" ".join(
            "-I{}".format(normpath(_include)) for _include in include_dirs
        )
    )
    fifo = Popen(cmd, shell=True, stdin=None, stdout=None, stderr=None, close_fds=True)
    fifo.communicate()
    fifo.wait()
    remove(source_file)
    if fifo.returncode < 0:
        raise Exception("Cannot run preprocessor step")
    row_regex = re.compile('[\r\n]+')
    field_regex = re.compile('[\s]+')
    with open(include_directives, "r") as fd:
        include_paths = list(
            _filename
            for contents in [fd.read()]
            for _row in row_regex.split(contents)
            if _row.find(freetds_include) > -1
            for _index, _word in enumerate(field_regex.split(_row))
            if _index == 2
            for _filename in [_word.strip('"')]
            if exists(_filename)
        )
    remove(include_directives)
    for include_file in include_paths:
        with open(include_file, "r") as fd:
            definition_pairs = [
                (_values[1], int(_values[2]))
                for contents in [fd.read()]
                for _row in row_regex.split(contents)
                for _values in [field_regex.split(_row)]
                if len(_values) == 3 and _values[0] == "#define" and _values[2].isdigit()
            ]
        if len(definition_pairs):
            with open(constants_file, "w") as output_fd:
                output_fd.write("\n".join("%s=%d" % _row for _row in definition_pairs))
            break
    else:
        raise Exception("Couldn't find a freetds include file")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_declarations(self):\n return \"extern const unsigned int %s;\\n\" % self.name", "def get_calculable_constant_names_latex():\n return r\"t_0\", r\"S_{rr}\", r\"S_{r\\theta}\", r\"S_{rz}\", r\"S_{zz}\" \\\n r\"\\alpha\", r\"\\beta\", r\"\\gamma\", r\"C_{13}\", r\"C_{33}\", \\\n r\"\\hat{E}\", r\"g_1\"", "def _parseKeyNames(lib):\n _keyNames = {}\n for attr in dir(lib): # from the modules variables\n if attr[:6] == 'TCODK_': # get the K_* constants\n _keyNames[getattr(lib, attr)] = attr[6:] # and make CODE=NAME pairs\n return _keyNames", "def get_definitions(self):\n return \"const unsigned int %s = 0x%xu;\\n\" % (self.name, self.address)", "def get_consts(self):\n consts = []\n for key in self.constants:\n consts.append({\n 'key': key,\n 'value': self.constants[key],\n })\n return consts", "def get_predefined_constant_names_latex():\n return \"t_0/t_g\", \"t_g\", r\"\\dot{\\varepsilon}\", \\\n \"E_1\", \"E_3\", r\"\\nu_{21}\", r\"\\nu_{31}\"", "def get_defined_constants():\n raise NotImplementedError()", "def consts(consts):\n\n namespace = { }\n\n for c in consts:\n constname = c[\"constname\"]\n consttype = c[\"consttype\"]\n constval = c[\"constval\"]\n\n # Correct various values that won't evaluate in python.\n if constval == \"( SteamItemInstanceID_t ) ~ 0\":\n constval = \"-1\"\n elif constval == \"( ( uint32 ) 'd' << 16U ) | ( ( uint32 ) 'e' << 8U ) | ( uint32 ) 'v'\":\n constval = \"6579574\"\n else:\n constval = re.sub(r\"(0x[0-9a-fA-F]*)ull\", r\"\\1\", constval)\n\n # Evaluate the result, and place it into the namespace.\n value = eval(constval, namespace, namespace)\n namespace[constname] = value\n\n # Generate.\n mapped = map_type(consttype)\n\n if value > 0:\n p(f\"{constname} = {mapped}(0x{value:x})\")\n else:\n p(f\"{constname} = {mapped}({value})\")", "def constants(self):\n return self._constants", "def get_constants(prefix):\n return {getattr(socket, name): name \n for name in dir(socket) if name.startswith(prefix)}", "def parse_defines(self):\n for line in self.header.splitlines():\n if line.lower().startswith(\"#define\"):\n _, line = line.strip().split(None, 1) # remove #define\n if \" \" in line:\n symbol, value = line.split(None, 1)\n if value.isdigit():\n value = int(value)\n elif value.startswith(\"0x\"):\n value = int(value, 16)\n elif value in self.types:\n self.types[symbol] = self.types[value]\n else:\n symbol = line\n value = \"\"\n self.constants[symbol] = value\n return self.constants", "def get_constants(prefix):\n return {\n getattr(socket, n): n\n for n in dir(socket)\n if n.startswith(prefix)\n }", "def get_constants(prefix):\n return dict( (getattr(socket, n), n)\n for n in dir(socket)\n if n.startswith(prefix)\n )", "def gen_cheader(protocol):\n\ts = \"\"\"/* Junior Design Sp2018 Final Project\n * Robot Firmware - RPi <-> Microcontroller Communication\n * Nick Ames 2018\n * WARNING: This file is automatically generated by gen-files.py\n * Any changes you make will be erased.\n */\n#include <stdfix.h>\n#include <stdint.h>\n#include \"config.h\"\n\n\"\"\"\n\ts += \"struct comm_data_t {\\n\"\n\tfor r in protocol:\n\t\ts += \"\\t\" + r.size + \" \" + r.name + \"; /* \" + r.desc + \" */\\n\"\n\ts += \"};\\n\\n\"\n\tfor r in protocol:\n\t\ts += \"%s get_%s(void); /* %s */\\n\"%(r.size, r.name, r.desc)\n\t\ts += \"void set_%s(%s); /* %s */\\n\\n\"%(r.name, r.size, r.desc)\n\ts += \"\"\"extern volatile struct comm_data_t Data;\"\"\"\n\treturn s", "def package_macros(self):\n from re import sub\n NAME = sub(r'[\\.\\-\\s]', '_', self.name.upper())\n 
return [('HAVE_' + NAME, '1')]", "def compose_defines():\n return \"\"\"\nLIBPBDATA_INC ?=../pbdata\nLIBPBIHDF_INC ?=../hdf\nLIBBLASR_INC ?=../alignment\nLIBPBDATA_LIB ?=%(thisdir)s/pbdata/\nLIBPBIHDF_LIB ?=%(thisdir)s/hdf/\nLIBBLASR_LIB ?=%(thisdir)s/alignment/\nnohdf ?=1\n\"\"\"%(dict(thisdir=thisdir))", "def include_constants_pi():\n return f\"\"\"\n#define PI_F 3.14159274101257f\n#define PI_2_F 1.57079637050629f\n#define PI_4_F 0.78539818525314f\n\"\"\"", "def get_constants_list(self):\n return [self.D1, self.D2, self.A1, self.A2, \\\n self.F1, self.F2, self.S12]", "def get_constants(self):\n temp = self._properties.get('constants', [])\n return temp", "def constants(self):\n return self.bot.constants", "def getCDefinesAsString( targetPlatform, targetName ):\n Any.requireIsTextNonEmpty( targetPlatform )\n Any.requireIsTextNonEmpty( targetName )\n\n fileName = os.path.join( 'build/%s/CMakeFiles/%s.dir/flags.make' %\n ( targetPlatform, targetName ) )\n\n Any.requireIsDirNonEmpty( 'build/%s' % targetPlatform )\n Any.requireIsFileNonEmpty( fileName )\n\n # read-in ground truth information\n logging.debug( 'parsing %s' % fileName )\n content = FastScript.getFileContent( fileName, splitLines=True )\n raw_C = ''\n raw_CPP = ''\n raw_C_CFLAGS = ''\n raw_CPP_CFLAGS = ''\n regexp_C = re.compile( '^C_DEFINES\\s=\\s+(.*)$' )\n regexp_CPP = re.compile( '^CXX_DEFINES\\s=\\s+(.*)$' )\n regexp_C_CFLAGS = re.compile( '^C_FLAGS\\s=\\s+(.*)$' )\n regexp_CPP_CFLAGS = re.compile( '^CXX_FLAGS\\s=\\s+(.*)$' )\n result = ''\n\n for line in content:\n tmp = regexp_C.search( line )\n\n if tmp:\n raw_C = tmp.group( 1 )\n # logging.debug( 'raw C defines: %s' % raw_C )\n\n tmp = regexp_CPP.search( line )\n\n if tmp:\n raw_CPP = tmp.group( 1 )\n # logging.debug( 'raw CPP defines: %s' % raw_CPP )\n\n tmp = regexp_C_CFLAGS.search(line)\n\n if tmp:\n raw_C_CFLAGS = tmp.group(1)\n\n tmp = regexp_CPP_CFLAGS.search(line)\n\n if tmp:\n raw_CPP_CFLAGS = tmp.group(1)\n\n candidates = ( shlex.split( raw_C ) +\n shlex.split( raw_CPP ) +\n shlex.split( raw_C_CFLAGS ) +\n shlex.split( raw_CPP_CFLAGS ) )\n\n for candidate in candidates:\n if candidate.startswith( '-D' ):\n result += candidate + ' '\n\n return result", "def load_constants():\r\n marker_dictionary = dict()\r\n marker_dictionary[\"SP\"] = SP\r\n marker_dictionary[\"LCL\"] = LCL\r\n marker_dictionary[\"ARG\"] = ARG\r\n marker_dictionary[\"THIS\"] = THIS\r\n marker_dictionary[\"THAT\"] = THAT\r\n marker_dictionary[\"SCREEN\"] = SCREEN\r\n marker_dictionary[\"KBD\"] = KBD\r\n for i in range(0, RAM_RESERVE_END):\r\n marker_dictionary[\"R\"+str(i)] = i\r\n return marker_dictionary", "def constants(self):\n return self._constants", "def get_constants(self):\n return self.D1, self.D2, self.A1, self.A2, \\\n self.F1, self.F2, self.S12", "def list_syms():\n\tSymStringVec=[];\n\tSymStringVec.append(\"CSYM\");\n\tSymStringVec.append(\"DSYM\");\n\tSymStringVec.append(\"TET_SYM\");\n\tSymStringVec.append(\"OCT_SYM\");\n\tSymStringVec.append(\"ICOS_SYM\");\n\tSymStringVec.append(\"ISYM\");\n\treturn SymStringVec", "def GetDefineGuardSymbol(file_name):\n return os.path.basename(file_name).upper().replace('.', '_')", "def gyp_defines():\n return dict(arg.split('=', 1)\n for arg in shlex.split(os.environ.get('GYP_DEFINES', '')))", "def normalize_const(var_name):\n return var_name.lower().split('_')", "def cblas_header_text():\r\n\r\n return \"\"\"\r\n //#include <stddef.h>\r\n\r\n #undef __BEGIN_DECLS\r\n #undef __END_DECLS\r\n #ifdef __cplusplus\r\n #define __BEGIN_DECLS extern 
\"C\" {\r\n #define __END_DECLS }\r\n #else\r\n #define __BEGIN_DECLS /* empty */\r\n #define __END_DECLS /* empty */\r\n #endif\r\n\r\n __BEGIN_DECLS\r\n\r\n #define MOD %\r\n\r\n /*\r\n * Enumerated and derived types\r\n */\r\n #define CBLAS_INDEX size_t /* this may vary between platforms */\r\n\r\n enum CBLAS_ORDER {CblasRowMajor=101, CblasColMajor=102};\r\n enum CBLAS_TRANSPOSE {CblasNoTrans=111, CblasTrans=112, CblasConjTrans=113};\r\n enum CBLAS_UPLO {CblasUpper=121, CblasLower=122};\r\n enum CBLAS_DIAG {CblasNonUnit=131, CblasUnit=132};\r\n enum CBLAS_SIDE {CblasLeft=141, CblasRight=142};\r\n\r\n float cblas_sdsdot(const int N, const float alpha, const float *X,\r\n const int incX, const float *Y, const int incY);\r\n double cblas_dsdot(const int N, const float *X, const int incX, const float *Y,\r\n const int incY);\r\n float cblas_sdot(const int N, const float *X, const int incX,\r\n const float *Y, const int incY);\r\n double cblas_ddot(const int N, const double *X, const int incX,\r\n const double *Y, const int incY);\r\n\r\n /*\r\n * Functions having prefixes Z and C only\r\n */\r\n void cblas_cdotu_sub(const int N, const void *X, const int incX,\r\n const void *Y, const int incY, void *dotu);\r\n void cblas_cdotc_sub(const int N, const void *X, const int incX,\r\n const void *Y, const int incY, void *dotc);\r\n\r\n void cblas_zdotu_sub(const int N, const void *X, const int incX,\r\n const void *Y, const int incY, void *dotu);\r\n void cblas_zdotc_sub(const int N, const void *X, const int incX,\r\n const void *Y, const int incY, void *dotc);\r\n\r\n\r\n /*\r\n * Functions having prefixes S D SC DZ\r\n */\r\n float cblas_snrm2(const int N, const float *X, const int incX);\r\n float cblas_sasum(const int N, const float *X, const int incX);\r\n\r\n double cblas_dnrm2(const int N, const double *X, const int incX);\r\n double cblas_dasum(const int N, const double *X, const int incX);\r\n\r\n float cblas_scnrm2(const int N, const void *X, const int incX);\r\n float cblas_scasum(const int N, const void *X, const int incX);\r\n\r\n double cblas_dznrm2(const int N, const void *X, const int incX);\r\n double cblas_dzasum(const int N, const void *X, const int incX);\r\n\r\n\r\n /*\r\n * Functions having standard 4 prefixes (S D C Z)\r\n */\r\n CBLAS_INDEX cblas_isamax(const int N, const float *X, const int incX);\r\n CBLAS_INDEX cblas_idamax(const int N, const double *X, const int incX);\r\n CBLAS_INDEX cblas_icamax(const int N, const void *X, const int incX);\r\n CBLAS_INDEX cblas_izamax(const int N, const void *X, const int incX);\r\n\r\n /*\r\n * ===========================================================================\r\n * Prototypes for level 1 BLAS routines\r\n * ===========================================================================\r\n */\r\n\r\n /* \r\n * Routines with standard 4 prefixes (s, d, c, z)\r\n */\r\n void cblas_sswap(const int N, float *X, const int incX, \r\n float *Y, const int incY);\r\n void cblas_scopy(const int N, const float *X, const int incX, \r\n float *Y, const int incY);\r\n void cblas_saxpy(const int N, const float alpha, const float *X,\r\n const int incX, float *Y, const int incY);\r\n\r\n void cblas_dswap(const int N, double *X, const int incX, \r\n double *Y, const int incY);\r\n void cblas_dcopy(const int N, const double *X, const int incX, \r\n double *Y, const int incY);\r\n void cblas_daxpy(const int N, const double alpha, const double *X,\r\n const int incX, double *Y, const int incY);\r\n\r\n void cblas_cswap(const int N, void *X, const 
int incX, \r\n void *Y, const int incY);\r\n void cblas_ccopy(const int N, const void *X, const int incX, \r\n void *Y, const int incY);\r\n void cblas_caxpy(const int N, const void *alpha, const void *X,\r\n const int incX, void *Y, const int incY);\r\n\r\n void cblas_zswap(const int N, void *X, const int incX, \r\n void *Y, const int incY);\r\n void cblas_zcopy(const int N, const void *X, const int incX, \r\n void *Y, const int incY);\r\n void cblas_zaxpy(const int N, const void *alpha, const void *X,\r\n const int incX, void *Y, const int incY);\r\n\r\n\r\n /* \r\n * Routines with S and D prefix only\r\n */\r\n void cblas_srotg(float *a, float *b, float *c, float *s);\r\n void cblas_srotmg(float *d1, float *d2, float *b1, const float b2, float *P);\r\n void cblas_srot(const int N, float *X, const int incX,\r\n float *Y, const int incY, const float c, const float s);\r\n void cblas_srotm(const int N, float *X, const int incX,\r\n float *Y, const int incY, const float *P);\r\n\r\n void cblas_drotg(double *a, double *b, double *c, double *s);\r\n void cblas_drotmg(double *d1, double *d2, double *b1, const double b2, double *P);\r\n void cblas_drot(const int N, double *X, const int incX,\r\n double *Y, const int incY, const double c, const double s);\r\n void cblas_drotm(const int N, double *X, const int incX,\r\n double *Y, const int incY, const double *P);\r\n\r\n\r\n /* \r\n * Routines with S D C Z CS and ZD prefixes\r\n */\r\n void cblas_sscal(const int N, const float alpha, float *X, const int incX);\r\n void cblas_dscal(const int N, const double alpha, double *X, const int incX);\r\n void cblas_cscal(const int N, const void *alpha, void *X, const int incX);\r\n void cblas_zscal(const int N, const void *alpha, void *X, const int incX);\r\n void cblas_csscal(const int N, const float alpha, void *X, const int incX);\r\n void cblas_zdscal(const int N, const double alpha, void *X, const int incX);\r\n\r\n /*\r\n * ===========================================================================\r\n * Prototypes for level 2 BLAS\r\n * ===========================================================================\r\n */\r\n\r\n /* \r\n * Routines with standard 4 prefixes (S, D, C, Z)\r\n */\r\n void cblas_sgemv(const enum CBLAS_ORDER order,\r\n const enum CBLAS_TRANSPOSE TransA, const int M, const int N,\r\n const float alpha, const float *A, const int lda,\r\n const float *X, const int incX, const float beta,\r\n float *Y, const int incY);\r\n void cblas_sgbmv(const enum CBLAS_ORDER order,\r\n const enum CBLAS_TRANSPOSE TransA, const int M, const int N,\r\n const int KL, const int KU, const float alpha,\r\n const float *A, const int lda, const float *X,\r\n const int incX, const float beta, float *Y, const int incY);\r\n void cblas_strmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,\r\n const int N, const float *A, const int lda, \r\n float *X, const int incX);\r\n void cblas_stbmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,\r\n const int N, const int K, const float *A, const int lda, \r\n float *X, const int incX);\r\n void cblas_stpmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,\r\n const int N, const float *Ap, float *X, const int incX);\r\n void cblas_strsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const enum CBLAS_TRANSPOSE TransA, const 
enum CBLAS_DIAG Diag,\r\n const int N, const float *A, const int lda, float *X,\r\n const int incX);\r\n void cblas_stbsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,\r\n const int N, const int K, const float *A, const int lda,\r\n float *X, const int incX);\r\n void cblas_stpsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,\r\n const int N, const float *Ap, float *X, const int incX);\r\n\r\n void cblas_dgemv(const enum CBLAS_ORDER order,\r\n const enum CBLAS_TRANSPOSE TransA, const int M, const int N,\r\n const double alpha, const double *A, const int lda,\r\n const double *X, const int incX, const double beta,\r\n double *Y, const int incY);\r\n void cblas_dgbmv(const enum CBLAS_ORDER order,\r\n const enum CBLAS_TRANSPOSE TransA, const int M, const int N,\r\n const int KL, const int KU, const double alpha,\r\n const double *A, const int lda, const double *X,\r\n const int incX, const double beta, double *Y, const int incY);\r\n void cblas_dtrmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,\r\n const int N, const double *A, const int lda, \r\n double *X, const int incX);\r\n void cblas_dtbmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,\r\n const int N, const int K, const double *A, const int lda, \r\n double *X, const int incX);\r\n void cblas_dtpmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,\r\n const int N, const double *Ap, double *X, const int incX);\r\n void cblas_dtrsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,\r\n const int N, const double *A, const int lda, double *X,\r\n const int incX);\r\n void cblas_dtbsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,\r\n const int N, const int K, const double *A, const int lda,\r\n double *X, const int incX);\r\n void cblas_dtpsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,\r\n const int N, const double *Ap, double *X, const int incX);\r\n\r\n void cblas_cgemv(const enum CBLAS_ORDER order,\r\n const enum CBLAS_TRANSPOSE TransA, const int M, const int N,\r\n const void *alpha, const void *A, const int lda,\r\n const void *X, const int incX, const void *beta,\r\n void *Y, const int incY);\r\n void cblas_cgbmv(const enum CBLAS_ORDER order,\r\n const enum CBLAS_TRANSPOSE TransA, const int M, const int N,\r\n const int KL, const int KU, const void *alpha,\r\n const void *A, const int lda, const void *X,\r\n const int incX, const void *beta, void *Y, const int incY);\r\n void cblas_ctrmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,\r\n const int N, const void *A, const int lda, \r\n void *X, const int incX);\r\n void cblas_ctbmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,\r\n const int N, const int K, const void *A, const int lda, \r\n void *X, const int incX);\r\n void cblas_ctpmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const enum 
CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,\r\n const int N, const void *Ap, void *X, const int incX);\r\n void cblas_ctrsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,\r\n const int N, const void *A, const int lda, void *X,\r\n const int incX);\r\n void cblas_ctbsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,\r\n const int N, const int K, const void *A, const int lda,\r\n void *X, const int incX);\r\n void cblas_ctpsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,\r\n const int N, const void *Ap, void *X, const int incX);\r\n\r\n void cblas_zgemv(const enum CBLAS_ORDER order,\r\n const enum CBLAS_TRANSPOSE TransA, const int M, const int N,\r\n const void *alpha, const void *A, const int lda,\r\n const void *X, const int incX, const void *beta,\r\n void *Y, const int incY);\r\n void cblas_zgbmv(const enum CBLAS_ORDER order,\r\n const enum CBLAS_TRANSPOSE TransA, const int M, const int N,\r\n const int KL, const int KU, const void *alpha,\r\n const void *A, const int lda, const void *X,\r\n const int incX, const void *beta, void *Y, const int incY);\r\n void cblas_ztrmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,\r\n const int N, const void *A, const int lda, \r\n void *X, const int incX);\r\n void cblas_ztbmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,\r\n const int N, const int K, const void *A, const int lda, \r\n void *X, const int incX);\r\n void cblas_ztpmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,\r\n const int N, const void *Ap, void *X, const int incX);\r\n void cblas_ztrsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,\r\n const int N, const void *A, const int lda, void *X,\r\n const int incX);\r\n void cblas_ztbsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,\r\n const int N, const int K, const void *A, const int lda,\r\n void *X, const int incX);\r\n void cblas_ztpsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,\r\n const int N, const void *Ap, void *X, const int incX);\r\n\r\n\r\n /* \r\n * Routines with S and D prefixes only\r\n */\r\n void cblas_ssymv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const int N, const float alpha, const float *A,\r\n const int lda, const float *X, const int incX,\r\n const float beta, float *Y, const int incY);\r\n void cblas_ssbmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const int N, const int K, const float alpha, const float *A,\r\n const int lda, const float *X, const int incX,\r\n const float beta, float *Y, const int incY);\r\n void cblas_sspmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const int N, const float alpha, const float *Ap,\r\n const float *X, const int incX,\r\n const float beta, float *Y, const int incY);\r\n void cblas_sger(const enum CBLAS_ORDER order, const int M, const int N,\r\n const float alpha, const float *X, const int incX,\r\n const float *Y, const 
int incY, float *A, const int lda);\r\n void cblas_ssyr(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const int N, const float alpha, const float *X,\r\n const int incX, float *A, const int lda);\r\n void cblas_sspr(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const int N, const float alpha, const float *X,\r\n const int incX, float *Ap);\r\n void cblas_ssyr2(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const int N, const float alpha, const float *X,\r\n const int incX, const float *Y, const int incY, float *A,\r\n const int lda);\r\n void cblas_sspr2(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const int N, const float alpha, const float *X,\r\n const int incX, const float *Y, const int incY, float *A);\r\n\r\n void cblas_dsymv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const int N, const double alpha, const double *A,\r\n const int lda, const double *X, const int incX,\r\n const double beta, double *Y, const int incY);\r\n void cblas_dsbmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const int N, const int K, const double alpha, const double *A,\r\n const int lda, const double *X, const int incX,\r\n const double beta, double *Y, const int incY);\r\n void cblas_dspmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const int N, const double alpha, const double *Ap,\r\n const double *X, const int incX,\r\n const double beta, double *Y, const int incY);\r\n void cblas_dger(const enum CBLAS_ORDER order, const int M, const int N,\r\n const double alpha, const double *X, const int incX,\r\n const double *Y, const int incY, double *A, const int lda);\r\n void cblas_dsyr(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const int N, const double alpha, const double *X,\r\n const int incX, double *A, const int lda);\r\n void cblas_dspr(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const int N, const double alpha, const double *X,\r\n const int incX, double *Ap);\r\n void cblas_dsyr2(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const int N, const double alpha, const double *X,\r\n const int incX, const double *Y, const int incY, double *A,\r\n const int lda);\r\n void cblas_dspr2(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const int N, const double alpha, const double *X,\r\n const int incX, const double *Y, const int incY, double *A);\r\n\r\n\r\n /* \r\n * Routines with C and Z prefixes only\r\n */\r\n void cblas_chemv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const int N, const void *alpha, const void *A,\r\n const int lda, const void *X, const int incX,\r\n const void *beta, void *Y, const int incY);\r\n void cblas_chbmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const int N, const int K, const void *alpha, const void *A,\r\n const int lda, const void *X, const int incX,\r\n const void *beta, void *Y, const int incY);\r\n void cblas_chpmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const int N, const void *alpha, const void *Ap,\r\n const void *X, const int incX,\r\n const void *beta, void *Y, const int incY);\r\n void cblas_cgeru(const enum CBLAS_ORDER order, const int M, const int N,\r\n const void *alpha, const void *X, const int incX,\r\n const void *Y, const int incY, void *A, const int lda);\r\n void cblas_cgerc(const enum CBLAS_ORDER order, const int M, const int N,\r\n const void *alpha, const void *X, const int incX,\r\n const void *Y, const int incY, 
void *A, const int lda);\r\n void cblas_cher(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const int N, const float alpha, const void *X, const int incX,\r\n void *A, const int lda);\r\n void cblas_chpr(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const int N, const float alpha, const void *X,\r\n const int incX, void *A);\r\n void cblas_cher2(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, const int N,\r\n const void *alpha, const void *X, const int incX,\r\n const void *Y, const int incY, void *A, const int lda);\r\n void cblas_chpr2(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, const int N,\r\n const void *alpha, const void *X, const int incX,\r\n const void *Y, const int incY, void *Ap);\r\n\r\n void cblas_zhemv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const int N, const void *alpha, const void *A,\r\n const int lda, const void *X, const int incX,\r\n const void *beta, void *Y, const int incY);\r\n void cblas_zhbmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const int N, const int K, const void *alpha, const void *A,\r\n const int lda, const void *X, const int incX,\r\n const void *beta, void *Y, const int incY);\r\n void cblas_zhpmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const int N, const void *alpha, const void *Ap,\r\n const void *X, const int incX,\r\n const void *beta, void *Y, const int incY);\r\n void cblas_zgeru(const enum CBLAS_ORDER order, const int M, const int N,\r\n const void *alpha, const void *X, const int incX,\r\n const void *Y, const int incY, void *A, const int lda);\r\n void cblas_zgerc(const enum CBLAS_ORDER order, const int M, const int N,\r\n const void *alpha, const void *X, const int incX,\r\n const void *Y, const int incY, void *A, const int lda);\r\n void cblas_zher(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const int N, const double alpha, const void *X, const int incX,\r\n void *A, const int lda);\r\n void cblas_zhpr(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const int N, const double alpha, const void *X,\r\n const int incX, void *A);\r\n void cblas_zher2(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, const int N,\r\n const void *alpha, const void *X, const int incX,\r\n const void *Y, const int incY, void *A, const int lda);\r\n void cblas_zhpr2(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, const int N,\r\n const void *alpha, const void *X, const int incX,\r\n const void *Y, const int incY, void *Ap);\r\n\r\n /*\r\n * ===========================================================================\r\n * Prototypes for level 3 BLAS\r\n * ===========================================================================\r\n */\r\n\r\n /* \r\n * Routines with standard 4 prefixes (S, D, C, Z)\r\n */\r\n void cblas_sgemm(const enum CBLAS_ORDER Order, const enum CBLAS_TRANSPOSE TransA,\r\n const enum CBLAS_TRANSPOSE TransB, const int M, const int N,\r\n const int K, const float alpha, const float *A,\r\n const int lda, const float *B, const int ldb,\r\n const float beta, float *C, const int ldc);\r\n void cblas_ssymm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,\r\n const enum CBLAS_UPLO Uplo, const int M, const int N,\r\n const float alpha, const float *A, const int lda,\r\n const float *B, const int ldb, const float beta,\r\n float *C, const int ldc);\r\n void cblas_ssyrk(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,\r\n const enum CBLAS_TRANSPOSE Trans, const int N, 
const int K,\r\n const float alpha, const float *A, const int lda,\r\n const float beta, float *C, const int ldc);\r\n void cblas_ssyr2k(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,\r\n const enum CBLAS_TRANSPOSE Trans, const int N, const int K,\r\n const float alpha, const float *A, const int lda,\r\n const float *B, const int ldb, const float beta,\r\n float *C, const int ldc);\r\n void cblas_strmm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,\r\n const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA,\r\n const enum CBLAS_DIAG Diag, const int M, const int N,\r\n const float alpha, const float *A, const int lda,\r\n float *B, const int ldb);\r\n void cblas_strsm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,\r\n const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA,\r\n const enum CBLAS_DIAG Diag, const int M, const int N,\r\n const float alpha, const float *A, const int lda,\r\n float *B, const int ldb);\r\n\r\n void cblas_dgemm(const enum CBLAS_ORDER Order, const enum CBLAS_TRANSPOSE TransA,\r\n const enum CBLAS_TRANSPOSE TransB, const int M, const int N,\r\n const int K, const double alpha, const double *A,\r\n const int lda, const double *B, const int ldb,\r\n const double beta, double *C, const int ldc);\r\n void cblas_dsymm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,\r\n const enum CBLAS_UPLO Uplo, const int M, const int N,\r\n const double alpha, const double *A, const int lda,\r\n const double *B, const int ldb, const double beta,\r\n double *C, const int ldc);\r\n void cblas_dsyrk(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,\r\n const enum CBLAS_TRANSPOSE Trans, const int N, const int K,\r\n const double alpha, const double *A, const int lda,\r\n const double beta, double *C, const int ldc);\r\n void cblas_dsyr2k(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,\r\n const enum CBLAS_TRANSPOSE Trans, const int N, const int K,\r\n const double alpha, const double *A, const int lda,\r\n const double *B, const int ldb, const double beta,\r\n double *C, const int ldc);\r\n void cblas_dtrmm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,\r\n const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA,\r\n const enum CBLAS_DIAG Diag, const int M, const int N,\r\n const double alpha, const double *A, const int lda,\r\n double *B, const int ldb);\r\n void cblas_dtrsm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,\r\n const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA,\r\n const enum CBLAS_DIAG Diag, const int M, const int N,\r\n const double alpha, const double *A, const int lda,\r\n double *B, const int ldb);\r\n\r\n void cblas_cgemm(const enum CBLAS_ORDER Order, const enum CBLAS_TRANSPOSE TransA,\r\n const enum CBLAS_TRANSPOSE TransB, const int M, const int N,\r\n const int K, const void *alpha, const void *A,\r\n const int lda, const void *B, const int ldb,\r\n const void *beta, void *C, const int ldc);\r\n void cblas_csymm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,\r\n const enum CBLAS_UPLO Uplo, const int M, const int N,\r\n const void *alpha, const void *A, const int lda,\r\n const void *B, const int ldb, const void *beta,\r\n void *C, const int ldc);\r\n void cblas_csyrk(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,\r\n const enum CBLAS_TRANSPOSE Trans, const int N, const int K,\r\n const void *alpha, const void *A, const int lda,\r\n const void *beta, void *C, const int ldc);\r\n void cblas_csyr2k(const enum CBLAS_ORDER Order, const 
enum CBLAS_UPLO Uplo,\r\n const enum CBLAS_TRANSPOSE Trans, const int N, const int K,\r\n const void *alpha, const void *A, const int lda,\r\n const void *B, const int ldb, const void *beta,\r\n void *C, const int ldc);\r\n void cblas_ctrmm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,\r\n const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA,\r\n const enum CBLAS_DIAG Diag, const int M, const int N,\r\n const void *alpha, const void *A, const int lda,\r\n void *B, const int ldb);\r\n void cblas_ctrsm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,\r\n const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA,\r\n const enum CBLAS_DIAG Diag, const int M, const int N,\r\n const void *alpha, const void *A, const int lda,\r\n void *B, const int ldb);\r\n\r\n void cblas_zgemm(const enum CBLAS_ORDER Order, const enum CBLAS_TRANSPOSE TransA,\r\n const enum CBLAS_TRANSPOSE TransB, const int M, const int N,\r\n const int K, const void *alpha, const void *A,\r\n const int lda, const void *B, const int ldb,\r\n const void *beta, void *C, const int ldc);\r\n void cblas_zsymm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,\r\n const enum CBLAS_UPLO Uplo, const int M, const int N,\r\n const void *alpha, const void *A, const int lda,\r\n const void *B, const int ldb, const void *beta,\r\n void *C, const int ldc);\r\n void cblas_zsyrk(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,\r\n const enum CBLAS_TRANSPOSE Trans, const int N, const int K,\r\n const void *alpha, const void *A, const int lda,\r\n const void *beta, void *C, const int ldc);\r\n void cblas_zsyr2k(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,\r\n const enum CBLAS_TRANSPOSE Trans, const int N, const int K,\r\n const void *alpha, const void *A, const int lda,\r\n const void *B, const int ldb, const void *beta,\r\n void *C, const int ldc);\r\n void cblas_ztrmm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,\r\n const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA,\r\n const enum CBLAS_DIAG Diag, const int M, const int N,\r\n const void *alpha, const void *A, const int lda,\r\n void *B, const int ldb);\r\n void cblas_ztrsm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,\r\n const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA,\r\n const enum CBLAS_DIAG Diag, const int M, const int N,\r\n const void *alpha, const void *A, const int lda,\r\n void *B, const int ldb);\r\n\r\n\r\n /* \r\n * Routines with prefixes C and Z only\r\n */\r\n void cblas_chemm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,\r\n const enum CBLAS_UPLO Uplo, const int M, const int N,\r\n const void *alpha, const void *A, const int lda,\r\n const void *B, const int ldb, const void *beta,\r\n void *C, const int ldc);\r\n void cblas_cherk(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,\r\n const enum CBLAS_TRANSPOSE Trans, const int N, const int K,\r\n const float alpha, const void *A, const int lda,\r\n const float beta, void *C, const int ldc);\r\n void cblas_cher2k(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,\r\n const enum CBLAS_TRANSPOSE Trans, const int N, const int K,\r\n const void *alpha, const void *A, const int lda,\r\n const void *B, const int ldb, const float beta,\r\n void *C, const int ldc);\r\n\r\n void cblas_zhemm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,\r\n const enum CBLAS_UPLO Uplo, const int M, const int N,\r\n const void *alpha, const void *A, const int lda,\r\n const void *B, const int ldb, const void 
*beta,\r\n void *C, const int ldc);\r\n void cblas_zherk(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,\r\n const enum CBLAS_TRANSPOSE Trans, const int N, const int K,\r\n const double alpha, const void *A, const int lda,\r\n const double beta, void *C, const int ldc);\r\n void cblas_zher2k(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,\r\n const enum CBLAS_TRANSPOSE Trans, const int N, const int K,\r\n const void *alpha, const void *A, const int lda,\r\n const void *B, const int ldb, const double beta,\r\n void *C, const int ldc);\r\n\r\n void cblas_xerbla(int p, const char *rout, const char *form, ...);\r\n\r\n __END_DECLS\r\n \"\"\"", "def get_platform_und_symbols():\n ret = None\n if osname_is_freebsd():\n ret = sorted([\"environ\", \"__progname\"])\n if is_verbose():\n print(\"Checking for required UND symbols... \" + str(ret))\n return ret", "def getcodedesc(code_):\n symname_ = (ctypes.c_char * value.max_str_len)()\n str_ = (ctypes.c_char * value.max_str_len)()\n res = __library__.MSK_XX_getcodedesc(code_,symname_,str_)\n if res != 0:\n raise Error(rescode(res),Env.getcodedesc(rescode(res))[1])\n _symname_retval = symname_.value.decode(\"utf-8\",errors=\"replace\")\n _str_retval = str_.value.decode(\"utf-8\",errors=\"replace\")\n return (_symname_retval,_str_retval)", "def getCDefinesAsList( targetPlatform, targetName ):\n Any.requireIsTextNonEmpty( targetPlatform )\n Any.requireIsTextNonEmpty( targetName )\n\n result = []\n regexp = re.compile( '-D\\s*(.*)' )\n\n for token in getCDefinesAsString( targetPlatform, targetName ).split():\n\n if token.startswith( '-D' ):\n tmp = regexp.search( token )\n item = (tmp.group(1)).strip()\n result.append( item )\n\n return frozenset(result)", "def extract_constants(func):\n const_dict = {}\n params = len(func.params)\n new_func, consts = ExtractConstants().extract_constants(func)\n for i, const in enumerate(consts):\n const_dict[params + i] = const\n\n new_func = tvm.relay.transform.InferType()(tvm.IRModule.from_expr(new_func))[\"main\"]\n return new_func, const_dict", "def get_constant(name, libgmt):\n c_get_enum = libgmt.GMT_Get_Enum\n c_get_enum.argtypes = [ctypes.c_char_p]\n c_get_enum.restype = ctypes.c_int\n\n value = c_get_enum(name.encode())\n\n if value is None or value == -99999:\n raise GMTCLibError(\n \"Constant '{}' doesn't exits in libgmt.\".format(name))\n\n return value", "def t_CCONST(t):\n return t", "def get_addon_consts(self) -> Dict[str, str]:\n from azure.cli.command_modules.acs._consts import (\n ADDONS, CONST_ACC_SGX_QUOTE_HELPER_ENABLED,\n CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME,\n CONST_AZURE_POLICY_ADDON_NAME, CONST_CONFCOM_ADDON_NAME,\n CONST_HTTP_APPLICATION_ROUTING_ADDON_NAME,\n CONST_INGRESS_APPGW_ADDON_NAME,\n CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID,\n CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME,\n CONST_INGRESS_APPGW_SUBNET_CIDR, CONST_INGRESS_APPGW_SUBNET_ID,\n CONST_INGRESS_APPGW_WATCH_NAMESPACE,\n CONST_KUBE_DASHBOARD_ADDON_NAME, CONST_MONITORING_ADDON_NAME,\n CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID,\n CONST_MONITORING_USING_AAD_MSI_AUTH,\n CONST_OPEN_SERVICE_MESH_ADDON_NAME, CONST_ROTATION_POLL_INTERVAL,\n CONST_SECRET_ROTATION_ENABLED, CONST_VIRTUAL_NODE_ADDON_NAME,\n CONST_VIRTUAL_NODE_SUBNET_NAME)\n\n addon_consts = {}\n addon_consts[\"ADDONS\"] = ADDONS\n addon_consts[\n \"CONST_ACC_SGX_QUOTE_HELPER_ENABLED\"\n ] = CONST_ACC_SGX_QUOTE_HELPER_ENABLED\n addon_consts[\n \"CONST_AZURE_POLICY_ADDON_NAME\"\n ] = CONST_AZURE_POLICY_ADDON_NAME\n 
addon_consts[\"CONST_CONFCOM_ADDON_NAME\"] = CONST_CONFCOM_ADDON_NAME\n addon_consts[\n \"CONST_HTTP_APPLICATION_ROUTING_ADDON_NAME\"\n ] = CONST_HTTP_APPLICATION_ROUTING_ADDON_NAME\n addon_consts[\n \"CONST_INGRESS_APPGW_ADDON_NAME\"\n ] = CONST_INGRESS_APPGW_ADDON_NAME\n addon_consts[\n \"CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID\"\n ] = CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID\n addon_consts[\n \"CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME\"\n ] = CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME\n addon_consts[\n \"CONST_INGRESS_APPGW_SUBNET_CIDR\"\n ] = CONST_INGRESS_APPGW_SUBNET_CIDR\n addon_consts[\n \"CONST_INGRESS_APPGW_SUBNET_ID\"\n ] = CONST_INGRESS_APPGW_SUBNET_ID\n addon_consts[\n \"CONST_INGRESS_APPGW_WATCH_NAMESPACE\"\n ] = CONST_INGRESS_APPGW_WATCH_NAMESPACE\n addon_consts[\n \"CONST_KUBE_DASHBOARD_ADDON_NAME\"\n ] = CONST_KUBE_DASHBOARD_ADDON_NAME\n addon_consts[\n \"CONST_MONITORING_ADDON_NAME\"\n ] = CONST_MONITORING_ADDON_NAME\n addon_consts[\n \"CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID\"\n ] = CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID\n addon_consts[\n \"CONST_OPEN_SERVICE_MESH_ADDON_NAME\"\n ] = CONST_OPEN_SERVICE_MESH_ADDON_NAME\n addon_consts[\n \"CONST_VIRTUAL_NODE_ADDON_NAME\"\n ] = CONST_VIRTUAL_NODE_ADDON_NAME\n addon_consts[\n \"CONST_VIRTUAL_NODE_SUBNET_NAME\"\n ] = CONST_VIRTUAL_NODE_SUBNET_NAME\n addon_consts[\n \"CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME\"\n ] = CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME\n addon_consts[\n \"CONST_SECRET_ROTATION_ENABLED\"\n ] = CONST_SECRET_ROTATION_ENABLED\n addon_consts[\n \"CONST_ROTATION_POLL_INTERVAL\"\n ] = CONST_ROTATION_POLL_INTERVAL\n\n addon_consts[\n \"CONST_MONITORING_USING_AAD_MSI_AUTH\"\n ] = CONST_MONITORING_USING_AAD_MSI_AUTH\n return addon_consts", "def get_all_constants():\n return filter(\n lambda key: key.upper() == key and type(globals()[key]) in _ALLOWED,\n\n filter( # filter _PRIVATE variables\n lambda x: not x.startswith(\"_\"),\n globals()\n )\n )", "def get_checked_define_files(self):\n return (self._files['src/config.h'],\n self._files['src/gromacs/simd/simd.h'],\n self._files['src/gromacs/ewald/pme_simd.h'],\n self._files['src/gromacs/nbnxm/nbnxm_simd.h'])", "def get_symbols(self):\n symbols = os.environ.get('SYMBOLS', 'btc,eth')\n if not symbols:\n return 'btc,eth'\n return symbols", "def r_consts(size, used=None):\n return r_symbols(size, CONST_SYMBOLS, ARGS.constant_length, used)", "def get_strings():\n import re\n import pathlib\n\n # the absolute path to the root directory\n rootdir = pathlib.Path(__file__).resolve().parent\n\n # read README.md and overwrite readme\n with open(rootdir.joinpath(\"README.md\"), 'r') as f:\n readme = f.read()\n\n # read __init__.py\n with open(rootdir.joinpath(\"yasynccli\", \"__init__.py\"), 'r') as f:\n content = f.read()\n\n # version\n version = re.search(\"__version__\\s*?=\\s*?(?P<version>\\S+?)$\", content, re.MULTILINE)\n version = version.group(\"version\").strip(\"\\\"\\'\")\n\n # desc\n desc = re.search(\"^\\\"\\\"\\\"(?P<desc>\\S.*?)$\", content, re.MULTILINE)\n desc = desc.group(\"desc\")\n\n return version, desc, readme", "def _get_wgpu_header(*filenames):\n # Read files\n lines1 = []\n for filename in filenames:\n with open(filename) as f:\n lines1.extend(f.readlines())\n # Deal with pre-processor commands, because cffi cannot handle them.\n # Just removing them, plus a few extra lines, seems to do the trick.\n lines2 = []\n for line in lines1:\n if line.startswith(\"#\"):\n continue\n elif 'extern \"C\"' in 
line:\n continue\n line = line.replace(\"WGPU_EXPORT \", \"\")\n lines2.append(line)\n return \"\".join(lines2)", "def print_const(consts):\n for const in consts:\n print ' ',\n change_color_by_tag(const)\n print '{TYPE}'.format(TYPE=const['Type']),\n print '{NAME}'.format(NAME=const['Name']),\n print '={VALUE}'.format(VALUE=const['Value'])", "def get_candidate_names():\n suffix = get_sharedlib_suffix()\n LDLIBRARY = sysconfig.get_config_var(\"LDLIBRARY\")\n if LDLIBRARY:\n yield LDLIBRARY\n\n LIBRARY = sysconfig.get_config_var(\"LIBRARY\")\n if LIBRARY:\n yield os.path.splitext(LIBRARY)[0] + suffix\n\n dlprefix = \"\" if is_windows() else \"lib\"\n sysdata = dict(\n v=sys.version_info,\n # VERSION is X.Y in Linux/macOS and XY in Windows:\n VERSION=(sysconfig.get_config_var(\"VERSION\") or\n \"{v.major}.{v.minor}\".format(v=sys.version_info)),\n ABIFLAGS=(sysconfig.get_config_var(\"ABIFLAGS\") or\n sysconfig.get_config_var(\"abiflags\") or \"\"),\n )\n\n for stem in (\n \"python{VERSION}{ABIFLAGS}\".format(**sysdata),\n \"python{VERSION}\".format(**sysdata),\n \"python{v.major}\".format(**sysdata),\n \"python\"\n ):\n yield dlprefix + stem + suffix", "def getNameCode( self ):\r\n constTuple = self.getValue()\r\n if( constTuple[4] == None ): return [ constTuple[0], None ]\r\n return [ constTuple[0], strip( constTuple[4] ) ]", "def generate_rename_tabled(self, prefix):\n return \"#define %s%s g_symbol_table.%s\" % (prefix, self.__name, self.__name)", "def getHeadersFromSWIG (filename):\n stream = open(filename)\n lines = stream.readlines()\n stream.close()\n\n lines = [line for line in lines if line.strip().startswith('%include')]\n lines = [line for line in lines if line.strip().endswith('.h')]\n return [line.replace('%include', '').strip() for line in lines]", "def GetIndexFileHeaderText(headerinfo):#{{{\n (dbname, version, ext, prefix) = headerinfo\n indexFileHeaderText = []\n indexFileHeaderText.append(\"DEF_VERSION %s\"%(version))\n indexFileHeaderText.append(\"DEF_DBNAME %s\"%(dbname))\n indexFileHeaderText.append(\"DEF_EXTENSION %s\"%(ext))\n indexFileHeaderText.append(\"DEF_PREFIX %s\"%(prefix))\n return indexFileHeaderText", "def get_name_with_lib(datablock):\r\n text = datablock.name\r\n if datablock.library:\r\n # text += ' (Lib: \"%s\")' % datablock.library.name\r\n text = \"L \" + text\r\n return text", "def save_all_consts_to_memory():\n for key, value in symbol_table.dict.items():\n if value[2] is not None:\n save_to_memory(\"a\", \"b\", value[5], value[2])\n # assembler_generator.add_comment(\"# consts saved to memory\")", "def parseNames(self, compiledCode):\n res = []\n if not compiledCode is None:\n res = compiledCode.co_names\n for co in compiledCode.co_consts:\n if not co is None and isinstance(co, CodeType):\n res += co.co_names\n return res", "def get_schema_defs():\n return SCHEMA_DEFS", "def GetHeaderGuardCPPVariable(fn, filename):\n var_parts = list()\n head = filename\n while head:\n head, tail = os.path.split(head)\n var_parts.insert(0, tail)\n if head.endswith('include') or os.path.exists(os.path.join(head, \"package.xml\")) or tail == \"\":\n break\n return re.sub(r'[-./\\s]', '_', \"_\".join(var_parts)).upper()", "def test_cime_constants(e3sm_tag='master'):\n\n resp = requests.get(\n f'https://raw.githubusercontent.com/E3SM-Project/E3SM/{e3sm_tag}/'\n f'share/util/shr_const_mod.F90')\n\n text = resp.text\n\n text = text.split('\\n')\n\n found = {}\n for constant in constants:\n found[constant] = False\n\n for line in text:\n constant, value = 
_parse_value(line)\n if constant is None:\n continue\n print(f'line: {line}')\n print(f'parsed: {constant} = {value}')\n if constant in constants:\n if isinstance(value, float):\n print('verifying {}'.format(constant))\n assert value == constants[constant]\n else:\n print('skipping verification for {}'.format(constant))\n\n found[constant] = True\n else:\n print('not in constants')\n\n print('')\n\n all_found = True\n for constant in found:\n if not found[constant]:\n print('{} was not found!'.format(constant))\n all_found = False\n\n assert all_found", "def gen_get_const_table(cls, names, p, const_p):\n s = \"// Store constant table for {p} to {const_p}\\n\".format(\n const_p = const_p, p = p\n ) \n s += \"{c} = {t}[(((({p}) + 1) * {mul}) >> 8) & 7];\\n\".format(\n c = const_p, p = p, t = names[cls.T_NAME],\n mul = cls.deBruijnMult\n )\n return s", "def update_protocol(filename='../server/Protocol.h'):\n f = open(filename, 'r')\n for line in f.readlines():\n l = list(filter(None, line.split(' ')))\n if l[0] == '#define' and len(l) == 3:\n print(l[1] + '=' + l[2][:-1])", "def get_macros(self):\n return LEVELS.keys()", "def loaded_modules() -> List[str]:\n return PYSTAC_IO.keys()", "def get_all_d_module_info():\n a_local_var = 'this is local variable'\n zzz = 5", "def _get_attrs_symbols():\n return {\n 'True', 'False', 'None', # those are identifiers in Python 2.7\n 'self',\n 'parent',\n 'id',\n 'uid',\n 'context',\n 'context_today',\n 'active_id',\n 'active_ids',\n 'allowed_company_ids',\n 'current_company_id',\n 'active_model',\n 'time',\n 'datetime',\n 'relativedelta',\n 'current_date',\n 'abs',\n 'len',\n 'bool',\n 'float',\n 'str',\n 'unicode',\n }", "def _parse_col_constants() -> Dict[str, List[str]]:\n\n col_type_map = {\n k: [] for k in TEST_ARGUMENT_DATA_TYPES.keys()\n }\n return col_type_map", "def get_glib_cflags():\n\tpkgcmd = os.popen(pkg_config_path +\n ' pkg-config --cflags glib-2.0', 'r')\n pkgcmd_text = pkgcmd.read()\n pkgcmd.close()\n\tincludes = pkgcmd_text.split()\n\tfor x in range(len(includes)):\n\t\tincludes[x] = includes[x][2:]\n\t\t\n\treturn includes", "def getMibSymbol(self):\n if self.__state & self.stClean:\n return self.__modName, self.__symName, self.__indices\n else:\n raise SmiError('%s object not fully initialized' % self.__class__.__name__)", "def generateColumnConsts(data):\n\n model = data[\"model\"]\n global c_col\n global col_prefix\n global col_count\n col_prefix = \"COL_%s\" % model[\"name\"].upper()\n col_count = 0\n for field in data[\"fields\"]:\n if not field.has_key(\"head\"):\n continue\n try:\n c_col.append(\"const int %s_%s = %d;\" % (col_prefix, field[\"name\"].upper(), col_count))\n col_count += 1\n except KeyError:\n pass\n c_col.append(\"const int %s_LAST = %d;\" % (col_prefix, col_count-1))\n c_col.append(\"\\n\")", "def get_symbols(obj_path):\n cmd = ['nm', obj_path]\n res = subprocess.run(cmd, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE, check=True)\n\n return res.stdout.decode()", "def get_constants():\r\n\r\n dict_constants = dict()\r\n dict_constants['LOGIN_URL'] = \"https://yocket.in/account/login\"\r\n dict_constants['PAST_RESULTS_URL'] = \"https://yocket.in/recent-admits-rejects?page=\"\r\n dict_constants['ALL_RESULTS_URL'] = \"https://yocket.in/profiles/find/matching-admits-and-rejects?page=\"\r\n dict_constants['HOME_PAGE'] = 'https://yocket.in/'\r\n dict_constants['NUMBER_PAGE_TO_SCRAPE_FIRST'] = 1\r\n dict_constants['NUMBER_PAGE_TO_SCRAPE_LAST'] = 2\r\n dict_constants['MINIMUM_GPA'] = 7.5\r\n 
dict_constants['MINIMUM_GRE'] = 320\r\n dict_constants['MINIMUM_TOEFL'] = 100\r\n\r\n return dict_constants", "def _codec_names():\n import glob\n import os.path\n\n package_folder = os.path.dirname(__file__)\n for codec_path in glob.glob(os.path.join(package_folder, \"cp*.py\")):\n codec_name = os.path.splitext(os.path.basename(codec_path))[0]\n yield codec_name", "def GetDefineGuardHeaderLines(output_file_name):\n result = []\n result.append(\n '#ifndef %s_PREDICTION_%s_' %(_MOZC_DIR_FOR_DEFINE_GUARD,\n GetDefineGuardSymbol(output_file_name)))\n result.append(\n '#define %s_PREDICTION_%s_' %(_MOZC_DIR_FOR_DEFINE_GUARD,\n GetDefineGuardSymbol(output_file_name)))\n return result", "def get_c_declare(r, name, sub):\r\n pre = \"\"\"\r\n PyObject* py_%(name)s;\r\n \"\"\" % locals()\r\n return pre + r.type.c_declare(name, sub)", "def get_global_variable_names(self):\n return [b\"\".join(_i).strip().decode()\n for _i in self._f.variables[\"name_glo_var\"][:]]", "def name_python_binary_module(self) -> str:\n return f'_ba{self.name_compact}'", "def find_constants_referenced(self, text: str) -> list[str]:\n aList = sorted(set(re.findall(r\"@[A-Za-z_][-A-Za-z0-9_]*\", text)))\n # Exempt references to Leo constructs.\n for s in ('@button', '@constants', '@data', '@language'):\n if s in aList:\n aList.remove(s)\n return aList", "def _get_const_str(self):\n const_components = []\n for k, v in self.constargs.items():\n v_str = f\"'{v}'\" if type(v) is str else str(v)\n const_components.append(f\"{k}={v_str}\")\n return \",\".join(const_components)", "def __getConsts(self, imt):\n\n if 'PGA' in imt:\n c = self.__constants['pga']\n c2 = self.__constants2['pga']\n elif 'PGV' in imt:\n c = self.__constants['pgv']\n c2 = self.__constants2['pgv']\n elif 'SA' in imt:\n pp = imt.period\n if pp == 0.3:\n c = self.__constants['psa03']\n c2 = self.__constants2['psa03']\n elif pp == 1.0:\n c = self.__constants['psa10']\n c2 = self.__constants2['psa10']\n elif pp == 3.0:\n c = self.__constants['psa30']\n c2 = self.__constants2['psa30']\n else:\n raise ValueError(\"Unknown SA period: %f\" % pp)\n else:\n raise ValueError(\"Unknown IMT %r\" % imt)\n return (c, c2)", "def get_symbols(doc, lib):\n\n basename = lib.replace(\".dll\", \"\").lower()\n filename = os.path.join(get_hopper_script_dir(), basename + \".txt\")\n if not os.path.exists(filename):\n doc.log(\"Symbol file not found: %s\" % filename)\n return None\n\n symbols = {}\n with open(filename, \"r\") as fp:\n for i, line in enumerate(fp, 1):\n match = symbol_line.match(line)\n if not match:\n doc.log(\"Skipping line %d: Malformed\" % i)\n continue\n\n ordinal, name = match.group(1), match.group(2)\n if ordinal and name:\n symbols[ordinal] = name\n\n return symbols", "def getBuilderNames():", "def const(self, name):\n return self.get_ground_vector('!Const:{}'.format(name))", "def const(self, name):\n return self.get_ground_vector('!Const:{}'.format(name))", "def const(self, name):\n return self.get_ground_vector('!Const:{}'.format(name))", "def make_get_python_out_struct(self):\n res = \\\n\"\"\"DLLEXPORT ___madz_LANG_python_TYPE_* {}_get_out_struct(){{\n return &___madz_LANG_python_OUTPUT;\n}}\n\n\"\"\"\n return res.format(self.python_mangle)", "def ignore_pyc(root,names):\n return [name for name in names if name.endswith('pyc')]", "def render_const(var_words):\n return '_'.join(var_words).upper()", "def get_name_list(msh, varname):\n return [str(chartostring(v)) for v in msh.variables[varname]]", "def consts(t):\n for elt in t:\n r = repr(elt)\n if 
r.startswith(\"<code object\"):\n yield \"<code object %s>\" % elt.co_name\n else:\n yield r", "def load_constants():\n with open(VARIABLES_DIR / \"constants.yaml\", \"r\", encoding=\"utf-8\") as stream:\n constants = yaml.safe_load(stream)\n\n return constants", "def const(string):\n\n\tconstparams = dict({\n\t\t\t\t\t'kb' : 1.3806e-23,\t# Boltzmann's constant\n\t\t\t\t\t'hbar' : 1.054e-34,\t# Planck's constant\n\t\t\t\t\t'topeta' : 1e15,\t# To peta-\n\t\t\t\t\t'totera' : 1e12,\t# To tera-\n\t\t\t\t\t'togiga' : 1e9,\t# To giga-\n\t\t\t\t\t'tomega' : 1e6,\t# To mega-\n\t\t\t\t\t'tokilo' : 1e3,\t# To kilo-\n\t\t\t\t\t'tocenti' : 1e-2,\t# To centi-\n\t\t\t\t\t'tomilli' : 1e-3,\t# To milli-\n\t\t\t\t\t'tomicro' : 1e-6,\t# To micro-\n\t\t\t\t\t'tonano' : 1e-9,\t# To nano-\n\t\t\t\t\t'topico' : 1e-12,\t# To pico-\n\t\t\t\t\t'tofemto' : 1e-15,\t# To femto-\n\t\t\t\t\t})\n\n\ttry:\n\t\treturn constparams[string]\n\texcept KeyError, e:\n\t\tprint \"KeyError: %s is not a valid key for ntpy.param.const().\" % e\n\t\traise", "def get_test_modules_names() -> typing.List[str]:\n\n from services.meter.tests.unit import constants_for_tests\n return constants_for_tests.TESTS_MODULES", "def _getConsts(self, imt):\r\n\r\n if (imt != self._pga and imt != self._pgv and imt != self._sa03 and\r\n imt != self._sa10 and imt != self._sa30):\r\n raise ValueError(\"Invalid IMT \" + str(imt))\r\n c = self._constants[imt]\r\n return (c)", "def _boilerplate_to_python(indent):\n indent_str = \" \" * indent\n boilerplate = indent_str + \"import core.vba_library\\n\"\n boilerplate = indent_str + \"import core.vba_context\\n\"\n boilerplate += indent_str + \"from core.utils import safe_print\\n\"\n boilerplate += indent_str + \"from core.utils import safe_str_convert\\n\"\n boilerplate += indent_str + \"from core.utils import plus\\n\"\n boilerplate += indent_str + \"from core.utils import eq\\n\"\n boilerplate += indent_str + \"from core.utils import neq\\n\"\n boilerplate += indent_str + \"from core.utils import lt\\n\"\n boilerplate += indent_str + \"from core.utils import lte\\n\"\n boilerplate += indent_str + \"from core.utils import gt\\n\"\n boilerplate += indent_str + \"from core.utils import gte\\n\"\n boilerplate += indent_str + \"import core.utils\\n\"\n boilerplate += indent_str + \"from core.python_jit import update_array\\n\"\n boilerplate += indent_str + \"from core.vba_conversion import coerce_to_num\\n\"\n boilerplate += indent_str + \"from core.vba_conversion import coerce_to_int\\n\"\n boilerplate += indent_str + \"from core.vba_conversion import coerce_to_str\\n\"\n boilerplate += indent_str + \"from core.vba_conversion import coerce_to_int_list\\n\\n\"\n boilerplate += indent_str + \"try:\\n\"\n boilerplate += indent_str + \" \" * 4 + \"vm_context\\n\"\n boilerplate += indent_str + \"except (NameError, UnboundLocalError):\\n\"\n boilerplate += indent_str + \" \" * 4 + \"vm_context = context\\n\"\n return boilerplate", "def get_pyxb_namespaces():\n return pyxb.namespace.utility.AvailableNamespaces()", "def bpix_kw(bpixtab):\n print('Verifying the header keywords of UVIS bad pixel table {}...'.format(bpixtab))\n print('USEAFTER:')\n print(fits.getheader(bpixtab)['USEAFTER'])\n print(' ')\n print('PEDIGREE:')\n print(fits.getheader(bpixtab)['PEDIGREE'])\n print(' ')\n print('DESCRIP:')\n print(fits.getheader(bpixtab)['DESCRIP'])\n print(' ')\n print('COMMENT:')\n print(fits.getheader(bpixtab)['COMMENT'])\n print(' ')\n print('HISTORY:')\n print(fits.getheader(bpixtab)['HISTORY'])", "def 
_dump_prefix(guard: str) -> List[str]:\n\n return [\n f\"#ifndef {guard}\",\n f\"#define {guard}\",\n \"// <<< Use Configuration Wizard in Context Menu >>>\",\n \"#ifdef USE_APP_CONFIG\",\n '#include \"app_config.h\"',\n \"#endif\"\n ]", "def getModuleNames():\n import setup\n names = [e.name[1:] for e in setup.wxpExtensions]\n return names", "def _sym_constant(self, table: Mapping[int, str]) -> str:\n try:\n return table[self.sym]\n except KeyError:\n return str(self.sym)", "def get_macro(self, name: str) -> str:\n data = struct.pack('<HH{}s'.format(len(name)), 0, len(name), name.encode())\n return self.__intf(2, data)[2:].decode()", "def imports():\n for name, val in globals().items():\n if isinstance(val, getattr(types, \"ModuleType\")):\n yield val.__name__", "def getOsiVarNames( self ):\n\n if self.osiVarNames:\n return self.osiVarNames.keys()\n \n n = self.adb.get( \"nOsiVars\" )\n for indx in xrange( n ):\n name = self.adb.get( \"osiVarName\",\n indx ) \n self.osiVarNames[ name ]= indx\n\n return self.osiVarNames.keys()", "def test_created():\n assert len(dir(constants)) > 300\n assert hasattr(constants, \"Planck_constant\") == True", "def getInfoVariableNames(self, product):\r\n return []", "def GetHeaderName(name):\n name = os.path.splitext(name)[0] + '.h'\n name = name.replace(os.sep, '/')\n return 'ppapi/c/' + name" ]
[ "0.63279045", "0.6142163", "0.594471", "0.5942139", "0.59368765", "0.59215355", "0.590987", "0.58534914", "0.57482266", "0.57082814", "0.5656697", "0.56237125", "0.55894226", "0.55852455", "0.5570691", "0.5560409", "0.5546228", "0.5487835", "0.5454318", "0.5445472", "0.5438231", "0.5431478", "0.54168934", "0.539878", "0.5372401", "0.5366455", "0.5365839", "0.5358318", "0.5319855", "0.530538", "0.5301711", "0.52766186", "0.52487016", "0.52296436", "0.5225606", "0.5224576", "0.5182079", "0.5160733", "0.5130271", "0.5127967", "0.5116287", "0.5087695", "0.5085727", "0.5082717", "0.5079234", "0.5066526", "0.5041545", "0.50304586", "0.50289917", "0.50251234", "0.50150204", "0.50065017", "0.5005424", "0.5003382", "0.49936035", "0.4991045", "0.4931618", "0.49266768", "0.4922245", "0.4903186", "0.49020082", "0.4898961", "0.4884125", "0.4882187", "0.487816", "0.4874059", "0.48736688", "0.48620388", "0.4846791", "0.4840434", "0.48399815", "0.4825771", "0.48202333", "0.48089328", "0.4802165", "0.4791801", "0.47898534", "0.47898534", "0.47898534", "0.47798878", "0.47767928", "0.47744137", "0.47729236", "0.47700757", "0.47564578", "0.47552866", "0.47519797", "0.47365576", "0.473376", "0.47212726", "0.47191662", "0.47161847", "0.471578", "0.47147277", "0.47088978", "0.4704482", "0.47038147", "0.4697082", "0.46929896", "0.46854532" ]
0.6312631
1
Return holdout indices respecting the temporal ordering of the data
def time_series_hold_out_validation(random_state: np.random.RandomState, val_share: float, indices: np.ndarray, **kwargs: Any) \
        -> Tuple[np.ndarray, np.ndarray]:
    n_prediction_steps = kwargs['n_prediction_steps']
    n_repeats = kwargs['n_repeats']
    # Time series prediction only requires one set of predictions for each series.
    # This implementation needs to be combined with the time series forecasting dataloader,
    # where each time an entire time series is used for prediction.
    cv = TimeSeriesSplit(n_splits=2, test_size=1 + n_prediction_steps * (n_repeats - 1), gap=n_prediction_steps - 1)
    train, val = holdout_split_forecasting(holdout=cv, indices=indices, n_prediction_steps=n_prediction_steps,
                                           n_repeats=n_repeats)
    return train, val
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_pulling_indices(self, weight):\n pass", "def getBreakIndices(self):\n for i in self.raw.index[:-1]:\n if self.raw['stress'][i+1] > self.raw['stress'][i] and \\\n self.raw['stress'][i+2] < self.raw['stress'][i+1]:\n brkIdx1 = i+1 # brkIdx1: start of the first unloading\n break\n if self.reloading:\n for i in self.raw.index[brkIdx1+1:-1]:\n if self.raw['stress'][i+1] < self.raw['stress'][i] and \\\n self.raw['stress'][i+2] > self.raw['stress'][i+1]:\n brkIdx2 = i+1 # brkIdx2: end of the first unloading\n break\n # brkIdx3: Point on the NCL after the first reloading\n brkIdx3 = self.raw.query(f'stress == stress[{brkIdx1}]').index[1]\n # brkIdx4: index of the last point on the NCL\n brkIdx4 = self.raw.query('stress == stress.max()').index[0]\n self.secondUnloading = False\n else:\n brkIdx2 = self.raw.index[-1]\n brkIdx3 = None\n brkIdx4 = None\n\n self.brkIdx1 = brkIdx1\n self.brkIdx2 = brkIdx2\n self.brkIdx3 = brkIdx3\n self.brkIdx4 = brkIdx4\n return", "def get_indices(waves):\n prob_ = np.abs(waves)**2\n # batch\n prob = [np.sum(prob_[i:i+4,:], axis=0) for i in range(0, len(waves[:,0]), 4)]\n prob = np.asarray(prob)\n prob_tot = np.sum(prob, axis=0)\n \n # cutoff\n length = np.size(prob[:,0])\n len10 = int(length/10)\n flags = np.zeros((prob.shape[1]), dtype=int)\n # hinges\n # 50% within 10% of corners\n\n # surface\n # 50% within 10% of surfaces\n # not already labelled hinges\n prob_left = np.sum(prob[0:len10,:], axis=0)\n frac_left = prob_left/prob_tot\n\n prob_right = np.sum(prob[length-len10:length,:], axis=0)\n frac_right = np.divide(prob_right, prob_tot)\n\n for i in range(len(flags)):\n if frac_left[i]>0.5 or frac_right[i]>0.5:\n flags[i] = 1\n \n indices = [i for i, x in enumerate(flags) if x == 1]\n indices0 = [i for i, x in enumerate(flags) if x == 0]\n \n return indices, indices0", "def get_out_idx():\n exacz = pd.read_csv(f'{home}/ref/exac/exac_zscore_mimssense+stopgain_gn_checked.txt', sep='\\t')\n exacz = exacz[['gn', 'conseq', 'exac_z', 'exac_zrank']]\n\n gdi = pd.read_csv(f'{home}/work/generisk/gdi/gdi_score_pnas_gn_checked.txt', sep=\"\\t\")\n gdi = gdi[['gn', 'gdi', 'gdi_phred_raw']]\n gdi['gdi_rank'] = 100 - round(gdi['gdi'].rank() / len(gdi.index) * 100, 2)\n\n rvis = pd.read_csv(f\"{home}/ref/rvis/rvis_lite.txt\", sep='\\t')\n\n out_idx = pd.merge(exacz, gdi, on='gn', how='outer')\n out_idx = pd.merge(out_idx, rvis, on='gn', how='outer')\n\n # merge with omim\n omim = pd.read_csv(f\"{home}/ref/omim/omim_dedup.tsv\", sep='\\t', usecols='gn,inher'.split(\",\"))\n out_idx = pd.merge(out_idx, omim, on='gn', how='left')\n out_idx['inher'] = out_idx['inher'].fillna('na')\n\n # 18090\n out_idx = out_idx.loc[out_idx.conseq == 'missense_variant'].drop('conseq', axis=1)\n\n out_idx.to_pickle(f'{home}/gr/final/out_idx.pk')\n\n return out_idx", "def construct_indices(after_pooling):\n our_indices = np.zeros_like(after_pooling, dtype=np.int64)\n batch_num, channel_num, row_num, col_num = after_pooling.shape\n for batch_id in range(batch_num):\n for channel_id in range(channel_num):\n for row_id in range(row_num):\n for col_id in range(col_num):\n our_indices[batch_id, channel_id, row_id, col_id] = col_num * 2 * 2 * row_id + 2 * col_id\n return torch.from_numpy(our_indices)", "def _get_indexes(self, participants):\n tr_idx = int(np.floor(self.tr_size*len(participants)))\n j = self.val_size + self.tr_size\n val_idx = int(np.floor(j*len(participants)))\n return tr_idx, val_idx", "def get_output_slice_idx(self, output_index):\r\n ipos = 0\r\n opos = output_index\r\n for 
otaps in zip(self.mitmot_out_taps()):\r\n if len(otaps) > 0:\r\n return ipos\r\n else:\r\n opos = opos - 1\r\n ipos += len(otaps)\r\n return ipos + opos", "def _get_indx(self, t):\n t = np.array(t)\n a = (t[:, np.newaxis] <= self._data['stop']) & (t[:, np.newaxis] >=\n self._data['start'])\n return np.array([np.where(row)[0][0] for row in a])", "def pt_index(*args):\n index = []\n x = check_pt_data(args[0])\n i = 0\n for line in args[0].Data.PTData.pt_data:\n i += 1\n if line != x[i - 1]:\n index.append(0)\n elif line == x[i - 1]:\n index.append(i)\n return index", "def get_cv_indices(df, col, time_slices):\n return [\n ## get train and holdout indices for slice\n tuple(get_row_indices(df, col, slc[x]) for x in range(2))\n\n ## get indices for each slice\n for slc in time_slices\n ]", "def indxmap_diff(Nd): \n\n ndims = len(Nd)\n Ndprod = numpy.prod(Nd)\n mylist = numpy.arange(0, Ndprod).astype(numpy.int32)\n mylist = numpy.reshape(mylist, Nd)\n d_indx = []\n dt_indx = []\n for pp in range(0, ndims):\n d_indx = d_indx + [ numpy.reshape( numpy.roll( mylist, +1 , pp ), (Ndprod,) ,order='C').astype(numpy.int32) ,]\n dt_indx = dt_indx + [ numpy.reshape( numpy.roll( mylist, -1 , pp ) , (Ndprod,) ,order='C').astype(numpy.int32) ,]\n\n return d_indx, dt_indx", "def _sort_index(self):\n\n allAltPos = np.array(sorted(list(set(list(self.data['altitude'])))))[::-1]\n allAziPos = np.array(sorted(list(set(list(self.data['azimuth'])))))\n\n indON = [[None for azi in allAziPos] for alt in allAltPos]; indOFF = [[None for azi in allAziPos] for alt in allAltPos]\n\n for i, traceItem in enumerate(self.data):\n alt = traceItem['altitude'];azi = traceItem['azimuth'];sign = traceItem['sign']\n for j, altPos in enumerate(allAltPos):\n for k, aziPos in enumerate(allAziPos):\n if alt==altPos and azi==aziPos:\n if sign==1:\n if indON[j][k] is not None: raise LookupError('Duplication of trace items found at location:'+str([alt, azi])+'; sign: 1!')\n else: indON[j][k]=i\n\n if sign==-1:\n if indOFF[j][k] is not None: raise LookupError('Duplication of trace items found at location:'+str([alt, azi])+'; sign:-1!')\n else: indOFF[j][k]=i\n\n indON = np.array([np.array(x) for x in indON]); indOFF = np.array([np.array(x) for x in indOFF])\n\n return indON,indOFF,allAltPos,allAziPos", "def get_34index_list(self):\n msk = self.load_mask()\n return [i for (i,v) in enumerate(msk) if v==1]", "def time_window_bins(self, t0, tend):\n indexes = []\n for level in range(self._max_level):\n for i in range(2**level):\n local_times = self.partial_time_interval(level, i)\n if t0 >= local_times['t0'] and t0 < local_times['tend']:\n indexes.append(self._index_list(level, i))\n if tend > local_times['t0'] and tend <= local_times['tend']:\n indexes.append(self._index_list(level, i))\n if t0 <= local_times['t0'] and tend >= local_times['tend']:\n indexes.append(self._index_list(level, i))\n # Remove duplicates if they exist\n # indexes = list(dict.fromkeys(indexes)) # Python 3.7 or later (preserve order)\n indexes = list(set(indexes)) # Any Python version, but does not preserve order\n indexes = np.sort(indexes)\n return indexes", "def ordered_indices(self):\n return self.base_dataset.ordered_indices()", "def get_final_pruned_indices(self):\n return self.final_pruned_indices", "def get_indices_input_target(num_obs, input_len, step_size, forecast_horizon, target_len):\n input_len = round(input_len) # just a precaution\n start_position = 0\n stop_position = num_obs - 1\n\n inpseq_first_idx = start_position\n inpseq_last_idx = inpseq_first_idx 
+ input_len\n target_first_idx = inpseq_last_idx + forecast_horizon\n target_last_idx = target_first_idx + target_len\n print(\"target_last_idx = {}\".format(target_last_idx))\n print(\"stop_position = {}\".format(stop_position))\n indices = []\n while target_last_idx <= stop_position:\n indices.append((inpseq_first_idx, inpseq_last_idx, target_first_idx, target_last_idx))\n inpseq_first_idx += step_size\n inpseq_last_idx += step_size\n target_first_idx += inpseq_last_idx + forecast_horizon\n target_last_idx += target_first_idx + target_len\n return indices", "def get_data_indices(aperiodic_mode):\n\n indices = {\n 'CF' : 0,\n 'PW' : 1,\n 'BW' : 2,\n 'offset' : 0,\n 'knee' : 1 if aperiodic_mode == 'knee' else None,\n 'exponent' : 1 if aperiodic_mode == 'fixed' else 2\n }\n\n return indices", "def get_indexes(self, dataset):\n\n for i in range(self.max_iters):\n index = random.randint(0, len(dataset))\n gt_bboxes_i = dataset.get_ann_info(index)['bboxes']\n if len(gt_bboxes_i) != 0:\n break\n\n return index", "def get_index(ks):\n unq_vals, unq_ix = np.unique(ks[:, 0], return_index=True)\n return np.vstack([unq_vals, unq_ix]).T", "def test_lifted_index():\n pressure = np.array([1014., 1000., 997., 981.2, 947.4, 925., 914.9, 911.,\n 902., 883., 850., 822.3, 816., 807., 793.2, 770.,\n 765.1, 753., 737.5, 737., 713., 700., 688., 685.,\n 680., 666., 659.8, 653., 643., 634., 615., 611.8,\n 566.2, 516., 500., 487., 484.2, 481., 475., 460.,\n 400.]) * units.hPa\n temperature = np.array([24.2, 24.2, 24., 23.1, 21., 19.6, 18.7, 18.4,\n 19.2, 19.4, 17.2, 15.3, 14.8, 14.4, 13.4, 11.6,\n 11.1, 10., 8.8, 8.8, 8.2, 7., 5.6, 5.6,\n 5.6, 4.4, 3.8, 3.2, 3., 3.2, 1.8, 1.5,\n -3.4, -9.3, -11.3, -13.1, -13.1, -13.1, -13.7, -15.1,\n -23.5]) * units.degC\n dewpoint = np.array([23.2, 23.1, 22.8, 22., 20.2, 19., 17.6, 17.,\n 16.8, 15.5, 14., 11.7, 11.2, 8.4, 7., 4.6,\n 5., 6., 4.2, 4.1, -1.8, -2., -1.4, -0.4,\n -3.4, -5.6, -4.3, -2.8, -7., -25.8, -31.2, -31.4,\n -34.1, -37.3, -32.3, -34.1, -37.3, -41.1, -37.7, -58.1,\n -57.5]) * units.degC\n parcel_prof = parcel_profile(pressure, temperature[0], dewpoint[0])\n li = lifted_index(pressure, temperature, parcel_prof)\n assert_almost_equal(li, -7.9115691 * units.delta_degree_Celsius, 2)", "def window_inds(dataset, window_sz, overlap):\r\n\tdata_len = len(dataset[0])\r\n\tassert window_sz < data_len\r\n\tind1 = 0\r\n\tind2 = window_sz-1\r\n\tind_list = []\r\n\tov_ind_diff = int(np.ceil(np.abs(overlap*window_sz)))\r\n\tif ov_ind_diff == window_sz:\r\n\t\tov_ind_diff += -1\r\n\twhile ind2 < data_len:\r\n\t\tind_list.append((ind1,ind2))\r\n\t\tind1 += window_sz-ov_ind_diff\r\n\t\tind2 += window_sz-ov_ind_diff\r\n\treturn ind_list", "def _topological_sort_timestamp_index(self) -> Iterator[BaseTransaction]:\n raise NotImplementedError", "def indices(self):\n _indices = []\n for h in self.miller.indices():\n _indices.append(self.indices_hkl(*h)[0])\n return _indices", "def FindIdxValues(X):\n data = X.select_dtypes(include=[\"float64\"])\n idx = np.argwhere(~np.isnan(data.values))\n idx[:, 1] += 4 # add ID variable columns\n StoE = pd.read_csv(\"msresist/data/MS/CPTAC/IDtoExperiment.csv\")\n assert all(StoE.iloc[:, 0] == data.columns), \"Sample labels don't match.\"\n StoE = StoE.iloc[:, 1].values\n tmt = [[StoE[idx[ii][1] - 4]] for ii in range(idx.shape[0])]\n return np.append(idx, tmt, axis=1)", "def _exclude_indices(self):\n idx = self._next_idx\n exclude = np.arange(idx - 1, idx + self.obs_len) % self._maxsize\n return exclude", "def get_main_points(neuron):\n 
(branch_index,) = np.where(neuron.branch_order[neuron.n_soma:] == 2)\n (endpoint_index,) = np.where(neuron.branch_order[neuron.n_soma:] == 0)\n selected_index = np.union1d(branch_index + neuron.n_soma,\n endpoint_index + neuron.n_soma)\n selected_index = np.append(range(neuron.n_soma), selected_index)\n return selected_index", "def vir_indices(self):\n indices = []\n for index,item in enumerate(self):\n if item==0:\n indices.append(index)\n return indices", "def get_prediction_indices(self):\r\n if self.full_df['Dates'][0] > self.full_df['Dates'][len(self.full_df) - 1]:\r\n self.full_df = self.full_df[::-1]\r\n self.full_df.reset_index(inplace=True)\r\n self.full_df.drop('index', axis=1, inplace=True)\r\n date_condition = ((self.full_df['Dates'] <= self.pred_end) &\r\n (self.full_df['Dates'] >= self.pred_start))\r\n self.pred_indices = list(self.full_df[date_condition].index)", "def step_indices(group_idx):\n ilen = step_count(group_idx) + 1\n indices = np.empty(ilen, np.int64)\n indices[0] = 0\n indices[-1] = group_idx.size\n cmp_pos = 0\n ri = 1\n for i in range(len(group_idx)):\n if group_idx[cmp_pos] != group_idx[i]:\n cmp_pos = i\n indices[ri] = i\n ri += 1\n return indices", "def index(self):\n # Check is multiple orders were given\n try:\n orders = list(iter(self.orders))\n except TypeError:\n orders = [self.orders]\n sites = self._epistasismap.sites\n x = [i for i in range(1, len(sites)) if len(sites[i]) in orders]\n # Add the zeroth element if included\n if 0 in orders:\n x = [0] + x\n return np.array(x)", "def bprop_sparse_tensor_get_indices(sparse_tensor, out, dout):\n return (zeros_like(sparse_tensor),)", "def get_time_index():\n\n def get_common_list(map):\n res = set()\n first = True\n for _, value in map.items():\n if first:\n res = set(value)\n first = False\n else:\n res = res.intersection(value)\n return list(res)\n\n # get common time for 3 cities\n demand_map = {}\n for city in ['beijing', 'tianjing', 'guangzhou']:\n demand_map[city] = []\n for file in os.listdir(exp_data_path + os.sep + 'station' + os.sep + city):\n demand_map[city].append(file[0:12])\n\n\n # get weather time\n weather_map = {}\n for city in ['beijing', 'tianjing', 'guangzhou']:\n weather_map[city] = []\n for file in os.listdir(spider_data_path + os.sep + 'weather' + os.sep + city):\n with open(spider_data_path + os.sep + 'weather' + os.sep + city + os.sep + file) as f:\n reader = csv.reader(f)\n for line in reader:\n # t = line[0].replace(\"\\ufeff\", \"\")\n weather_map[city].append(line[0].replace(\"\\ufeff\", \"\"))\n\n demand_res = get_common_list(demand_map)\n weather_res = get_common_list(weather_map)\n res = get_common_list({'demand': demand_res, 'weather': weather_res})\n res = sorted(res)\n print(len(res))\n print(res)\n\n\n time_index = {'index': {}, 'rev_index': {}}\n index = 0\n for time in res:\n time_index['index'][index] = time\n time_index['rev_index'][time] = index\n index = index + 1\n\n file_name = exp_data_path + os.sep + 'station_list' + os.sep + 'time_index.npy'\n if os.path.exists(file_name):\n os.remove(file_name)\n np.save(file_name, time_index)", "def ordered_indices(self):\r\n return np.arange(len(self), dtype=np.int64)", "def _get_block_indices(y):\n block_labels, block_inv, block_sizes = np.unique(\n y, return_inverse=True, return_counts=True\n )\n\n n_blocks = len(block_labels)\n block_inds = range(n_blocks)\n\n block_vert_inds = []\n for i in block_inds:\n # get the inds from the original graph\n inds = np.where(block_inv == i)[0]\n block_vert_inds.append(inds)\n return 
block_vert_inds, block_inds, block_inv", "def _selected_indices(self, subset):\n # We want the DataFrame to be indexed the same way its values array is\n ftr = self.frametracks.reset_index(drop=True)\n if subset is not None:\n ftr['tmpindex'] = ftr.index.values\n ftr = ftr.set_index('particle').reindex(subset).set_index('tmpindex')\n if self.autoclip:\n # Boundaries are computed for the whole system\n xmin = self.frametracks.x.min() + self.nncutoff\n xmax = self.frametracks.x.max() - self.nncutoff\n ymin = self.frametracks.y.min() + self.nncutoff\n ymax = self.frametracks.y.max() - self.nncutoff\n r = ftr.index[ (ftr.x > xmin) & (ftr.x < xmax) & \\\n (ftr.y > ymin) & (ftr.y < ymax) ].values.astype(int)\n else:\n r = ftr.index.values.astype(int)\n if self.fast:\n return np.random.permutation(r)[:int(len(r) / 10)]\n else:\n return r", "def relevant_indexes(data, min_threshold):\n\n start_index = 1\n end_index = len(data) - 1\n\n for i in range(len(data)):\n if data[i] > min_threshold:\n start_index = i\n break\n\n for i in range(len(data)):\n if data[::-1][i] > min_threshold:\n end_index = i\n break\n\n return start_index, end_index", "def get_index_from_well(self, well):\n pass", "def sorted_index(self) -> np.ndarray:\n return np.argsort(self.result_array.sum(axis=1))[::-1]", "def break_index(self, **kwargs):\n return self.peak_indices(**kwargs)[0][-1]", "def get_itds(timestamps, ears, types, max_itd=800e-6, save_to_file=None, verbose=False, return_itd_indices=False):\n ears = ears.astype(np.bool)\n itds_to_return = np.zeros(timestamps.size, dtype=np.float32)\n itds_to_return.fill(-5. * max_itd)\n\n timestamps_dict = {}\n timestamp_indices_dict = {}\n for ear in np.unique(ears):\n timestamps_dict[ear] = {}\n timestamp_indices_dict[ear] = {}\n for type_of_event in np.unique(types):\n timestamps_dict[ear][type_of_event] = []\n timestamp_indices_dict[ear][type_of_event] = []\n\n for idx, (timestamp, ear, type_of_event) in enumerate(zip(timestamps, ears, types)):\n timestamps_dict[ear][type_of_event].append(timestamp)\n timestamp_indices_dict[ear][type_of_event].append(idx)\n\n if verbose:\n print('Initialized the timestamp lists.')\n\n bar = progressbar.ProgressBar() if verbose else lambda x: x\n\n for type_of_event in bar(np.unique(types)):\n timestamps_left = np.array(timestamps_dict[True][type_of_event])\n timestamp_indices_left = timestamp_indices_dict[True][type_of_event]\n timestamps_right = np.array(timestamps_dict[False][type_of_event])\n timestamp_indices_right = timestamp_indices_dict[False][type_of_event]\n\n for ts_right, ts_idx_right in zip(timestamps_right, timestamp_indices_right):\n matched_indices = np.where((timestamps_left >= ts_right - max_itd) &\n (timestamps_left < ts_right + max_itd))[0]\n if matched_indices.size > 0:\n matched_itds = ts_right - timestamps_left[matched_indices]\n min_itd = np.argmin(np.abs(matched_itds))\n itds_to_return[ts_idx_right] = matched_itds[min_itd]\n\n for ts_left, ts_idx_left in zip(timestamps_left, timestamp_indices_left):\n matched_indices = np.where((timestamps_right >= ts_left - max_itd) &\n (timestamps_right < ts_left + max_itd))[0]\n if matched_indices.size > 0:\n matched_itds = timestamps_right[matched_indices] - ts_left\n min_itd = np.argmin(np.abs(matched_itds))\n itds_to_return[ts_idx_left] = matched_itds[min_itd]\n\n itd_indices = np.where(itds_to_return > -4. 
* max_itd)[0]\n itds_to_return = itds_to_return[itd_indices]\n if save_to_file is not None:\n np.savez(save_to_file, timestamps=timestamps[itd_indices], ears=ears[itd_indices], types=types[itd_indices],\n itds=itds_to_return, itd_indices=itd_indices)\n\n if return_itd_indices:\n return itds_to_return, itd_indices\n\n return itds_to_return", "def test_k_index():\n pressure = np.array([1014., 1000., 997., 981.2, 947.4, 925., 914.9, 911.,\n 902., 883., 850., 822.3, 816., 807., 793.2, 770.,\n 765.1, 753., 737.5, 737., 713., 700., 688., 685.,\n 680., 666., 659.8, 653., 643., 634., 615., 611.8,\n 566.2, 516., 500., 487., 484.2, 481., 475., 460.,\n 400.]) * units.hPa\n temperature = np.array([24.2, 24.2, 24., 23.1, 21., 19.6, 18.7, 18.4,\n 19.2, 19.4, 17.2, 15.3, 14.8, 14.4, 13.4, 11.6,\n 11.1, 10., 8.8, 8.8, 8.2, 7., 5.6, 5.6,\n 5.6, 4.4, 3.8, 3.2, 3., 3.2, 1.8, 1.5,\n -3.4, -9.3, -11.3, -13.1, -13.1, -13.1, -13.7, -15.1,\n -23.5]) * units.degC\n dewpoint = np.array([23.2, 23.1, 22.8, 22., 20.2, 19., 17.6, 17.,\n 16.8, 15.5, 14., 11.7, 11.2, 8.4, 7., 4.6,\n 5., 6., 4.2, 4.1, -1.8, -2., -1.4, -0.4,\n -3.4, -5.6, -4.3, -2.8, -7., -25.8, -31.2, -31.4,\n -34.1, -37.3, -32.3, -34.1, -37.3, -41.1, -37.7, -58.1,\n -57.5]) * units.degC\n ki = k_index(pressure, temperature, dewpoint)\n assert_almost_equal(ki, 33.5 * units.degC, 2)", "def occ_indices(self):\n indices = []\n for index,item in enumerate(self):\n if item==1:\n indices.append(index)\n return indices", "def get_continous_time_periods(binary_array):\n binary_array = np.copy(binary_array).astype(\"int8\")\n n_times = len(binary_array)\n d_times = np.diff(binary_array)\n # show the +1 and -1 edges\n pos = np.where(d_times == 1)[0] + 1\n neg = np.where(d_times == -1)[0] + 1\n\n if (pos.size == 0) and (neg.size == 0):\n if len(np.nonzero(binary_array)[0]) > 0:\n return [(0, n_times-1)]\n else:\n return []\n elif pos.size == 0:\n # i.e., starts on an spike, then stops\n return [(0, neg[0])]\n elif neg.size == 0:\n # starts, then ends on a spike.\n return [(pos[0], n_times-1)]\n else:\n if pos[0] > neg[0]:\n # we start with a spike\n pos = np.insert(pos, 0, 0)\n if neg[-1] < pos[-1]:\n # we end with aspike\n neg = np.append(neg, n_times - 1)\n # NOTE: by this time, length(pos)==length(neg), necessarily\n h = np.matrix([pos, neg])\n # print(f\"len(h[1][0]) {len(h[1][0])} h[1][0] {h[1][0]} h.size {h.size}\")\n if np.any(h):\n result = []\n for i in np.arange(h.shape[1]):\n if h[1, i] == n_times-1:\n result.append((h[0, i], h[1, i]))\n else:\n result.append((h[0, i], h[1, i]-1))\n return result\n return []", "def get_max_indices(self, input):\n \n min_element = torch.min(torch.abs(input.contiguous().view(-1)))\n input_temp = input + min_element + self.epsilon\n masked_input_temp = input_temp * self.mask\n values, indices = torch.sort(masked_input_temp, dim=1, descending=True)\n\n return indices[:, :self.num_active_nodes,:]", "def get_adjacent_idxs(sample, array):\n state = sample >= array # boolean array\n # Find the index where the last \"True\"\n # This is the idx lower than the sample\n idx_lower = np.where(state)[0][-1]\n\n # Find the index where the last \"False\"\n # This is the first idx lower than the sample\n idx_higher = np.where(np.logical_not(state))[0][0]\n\n return idx_lower, idx_higher", "def _get_index(self):\n\n return self.index\n\n # to avoid having differences bigger than 2pi", "def get_track_mask_idxes(self):\n instance_id_num_pts = defaultdict(lambda: 0)\n instance_id_lifetimes = defaultdict(lambda: [10000, -1])\n\n for frame_num, 
labels_per_frame in enumerate(self._frame_labels):\n for id in labels_per_frame.unique().tolist():\n instance_id_num_pts[id] += (labels_per_frame == id).long().sum().item()\n instance_id_lifetimes[id][0] = min(frame_num, instance_id_lifetimes[id][0])\n instance_id_lifetimes[id][1] = max(frame_num, instance_id_lifetimes[id][1])\n\n instance_id_lifetimes = {k: v[1] - v[0] for k, v in instance_id_lifetimes.items()}\n return self._frame_labels, instance_id_num_pts, instance_id_lifetimes", "def get_indexes(self, dataset):\n\n indexs = [random.randint(0, len(dataset)) for _ in range(3)]\n return indexs", "def ordered_indices(self):\r\n '''we need random order'''\r\n if self.shuffle:\r\n indices = np.random.permutation(len(self))\r\n else:\r\n indices = np.arange(len(self))\r\n '''\r\n if self.tgt_sizes is not None:\r\n indices = indices[np.argsort(self.tgt_sizes[indices], kind='mergesort')]\r\n return indices[np.argsort(self.src_sizes[indices], kind='mergesort')]\r\n '''\r\n return indices", "def _get_target_index(self):\n return (self.index + self.source_window * (not self.overlapping) +\n self.offset)", "def mesh_span_indices(self):\n self._ensure_mesh()\n k2m = self._knots_to_mesh\n return np.where(k2m[1:] != k2m[:-1])[0]", "def create_jackknife_indexes(data):\n from numpy import arange, delete\n\n index_range = arange(0, len(data))\n return (delete(index_range, i) for i in index_range)", "def _get_k_indices(self, ks):\n if self.staticneighs:\n idx_ks = ks\n else:\n idx_ks = [self.ks.index(e) for e in ks]\n return idx_ks", "def get_segment_index(datadb):\n #match in time!!!!\n if cfg.use_saliency:\n segment_index_tar = util.get_time_for_visual(datadb)\n segment_index_tar_future = OrderedDict()\n for key in segment_index_tar.keys():\n segment_index_tar_future[key] = np.array(segment_index_tar[key])+max_encoder_seq_length\n return segment_index_tar,segment_index_tar_future", "def get_vacancy_indices(array):\t\n\tvacancy_indices = np.argwhere(array == 0)\n\treturn vacancy_indices", "def ordered_indices(self):\n return self.d1.ordered_indices()\n # RETURN BASED ON D1's sizes", "def get_cached_indices(self, start=None, end=None):\n params = {}\n indices = [\n y[\"sample_identifier\"]\n for y in self.mongo_database.cache.find(\n params, {\"_id\": 0, \"sample_identifier\": 1}\n )[start:end]\n ]\n return np.unique(indices).tolist()", "def computeTopSurfaceIndices(top):\n itop = np.array([(top[i,j], j, i) \\\n for i in range(top.shape[0]) \\\n for j in range(top.shape[1]) \\\n if top[i,j] >= 0])\n return itop", "def objects_to_indexes(self, object_seq: Sequence[Any]) -> np.ndarray:\n res = np.zeros(len(object_seq))\n for i, obj in enumerate(object_seq):\n if obj in self.obj_to_idx:\n res[i] = self.obj_to_idx[obj]\n else:\n res[i] = self.start-1\n return res", "def get_overlapping_indices(self):\n return self._get_atomic_overlaps()", "def indexes(self):\r\n\r\n\r\n if not self.usesequence:\r\n\r\n if len(self.get_all_indexes()) != len(self.sortedindexes) \\\r\n or self.indexchanged or not self.sortedindexes:\r\n self.indexchanged = False\r\n self.sortedindexes = sorted(self.get_all_indexes(),\r\n key=lambda x_temp: Index(x_temp))\r\n return self.sortedindexes\r\n return self.sortedindexes\r\n else:\r\n if self.indexchanged:\r\n self.sortedindexes = self.default_dict['indexlist'].strings()\r\n return self.sortedindexes\r\n else:\r\n return self.sortedindexes", "def selectCompatibleIndices(bigTimes, smallTimes):\r\n indices = []\r\n for idx, _ in enumerate(smallTimes):\r\n distances = (bigTimes - 
smallTimes[idx])**2\r\n def getValue(k):\r\n return distances[k]\r\n thisIndices = sorted(range(len(distances)), key=getValue)\r\n indices.append(thisIndices[0])\r\n return np.array(indices)", "def get_recorded_indices(self, application_vertex, variable):\n if variable not in self.__sampling_rates:\n return []\n if self.__indexes[variable] is None:\n return range(application_vertex.n_atoms)\n return self.__indexes[variable]", "def scatter_counts_to_indices(input: torch.LongTensor) -> torch.LongTensor:\n return torch.repeat_interleave(torch.arange(input.size(0), device=input.device), input).long()", "def get_negative_idx(self, outputs, targets, topk=(1, )):\n max_k = max(topk)\n\n _, pred = outputs.topk(max_k, 1, True, True)\n pred = pred.t()\n correct = pred.eq(targets.view(1, -1).expand_as(pred))\n \n res = []\n for k in topk:\n negative_idx = (correct[:k].view(-1).float() == 0).nonzero()\n res.append(negative_idx)\n return res", "def master_ndindex(self): # itermaster_indices(self):\n return itertools_product(\n *[range(*r) for r in self.location]\n ) # TODO check", "def generate_reverse_index(self):", "def get_shuffle_ids(self, bsz):\n forward_inds = torch.randperm(bsz).long().cuda()\n backward_inds = torch.zeros(bsz).long().cuda()\n value = torch.arange(bsz).long().cuda()\n backward_inds.index_copy_(0, forward_inds, value)\n return forward_inds, backward_inds", "def get_selected_indices(bboxes, gt_boxes, total_pos_bboxes, total_neg_bboxes):\n # Calculate iou values between each bboxes and ground truth boxes\n iou_map = generate_iou_map(bboxes, gt_boxes)\n # Get max index value for each row\n max_indices_each_gt_box = tf.argmax(iou_map, axis=2, output_type=tf.int32)\n # IoU map has iou values for every gt boxes and we merge these values column wise\n merged_iou_map = tf.reduce_max(iou_map, axis=2)\n # Sorted iou values\n sorted_iou_map = tf.argsort(merged_iou_map, direction=\"DESCENDING\")\n # Get highest and lowest candidate indices\n pos_candidate_indices = sorted_iou_map[:, :total_pos_bboxes * 2]\n neg_candidate_indices = sorted_iou_map[:, ::-1][:, :total_neg_bboxes * 2]\n # Shuffling\n pos_candidate_indices_shuffled = tf.transpose(tf.random.shuffle(tf.transpose(pos_candidate_indices)))\n neg_candidate_indices_shuffled = tf.transpose(tf.random.shuffle(tf.transpose(neg_candidate_indices)))\n # Randomly select pos and neg indices from candidates\n pos_bbox_indices = pos_candidate_indices_shuffled[:, :total_pos_bboxes]\n neg_bbox_indices = neg_candidate_indices_shuffled[:, :total_neg_bboxes]\n gt_box_indices = tf.gather(max_indices_each_gt_box, pos_bbox_indices, batch_dims=1)\n #\n return pos_bbox_indices, neg_bbox_indices, gt_box_indices", "def fft_bin_to_index(self, bins):\n idx = bins.copy()\n return idx", "def tc_index(*args):\n index = []\n x = check_tc_data(args[0])\n i = 0\n for line in args[0].Data.TCData.tc_data:\n i += 1\n if line != x[i - 1]:\n index.append(0)\n elif line == x[i - 1]:\n index.append(i)\n return index", "def inidices(self):\n return [Index(name, index) for name, index in self._data['indices'].iteritems()]", "def racetrack_filter(reversals, h):\n\n y = reversals\n yprev = None\n ix = []\n for n, yn in enumerate(y):\n if (n == 0) or (n == y.size-1):\n yprev = yn\n ix.append(n)\n continue\n dy = yn - yprev\n if fabs(dy) > h / 2.:\n yprev = yn - dy/fabs(dy) * h/2.\n ix.append(n)\n ix = np.array(ix, dtype=int)\n return y[ix], ix", "def _little_group_index(self, k: Array) -> Array:\n # calculate k' = p(k) for all p in the point group\n big_star = 
np.tensordot(self.point_group_.matrices(), k, axes=1)\n big_star = self.lattice.to_reciprocal_lattice(big_star) % self.lattice.extent\n # should test for pbc before taking the modulus, but the only valid wave\n # vector for non-pbc axes is 0 and 0 % anything == 0\n\n # assumes point_group_[0] is the identity\n is_in_little_group = np.all(big_star == big_star[0], axis=1)\n return np.arange(len(self.point_group_))[is_in_little_group]", "def _indicies_of_periods(datetime_index, freq, use_local_time=True):\n\n if use_local_time:\n datetime_index = _tz_to_naive(datetime_index)\n\n periods = pd.period_range(datetime_index[0], datetime_index[-1], freq=freq)\n\n # Declare and initialise some constants and variables used\n # during the loop...\n\n # Find the minimum sample period.\n MIN_SAMPLE_PERIOD = int(get_sample_period(datetime_index))\n MAX_SAMPLES_PER_PERIOD = int(\n secs_per_period_alias(freq) / MIN_SAMPLE_PERIOD)\n MAX_SAMPLES_PER_2_PERIODS = MAX_SAMPLES_PER_PERIOD * 2\n n_rows_processed = 0\n boundaries = {}\n for period in periods:\n # The simplest way to get data for just a single period is to use\n # data_for_day = datetime_index[period.strftime('%Y-%m-%d')]\n # but this takes about 300ms per call on my machine.\n # So we take advantage of several features of the data to achieve\n # a 300x speedup:\n # 1. We use the fact that the data is sorted in order, hence\n # we can chomp through it in order.\n # 2. MAX_SAMPLES_PER_PERIOD sets an upper bound on the number of\n # datapoints per period. The code is conservative and uses\n # MAX_SAMPLES_PER_2_PERIODS. We only search through a small subset\n # of the available data.\n\n end_index = n_rows_processed + MAX_SAMPLES_PER_2_PERIODS\n rows_to_process = datetime_index[n_rows_processed:end_index]\n indicies_for_period = np.where(rows_to_process < period.end_time)[0]\n if indicies_for_period.size > 0:\n first_i_for_period = indicies_for_period[0] + n_rows_processed\n last_i_for_period = indicies_for_period[-1] + n_rows_processed + 1\n boundaries[period] = (first_i_for_period, last_i_for_period)\n n_rows_processed += last_i_for_period - first_i_for_period\n\n return periods, boundaries", "def indices(self):\n return self._kbounded_partitions", "def get_itds_v3(timestamps, ears, types, max_itd=800e-6, save_to_file=None, verbose=False, return_itd_indices=False):\n ears = ears.astype(np.bool)\n itds_to_return = np.zeros(timestamps.size, dtype=np.float32)\n itds_to_return.fill(-5. 
* max_itd)\n\n timestamps_dict = {}\n timestamp_indices_dict = {}\n for ear in np.unique(ears):\n timestamps_dict[ear] = {}\n timestamp_indices_dict[ear] = {}\n for type_of_event in np.unique(types):\n timestamps_dict[ear][type_of_event] = []\n timestamp_indices_dict[ear][type_of_event] = []\n\n for idx, (timestamp, ear, type_of_event) in enumerate(zip(timestamps, ears, types)):\n timestamps_dict[ear][type_of_event].append(timestamp)\n timestamp_indices_dict[ear][type_of_event].append(idx)\n\n if verbose:\n print('Initialized the timestamp lists.')\n\n bar = progressbar.ProgressBar() if verbose else lambda x: x\n\n max_num_events = 5\n\n for type_of_event in bar(np.unique(types)):\n timestamps_left = np.array(timestamps_dict[True][type_of_event])\n timestamp_indices_left = timestamp_indices_dict[True][type_of_event]\n timestamps_right = np.array(timestamps_dict[False][type_of_event])\n timestamp_indices_right = timestamp_indices_dict[False][type_of_event]\n\n num_right_events = timestamps_right.shape[0]\n\n for event_idx, (ts_right, ts_idx_right) in enumerate(zip(timestamps_right, timestamp_indices_right)):\n matched_indices = np.where((timestamps_left >= ts_right - max_itd) &\n (timestamps_left < ts_right + max_itd))[0]\n if matched_indices.size > 0:\n matched_itds = ts_right - timestamps_left[matched_indices]\n min_itd_idx_local = np.argmin(np.abs(matched_itds))\n min_itd = matched_itds[min_itd_idx_local]\n # absolute index of the itd pair event\n min_itd_ts_left = ts_right - min_itd\n # now check that the itd pair for the itd pair event is the current event\n if event_idx < max_num_events:\n min_itd_ts_right = timestamps_right[0: event_idx + max_num_events + 1]\n alt_min_itd_idx = np.argmin(np.abs(min_itd_ts_left - min_itd_ts_right))\n if alt_min_itd_idx == event_idx:\n itds_to_return[ts_idx_right] = min_itd\n else:\n min_itd_ts_right = timestamps_right[event_idx - max_num_events: event_idx + max_num_events + 1]\n alt_min_itd_idx = np.argmin(np.abs(min_itd_ts_left - min_itd_ts_right))\n if alt_min_itd_idx == max_num_events:\n itds_to_return[ts_idx_right] = min_itd\n if min_itd_ts_right[0] > min_itd_ts_left - max_itd or min_itd_ts_right[-1] < min_itd_ts_left + max_itd:\n print('[WARNING] The max_num_events is not enough, please check.')\n sys.stdout.flush()\n\n itd_indices = np.where(itds_to_return > -4. 
* max_itd)[0]\n itds_to_return = itds_to_return[itd_indices]\n if save_to_file is not None:\n np.savez(save_to_file, timestamps=timestamps[itd_indices], ears=ears[itd_indices], types=types[itd_indices],\n itds=itds_to_return, itd_indices=itd_indices)\n\n if return_itd_indices:\n return itds_to_return, itd_indices\n\n return itds_to_return", "def get_indices(self):\r\n return self._indices", "def mainIndices(self):\n return self.i1, self.i2", "def get_img_indices():\n if K.image_dim_ordering() == 'th':\n return 0, 1, 2, 3\n else:\n return 0, 3, 1, 2", "def getNegativeSamples(target, dataset, K):\n\n indices = [None] * K\n for k in range(K):\n newidx = dataset.sampleTokenIdx()\n while newidx == target:\n newidx = dataset.sampleTokenIdx()\n indices[k] = newidx\n return indices", "def unravel_index(ijk, n):\n \n if type(ijk) is int:\n return ijk\n if len(ijk)==1:\n return ijk[0]\n\n assert (np.diff(ijk)>0).all()\n assert all([i<n for i in ijk])\n\n ix = sum([int(binom(n-1-i,len(ijk)-1)) for i in range(ijk[0])])\n for d in range(1, len(ijk)-1):\n if (ijk[d]-ijk[d-1])>1:\n ix += sum([int(binom(n-i-1,len(ijk)-d-1)) for i in range(ijk[d-1]+1, ijk[d])])\n ix += ijk[-1] -ijk[-2] -1\n return ix", "def getNegativeSamples(target, dataset, K):\n\n indices = [None] * K\n for k in xrange(K):\n newidx = dataset.sampleTokenIdx()\n while newidx == target:\n newidx = dataset.sampleTokenIdx()\n indices[k] = newidx\n return indices", "def getNegativeSamples(target, dataset, K):\n\n indices = [None] * K\n for k in xrange(K):\n newidx = dataset.sampleTokenIdx()\n while newidx == target:\n newidx = dataset.sampleTokenIdx()\n indices[k] = newidx\n return indices", "def getNegativeSamples(target, dataset, K):\n\n indices = [None] * K\n for k in xrange(K):\n newidx = dataset.sampleTokenIdx()\n while newidx == target:\n newidx = dataset.sampleTokenIdx()\n indices[k] = newidx\n return indices", "def _extract_t_indices(self, X, X2=None, dL_dK=None):\r\n\r\n # TODO: some fast checking here to see if this needs recomputing?\r\n self._t = X[:, 0]\r\n if not X.shape[1] == 2:\r\n raise ValueError('Input matrix for ode1 covariance should have two columns, one containing times, the other output indices')\r\n self._index = np.asarray(X[:, 1],dtype=np.int)\r\n # Sort indices so that outputs are in blocks for computational\r\n # convenience.\r\n self._order = self._index.argsort()\r\n self._index = self._index[self._order]\r\n self._t = self._t[self._order]\r\n self._rorder = self._order.argsort() # rorder is for reversing the order\r\n \r\n if X2 is None:\r\n self._t2 = None\r\n self._index2 = None\r\n self._order2 = self._order\r\n self._rorder2 = self._rorder\r\n else:\r\n if not X2.shape[1] == 2:\r\n raise ValueError('Input matrix for ode1 covariance should have two columns, one containing times, the other output indices')\r\n self._t2 = X2[:, 0]\r\n self._index2 = np.asarray(X2[:, 1],dtype=np.int)\r\n self._order2 = self._index2.argsort()\r\n self._index2 = self._index2[self._order2]\r\n self._t2 = self._t2[self._order2]\r\n self._rorder2 = self._order2.argsort() # rorder2 is for reversing order\r\n\r\n if dL_dK is not None:\r\n self._dL_dK = dL_dK[self._order, :]\r\n self._dL_dK = self._dL_dK[:, self._order2]", "def _getscanind(self):\n \n zamin = self.za.min()\n first = np.where(self.za==zamin)[0]\n self.scan = np.zeros(self.spec.shape[0])\n if zamin < 0:\n cs = first[np.where((first - np.roll(first, 1)) != 1)[0]]\n ss = first[np.where((np.roll(first,-1) - first) != 1)[0]] + 1\n ce = ss \n se = np.roll((cs - 1) % 
self.za.size, -1) + 1\n for k, val in enumerate(cs):\n self.scan[val:se[k] + 1] = k\n else:\n moves = np.diff(self.za)\n max_ind = np.where(moves==moves.max())[0]\n turnover = self.za.size\n diffs = np.diff(max_ind)\n if np.unique(diffs).size > 1:\n raise ValueError, 'Can\\'t deal with non-uniform cal data yet.'\n if max_ind.size > 1:\n turnover = diffs[0]\n cs = ce = np.array([])\n ss = np.arange(self.za.size)[::turnover]\n se = np.roll((ss - 1) % self.za.size, -1)\n for k, val in enumerate(ss):\n self.scan[val:se[k] + 1] = k\n \n self.ind = {'cs': cs, 'ce': ce, 'ss': ss, 'se': se}\n self.nscan = np.unique(self.scan).size", "def test_total_totals_index():\n pressure = np.array([1008., 1000., 947., 925., 921., 896., 891., 889., 866.,\n 858., 850., 835., 820., 803., 733., 730., 700., 645.,\n 579., 500., 494., 466., 455., 441., 433., 410., 409.,\n 402., 400., 390., 388., 384., 381., 349., 330., 320.,\n 306., 300., 278., 273., 250., 243., 208., 200., 196.,\n 190., 179., 159., 151., 150., 139.]) * units.hPa\n temperature = np.array([27.4, 26.4, 22.9, 21.4, 21.2, 20.7, 20.6, 21.2, 19.4,\n 19.1, 18.8, 17.8, 17.4, 16.3, 11.4, 11.2, 10.2, 6.1,\n 0.6, -4.9, -5.5, -8.5, -9.9, -11.7, -12.3, -13.7, -13.8,\n -14.9, -14.9, -16.1, -16.1, -16.9, -17.3, -21.7, -24.5, -26.1,\n -28.3, -29.5, -33.1, -34.2, -39.3, -41., -50.2, -52.5, -53.5,\n -55.2, -58.6, -65.2, -68.1, -68.5, -72.5]) * units.degC\n dewpoint = np.array([24.9, 24.6, 22., 20.9, 20.7, 14.8, 13.6, 12.2, 16.8,\n 16.6, 16.5, 15.9, 13.6, 13.2, 11.3, 11.2, 8.6, 4.5,\n -0.8, -8.1, -9.5, -12.7, -12.7, -12.8, -13.1, -24.7, -24.4,\n -21.9, -24.9, -36.1, -31.1, -26.9, -27.4, -33., -36.5, -47.1,\n -31.4, -33.5, -40.1, -40.8, -44.1, -45.6, -54., -56.1, -56.9,\n -58.6, -61.9, -68.4, -71.2, -71.6, -77.2]) * units.degC\n\n tt = total_totals_index(pressure, temperature, dewpoint)\n assert_almost_equal(tt, 45.10 * units.delta_degC, 2)", "def generate_index(self):\n begin_o, end_o, begin_a, end_a = 0, 0, 0, 0\n for obs_space, act_space in zip(self.env.observation_space, self.env.action_space):\n end_o = end_o + obs_space.shape[0]\n if isinstance(act_space, Box):\n end_a = act_space.shape[0]\n else:\n end_a = act_space.n\n range_o = (begin_o, end_o)\n range_a = (begin_a, end_a)\n self.observation_index.append(range_o)\n self.action_index.append(range_a)\n begin_o = end_o\n begin_a = end_a", "def getstate(self):\r\n return [self.tied_indices,\r\n self.fixed_indices,\r\n self.fixed_values,\r\n self.constrained_indices,\r\n self.constraints]", "def getNeighbours(seg,meta,inversedIndex):\n return np.unique(np.fromiter( (inversedIndex[x] for x in np.concatenate([meta.loc[seg]['ins'],meta.loc[seg]['outs']])),dtype=np.int))", "def test_gleckler_index(self):\n\n # generate sample data\n # sample data\n tmp = np.zeros((5, 3, 1))\n tmp[:,0,0] = np.ones(5)*1.\n tmp[:,1,0] = np.ones(5)*2.\n tmp[:,2,0] = np.ones(5)*5.\n\n # The data is like ...\n #| 1 | 2 | 5 |\n #| 1 | 2 | 5 |\n #| 1 | 2 | 5 |\n #| 1 | 2 | 5 |\n #| 1 | 2 | 5 |\n\n x = self.D.copy()\n x._temporal_subsetting(0, 4)\n\n x.data = np.ma.array(tmp, mask=tmp!=tmp)\n x.std = np.ones(x.data.shape)\n x.time[0] = pl.datestr2num('2000-02-15')\n x.time[1] = pl.datestr2num('2000-03-15')\n x.time[2] = pl.datestr2num('2000-04-15')\n x.time[3] = pl.datestr2num('2000-05-15')\n x.time[4] = pl.datestr2num('2000-06-15')\n\n y = self.D.copy()\n y._temporal_subsetting(0, 4)\n tmp = np.ones(x.data.shape) # sample data 2\n y.data = np.ma.array(tmp, mask=tmp!=tmp)\n y.time[0] = pl.datestr2num('2000-02-15')\n y.time[1] = 
pl.datestr2num('2000-03-15')\n y.time[2] = pl.datestr2num('2000-04-15')\n y.time[3] = pl.datestr2num('2000-05-15')\n y.time[4] = pl.datestr2num('2000-06-15')\n\n # Case 1: same area weights\n # cell area\n tmp = np.ones((3, 1))\n x.cell_area = tmp*1.\n\n #| 1-1 | 2-1 | 5-1 |\n #| 1-1 | 2-1 | 5-1 |\n #| 1-1 | 2-1 | 5-1 |\n #| 1-1 | 2-1 | 5-1 |\n #| 1-1 | 2-1 | 5-1 |\n #===================\n #| 0 | 5 | 5*4**2=5*16. = 80 |\n #==> E2 = sqrt(85./(15.))\n D = GlecklerPlot()\n r = D.calc_index(x, y, 'a', 'b', time_weighting=False)\n\n wt = np.ones(5) / 5.\n ref = np.sqrt(((85./15.) * wt).sum())\n t = np.abs(1. - r / ref)\n self.assertLess(t, 0.000001) # relative error\n\n D = GlecklerPlot()\n r = D.calc_index(x, y, 'a', 'b')\n\n wt = np.asarray([29., 31., 30., 31., 30.])\n wt = wt / wt.sum()\n ref = np.sqrt(((85./15.) * wt).sum())\n t = np.abs(1. - r / ref)\n self.assertLess(t, 0.000001) # relative error\n\n\n\n # Case 2: Different area weights\n # cell area\n tmp = np.ones((3, 1))\n tmp[1, 0] = 2.\n x.cell_area = tmp*1.\n\n #| 1-1=0 | 2-1=1 | 5-1=16 |\n #| 1-1=0 | 2-1=1 | 5-1=16 |\n #| 1-1=0 | 2-1=1 | 5-1=16 |\n #| 1-1=0 | 2-1=1 | 5-1=16 |\n #| 1-1=0 | 2-1=1 | 5-1=16 |\n #--------------------------\n # w = 0.25 w = 0.5 w=0.25|\n #--------------------------\n\n # 0.25*0 + 0.5 * 1 + 0.25 * 16 = 0 + 0.5 + 4 = 4.5\n # the mean of that is 4.5 for each timestep\n # mean because the overall weights are calculated as such that\n # they give a total weight if 1\n\n # diagnostic\n D = GlecklerPlot()\n r = D.calc_index(x, y, 'a', 'b', time_weighting=False)\n\n wt = np.ones(5) / 5.\n ref = np.sqrt((4.5 * wt).sum())\n t = np.abs(1. - r / ref)\n self.assertLess(t, 0.000001) # relative error\n\n wt = np.asarray([29., 31., 30., 31., 30.])\n wt = wt / wt.sum()\n ref = np.sqrt((4.5 * wt).sum())\n t = np.abs(1. - r / ref)\n self.assertLess(t, 0.000001) # relative error\n\n # Case 3: use different std\n x.std = np.ones(x.data.shape)\n x.std[:, 2, 0] = 0.5\n\n #| 1-1=0 | 2-1=1 | 5-1=16 / 0.5 |\n #| 1-1=0 | 2-1=1 | 5-1=16 / 0.5 |\n #| 1-1=0 | 2-1=1 | 5-1=16 / 0.5 |\n #| 1-1=0 | 2-1=1 | 5-1=16 / 0.5 |\n #| 1-1=0 | 2-1=1 | 5-1=16 / 0.5 |\n #--------------------------------\n # w = 0.25 w = 0.5 w=0.25|\n # 0 + 0.5 + 0.25*32 = 0.5 + 8 = 8.5\n\n D = GlecklerPlot()\n r = D.calc_index(x, y, 'a', 'b', time_weighting=False)\n\n wt = np.ones(5) / 5.\n ref = np.sqrt((8.5 * wt).sum())\n t = np.abs(1. - r / ref)\n self.assertLess(t, 0.000001) # relative error\n\n wt = np.asarray([29., 31., 30., 31., 30.])\n wt = wt / wt.sum()\n ref = np.sqrt((8.5 * wt).sum())\n t = np.abs(1. 
- r / ref)\n self.assertLess(t, 0.000001) # relative error", "def ztost_ind(self, low, upp, usevar=\"pooled\"):\n tt1 = self.ztest_ind(alternative=\"larger\", usevar=usevar, value=low)\n tt2 = self.ztest_ind(alternative=\"smaller\", usevar=usevar, value=upp)\n # TODO: remove tuple return, use same as for function tost_ind\n return np.maximum(tt1[1], tt2[1]), tt1, tt2", "def get_gt_hom_idxs(alt_num):\n last = -1\n hom_idxs = []\n for a in range(alt_num + 1):\n last = last + (a + 1)\n hom_idxs.append(last)\n return hom_idxs", "def index_dict(self):\n msk = self.load_mask()\n mski = enumerate(msk)\n ifiltered = (i for (i, m) in mski if m == 1)\n return {i: j for (j, i) in enumerate(ifiltered)}", "def index_dict(self):\n msk = self.load_mask()\n mski = enumerate(msk)\n ifiltered = (i for (i, m) in mski if m == 1)\n return {i: j for (j, i) in enumerate(ifiltered)}", "def tril_indices(n,k=0):\r\n return mask_indices(n,tril,k)", "def indices(self):\n return tuple([slice(*r) for r in self.location])", "def create_repeated_indexes(data):\n from numpy import arange\n\n index_range = arange(0, len(data))\n return (index_range for i in index_range)", "def morton_idx(pts):\n lib = _initlib()\n p = require(pts, dtype=float64, requirements=['C']) \n inv_cell_width = 1.0/8192\n npts = len(p)\n out = empty(npts, dtype=int64)\n lib.get_morton_idx(p, npts, inv_cell_width, out)\n return out" ]
[ "0.6033882", "0.5888383", "0.5833695", "0.576953", "0.5758707", "0.56996095", "0.5611121", "0.5609971", "0.5606583", "0.5600059", "0.5593295", "0.5524574", "0.54750305", "0.54693586", "0.5458139", "0.54525465", "0.54286104", "0.54272556", "0.5421725", "0.5421557", "0.5390778", "0.53880054", "0.5387267", "0.5364159", "0.5364121", "0.5360811", "0.5356435", "0.53508663", "0.53504926", "0.53449017", "0.5335162", "0.5334382", "0.5331971", "0.53210676", "0.531988", "0.53169304", "0.53152215", "0.5313708", "0.53036076", "0.52851015", "0.52823424", "0.5278069", "0.52493805", "0.52385896", "0.52318263", "0.5230262", "0.52279824", "0.52243346", "0.5224061", "0.52186716", "0.5212888", "0.5212244", "0.52119946", "0.5198007", "0.5196583", "0.51939195", "0.5187022", "0.51819056", "0.51818573", "0.5179425", "0.51792705", "0.51747537", "0.5167434", "0.5162587", "0.51515996", "0.514822", "0.51461446", "0.514448", "0.51372015", "0.5131495", "0.5129077", "0.5127173", "0.5126614", "0.51253104", "0.5121963", "0.511779", "0.51163375", "0.51160437", "0.51147854", "0.51098084", "0.51089597", "0.5105133", "0.5102285", "0.5101745", "0.5101745", "0.5101745", "0.51004386", "0.5099206", "0.5098707", "0.5097059", "0.50933266", "0.50917184", "0.50886965", "0.50866956", "0.5086004", "0.50809586", "0.50809586", "0.50736016", "0.50732815", "0.5059736", "0.505661" ]
0.0
-1
Standard k fold cross validation.
def k_fold_cross_validation(random_state: np.random.RandomState, num_splits: int, indices: np.ndarray, **kwargs: Any ) -> List[Tuple[np.ndarray, np.ndarray]]: shuffle = kwargs.get('shuffle', True) cv = KFold(n_splits=num_splits, random_state=random_state if shuffle else None, shuffle=shuffle) splits = list(cv.split(indices)) return splits
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def k_folds_cross_validate(self, k):\n start_time = time.time()\n partition_size = int(self.num_samples / k)\n partitions = [\n (i * partition_size, (i + 1) * partition_size) for i in range(k)\n ]\n average_accuracy = 0.0\n for start, end in partitions:\n validation_input_set = self.input[start:end, :] # subset of input of size k (k samples)\n validation_output_set = self.output[start:end] # subset of output of size k (k outputs)\n training_input_set = np.delete(self.input, np.s_[start:end], 0) # subset of input excluding validation set\n training_output_set = np.delete(self.output, np.s_[start:end], 0) # subset of output excluding validation\n\n self.fit(training_input_set, training_output_set)\n accuracy = self.evaluate_acc(validation_input_set, validation_output_set)\n # print('Accuracy: ', accuracy) # accuracy of each fold\n average_accuracy += accuracy\n average_accuracy /= k\n print('Average accuracy: ', average_accuracy)\n print('Runtime: ', time.time() - start_time, 'seconds')", "def k_fold_cross_validation(X, K):\n\tfor k in xrange(K):\n\t\ttraining = [i for i, x in enumerate(X) if i % K != k]\n\t\tvalidation = [i for i, x in enumerate(X) if i % K == k]\n\t\tyield training, validation", "def k_fold_cross_validation(X, K, randomise = False):\n\tif randomise: from random import shuffle; X=list(X); shuffle(X)\n\tfor k in xrange(K):\n\t\ttraining = [x for i, x in enumerate(X) if i % K != k]\n\t\tvalidation = [x for i, x in enumerate(X) if i % K == k]\n\t\tyield training, validation", "def cross_validation(self, k=5):\n test_errors, train_errors = [], []\n\n # For leave out cross validation k = n\n if k == -1:\n k = len(data)\n\n for _ in range(k):\n shuffled_data = data.copy()\n\n # Do not shuffle data for leave one out cross validation\n if k != -1:\n # Copy the original data and shuffle it\n np.random.shuffle(shuffled_data)\n\n # Divide it into k folds\n split = int(((k - 1) / k) * len(data))\n train_data = shuffled_data[:split]\n test_data = shuffled_data[split:]\n\n # Find train_X, train_Y, test_X, test_Y\n # KNN requires access to training data to determine error\n self.train_X = np.asarray(train_data[:, :6])\n self.train_Y = np.asarray(train_data[:, 7])\n\n test_X = np.asarray(test_data[:, :6])\n test_Y = np.asarray(test_data[:, 7])\n\n # Use (k-1) part for training and 1 part for testing\n self.fit(self.train_X, self.train_Y)\n test_error = self.compute_error(test_X, test_Y)\n train_error = self.compute_error(self.train_X, self.train_Y)\n test_errors.append(test_error)\n train_errors.append(train_error)\n\n # Average the error\n avg_train_error = np.round(np.average(np.asarray(train_errors), axis=0), 3)\n avg_test_error = np.round(np.average(np.asarray(test_errors), axis=0), 3)\n print(\"The average error of {} is - Train : {}\\tTest : {} Overfit : {}\".format(self.method,\n avg_train_error, avg_test_error,\n avg_test_error > avg_train_error))\n return avg_test_error, avg_train_error", "def cross_validation(self):\r\n kfold = KFold(10, shuffle=True, random_state=1)\r\n data = self.read_data()\r\n # error from each kth iteration\r\n errors = []\r\n for train, test in kfold.split(data):\r\n\r\n #Splitting into test and training data\r\n X_test, Y_test = data[test][:, 1], data[test][:, 2]\r\n X_train, Y_train = data[train][:, 1], data[train][:, 2]\r\n\r\n #Training on the split data\r\n weights, design_matrix = self.train(X_train, Y_train)\r\n\r\n y_pred = self.make_prediction(X_test, weights)\r\n self.plot(y_true=Y_test, y_pred=y_pred, x=X_test)\r\n\r\n #error matrix\r\n 
errors.append(np.mean(y_pred - Y_test) ** 2)\r\n\r\n #cross-validation parameter taken as mean of errors obtained from each iteration\r\n print(\"%0.10f mean with a standard deviation of %0.10f across the k-folds\" % (np.mean(errors), np.std(errors)))", "def k_cross_fold_validation( data, labels, k=5):\n rf = RandomForestClassifier(n_estimators=25)\n predictions, indexes = [], []\n size = data.shape[0]\n step_size = int(size / 5)\n for i in range(k):\n indexes.append(range(i*k, min(i*k + k, size)))\n xtrain = np.array(data)\n ytrain = np.array(labels)\n\n # picks test slices\n xtest = xtrain[i*k:i*k + k]\n ytest = ytrain[i*k:i*k + k]\n\n # removes test slices from the training sets\n xtrain = np.delete(xtrain, np.s_[i*k:i*k + k], axis=0)\n ytrain = np.delete(ytrain, np.s_[i*k:i*k + k], axis=0)\n\n \n rf = rf.fit(xtrain, ytrain)\n prediction = rf.predict(xtest)\n\n predictions.append(prediction.tolist())\n \n return np.array(predictions), np.array(indexes)", "def cross_validation(whole_train_data, whole_train_labels, k, k_fold):\n accuracies = []\n for i in range(k_fold):\n train_data, train_labels, validation_data, validation_labels = split_train_and_validation(whole_train_data, whole_train_labels, i, k_fold)\n accuracy = knn(train_data, train_labels, validation_data, validation_labels, k)\n accuracies.append(accuracy)\n avg_accuracy = np.mean(accuracies)\n return avg_accuracy", "def cross_validate(cv, x, y, k=1):\n indices = np.arange(len(x))\n np.random.shuffle(indices)\n stepsize = int(len(x) / cv)\n metrics = np.zeros(4)\n for i in range(cv):\n logging.info(f\"Cross-validation fold {i+1}\")\n\n # Slice test set out of data\n test_indices = indices[i*stepsize:i*stepsize+stepsize]\n x_test = x[test_indices]\n y_test = y[test_indices]\n\n # Everything else is the training set\n x_train = np.copy(x)\n x_train = np.delete(x_train, test_indices, axis=0)\n y_train = np.copy(y)\n y_train = np.delete(y_train, test_indices, axis=0)\n\n metrics += evaluate(knn(x_test, x_train, y_train, k), y_test)\n metrics /= cv\n\n print(metrics)\n return metrics", "def cross_validation(self, k_folds, **inputs):\n self.cv_stats, self.cv_traces = [], []\n self.k_folds = k_folds\n inputs = self._clean_inputs(inputs)\n for i, fold in enumerate(k_folds):\n train, test = fold\n input_train, input_test = {}, {}\n for name, data in inputs.items():\n if name in self.cv_vars:\n input_train[name] = data[train]\n input_test[name] = data[test]\n else:\n input_train[name] = data\n input_test[name] = data\n trace = self.run(**input_train)\n stats = self.calculate_statistics(trace, **input_test)\n self.cv_traces.append(trace)\n self.cv_stats.append(stats)\n return self.cv_traces, self.cv_stats", "def kfold_cross_validation(X, n_splits=5):\n\n #Define variables\n X_train_folds = []\n X_test_folds = []\n indexes = list(range(len(X)))\n index = 0\n\n #Create folds\n for i in range(n_splits):\n test = []\n train = []\n #Determine how many to put in test\n if((len(X) % n_splits) > i):\n numTest = len(X) // n_splits +1\n else:\n numTest = len(X) // n_splits\n for j in range(numTest):\n if(index < len(X)):\n test.append(index)\n indexes.pop(indexes.index(index))\n index = index + 1\n for index1 in indexes:\n train.append(index1)\n X_test_folds.append(test)\n X_train_folds.append(train)\n indexes = list(range(len(X)))\n\n return X_train_folds, X_test_folds", "def kFoldCrossVal(self, k):\n # shuffle\n df = self.df.sample(frac=1)\n # chunk into k folds\n #fold_idxs = [range(i, self.len_, k) for i in xrange(k)] TODO: pointers ?\n df_arr 
= [df[i::k] for i in xrange(k)]\n\n for i, validate in enumerate(df_arr):\n train_list = df_arr[:i] + df_arr[i+1:]\n train = pd.concat(train_list, axis=0)\n assert len(train) + len(validate) == len(df)\n yield (train, validate)", "def fold(self, k):\n if not (0 < k < 5):\n raise ValueError('almazan cross-evaluation proposes a k=4 k-cross evaluation, {} out of range'.format(k))\n test_idcs = [idx for idx, fold in enumerate(self.fold_idcs) if fold == k]\n train_idcs = list(set(range(len(self))).difference(set(test_idcs)))\n return self.sub_set(train_idcs), self.sub_set(test_idcs)", "def kfold_cross_validation(X, n_splits=5):\r\n X_train_folds = []\r\n X_test_folds = []\r\n\r\n x_len = len(X)\r\n \r\n fold_modulus = x_len % n_splits\r\n \r\n start_idx = 0\r\n for fold in range(n_splits): \r\n\r\n if fold < fold_modulus:\r\n fold_size = x_len // n_splits + 1\r\n else:\r\n fold_size = x_len // n_splits\r\n\r\n fold_end = (start_idx + fold_size) - 1\r\n\r\n tmp = []\r\n for i in range(start_idx, fold_end + 1):\r\n tmp.append(i)\r\n X_test_folds.append(tmp)\r\n\r\n tmp = []\r\n for i in range(0, x_len):\r\n if i not in X_test_folds[fold]:\r\n tmp.append(i)\r\n X_train_folds.append(tmp)\r\n\r\n start_idx = fold_size + start_idx \r\n\r\n return X_train_folds, X_test_folds", "def crossValidate(x_training_data, y_training_data, test_size_percentage, k_values):\n data_length = len(x_training_data)\n foldSize = int(round(data_length * test_size_percentage)) # size of each temporary test data\n groups = int(data_length/foldSize + 1) # # of groups\n\n best_score = 0\n best_k = 0\n\n for k in k_values: # Test different values of k\n score = 0\n for i in range(0, data_length, foldSize): # Switch section of test data\n \n x_temp_test = x_training_data[i:i+foldSize] # get temporary data to test\n known_y_test = y_training_data[i:i+foldSize] # we already know their labels\n x_temp_training = np.append(x_training_data[0:i], x_training_data[i+foldSize:], axis=0) # the rest is our temporary training data\n y_temp_training = np.append(y_training_data[0:i], y_training_data[i+foldSize:], axis=0)\n\n y_temp_test = knnForAll(x_temp_training, y_temp_training, x_temp_test, k) # labels determined for a current k value\n\n for i in range(len(known_y_test)): # Score how well this value of k did based \n if y_temp_test[i] == known_y_test[i]: # on how well it matches the known labels\n score += 1\n\n print(\"k:\",k,\"-- % correct: \",\"{:0.2f}\".format(score/data_length)) # print accuracy for training data \n if score > best_score: # Choose the best k value up to this point\n best_score = score\n best_k = k\n\n return best_k", "def cross_valid(model,x,folds,metric,verbose=True): \r\n\r\n score=[]\r\n \r\n\r\n kf = KFold(folds,shuffle=False,random_state=0) \r\n\r\n\r\n i=0\r\n for train_index, test_index in kf.split(x):\r\n\r\n xtrain = x[train_index,:]\r\n xtest = x[test_index,:]\r\n\r\n model.fit(xtrain[:,:-1],xtrain[:,-1])\r\n\r\n ypred = model.predict(xtest[:,:-1])\r\n\r\n ytrue= xtest[:,-1] \r\n \r\n \r\n if metric == 'mae':\r\n score.append(mae(ytrue,ypred))\r\n elif metric == 'mse':\r\n score.append(mse(ytrue,ypred))\r\n elif metric == 'rrmse':\r\n score.append(rrmse(ytrue,ypred))\r\n\r\n else:\r\n score.append(rmse(xtest[:,-1],ypred))\r\n\r\n if verbose:\r\n print('-'*30)\r\n print(f'\\nFold {i+1} out of {folds}')\r\n print(f'{metric}: {score[i]}')\r\n\r\n i+=1\r\n\r\n if verbose:\r\n print(f'\\n Overall Score:')\r\n print(f'{metric}: Mean: {np.mean(score)} Std: {np.std(score)}')\r\n\r\n\r\n return score", "def 
cross_validation(features, target, n_neighbors=5, n_folds=5):\n clf = KNeighborsClassifier(n_neighbors=n_neighbors)\n cv_scores = cross_val_score(clf, features, target, cv=n_folds)\n # print each cv score (accuracy) and average them\n print('Cross Validation Scores Mean: %.2f' % (np.mean(cv_scores) * 100))", "def nestedKFoldValidation(k, X, y, kernel):\n\n print(\"running k-folds cross validation to find best slack C\")\n\n # list of slack variable values used to test the SVM with\n C_list = [0.001, 0.01, 0.1, 1.0, 10.0, 100, 1000]\n # number of iterations to run the bootstrapping\n B = 30\n\n sample_list = list(range(len(y)))\n np.random.shuffle(sample_list)\n\n # splitting list into k folds\n foldSize = len(y)//k\n folds = [sample_list[i*foldSize:(i+1)*foldSize] for i in range(k)]\n\n # add remaining items to the last fold\n folds[-1] += sample_list[foldSize*k:]\n\n test_results = {}\n for test in folds:\n print('==== next outer fold ====')\n # python dictionary to keep track of errors\n error_dict = {}\n for validation in folds:\n # do not want identical test and validation folds\n if validation == test:\n continue\n\n # training data is all folds except for test and validation\n train = [f for f in folds if f != test and f != validation][0]\n # find errors across all parameters\n for c in C_list:\n alg = SVC(C=c, kernel=kernel)\n alg.fit(X[train], y[train])\n err = np.mean(y[validation] != alg.predict(X[validation]))\n # keep track of all errors\n if c not in error_dict:\n error_dict[c] = {}\n error_dict[c]['err'] = []\n error_dict[c]['model'] = alg\n error_dict[c]['err'].append(err)\n\n # finding the best parameter from the inner loop\n bestC = -1\n bestError = 1.0\n for c in error_dict:\n print('c=', c, 'err=', np.mean(error_dict[c]['err']))\n err = np.mean(error_dict[c]['err'])\n if err < bestError:\n bestError = err\n bestC = c\n print('best c from inner loop is', bestC)\n\n # testing the best model from the inner loop against test data\n train_err = np.mean(y[test] != alg.predict(X[test]))\n if bestC not in test_results:\n test_results[bestC] = []\n test_results[bestC].append(train_err)\n\n for c in test_results:\n print('c=', c,\n 'mean error=', np.mean(test_results[c]),\n 'stddev of errors=', np.std(test_results[c]))", "def kFoldCrossValidation(self, n_splits ):\n X = self.X\n y = self.y\n\n k_fold = KFold(n_splits)\n model = self.model\n\n for train, test in k_fold.split(X):\n model.fit(X[train], y[train])\n p = model.predict( X[test] )\n # Add line for scores\n\n return model #return scores here?", "def k_fold_cross_validation(data, K, randomise = False, random_seed=None, structured=None):\n if randomise:\n import random\n if random_seed != None:\n random.seed(random_seed)\n random.shuffle(data.data)\n datatr=data.make_clone()\n datatst=data.make_clone()\n\n for k in xrange(K):\n datatr.data = []\n index_tr = []\n datatst.data = []\n index_tst = []\n for i, x in enumerate(data.data) :\n if i % K != k :\n datatr.data.append(x)\n index_tr.append(i)\n else :\n datatst.data.append(x)\n index_tst.append(i)\n yield datatr, datatst, index_tr, index_tst", "def generate_k_folds(dataset, k):\n\n # TODO: finish this.\n folds = []\n dataset = np.concatenate((dataset[0], np.array(dataset[1]).reshape(-1,1)), axis=1)\n dataset_shape = dataset.shape\n shape_test_set = int(round(dataset_shape[0]/k,0))\n split_dataset = np.array_split(dataset,k,axis=0)\n for i in range(k):\n test_set = split_dataset[i]\n c = [k for j,k in enumerate(split_dataset) if j!=i]\n training_set = 
np.concatenate(c,axis=0)\n if test_set.shape[0] != shape_test_set:\n step = test_set.shape[0] - shape_test_set\n test_set = test_set[:-step,:]\n training_set = np.concatenate((training_set, test_set[-step:,:]), axis=0)\n r_test_set = (test_set[:,:-1], list(test_set[:,-1]))\n r_train_set = (training_set[:,:-1], list(training_set[:,-1]))\n folds.append((r_train_set, r_test_set))\n return folds", "def cross_validation(y, x, K, model, lambda_, gamma, max_iters, initial_w, num_epochs=1, shuffle=True, seed=1):\n data_size = len(y) ## Number of data points\n x = np.array(x)\n y = np.array(y)\n\n count = 0\n batch_size = int(data_size/K) ## Data size assumed to multiple of K\n err_val = np.zeros(num_epochs*K)\n err_train = np.zeros(num_epochs*K)\n\n # Set random seed such that model comparison is uniform and consistent\n np.random.seed(seed)\n\n # Number of times K-fold cross validation is repeated\n for epoch in range(num_epochs):\n print(\"epoch = {}\".format(epoch))\n # Randomize to remove ordering in the input data\n if shuffle == True:\n shuffle_ind = np.random.permutation(np.arange(data_size))\n y_shuffle = y[shuffle_ind]\n x_shuffle = x[shuffle_ind]\n else:\n y_shuffle = y\n x_shuffle = x\n\n # K-fold cross validation\n for k in range(0,K):\n print(\"k = {}\".format(k))\n\n # Select validation data in kth fold\n start_val_ind = k*batch_size\n end_val_ind = (k+1)*batch_size\n y_val = y_shuffle[start_val_ind: end_val_ind]\n x_val = x_shuffle[start_val_ind: end_val_ind]\n\n # Select training data in kth fold\n train_ind = np.setxor1d(range(0,data_size),range(start_val_ind,end_val_ind))\n y_train = y_shuffle[train_ind]\n x_train = x_shuffle[train_ind]\n\n # Logistic regression and reg_logistic_regression models\n if ((model=='logistic_regression') or (model=='reg_logistic_regression')):\n if model=='logistic_regression': ## Train the model\n w, loss = logistic_regression((1+y_train)/2, x_train, initial_w, max_iters, gamma)\n elif model=='reg_logistic_regression':\n w, loss = reg_logistic_regression((1+y_train)/2, x_train, lambda_, initial_w, max_iters, gamma)\n # Predict on validation and training data\n y_pred_val = np.ones(len(y_val))\n y_pred_val[sigmoid(np.dot(x_val,w)) <= 0.5] = -1\n y_pred_train = np.ones(len(y_train))\n y_pred_train[sigmoid(np.dot(x_train,w)) <= 0.5] = -1\n\n\n # Least squares models\n else:\n if model == 'least_squares': ## Least squares regression using normal equation\n w, loss = least_squares(y_train, x_train)\n elif model == 'least_squares_GD': ## Least squares regression using gradient descent\n w, loss = least_squares_GD(y_train, x_train, initial_w, max_iters, gamma)\n elif model == 'least_squares_SGD': ## Least squares regression using stochastic gradient descent\n w, loss = least_squares_SGD(y_train, x_train, initial_w, max_iters, gamma)\n elif model == 'ridge_regression': ## Ridge regression\n w, loss = ridge_regression(y_train, x_train, lambda_)\n else:\n print(\"Unknown model\")\n # Predict on validation and training data\n y_pred_val = predict_labels(w, x_val) ## Predict on validation data\n y_pred_train = predict_labels(w, x_train) ## Predict on training data\n\n err_val[count] = sum(y_pred_val!=y_val)/len(y_val) ## Accuaracy on Validation data\n err_train[count] = sum(y_pred_train!=y_train)/len(y_train) ## Accuaracy on training data\n count+=1\n\n ## Average error over all folds of cross validation\n avg_val_err = np.mean(err_val)\n avg_train_err = np.mean(err_train)\n\n ## Return optimal weight, loss, average validation error, average training error\n 
return w, loss, avg_val_err, avg_train_err", "def crossValidate(k, epochs, hyperparams, data, trainFunc, testFunc, report=None):\n \n if not (report == None):\n tabs = '\\t' * report;\n print tabs, 'Performing %d-fold cross validation...' % k;\n \n # create vars to save the best hyperparameters and their performance\n bestTheta = None;\n bestRate = float(\"-inf\");\n \n # create queue for worker threads to post results to\n queue = mp.Queue();\n \n # create train/test folds\n numPerFold = len(data) // k;\n numLeftOver = len(data) % k;\n folds = [data[i*numPerFold:i*numPerFold+numPerFold] for i in range(0, k)];\n if numLeftOver > 0:\n folds[-1].extend(data[-numLeftOver:]); \n \n # create a list of tuples; each tuple defining a unique assignment of hyperparameters \n thetas = list(itertools.product(*hyperparams));\n \n # create worker threads try all combinations of hyperparameters \n workers = []; \n for theta in thetas: \n p = mp.Process(target=cvWorker, args=(epochs, theta, folds, trainFunc, \\\n testFunc, report, queue));\n workers.append(p)\n \n # start worker threads and wait for them to finish\n for p in workers:\n p.start();\n for p in workers:\n p.join()\n \n if not (report == None):\n print tabs, 'All worker threads have terminated.';\n \n # read results out of queue \n while not queue.empty():\n [theta, rate] = queue.get();\n if rate > bestRate:\n bestTheta = theta\n bestRate = rate;\n \n return bestTheta;", "def executeKFoldValidation(train_data, train_labels, val_data, val_labels, test_data, test_labels,\n images_47, labels_47, images_84, labels_84, all_unseen_images, all_unseen_labels):\n if run_k_fold_validation:\n print(\"In executingKFoldValidation\")\n\n # this is doing it manually:\n kfold = StratifiedKFold(n_splits=k_fold_num, shuffle=True)\n\n test_scores_list = []\n unseen_47_scores_list = []\n unseen_84_scores_list = []\n all_unseen_scores_list = []\n\n test_matrix_list = []\n matrix_47_list = []\n matrix_84_list = []\n all_matrix_list = []\n kf_counter = 0\n\n for train, test in kfold.split(train_data, train_labels):\n kf_counter += 1\n print('KFold #:', kf_counter)\n\n model = buildClassifier()\n # fit the model\n model.fit(train_data[train],\n train_labels[train],\n epochs=epochs,\n validation_data=(val_data, val_labels),\n batch_size=batch_size\n )\n\n unseen_47_scores = model.evaluate(images_47, labels_47, batch_size=batch_size)\n unseen_47_scores_list.append(unseen_47_scores[1] * 100)\n unseen_84_scores = model.evaluate(images_84, labels_84, batch_size=batch_size)\n unseen_84_scores_list.append(unseen_84_scores[1] * 100)\n test_scores = model.evaluate(test_data, test_labels, batch_size=batch_size)\n test_scores_list.append(test_scores[1] * 100)\n all_unseen_score = model.evaluate(all_unseen_images, all_unseen_labels, batch_size=batch_size)\n all_unseen_scores_list.append(all_unseen_score[1] * 100)\n\n # show confusion matrix\n test_confusion_matrix, confusion_matrix_47, confusion_matrix_84, all_confusion_matrix = \\\n gettingKFoldConfusionMatrix(test_data, test_labels, images_47, labels_47, images_84, labels_84,\n all_unseen_images,\n all_unseen_labels, kf_counter)\n test_matrix_list.append(test_confusion_matrix)\n matrix_47_list.append(confusion_matrix_47)\n matrix_84_list.append(confusion_matrix_84)\n all_matrix_list.append(all_confusion_matrix)\n\n test_scores_mean = np.mean(test_scores_list)\n test_scores_std = np.std(test_scores_list)\n unseen_47_mean = np.mean(unseen_47_scores_list)\n unseen_47_std = np.std(unseen_47_scores_list)\n unseen_84_mean = 
np.mean(unseen_84_scores_list)\n unseen_84_std = np.std(unseen_84_scores_list)\n all_unseen_mean = np.mean(all_unseen_scores_list)\n all_unseen_std = np.std(all_unseen_scores_list)\n\n print(\"Test Scores: \" + str(test_scores_list))\n print(\"Test Scores Mean: \" + str(test_scores_mean))\n print(\"Test Scores Std: \" + str(test_scores_std))\n print(\"Unseen 47 Scores: \" + str(unseen_47_scores_list))\n print(\"Unseen 47 Scores Mean: \" + str(unseen_47_mean))\n print(\"Unseen 47 Scores Std: \" + str(unseen_47_std))\n print(\"Unseen 84 Scores: \" + str(unseen_84_scores_list))\n print(\"Unseen 84 Scores Mean: \" + str(unseen_84_mean))\n print(\"Unseen 84 Scores Std: \" + str(unseen_84_std))\n print(\"All Unseen Scores: \" + str(all_unseen_scores_list))\n print(\"All Unseen Scores Mean: \" + str(all_unseen_mean))\n print(\"All Unseen Scores Std: \" + str(all_unseen_std))\n print(\"Test Confusion Matrices: \" + str(test_matrix_list))\n print(\"47 Confusion Matrices: \" + str(matrix_47_list))\n print(\"84 Confusion Matrices: \" + str(matrix_84_list))\n print(\"All Confusion Matrices: \" + str(all_matrix_list))\n\n excel_headers.append(\"Test Scores Mean\")\n excel_dictionary.append(test_scores_mean)\n excel_headers.append(\"Test Scores Std\")\n excel_dictionary.append(test_scores_std)\n excel_headers.append(\"Unseen 47 Scores Mean\")\n excel_dictionary.append(unseen_47_mean)\n excel_headers.append(\"Unseen 47 Scores Std\")\n excel_dictionary.append(unseen_47_std)\n excel_headers.append(\"Unseen 84 Scores Mean\")\n excel_dictionary.append(unseen_84_mean)\n excel_headers.append(\"Unseen 84 Scores Std\")\n excel_dictionary.append(unseen_84_std)\n excel_headers.append(\"All Unseen Scores Mean\")\n excel_dictionary.append(all_unseen_mean)\n excel_headers.append(\"All Unseen Scores Std\")\n excel_dictionary.append(all_unseen_std)\n\n plt.plot(test_scores_list, color='red', label='Testing Scores')\n plt.plot(unseen_47_scores_list, color='blue', label='Unseen 47 Scores')\n plt.plot(unseen_84_scores_list, color='black', label='Unseen 84 Scores')\n plt.plot(all_unseen_scores_list, color='green', label='Unseen Scores')\n plt.xlabel('Folds')\n plt.ylabel('Accuracy')\n plt.legend()\n\n plt.show()", "def cross_validation(y, tx, k_fold, fit_function, score_function, seed=1, **fit_function_kwargs):\n k_indices = build_k_indices(y, k_fold, seed)\n score_te = 0\n\n for k in range(k_fold):\n te_indices = k_indices[k]\n tr_indices = k_indices[~(np.arange(k_indices.shape[0]) == k)].reshape(-1)\n\n y_te, x_te = y[te_indices], tx[te_indices]\n y_tr, x_tr = y[tr_indices], tx[tr_indices]\n\n w, fit_loss = fit_function(y_tr, x_tr, **fit_function_kwargs)\n score_te += score_function(y_te, x_te, w)\n\n return score_te/k_fold", "def cross_validation(ww_data, rw_data, k):\n # shuffle the data\n np.random.shuffle(ww_data)\n np.random.shuffle(rw_data)\n\n # calculate cutoff for each partition\n cutoff = ww_data.shape[0]//k\n redArr = []\n whiteArr = []\n tmpStart = 0\n tmpEnd = cutoff\n \n # create a list of k partitions for red and white data\n for x in range(k):\n if x != k-1:\n redArr.append(rw_data[tmpStart:tmpEnd])\n whiteArr.append(ww_data[tmpStart:tmpEnd])\n else:\n redArr.append(rw_data[tmpStart:])\n whiteArr.append(ww_data[tmpStart:])\n tmpStart+=cutoff\n tmpEnd+=cutoff\n\n\n redTraining = np.array(())\n whiteTraining = np.array(())\n accuracy = 0\n count = 0\n \n for x in range(k):\n # creates Test data set\n tmpRedTest = redArr[x]\n tmpWhiteTest = whiteArr[x]\n \n # creates list of partitons for training data set\n 
if x!=k-1:\n redTrainingList = redArr[:x] + redArr[x+1:]\n whiteTrainingList = whiteArr[:x] + whiteArr[x+1:]\n else:\n redTrainingList = redArr[:x]\n whiteTrainingList = whiteArr[:x]\n\n # stacks each training list into one nparray\n redTraining = np.vstack(redTrainingList)\n whiteTraining = np.vstack(whiteTrainingList)\n\n accuracy += experiment(whiteTraining, redTraining, tmpWhiteTest, tmpRedTest)\n count += 1\n # calculates accuracy and returns it\n result = accuracy/count\n return result", "def crossValidate(dataset, folds):\n\tshuffle(dataset)\n\tcv_results = []\n\tprecision_recall_acc = []\n\tfoldSize = int(len(dataset)/folds)\n\tfor i in range(0,len(dataset),foldSize):\n\t\t# preparing data\n\t\tvalD = dataset[i:i+foldSize]\n\t\ttestD = dataset[:i]+dataset[i+foldSize:] #list(set(dataset)-set(dataset[i:i+foldSize]))\n\t\t# Training\n\t\tprint(\"*\"*60)\n\t\tprint(\"Training on data-set size \"+str(len(testD))+\" of batch \"+str(i/(foldSize)))\n\t\tclassi = trainClassifier(testD)\n\t\t# Prediction on validation data \n\t\tprint(\"Predicting on heldout data-set size...\"+str(len(valD))+\" of batch \"+str(i/(foldSize)))\n\t\ty_true = list(map(lambda t: t[1], valD))\n\t\ty_pred = predictLabels(valD,classi)\t\t\n\t\t# Performance Metrics\t\t\n\t\t# average based on macro as it calculate metrics for each label, and find their unweighted mean.\n\t\tprecision_recall = list(precision_recall_fscore_support(y_true, y_pred, average='macro'))\n\t\tacc = accuracy_score(y_true,y_pred)\n\t\tprecision_recall[-1] = acc\n\t\tprint(precision_recall)\n\t\tprecision_recall_acc.append(precision_recall)\n\tdf = pd.DataFrame(precision_recall_acc,columns = [\"Precision\",\"Recall\",\"F1 score\",\"Accuracy Score\"])\n\tprint(df)\n\tcv_results = df.mean().tolist()\n\treturn cv_results", "def cross_validation(self, X, y, n_folds=5, shuffle=True, evaluation_metric='top30'):\n # WE DON'T USE THIS\n # We use basic train-test split to evaluate or models as a first approach\n # We will then use CV for searching the best parameters via random search\n pass", "def cross_validate(model, X, y, folds=5, epochs=5, batch_size=32, callbacks=None, shuffle=False, random_state=None):\n\n # Initalize KFold\n kfolds = KFold(n_splits=folds, random_state=random_state, shuffle=shuffle)\n all_metrics = []\n\n # To build the model\n if type(model).__name__ == 'SVDpp':\n model.implicit_feedback(X[:10, :])\n model(X[:10, :])\n\n # Workaround to reset weights after each fold fit\n weights = model.get_weights()\n i = 1\n\n for train, val in kfolds.split(X, y):\n\n # Gather implicit feedback if model is SVD++\n if type(model).__name__ == 'SVDpp':\n model.implicit_feedback(X[train])\n\n print(f'\\nFitting on Fold {i}')\n # Train and evaluate metrics\n history = model.fit(\n X[train], y[train], batch_size=batch_size, epochs=epochs, callbacks=callbacks)\n print(f'\\nEvaluating on Fold {i}')\n fold_score = history.model.evaluate(X[val], y[val])\n all_metrics.append(fold_score)\n\n # Reset Weights\n model.set_weights(weights)\n\n i += 1\n\n all_metrics = np.array(all_metrics)\n\n for i, metric in enumerate(model.metrics_names):\n print(f'Mean {metric.capitalize()} : {np.mean(all_metrics.T[i])}')\n\n return all_metrics", "def _kfold_cross_val(self, training_elms: np.ndarray) -> None:\n kf = model_selection.KFold(\n n_splits=config.folds, shuffle=True, random_state=config.seed\n )\n self.df[\"elm_events\"] = training_elms\n self.df[\"fold\"] = -1\n for f_, (_, valid_idx) in enumerate(kf.split(X=training_elms)):\n self.df.loc[valid_idx, \"fold\"] 
= f_", "def cross_validation(self, x, t, k=5):\n print(\"Cross validation of the SVM Model...\")\n\n # Initialize best error / hyperparameters\n best_error = float('inf')\n best_reg = 0\n best_deg = 0\n\n # Cross-validation 80-20\n N = len(x)\n N_train = math.floor(0.8 * N)\n t = t.reshape((N,))\n\n #Initialize the grid search\n\n log_min_reg = np.log(0.001)\n log_max_reg = np.log(1000)\n reg_list = np.logspace(log_min_reg, log_max_reg, num=7, base=math.e)\n\n min_deg = 1\n max_deg = 4\n\n for deg in range(min_deg, max_deg):\n for reg in reg_list:\n errors = np.zeros(k)\n for j in range(k):\n map_index = list(zip(x, t))\n random.shuffle(map_index)\n random_x, random_t = zip(*map_index)\n\n train_x = random_x[:N_train]\n valid_x = random_x[N_train:]\n train_t = random_t[:N_train]\n valid_t = random_t[N_train:]\n\n self.model = SVC(gamma='auto', kernel='poly', degree=deg, C=reg, cache_size=1000)\n self.train(train_x, train_t)\n\n error_valid = np.array([self.error(x_n, t_n) for t_n, x_n in zip(valid_t, valid_x)])\n errors[j] = error_valid.mean()\n\n mean_error = np.mean(errors)\n print(mean_error)\n if mean_error < best_error:\n best_error = mean_error\n best_reg = reg\n best_deg = deg\n print(\"The new best hyper parameters are : \", best_reg, best_deg)\n\n print(\"Best hyper parameters are : \", best_reg, best_deg)\n print(\"Validation error : \", 100 * best_error, \"%\")\n self.model = SVC(gamma='auto', kernel='poly', degree=best_deg, C=best_reg)\n self.train(x, t)", "def _cross_val(self, X, y, classifier, nfolds=10):\n\n # Make sure dimensions agree\n assert X.shape[0] == y.shape[0], \"Number of observations should equal\" \\\n \"number of labels.\"\n\n # Concatenate data in order to shuffle without changing X-y correspondence\n data = np.c_[X, y]\n\n # Shuffle data (swaps rows when 2D - works OK for us)\n np.random.seed(42)\n np.random.shuffle(data)\n\n # Split data into (almost) equal folds (returns a list of arrays)\n # and we cast the list into a numpy array in order to do list indexing\n data = np.array(np.array_split(data, nfolds))\n\n # Do the k-fold cross-validation\n accs = []\n for k in range(nfolds):\n # Get current test set\n X_k_test = data[k][:, :-1]\n y_k_test = data[k][:, -1]\n\n # Get remaining indices and current training set\n remaining_idx = [i for i, v in enumerate(data) if i != k]\n X_k_train = np.vstack(data[remaining_idx])[:, :-1]\n y_k_train = np.vstack(data[remaining_idx])[:, -1]\n\n # Fit and predict with classifier\n classifier.fit(X_k_train, y_k_train)\n yhat = classifier.predict(X_k_test)\n\n # Store error rate\n accs.append(self._accuracy(y_k_test, yhat))\n\n return np.array(accs)", "def __implement_cross_validation(self, X, y, model):\n\n skfolds = StratifiedKFold(n_splits=3, random_state=42)\n\n for train_index, test_index in skfolds.split(X, y):\n clone_clf = clone(model)\n X_train_folds = X[train_index]\n y_train_folds = y[train_index]\n X_test_fold = X[test_index]\n y_test_fold = y[test_index]\n\n clone_clf.fit(X_train_folds, y_train_folds)\n y_pred = clone_clf.predict(X_test_fold)\n n_correct = sum(y_pred == y_test_fold)\n message = \"ratio of correct predictions: \", n_correct / len(y_pred)\n self.__logger.info(message)", "def k_fold_cross_validation(self, K: int=5, rem_day4:bool=True, smote: bool=True, shuffle: bool=False) -> dict:\n k_folds = {}\n\n for k in range(K):\n k_folds[k] = {\"X_train\": [], \"X_test\": [], \"y_test\": [], \"y_train\": []}\n for df in self.dataframes:\n header = set(df['label'].tolist())\n # Removing Day 4\n trails = 
set()\n for i in header:\n trail = eval(i)\n if trail[0] != 4:\n trails.add(i)\n else:\n if not(rem_day4):\n trails.add(i)\n\n header = trails\n\n for trial in header:\n # geting rows with (day, Trail)-label\n rows = df.loc[df['label'] == trial].to_numpy()\n # getting response label\n response = rows[0][-1]\n # getting the actual data from the matrix\n rows = np.delete(rows, np.s_[0,1,-1], axis=1)\n\n chunks = np.array_split(rows, K)\n for chunk in chunks[k]:\n k_folds[k][\"X_test\"].append(chunk.astype(np.float))\n k_folds[k][\"y_test\"].append(response)\n\n train_chunks = np.delete(chunks, k, axis=0)\n for chunk in train_chunks:\n for ch in chunk:\n k_folds[k][\"X_train\"].append(ch.astype(np.float))\n k_folds[k][\"y_train\"].append(response)\n\n\n for k in range(K):\n\n self.X_test = np.asarray(k_folds[k][\"X_test\"])\n self.y_test = np.asarray(k_folds[k][\"y_test\"])\n self.X_train = np.asarray(k_folds[k][\"X_train\"])\n self.y_train = np.asarray(k_folds[k][\"y_train\"])\n\n if smote:\n self.use_SMOTE()\n\n if shuffle:\n self.shuffle_labels()\n\n k_folds[k][\"X_test\"] = self.X_test\n k_folds[k][\"X_train\"] = self.X_train\n k_folds[k][\"y_test\"] = self.y_test\n k_folds[k][\"y_train\"] = self.y_train\n\n return k_folds", "def crossValidate(self, args):\n\n ##################################\n # Read the training data\n ##################################\n if not os.path.isdir(args.annotationPath):\n print('annotation path does not exist: {}' \\\n .format(args.annotationPath))\n return -1\n\n data = self.readData(args.annotationPath)\n\n ############################\n # Execute the K-Fold cross validation\n ############################\n\n x = []\n y = []\n l = []\n for subject, df in data.items():\n lx = df[['gradient', 'rate']].values.tolist()\n #lx = df[['rate']].values.tolist()\n ly = np.array(df[['immersion']].values.tolist()).squeeze(-1)\n x.extend(lx)\n y.extend(ly.tolist())\n l.append(len(lx))\n\n x = np.array(x)\n y = np.array(y)\n\n print('Executing cross-validation with k = {}...'.format(args.k))\n clf = StructuredPerceptron(random_state=2)\n scores = []\n folds = SequenceKFold(l, n_folds=args.k)\n for train_idx, train_len, test_idx, test_len in folds:\n xTrain = x[train_idx]\n yTrain = y[train_idx]\n clf.fit(xTrain, yTrain, train_len)\n\n xTest = x[test_idx]\n yTest = y[test_idx]\n yPred = clf.predict(xTest, test_len)\n scores.append(accuracy_score(yTest, yPred))\n\n scores = np.array(scores)\n print(scores)\n print('Result of the K-Fold CV: {:3f} (+- {:3f})' \\\n .format(scores.mean(), 2 * scores.std()))\n\n ############################\n # Execute the Leave-One-Out cross validation\n ############################\n\n\n return 0", "def cross_validation(self, cv, lambdas, lambda_name, seed=1, skip=False, plot=False, **kwargs):\n np.set_printoptions(precision=4)\n k_indices = build_k_indices(self.train_y, cv, seed)\n # define lists to store the loss of training data and test data\n err_tr = []\n err_te = []\n weights = []\n print(\"K-fold ({}) cross validation to examine [{}]\".\n format(cv, lambdas))\n for lamb in lambdas:\n print(\"For lambda: {}\".format(lamb))\n _mse_tr = []\n _mse_te = []\n _weight = []\n for k in range(cv):\n print('Cross valid iteration {}'.format(k))\n weight, loss_tr, loss_te = self._loop_cross_validation(self.train_y, self.train_x,\n k_indices, k,\n lamb, lambda_name, **kwargs)\n _mse_tr += [loss_tr]\n _mse_te += [loss_te]\n _weight.append(weight)\n if skip:\n break\n avg_tr = np.average(_mse_tr)\n avg_te = np.average(_mse_te)\n err_tr += [avg_tr]\n 
err_te += [avg_te]\n weights.append(_weight)\n print(\"\\t train error {}, \\t valid error {}\".\n format(avg_tr, avg_te))\n # Select the best parameter during the cross validations.\n print('K-fold cross validation result: \\n {} \\n {}'.\n format(err_tr, err_te))\n # Select the best based on least err_te\n min_err_te = np.argmin(err_te)\n print('Best err_te result {}, lambda {}'.\n format(err_te[min_err_te], lambdas[min_err_te]))\n if plot:\n from plots import cross_validation_visualization\n cross_validation_visualization(lambdas, err_tr, err_te, title=lambda_name,\n error_name=self.loss_function_name)\n else:\n save_numpy_array(lambdas, err_tr, err_te, names=['lambda', 'err_tr', 'err_te'], title=self.regularizer.name)\n\n return weights[min_err_te], lambdas[min_err_te], (err_tr, err_te)", "def cross_validation(exp_name):\n click.echo(\"Mode: Cross-validation.\")\n # defaults = get_defaults()\n\n # fitted_model_filename = add_extension(fitted_model_filename)\n\n # derive final path for fitted model as base output path for fitted models + model filename\n # fitted_model_path = os.path.join(defaults.OUTPUT.FITTED_MODELS_PATH, fitted_model_filename)\n # new_options = [\"OUTPUT.FITTED_MODEL_PATH\", fitted_model_path]\n\n # don't reserve dev set at this point since we need to do it in each cv fold\n boot_data = bootstrap(new_options=None, mode=\"cv\")\n\n defaults = boot_data['defaults']\n X_train, y_train = boot_data['data']\n\n cv = RepeatedStratifiedKFold(n_splits=defaults.EVAL.N_SPLITS,\n n_repeats=defaults.EVAL.N_REPEATS,\n random_state=defaults.MISC.SEED)\n\n s = time.time()\n outer_results, outer_preds = cross_validate(X=X_train, y=y_train,\n cv=cv,\n conf=defaults)\n print(\"Execution time: %s seconds.\" % (time.time() - s))\n\n # dump results\n # fitted_model_best_params_path = os.path.join(defaults.OUTPUT.PARAMS_PATH,\n # \"best_params_{}.pkl\".format(fitted_model_filename.split('.')[0]))\n\n outer_results_formatted = show_cross_val_results(outer_results, conf=defaults)\n\n cv_results_path = os.path.join(defaults.OUTPUT.RESULTS_PATH, \"cv_results_{}.csv\".format(exp_name))\n outer_results_formatted.to_csv(cv_results_path)\n\n # save predictions\n outer_preds_path = os.path.join(defaults.OUTPUT.PREDS_PATH, \"cv_pooled_preds_{}.pkl\".format(exp_name))\n save_obj(outer_preds, outer_preds_path)", "def kfold_cross_validate(\n num_folds: int,\n config: Union[dict, str],\n dataset: str = None,\n data_format: str = None,\n skip_save_training_description: bool = False,\n skip_save_training_statistics: bool = False,\n skip_save_model: bool = False,\n skip_save_progress: bool = False,\n skip_save_log: bool = False,\n skip_save_processed_input: bool = False,\n skip_save_predictions: bool = False,\n skip_save_eval_stats: bool = False,\n skip_collect_predictions: bool = False,\n skip_collect_overall_stats: bool = False,\n output_directory: str = \"results\",\n random_seed: int = default_random_seed,\n gpus: Union[str, int, List[int]] = None,\n gpu_memory_limit: Optional[float] = None,\n allow_parallel_threads: bool = True,\n backend: Union[Backend, str] = None,\n logging_level: int = logging.INFO,\n **kwargs,\n) -> Tuple[dict, dict]:\n # if config is a path, convert to dictionary\n if isinstance(config, str): # assume path\n config = load_yaml(config)\n backend = initialize_backend(backend or config.get(\"backend\"))\n\n # check for k_fold\n if num_folds is None:\n raise ValueError(\"k_fold parameter must be specified\")\n\n logger.info(f\"starting {num_folds:d}-fold cross validation\")\n\n # 
create output_directory if not available\n if not os.path.isdir(output_directory):\n os.mkdir(output_directory)\n\n # prepare data for k-fold processing\n # use Ludwig's utility to facilitate creating a dataframe\n # that is used as the basis for creating folds\n\n dataset, _, _, _ = load_dataset_uris(dataset, None, None, None, backend)\n\n # determine data format of provided dataset\n if not data_format or data_format == \"auto\":\n data_format = figure_data_format(dataset)\n\n data_df = load_dataset(dataset, data_format=data_format, df_lib=backend.df_engine.df_lib)\n\n kfold_cv_stats = {}\n kfold_split_indices = {}\n\n for train_indices, test_indices, fold_num in generate_kfold_splits(data_df, num_folds, random_seed):\n with tempfile.TemporaryDirectory() as temp_dir_name:\n curr_train_df = data_df.iloc[train_indices]\n curr_test_df = data_df.iloc[test_indices]\n\n kfold_split_indices[\"fold_\" + str(fold_num)] = {\n \"training_indices\": train_indices,\n \"test_indices\": test_indices,\n }\n\n # train and validate model on this fold\n logger.info(f\"training on fold {fold_num:d}\")\n\n model = LudwigModel(\n config=config,\n logging_level=logging_level,\n backend=backend,\n gpus=gpus,\n gpu_memory_limit=gpu_memory_limit,\n allow_parallel_threads=allow_parallel_threads,\n )\n (eval_stats, train_stats, preprocessed_data, output_directory) = model.experiment(\n training_set=curr_train_df,\n test_set=curr_test_df,\n experiment_name=\"cross_validation\",\n model_name=\"fold_\" + str(fold_num),\n skip_save_training_description=skip_save_training_description,\n skip_save_training_statistics=skip_save_training_statistics,\n skip_save_model=skip_save_model,\n skip_save_progress=skip_save_progress,\n skip_save_log=skip_save_log,\n skip_save_processed_input=skip_save_processed_input,\n skip_save_predictions=skip_save_predictions,\n skip_save_eval_stats=skip_save_eval_stats,\n skip_collect_predictions=skip_collect_predictions,\n skip_collect_overall_stats=skip_collect_overall_stats,\n output_directory=os.path.join(temp_dir_name, \"results\"),\n random_seed=random_seed,\n )\n\n # augment the training statistics with scoring metric from\n # the hold out fold\n train_stats_dict = dataclasses.asdict(train_stats)\n train_stats_dict[\"fold_eval_stats\"] = eval_stats\n\n # collect training statistics for this fold\n kfold_cv_stats[\"fold_\" + str(fold_num)] = train_stats_dict\n\n # consolidate raw fold metrics across all folds\n raw_kfold_stats = {}\n for fold_name in kfold_cv_stats:\n curr_fold_eval_stats = kfold_cv_stats[fold_name][\"fold_eval_stats\"]\n for of_name in curr_fold_eval_stats:\n if of_name not in raw_kfold_stats:\n raw_kfold_stats[of_name] = {}\n fold_eval_stats_of = curr_fold_eval_stats[of_name]\n\n for metric in fold_eval_stats_of:\n if metric not in {\n \"predictions\",\n \"probabilities\",\n \"confusion_matrix\",\n \"overall_stats\",\n \"per_class_stats\",\n \"roc_curve\",\n \"precision_recall_curve\",\n }:\n if metric not in raw_kfold_stats[of_name]:\n raw_kfold_stats[of_name][metric] = []\n raw_kfold_stats[of_name][metric].append(fold_eval_stats_of[metric])\n\n # calculate overall kfold statistics\n overall_kfold_stats = {}\n for of_name in raw_kfold_stats:\n overall_kfold_stats[of_name] = {}\n for metric in raw_kfold_stats[of_name]:\n mean = np.mean(raw_kfold_stats[of_name][metric])\n std = np.std(raw_kfold_stats[of_name][metric])\n overall_kfold_stats[of_name][metric + \"_mean\"] = mean\n overall_kfold_stats[of_name][metric + \"_std\"] = std\n\n kfold_cv_stats[\"overall\"] = 
overall_kfold_stats\n\n logger.info(f\"completed {num_folds:d}-fold cross validation\")\n\n return kfold_cv_stats, kfold_split_indices", "def stratified_kfold_cross_validation(X, y, n_splits=5):\n\n #Define variables\n X_train_folds = []\n X_test_folds = []\n\n #Create dictionary \n y_dict = myutils.group_by(y)\n\n #Split data\n folds = [[] for _ in range(n_splits)]\n for category in y_dict.keys():\n index = y_dict[category]\n for i in range(len(index)):\n folds[i % n_splits].append(index[i])\n\n #Add data to train and testing sets\n for i in range(n_splits):\n train = []\n for j in range(n_splits):\n if i != j:\n for item in folds[j]:\n train.append(item)\n test = folds[i]\n X_train_folds.append(train)\n X_test_folds.append(test)\n \n return X_train_folds, X_test_folds", "def tenfold_cross_validation(X, y):\n\n i = 0\n x_score = []\n y_score = []\n\n for i in range(1, 11):\n for train_index, test_index in KFold(10).split(X):\n x_train, x_test = X[train_index], X[test_index]\n y_train, y_test = y[train_index], y[test_index]\n\n # change the parameters to see how each parameter affects the l1inear classifier\n linear_classifier = linear_model.SGDClassifier(max_iter=1000, tol=1e-3)\n\n # start training the classifier\n linear_classifier.fit(x_train, y_train)\n\n # create and plot the confusion matrix\n # cross validation done with cross_val_\n y_train_pred = cross_val_predict(linear_classifier, x_test, y_test, cv=10)\n\n print(\"\\n Statistics and Confusion matrix obtained with pandas_ml: \\n\")\n cm = ConfusionMatrix(y_test, y_train_pred)\n stats = cm.stats()\n\n file = open(\"linear_classification_9000_cross_validation_\" + str(i) + \".txt\", \"w\")\n file.write(str(stats))\n file.close()\n\n # cm.print_stats()\n # print confusion matrix\n cm.plot(normalized=True)\n plt.show()", "def cross_validation_score(X_data, y_data, model, folds=5):\n\n # Shuffle index\n index = np.random.permutation(len(X_data))\n\n fold_size = int(len(X_data) / folds)\n scores = []\n for i in range(folds):\n \n # Partition Data\n X_train, X_val = partition_data(X_data[index], folds, i + 1, fold_size)\n y_train, y_val = partition_data(y_data[index], folds, i + 1, fold_size)\n\n # Train Model\n print(f\"Training on Fold: {i + 1}\")\n model.fit(X_train, y_train)\n\n # Predict Values on Validation Set\n val_pred = model.predict(X_val)\n\n # Get Accuracy\n score = accuracy_score(y_val, val_pred)\n scores.append(score)\n \n return sum(scores) / len(scores)", "def HypeNKFoldCV(x,\n group_cols,\n target_col,\n clf,\n nfolds,\n kfolds,\n alpha,\n noise_std,\n scorer):\n ## all indices\n all_idx = x.copy().index.values\n ## will shuffle indices for randomisation\n np.random.shuffle(all_idx)\n ## outer splits indices\n outer_splits = np.array_split(all_idx, nfolds)\n ## scorer results\n scores_val = []\n ## outer cycle\n for i in range(nfolds):\n ## keep `i`-th fold for validation\n val_idx = outer_splits[i]\n x_val = x.loc[val_idx].copy()\n ## choose all but `i`-th split\n inner_idx = np.concatenate(outer_splits[:i] + outer_splits[(i + 1):])\n ## further randomise training indices\n np.random.shuffle(inner_idx)\n ## split others further\n inner_splits = np.array_split(inner_idx, kfolds)\n ## training data frame\n x_train = x.loc[inner_idx].copy()\n ## iterate over group cols\n for group_col in group_cols:\n n_col_name = '_'.join([group_col, target_col])\n ## encode using division into KFolds\n x_train.loc[:, n_col_name] = KFoldTargetEncoding(x_train[[group_col, target_col]].copy(),\n inner_splits,\n group_col,\n 
target_col,\n n_col_name,\n alpha,\n noise_std)\n ## filling in the same column on val\n ## using whole `x_train`\n x_val.loc[:, n_col_name] = targetEncoding(x_train.loc[:, [group_col, target_col]],\n x_val.loc[:, [group_col]],\n group_col,\n target_col,\n alpha,\n noise_std)\n\n ## will train on x_train\n ## will validate on x_val\n if 'fit' in dir(clf):\n clf.fit(x_train.drop(target_col, axis=1), x_train[target_col])\n preds_val = clf.predict(x_val.drop(target_col, axis=1))\n elif 'train' in dir(clf):\n clf.train(x_train.drop(target_col, axis=1), x_train[target_col])\n preds_val = clf.test(x_val.drop(target_col, axis=1)).argmax(axis=1)\n else:\n raise Exception(\"`clf` must contain either (`fit` and `predict`) or\"\n \" (`train` and `test`) methods\")\n scores_val.append(scorer(x_val[target_col], preds_val))\n del x_val, preds_val, x_train\n return scores_val", "def stratified_kfold_cross_validation(X, y, n_splits=5):\r\n indices = [x for x in range(0, len(X))]\r\n labels = []\r\n uniq_feat = []\r\n\r\n for idx,clss in enumerate(y):\r\n\r\n if clss in uniq_feat:\r\n labels[uniq_feat.index(clss)].append(indices[idx])\r\n else:\r\n labels.append([indices[idx]])\r\n uniq_feat.append(clss)\r\n \r\n index = 0\r\n X_test_folds = [[] for _ in range(0, n_splits)]\r\n\r\n for label in labels:\r\n for val in label:\r\n fold_idx = index%n_splits\r\n X_test_folds[fold_idx].append(val)\r\n index += 1\r\n \r\n X_train_folds = [[] for _ in range(0, n_splits)]\r\n\r\n for i in range(0, len(X)):\r\n for j in range(0, n_splits):\r\n if i not in X_test_folds[j]:\r\n X_train_folds[j].append(i)\r\n \r\n return X_train_folds, X_test_folds", "def cross_validation(self, x, t, k=5):\n print(\"Cross validation of the Decision Tree Classifier...\")\n bestCriteria = ''\n bestMax_depth= 2\n bestError = float('inf')\n\n N = len(x)\n N_train = math.floor(0.8 * N)\n\n dicCriteria = ['gini', 'entropy']\n min_depth = 2\n max_depth = 40\n\n for crit in dicCriteria:\n for d in range(min_depth, max_depth):\n errors = np.zeros(k)\n\n for j in range(k):\n map_index = list(zip(x, t))\n random.shuffle(map_index)\n random_X, random_t = zip(*map_index)\n\n train_x = random_X[:N_train]\n valid_x = random_X[N_train:]\n train_t = random_t[:N_train]\n valid_t = random_t[N_train:]\n\n self.model = tree.DecisionTreeClassifier(max_depth=d, criterion=crit)\n self.train(train_x, train_t)\n error_valid = np.array([self.error(x_n, t_n)\n for t_n, x_n in zip(valid_t, valid_x)])\n errors[j] = error_valid.mean()\n\n mean_error = np.mean(errors)\n if mean_error < bestError:\n bestError = mean_error\n bestCriteria = crit\n bestMax_depth = d\n print(\"The new best hyper parameters are : \", bestMax_depth, bestCriteria)\n\n print(\"Best hyper parameters are : \", bestMax_depth, bestCriteria)\n print(\"Validation error : \", 100 * bestError, \"%\")\n self.model = tree.DecisionTreeClassifier(max_depth=bestMax_depth, criterion=bestCriteria)\n self.train(x, t)", "def crossValidation(training, k, performance):\n\n predictions = []\n accuracy = []\n\n for index in range(1, 6):\n # print index\n temp = list(range(1, 6))\n temp.remove(index)\n # print 'index: ' + str(index) + ', temp: ' + str(temp)\n\n for x in range(len(training.get_group(index))):\n if x % 100 != 0:\n continue\n target = training.get_group(index).values.tolist()[x][-1]\n # if x % 500 == 0:\n # print 'index: ' + str(index) + ', x: ' + str(x)\n neighbors = []\n distances = []\n for validationSet in temp:\n getDistances(training.get_group(validationSet).values.tolist(), 
training.get_group(index).values.tolist()[x], distances)\n # Sort the distances list by the distance\n distances.sort(key = lambda item: item[1])\n # print distances\n # Select first k closest elements to return as the neighbors\n for x in range(k):\n neighbors.append(distances[x][0])\n\n result=getResponse(neighbors)\n\n # print distances\n # print neighbors\n # print result\n predictions.append(result)\n # print 'result: ' + str(result)\n # print 'target: ' + str(target)\n # print 'result == target: ' + str(result == target)\n if result == target:\n accuracy.append((True, target))\n else:\n accuracy.append((False, target))\n\n count = 0\n for item in accuracy:\n if item[0] == True:\n count += 1\n\n # print 'number of instances: ' + str(len(accuracy)) + ' number correct: ' + str(count)\n\n count = 0\n for item in accuracy:\n if item[0] == True:\n count += 1\n\n # Add the current k-value and its accuracy for this run to dictionary\n performance[k] = count / len(accuracy)\n\n print performance\n return performance", "def __cross_validation(self, classifier, X, y, k, stratify=True):\n\t\tif k == X.shape[0]:\t\t# leave-one-out\n\t\t\tkf = model_selection.KFold(n_splits=k)\n\t\telse:\n\t\t\tif stratify:\n\t\t\t\tkf = model_selection.StratifiedKFold(n_splits=k, shuffle=True, random_state=0)\n\t\t\telse:\n\t\t\t\tkf = model_selection.KFold(n_splits=k, shuffle=True, random_state=0)\n\n\t\t# training data and predictions for each fold\n\t\ty_train_list = []\n\t\ty_train_pred_list = []\n\t\ty_train_prob_list = []\n\t\ty_val_list = []\n\t\ty_val_pred_list = []\n\t\ty_val_prob_list = []\n\n\t\tfor train_idx, val_idx in kf.split(X, y):\n\t\t\tX_train, X_val = X.iloc[train_idx], X.iloc[val_idx]\n\t\t\ty_train, y_val = y.iloc[train_idx], y.iloc[val_idx]\n\t\t\ty_train_list.append(y_train)\n\t\t\ty_val_list.append(y_val)\n\n\t\t\t# catch convergence warning\n\t\t\twith warnings.catch_warnings():\n\t\t\t\twarnings.filterwarnings('error', category=exceptions.ConvergenceWarning)\n\t\t\t\ttry:\n\t\t\t\t\tclassifier = classifier.fit(X_train, y_train)\n\t\t\t\texcept exceptions.ConvergenceWarning:\n\t\t\t\t\tModel.counter -= 1\n\t\t\t\t\traise\n\n\t\t\ty_train_pred_list.append(classifier.predict(X_train))\n\t\t\ty_val_pred_list.append(classifier.predict(X_val))\n\t\t\ty_train_prob_list.append(classifier.predict_proba(X_train))\n\t\t\ty_val_prob_list.append(classifier.predict_proba(X_val))\n\n\t\tif k == X.shape[0]:\t\t# leave-one-out\n\t\t\ty_val = np.hstack(y_val_list)\n\t\t\ty_val_pred = np.hstack(y_val_pred_list)\n\t\t\ty_val_prob = np.vstack(y_val_prob_list)\n\n\t\t\treturn ModelMetrics(classifier, y_train_list, y_train_pred_list, y_train_prob_list, 'cv'), \\\n\t\t\t\tModelMetrics(classifier, y_val, y_val_pred, y_val_prob, 'loo')\n\t\telse:\n\t\t\treturn ModelMetrics(classifier, y_train_list, y_train_pred_list, y_train_prob_list, 'cv'), \\\n\t\t\t\tModelMetrics(classifier, y_val_list, y_val_pred_list, y_val_prob_list, 'cv')", "def cv(data, folds, model):\n def rmsle(predicted, actual):\n # Root Mean Squared Logarithmic Error\n return mean_squared_error(\n np.log(predicted+1),\n np.log(actual+1)\n ) ** 0.5\n\n errors = []\n print \" Cross Validation in progress...\"\n kf = cross_validation.KFold(n=len(data.index), n_folds=folds)\n for i, (train_index, validation_index) in enumerate(kf):\n print ' F%d.' 
% i\n train = data.iloc[train_index]\n validation = data.iloc[validation_index]\n\n model.fit(train)\n prediction = model.predict(validation)\n actual = data.iloc[validation_index]['count'].as_matrix()\n error = rmsle(prediction, actual)\n errors.append(error)\n return np.mean(errors)", "def cross_validate(self, inss, num_folds, folds_name=None, gen_folds=False):\n kcv = CrossValidation(num_folds, inss, self.train, self.rank_inss)\n\n # loads/generates folds\n if gen_folds:\n kcv.create_folds(group_by=\"session\")\n if folds_name is not None:\n kcv.save_folds(folds_name)\n else:\n kcv.load_folds(folds_name)\n\n # Cross validation\n inss = kcv.run()\n inss.__class__ = CERInstances\n for ins in inss.get_all():\n ins.__class__ = CERInstance\n return inss", "def nested_cv(X, y, model, n_splits, n_folds, unique_id):\n \n cv = StratifiedKFold(n_splits=n_splits,\n shuffle=True,\n random_state=42) # Outer CV\n \n i_start = 0\n i_list = []\n results_df = None\n cv_path = unique_id + '_NestedCV.pkl'\n \n if os.path.isfile(cv_path) == True: # If CV is incomplete, resume\n results_df = pd.read_pickle(cv_path)\n i_start = results_df.Outer_fold.max() + 1\n print('Resuming cross-validation from fold ' + str(i_start + 1))\n \n # Generate indices to split data by StratifiedKFold\n # Append indices for each fold to list \n for tr_i, te_i in cv.split(X,y):\n i_list.append([tr_i, te_i])\n \n # For each fold...\n for i in range(i_start, len(i_list)):\n results_list = []\n print('Beginning fold ' + str(i+1) + ' of ' + str(len(i_list)))\n \n # Split data into training and test tests\n X_train = X.loc[X.index.intersection(i_list[i][0])]\n y_train = y.loc[y.index.intersection(i_list[i][0])]\n X_test = X.loc[X.index.intersection(i_list[i][1])]\n y_test = y.loc[y.index.intersection(i_list[i][1])]\n\n start = time.time()\n \n # Fit the HyperoptEstimator to training data (optimise model)\n model.fit(X_train,\n y_train,\n n_folds=n_folds, # Inner stratified k-fold CV\n cv_shuffle=True)\n \n end = time.time()\n duration = end - start\n\n # Use optimised model to predict labels for test data\n y_pred = model.predict(X_test)\n score = f1_score(y_test, y_pred, average='weighted') # Evaluate\n \n # Everything below: formats and/or calculates results for output file\n sorted_labels = np.sort(y_test.unique())\n unweighted_score = f1_score(y_test, y_pred,\n average=None,\n labels=sorted_labels)\n c_matrix = confusion_matrix(y_test, y_pred,\n labels=sorted_labels)\n\n for trial in range(len(model.trials.trials)):\n if model.trials.trials[trial].get('result').get('status') == 'ok':\n trial_loss = model.trials.trials[trial].get('result').get('loss')\n trial_duration = model.trials.trials[trial].get('result').get('duration')\n else:\n trial_loss = np.nan\n trial_duration = np.nan\n \n results_list.append([i,\n score,\n unweighted_score,\n le.inverse_transform(sorted_labels),\n c_matrix,\n duration,\n trial,\n trial_loss,\n trial_duration])\n \n append_df = pd.DataFrame(results_list,\n columns=['Outer_fold',\n 'Outer_score',\n 'Outer_unweighted_scores',\n 'Outer_unweighted_score_labels',\n 'Outer_confusion_matrix',\n 'Outer_training_duration',\n 'Trial',\n 'Trial_loss',\n 'Trial_duration'])\n if i == i_start:\n if results_df is not None:\n final_df = pd.concat([results_df,\n append_df],\n ignore_index=True)\n else:\n final_df = append_df\n final_df.to_pickle(cv_path)\n \n else:\n results_df = pd.read_pickle(cv_path)\n final_df = pd.concat([results_df,\n append_df],\n ignore_index=True)\n final_df.to_pickle(cv_path)", "def 
crossValidationKfold(automodel, \r\n X, y,\r\n params_automl : dict = {},\r\n score_function = accuracy_score,\r\n cv : int = 3,\r\n shuffle: bool = True,\r\n verbose : bool = True,\r\n allmetrics: bool = False):\r\n if(isinstance(X, pd.DataFrame) or isinstance(y, pd.DataFrame)):\r\n X = X.values\r\n y = y.values\r\n skf = StratifiedKFold(n_splits = cv, \r\n shuffle = shuffle, \r\n random_state = 42)\r\n if(allmetrics):\r\n train_scores = {'accuracy' : [], \r\n 'roc_auc': [], \r\n 'f1' : [], \r\n 'recall' : [], \r\n 'precision': []}\r\n test_scores = {'accuracy' : [], \r\n 'roc_auc': [], \r\n 'f1' : [], \r\n 'recall' : [], \r\n 'precision': []}\r\n else:\r\n train_scores = np.empty((cv, ))\r\n test_scores = np.empty((cv, ))\r\n for idx, (idx_tr, idx_ts) in enumerate(skf.split(X, y)):\r\n X_tr, X_ts = X[idx_tr], X[idx_ts]\r\n y_tr, y_ts = y[idx_tr], y[idx_ts] \r\n am = automodel(**params_automl)\r\n am.fit(X_tr, y_tr)\r\n if(not allmetrics):\r\n \r\n train_scores[idx] = score_function(am.predict(X_tr), y_tr)\r\n test_scores[idx] = score_function(am.predict(X_ts), y_ts)\r\n if(verbose):\r\n print('it: {} train score: {:.3f}, val score: {:.3f}'.format(idx, \r\n train_scores[idx],\r\n test_scores[idx]))\r\n else:\r\n train_current = {}\r\n test_current = {}\r\n for name, metric in all_metrics_classifications.items():\r\n train_current[name] = metric(am.predict(X_tr), y_tr)\r\n test_current[name] = metric(am.predict(X_ts), y_ts)\r\n train_scores[name].append(train_current[name])\r\n test_scores[name].append(test_current[name])\r\n \r\n if(verbose):\r\n print('it: {} train scores: {}, val scores: {}'.format(idx, train_current,\r\n test_current))\r\n\r\n if(not allmetrics):\r\n return test_scores.mean(), test_scores.std()\r\n else:\r\n # -- calculate means of all metrics-- #\r\n return dict(map(lambda kv: (kv[0], np.asarray(kv[1]).mean()), test_scores.items()))", "def cross_validation_experiment(train_data, train_labels):\n accuracies = []\n for i in range(1, 200):\n avg = cross_validation(train_data, train_labels, i, 10)\n accuracies.append(avg)\n fig = plt.figure()\n dim = np.arange(1,len(accuracies)+1)\n plt.plot(dim,accuracies, label='Accuracy')\n plt.xlabel('k')\n plt.ylabel('accuracy')\n plt.grid()\n plt.legend()\n plt.tight_layout()\n fig.savefig('knn_cross_validation.png')\n best_k = np.argmax(accuracies)+1\n return best_k", "def train_kfold(self, blank_model, model_params, dataset, batch_size, drop_last_batch=True):\n kfold = StratifiedKFold(n_splits=self.cv, shuffle=True)\n best_val_loss = 999999\n best_cv = 0\n best_model = None\n for i, (train_idx, test_idx) in enumerate(kfold.split(dataset, dataset.targets)):\n # Initialize model\n #model = copy.deepcopy(blank_model)\n model = blank_model(**model_params).to(model_params['device'])\n print(model.dev)\n #print(model_params['device'])\n print('Training fold %d'%(i), flush=True)\n train_ds = torch.utils.data.Subset(dataset, train_idx)\n test_ds = torch.utils.data.Subset(dataset, test_idx)\n train_loader = torch.utils.data.DataLoader(train_ds, batch_size=batch_size, shuffle=True, drop_last=drop_last_batch, num_workers=self.num_work)\n test_loader = torch.utils.data.DataLoader(test_ds, batch_size=batch_size, shuffle=True, drop_last=drop_last_batch, num_workers=self.num_work)\n # Train model\n epoch_hist = model.train_model(train_loader, test_loader=test_loader, n_epochs=self.n_epochs, learning_rate=self.lr, train_patience=self.train_p, test_patience=self.test_p, save_model=False)\n # Save attributes\n self.cv_res_dict[i]['history'] = 
epoch_hist\n if 'valid_loss' in epoch_hist.keys():\n self.cv_res_dict[i]['best_valid_loss'] = np.min(epoch_hist['valid_loss'])\n #if self.return_model:\n #self.cv_res_dict[i]['model'] = model\n if best_val_loss > np.min(epoch_hist['valid_loss']):\n best_val_loss = np.min(epoch_hist['valid_loss'])\n best_cv = i\n best_model = model\n # Save if save all\n if self.save_all:\n full_path = self.path_dir+self.model_prefix+'fold_'+str(i)+'.pt'\n print('Saving model at %s'%(full_path), flush=True)\n torch.save(model.state_dict(), full_path)\n\n # delete model from memory ?\n del model\n if model_params['device'] == torch.device('cuda'):\n torch.cuda.empty_cache()\n\n self.best_cv = best_cv\n print('Best Fold: %d'%(self.best_cv), flush=True)\n # Save best model to path\n if self.save_best and not self.save_all:\n path_best = self.path_dir+self.model_prefix+'best_fold.pt'\n print('Saving best model at %s'%(path_best), flush=True)\n torch.save(best_model.state_dict(), path_best)\n elif self.save_best and self.save_all:\n print('Best model already saved for fold %d'%(self.best_cv), flush=True)\n\n return", "def __init__(self, splits):\n\t\tself.kfold = KFold(splits)", "def main():\n # Read in trainingSet and testSet as a DataFrame\n trainingOriginal = pd.read_csv(\n filepath_or_buffer=\"~/Desktop/KNN Implementation/adult.train.5fold.csv\")\n testOriginal = pd.read_csv(filepath_or_buffer=\"~/Desktop/KNN Implementation/adult.test.csv\")\n\n # Select only the numeric data\n training = pd.DataFrame(trainingOriginal.select_dtypes(['number']))\n training = pd.concat([training.reset_index(drop=True),\n trainingOriginal['earns'].reset_index(drop=True)], axis=1)\n\n # Select only the numeric data\n test = pd.DataFrame(testOriginal.select_dtypes(['number']))\n test = pd.concat([test.reset_index(drop=True),\n testOriginal['earns'].reset_index(drop=True)], axis=1)\n\n # Normalize the columns for training and test\n # print training['age'].min()\n # print training['age'].max()\n # print training.head()\n\n # Run max-min normalization on numerical columns for testing and training data\n for i in range(6):\n training.iloc[:, i] = (training.iloc[:, i]- training.iloc[:, i].min())/(training.iloc[:, i].max() - training.iloc[:, i].min())\n test.iloc[:, i] = (test.iloc[:, i]- test.iloc[:, i].min())/(test.iloc[:, i].max() - test.iloc[:, i].min())\n\n # Convert the 'earns' column to boolean as follows\n training['earns'] = training['earns'] == '>50K'\n test['earns'] = test['earns'] == ' >50K'\n\n # Group the training set by the fold attribute as given by the dataset\n trainingForFinal = training\n training = training.groupby('fold')\n\n # Since we want to consider odd k-values from 1 to 39, construct a list with these values\n kList = []\n for i in range(40):\n if i % 2 == 1:\n kList.append(i)\n\n # Empty dictionary to hold performance of each k-values and its accuracy\n performance = {}\n\n # Compute the performance for each k-value\n for k in kList:\n performance = crossValidation(training, k, performance)\n\n # Sort the performance dictionary by its accuracy (value)\n performance = sorted(performance.items(), key=operator.itemgetter(1), reverse=True)\n\n # Open file to write results\n file = open('grid.results.txt', 'w')\n # Write the results to file\n file.write(\"K | Accuracy\\n\")\n for item in performance:\n if item[0] < 10:\n file.write(str(item[0]) + ' | ' + str(item[1]) + '\\n')\n else:\n file.write(str(item[0]) + ' | ' + str(item[1]) + '\\n')\n # Close file\n file.close()\n\n # The best K is the one at the top of 
the list after the sorting\n bestK = performance[0][0]\n\n print 'Running Test Set with K = ' + str(bestK)\n\n applyModel(test,trainingForFinal,bestK)", "def cross_validation(self, stock_data):\n\n self.X_train, self.X_test, self.y_train, self.y_test = cross_validation.train_test_split(stock_data['X'], stock_data['Y'], test_size=0.2, train_size=0.8, random_state=3)", "def validate_kfold(\n x: ndarray,\n y: ndarray,\n x_sample_dim: int,\n y_sample_dim: int,\n n_splits: int,\n stratify: bool,\n models: List[Model],\n generator: RandomGenerator,\n save_fold_accs: bool,\n) -> List[KFoldResults]:\n # don't work with original arrays which may have sample index in strange location\n length = int(x.shape[x_sample_dim])\n idx = np.arange(0, length, dtype=int)\n # convert to labels if we have a one-hot (will fail for dummy coding)\n # y_split = np.argmax(y, axis=1 - int(np.abs(y_sample_dim))) if y.ndim == 2 else y\n y_split = np.argmax(y, axis=1 - np.abs(y_sample_dim).astype(int)) if y.ndim == 2 else y\n\n # can't trust kfold shuffling in multiprocessing, shuffle ourselves\n generator.shuffle(idx)\n y_split = y_split[idx]\n\n kfolder = StratifiedKFold if stratify else KFold\n kfold = kfolder(n_splits=n_splits, shuffle=False)\n\n results = []\n for model, (train_idx, val_idx) in zip(models, kfold.split(idx, y_split)):\n x_train = array_indexer(x, x_sample_dim, train_idx)\n y_train = array_indexer(y, y_sample_dim, train_idx)\n model.fit(x_train, y_train)\n\n acc = None\n y_pred = None\n if save_fold_accs:\n x_val = array_indexer(x, x_sample_dim, val_idx)\n y_val = array_indexer(y, y_sample_dim, val_idx)\n y_pred = model.predict(x_val)\n acc = 1 - np.mean(get_y_error(y_pred, y_val, y_sample_dim))\n results.append(KFoldResults(model, acc, y_pred, val_idx))\n\n return results", "def _do_training_cross_validation(self) -> None:\n\n cfg = self.cfg_\n fit_kwargs = {'classes': list(self.data_.classes)}\n\n # Store all of the samples used during cross-validation\n self.y_training_set_all_ = list(self._generate_samples(self.train_ids_, 'y'))\n\n # Initialize learner objects with the optimal set of parameters\n # learned from the grid search round (one for each\n # sub-experiment of the cross-validation round)\n for learner, learner_name in zip(self.learners_, self.learner_names_):\n self.cv_learners_[learner_name] = \\\n [learner(**self.learner_gs_cv_params_[learner_name])\n for i in range(len(self.data_.training_set))]\n\n # Make a list of empty lists corresponding to each estimator\n # instance for each learner, which will be used to store the\n # performance metrics for each cross-validation\n # leave-one-fold-out sub-experiment\n self.cv_learner_stats_ = [[] for _ in cfg.learners]\n\n # Fit the `SelectPercentile` feature selector (if applicable)\n if cfg.feature_selection_percentile != 1.0:\n loginfo('Removing {0}% of the features during training round...'\n .format(100 - 100*cfg.feature_selection_percentile))\n feature_selector = \\\n (SelectPercentile(chi2,\n percentile=100*cfg.feature_selection_percentile)\n .fit(self._vectorize_and_sparsify_data(self.training_vec_,\n self.train_ids_),\n self.y_training_set_all_))\n\n # For each fold of the training set, train on all of the other\n # folds and evaluate on the one left out fold\n for i, held_out_fold in enumerate(self.data_.training_set):\n\n loginfo('Cross-validation sub-experiment #{0} in progress'\n .format(i + 1))\n\n # Use each training fold (except for the held-out set) to\n # incrementally build up the model\n training_folds = 
(self.data_.training_set[:i]\n + self.data_.training_set[i + 1:])\n y_train_all = []\n for j, training_fold in enumerate(training_folds):\n\n # Get the training data\n y_train = list(self._generate_samples(training_fold, 'y'))\n y_train_all.extend(y_train)\n X_train = self._vectorize_and_sparsify_data(self.training_vec_,\n training_fold)\n if cfg.feature_selection_percentile != 1.0:\n X_train = feature_selector.transform(X_train)\n\n # Iterate over the learners\n for learner_name in self.learner_names_:\n\n # Partially fit each estimator with the new training\n # data (specifying the `classes` keyword argument if\n # this is the first go-round and it's a learner that\n # requires this to be specified initially)\n (self.cv_learners_[learner_name][i]\n .partial_fit(X_train,\n y_train,\n **fit_kwargs if not j and learner_name\n in self.requires_classes_kwarg_\n else {}))\n\n # Get mean and standard deviation for actual values\n y_train_all = np.array(y_train_all)\n y_train_mean = y_train_all.mean()\n y_train_std = y_train_all.std()\n\n # Get test data\n y_test = list(self._generate_samples(held_out_fold, 'y'))\n X_test = self._vectorize_and_sparsify_data(self.training_vec_,\n held_out_fold)\n if cfg.feature_selection_percentile != 1.0:\n X_test = feature_selector.transform(X_test)\n\n # Make predictions with the modified estimators\n for j, learner_name in enumerate(self.learner_names_):\n\n # Make predictions with the given estimator,rounding the\n # predictions\n y_test_preds = \\\n np.round(self.cv_learners_[learner_name][i].predict(X_test))\n\n # Rescale the predicted values based on the\n # mean/standard deviation of the actual values and\n # fit the predicted values within the original scale\n # (i.e., no predicted values should be outside the range\n # of possible values)\n y_test_preds_dict = \\\n ex.rescale_preds_and_fit_in_scale(y_test_preds,\n self.data_.classes,\n y_train_mean,\n y_train_std)\n\n if cfg.rescale:\n y_test_preds = y_test_preds_dict['rescaled']\n else:\n y_test_preds = y_test_preds_dict['fitted_only']\n\n # Evaluate the predictions and add to list of evaluation\n # reports for each learner\n (self.cv_learner_stats_[j]\n .append(ex.evaluate_predictions_from_learning_round(\n y_test=y_test,\n y_test_preds=y_test_preds,\n classes=self.data_.classes,\n prediction_label=cfg.prediction_label,\n non_nlp_features=cfg.non_nlp_features,\n nlp_features=cfg.nlp_features,\n learner=self.cv_learners_[learner_name][i],\n learner_name=learner_name,\n games=cfg.games,\n test_games=cfg.games,\n _round=i + 1,\n iteration_rounds=self.data_.folds,\n n_train_samples=len(y_train_all),\n n_test_samples=len(held_out_fold),\n rescaled=cfg.rescale,\n transformation_string=self.transformation_string_,\n bin_ranges=cfg.bin_ranges)))", "def cv_multiclass_fold(Y,num_fold=10):\n\t\n (K,N) = Y.shape\n indices = dict()\n Nk = dict()\n for k in range(K):\n # select indices belonging to class k\n indices[k] = list((Y[k,:]==1).nonzero()[0])\n rand.shuffle(indices[k])\n Nk[k] = len(indices[k])/num_fold\n\t\n index_list = []\n\n for k in range(K):\n for i in range(num_fold-1):\n # split class-k indices into num_fold random sets\n try:\n index_list[i].extend(indices[k][Nk[k]*i:Nk[k]*(i+1)])\n except IndexError:\n index_list.append([])\n index_list[i].extend(indices[k][Nk[k]*i:Nk[k]*(i+1)])\n try:\n index_list[num_fold-1].extend(indices[k][Nk[k]*(num_fold-1):])\n except IndexError:\n index_list.append([])\n index_list[num_fold-1].extend(indices[k][Nk[k]*(num_fold-1):])\n\n return index_list", "def 
Kfold_LR(X, y, n, rs=None):\n scoring_metrics = ['neg_mean_absolute_error',\n 'neg_mean_squared_error',\n 'neg_root_mean_squared_error',\n 'r2']\n lr = LinearRegression()\n # kf = KFold(n_splits=n, shuffle=True, random_state=rs)\n tss2 = TimeSeriesSplit(n_splits=n)\n result = cross_validate(lr, X, y, cv=tss2, scoring=scoring_metrics,\n return_train_score=True, return_estimator=True)\n return pd.DataFrame(result), result['estimator'][-1]", "def makeFolds(data, k):\r\n # randomize columns\r\n order = data.columns.tolist()\r\n random.shuffle(order)\r\n # split into folds (specified by k)\r\n folds = []\r\n fold = 0\r\n dist = len(order) / k\r\n while fold < k:\r\n start = int(round(fold * dist))\r\n end = int(round(start + dist))\r\n folds.append(order[start:end])\r\n fold = fold + 1\r\n return folds", "def cross_validation(y, x, k_indices, k, initial_w, model_name, max_iters=0, gamma=0, lambda_=0):\n # get k'th subgroup in test, others in train\n test_idx = k_indices[k]\n idx_tr = np.arange(len(k_indices))\n train_idx = k_indices[idx_tr != k]\n train_idx = train_idx.flatten()\n\n # form data with polynomial degree\n #x_tr = build_poly(x[train_idx], degree)\n #x_te = build_poly(x[test_idx], degree)\n x_tr = x[train_idx]\n x_te = x[test_idx]\n y_tr = y[train_idx]\n y_te = y[test_idx]\n\n # applying the model \n #initial_w = np.zeros(x_tr.shape[1])\n if(model_name == 'least_squares_GD'):\n w_star, mse = least_squares_GD(y_tr, x_tr, initial_w, max_iters, gamma)\n elif(model_name == 'least_squares_SGD'):\n w_star, mse = least_squares_SGD(y_tr, x_tr, initial_w, max_iters, gamma)\n elif(model_name == 'least_squares'):\n w_star, mse = least_squares(y_tr, x_tr)\n elif(model_name == 'ridge_regression'):\n w_star, mse = ridge_regression(y_tr, x_tr, lambda_)\n elif(model_name == 'logistic_regression'):\n w_star, mse = logistic_regression(y_tr, x_tr, initial_w, max_iters, gamma)\n elif(model_name == 'reg_logistic_regression'):\n w_star, mse = reg_logistic_regression(y_tr, x_tr, lambda_, initial_w, max_iters, gamma)\n \n\n # calculate the loss for train and test data\n if(model_name == 'logistic_regression' or model_name == 'reg_logistic_regression'):\n #loss_tr = compute_loss_LG(y_tr, x_tr, w_star)\n y_te_ = (1+y_te)/2\n loss_te = compute_loss_LG(y_te_, x_te, w_star)\n #print(loss_te)\n mod_pred = predict_labels(w_star, x_te, logistic = True)\n #mod_pred = (1+mod_pred)/2\n acc = calculate_accuracy(mod_pred, y_te)\n else:\n #loss_tr = compute_loss_MSE(y_tr, x_tr, w_star)\n loss_te = compute_loss_MSE(y_te, x_te, w_star)\n mod_pred = predict_labels(w_star, x_te)\n acc = calculate_accuracy(mod_pred, y_te)\n\n return acc, loss_te", "def cv_5_fold(dataFrame):\n dataframe_collection = {}\n i = 0\n j = 0\n l = 0\n guessed_right = 0\n k = [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31, 33, 35, 37, 39]\n\n k_values = []\n # array to store the accuracy evaluation for each number of K\n accuracy_values = {}\n\n myDict = {}\n for j in range(len(k)): # for all values of K neighbour\n\n print(k[j])\n predicted_right = 0\n total_number = 0\n five_accuracies = []\n for i in range(0, 5):\n #aggregating dataframes by fold - e.g. 
1 fold becomes test dataframe; 2,3,4,5 folds become one training dataframe\n trainingDataFrame = dataFrame.loc[dataFrame[15] != (i / 4.00)]\n trainingDataFrame = trainingDataFrame.drop([15], axis=1).reset_index(drop=True)\n testDataFrame = dataFrame.loc[dataFrame[15] == (i / 4.00)]\n testDataFrame = testDataFrame.drop([15], axis=1).reset_index(drop=True)\n\n # output is an array of predicted income values for testDataFrame\n output = knn(trainingDataFrame, testDataFrame, k[j])\n\n # for every fold validation loop calculate the accuracy:\n for instance in range(len(testDataFrame)):\n # checking number of right predictions\n if (output[instance] == testDataFrame[14].iloc[instance]):\n predicted_right += 1.00\n total_number += 1.00\n\n # calculate accuracy as percentage of number of prediction divided by total\n accuracy = (predicted_right / total_number) * 100.0\n # add acccuracies for each of the 5 fold tests to an array\n five_accuracies.append(accuracy)\n\n # PROVIDE FINAL EVALUATION FOR K = J, BY FINDING OUT AVERAGE ACCURACY OF THE FIVE FOLD LOOPS:\n evaluation = 0.0\n for accuracy in range(len(five_accuracies)):\n evaluation += five_accuracies[accuracy]\n\n evaluation = evaluation / 5\n\n accuracy_values.update({k[j]: evaluation})\n\n accuracy_values = collections.OrderedDict(sorted(accuracy_values.items()))\n\n # compute which number of neigbors garners greatest accuracy:\n maxAccuracy = 0\n best_neighbour = 0\n # loop through dictionary values:\n for v in accuracy_values.items():\n # if the value is greater than the current maximum, make it the maximum\n if (v[1] > maxAccuracy):\n maxAccuracy = v[1]\n best_neighbour = v[0]\n\n print(\"Max accuracy \", maxAccuracy)\n print(\"Best Neighbor: \", best_neighbour)\n\n # make a text file containing the K-number and associated accuracy:\n str_x = \"k value | accuracy\" + \"\\n\"\n for k, v in accuracy_values.items():\n str_x += str(k) + \" | \" + str(v) + \"\\n\"\n print(str_x)\n\n text_file = open(\"grid.results.txt\", 'w')\n text_file.write(str_x)\n text_file.close()", "def get_folds(X, y, k):\n # temporarily change the 1/-1 nature of y to 1/0\n _y = (y + 1) / 2\n # partition the examples into postive and negative sets\n positive_indices = np.where(_y)[0]\n negative_indices = np.where(_y - 1)[0]\n assert len(positive_indices) + len(negative_indices) == len(y)\n\n # shuffle both lists\n np.random.shuffle(positive_indices)\n np.random.shuffle(negative_indices)\n\n # create k buckets of indices of (approximately) equal size\n positive_folds_indices = \\\n np.array(np.array_split(positive_indices, k))\n negative_folds_indices = \\\n np.array(np.array_split(negative_indices, k))\n\n train_X, train_y, test_X, test_y = [], [], [], []\n for i in range(k):\n train_folds = np.concatenate((np.arange(0, i), np.arange(i+1, k)))\n pos_train_indices = np.concatenate(positive_folds_indices[train_folds])\n neg_train_indices = np.concatenate(negative_folds_indices[train_folds])\n pos_test_indices = positive_folds_indices[i]\n neg_test_indices = negative_folds_indices[i]\n\n train_X.append(\n np.concatenate((X[pos_train_indices], X[neg_train_indices]))\n )\n train_y.append(\n np.concatenate((y[pos_train_indices], y[neg_train_indices]))\n )\n test_X.append(\n np.concatenate((X[pos_test_indices], X[neg_test_indices]))\n )\n test_y.append(\n np.concatenate((y[pos_test_indices], y[neg_test_indices]))\n )\n\n return zip(train_X, train_y, test_X, test_y)", "def split_label_for_crossValidation(labels, k_fold = 3):\n\ttest_labels = []\n\ttest_num = len(labels) / 
k_fold\n\n\tfor i in range(test_num):\n\t\ttest_labels.append(labels.pop(random.randint(0,len(labels) - 1)))\n\n\tvalid_labels = labels\n\n\treturn (test_labels, valid_labels)", "def cross_validate(self, X, y):\n roc_ap = []\n kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=0)\n for train_ix, test_ix in kfold.split(X, y):\n train_X, test_X = X.iloc[train_ix, :], X.iloc[test_ix, :]\n train_y, test_y = y[train_ix], y[test_ix]\n self.train(train_X, train_y)\n roc_ap.append(self.test(test_X, test_y))\n return roc_ap", "def __init__(self, splits):\n\t\tself.kfold = StratifiedKFold(splits)", "def cross_validate(self, curr_dataset, num_folds, max_depth, min_samples_per_node,\n is_stratified=True, print_tree=False, seed=None, print_samples=False,\n use_stop_conditions=False, max_p_value_chi_sq=0.1):\n classifications = [0] * curr_dataset.num_samples\n num_correct_classifications = 0\n num_correct_classifications_wo_unkown = 0\n total_cost = 0.0\n total_cost_wo_unkown = 0.0\n classified_with_unkown_value_array = [False] * curr_dataset.num_samples\n num_unkown = 0\n unkown_value_attrib_index_array = [0] * curr_dataset.num_samples\n max_depth_per_fold = []\n num_nodes_per_fold = []\n num_valid_nominal_attributes_in_root_per_fold = []\n num_values_root_attribute_list = []\n num_trivial_splits = 0\n time_taken_pruning_per_fold = []\n num_nodes_pruned_per_fold = []\n num_correct_trivial_classifications = 0\n\n fold_count = 0\n\n sample_indices_and_classes = list(enumerate(curr_dataset.sample_class))\n if seed is not None:\n random.seed(seed)\n np.random.seed(seed)\n random.shuffle(sample_indices_and_classes)\n shuffled_sample_indices, shuffled_sample_classes = zip(*sample_indices_and_classes)\n\n if is_stratified:\n for (training_randomized_indices,\n validation_randomized_indices) in StratifiedKFold(n_splits=num_folds).split(\n shuffled_sample_indices,\n shuffled_sample_classes):\n\n training_samples_indices = [shuffled_sample_indices[index]\n for index in training_randomized_indices]\n validation_sample_indices = [shuffled_sample_indices[index]\n for index in validation_randomized_indices]\n\n if print_samples:\n print('Samples used for validation in this fold:')\n print(validation_sample_indices)\n print()\n\n ((curr_classifications,\n curr_num_correct_classifications,\n curr_num_correct_classifications_wo_unkown,\n curr_total_cost,\n curr_total_cost_wo_unkown,\n curr_classified_with_unkown_value_array,\n curr_num_unkown,\n curr_unkown_value_attrib_index_array),\n curr_max_depth,\n curr_time_taken_pruning,\n curr_num_nodes_pruned) = self.train_and_test(curr_dataset,\n training_samples_indices,\n validation_sample_indices,\n max_depth,\n min_samples_per_node,\n use_stop_conditions,\n max_p_value_chi_sq)\n max_depth_per_fold.append(curr_max_depth)\n num_nodes_per_fold.append(self.get_root_node().get_num_nodes())\n num_valid_nominal_attributes_in_root_per_fold.append(\n sum(self._root_node.valid_nominal_attribute))\n try:\n root_node_split_attrib = self.get_root_node().node_split.separation_attrib_index\n if curr_dataset.valid_nominal_attribute[root_node_split_attrib]:\n num_values_root_attribute_list.append(sum(\n num_samples > 0\n for num_samples in self.get_root_node().contingency_tables[\n root_node_split_attrib].values_num_samples))\n except AttributeError:\n num_trivial_splits += 1\n for curr_index, validation_sample_index in enumerate(validation_sample_indices):\n classifications[validation_sample_index] = curr_classifications[curr_index]\n 
classified_with_unkown_value_array[validation_sample_index] = (\n curr_classified_with_unkown_value_array[curr_index])\n unkown_value_attrib_index_array[validation_sample_index] = (\n curr_unkown_value_attrib_index_array[curr_index])\n num_correct_classifications += curr_num_correct_classifications\n num_correct_classifications_wo_unkown += curr_num_correct_classifications_wo_unkown\n total_cost += curr_total_cost\n total_cost_wo_unkown += curr_total_cost_wo_unkown\n num_unkown += curr_num_unkown\n num_correct_trivial_classifications += round(\n len(validation_sample_indices) *\n (self.get_trivial_accuracy(validation_sample_indices) / 100.0))\n\n fold_count += 1\n time_taken_pruning_per_fold.append(curr_time_taken_pruning)\n num_nodes_pruned_per_fold.append(curr_num_nodes_pruned)\n\n if print_tree:\n print()\n print('-' * 50)\n print('Fold:', fold_count)\n self.save_tree()\n else:\n for (training_samples_indices,\n validation_sample_indices) in KFold(n_splits=num_folds).split(\n shuffled_sample_indices):\n\n ((curr_classifications,\n curr_num_correct_classifications,\n curr_num_correct_classifications_wo_unkown,\n curr_total_cost,\n curr_total_cost_wo_unkown,\n curr_classified_with_unkown_value_array,\n curr_num_unkown,\n curr_unkown_value_attrib_index_array),\n curr_max_depth,\n curr_time_taken_pruning,\n curr_num_nodes_pruned) = self.train_and_test(curr_dataset,\n training_samples_indices,\n validation_sample_indices,\n max_depth,\n min_samples_per_node,\n use_stop_conditions,\n max_p_value_chi_sq)\n max_depth_per_fold.append(curr_max_depth)\n num_nodes_per_fold.append(self.get_root_node().get_num_nodes())\n num_valid_nominal_attributes_in_root_per_fold.append(\n sum(self._root_node.valid_nominal_attribute))\n try:\n root_node_split_attrib = self.get_root_node().node_split.separation_attrib_index\n if curr_dataset.valid_nominal_attribute[root_node_split_attrib]:\n num_values_root_attribute_list.append(sum(\n num_samples > 0\n for num_samples in self.get_root_node().contingency_tables[\n root_node_split_attrib].values_num_samples))\n except AttributeError:\n num_trivial_splits += 1\n for curr_index, validation_sample_index in enumerate(validation_sample_indices):\n classifications[validation_sample_index] = curr_classifications[curr_index]\n classified_with_unkown_value_array[validation_sample_index] = (\n curr_classified_with_unkown_value_array[curr_index])\n unkown_value_attrib_index_array[validation_sample_index] = (\n curr_unkown_value_attrib_index_array[curr_index])\n num_correct_classifications += curr_num_correct_classifications\n num_correct_classifications_wo_unkown += curr_num_correct_classifications_wo_unkown\n total_cost += curr_total_cost\n total_cost_wo_unkown += curr_total_cost_wo_unkown\n num_unkown += curr_num_unkown\n num_correct_trivial_classifications += round(\n len(validation_sample_indices) *\n (self.get_trivial_accuracy(validation_sample_indices) / 100.0))\n\n fold_count += 1\n time_taken_pruning_per_fold.append(curr_time_taken_pruning)\n num_nodes_pruned_per_fold.append(curr_num_nodes_pruned)\n\n if print_tree:\n print()\n print('-' * 50)\n print('Fold:', fold_count)\n self.save_tree()\n\n return (classifications,\n num_correct_classifications,\n num_correct_classifications_wo_unkown,\n total_cost,\n total_cost_wo_unkown,\n classified_with_unkown_value_array,\n num_unkown,\n unkown_value_attrib_index_array,\n time_taken_pruning_per_fold,\n num_nodes_pruned_per_fold,\n max_depth_per_fold,\n num_nodes_per_fold,\n num_valid_nominal_attributes_in_root_per_fold,\n 
num_values_root_attribute_list,\n num_trivial_splits,\n 100.0 * num_correct_trivial_classifications / curr_dataset.num_samples)", "def kfold_scoring(self, data_frame, target, pipeline):\n\n fold_score = []\n macro = ['recall', 'f1', 'precision']\n number_of_folds = -1\n Folds = {}\n\n kf = KFold(n_splits=10, random_state=None, shuffle=True)\n\n for train_index, test_index in kf.split(data_frame):\n X_train = data_frame[train_index]\n X_test = data_frame[test_index]\n y_train = target[train_index]\n y_test = target[test_index]\n number_of_folds = number_of_folds + 1\n # Append the predicted labels.\n y_predict = self.fit_predict_model(X_train, y_train, X_test, pipeline)\n\n Folds[str(number_of_folds)] = {\n \"predicted\": y_predict,\n \"Actual\": y_test\n }\n\n if self.problem_type == 'regression':\n if self.scoring is not None:\n result = self.regression_scoring_function[self.scoring](y_predict, y_test)\n else:\n result = self.regression_scoring_function['r2_score'](y_predict, y_test)\n else:\n if self.scoring is not None:\n if self.scoring not in macro:\n result = self.classification_scoring_function[self.scoring](\n y_predict, y_test)\n else:\n result = self.classification_scoring_function[self.scoring](\n y_predict, y_test, average='macro')\n else:\n result = self.classification_scoring_function['f1'](\n y_predict, y_test, average='macro')\n\n fold_score.append(result)\n self.pipeline_dict['folds'] = Folds\n return np.mean(fold_score)", "def cross_validation(self, x, t):\n # Initialize accuracy / hyperparameters\n best_accuracy = 0.0\n best_reg = 0.0\n\n # Cross-validation 80-20\n N = x.shape[0]\n N_train = int(math.floor(0.8 * N))\n\n # Initialize the grid search hyperparameters\n min_reg = 0.001\n max_reg = 1000\n log_min_reg = np.log(min_reg)\n log_max_reg = np.log(max_reg)\n reg_list = np.logspace(log_min_reg, log_max_reg, num=7, base=math.e)\n\n for reg in reg_list:\n accuracy = np.zeros((self.k_fold))\n for i in range(self.k_fold):\n map_index = list(zip(x, t))\n random.shuffle(map_index)\n random_x, random_t = zip(*map_index)\n\n train_x = random_x[:N_train]\n valid_x = random_x[N_train:]\n train_t = random_t[:N_train]\n valid_t = random_t[N_train:]\n\n self.model = sklearn.neural_network.MLPClassifier(self.hidden_layer_sizes,\n activation=self.activation,\n alpha=reg, max_iter=1000, \n random_state=self.random_state)\n self.train(train_x, train_t)\n accuracy[i] = self.model.score(valid_x, valid_t)\n\n mean_accuracy = np.mean(accuracy)\n # print(mean_accuracy)\n if mean_accuracy > best_accuracy:\n best_accuracy = mean_accuracy\n best_reg = reg\n print(\"The new best hyperparameters are : \", best_reg)\n\n print(\"Best hyperparameters are : \", best_reg)\n print(\"Valid Accuracy :\", best_accuracy)\n self.reg = best_reg\n self.model = sklearn.neural_network.MLPClassifier(self.hidden_layer_sizes,\n activation=self.activation,\n alpha=best_reg, max_iter=1000, \n random_state=self.random_state)\n self.train(x, t)", "def cross_validation(y, x, k_indices, k, lambda_, degree):\n # ***************************************************\n # INSERT YOUR CODE HERE\n # get k'th subgroup in test, others in train: TODO\n # ***************************************************\n all_indices = np.arange(y.shape[0])\n excepted = np.setdiff1d(all_indices,k_indices[k])\n x_test = x[k_indices[k]]\n y_test = y[k_indices[k]]\n x_train = x[excepted]\n y_train = y[excepted]\n # ***************************************************\n # INSERT YOUR CODE HERE\n # form data with polynomial degree: TODO\n # 
***************************************************\n x_train_extended = build_poly(x_train, degree)\n #print(x_train_extended.shape)\n x_test_extended = build_poly(x_test, degree)\n #print(x_test_extended.shape)\n # ***************************************************\n # INSERT YOUR CODE HERE\n # ridge regression: TODO\n # ***************************************************\n loss_tr,w_tr = ridge_regression(y_train,x_train_extended,lambda_)\n #print(\"One new regression\")\n #print(w_tr)\n loss_te = compute_loss(y_test,x_test_extended,w_tr)\n # ***************************************************\n # INSERT YOUR CODE HERE\n # calculate the loss for train and test data: TODO\n # ***************************************************\n return loss_tr, loss_te", "def cross_validate_model(self, X_train, y_train):\n\n\t\t# Build a stratified k-fold cross-validator object\n\t\tskf = StratifiedKFold(n_splits=10, shuffle=True, random_state=42)\n\n\t\t'''\n\t\tEvaluate the score by cross-validation\n\t\tThis fits the classification model on the training data, according to the cross-validator\n\t\tand reports the scores.\n\t\tAlternative: sklearn.model_selection.cross_validate\n\t\t'''\n\t\tscores = cross_val_score(self.classifier, X_train, y_train, scoring='accuracy', cv=skf)\n\n\t\tprint(\"%.2f seconds: Cross-validation finished\" % time.process_time())\n\n\t\t# Log the cross-validation scores, the mean score and the 95% confidence interval, according to:\n\t\t# http://scikit-learn.org/stable/modules/cross_validation.html#computing-cross-validated-metrics\n\t\t# https://en.wikipedia.org/wiki/Standard_error#Assumptions_and_usage\n\t\t# print(\"Scores = %s\" % scores)\n\t\t# print(\"Accuracy: %0.2f (±%0.2f)\" % (scores.mean()*100, scores.std()*2*100))\n\t\t# ↳ https://docs.scipy.org/doc/numpy/reference/generated/numpy.std.html", "def cross_validation_step_sgd(ratings, k_fold=4, gamma=0.01, num_features=25,\n lambda_user=0.1, lambda_item=0.7, num_epochs=20, verbose=False):\n tr_errors = []\n te_errors = []\n folds = k_fold_split(ratings, k=k_fold)\n for fold in range(k_fold):\n print('Fold {}/{}'.format(fold+1, k_fold))\n train, test = split_matrix(ratings, folds[fold])\n train_rmse, test_rmse, _, _ = matrix_factorization_sgd(train, test, gamma=gamma, num_features=num_features,\n lambda_user=lambda_user, lambda_item=lambda_item,\n num_epochs=num_epochs, verbose=verbose)\n tr_errors.append(train_rmse)\n te_errors.append(test_rmse)\n return np.mean(tr_errors), np.mean(te_errors)", "def cross_validation(y, x, k_indices, k, lambda_):\n\n # get k'th subgroup in test, others in train\n te_indice = k_indices[k]\n x_te = x[te_indice]\n y_te = y[te_indice]\n\n tr_indice = k_indices[~(np.arange(k_indices.shape[0]) == k)]\n tr_indice = tr_indice.reshape(-1)\n x_tr = x[tr_indice]\n y_tr = y[tr_indice]\n\n score_table = []\n\n # ridge regression\n #print('Ridge inside cross val started')\n weights, loss = ridge_regression(y_tr, x_tr, lambda_)\n\n y_te_predict = predict_labels(weights, x_te)\n score = (y_te_predict == y_te).mean()\n score_table.append(score)\n\n # calculate the loss for train and test data\n e_tr = y_tr - x_tr.dot(weights)\n loss_tr = np.sqrt(2 * compute_mse(e_tr))\n\n e_te = y_te - x_te.dot(weights)\n loss_te = np.sqrt(2 * compute_mse(e_te))\n\n return loss_tr, loss_te, np.array(score_table)", "def k_fold_linear(data: pd.DataFrame, algorithm, folds: int = 5) -> (float, float):\n d = data.sample(frac=1)\n segments = np.array_split(d, folds)\n acc_test = []\n\n acc_train = []\n for i in 
range(folds):\n temp = segments.copy()\n\n test = temp.pop(i)\n train = pd.concat(temp)\n test_labels = list(test['Labels'])\n train_labels = list(train['Labels'])\n\n model = algorithm(train)\n test_predictions = [round(x, 1) for x in predict_linear_regression(test.drop(['Labels'], axis=1), model)]\n train_predictions = [round(x, 1) for x in predict_linear_regression(train.drop(['Labels'], axis=1), model)]\n\n Confusion_Matrix(test_predictions, test_labels)\n\n acc_test.append(accuracy(test_predictions, test_labels))\n acc_train.append(accuracy(train_predictions, train_labels))\n\n return avg(acc_train), avg(acc_test)", "def n_fold_cross_validation(examples, num_folds=5):\n random.shuffle(examples, random.seed(12345))\n batches = [examples[i::num_folds] for i in range(num_folds)]\n n_fold_sets = [\n (\n [example for flat in (batches[:i] + batches[i + 1 :]) for example in flat],\n batch,\n )\n for i, batch in enumerate(batches)\n ]\n\n return np.array(n_fold_sets)", "def k_fold_selection(self,training_dataframe, selected_column, ignored_columns):\n \n # getting all the outcomes (i.e. from the selected column)\n outcomes = training_dataframe[selected_column].factorize()[0]\n # remove selected column from dataframe\n training_dataframe.drop(selected_column, axis=1, inplace=True)\n \n # remove all 'ignored columns' from dataframe\n ignored_columns = ast.literal_eval(ignored_columns)\n for column in ignored_columns:\n training_dataframe.drop(column, axis=1, inplace=True)\n \n column_states = {}\n # factorize the columns and store the state names of each feature as a dict\n for column in training_dataframe.columns:\n factorized_column = training_dataframe[column].factorize()\n training_dataframe[column] = factorized_column[0]\n column_states[column] = factorized_column[1].tolist()\n \n # intialize the k-fold cross validation object\n kf = KFold(len(outcomes), n_folds=10, shuffle=True, random_state=14)\n \n for iter_id, item in enumerate(kf):\n # for the first iteration, the classification model needs to be initialized at the end\n if iter_id == 0:\n # training segment\n features_train = np.array([list(training_dataframe.ix[i]) for i in item[0]])\n outcomes_train = np.array([outcomes[i] for i in item[0]])\n \n # evaluation segment\n features_test = np.array([list(training_dataframe.ix[i]) for i in item[1]])\n outcomes_test = np.array([outcomes[i] for i in item[1]])\n \n # create the classifier\n best_clf = BernoulliNB()\n best_clf.fit(features_train, outcomes_train)\n best_accuracy = math.ceil((best_clf.score(features_test,outcomes_test, sample_weight=None)*100)*100)/100\n continue\n \n # training segment\n features_train = np.array([list(training_dataframe.ix[i]) for i in item[0]])\n outcomes_train = np.array([outcomes[i] for i in item[0]])\n \n # evaluation segment\n features_test = np.array([list(training_dataframe.ix[i]) for i in item[1]])\n outcomes_test = np.array([outcomes[i] for i in item[1]])\n \n # create the classifier\n clf = BernoulliNB()\n clf.fit(features_train, outcomes_train)\n accuracy = math.ceil((clf.score(features_test,outcomes_test,sample_weight=None)*100)*100)/100\n \n # hold the best model in best_clf\n if accuracy > best_accuracy:\n accuracy = best_accuracy\n best_clf = clf\n \n return best_accuracy, best_clf, column_states, training_dataframe.columns.tolist()", "def cross_valid_key(model,x,key,preds,target,metric,verbose=True): \r\n\r\n score=[]\r\n \r\n keys = x[key].unique().tolist()\r\n \r\n\r\n\r\n for idx, item in enumerate([1,2,3,4,5]):\r\n\r\n xtrain,xtest = 
split_camp(x,keys,0.2)\r\n \r\n model.fit(xtrain[feat],xtrain[target])\r\n\r\n ypred = model.predict(xtest[feat])\r\n \r\n ytrue= xtest[target].values \r\n \r\n if metric == 'mae':\r\n score.append(mae(ytrue,ypred))\r\n elif metric == 'mse':\r\n score.append(mse(ytrue,ypred))\r\n elif metric == 'rrmse':\r\n score.append(rrmse(ytrue,ypred))\r\n\r\n else:\r\n score.append(rmse(xtest[target].tolist(),ypred))\r\n\r\n if verbose:\r\n print('-'*30)\r\n print(f'\\nFold {idx} out of 5')\r\n print(f'Key {item}')\r\n print(f'{metric}: {score[idx]}')\r\n\r\n \r\n\r\n if verbose:\r\n print(f'\\n Overall Score:')\r\n print(f'{metric}: Mean: {np.mean(score)} Std: {np.std(score)}')\r\n\r\n\r\n return score", "def cross_validate(data, k, distance_metric):\n fraction_correct = 0.00\n correctly_classified = 0\n for i, test_data in enumerate(data):\n training_data = []\n for j in range(len(data)):\n if j!=i:\n training_data.append(data[j])\n observed_classification = knn_classify_point(test_data, training_data, k, distance_metric)\n actual_classification = test_data.classification\n if observed_classification == actual_classification:\n correctly_classified += 1\n fraction_correct = float(correctly_classified/len(data))\n return fraction_correct", "def crossValidation(data, output_variable_name):\r\n X, xt, y, yt = train_test_split(\r\n data.drop(output_variable_name, axis=1), data[output_variable_name], test_size=0.01, random_state=SEED)\r\n\r\n model = pickle.load(open(\"models/lasso.sav\", 'rb'))\r\n lassoCV = -mean(cross_val_score(model, X, y, cv=5, scoring='neg_root_mean_squared_error'))\r\n\r\n model = pickle.load(open(\"models/ridge.sav\", 'rb'))\r\n ridgeCV = -mean(cross_val_score(model, X, y, cv=5, scoring='neg_root_mean_squared_error'))\r\n\r\n model = pickle.load(open(\"models/decisionTree.sav\", 'rb'))\r\n decTreeCV = -mean(cross_val_score(model, X, y, cv=5, scoring='neg_root_mean_squared_error'))\r\n\r\n param = {\r\n 'max_depth': 15,\r\n 'eta': 0.1,\r\n 'objective': 'reg:squarederror',\r\n 'nthread': 16,\r\n \"subsample\": 0.5,\r\n \"colsample_bytree\": 0.5,\r\n 'eval_metric': 'rmse'\r\n }\r\n num_round = XGB_EPOCH_NR\r\n\r\n dtrain = xgb.DMatrix(X, label=y)\r\n xgbCV = xgb.cv(\r\n param,\r\n dtrain,\r\n num_boost_round=num_round,\r\n seed=SEED,\r\n nfold=5,\r\n metrics={'rmse'}\r\n )[\"test-rmse-mean\"][-1:]\r\n\r\n param = {\r\n \"iterations\": 400,\r\n \"learning_rate\": 0.02,\r\n \"depth\": 12,\r\n \"eval_metric\": 'RMSE',\r\n \"random_seed\": 23,\r\n \"bagging_temperature\": 0.2,\r\n \"od_type\": 'Iter',\r\n \"metric_period\": 75,\r\n \"od_wait\": 100\r\n }\r\n\r\n catBoostCV = cv(data, param, fold_count=5, plot=True)\r\n\r\n return lassoCV, ridgeCV, decTreeCV, xgbCV, catBoostCV", "def get_k_fold(examples, labels, k=10):\n example_fold = []\n label_fold = []\n interval = int(len(examples)/k)\n for i in range(k):\n \t#f_examples = [examples[j] for j in range(len(examples)) if j%k == i]\n #f_labels = [labels[j] for j in range(len(labels)) if j%k == i]\n f_examples = [examples[j] for j in range(interval*i,interval*(i+1))]\n f_labels = [labels[j] for j in range(interval*i,interval*(i+1))]\n example_fold.append(f_examples)\n label_fold.append(f_labels)\n return example_fold, label_fold", "def stratify_folds(labels, k):\n\n if k < 1:\n raise ValueError('Number of folds, k, must be positive: {}'.format(k))\n\n # Split indices into k folds\n skf_test_model = StratifiedKFold(shuffle=True, n_splits=k)\n\n skf_test = skf_test_model.split(np.zeros(len(labels)), labels)\n skf_test = [skf_set[1] for 
skf_set in skf_test]\n\n for cv_set in range(k):\n skf_test[cv_set] = np.expand_dims(skf_test[cv_set], axis=1)\n skf_test[cv_set] = np.append(skf_test[cv_set], np.repeat(cv_set, skf_test[cv_set].shape[0]).T[:, np.newaxis],\n axis=1)\n\n # Create one n x 2 array\n # * where n is the number of samples\n # * column 1 is the variant index\n # * column 2 is the set the variant belongs to\n\n fold_array = np.concatenate(skf_test, axis=0)\n\n # Replace indices with label indices\n fold_array[:, 0] = labels.index[fold_array[:, 0]]\n\n # Sort by variant index\n fold_array = fold_array[fold_array[:, 0].argsort(), :]\n\n # Return array\n return fold_array", "def cross_validate(self, train_size):\n train, val, test_x, test_y = [], [], [], []\n for country in self.countries:\n tr, v, te_x, te_y = country.split_k_fold(train_size, self.horizon)\n train.append(tr), val.append(v), test_x.append(te_x), test_y.append(te_y)\n return np.stack(train), np.stack(val), np.stack(test_x), np.stack(test_y)", "def run_cross_validation_create_models(nfolds=10):\n print(\"nfold value=\",nfolds)\n x_train, y_train, x_train_id = load_images_train()\n print(len(x_train))\n\n # input image dimensions\n batch_size = 16\n nb_epoch = 32\n random_state = 159\n\n seed = 7\n np.random.seed(seed)\n kfold = StratifiedKFold(n_splits=nfolds, shuffle=True, random_state=random_state)\n #kfold = StratifiedShuffleSplit(n_splits=nfolds,test_size=0.1,train_size=0.7,random_state=random_state)\n cvscores = []\n models = []\n\n image_array = np.asarray(x_train, dtype=np.float32)\n # print(image_array.shape)\n\n datagen_test = ImageDataGenerator(\n featurewise_center=True,\n featurewise_std_normalization=True,\n preprocessing_function=pre_processing_image\n )\n\n datagen = ImageDataGenerator(\n rotation_range=20,\n width_shift_range=0.2,\n height_shift_range=0.2,\n shear_range=0.2,\n zoom_range=0.2,\n preprocessing_function=pre_processing_image,\n vertical_flip=True,\n horizontal_flip=True,\n fill_mode='nearest'\n )\n\n start_time = time.time()\n print(\"Datagen.fit started\")\n datagen.fit(image_array, augment=True, rounds=3)\n print('Fit Completed: {} seconds'.format(round(time.time() - start_time, 2)))\n\n img_label = np_utils.to_categorical(y_train, 8)\n\n yfull_train = dict()\n num_fold = 0\n sum_score = 0\n\n for train_index, test_index in kfold.split(x_train, y_train):\n # create model\n model = create_model()\n train_x = image_array[train_index]\n train_y = img_label[train_index]\n validate_x = image_array[test_index]\n validate_y = img_label[test_index]\n\n num_fold += 1\n print('Start KFold number {} from {}'.format(num_fold, nfolds))\n print('Split train: ', len(train_x), len(train_y))\n print('Split valid: ', len(validate_x), len(validate_y))\n\n callbacks = [EarlyStopping(monitor='val_loss', min_delta=0, patience=4, verbose=1, mode='auto')]\n\n model.fit_generator(generator=datagen.flow(train_x, train_y, batch_size=batch_size, shuffle=True),\n steps_per_epoch=len(image_array)/32, epochs=nb_epoch, verbose=1,\n callbacks=callbacks,validation_data=(validate_x,validate_y),\n validation_steps=len(image_array)/32)\n\n predictions_valid = model.predict(validate_x.astype('float32'), batch_size=batch_size, verbose=1)\n\n\n score = log_loss(validate_y, predictions_valid)\n print('Score log_loss: ', score)\n\n\n sum_score += score * len(test_index)\n\n # Store valid predictions\n for i in range(len(test_index)):\n yfull_train[test_index[i]] = predictions_valid[i]\n\n models.append(model)\n\n score = sum_score / len(x_train)\n 
print(\"Log_loss train independent avg: \", score)\n\n info_string = 'loss_' + str(score) + '_folds_' + str(10) + '_ep_' + str(28)\n return info_string,models", "def cross_validation_datasets(self, fold):\n if fold > len(self): fold = len(self) / 2\n stratified = self.stratified_bunches(fold)\n datasets = []\n for index in range(len(stratified)):\n gold = GoldInstances(training_as_gold(stratified[index]))\n rest = flatten(stratified[:index]) + flatten(stratified[index + 1:])\n training = TrainingInstances(rest)\n datasets.append((training, gold))\n return datasets", "def nestedValidation(X, y, kernel):\n\n # list of slack variable values to test the SVM with\n C_list = [0.001, 0.01, 0.1, 1.0, 10.0, 100.0, 1000.0]\n # number of iterations to run the bootstraping\n B = 30\n\n # split positive and negative valued samples\n positive_samples = list(np.where(y==1)[0])\n negative_samples = list(np.where(y==-1)[0])\n\n # randomize data for testing\n np.random.shuffle(positive_samples)\n np.random.shuffle(negative_samples)\n\n # split samples into two folds with similar proportion of positive:negative\n samples_in_fold1 = positive_samples[:106] + negative_samples[:178]\n samples_in_fold2 = positive_samples[106:] + negative_samples[178:]\n\n y_pred = np.zeros(len(X), int)\n\n # finding the best slack value and its corresponding error\n best_err = 1.0\n best_C = 0.0\n\n print(\"testing with first fold\")\n for C in C_list:\n err = bootstrapping(B, X[samples_in_fold1], y[samples_in_fold1], C, kernel)\n print(\"C=\", C, \"err=\", err)\n if (err <= best_err):\n best_err = err\n best_C = C\n\n print(\"Best C=\", best_C)\n\n alg = SVC(C=best_C, kernel=kernel)\n alg.fit(X[samples_in_fold1], y[samples_in_fold1])\n y_pred[samples_in_fold2] = alg.predict(X[samples_in_fold2])\n\n best_err = 1.1 # any value greater than 1.0\n best_C = 0.0\n\n print(\"testing with second fold\")\n for C in C_list:\n err = bootstrapping(B, X[samples_in_fold2], y[samples_in_fold2], C, kernel)\n print(\"C=\", C, \"err=\", err)\n if (err <= best_err):\n best_err = err\n best_C = C\n\n print(\"Best C=\", best_C)\n\n alg = SVC(C=best_C, kernel=kernel)\n alg.fit(X[samples_in_fold2], y[samples_in_fold2])\n y_pred[samples_in_fold1] = alg.predict(X[samples_in_fold1])\n\n err = np.mean(y != y_pred)\n\n print(\"Nested Validation Error=\", err)\n return err", "def cross_validate(X, Y, folds=5):\n\n log = LogisticRegression(C=1.0, class_weight=None, dual=False, fit_intercept=True,\n intercept_scaling=1, max_iter=200, multi_class='ovr', n_jobs=3,\n penalty='l2', random_state=None, solver='liblinear', tol=0.0001,\n verbose=0, warm_start=False)\n \n\n \n\n\n scores_log = [] \n scores_forest = []\n index = np.arange(X.shape[0])\n score_log = 0\n score_forest = 0\n \n for i in range(folds):\n score_log = 0\n score_forest = 0\n \n test_index = np.random.choice(index, int(X.shape[0]*1/folds),replace=False)\n index = np.setdiff1d(np.arange(X.shape[0]),test_index)\n \n test_x = X[test_index]\n test_y = Y[test_index]\n\n log.fit(X[index],Y[index])\n pred_log = log.predict(test_x)\n \n ran.fit(X[index],Y[index])\n pred_ran = ran.predict(test_x)\n \n for i in range(len(test_y)):\n if(pred_log[i] == test_y[i]):\n score_log += 1\n if(pred_ran[i] == test_y[i]):\n score_forest += 1\n scores_log.append(score_log/len(test_y))\n scores_forest.append(score_forest/len(test_y))\n \n\n return (np.mean(scores_log),np.mean(scores_forest))", "def kfold_run(self, estimators):\n model = self.model_build(estimators)\n super().kfold_run(model)", "def 
cross_validation(feature_train, help_rank_train, model_name):\n clf = svm.SVC(kernel='linear', C=1).fit(feature_train, help_rank_train)\n clf_model = open(model_name,'wb')\n dump(clf, clf_model, -1)\n return", "def cross_validation_accuracy(clf, X, labels, k):\n ###TODO\n\n cv = KFold(n=len(labels),n_folds=k)\n accuracies = []\n\n \n for train_indices, test_indices in cv:\n \n clf.fit(X[train_indices], labels[train_indices])\n predicted = clf.predict(X[test_indices])\n acc = accuracy_score(labels[test_indices], predicted)\n accuracies.append(acc)\n \n #print('accuracies = ',accuracies) \n #avg = np.mean(accuracies,dtype=np.float64)\n return(np.mean(accuracies,dtype=np.float64))", "def cross_validate(pipeline, data, cv=4):\n print \"Running cross validation...\"\n (Xcv, ycv) = data\n kfold = KFold(n_splits=cv, shuffle=True, random_state=42)\n results = []\n for train_idx, val_idx in kfold.split(Xtrain):\n pipeline.fit(Xcv[train_idx], ycv[train_idx])\n results.append(accuracy_score(\n ycv[val_idx], pipeline.predict(Xcv[val_idx])\n ))\n print \"{} +/- {}\".format(np.mean(results), np.std(results))", "def cross_validation(feature_matrix, labels, k, flag = 0, T = 100):\n (nsamples, nfeatures) = feature_matrix.shape\n nlabels = len(labels)\n subset_size = nsamples / k\n percentages = []\n\n for i in xrange(k):\n cross_feature_matrix = np.concatenate((feature_matrix[:i*subset_size], feature_matrix[(i+1)*subset_size:]))\n cross_labels = np.concatenate((labels[:i*subset_size], labels[(i+1)*subset_size:]))\n test_matrix = feature_matrix[i*subset_size:(i+1)*subset_size]\n test_labels = labels[i*subset_size:(i+1)*subset_size]\n\n if flag == 0:\n theta_vector, theta_0 = averager(cross_feature_matrix, cross_labels)\n elif flag == 1:\n theta_vector, theta_0 = train_perceptron(cross_feature_matrix, cross_labels)\n elif flag == 2:\n theta_vector, theta_0 = train_passive_agressive(cross_feature_matrix, cross_labels, T)\n\n label_output = perceptron_classify(test_matrix, theta_0, theta_vector)\n\n correct = 0\n for i in xrange(0, len(label_output)):\n if(label_output[i] == test_labels[i]):\n correct = correct + 1\n\n percentages.append(100.0 * correct / len(label_output))\n\n return sum(percentages) / len(percentages)", "def fold_data(fold, datas):\n training, validation = fold\n folded = {}\n\n # fold Xtrain\n if datas.get('Xtrain') is not None:\n folded['Xtrain'] = datas['Xtrain'][training, :]\n folded['Xtest'] = datas['Xtrain'][validation, :]\n\n # fold Ktrain\n if datas.get('Ktrain') is not None:\n folded['Ktrain'] = datas['Ktrain'][training, :][:, training]\n folded['Ktest'] = datas['Ktrain'][validation, ][:, training]\n # if datas.get('Ktest') is not None:\n # folded['Ktest'] = datas['Ktest'][:, validation]\n # else:\n # folded['Ktest'] = folded['Xtest'].dot(folded['Xtrain'].T)\n\n # fold Ytrain\n if datas.get('Ytrain') is not None:\n folded['Ytrain'] = datas['Ytrain'][training, :]\n folded['Ytest'] = datas['Ytrain'][validation, :]\n\n return folded", "def _cross_valid_feature(max_fold, num_movies, num_users, lamda, subplt):\n loss_train_vector = [.0]*31\n loss_cross_vector = [.0]*31\n cost_train_per_fold = [.0]*max_fold\n cost_cross_per_fold = [.0]*max_fold\n for i in range(1, 31, 1):\n for k in range(1, max_fold + 1, 1):\n cost_train_per_fold[k-1], cost_cross_per_fold[k-1] = (\n _cross_valid_fold(k, num_movies, num_users,\n i, lamda))\n loss_train_vector[i] = np.mean(cost_train_per_fold)\n loss_cross_vector[i] = np.mean(cost_cross_per_fold)\n\n # draw the Loss v.s num_feature graph\n 
subplt.plot(loss_train_vector, \"r\")\n subplt.plot(loss_cross_vector, \"b\")\n v1 = np.array(loss_cross_vector)\n v2 = np.array(loss_train_vector)\n v3 = v1 + v2\n sel_feature = np.argmin(v3[1:]) + 1\n subplt.plot(v3, \"g\", label=\"lambda=\"+str(lamda))\n plt.axis([1, 30, 0, 1.2*max(v3)])\n return sel_feature", "def choose_k ( \n X,\n y,\n model_call,\n param_grid,\n scoring_func = accuracy,\n cv = KFoldStratifiedCV ( number_of_folds = 3 ),\n):\n grid_search_cv = GridSearchCV (\n model_callable = model_call,\n param_grid = param_grid,\n scoring_func = scoring_func,\n cv_object = cv,\n )\n \n # Get the last sorted value and take k from that values\n return sorted ( list ( grid_search_cv.get_cv_scores ( X, y ) ), key = lambda x: x [ 1 ] ) [ -1 ][ 0 ][ \"k\" ]\n # End choose_k()", "def cross_validation_accuracy(clf, X, labels, k):\n abc = KFold(k, False)\n answer = []\n\n for train_idx, test_idx in abc.split(X):\n Xt, Xs = X[train_idx],X[test_idx]\n tr, ts=labels[train_idx],labels[test_idx]\n clf.fit(Xt,tr)\n final_ans = clf.predict(Xs)\n acc1 = accuracy_score(ts, final_ans)\n answer.append(acc1)\n\n return np.mean(np.array(answer))", "def set_cross_validation(x,y):\n\tx_train_1 = x[50:]\n\ty_train_1 = y[50:]\n\tx_test_1 = x[:50]\n\ty_test_1 = y[:50]\n\tx_train_2 = np.concatenate((x[:50], x[100:]),axis=0)\n\ty_train_2 = np.concatenate((y[:50], y[100:]),axis=0)\n\tx_test_2 = x[50:100]\n\ty_test_2 = y[50:100]\n\tx_train_3 = np.concatenate((x[:100], x[150:]),axis=0)\n\ty_train_3 = np.concatenate((y[:100], y[150:]),axis=0)\n\tx_test_3 = x[100:150]\n\ty_test_3 = y[100:150]\n\tx_train_4 = x[:150]\n\ty_train_4 = y[:150]\n\tx_test_4 = x[150:]\n\ty_test_4 = y[150:]\n\n\tx_train = [x_train_1,x_train_2,x_train_3,x_train_4]\n\ty_train = [y_train_1,y_train_2,y_train_3,y_train_4]\n\tx_test = [x_test_1,x_test_2,x_test_3,x_test_4]\n\ty_test = [y_test_1,y_test_2,y_test_3,y_test_4]\n\t# print 'cross val shapes', x_train.shape, y_train.shape, x_test.shape, y_test.shape\n\treturn x_train, y_train, x_test, y_test", "def train_and_test_k_fold(\n ds, prd, k=10, comm=config.comm, online=False, classes=None, parallel_test=False,\n cycles_per_barrier=10):\n train_and_test = lambda tr, te: train_and_test_once(\n tr, te, prd, comm=comm, online=online, classes=classes, parallel_test=parallel_test,\n cycles_per_barrier=cycles_per_barrier)\n\n if k <= 0:\n raise ValueError(\"k must be positive\")\n elif k == 1:\n splits = ds.split(10)\n train = concatenate(splits[j] for j in range(9))\n test = splits[9]\n return train_and_test(train, test)\n else:\n r = null_training_result()\n for train, test in get_k_fold_data(ds, k=k):\n r += train_and_test(train, test)\n comm.barrier()\n\n return r", "def choose_k ( \n feature_matrix,\n target_array,\n model_call,\n param_grid,\n scoring_func = accuracy,\n cv = KFoldStratifiedCV ( number_of_folds = 3 ),\n):\n grid_search_cv = GridSearchCV (\n model_callable = model_call,\n param_grid = param_grid,\n scoring_func = scoring_func,\n cv_object = cv,\n )\n \n # Get the last sorted value and take k from that values\n return sorted ( list ( grid_search_cv.get_cv_scores ( feature_matrix, target_array ) ), key = lambda x: x [ 1 ] ) [ -1 ][ 0 ][ \"k\" ]\n # End choose_k()", "def _get_devset_cv(self, train_x, train_y, dev_x, dev_y, n_folds):\n folds = []\n n_train = len(train_y)\n n_dev = len(dev_y)\n dev_ids = [n_train + i for i in xrange(n_dev)]\n # create stratified K-folds over the training data\n skf = StratifiedKFold(n_splits=NFOLDS, shuffle=True)\n for train_ids, test_ids in 
skf.split(train_x, train_y):\n folds.append((train_ids,\n np.concatenate((test_ids, dev_ids))))\n train_x += dev_x\n train_y += dev_y\n return folds, train_x, train_y", "def cross_validate(all_tetrode_data, target, tetrode_ids, tetrode_units, verbose=True):\n kf = StratifiedKFold(n_splits=10)\n y_true = np.zeros(target.shape)\n y_hat = np.zeros(target.shape)\n i = 0\n\n for train_index, test_index in kf.split(np.zeros(target.shape[0]), target.argmax(axis=-1)):\n X_train, X_test = select_data(all_tetrode_data, train_index), select_data(all_tetrode_data, test_index)\n y_train, y_test = target[train_index, :], target[test_index, :]\n\n model = build_tetrode_model(tetrode_ids, tetrode_units)\n checkpointer = ModelCheckpoint('temp_model.h5',\n verbose=0, save_best_only=True)\n hist = model.fit(X_train, y_train,\n nb_epoch=200, batch_size=20,\n validation_data=(X_test, y_test),\n callbacks=[checkpointer], verbose=0)\n best_model = load_model('temp_model.h5')\n\n n = y_test.shape[0]\n y_true[i:(i + n), :] = y_test\n y_hat[i:(i + n), :] = best_model.predict(X_test)\n i += n\n\n if verbose:\n accuracy = max(hist.history['val_acc'])\n print('Current fold validation accuracy: {acc}'.format(acc=accuracy))\n\n return y_true, y_hat", "def train_cv(X_train, Y_train, nfold = 5, early_stopping_rounds = 20):\n # model params\n params = { \"objective\" : \"multiclass\",\n \"num_class\" : 6,\n \"verbosity\" : -1 }\n\n # create dataset for lightgbm\n lgb_train = lgb.Dataset(X_train, Y_train)\n \n # cross validate to find optimal no of iterations\n r = lgb.cv(params, \n lgb_train, \n 10000,\n early_stopping_rounds = early_stopping_rounds,\n nfold = nfold,\n feval = accuracy_error,\n metrics = 'None',\n verbose_eval = True,\n seed = 42)\n\n # Highest score\n r_best = np.max(r['accuracy-mean'])\n\n # best number of estimators\n best_estimators = np.argmax(r['accuracy-mean']) + 1\n print(best_estimators)\n\n print(f'The maxium accuracy on the validation set was {r_best:.5f}')\n print(f'The ideal number of iterations was {best_estimators}.')\n\n # Fit on all of the training data using the ideal number of iterations\n model = lgb.LGBMClassifier(n_estimators=best_estimators, n_jobs = -1,\n **params, random_state = 42) \n model.fit(X_train, Y_train)\n\n return model" ]
[ "0.77007174", "0.7679591", "0.76575476", "0.76463956", "0.7470647", "0.7434266", "0.7416312", "0.73822284", "0.7370632", "0.73616713", "0.73327446", "0.73284346", "0.722847", "0.7186071", "0.71467024", "0.7122442", "0.70457053", "0.6964643", "0.6961495", "0.6944377", "0.69214356", "0.6885895", "0.6834962", "0.6831402", "0.683031", "0.68242896", "0.6814457", "0.68019813", "0.67791474", "0.67709404", "0.67501765", "0.67373943", "0.67334545", "0.6729874", "0.67278045", "0.66968393", "0.66899306", "0.6672713", "0.6671377", "0.66663545", "0.6656561", "0.6654989", "0.6636143", "0.65926963", "0.6578277", "0.6569758", "0.6548368", "0.6512602", "0.6503532", "0.6499182", "0.6449522", "0.6435872", "0.64254946", "0.6407219", "0.6386662", "0.63842106", "0.63194793", "0.62940174", "0.6280331", "0.6276167", "0.6274001", "0.62617725", "0.6247634", "0.62458295", "0.62285143", "0.6227249", "0.62102973", "0.6208086", "0.6193006", "0.61873674", "0.6184935", "0.6173738", "0.6169475", "0.6169334", "0.61504996", "0.6146204", "0.6135572", "0.6120028", "0.6117503", "0.61110556", "0.611091", "0.6097301", "0.6084769", "0.6051688", "0.6036438", "0.6029118", "0.6020981", "0.6016158", "0.60135776", "0.6013367", "0.6009401", "0.59847695", "0.5974658", "0.5969564", "0.5957593", "0.5951031", "0.5934232", "0.5927389", "0.592584", "0.5921864" ]
0.63284665
56
Returns train and validation indices respecting the temporal ordering of the data.
def time_series_cross_validation(random_state: np.random.RandomState,
                                 num_splits: int,
                                 indices: np.ndarray,
                                 **kwargs: Any
                                 ) -> List[Tuple[np.ndarray, np.ndarray]]:
    test_size = kwargs['n_prediction_steps']
    n_repeats = kwargs['n_repeats']
    cv = TimeSeriesSplit(n_splits=num_splits, test_size=test_size * n_repeats, gap=0)
    splits = [(
        indices[split[0]],
        indices[split[1][[-1 - n * test_size for n in reversed(range(n_repeats))]]])
        for split in cv.split(indices)]
    return splits
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_train_index():\n data_size = (NUM_CLASS - 1) * NUM_DATA_PER_CLASS\n return np.array([i for i in range(0, data_size)])", "def get_training_index():\n return list(range(0, 305))", "def split_data(train, parameters):\n labels = train.labels\n train_indices, val_indices = train_test_split(range(len(labels)),\n stratify=labels,\n random_state=parameters['seed'],\n test_size=parameters['validation_size'])\n return train_indices, val_indices", "def get_inputs_train():\n x = tf.constant(extract_pandas_data(x_train))\n y = tf.constant(y_train.values)\n return x, y", "def ordered_indices(self):\n return self.base_dataset.ordered_indices()", "def build_index_groups(train):\n nz_row, nz_col = train.nonzero()\n nz_train = list(zip(nz_row, nz_col))\n\n grouped_nz_train_byrow = group_by(nz_train, index=0)\n nz_row_colindices = [(g, np.array([v[1] for v in value]))\n for g, value in grouped_nz_train_byrow]\n\n grouped_nz_train_bycol = group_by(nz_train, index=1)\n nz_col_rowindices = [(g, np.array([v[0] for v in value]))\n for g, value in grouped_nz_train_bycol]\n return nz_train, nz_row_colindices, nz_col_rowindices", "def __train_test_splits(self):\n # By default, our indices are just 0-n\n split_indices = list(range(len(self.data)))\n # If shuffling, use our shared Random instance to shuffle our indices before slicing\n if self.shuffle:\n np.random.shuffle(split_indices)\n # Regardless of shuffle, take the first self.train_proportion for training, and the last\n # 1 - self.train_proportion records as test\n train_n = int(self.train_proportion * len(self.data))\n training_indices = split_indices[:train_n]\n test_indices = split_indices[train_n:]\n return training_indices, test_indices", "def get_indices_input_target(num_obs, input_len, step_size, forecast_horizon, target_len):\n input_len = round(input_len) # just a precaution\n start_position = 0\n stop_position = num_obs - 1\n\n inpseq_first_idx = start_position\n inpseq_last_idx = inpseq_first_idx + input_len\n target_first_idx = inpseq_last_idx + forecast_horizon\n target_last_idx = target_first_idx + target_len\n print(\"target_last_idx = {}\".format(target_last_idx))\n print(\"stop_position = {}\".format(stop_position))\n indices = []\n while target_last_idx <= stop_position:\n indices.append((inpseq_first_idx, inpseq_last_idx, target_first_idx, target_last_idx))\n inpseq_first_idx += step_size\n inpseq_last_idx += step_size\n target_first_idx += inpseq_last_idx + forecast_horizon\n target_last_idx += target_first_idx + target_len\n return indices", "def _get_indexes(self, participants):\n tr_idx = int(np.floor(self.tr_size*len(participants)))\n j = self.val_size + self.tr_size\n val_idx = int(np.floor(j*len(participants)))\n return tr_idx, val_idx", "def get_train_val_indices(dataset, train_ratio, val_ratio):\n features = get_feature_labels_files(dataset)[0]\n indices = get_random_idx(features, 42).tolist()\n train_indices = indices[:int(train_ratio * len(indices))]\n val_indices = indices[int(train_ratio * len(indices)): int(train_ratio * len(indices)) + int(val_ratio * len(indices)) + 1]\n test_indices = indices[int(train_ratio * len(indices)) + int(val_ratio * len(indices)) + 1::]\n return [train_indices, val_indices, test_indices]", "def get_indexes(self, dataset: BaseDataset) -> int:\n\n index = [np.random.randint(0, len(dataset)) for _ in range(1)]\n\n return index", "def get_indexes(self, dataset):\n\n indexs = [random.randint(0, len(dataset)) for _ in range(3)]\n return indexs", "def train_valid_index_split_two_stage(all_index, 
train_size_1 = None, train_size_2 = None, valid_split = 0.3):\n\tall_index = np.arange(all_index) if isinstance(all_index, int) else np.array(all_index)\n\n\ttrain_size_2 = len(all_index) if train_size_2 is None else train_size_2\n\ttrain_index_2_ = np.random.choice(all_index, train_size_2, replace = False)\n\ttrain_index_2, valid_index_2 = np.split(train_index_2_, [int(train_size_2*(1-valid_split))])\n\n\tall_index = np.setdiff1d(all_index, train_index_2)\n\ttrain_index_1_ = np.random.choice(all_index, train_size_1-train_size_2, replace = False)\n\ttrain_index_1, valid_index_1 = np.split(train_index_1_, [int((train_size_1-train_size_2)*(1-valid_split))])\n\ttrain_index_1 = np.hstack([train_index_1, train_index_2])\n\tvalid_index_1 = np.hstack([valid_index_1, valid_index_2])\n\treturn train_index_1, valid_index_1, train_index_2, valid_index_2", "def getFeaturesIndices(self, tag, history, in_data=True):\n indices = []\n if in_data:\n self.__checkFeatureIndex__(self.__f100__((history.getWord(), tag)), indices)\n self.__checkFeatureIndex__(self.__f103__((history.getT_2(), history.getT_1(), tag)), indices)\n self.__checkFeatureIndex__(self.__f104__((history.getT_1(), tag)), indices)\n return indices", "def perform_data_split(X, y, training_idxs, test_idxs, val_idxs):\n X_train = X[training_idxs]\n X_test = X[test_idxs]\n #X_val = X[val_idxs]\n\n y_train = y[training_idxs]\n y_test = y[test_idxs]\n #y_val = y[val_idxs]\n\n return X_train, X_test, y_train, y_test,", "def make_training_set(ind_list, training_data): \n \n exp = training_data[ind_list[0]] \n X_train = exp[0]\n u_train = exp[1] \n\n for i in ind_list[1:]: \n exp = training_data[i]\n X_train = np.append(X_train, exp[0], axis=0)\n u_train = np.append(u_train, exp[1], axis=0)\n\n return X_train, u_train", "def get_valid_indices():\n return [i for i, val in enumerate(all_topics) if val[1] == \"1\"]", "def check_input_indices(self):\n for i, curr_input in enumerate(self.inputs.order_by(\"dataset_idx\"), start=1):\n if i != curr_input.dataset_idx:\n raise ValidationError(\"Inputs are not consecutively numbered starting from 1\")", "def get_indexes(self, dataset: BaseDataset) -> int:\n\n indexes = [random.randint(0, len(dataset)) for _ in range(3)]\n return indexes", "def get_prediction_indices(self):\r\n if self.full_df['Dates'][0] > self.full_df['Dates'][len(self.full_df) - 1]:\r\n self.full_df = self.full_df[::-1]\r\n self.full_df.reset_index(inplace=True)\r\n self.full_df.drop('index', axis=1, inplace=True)\r\n date_condition = ((self.full_df['Dates'] <= self.pred_end) &\r\n (self.full_df['Dates'] >= self.pred_start))\r\n self.pred_indices = list(self.full_df[date_condition].index)", "def prepare_train_validation(self) -> Tuple:\n Xt, Xv, Yt, Yv = self.dataset.train_test_split_representations()\n\n Xt = self.dataset.prepare_input_samples(Xt)\n Yt = self.dataset.prepare_output_samples(Yt)\n traindataset = tf.data.Dataset.from_tensor_slices((Xt, Yt))\n traindataset = traindataset.batch(\n self.batch_size,\n num_parallel_calls=tf.data.experimental.AUTOTUNE\n )\n\n Xv = self.dataset.prepare_input_samples(Xv)\n Yv = self.dataset.prepare_output_samples(Yv)\n validdataset = tf.data.Dataset.from_tensor_slices((Xv, Yv))\n validdataset = validdataset.batch(\n self.batch_size,\n num_parallel_calls=tf.data.experimental.AUTOTUNE\n )\n\n return traindataset, validdataset", "def split_validation_training_index(allind, splitsize, do_offset, offset_steps):\n i = offset_steps\n lval = splitsize\n if not do_offset:\n i_val = allind[:lval]\n i_train = 
allind[lval:]\n else:\n i_val = allind[i * lval:(i + 1) * lval]\n i_train = np.concatenate([allind[0:i * lval], allind[(i + 1) * lval:]], axis=0)\n if len(i_val) <= 0:\n print(\"Warning: #Validation data is 0, take 1 training sample instead\")\n i_val = i_train[:1]\n\n return i_train, i_val", "def split_data_into_training_and_validation(self, data):\n training_dataset = self.get_data_from_indices(data, np.arange(self.num_training_samples))\n validation_dataset = self.get_data_from_indices(data, np.arange(self.num_training_samples,\n self.p.trainer.num_samples))\n return training_dataset, validation_dataset", "def train_test_dataset(self, train_rate=0.8):\n point_date = int(len(self.y) * train_rate)\n y_to_train = self.y[:point_date]\n y_to_val = self.y[point_date:]\n predict_date = len(self.y) - len(y_to_train) # the number of data points for the test set\n date_val = self.y.index[point_date]\n return y_to_train, y_to_val, predict_date, date_val", "def get_inputs_test():\n x = tf.constant(extract_pandas_data(x_test))\n y = tf.constant(y_test.values)\n return x, y", "def train_test_split(data, validate_size=0.3):\r\n\r\n split = len(data) * (1 - validate_size)\r\n split = int(split)\r\n train = data[:split]\r\n validate = data[split:]\r\n\r\n return train, validate", "def indices(self, fit):\r\n lam = self.lam_reeval if self.lam_reeval else 2 + len(fit) / 20\r\n reev = int(lam) + ((lam % 1) > np.random.rand())\r\n return np.argsort(array(fit, copy=False)[:2 * (reev + 1)])[:reev]", "def make_idx_data(revs, word_idx_map, maxlen=60, is_split = True):\r\n X_train, X_trial, X_test,y_train, y_trial,y_test, lex_train, lex_trial = [], [], [], [], [], [], [], []\r\n for rev in revs:\r\n sent = get_idx_from_sent(rev['text'], word_idx_map)\r\n y = rev['y']\r\n if is_split:\r\n if rev['split'] == 1:\r\n X_train.append(sent)\r\n y_train.append(y)\r\n\r\n elif rev['split'] == -1:\r\n X_trial.append(sent)\r\n y_trial.append(y)\r\n else:\r\n X_test.append(sent)\r\n y_test.append(-1)\r\n\r\n if is_split:\r\n X_train = sequence.pad_sequences(np.array(X_train), maxlen=maxlen)\r\n X_trial = sequence.pad_sequences(np.array(X_trial), maxlen=maxlen)\r\n # X_valid = sequence.pad_sequences(np.array(X_valid), maxlen=maxlen)\r\n y_train = np_utils.to_categorical(np.array(y_train))\r\n y_trial = np_utils.to_categorical(np.array(y_trial))\r\n # y_valid = np.array(y_valid)\r\n\r\n lex_train = train_lexicon.values\r\n lex_trial = trial_lexicon.values\r\n lex_train = np.array(lex_train)\r\n lex_trial = np.array(lex_trial)\r\n return [X_train, X_trial, y_train, y_trial, lex_train, lex_trial]\r\n else:\r\n X_test = sequence.pad_sequences(np.array(X_test), maxlen=117)\r\n lex_test = test_lexicon.values\r\n lex_test = np.array(lex_test)\r\n return [X_test, lex_test]", "def tc_index(*args):\n index = []\n x = check_tc_data(args[0])\n i = 0\n for line in args[0].Data.TCData.tc_data:\n i += 1\n if line != x[i - 1]:\n index.append(0)\n elif line == x[i - 1]:\n index.append(i)\n return index", "def test_parse_split_index_ordering():\n index = [5, 37, 38, 56, 111] # test has max index 9999\n split = \"test\"\n kwargs = dict(epochs=1, batch_size=1, dataset_dir=DATASET_DIR, shuffle_files=False)\n ds = datasets.mnist(split=split, **kwargs)\n fixed_order = []\n for i, (x, y) in enumerate(ds):\n if i in index:\n fixed_order.append(x)\n if i >= max(index):\n break\n\n sliced_split = f\"{split}[{index}]\"\n ds = datasets.mnist(split=sliced_split, **kwargs)\n output_x = [x for (x, y) in ds]\n assert len(fixed_order) == len(output_x)\n for 
x_i, x_j in zip(fixed_order, output_x):\n assert (x_i == x_j).all()", "def get_train_test_data(All_Data, Tr_Ind, Tst_Ind):\r\n\r\n\ttr_data = np.concatenate([data_m[ind] for data_m, ind in zip(All_Data, Tr_Ind)], axis=0)\r\n\ttst_data = np.concatenate([data_m[ind] for data_m, ind in zip(All_Data, Tst_Ind)], axis=0)\r\n\r\n\treturn tr_data, tst_data", "def _train(self):\n self.train_acc.reset_states()\n self.val_acc.reset_states()\n self.train_loss.reset_states()\n self.val_loss.reset_states()\n\n self.train_ds.shuffle(buffer_size=1000)\n for idx, (x,y) in enumerate(self.train_ds):\n self.tf_train_step(x, y)\n\n for x,y in self.val_ds:\n self.tf_val_step(x, y)\n\n # It is important to return tf.Tensors as numpy objects.\n return {\n \"epoch\": self.iteration,\n \"loss_train\": self.train_loss.result().numpy(),\n \"loss_val\": self.val_loss.result().numpy(),\n \"acc_train\": self.train_acc.result().numpy(),\n \"acc_val\": self.val_acc.result().numpy(),\n }", "def train(self):\r\n for class_ in set(self.train_classes):\r\n data = map(lambda (ind, datum): datum, filter(lambda (ind, datum): self.train_classes[ind] == class_, enumerate(self.train_data)))\r\n self.distribution.index_data(data, class_)", "def split_train_and_test(num_examples, test_percentage):\n all_samples_idx = np.arange(num_examples)\n np.random.shuffle(all_samples_idx)\n test_examples = int(np.ceil(num_examples * test_percentage))\n # Train and validation indexes\n train_val_idx = all_samples_idx[0:len(all_samples_idx) - test_examples]\n test_idx = all_samples_idx[len(all_samples_idx) - test_examples:len(all_samples_idx)]\n\n return [train_val_idx, test_idx]", "def getTrainTimes(t1,testTimes):\n trn=t1.copy(deep=True)\n for i,j in testTimes.iteritems():\n df0=trn[(i<=trn.index)&(trn.index<=j)].index # train starts within test\n df1=trn[(i<=trn)&(trn<=j)].index # train ends within test\n df2=trn[(trn.index<=i)&(j<=trn)].index # train envelops test\n trn=trn.drop(df0.union(df1).union(df2))\n return trn", "def data_split(X, y):\n folds = KFold(n_splits=SPLITS, shuffle=True, random_state=RANDOM_STATE)\n train_indices, validation_indices = list(folds.split(X))[-1][0], list(folds.split(X))[-1][1]\n\n X_train = X.iloc[train_indices]\n X_validation = X.iloc[validation_indices]\n\n y_train = y.iloc[train_indices]\n y_validation = y.iloc[validation_indices]\n\n return X_train, X_validation, y_train, y_validation", "def get_training_data():\n \n X = pd.read_csv('../data/train_values.csv').set_index('sequence_id')\n y = pd.read_csv('../data/train_labels.csv').set_index('sequence_id')\n return X, y", "def get_cv_indices(df, col, time_slices):\n return [\n ## get train and holdout indices for slice\n tuple(get_row_indices(df, col, slc[x]) for x in range(2))\n\n ## get indices for each slice\n for slc in time_slices\n ]", "def getFeaturesIndices(self, tag, history, in_data=True):\n indices = super().getFeaturesIndices(tag, history, in_data)\n word = history.getWord()\n position = history.getIndex()\n for suffix in self.data.getSuffixesForWord(word):\n self.__checkFeatureIndex__(self.__f101__((suffix, tag)), indices)\n for prefix in self.data.getPrefixesForWord(word):\n self.__checkFeatureIndex__(self.__f102__((prefix, tag)), indices)\n self.__checkFeatureIndex__(self.__f105__(tag), indices)\n self.__checkFeatureIndex__(self.__fNum__(word), indices)\n self.__checkFeatureIndex__(self.__fCap__(word, position), indices)\n return indices", "def loo(self):\n loo = list()\n for i in range(self.data.shape[0]):\n train_index = [i for i in 
range(self.data.shape[0])]\n train_index.pop(i)\n test_index = i\n loo.append([train_index,test_index])\n\n return (loo)", "def get_inputs_test_S():\n x = tf.constant(extract_pandas_data(x_test_S))\n y = tf.constant(y_test_S.values)\n return x, y", "def get_data_idx(self)->list:\n return self.__data_idx", "def _get_valid_indices(\n self,\n _signals: np.ndarray,\n _labels: np.ndarray,\n window_start_indices: np.ndarray = None,\n elm_start_indices: np.ndarray = None,\n elm_stop_indices: np.ndarray = None,\n valid_t0: np.ndarray = None,\n labels: np.ndarray = None,\n signals: np.ndarray = None,\n ) -> Tuple[\n np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray\n ]:\n # allowed indices; time data points which can be used for creating the data chunks\n _valid_t0 = np.ones(_labels.shape, dtype=np.int32)\n _valid_t0[\n -(config.signal_window_size + config.label_look_ahead) + 1 :\n ] = 0\n\n # indices for active elm events in each elm event\n active_elm_events = np.nonzero(_labels >= 0.5)[0]\n\n if signals is None:\n # initialize arrays\n window_start_indices = np.array([0])\n elm_start_indices = active_elm_events[0]\n elm_stop_indices = active_elm_events[-1]\n valid_t0 = _valid_t0\n signals = _signals\n labels = _labels\n else:\n # concat on axis 0 (time dimension)\n last_index = len(labels) - 1\n window_start_indices = np.append(\n window_start_indices, last_index + 1\n )\n elm_start_indices = np.append(\n elm_start_indices, active_elm_events[0] + last_index + 1\n )\n elm_stop_indices = np.append(\n elm_stop_indices, active_elm_events[-1] + last_index + 1\n )\n valid_t0 = np.concatenate([valid_t0, _valid_t0])\n signals = np.concatenate([signals, _signals], axis=0)\n labels = np.concatenate([labels, _labels], axis=0)\n\n return (\n signals,\n labels,\n valid_t0,\n window_start_indices,\n elm_start_indices,\n elm_stop_indices,\n )", "def indices_of_split(self, split_name='train'):\n return self.indices_of('split', split_name)", "def indexes(self):\r\n\r\n\r\n if not self.usesequence:\r\n\r\n if len(self.get_all_indexes()) != len(self.sortedindexes) \\\r\n or self.indexchanged or not self.sortedindexes:\r\n self.indexchanged = False\r\n self.sortedindexes = sorted(self.get_all_indexes(),\r\n key=lambda x_temp: Index(x_temp))\r\n return self.sortedindexes\r\n return self.sortedindexes\r\n else:\r\n if self.indexchanged:\r\n self.sortedindexes = self.default_dict['indexlist'].strings()\r\n return self.sortedindexes\r\n else:\r\n return self.sortedindexes", "def reserve_validation_for_testing(self):\n info(\"Reserving validation data for testing.\")\n if self.test_idx is not None and self.test_idx.size > 0:\n warning(f\"Reserving validation for testing but {len(self.test_idx)} test index exists!\")\n warning(f\"Deleting existing test indexes\")\n self.test_idx = []\n self.test_via_validation = True\n for i in range(len(self.trainval_idx)):\n # fetch and replace the validation index\n val_idx = self.trainval_idx[i][1]\n self.trainval_idx[i] = (self.trainval_idx[i][0], np.arange(0, dtype=np.int32))\n # add it as the test index\n self.test_idx.append(val_idx)", "def indices(self, _user=None):\n return [p.index for p in self.get_active_smallvariant_cases()]", "def split_train_validation_and_test(num_examples, val_percentage, test_percentage):\n all_samples_idx = np.arange(num_examples)\n np.random.shuffle(all_samples_idx)\n test_examples = int(np.ceil(num_examples * test_percentage))\n val_examples = int(np.ceil(num_examples * val_percentage))\n # Train and validation indexes\n 
train_idx = all_samples_idx[0:len(all_samples_idx) - test_examples - val_examples]\n val_idx = all_samples_idx[len(all_samples_idx) - test_examples - val_examples:len(all_samples_idx) - test_examples]\n test_idx = all_samples_idx[len(all_samples_idx) - test_examples:]\n train_idx.sort()\n val_idx.sort()\n test_idx.sort()\n\n return [train_idx, val_idx, test_idx]", "def get_data(t_idx):\n train_data = []\n train_label = []\n valid_data = []\n valid_label = []\n test_data = []\n test_label = []\n \n lines = []\n #f = open('/home/liwenzhe/myworkspace/is13/dataset/closingAdjLog.csv', 'rb')\n f = open('dataset/closingAdjLog.csv')\n reader = csv.reader(f)\n for row in reader:\n lines.append(row)\n lines.pop(0)\n \n n = len(lines)\n # use the first 80% for the training data, the next 10% for the validation data\n # and the last 10% for the test data. \n # TODO : create folds instead. \n for i in xrange(n-1, -1, -1):\n curr_line = lines[i]\n if i > n * 0.2:\n # add into training data\n if t_idx == 0:\n train_data.append(curr_line[t_idx+1:])\n else:\n train_data.append(curr_line[:t_idx-1] + curr_line[t_idx:])\n train_label.append(curr_line[t_idx]) \n elif i > n * 0.1:\n # add into validation data\n if t_idx == 0:\n valid_data.append(curr_line[t_idx+1:])\n else: \n valid_data.append(curr_line[:t_idx-1] + curr_line[t_idx:])\n valid_label.append(curr_line[t_idx]) \n else:\n # add into test data\n if t_idx == 0:\n test_data.append(curr_line[t_idx+1:])\n else:\n test_data.append(curr_line[:t_idx-1] + curr_line[t_idx:])\n test_label.append(curr_line[t_idx]) \n \n return [train_data, train_label, valid_data, valid_label, test_data, test_label]", "def get_indexes(self, dataset):\n\n for i in range(self.max_iters):\n index = random.randint(0, len(dataset))\n gt_bboxes_i = dataset.get_ann_info(index)['bboxes']\n if len(gt_bboxes_i) != 0:\n break\n\n return index", "def train_data(self):\n\n return self.__train_data, self.__train_labels", "def _get_instance_indices(self, classes, num_detections, batch_index,\n class_id):\n classes = classes[batch_index:batch_index+1, ...]\n _, max_detections = shape_utils.combined_static_and_dynamic_shape(\n classes)\n # Get the detection indices corresponding to the target class.\n # Call tf.math.equal with matched tensor shape to make it tf.lite\n # compatible.\n valid_detections_with_kpt_class = tf.math.logical_and(\n tf.range(max_detections) < num_detections[batch_index],\n tf.math.equal(classes[0], tf.fill(classes[0].shape, class_id)))\n instance_inds = tf.where(valid_detections_with_kpt_class)[:, 0]\n # Cast the indices tensor to int32 for tf.lite compatibility.\n return tf.cast(instance_inds, tf.int32)", "def getValidationData(data, labels):\n validationData = []\n for dataTensor in data:\n validationData.append(dataTensor[int(len(dataTensor) * (1 - configuration['nn']['validationSplit'])):])\n validationLabel = labels[int(len(labels) * (1 - configuration['nn']['validationSplit'])):]\n return validationData, validationLabel", "def getValidationData(data, labels):\n validationData = []\n for dataTensor in data:\n validationData.append(dataTensor[int(len(dataTensor) * (1 - configuration['nn']['validationSplit'])):])\n validationLabel = labels[int(len(labels) * (1 - configuration['nn']['validationSplit'])):]\n return validationData, validationLabel", "def get_data_params(self):\n\n minx, maxx = self.get_minx_maxx(normalized=True)\n\n # get the column indexes of categorical features after one-hot-encoding\n self.encoded_categorical_feature_indexes = 
self.get_encoded_categorical_feature_indexes()\n\n return minx, maxx, self.encoded_categorical_feature_indexes", "def sorted_inputs(self):\n return self.inputs.order_by(\"dataset_idx\")", "def get_training_and_testing_sets(data, Y):\r\n data = pd.concat([data, Y], axis=1)\r\n x,y=data.shape\r\n train_X_sub1=data[0:x//6]\r\n dev_X_sub1 = data[x//6:x//6 + x//12]\r\n test_X_sub1 = data[x//6 + x//12:x//3]\r\n\r\n train_X_sub2 = data[x//3:x//3+x//6]\r\n dev_X_sub2 = data[x//6 + x//3:x//3 + x//6 + x//12]\r\n test_X_sub2 = data[x//3 + x//6 + x//12:2*x//3]\r\n\r\n train_X_sub3 = data[2*x//3:(2*x//3) +x//6]\r\n dev_X_sub3 = data[x//6 + 2*x//3: (2*x//3) + x//6 + x//12]\r\n test_X_sub3 = data[2*x//3 + x//6 + x//12:x]\r\n\r\n train_X=train_X_sub1.append(train_X_sub2,ignore_index = True)\r\n train_X =train_X.append(train_X_sub3,ignore_index = True)\r\n dev_X= dev_X_sub1.append(dev_X_sub2,ignore_index = True)\r\n dev_X = dev_X.append(dev_X_sub3,ignore_index = True)\r\n test_X = test_X_sub1.append(test_X_sub2,ignore_index = True)\r\n test_X = test_X.append(test_X_sub3,ignore_index = True)\r\n\r\n\r\n train_X = util.shuffle(train_X)\r\n train_X = train_X.reset_index(drop=True)\r\n\r\n dev_X = util.shuffle(dev_X)\r\n dev_X = dev_X.reset_index(drop=True)\r\n\r\n test_X = util.shuffle(test_X)\r\n test_X = test_X.reset_index(drop=True)\r\n\r\n train_X_final=train_X\r\n dev_X_final = dev_X\r\n test_X_final = test_X\r\n x, y = train_X_final.shape\r\n train_X = train_X_final.iloc[:, 0:y - 1]\r\n train_Y = train_X_final.iloc[:, y - 1]\r\n\r\n x, y = test_X_final.shape\r\n test_X = test_X_final.iloc[:, 0:y - 1]\r\n test_Y = test_X_final.iloc[:, y - 1]\r\n\r\n x, y = dev_X_final.shape\r\n dev_X = dev_X_final.iloc[:, 0:y - 1]\r\n dev_Y = dev_X_final.iloc[:, y - 1]\r\n\r\n return train_X, train_Y, dev_X,dev_Y,test_X, test_Y", "def test_get_indices_several_existing_items(self):\r\n control_ids = ['PC.354', 'PC.355', 'PC.356', 'PC.481', 'PC.593']\r\n exp_control_indices = [0, 1, 2, 3, 4]\r\n\r\n fast_ids = ['PC.607', 'PC.634', 'PC.635', 'PC.636']\r\n exp_fast_indices = [5, 6, 7, 8]\r\n\r\n obs_control = _get_indices(self.dist_matrix_header, control_ids)\r\n self.assertEqual(obs_control, exp_control_indices)\r\n\r\n obs_fast = _get_indices(self.dist_matrix_header, fast_ids)\r\n self.assertEqual(obs_fast, exp_fast_indices)", "def train_test_official(self):\n return self.sub_set(self.idcs_train), self.sub_set(self.idcs_test)", "def get_insert_indices(my_timestamps, existing_timestamps):\n existing_timestep = existing_timestamps[1] - existing_timestamps[0]\n my_timestep = my_timestamps[1] - my_timestamps[0]\n\n # make sure the time delta is ok\n if existing_timestep != my_timestep:\n raise Exception(\"Existing dataset has different timestep (mine=%d, existing=%d)\"\n % (my_timestep, existing_timestep))\n\n my_offset = (my_timestamps[0] - existing_timestamps[0]) // existing_timestep\n my_end = my_offset + len(my_timestamps)\n\n return my_offset, my_end", "def run_idxs(self):\n return list(range(len(self._h5[RUNS])))", "def get_valid_indices(labels):\n idxs = (labels != invalid_labels[0])\n for l in invalid_labels[1:]:\n idxs = idxs & (labels != l)\n return idxs", "def train_test_indices(n, train_ratio=0.7):\n train_split_index = int(train_ratio * n)\n shuffled_indices = np.random.permutation(n)\n train_indices = shuffled_indices[:train_split_index]\n test_indices = shuffled_indices[train_split_index:]\n return train_indices, test_indices", "def get_query(self, model, train_data, labelled_idx, unlabelled_idx):\n 
self.num_steps += 1\n # if this is the first step, then just return the seed set\n if self.num_steps == 1:\n return labelled_idx, unlabelled_idx\n\n if self.num_subsample is not None:\n num_subsample = min(self.num_subsample, len(unlabelled_idx))\n subsample_idx = random.sample(unlabelled_idx, k=num_subsample)\n else:\n subsample_idx = unlabelled_idx\n\n pool = Subset(train_data, subsample_idx)\n labelled = Subset(train_data, labelled_idx)\n # get ranking of datapoints\n idx_to_add = self.score(model, pool, labelled)\n\n # choose top scoring datapoints to label\n new_labelled_idx = labelled_idx + [subsample_idx[i] for i in idx_to_add]\n new_unlabelled_idx = [j for j in range(len(train_data)) if j not in new_labelled_idx]\n\n return new_labelled_idx, new_unlabelled_idx", "def dt_train_test(dt, xTrain, yTrain, xTest, yTest):\n # train the model\n dt.train(xTrain, yTrain['label'])\n # predict the training dataset\n yHatTrain = dt.predict(xTrain)\n trainAcc = accuracy_score(yTrain['label'], yHatTrain)\n # predict the test dataset\n yHatTest = dt.predict(xTest)\n testAcc = accuracy_score(yTest['label'], yHatTest)\n return trainAcc, testAcc", "def batch_indices(self):\n b = self.batch_size\n return [np.arange(i*b, i*b+b) for i in range(self.num_batches)]", "def make_idx_data(revs, word_idx_map, maxlen=60):\n X_train, X_test, X_dev, y_train, y_dev,= [], [], [], [], []\n for rev in revs:\n sent = get_idx_from_sent(rev['text'], word_idx_map)\n y = rev['y']\n if rev['split'] == 1:\n X_train.append(sent)\n y_train.append(y)\n elif rev['split'] == 0:\n X_dev.append(sent)\n y_dev.append(y)\n elif rev['split'] == -1:\n X_test.append(sent)\n\n X_train = sequence.pad_sequences(np.array(X_train), maxlen=maxlen)\n X_dev = sequence.pad_sequences(np.array(X_dev), maxlen=maxlen)\n X_test = sequence.pad_sequences(np.array(X_test), maxlen=maxlen)\n y_train = np_utils.to_categorical(np.array(y_train))\n y_dev = np_utils.to_categorical(np.array(y_dev))\n\n return [X_train, X_test, X_dev, y_train, y_dev,]", "def load_data():\n print(\"PARSING TRAIN\")\n ys_train, x_train, ids_train = load_pickle_data(\"ys_train\"), load_pickle_data(\"x_train\"), load_pickle_data(\n \"ids_train\")\n if ys_train is None or x_train is None or ids_train is None:\n ys_train, x_train, ids_train = load_csv_data(\"{}/train.csv\".format(DATA_DIR))\n dump_pickle_data(ys_train, \"ys_train\")\n dump_pickle_data(x_train, \"x_train\")\n dump_pickle_data(ids_train, \"ids_train\")\n\n print(\"PARSING TEST\")\n x_test, ids_test = load_pickle_data(\"x_test\"), load_pickle_data(\"ids_test\")\n if x_test is None or ids_test is None:\n _, x_test, ids_test = load_csv_data(\"{}/test.csv\".format(DATA_DIR))\n dump_pickle_data(x_test, \"x_test\")\n dump_pickle_data(ids_test, \"ids_test\")\n\n return ys_train, x_train, ids_train, x_test, ids_test", "def train_test_official(self):\n return self.sub_set(self.train_idcs), self.sub_set(self.test_idcs)", "def get_train_input(self, prev, i):\n pass", "def train_valid_index_split(all_index, train_size = None, valid_split = 0.3):\n\tall_index = np.arange(all_index) if isinstance(all_index, int) else np.array(all_index)\n\ttrain_size = len(all_index) if train_size is None else train_size\n\ttrain_index_ = np.random.choice(all_index, train_size, replace = False)\n\ttrain_index, valid_index = np.split(train_index_, [int(train_size*(1-valid_split))])\n\treturn train_index, valid_index", "def ordered_indices(self):\r\n '''we need random order'''\r\n if self.shuffle:\r\n indices = np.random.permutation(len(self))\r\n 
else:\r\n indices = np.arange(len(self))\r\n '''\r\n if self.tgt_sizes is not None:\r\n indices = indices[np.argsort(self.tgt_sizes[indices], kind='mergesort')]\r\n return indices[np.argsort(self.src_sizes[indices], kind='mergesort')]\r\n '''\r\n return indices", "def index_in_epoch(self):\n return self._index_in_epoch", "def train_batch_idx(self) -> int:\n return self._train_batch_idx", "def index(self):\n return self._epochs_completed * self._size + self._index_in_epoch", "def get_dataset_index(\n dataset: xr.Dataset, time_dataset_index: xr.Dataset\n) -> xr.Dataset:\n dataset_index = xr.Dataset()\n n_steps = len(next(iter(time_dataset_index.values())))\n for dim, var in dataset.items():\n if set(var.dims).intersection(time_dataset_index.keys()):\n time_dim = var.dims[0]\n assert time_dim in time_dataset_index, (\n f\"'{time_dim}' does not seems to be a time \"\n f\"dimensions in {time_dataset_index.keys()}. \"\n \"For the moment, only time dimension as first dim is supported.\"\n )\n dataset_index[dim] = time_dataset_index[time_dim]\n else:\n if not onp.shape(var):\n dataset_index[dim] = xr.DataArray(onp.arange(n_steps), dims=(\"step\",))\n else:\n values_atleast_1d = onp.atleast_1d(var.values)\n # grid = onp.indices(values_atleast_1d.shape)\n flat_idx = onp.arange(len(values_atleast_1d.ravel()))\n dataset_index[dim] = xr.DataArray(\n onp.outer(onp.arange(n_steps), flat_idx),\n dims=(\"step\", dim + \"_flat_idx\"),\n )\n return dataset_index", "def inidices(self):\n return [Index(name, index) for name, index in self._data['indices'].iteritems()]", "def train(self, data, labels, validationData, validationLabels): # data x training, y_training\n # ## TODO: Your code here\n count = util.Counter()\n\n #print 'Labels of the data is:', labels #printing all the labels from which the frequency has to be found\n\n for x in labels: # for loop\n count[x] += 1\n\n #print count\n val = count.argMax()\n self.guess = val\n #print val", "def shuffle_train(self):\r\n if self.data_container.task == 'Classify':\r\n id_train_list=[]\r\n for i in self.idx_train_list:\r\n id_train_list.append(self._random_state.choice(i,self.train_parms[0]))\r\n for j in self._random_state.choice(self.unique_value, self.train_parms[1]):\r\n id_train_list.append(self._random_state.choice(self.idx_train_list[j],1))\r\n self.idx['train'] = np.concatenate(id_train_list, axis=0)\r\n \r\n self.idx['train'] = self._random_state.permutation(self.idx['train'])", "def get_valid_indices(self):\n valid_indices = []\n for idx in range(len(os.listdir(self.fdir))):\n with open(os.path.join(self.fdir, f\"{idx}.json\"), \"r\") as f:\n data = json.load(f)\n if data[\"article\"] and data[\"abstract\"]:\n valid_indices.append(idx)\n return valid_indices", "def Train_data():\n print (\"loading train data ...\")\n time_start = time.time()\n data_root = '/media/keziwen/86AA9651AA963E1D'\n with h5py.File(join(data_root, './data/train_real2.h5')) as f:\n data_real = f['train_real'][:]\n num, nt, ny, nx = data_real.shape\n data_real = np.transpose(data_real, (0, 1, 3, 2))\n with h5py.File(join(data_root, './data/train_imag2.h5')) as f:\n data_imag = f['train_imag'][:]\n num, nt, ny, nx = data_imag.shape\n data_imag = np.transpose(data_imag, (0, 1, 3, 2))\n data = data_real+1j*data_imag\n num_train = 15000\n num_validate = 2000\n train_data = data[0:num_train]\n validate_data = data[num_train:num_train+num_validate]\n\n train_data = np.random.permutation(train_data)\n\n time_end = time.time()\n print ('dataset has been created using 
{}s'.format(time_end-time_start))\n return train_data, validate_data", "def test_train_spl(data, testsize):\n test = data.tail(testsize)\n train = data.head(data.shape[0] - testsize)\n return test, train", "def _get_indx(self, t):\n t = np.array(t)\n a = (t[:, np.newaxis] <= self._data['stop']) & (t[:, np.newaxis] >=\n self._data['start'])\n return np.array([np.where(row)[0][0] for row in a])", "def get_query(self, model, train_data, labelled_idx, unlabelled_idx):\n self.num_steps += 1\n # if this is the first step, then just return the seed set\n if self.num_steps == 1:\n return labelled_idx, unlabelled_idx\n\n if self.num_subsample is not None:\n num_subsample = min(self.num_subsample, len(unlabelled_idx))\n subsample_idx = random.sample(unlabelled_idx, k=num_subsample)\n else:\n subsample_idx = unlabelled_idx\n # initialise dataloader. Loads data in order of unlabelled idx\n pool = Subset(train_data, subsample_idx)\n\n # get scores on unlabelled datapoints\n scores = self.score(model, pool)\n\n # choose top scoring datapoints to label\n num_query = min(self.num_query, len(subsample_idx))\n idx_to_add = np.argsort(scores)[-num_query:]\n new_labelled_idx = labelled_idx + [subsample_idx[i] for i in idx_to_add]\n new_unlabelled_idx = [j for j in range(len(train_data)) if j not in new_labelled_idx]\n return new_labelled_idx, new_unlabelled_idx", "def train(self, training_data):\n pass", "def train_test_split(coordinates, data, weights=None, **kwargs):\n args = check_fit_input(coordinates, data, weights, unpack=False)\n ndata = args[1][0].size\n indices = np.arange(ndata)\n split = next(ShuffleSplit(n_splits=1, **kwargs).split(indices))\n train, test = (tuple(select(i, index) for i in args) for index in split)\n return train, test", "def __get_x_y_from_training_validation(\n logger, training, validation, predictors, target):\n if training is not None:\n training_X, training_Y = __get_x_y_from_data(logger, training, predictors, target)\n if validation is not None:\n validation_X, validation_Y = __get_x_y_from_data(logger, validation, predictors, target)\n return training_X, training_Y, validation_X, validation_Y", "def train(self, training_data, training_labels, validation_data, validation_labels):\n abstract", "def get_query(self, model, train_data, labelled_idx, unlabelled_idx):\n # if this is the first step, then just return the seed set\n self.num_steps += 1\n if self.num_steps == 1:\n return labelled_idx, unlabelled_idx\n\n if self.num_subsample is not None:\n num_subsample = min(self.num_subsample, len(unlabelled_idx))\n subsample_idx = random.sample(unlabelled_idx, k=num_subsample)\n else:\n subsample_idx = unlabelled_idx\n # initialise dataloader. 
Loads data in order of unlabelled idx\n pool = Subset(train_data, subsample_idx)\n # get scores on unlabelled datapoints\n scores = self.score(model, pool)\n\n # choose top scoring datapoints to label\n num_query = min(self.num_query, len(subsample_idx))\n idx_to_add = np.argsort(scores)[-num_query:]\n new_labelled_idx = labelled_idx + [subsample_idx[i] for i in idx_to_add]\n new_unlabelled_idx = [j for j in range(len(train_data)) if j not in new_labelled_idx]\n return new_labelled_idx, new_unlabelled_idx", "def train_valid_split(X, y):\n random_indexes = np.random.permutation(len(y))\n train_inds = random_indexes[:(0.75*len(y))]\n valid_inds = random_indexes[(0.75*len(y)):]\n return X[train_inds], y[train_inds], X[valid_inds], y[valid_inds]", "def pt_index(*args):\n index = []\n x = check_pt_data(args[0])\n i = 0\n for line in args[0].Data.PTData.pt_data:\n i += 1\n if line != x[i - 1]:\n index.append(0)\n elif line == x[i - 1]:\n index.append(i)\n return index", "def timeseries_train_test_split(X, y, test_size):\n\n # get the index after which test set starts\n test_index = int(len(X)*(1-test_size))\n\n X_train = X.iloc[:test_index]\n y_train = y.iloc[:test_index]\n X_test = X.iloc[test_index:]\n y_test = y.iloc[test_index:]\n\n return X_train, X_test, y_train, y_test", "def ordered_indices(self):\r\n return np.arange(len(self), dtype=np.int64)", "def get_idxvals(self):\n input_rows = list()\n input_cols = list()\n for key in self.index:\n input_rows.append(key[0])\n input_cols.append(key[1])\n\n return list(OrderedSet(input_rows)), list(OrderedSet(input_cols))", "def find_index_in_cell(train, test, x_min, y_min, step):\n data_coor = train['x'].values\n data_in_x = np.logical_and(data_coor > x_min, data_coor < (x_min + step))\n data_coor = train['y'].values\n data_in_y = np.logical_and(data_coor > y_min, data_coor < (y_min + step))\n data_cell = np.logical_and(data_in_x, data_in_y)\n train_cell = train.iloc[data_cell]\n\n data_coor = test['x'].values\n data_in_x = np.logical_and(data_coor > x_min, data_coor < (x_min + step))\n data_coor = test['y'].values\n data_in_y = np.logical_and(data_coor > y_min, data_coor < (y_min + step))\n data_cell = np.logical_and(data_in_x, data_in_y)\n test_cell = test.iloc[data_cell]\n\n train_label_col = 'place_id'\n train_labels = train_cell[train_label_col]\n del train_cell[train_label_col]\n\n return train_cell, test_cell, train_labels", "def _iter_indices(self, frame, y):\n n_samples = frame.shape[0]\n n_train, n_test = _validate_shuffle_split(n_samples,\n self.test_size, self.train_size)\n\n # need to validate y...\n y = _val_y(y)\n target = np.asarray(frame[y].as_data_frame(use_pandas=True)[y].tolist())\n\n classes, y_indices = np.unique(target, return_inverse=True)\n n_classes = classes.shape[0]\n\n class_counts = bincount(y_indices)\n if np.min(class_counts) < 2:\n raise ValueError('The least populated class in y has only 1 '\n 'member, which is too few. 
The minimum number of labels '\n 'for any class cannot be less than 2.')\n\n if n_train < n_classes:\n raise ValueError('The train_size=%d should be greater than or '\n 'equal to the number of classes=%d' % (n_train, n_classes))\n\n if n_test < n_classes:\n raise ValueError('The test_size=%d should be greater than or '\n 'equal to the number of classes=%d' % (n_test, n_classes))\n\n rng = check_random_state(self.random_state)\n p_i = class_counts / float(n_samples)\n n_i = np.round(n_train * p_i).astype(int)\n t_i = np.minimum(class_counts - n_i, np.round(n_test * p_i).astype(int))\n\n for _ in range(self.n_splits):\n train = []\n test = []\n\n for i, class_i in enumerate(classes):\n permutation = rng.permutation(class_counts[i])\n perm_indices_class_i = np.where((target == class_i))[0][permutation]\n\n train.extend(perm_indices_class_i[:n_i[i]])\n test.extend(perm_indices_class_i[n_i[i]:n_i[i] + t_i[i]])\n\n # Might end up here with less samples in train and test than we asked\n # for, due to rounding errors.\n if len(train) + len(test) < n_train + n_test:\n missing_indices = np.where(bincount(train + test, minlength=len(target)) == 0)[0]\n missing_indices = rng.permutation(missing_indices)\n n_missing_train = n_train - len(train)\n n_missing_test = n_test - len(test)\n\n if n_missing_train > 0:\n train.extend(missing_indices[:n_missing_train])\n if n_missing_test > 0:\n test.extend(missing_indices[-n_missing_test:])\n\n train = rng.permutation(train)\n test = rng.permutation(test)\n\n yield train, test", "def get_relevant_indices(dataset, classes, target_classes):\n indices = []\n for i in range(len(dataset)):\n # Check if the label is in the target classes\n label_index = dataset[i][1] # ex: 3\n label_class = classes[label_index] # ex: 'cat'\n if label_class in target_classes:\n indices.append(i)\n return indices", "def FindIdxValues(X):\n data = X.select_dtypes(include=[\"float64\"])\n idx = np.argwhere(~np.isnan(data.values))\n idx[:, 1] += 4 # add ID variable columns\n StoE = pd.read_csv(\"msresist/data/MS/CPTAC/IDtoExperiment.csv\")\n assert all(StoE.iloc[:, 0] == data.columns), \"Sample labels don't match.\"\n StoE = StoE.iloc[:, 1].values\n tmt = [[StoE[idx[ii][1] - 4]] for ii in range(idx.shape[0])]\n return np.append(idx, tmt, axis=1)", "def get_query(self, model, train_data, labelled_idx, unlabelled_idx):\n self.num_steps += 1\n # if this is the first step, then just return the seed set\n if self.num_steps == 1:\n return labelled_idx, unlabelled_idx\n\n if self.num_subsample is not None:\n num_subsample = min(self.num_subsample, len(unlabelled_idx))\n subsample_idx = random.sample(unlabelled_idx, k=num_subsample)\n else:\n subsample_idx = unlabelled_idx\n # initialise dataloader. 
Loads data in order of unlabelled idx\n pool = Subset(train_data, subsample_idx)\n\n # get scores on unlabelled datapoints\n scores = self.score(model, pool)\n # TODO get some metrics on the scores/plot?\n\n # choose top scoring datapoints to label\n num_query = min(self.num_query, len(subsample_idx))\n idx_to_add = np.argsort(scores)[-num_query:]\n new_labelled_idx = labelled_idx + [subsample_idx[i] for i in idx_to_add]\n new_unlabelled_idx = [j for j in range(len(train_data)) if j not in new_labelled_idx]\n return new_labelled_idx, new_unlabelled_idx", "def get_dataloaders_with_index(path=\"../../data\", batch_size=64, num_labeled=250,\n lbl_idxs=None, unlbl_idxs=None, valid_idxs=None, which_dataset='cifar10', validation=True):\n\n # Define transform to normalize data\n normalize = transforms.Normalize(\n mean=[0.4914, 0.4822, 0.4465],\n std=[0.2023, 0.1994, 0.2010],\n )\n transform = transforms.Compose([\n transforms.ToTensor(),\n normalize,\n ])\n\n if which_dataset == 'cifar10':\n train_set = CustomCIFAR10(root=path, train=True, transform=transform)\n test_set = CustomCIFAR10(root=path, train=False, transform=transform)\n elif which_dataset == 'svhn':\n train_set = datasets.SVHN(root=path, split='train', download=True, transform=transform)\n test_set = datasets.SVHN(root=path, split='test', download=True, transform=transform)\n else:\n raise Exception('Not supported yet')\n\n\n # Split indexes between labeled, unlabeled and validation\n if which_dataset == 'cifar10':\n training_labels = train_set.targets\n elif which_dataset == 'svhn':\n training_labels = train_set.labels\n else :\n training_labels = train_set.targets\n\n if validation:\n train_labeled_idxs, train_unlabeled_idxs, val_idxs = labeled_unlabeled_val_split(training_labels, int(num_labeled / 10))\n else:\n train_labeled_idxs, train_unlabeled_idxs = labeled_unlabeled_split(training_labels, int(num_labeled / 10))\n val_idxs = []\n\n # If indexes are provided, use them\n if lbl_idxs is not None:\n train_labeled_idxs = lbl_idxs\n train_unlabeled_idxs = unlbl_idxs\n val_idxs = valid_idxs\n\n # Define samplers using indexes\n train_labeled_sampler = SubsetRandomSampler(train_labeled_idxs)\n train_unlabeled_sampler = SubsetRandomSampler(train_unlabeled_idxs)\n val_sampler = SubsetRandomSampler(val_idxs)\n\n # Create data loaders\n train_labeled_loader = DataLoader(train_set, batch_size=batch_size, sampler=train_labeled_sampler, num_workers=0)\n train_unlabeled_loader = DataLoader(train_set, batch_size=batch_size, sampler=train_unlabeled_sampler, num_workers=0)\n val_loader = DataLoader(train_set, batch_size=batch_size, sampler=val_sampler, num_workers=0)\n test_loader = DataLoader(test_set, batch_size=batch_size, shuffle=False, num_workers=0)\n\n if not validation:\n val_loader = test_loader\n\n return train_labeled_loader, train_unlabeled_loader, val_loader, test_loader, train_labeled_idxs, train_unlabeled_idxs, val_idxs", "def DataSplit(self, data):\n train_X,test_X,train_y,test_y=train_test_split(data[0],data[1], random_state=2)\n valid_X,valid_y=train_test_split(data[0],data[1],random_state=2,test_size=0.15)[1],train_test_split(data[0],data[1],random_state=2,test_size=0.15)[3]\n return (train_X,test_X,valid_X,train_y,test_y,valid_y)" ]
[ "0.6553474", "0.6376504", "0.6233736", "0.5944764", "0.59190506", "0.5901625", "0.58788", "0.58554393", "0.58391976", "0.5713936", "0.56517017", "0.563909", "0.56207126", "0.56050724", "0.5597223", "0.5592754", "0.55752015", "0.5573872", "0.5544355", "0.5543345", "0.55142456", "0.54863673", "0.54665613", "0.54592353", "0.5439544", "0.5425093", "0.5417525", "0.540605", "0.53963584", "0.5385493", "0.5346848", "0.53380233", "0.53228843", "0.53082895", "0.5307924", "0.53043944", "0.5291495", "0.5289874", "0.52824676", "0.52793777", "0.52676076", "0.5266283", "0.52631307", "0.5254811", "0.52469337", "0.52409935", "0.5239364", "0.5237311", "0.52353936", "0.52246827", "0.52230966", "0.5202075", "0.51943463", "0.51943463", "0.51880616", "0.51842093", "0.51774013", "0.5174889", "0.5174463", "0.5168468", "0.51682645", "0.51605535", "0.5154648", "0.51538706", "0.51487094", "0.5147456", "0.5147347", "0.51407135", "0.5136275", "0.51277447", "0.512179", "0.51090187", "0.510697", "0.5101098", "0.51003253", "0.50974303", "0.5080972", "0.5078626", "0.50756705", "0.50715166", "0.50715107", "0.50594485", "0.50512207", "0.50488174", "0.5046671", "0.50461113", "0.5042864", "0.5042834", "0.5037596", "0.50338566", "0.503385", "0.50285023", "0.5026845", "0.50262535", "0.50236195", "0.5003633", "0.5001806", "0.49987125", "0.49944222", "0.49873993", "0.49855462" ]
0.0
-1
Returns the indices without performing any operation on them. To be used for fitting on the whole dataset. This strategy is not compatible with HPO search.
def no_resampling(random_state: np.random.RandomState, indices: np.ndarray) -> np.ndarray:
    return indices
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def indices(self) -> np.ndarray:\n return self.impl.indices", "def get_indices(self):\r\n return self._indices", "def vir_indices(self):\n indices = []\n for index,item in enumerate(self):\n if item==0:\n indices.append(index)\n return indices", "def get_unprescribed_indexes(self):\n total_dof = DOF_PER_NODE_STRUCTURAL * len(self.preprocessor.nodes)\n all_indexes = np.arange(total_dof)\n return np.delete(all_indexes, self.prescribed_indexes)", "def _exclude_indices(self):\n idx = self._next_idx\n exclude = np.arange(idx - 1, idx + self.obs_len) % self._maxsize\n return exclude", "def get_indexes(self, dataset: BaseDataset) -> int:\n\n index = [np.random.randint(0, len(dataset)) for _ in range(1)]\n\n return index", "def _getNonPrototypeIndices(self, clusters: ndarray) -> ndarray:\n return np.delete(np.arange(self.dataSize), clusters.flatten())", "def indices(self):\n _indices = []\n for h in self.miller.indices():\n _indices.append(self.indices_hkl(*h)[0])\n return _indices", "def indices(self):\n return self.index.indices", "def getIndices(self):\r\n return self._indices", "def getLandmarkindices(self):\n return self.subsetindices", "def indices(self):\n return range(len(self))", "def ordered_indices(self):\n return self.base_dataset.ordered_indices()", "def get_indexes(self):\n return set(k.index for k in self if k.has_index)", "def get_indexes(self, dataset):\n\n for i in range(self.max_iters):\n index = random.randint(0, len(dataset))\n gt_bboxes_i = dataset.get_ann_info(index)['bboxes']\n if len(gt_bboxes_i) != 0:\n break\n\n return index", "def getLandmarkindices(self):\n return self.subsetnodes_indices", "def get_indexes(self, dataset):\n\n indexs = [random.randint(0, len(dataset)) for _ in range(3)]\n return indexs", "def get_indices(self):\n selection_model = self.selectionModel()\n return selection_model.selectedRows()", "def get_indexes(self, dataset: BaseDataset) -> int:\n\n indexes = [random.randint(0, len(dataset)) for _ in range(3)]\n return indexes", "def indices(self, fit):\r\n lam = self.lam_reeval if self.lam_reeval else 2 + len(fit) / 20\r\n reev = int(lam) + ((lam % 1) > np.random.rand())\r\n return np.argsort(array(fit, copy=False)[:2 * (reev + 1)])[:reev]", "def inidices(self):\n return [Index(name, index) for name, index in self._data['indices'].iteritems()]", "def indices(self, position=None):\n \n raise NotImplementedError()", "def non_masked_indices(mask):\n\treturn np.nonzero(np.ravel(mask-1,order='C'))[0]", "def index(self):\n # Check is multiple orders were given\n try:\n orders = list(iter(self.orders))\n except TypeError:\n orders = [self.orders]\n sites = self._epistasismap.sites\n x = [i for i in range(1, len(sites)) if len(sites[i]) in orders]\n # Add the zeroth element if included\n if 0 in orders:\n x = [0] + x\n return np.array(x)", "def get_unlabeled_idx(X_train, labeled_idx):\n return np.arange(X_train.shape[0])[np.logical_not(np.in1d(np.arange(X_train.shape[0]), labeled_idx))]", "def get_unused_indices(program):\n used = get_used_indices(program)\n all_indices = set(range(len(program.var_types) - 1))\n return all_indices - used", "def get_indexes(self):\n indexes = []\n for c in self.components:\n indexes.extend(c.get_indexes())\n return indexes", "def indices(self, _user=None):\n return [p.index for p in self.get_active_smallvariant_cases()]", "def _free_indicies(self):\n return np.logical_not(self._fixed_indicies)", "def indices(self):\n return self._kbounded_partitions", "def occ_indices(self):\n indices = []\n for index,item in 
enumerate(self):\n if item==1:\n indices.append(index)\n return indices", "def _selected_indices(self, subset):\n # We want the DataFrame to be indexed the same way its values array is\n ftr = self.frametracks.reset_index(drop=True)\n if subset is not None:\n ftr['tmpindex'] = ftr.index.values\n ftr = ftr.set_index('particle').reindex(subset).set_index('tmpindex')\n if self.autoclip:\n # Boundaries are computed for the whole system\n xmin = self.frametracks.x.min() + self.nncutoff\n xmax = self.frametracks.x.max() - self.nncutoff\n ymin = self.frametracks.y.min() + self.nncutoff\n ymax = self.frametracks.y.max() - self.nncutoff\n r = ftr.index[ (ftr.x > xmin) & (ftr.x < xmax) & \\\n (ftr.y > ymin) & (ftr.y < ymax) ].values.astype(int)\n else:\n r = ftr.index.values.astype(int)\n if self.fast:\n return np.random.permutation(r)[:int(len(r) / 10)]\n else:\n return r", "def create_jackknife_indexes(data):\n from numpy import arange, delete\n\n index_range = arange(0, len(data))\n return (delete(index_range, i) for i in index_range)", "def get_agent_indices(array):\t\n\tagent_indices = np.argwhere(array != 0)\n\treturn agent_indices", "def get_indices ( self, X ):\n \n # Shuffle if `self.shuffle` is true.\n nrows = X.shape [ 0 ]\n return (\n np.random.permutation (\n np.arange ( nrows )\n ) # Shuffle the rows if `self.shuffle`\n if self.shuffle\n else np.arange ( nrows )\n ) # End get_indices()", "def get_34index_list(self):\n msk = self.load_mask()\n return [i for (i,v) in enumerate(msk) if v==1]", "def drop_indices(self, df) -> None:\n assert self.is_appropriate_data_instance(df)\n # no operation needed", "def get_final_pruned_indices(self):\n return self.final_pruned_indices", "def all_sampled_nodes_indexes(self) -> torch.LongTensor:\n all_sampled_nodes_indexes: _typing.Any = self.__all_sampled_nodes_indexes\n return all_sampled_nodes_indexes", "def indices_of_split(self, split_name='train'):\n return self.indices_of('split', split_name)", "def get_data_idx(self)->list:\n return self.__data_idx", "def index(self):\n return self.data.index.values", "def _compute_indices(self):\n self.indices = np.arange(len(self.im_filenames))\n np.random.shuffle(self.indices)", "def get_idxvals(self):\n input_rows = list()\n input_cols = list()\n for key in self.index:\n input_rows.append(key[0])\n input_cols.append(key[1])\n\n return list(OrderedSet(input_rows)), list(OrderedSet(input_cols))", "def _get_index(self):\n\n return self.index\n\n # to avoid having differences bigger than 2pi", "def master_ndindex(self): # itermaster_indices(self):\n return itertools_product(\n *[range(*r) for r in self.location]\n ) # TODO check", "def masked_indices(mask):\n\treturn np.nonzero(np.ravel(mask,order='C'))[0]", "def indexing(self):\n return exclusions.closed()", "def get_overlapping_indices(self):\n return self._get_atomic_overlaps()", "def eligible_edges_with_indexes(self):\n return enumerate(self.edges)", "def indices(self):\n if self._indices is None:\n i = []\n\n # TODO: this is not right for multi-column keys\n # TODO: new style indexes\n\n global_name = '^DD(%s,0,\"IX\",\"0\")' % self.fileid\n prefix = '^DD(%s,0,\"IX\",' % self.fileid\n while 1:\n global_name = M.mexec('set s0=$query(%s)' % global_name, M.INOUT(\"\"))[0]\n if not global_name or not global_name.startswith(prefix):\n break\n suffix = global_name[len(prefix):-1]\n parts = suffix.split(\",\")\n idx_name = parts[0][1:-1]\n idx_table = parts[1]\n idx_columns = parts[2:]\n index = Index(idx_name, idx_table, idx_columns)\n i.append(index)\n\n # A 
second list, gives indices for a field\n columns = {}\n for idx in i:\n for c in idx.columns:\n columns[c] = 1\n\n # Now trawl the listed columns in the data dictionary, and load their\n # cross references.\n cr_names = {}\n for c in columns.keys():\n idx_root = M.Globals[\"^DD\"][self.fileid][c][1]\n if not idx_root[0].exists():\n continue\n for cr_id, val in idx_root.keys_with_decendants():\n if float(cr_id) > 0:\n cr_header = idx_root[cr_id][0].value\n parts = cr_header.split(\"^\")\n if len(parts) == 2 and parts[1]: # if more than 2 parts, assume MUMPs trigger\n f = cr_names.get(parts[1], list())\n f.append(c)\n cr_names[parts[1]] = f\n\n # Now, just delete items from the index list if they are not in cr_names\n self._indices = []\n for index in i:\n cr = cr_names.get(index.name)\n if cr:\n # verify columns - lots of errors in real systems\n if len(cr) == len(index.columns):\n invalid = False\n for c in cr:\n if c not in index.columns:\n invalid = True\n continue\n if not invalid:\n self._indices.append(index)\n\n return self._indices", "def get_main_branch_indices(self):\n\n assert self.halt is not None\n prog_main_index = self.halt_index\n prog_main_indices = self.halt.prop(\n 'progenitor.main.indices', self.halt_index)\n self.main_branch_indices = prog_main_indices\n return prog_main_indices", "def labeled_indices(self):\n return self._labeled_indices", "def indexes(self):\r\n\r\n\r\n if not self.usesequence:\r\n\r\n if len(self.get_all_indexes()) != len(self.sortedindexes) \\\r\n or self.indexchanged or not self.sortedindexes:\r\n self.indexchanged = False\r\n self.sortedindexes = sorted(self.get_all_indexes(),\r\n key=lambda x_temp: Index(x_temp))\r\n return self.sortedindexes\r\n return self.sortedindexes\r\n else:\r\n if self.indexchanged:\r\n self.sortedindexes = self.default_dict['indexlist'].strings()\r\n return self.sortedindexes\r\n else:\r\n return self.sortedindexes", "def test_get_indices_no_items_to_search(self):\r\n item_to_find = []\r\n self.assertEqual(_get_indices(self.dist_matrix_header, item_to_find),\r\n [])\r\n item_to_find = ''\r\n self.assertEqual(_get_indices(self.dist_matrix_header, item_to_find),\r\n [])\r\n item_to_find = None\r\n self.assertEqual(_get_indices(self.dist_matrix_header, item_to_find),\r\n [])", "def _get_actor_unfilled_indices(self, actor_index, entries_per_buffer):\n filled_indices = set(\n self._get_replay_buffer_filled_indices(self._replay_buffers, actor_index)\n )\n actor_id_set = set(range(0, entries_per_buffer))\n unfilled_indices = actor_id_set - filled_indices\n return unfilled_indices", "def get_index(self):\n return self.inverted_index", "def get_cached_indices(self, start=None, end=None):\n params = {}\n indices = [\n y[\"sample_identifier\"]\n for y in self.mongo_database.cache.find(\n params, {\"_id\": 0, \"sample_identifier\": 1}\n )[start:end]\n ]\n return np.unique(indices).tolist()", "def get_local_indices(self, part, ctx):\n return self.map_to_global(\n F.arange(0, self.local_size(part), ctx=ctx), part\n )", "def ordered_indices(self):\r\n return np.arange(len(self), dtype=np.int64)", "def query_indices(self):\n\n return set(\n ix for ix in self.es_client.cat.indices(\n index=f'{self.normalized_hostname}-*', h='index'\n ).splitlines()\n )", "def get_indexes(self, variable, *args):\n\n return [get_subset_idxs(data, min, max)\n for data, (min, max) in args]", "def ordered_indices(self):\n return self.d1.ordered_indices()\n # RETURN BASED ON D1's sizes", "def reset_indexes(self) -> None:\n raise NotImplementedError", "def 
get_pulling_indices(self, weight):\n pass", "def mainIndices(self):\n return self.i1, self.i2", "def untie_everything(self):\r\n self.tied_indices = []", "def _get_k_indices(self, ks):\n if self.staticneighs:\n idx_ks = ks\n else:\n idx_ks = [self.ks.index(e) for e in ks]\n return idx_ks", "def _fixed_indicies(self):\n fixed_inds = self.constraints == 'fixed'\n return fixed_inds", "def get_active_register_indices(self):\n assert self.sketch.ndim == 1, 'Currently only support 1-dimensional sketch.'\n return np.flatnonzero(self.sketch)", "def exclude_indices(x, excl):\n # ==========================================================================\n return np.array([x[i] for i in np.arange(len(x)) if i not in excl])", "def _notstaticneighs_get_corestored_by_inds_notslice(self, inds):\n inds = [inds] if type(inds) == int else inds\n idxs = []\n for k in range(len(self.idxs)):\n idxs.append([self.idxs[k][i] for i in inds])\n idxs = np.array(idxs) if type(self.idxs) == np.ndarray else idxs\n\n if self.sp_relative_pos is not None:\n sp_relative_pos = []\n for k in range(len(self.sp_relative_pos)):\n sp_relative_pos += [[self.sp_relative_pos[k][i] for i in inds]]\n else:\n sp_relative_pos = None\n return idxs, sp_relative_pos", "def test_ks_test_empty_indices():\n out = compute_indices_ks_test([], 1000, mode=\"D+\")\n assert all(o is None for o in out)", "def get_prediction_indices(self):\r\n if self.full_df['Dates'][0] > self.full_df['Dates'][len(self.full_df) - 1]:\r\n self.full_df = self.full_df[::-1]\r\n self.full_df.reset_index(inplace=True)\r\n self.full_df.drop('index', axis=1, inplace=True)\r\n date_condition = ((self.full_df['Dates'] <= self.pred_end) &\r\n (self.full_df['Dates'] >= self.pred_start))\r\n self.pred_indices = list(self.full_df[date_condition].index)", "def get_analytically_computed_optimization_parameter_indices(self):\n indices = []\n if '/offsetParameterIndices' in self.f:\n indices.extend(self.f['/offsetParameterIndices'])\n\n if '/scalingParameterIndices' in self.f:\n indices.extend(self.f['/scalingParameterIndices'])\n\n if '/sigmaParameterIndices' in self.f:\n indices.extend(self.f['/sigmaParameterIndices'])\n\n return list(set(indices))", "def indexes(self):\n return getattr(self, '_indexes', None)", "def _get_saved_indices(self, columns):\n # FIXME - assuming that the columns never change\n self._saved_indices = []\n for i in range(len(columns)):\n if self.pattern.match(columns[i]):\n self._saved_indices.append(i)\n return self._saved_indices", "def index(self):\n return self.dataset.index", "def test_get_indices_several_existing_items(self):\r\n control_ids = ['PC.354', 'PC.355', 'PC.356', 'PC.481', 'PC.593']\r\n exp_control_indices = [0, 1, 2, 3, 4]\r\n\r\n fast_ids = ['PC.607', 'PC.634', 'PC.635', 'PC.636']\r\n exp_fast_indices = [5, 6, 7, 8]\r\n\r\n obs_control = _get_indices(self.dist_matrix_header, control_ids)\r\n self.assertEqual(obs_control, exp_control_indices)\r\n\r\n obs_fast = _get_indices(self.dist_matrix_header, fast_ids)\r\n self.assertEqual(obs_fast, exp_fast_indices)", "def get_unread_indexes(self):\n pass", "def get_indications(self):\n indications = np.zeros_like(self.predictions)\n for i in range(self.predictions.shape[0]):\n ind = np.where(self.predictions[i, :] - self.labels != 0.0)[0]\n indications[i, ind] = 1.0\n\n return indications", "def get_data_indices(aperiodic_mode):\n\n indices = {\n 'CF' : 0,\n 'PW' : 1,\n 'BW' : 2,\n 'offset' : 0,\n 'knee' : 1 if aperiodic_mode == 'knee' else None,\n 'exponent' : 1 if aperiodic_mode == 'fixed' else 
2\n }\n\n return indices", "def indices(self):\n return tuple([slice(*r) for r in self.location])", "def get_5index_list(self):\n msk = self.load_mask()\n return [i for (i,v) in enumerate(msk) if v==0]", "def mesh_span_indices(self):\n self._ensure_mesh()\n k2m = self._knots_to_mesh\n return np.where(k2m[1:] != k2m[:-1])[0]", "def indexed_dataset(self) -> Dict[int, List]:\n if self.__indexed_dataset is None:\n dataset = self.dataset()\n truncated_dataset = dataset[:1000]\n self.__indexed_dataset = {\n i: dataset[i] for i in range(len(dataset))\n }\n return self.__indexed_dataset", "def indices(self):\n i, j, _edge = self.indicesAndEdge()\n return i, j", "def clear_indexes(self):\n for keypoints in self:\n keypoints.clear_index()", "def discard_none_targets(dataset):\r\n indices = []\r\n for (ii,sample) in enumerate(dataset):\r\n target = sample[1]\r\n if target is not None:\r\n indices.append(ii)\r\n\r\n return Subset(dataset,indices)", "def get_cont_indices(self, X, max_ord):\n indices = np.zeros(X.shape[1]).astype(bool)\n for i, col in enumerate(X.T):\n col_nonan = col[~np.isnan(col)]\n col_unique = np.unique(col_nonan)\n if len(col_unique) > max_ord:\n indices[i] = True\n return indices", "def nonzero_indices(a):\n return (np.nonzero(a)[0])", "def batch_indices(self):\n b = self.batch_size\n return [np.arange(i*b, i*b+b) for i in range(self.num_batches)]", "def get_recorded_indices(self, application_vertex, variable):\n if variable not in self.__sampling_rates:\n return []\n if self.__indexes[variable] is None:\n return range(application_vertex.n_atoms)\n return self.__indexes[variable]", "def getValidIndicies(self, points):\n try:\n inds = [ i for i in range(points.size) if points.flat[i] in self.maskSet ]\n return inds\n except Exception as error:\n print(\"failed in getValidIndicies\", error)\n return -1", "def get_indices(self):\n\n def query(rel): \n return \"\"\"SELECT pg_class.relname, pg_index.indkey\n FROM pg_class, pg_index\n WHERE (pg_index.indexrelid = pg_class.oid)\n AND (pg_index.indrelid = (SELECT pg_class.oid FROM pg_class WHERE pg_class.relname = \\'{}\\'));\n \"\"\".format(rel)\n\n rels = tpch.schema.keys()\n idxs = dict.fromkeys(rels)\n\n with self.tpch_cxn.cursor() as curs:\n for rel in rels:\n curs.execute(query(rel))\n idxs_ = curs.fetchall()\n idxs_ = dict(idxs_) # index -> index keys \n \n # TODO this can be done cleanly in query\n # pg_index.indkey is a SQL array of attributes indices in their respective tables\n split=lambda attrs: attrs.split() \n cast=lambda attrs: list(map(lambda attr: int(attr)-1, attrs))\n invertindex=lambda attrs: list(np.array(schema[rel])[attrs])\n\n attrs = idxs_.values() \n attrs = list(map(split, attrs))\n attrs = list(map(cast, attrs))\n attrs = list(map(invertindex, attrs))\n\n idxs_ = {key : attrs[i] for i, key in enumerate(idxs_.keys())}\n idxs[rel] = idxs_\n return idxs", "def indices(online: bool = False) -> dict:\n return _get_indices(online)", "def _get_indices(scores: np.ndarray, shuffle_prop: float) -> np.ndarray:\n return _shuffle_subset(scores.argsort().argsort(), shuffle_prop)", "def main_rep_idxs(self):\n\n if '{}/{}'.format(SETTINGS, MAIN_REP_IDXS) in self.h5:\n return self.h5['{}/{}'.format(SETTINGS, MAIN_REP_IDXS)][:]\n else:\n return None", "def _dofidxs(self):\n return [const['dofidxs'] for i, const in self._constraints_df.iterrows()]", "def get_vacancy_indices(array):\t\n\tvacancy_indices = np.argwhere(array == 0)\n\treturn vacancy_indices" ]
[ "0.7181901", "0.69676125", "0.69128466", "0.6892532", "0.68412507", "0.67869836", "0.6754658", "0.67145926", "0.6703281", "0.66633874", "0.65803033", "0.6534292", "0.6509533", "0.64898914", "0.6441676", "0.6402624", "0.63538474", "0.6317454", "0.6311111", "0.6274377", "0.62597823", "0.61934173", "0.6184967", "0.61595553", "0.6139873", "0.6131104", "0.61173064", "0.61140794", "0.60923517", "0.60844386", "0.60834616", "0.6048968", "0.6044923", "0.604333", "0.60267025", "0.6018209", "0.60120195", "0.5997289", "0.59921813", "0.59745514", "0.5969518", "0.59615505", "0.5959979", "0.594442", "0.592906", "0.59277785", "0.5921589", "0.59213626", "0.59092915", "0.59091157", "0.590873", "0.5908398", "0.5899501", "0.5879913", "0.58794636", "0.58774966", "0.5875598", "0.587039", "0.58675563", "0.5866331", "0.58522666", "0.5850105", "0.5848071", "0.58422244", "0.5827174", "0.58257365", "0.5825301", "0.582299", "0.5814833", "0.5814449", "0.58019155", "0.5800825", "0.57961255", "0.57934785", "0.5780337", "0.57793057", "0.5775312", "0.576487", "0.57564765", "0.575372", "0.5752068", "0.57451147", "0.57406557", "0.57380444", "0.5732746", "0.57260627", "0.5711779", "0.5704463", "0.56967676", "0.56938285", "0.56898373", "0.5671925", "0.56660926", "0.56602806", "0.5657068", "0.5654192", "0.5650681", "0.5643973", "0.56419224", "0.5640484" ]
0.65598696
11
Get open accounts Returns array with active account numbers
async def get_open_accounts(self): result = [] URL = API_HOST + "/api/resources/header" async with async_timeout.timeout(TIMEOUT): response = await self.session.get(URL) json_data = await response.json() accounts = json_data["data"]["accounts"]["data"]["data"] for account in accounts: if account["statusCategory"] == STATUS_CATEGORY_OPEN: result.append(account["accountNumber"]) return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_accounts(self):\n\n data = {\n 'customerId': self.personal_identity_number,\n 'responseControl': {\n 'filter': {\n 'includes': ['ALL']\n }\n }\n }\n\n headers = {'Content-type': 'application/json',\n 'Accept': 'application/json',\n 'CSRFToken': self.json_token}\n path = '/im/json/overview/getaccounts'\n req = self.session.post(\n self.BASE_URL + path,\n data=json.dumps(data),\n headers=headers)\n\n for account in req.json()['response']['accounts']:\n self.accounts[account['number']] = account\n del(self.accounts[account['number']]['number'])\n\n return self.accounts", "def list_active_customers():\n active_customers = 0\n for customer in cm.Customers:\n if customer.status == \"Active \":\n active_customers += 1\n return active_customers", "def list_accounts(self):\n pass", "def getConnectedAccounts(**kwargs):\n strProdURL = kwargs[\"strProdURL\"]\n orgID = kwargs[\"ORG_ID\"]\n sessiontoken = kwargs[\"sessiontoken\"]\n\n accounts = get_connected_accounts_json(strProdURL, orgID, sessiontoken)\n orgtable = PrettyTable(['OrgID'])\n orgtable.add_row([orgID])\n print(str(orgtable))\n table = PrettyTable(['Account Number','id'])\n for i in accounts:\n table.add_row([i['account_number'],i['id']])\n \n print(\"Connected Accounts\")\n print(table)", "def get_accounts(self):\r\n return self._accounts", "def get_accounts(self):\n return self.accounts", "def query_accounts(self):\n return self._call_txtrader_api('query_accounts', {})", "def fetch_accounts(self):\n return self.fetch('/accounts')", "def returnOpenOrders(self, account=None):\n if not account:\n if \"default_account\" in config:\n account = config[\"default_account\"]\n if not account:\n raise ValueError(\"You need to provide an account\")\n\n orders = self.dpay.rpc.get_open_orders(account, limit=1000)\n return orders", "def get_accounts(self):\n return self.accounts.all()", "def accounts(self):\r\n return acc.Accounts(self)", "def get_accounts(self):\n\n\t\treturn self.__accounts", "def fetch_owner_accounts():\n resp = oauth.tapkey.get('Owners')\n owner_accounts = resp.json()\n return owner_accounts", "def list_active_customers():\n db_customers = Customers.select()\n LOGGER.debug(\"Calculating number of active customers\")\n # Technically used this in Lesson 03, but it is a comprehension. 
Another method added below.\n number_active = sum([int(x.status) for x in db_customers])\n LOGGER.info(\"There are %d active customers\", number_active)\n\n return number_active", "def list_active_customers():\n customer_active = Customer.select().where(Customer.status == 'Active')\n print('{} Active Customers'.format(len(customer_active)))\n return len(customer_active)", "def GetAccountList(self):\n\t\treturn self.accounts.keys()", "def list_active_customer():\n active_customer = Customer.select().where(Customer.is_active).count()\n LOGGER.info('Number of active customers retrieved.')\n return active_customer", "def get_accounts():\n graph = facebook.GraphAPI(mytoken)\n pages = graph.get_object('me/accounts')\n pages_info=[]\n for page in pages['data']:\n pages_info.append( ( page['name'], page['access_token'] ) )\n return pages_info", "def accounts_info(self):\r\n param = {}\r\n param['appid'] = self.apiKey\r\n param['nonce'] = int(time.time() * 1000)\r\n param['timestamp'] = int(time.time())\r\n return self.__signed_GET('/api/v1/account/all', param, self.timeout)", "def list_active_customers():\n init_database()\n return Customer.select().where(Customer.active_status).count()", "def display_accounts(cls):\n return cls.account_list", "def list_accounts(min_conf=1):\n min_conf = str(min_conf)\n try:\n stdout = subprocess.check_output([\"litecoin-cli\", \"listaccounts\", min_conf])\n accounts = json.loads(stdout.decode())\n except:\n sys.exit(1)\n\n return accounts", "def list_active_customers():\n with cm.DATABASE.transaction():\n # .select() has a .where() method to specify criteria for searching\n active_customers = cm.Customer.select().where(\n cm.Customer.status == \"Active\").count()\n LOGGER.info(\"Active customers: %s\", active_customers)\n return active_customers", "def getactiveusers(self):\n\n select_activeusers = (\n \"SELECT count(DISTINCT username) FROM public.jobs \"\n \"WHERE latestjobversion = True AND insertdate BETWEEN Date(%s) AND Date(%s) \"\n \"AND (username NOT IN (%s)) \"\n )\n\n\n self.pgcursor.execute(select_activeusers, (self.startdate, self.enddate, self.adminusers))\n\n activeusers = 0\n x = self.pgcursor.fetchone()\n if x is not None:\n activeusers = x[0]\n\n # print(\"No of active users: {0}\".format(activeusers))\n return activeusers", "def active_users(self, *args, **kwargs):\r\n return self._get('ActiveUsers', *args, **kwargs)", "def list(ctx):\n if ctx.obj.get('NAMESPACE') != 'accounts':\n click.echo(\n click.style('Only account data is available for listing.', fg='red')\n )\n return\n\n swag = create_swag_from_ctx(ctx)\n accounts = swag.get_all()\n _table = [[result['name'], result.get('id')] for result in accounts]\n click.echo(\n tabulate(_table, headers=[\"Account Name\", \"Account Number\"])\n )", "def get_open_orders(self):\n url = 'https://coincheck.com/api/exchange/orders/opens'\n headers = make_header(url, access_key=self.access_key, secret_key=self.secret_key)\n r = requests.get(url, headers=headers, timeout=self.timeout)\n return json.loads(r.text)", "def list_active_customers():\n return Customer.select().where(Customer.is_active).count()", "def accounts(self):\n return self._accounts.values()", "def return_active_users():\n return json.dumps(app.active_users)", "def list_active_customers():\n with database.transaction():\n query = (Customer\n .select(fn.COUNT(Customer.status).alias('count'))\n .where(Customer.status == 'Active'))\n LOGGER.info(query)\n\n customer_count = [item.count for item in query]\n LOGGER.info('Number of active 
customers: %s', customer_count[0])\n\n return customer_count[0]", "def retrieve_open_issues(self):\n return self._retrieve_issues(\"open\")", "def list_active_customers():\n count_query = (Customer\n .select(Customer, fn.COUNT(Customer.name)\n .alias('cust_count'))\n .where(Customer.status == 'active'))\n for count in count_query:\n return count.cust_count", "def get_open_transactions(self):\n return self.__open_transactions[:]", "def get_accounts(self):\n me = objects.AdUser(fbid=\"me\")\n my_accounts = list(me.get_ad_accounts(fields=[\n 'id',\n 'name',\n 'timezone_name',\n 'amount_spent',\n 'currency']))\n return my_accounts", "def list_active_customers():\n try:\n active_customer_count = 0\n for _ in Customer.select().where(Customer.status == 'Active'):\n active_customer_count += 1\n return active_customer_count\n except Exception as unknown_error:\n print(f'Error. Not able to count number of active customers. {unknown_error}')", "async def get_open_order_nos(self):\n success, error = await self._rest_api.get_open_orders(self._raw_symbol)\n if error:\n return None, error\n order_nos = []\n for order_info in success:\n order_no = \"{}_{}\".format(order_info[\"orderId\"], order_info[\"clientOrderId\"])\n order_nos.append(order_no)\n return order_nos, None", "def listaccounts(self, minconf=1, as_dict=False):\n if as_dict:\n return dict(self.proxy.listaccounts(minconf))\n else:\n return list(self.proxy.listaccounts(minconf).keys())", "def test_getTopAcounts(self, **params):\n req = Account.getTopAccounts()\n print(req)\n self.assertEqual(req['success'], True)", "def get_in_active_users(self) -> tuple:\n users_list: dict_list_type = [user.to_dict() for user in UserModel.query(UserModel.is_active == False).fetch()]\n return jsonify({'status': True, 'payload': users_list, 'message': 'successfully retrieved active users'}), 200", "def list_active_customers():\n try:\n active_customer_count = 0\n for _ in Customer.select().where(Customer.status == 'Active'):\n active_customer_count += 1\n logger.info(\n f\"Successfully counted active customers {active_customer_count}\"\n )\n return active_customer_count\n except Exception as unknown_error:\n logger.error(f\"Error. Failed to count customers. {unknown_error}\")\n print(\n f'Error. 
Not able to count number of active customers.'\n ' {unknown_error}'\n )", "async def get_open_order_nos(self):\n success, error = await self._rest_api.get_open_orders(self._raw_symbol)\n if error:\n return None, error\n else:\n order_nos = []\n for order_info in success:\n order_no = \"{}_{}\".format(order_info[\"orderId\"], order_info[\"clientOrderId\"])\n order_nos.append(order_no)\n return order_nos, None", "def accounts_inactive(request):\r\n user_list = UserMgr.get_list(active=False)\r\n ret = {\r\n 'count': len(user_list),\r\n 'users': [dict(h) for h in user_list],\r\n }\r\n return _api_response(request, ret)", "def get_open_orders(self, market):\n #{'success': True, 'message': '', 'result': [{'Uuid': None, 'OrderUuid': '7f43f22f-586b-46d8-a4b2-f457cfeb2aac', 'Exchange': 'BTC-GEO', 'OrderType': 'LIMIT_SELL', 'Quantity': 2.03478908, 'QuantityRemaining': 2.03478908, 'Limit': 0.00097503, 'CommissionPaid': 0.0, 'Price': 0.0, 'PricePerUnit': None, 'Opened': '2017-07-03T14:13:20.903', 'Closed': None, 'CancelInitiated': False, 'ImmediateOrCancel': False, 'IsConditional': False, 'Condition': 'NONE', 'ConditionTarget': None}]}\n #{'success': 1, 'return': {'240005185729406': {'pair': 'lsk_btc', 'type': 'sell', 'amount': 1, 'rate': 0.096319, 'timestamp_created': '1499255345', 'status': 0}}}\n result = self.api_query('ActiveOrders', {'pair': market})\n\n openOrder =[]\n if result['success'] == 1:\n try :\n for key, value in result['return'].items():\n openOrder.append({'OrderUuid':key})\n result = {'success': True, 'message': '', 'result' : openOrder}\n except:\n result = {'success': False, 'message': '', 'result': openOrder}\n else:\n result = {'success': False, 'message': '', 'result': openOrder}\n\n return result", "def accounts(self):\n if self._accounts is None:\n url = f'{self._ynab.api_url}/budgets/{self.id}/accounts'\n response = self._ynab.session.get(url)\n if not response.ok:\n self._logger.error('Error retrieving accounts, response was : %s with status code : %s',\n response.text,\n response.status_code)\n return []\n self._accounts = [Account(self, account)\n for account in response.json().get('data', {}).get('accounts', [])]\n return self._accounts", "def get_all_accounts():\n accounts = Account.query.all()\n print(accounts)\n return \"\"", "def get_active_users(self) -> tuple:\n users_list: dict_list_type = [user.to_dict() for user in UserModel.query(UserModel.is_active == True).fetch()]\n return jsonify({'status': True, 'payload': users_list, 'message': 'successfully retrieved active users'}), 200", "def accounts():", "def get(self):\n return sync.get_open_orders()", "def accounts(self):\n # get the summary data\n options = { 'PayLoadText' : self.request_xml() }\n\n print(self.url)\n print(options)\n\n response = requests.get(self.url, params=options) \\\n .content\n print(response)\n xml_tree = xml.etree.cElementTree.fromstring(response)\n\n status = xml_tree.find('ServiceResponse/Status').text\n\n if status != 'success':\n raise requests.exceptions.RequestException()\n\n self.security_token = xml_tree.find('ClientSecurityToken').text\n\n accounts = [ \n self.create_account(account)\n for account in xml_tree.iter('CardAccounts')\n ]\n\n return accounts", "async def get_open_order_nos(self):\n success, error = await self._rest_api.get_open_orders(self._raw_symbol)\n if error:\n return False, error\n order_nos = []\n for order_info in success[\"data\"]:\n order_nos.append(order_info[\"order_id\"])\n return order_nos, None", "def open_orders(self, **params):\n return 
self._get('openOrders', signed=True, params=params)", "def users_active(self):\n return self.users(\"inactive == NO\")", "def get_fedcm_account_list(self):\n pass", "def get(self):\n held_accounts = User.get_held_accounts(\n get_jwt_identity(), initialize_models=True)\n\n schema = AccountsListSchema(many=True)\n response = schema.dumps(held_accounts)\n\n return jsonify_response(json.loads(response.data), 200)", "def get_all_open_orders(self):\n self.__init_client()\n open_orders = retry(lambda: self.client\n .futures_get_open_orders(symbol=self.pair)) \n if len(open_orders) > 0:\n return open_orders\n else:\n return None", "def activeusercount(self):\n sql = '''select to_char(count(*)-1, 'FM99999999999999990') retvalue \n from v$session where username is not null \n and status='ACTIVE' '''\n self.cur.execute(sql)\n res = self.cur.fetchall()\n for i in res:\n print(i[0])", "def fetchAllAccounts(config):\n allAccounts = []\n currentStart = 1\n currentLimit = 99\n while currentLimit > 98 :\n currentPull = fetchBatchAccounts(accountsConfig, currentStart, currentLimit)['data']\n allAccounts = allAccounts + currentPull\n currentLimit = int(len(currentPull))\n currentStart = int(currentStart) + int(currentLimit)\n return allAccounts", "def accounts(self):\r\n return accounts.Accounts(self)", "def get_accounts(fints_login, user_scope):\n from erpnextfints.utils.fints_controller import FinTSController\n interactive = {\"docname\": user_scope, \"enabled\": True}\n\n return {\n \"accounts\": FinTSController(\n fints_login,\n interactive).get_fints_accounts()\n }", "def get_open_orders(self, asset=None):\n try:\n self.ask_request()\n response = self._request('orders', None)\n order_statuses = response.json()\n except Exception as e:\n raise ExchangeRequestError(error=e)\n\n if 'message' in order_statuses:\n raise ExchangeRequestError(\n error='Unable to retrieve open orders: {}'.format(\n order_statuses['message'])\n )\n\n orders = []\n for order_status in order_statuses:\n order, executed_price = self._create_order(order_status)\n if asset is None or asset == order.sid:\n orders.append(order)\n\n return orders", "def get_account_transactions(self, min_row=0, max_row=100):\n data = {\n 'min_row': min_row,\n 'max_row': max_row\n }\n query_string = build_query_string(data)\n\n r = requests.get(build_api_call(self.base_url, ACCOUNTID, 'transactions', query_string),\n auth=HTTPBasicAuth(KEY, SECRET))\n\n if r.status_code == 200:\n return r.json()\n else:\n return 'error'", "def get_all_accounts_information(self):\n\t\treturn self._send_command_to_entity_server(us.SERVER_COMMAND_ENTITY_OWNER_SUDO_OPERATION, us.SERVER_COMMAND_GET_ALL_ACCOUNTS_INFORMATION)", "def return_open_orders(self, currency_pair):\n return self.api_query('returnOpenOrders', {\"currencyPair\": currency_pair})", "def get_accounts_for_ou(logger, options, org_client, path):\n logger.debug(\"Getting accounts for OU: %s\", path)\n org_unit = get_ou_from_path(logger, org_client, path)\n ous = []\n if options.no_recursive:\n ous.append(org_unit)\n else:\n ous.extend(get_child_ous(logger, org_client, org_unit))\n\n result = []\n for org_unit in ous:\n args = {\"ParentId\":org_unit[\"Id\"]}\n accounts = utils.generic_paginator(logger, org_client.list_accounts_for_parent,\n \"Accounts\", **args)\n for acc in accounts:\n acc[\"Path\"] = org_unit[\"Path\"]\n if 'Status' in acc:\n if acc['Status'] != 'SUSPENDED':\n result.append(acc)\n else:\n logger.info(\"found suspended account %s, ignoring it.\" % acc)\n return result", "def 
ldap_get_active_members():\n return _ldap_get_group_members('active')", "def accounts(self):\r\n return resources.Accounts(self)", "def list_active_emails():\n db_customers = Customers.select().where(Customers.status)\n LOGGER.debug(\"Returning list of active customer emails\")\n email_list = [x.email_address for x in db_customers]\n LOGGER.info(\"Email list: %s\", email_list)\n return email_list", "def get_all_active_members(debug, contactsUrl):\n\n valid_date = str(datetime.date.today() - datetime.timedelta(days=7)) # 7 days ago in yyyy-mm-dd format\n\n #params = {'$filter': 'member eq true AND Status eq Active',\n # '$async': 'false'}\n params = {'$filter': \"member eq true AND ( Status eq Active OR ( Status eq PendingRenewal AND 'Renewal due' ge \" + valid_date + \"))\",\n '$async': 'false'}\n request_url = contactsUrl + '?' + urllib.parse.urlencode(params)\n if debug: print('Making api call to get contacts')\n return api.execute_request(request_url).Contacts", "def list_accounts():\n\n try:\n accounts = Account.query.all()\n except NoResultFound:\n print(f\"No account configured yet.\")\n return\n n_len = max([len(a.nickname) for a in accounts if a.nickname != 'no.name'])\n fmt = \"{nickname:\" + str(n_len) + \"s}: {email:s}\"\n #import pdb; pdb.set_trace()\n for acct in [acct for acct in accounts if acct.nickname != 'no.name']:\n print(fmt.format(nickname=acct.nickname, email=acct.email))\n return", "def get_status(self, rows):\n\n\t\taccount_status = {}\n\t\tfor row in rows:\n\t\t\t(account_number, status) = (int(row[0]), row[2])\n\t\t\tif account_status.has_key(account_number):\n\t\t\t\taccount_status[account_number].append(status)\n\t\t\t\t# Log account information if account has more than 1 current active status\n\t\t\t\tself.log.debug(\"Multiple Current Statuses for Account Number:\" + account_number)\n\t\t\telse:\n\t\t\t\taccount_status[account_number] = [status]\n\n\t\treturn account_status", "def get_accounts(self):\n uri = '/credentials'\n response = gate_request(uri=uri)\n assert response.ok, 'Failed to get accounts: {0}'.format(response.text)\n\n all_accounts = response.json()\n self.log.debug('Accounts in Spinnaker:\\n%s', all_accounts)\n\n filtered_accounts = []\n for account in all_accounts:\n if account['type'] == self.provider:\n filtered_accounts.append(account)\n\n if not filtered_accounts:\n raise ForemastError('No Accounts matching {0}.'.format(self.provider))\n\n return filtered_accounts", "def list_accounts(self):\n information = []\n for provider in self._accounts.values():\n information.append({\n 'token': provider.credentials.token,\n 'url': provider.credentials.url,\n })\n\n return information", "def accounts():\n if not session.get('authed', False):\n flash(\"Please log in.\")\n return redirect(my_url('index'))\n account_ids = redis_client.smembers('%s-accounts' % session['phone'])\n accounts = [kloudless.Account.retrieve(i) for i in account_ids]\n callback_url = quote_plus(my_url('auth_callback'))\n return render_template('accounts.html', accounts=accounts, app_number=APP_NUMBER,\n callback_url=callback_url, app_id=KLOUDLESS_APP_ID)", "def getFoursquareCheckins(self, accessToken):\n return self.getFoursquareApi('users/self/checkins', accessToken)", "def getCustomerAccount(self):\n self.logger.debug(\"\")\n for cust in self.getCustomerAccountData():\n accounts = len(cust['accounts'])\n self.logger.debug(\"%d accounts in %s\", accounts, cust['CustomerId'])\n ii = 1\n for acct in cust['accounts']:\n self.logger.debug(\"yield %s, %s\", cust['CustomerId'], 
acct['Id'])\n yield cust['CustomerId'], acct['Id'], ii, accounts\n ii += 1", "def manager_active_list(self):\n _, body = self.request('/v1.1/managers/active', 'GET')\n return body", "async def get_in_active_users_async(self) -> tuple:\n users_list: dict_list_type = [user.to_dict() for user in UserModel.query(UserModel.is_active == False).fetch_async().get_result()]\n return jsonify({'status': True, 'payload': users_list, 'message': 'successfully retrieved active users'}), 200", "def getActiveObjects(doc):\n lst = list()\n op = doc.GetFirstObject()\n while op:\n if op.GetBit(c4d.BIT_ACTIVE) == True: \n lst.append(op)\n op = Helpers.getHNext(op)\n return lst", "def accounts(web3):\n return web3.eth.accounts", "def fusion_api_get_active_sessions(self):\n return self.loginsession.get_active_sessions()", "def fetch_users(self):\n users = super(type(self), self).fetch_users()\n return list(filter(self._check_active, users))", "def _get_ad_accounts() -> [adaccount.AdAccount]:\n system_user = user.User(fbid='me')\n ad_accounts = system_user.get_ad_accounts(fields=['account_id',\n 'name',\n 'created_time',\n 'timezone_offset_hours_utc'])\n return list(ad_accounts)", "def invoices(self):\r\n return inv.AccountInvoices(self)", "def get(self):\n accounts = database.get_all(Accounts)\n all_accounts = []\n for account in accounts:\n all_transactions = []\n for transaction in account.transactions:\n all_transactions.append(transaction.id)\n new_account = {\n \"id\": account.id,\n \"name\": account.name,\n \"iban\": account.iban,\n \"balance\": float(account.balance),\n \"currency\": account.currency,\n \"transactions ids\": all_transactions\n }\n\n all_accounts.append(new_account)\n return json.dumps(all_accounts), 200", "def get_list(active=None, order=None, limit=None):\r\n user_query = User.query.order_by(User.username)\r\n\r\n if active is not None:\r\n user_query = user_query.filter(User.activated == active)\r\n\r\n if order:\r\n user_query = user_query.order_by(getattr(User, order))\r\n else:\r\n user_query = user_query.order_by(User.signup)\r\n\r\n if limit:\r\n user_query = user_query.limit(limit)\r\n\r\n return user_query.all()", "def fetchById(accountIdList):\n accounts= []\n url = accountsConfig['domain']\n for accId in accountIdList:\n r = requests.get(url +'/'+ str(accId), headers=accountsConfig['headers']).json()\n accounts.append(r)\n return accounts", "async def fetch_open_orders(self, symbol: Optional[str] = None, since: Optional[int] = None, limit: Optional[int] = None, params={}):\n request = {\n 'filter': {\n 'open': True,\n },\n }\n return await self.fetch_orders(symbol, since, limit, self.deep_extend(request, params))", "def getInterestedUsers():", "def get_users(self, email):\n print(\"bu\")\n active_users = UserModel._default_manager.filter(**{\n '%s__iexact' % UserModel.get_username_field_name(): username,\n 'is_active': True,\n })\n print(active_users)\n # active_users = UserModel._default_manager.filter(**{\n # '%s__iexact' % UserModel.get_email_field_name(): email,\n # 'is_active': True,\n # })\n return (u for u in active_users if u.has_usable_password())", "def open_orders(self, **params):\n return self._get('option/openOrders', signed=True, params=params, version=None)", "def _get_linkedin_accounts(self, linkedin_access_token):\n response = requests.get(\n 'https://api.linkedin.com/v2/me?projection='\n + '(id,localizedLastName,localizedFirstName,'\n + 'profilePicture(displayImage~:playableStreams))',\n headers={\n 'Authorization': 'Bearer ' + linkedin_access_token,\n 
'cache-control': 'no-cache',\n 'X-Restli-Protocol-Version': '2.0.0'\n }\n ).json()\n\n if ('id' in response and 'localizedLastName' in response\n and 'localizedFirstName' in response):\n linkedin_account_id = 'urn:li:person:' + response['id']\n\n try:\n image_url = response['profilePicture']['displayImage~']['elements'][0]['identifiers'][0]['identifier']\n linkedin_profile_image = base64.b64encode(requests.get(image_url).content)\n except Exception:\n linkedin_profile_image = ''\n\n # TODO - STD: add each companies page\n return [{\n 'name': response['localizedLastName'] + ' ' + response['localizedFirstName'],\n 'linkedin_account_id': linkedin_account_id,\n 'linkedin_access_token': linkedin_access_token,\n 'image': linkedin_profile_image\n }]\n\n return []", "def list(self, request, *args, **kwargs):\n queryset = BankConnections.objects.filter(user=self.request.user)\n \n response = [{\n \"connected\": True if connection.isTokenValid else False,\n \"bank\": connection.bank_branch.bank.id\n } for connection in queryset]\n\n return Response(response)", "def get_active_users(mins):\n min_dt = now() - timedelta(minutes=mins)\n return User.objects.filter(profile__accessed__gte=min_dt).count()", "def get_users(self, email):\n active_users = User.objects.filter(\n email__iexact=email,\n is_active=True\n )\n return (u for u in active_users)", "def get_company_users(self, company_referece, active=True):\n url = 'companies/{0}/users'.format(company_referece)\n if active:\n data = {'status_in_company': 'active'}\n else:\n data = {'status_in_company': 'inactive'}\n result = self.get(url, data)\n return result.get('users', result)", "def get_users(self):\r\n\t\tlogger.debug(\"Fetch users\")\r\n\t\t\r\n\t\treturn login.get_users()", "def list_users(access_token):\n request_url = OKTA_URL + \"api/v1/users\"\n headers = {\"Authorization\": \"Bearer \" + access_token}\n group_request = requests.get(request_url, headers=headers).json()\n return group_request", "def returnOpenOrders(self, currency_pair=\"all\"):\n pass", "def show_accounts(conn, userid):\n print('\\n\\nAccount statment for user', (userid))\n with conn.cursor() as curs:\n curs.execute('SELECT id, type, balance FROM accounts WHERE owner_id=%s', (userid,))\n rows = curs.fetchall()\n print('Number of results:', curs.rowcount)\n for row in rows:\n print(row)" ]
[ "0.63580984", "0.6303217", "0.62876016", "0.6213196", "0.61325455", "0.61324793", "0.6089535", "0.6086548", "0.6079627", "0.6079134", "0.6050323", "0.6013716", "0.59957004", "0.59836334", "0.59793425", "0.5972705", "0.5965284", "0.59572", "0.5945932", "0.59283507", "0.5911551", "0.58857083", "0.58805954", "0.5874453", "0.58570915", "0.5855002", "0.58417356", "0.58339196", "0.5831123", "0.5826693", "0.57704306", "0.5758257", "0.5756581", "0.5748724", "0.5748157", "0.56824094", "0.568109", "0.567365", "0.56666267", "0.56568575", "0.5639378", "0.5627847", "0.5625639", "0.5624547", "0.5618651", "0.561276", "0.55751914", "0.5560087", "0.55544776", "0.5542565", "0.5542394", "0.5522518", "0.5509156", "0.54996496", "0.5495248", "0.546898", "0.54689515", "0.5450077", "0.544766", "0.54432875", "0.5436816", "0.54146427", "0.54051924", "0.5399515", "0.5395862", "0.53814876", "0.537924", "0.53598934", "0.5351286", "0.53503925", "0.53408813", "0.53301525", "0.53277856", "0.53259075", "0.53198946", "0.5317665", "0.5307938", "0.53026325", "0.5273024", "0.52723044", "0.526244", "0.52620655", "0.5250415", "0.5243769", "0.5240452", "0.522988", "0.52152175", "0.5210329", "0.5209422", "0.52005637", "0.519889", "0.51965934", "0.5196394", "0.51901567", "0.51639503", "0.51595116", "0.51556957", "0.51549405", "0.51547194", "0.514872" ]
0.8321022
0
Logging out from fpl
async def logout(self): _LOGGER.info("Logging out") URL_LOGOUT = API_HOST + "/api/resources/logout" try: async with async_timeout.timeout(TIMEOUT): await self.session.get(URL_LOGOUT) except Exception as e: _LOGGER.error(e)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def logout():", "def logout():\n login()", "def logout(self):", "def logout(self):\n pass", "def do_logout():\n\n session['authenticated'] = False\n session['username'] = None\n session['name'] = None\n session['cpi'] = None\n session['grp_size'] = None\n\n return home()", "def logOut(self):\n self.client.logout()", "def logout(self):\r\n self._api_entrypoint.logout(self._session_token)", "def logout():\n rino.login.logout()", "def logout(self):\n logger.info(\"Logging out\")\n self._limited_call(self._requests.get, constants.FA_ROOT + \"/logout/\")", "def logoff(self,username,password):\n functions.checkLogged()\n thread.stop()\n try:\n r = urllib2.urlopen(\"http://cs302.pythonanywhere.com/logoff?username={0}&password={1}\".format(username, password), timeout=4)\n except socket.timeout, e:\n logging.debug('Request to login server timed out')\n except urllib2.URLError, e:\n logging.debug('Unable to contact the login server')\n cherrypy.lib.sessions.expire()\n raise cherrypy.HTTPRedirect('/')", "def logout(self, **kwargs):\n\tself.call('logout')", "def logout(self):\n self.client.get(f\"{host}/logout\")", "def logout_user():\n pass", "def logout(self):\n self.__send_command(\"LOGOUT\")", "def logout():\n print(colored('\\nYou have successfully logged out.\\n', 'yellow',\n attrs=['bold']))\n exit()", "def log_out():\n\n del session[\"user_id\"]\n # print session[\"user_id\"]\n flash('You were successfully logged out')\n return render_template('homepage.html')\n\n #Additional reference for log in/log out can be found in project tracker project", "def logout():\n return logout_user()", "def logout():\n session.clear()\n return redirect(\"/showlog\")", "def logout(self):\n try:\n log.info(\"Logging out of the netscaler\")\n self.post(\"/logout\", {\"logout\": {}})\n except BadNetScaler as error:\n log.error(\"Failed to logout of the netscaler: %s\", error)\n self.sessionid = \"\"", "def t24_logoff(self):\n if self.home_page:\n self.login_page = self.home_page.sign_off()\n self.home_page = None", "def test_logout(self):\r\n self.logout()", "def log_out():\n if 'name' in session:\n PLAN.logout_user(session['name'])\n session.pop('name', None)\n return redirect(url_for('log_in'))\n return redirect(url_for('log_in'))", "def logout(self):\n url = \"https://%s/game/index.php?page=logout\" % self.server\n #\"https://s103-pt.ogame.gameforge.com/game/index.php?page=logout\"\n self.session.get(url)", "def logout(self):\n self.auth = None", "def logout():\n session.pop('logged_in', None)\n session.pop('fname', None)\n session.pop('patron', None)\n flash('You were logged out')\n return redirect('/')", "def logoff(self):\n\t\treturn Job(SDK.PrlSrv_Logoff(self.handle)[0])", "def log_out(self):\n DB.log_out()\n self.customer.log_out()\n self.displayUi = LoginScreen()\n self.hide()\n self.displayUi.show()", "def log_out(self):\n self.__is_logged_in = False", "def _logout(self):\n self.api_query(action=\"logout\")\n self._cookiejar.clear()\n self._save_cookiejar()", "def logout():\n\n do_logout()\n flash(f\"You are now logged out!\", \"success\")\n return redirect('/')", "def logout(self, request):\n pass", "def logout(self):\n with self.client.post(\"/logout\", catch_response=True) as response:\n for r_hist in response.history:\n if r_hist.status_code > 200 and r_hist.status_code < 400:\n response.success()\n self.user.username = None\n # go to UnauthenticatedTasks\n self.interrupt()", "def _CAS_logout(self):\n import urllib\n redirect(\"%s?service=%s\" % (self.cas_logout_url, self.cas_my_url))", "def 
logout():\n name = current_user.name\n logout_user()\n return f\"Logged out of {name}'s account!\"", "def axapi_logoff(self):\n module = 'logoff'\n method = 'POST'\n response = self.axapi_call(module, method,'')\n if '2' in str(response.status_code):\n print(self.device + ' Successfully logged off of the device')\n else:\n print(self.device + ' There was an error trying to log off of the device')", "def logout():\n\n do_logout()\n flash('successfully logged out')\n return redirect(\"/\")", "def logout():\n flash(\"You have been logged out\")\n session.pop(\"user\")\n return redirect(url_for(\"tips\"))", "def logout_post():\n\treturn \"LOGOUT\"", "def log_out_user(self):\n flask_login.logout_user()", "def logout():\n session.pop('userinfo', None)\n # no more steps necessary, because we don't keep the token around\n if 'target' not in session.keys():\n return redirect(\"/\")\n return redirect(session['target'])", "def logout():\n update_session(\"X-GEMINI-APIKEY\", \"\")\n set_secret_key(\"\".encode())\n set_login_state(False)", "def logout():\n if \"username\" in session.keys():\n del session[\"username\"]\n if not app.config[\"DISABLE_AUTH\"]:\n return redirect(url_for(\"login\") + \"?slo\")\n else:\n return redirect(url_for(\"index\"))", "def logout():\n\n session.pop(\"leader_logged_in\", False)\n session.pop(\"leader_id\", None)\n session.pop(\"leader_email\", None)\n\n return redirect(f\"{BASEPATH}/login\")", "def do_logout():\n del session[CURRENT_USER_KEY]", "def process_logout():\n\n print \" LOGGED OUT USER \"\n\n del session[\"user_id\"]\n\n flash(\"You have Successfully Logged Out!\")\n\n return redirect(\"/\")", "def process_logout():\n\n print \" LOGGED OUT USER \"\n\n del session[\"user_id\"]\n\n flash(\"You have Successfully Logged Out!\")\n\n return redirect(\"/\")", "def remove_florlp_session(session):\n try:\n requests.get(FLORLP_LOGOUT_URL,\n cookies=session,\n allow_redirects=False)\n except requests.exceptions.RequestException, ex:\n log.debug('error while removing session: %s', ex)", "def fusion_api_logout_appliance(self, headers=None):\n # logger._log_to_console_and_log_file(\"Logging out of appliance\")\n return self.loginsession.logout(headers)", "def logout():\n flash(_('You were logged out'))\n session.pop('user_id', None)\n return redirect(url_for('index'))\n #return redirect(url_for('public_timeline'))", "def logOut(self, e):\n\n\t\tself.unBind()\n\t\tself.menu_manager.runLogin()\n\t\tself.main_menu_window.root.destroy()", "def logout():\n session.pop('logged_in', None)\n flash('You were logged out')\n return redirect(url_for('get_devices'))", "def logout():\n\n do_logout()\n flash(\"Successfully logged out\", \"success\")\n return redirect('/')", "def logout(self):\n self.__isroot = 0", "def login():", "def login():", "def logout():\n logout_user()\n return redirect(url_for('main.index'))", "def logout():\r\n logout_user()\r\n flash('You were logged out.')\r\n return redirect(url_for('index'))", "def close(self):\n\n if self.closed:\n return\n\n url = '{0}://{1}/admin/launch?script=rh&template=logout&action=logout'\n\n try:\n resp = self._handle.open(url.format(self.proto, self.host))\n pg = resp.read()\n if 'You have been successfully logged out.' 
not in pg:\n self.log('Failed logout, somehow:\\n{0}'.format(pg))\n else:\n self._closed = True\n except (urllib2.HTTPError, urllib2.URLError) as e:\n self.log('{0}: {1}'.format(e.__class__.__name__, e))", "def logout(self):\n self.__aceQLHttpApi.logout()", "def logout():\n session.pop('microsoft_token', None)\n session.pop('state', None)\n return redirect(url_for('index'))", "def logout():\n session.pop('microsoft_token', None)\n session.pop('state', None)\n return redirect(url_for('index'))", "def signout(self):\n username = cherrypy.session.get('username')\n if username is None:\n pass\n else:\n cherrypy.lib.sessions.expire()\n raise cherrypy.HTTPRedirect('/')", "def logout(self):\n data = {'action': 'logout'}\n self.call(data)\n self._high_limits = None\n return True", "def logout():\n session.pop('logged_in', None)\n flash('You were logged out', 'success')\n return redirect(url_for('show_entries'))", "def logout():\n logout_user()\n flash('You have successfully been logged out')\n\n # redirect to login page\n return redirect(url_for('auth.login'))", "def logout():\n session_logout()\n return redirect(url_for(\"home\"))", "def GET_logout(self):\r\n self.logout()\r\n return self.redirect('/')", "def logout():\n \n del session[\"logged_in\"]\n flash(\"See you later! ;)\")\n return redirect('/')", "def logout(session):\r\n response = session.get(LOGOUT_URL)\r\n response.raise_for_status()", "def logout():\n # clear user data from session and flag as logged out\n for x in ['provider', 'state', 'user']:\n if x in flask.session:\n del flask.session[x]\n flask.session['logged_in'] = False\n\n flash('logout successful', 'info')\n return redirect(request.referrer or url_for('catalog.index'))", "def logout(self):\n self.getLink('Logout').click()\n self.html_redirect()\n assert 'You have been logged out successfully.' 
in self.message, \\\n 'Not successfully logged out: message={0.message!r}'.format(self)", "def logout_redirect():\n login_session.clear()\n flash('You have logged out')\n return redirect(url_for('show_homepage'))", "def logout():\n\n session.clear()\n return redirect(url_for('index'))", "def logout():\n\n session.clear()\n return redirect(url_for('index'))", "def logout():\n session.pop('username', None)\n session.pop('user_id', None)\n session.pop('logged_in', None)\n session.pop('is_admin', None)\n\n flash('Successfully logged out', 'alert-info')\n\n return redirect(url_for('index'))", "def _handle_logout(self):\n self.food_service.log_out()\n self._handle_after_logout()", "def logout():\n logout_user()\n flash('You have successfully been logged out.')\n\n # redirect to the login page\n return redirect(url_for('auth.login'))", "def logout():\n logout_user()\n flash('You have successfully been logged out.')\n\n # redirect to the login page\n return redirect(url_for('auth.login'))", "def logout(endpoint):\n communicator = ClickCallback()\n logout_command().with_communicator(communicator).build().execute(endpoint=endpoint)\n click.secho(\"Successfully logged out.\", fg=\"green\")", "def logout():\n flask_login.logout_user()\n flask.flash('Logged out')\n return flask.redirect(flask.url_for('index'))", "async def exit(self, ctx):\n print('Logging out...')\n await ctx.bot.logout()", "def logout():\n\n session.clear()\n crud.logout()\n \n return \"Logout Success\"", "def logout():\n session['user_id'] = None\n session['user_email'] = None\n return redirect(url_for('main'))", "def logout():\n logout_user()\n flash('You have successfully been logged out.')\n # redirect to the login page\n return redirect(url_for('view.login'))", "def logout():\n flash('You were logged out')\n session.pop('user_id', None)\n return redirect(url_for('leaderboard'))", "def _backend_logout_cleanup(self, name):\n self.log.info(\"User logged out: %s\", name)\n self.clear_login_cookie()\n self.statsd.incr('logout')", "def logout():\n session.clear()\n return redirect(url_for('index'))", "def logout():\n session['logged_in'] = False\n return '', 204", "def logout():\n session.clear()\n return redirect(url_for(\"index\"))", "def logout():\n session.clear()\n return redirect(url_for(\"index\"))", "def logout():\n session.clear()\n return redirect(url_for(\"index\"))", "def logout(self):\n flask_login.logout_user()\n self.app.logger.info(\"User logged out!\")\n return 'Logged out'", "def logout():\n\n # remove the username from the session if it is there\n out_user = current_user.get_id()\n logout_user()\n logger.info(out_user + ' has been logged out.')\n return redirect(url_for('home'))", "def logout():\n\n oidc.logout()\n return 'Hi, you have been logged out! <a href=\"/oidc/\">Return</a>'", "def logout():\n oidc.logout()\n return 'Hi, you have been logged out! 
<a href=\"/\">Return</a>'", "def logout():\n session.clear()\n return redirect(url_for(\"home\"))", "def logout():\n logout_user()\n return redirect(url_for(\".login\"))", "def logout():\n session.clear()\n return redirect(url_for('home'))", "def test_logout(self):\n # Logging in voluntarily the quick way:\n resp = self.app.get('/login_handler?login=manager&password=managepass',\n status=302)\n resp = resp.follow(status=302)\n ok_('authtkt' in resp.request.cookies,\n 'Session cookie was not defined: %s' % resp.request.cookies)\n # Logging out:\n resp = self.app.get('/logout_handler', status=302)\n ok_(resp.location.startswith('http://localhost/post_logout'))\n # Finally, redirected to the home page:\n home_page = resp.follow(status=302)\n authtkt = home_page.request.cookies.get('authtkt')\n ok_(not authtkt or authtkt == 'INVALID',\n 'Session cookie was not deleted: %s' % home_page.request.cookies)\n eq_(home_page.location, 'http://localhost/')", "def logout():\n session.pop('logged_in', None)\n return redirect(url_for('home'))", "def logout():\n flash('You were logged out')\n session.pop('username', None)\n return redirect(url_for('welcome_page'))" ]
[ "0.7692397", "0.73293793", "0.7156266", "0.70811003", "0.7076655", "0.70410043", "0.70080215", "0.6955402", "0.6835801", "0.6776245", "0.67325246", "0.6707584", "0.67055", "0.6672805", "0.66292965", "0.661934", "0.6611719", "0.6587299", "0.6579601", "0.6559363", "0.6532657", "0.652281", "0.6512069", "0.6510874", "0.6507719", "0.64661473", "0.6452549", "0.64431024", "0.63743013", "0.6358471", "0.6343165", "0.6332889", "0.6330973", "0.631329", "0.62983817", "0.6291967", "0.6291616", "0.62868255", "0.62825036", "0.62788975", "0.6276006", "0.6266274", "0.6264183", "0.62421834", "0.62360024", "0.62360024", "0.6233862", "0.6231952", "0.6231309", "0.6228663", "0.62275606", "0.6218061", "0.6217264", "0.6214039", "0.6214039", "0.62111115", "0.62108487", "0.6204969", "0.62048197", "0.6199702", "0.6199702", "0.6199417", "0.6187214", "0.61870974", "0.6179584", "0.61776316", "0.6177053", "0.61740357", "0.61738926", "0.6168662", "0.61684394", "0.61679053", "0.61578757", "0.61578757", "0.61557317", "0.6154602", "0.6152497", "0.6152497", "0.61523116", "0.61460096", "0.6140666", "0.6140635", "0.61343503", "0.6124619", "0.612217", "0.6120571", "0.6114195", "0.6114046", "0.611088", "0.611088", "0.611088", "0.61102104", "0.6108009", "0.6105357", "0.6100985", "0.6098438", "0.6097895", "0.609622", "0.6093335", "0.60872936", "0.6086858" ]
0.0
-1
Get data from resources endpoint
async def update(self, account) -> dict: data = {} URL_RESOURCES_ACCOUNT = API_HOST + "/api/resources/account/{account}" async with async_timeout.timeout(TIMEOUT): response = await self.session.get( URL_RESOURCES_ACCOUNT.format(account=account) ) account_data = (await response.json())["data"] premise = account_data.get("premiseNumber").zfill(9) data["meterSerialNo"] = account_data["meterSerialNo"] # currentBillDate currentBillDate = datetime.strptime( account_data["currentBillDate"].replace("-", "").split("T")[0], "%Y%m%d" ).date() # nextBillDate nextBillDate = datetime.strptime( account_data["nextBillDate"].replace("-", "").split("T")[0], "%Y%m%d" ).date() data["current_bill_date"] = str(currentBillDate) data["next_bill_date"] = str(nextBillDate) today = datetime.now().date() data["service_days"] = (nextBillDate - currentBillDate).days data["as_of_days"] = (today - currentBillDate).days data["remaining_days"] = (nextBillDate - today).days # zip code # zip_code = accountData["serviceAddress"]["zip"] # projected bill pbData = await self.__getFromProjectedBill(account, premise, currentBillDate) data.update(pbData) # programs programsData = account_data["programs"]["data"] programs = dict() _LOGGER.info("Getting Programs") for program in programsData: if "enrollmentStatus" in program.keys(): key = program["name"] programs[key] = program["enrollmentStatus"] == ENROLLED def hasProgram(programName) -> bool: return programName in programs and programs[programName] # Budget Billing program if hasProgram("BBL"): data["budget_bill"] = True bbl_data = await self.__getBBL_async(account, data) data.update(bbl_data) else: data["budget_bill"] = False # Get data from energy service data.update( await self.__getDataFromEnergyService(account, premise, currentBillDate) ) # Get data from energy service ( hourly ) # data.update( # await self.__getDataFromEnergyServiceHourly( # account, premise, currentBillDate # ) # ) data.update(await self.__getDataFromApplianceUsage(account, currentBillDate)) data.update(await self.__getDataFromBalance(account)) return data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_resources():\n return Response(f\"{Resource.get_all_resources()}\", 200, mimetype='text/plain')", "def list(self, **kwargs):\n data, self.endpoint = self.data_endpoint(kwargs)\n r = super(Resource, self).list(**data)\n\n # Change display settings and data format for human consumption\n self.configure_display(r)\n return r", "def get(self) -> Response:\n try:\n resource = request.args.getlist(\"resource\")[0]\n return set_response_headers(jsonify(get_fragments(resource)))\n except:\n return set_response_headers(jsonify(get_doc().generate()))", "def get_resources():\n user_id = session[\"email\"]\n resources = fm.get_resources(user_id)\n returned_val = dict(resources=resources)\n return jsonify(returned_val)", "def all(self, resource):\n return self.request('/' + resource)", "def get_resource_data(self, resource):\n url = self.api_url + resource\n return self.get_url_data(url)", "def get(self, resource, ids, page=1):\n url = urljoin(self.base_url, resource)\n response = requests.get(url, params={'ids[]': ids, 'page': page})\n\n return response.json()", "def get_resources(self, resource_id):\n url = \"%s/resource/%s\" % ('None', resource_id)\n resp, body = self.get(url)\n self.expected_success(200, resp.status)\n body = json.loads(body)\n return service_client.ResponseBody(resp, body)", "def get(self, data):\n ret = self._rest_call({}, 'GET')\n return json.loads(ret[2])", "async def get_resources(self, **kwargs) -> dict:\n resources = await self.request.get(self._base_path, **kwargs)\n self._sanitize_resources(resources)\n return resources", "def on_get_resource(self, req, resp, **params):\n instance = self.get_object(**params)\n resp.json(**instance.as_resource)", "def GetResourceSample():\n client = CreateClient()\n for e1 in client.GetResources(limit=5).entry:\n e2 = client.GetResource(e1)\n print 'Refetched: ', e2.title.text, e2.resource_id.text", "def get(self, id=None, o=None):\n\n response = []\n current_user = self.get_current_user()\n\n # [?timestamp_start=<XXX>&timestamp_end=<XXX>]\n ts = self.get_argument('timestamp_start',None)\n te = self.get_argument('timestamp_end',None)\n\n # GET /resources\n if not id and not o and not ts and not te:\n cursor = yield r.table('resources') \\\n .run(self.dbconnection)\n while (yield cursor.fetch_next()):\n item = yield cursor.next()\n response.append(item)\n\n # GET /resources?timestamp_start=<XXX>&timestamp_end=<XXX>\n elif not id and not o:\n try:\n nb_leases = yield r.table(\"leases\").count().run(self.dbconnection)\n if nb_leases > 0:\n # Resources NOT in Leases\n cursor = yield r.table('resources') \\\n .filter({'available':'true'}) \\\n .filter( lambda resource:\n r.table(\"leases\").map(lambda l:\n l['resources'].coerce_to('array')\n ).reduce(lambda left, right:\n left.set_union(right)\n ).contains(resource['id']).not_() \\\n ).run(self.dbconnection)\n while (yield cursor.fetch_next()):\n item = yield cursor.next()\n response.append(item)\n\n if ts and te:\n # List of Resources ids in Leases but not in the given time range\n in_leases = yield r.table(\"leases\").filter(lambda l:\n r.or_(l['start_time'].gt(int(te)),l['end_time'].lt(int(ts)))\n ).map(lambda l:\n l['resources'].coerce_to('array')\n ).reduce(lambda left, right:\n left.set_union(right)\n ).map(lambda x:\n r.table('resources').get(x) \\\n ).run(self.dbconnection)\n logger.debug(in_leases)\n response = response + in_leases\n\n if ts and not te:\n # List of Resources ids in Leases but not in the given time range\n in_leases = yield r.table(\"leases\").filter(lambda 
l:\n l['end_time'].lt(int(ts))\n ).map(lambda l:\n l['resources'].coerce_to('array')\n ).reduce(lambda left, right:\n left.set_union(right)\n ).map(lambda x:\n r.table('resources').get(x) \\\n ).run(self.dbconnection)\n response = response + in_leases\n\n if not ts and te:\n # List of Resources ids in Leases but not in the given time range\n in_leases = yield r.table(\"leases\").filter(lambda l:\n l['start_time'].gt(int(te))\n ).map(lambda l:\n l['resources'].coerce_to('array')\n ).reduce(lambda left, right:\n left.set_union(right)\n ).map(lambda x:\n r.table('resources').get(x) \\\n ).run(self.dbconnection)\n response = response + in_leases\n else:\n # All available Resources (No Leases in DB)\n cursor = yield r.table('resources') \\\n .filter({'available':'true'}) \\\n .run(self.dbconnection)\n while (yield cursor.fetch_next()):\n item = yield cursor.next()\n response.append(item)\n except Exception as e:\n logger.exception(e)\n\n # GET /resources/<id>\n elif not o and id and self.isUrn(id):\n\n cursor = yield r.table('resources') \\\n .filter({'id': id}) \\\n .run(self.dbconnection)\n while (yield cursor.fetch_next()):\n item = yield cursor.next()\n response.append(item)\n # GET /resources/<id>/leases\n elif id and self.isUrn(id) and o == 'leases':\n cursor = yield r.table(o) \\\n .filter(lambda lease: lease[\"resources\"].contains(id)) \\\n .run(self.dbconnection)\n #\n\n while (yield cursor.fetch_next()):\n item = yield cursor.next()\n response.append(item)\n # GET /resources/<id>/slices\n elif id and self.isUrn(id) and o == 'slices':\n cursor = yield r.table(o) \\\n .filter(lambda slice: slice[\"resources\"]==id) \\\n .run(self.dbconnection)\n #\n\n while (yield cursor.fetch_next()):\n item = yield cursor.next()\n response.append(item)\n # GET /resources/<id>/testbeds\n elif id and self.isUrn(id) and o == 'testbeds':\n cursor = yield r.table('resources') .filter({'id': id}) \\\n .pluck('id','testbed','manager') \\\n .merge(lambda res: {\n 'testbeds': r.table('testbeds').get_all(res['testbed'], index='id') \\\n .coerce_to('array')\n }) \\\n .run(self.dbconnection)\n while (yield cursor.fetch_next()):\n item = yield cursor.next()\n response.append(item)\n else:\n self.userError(\"invalid request\")\n\n return\n\n self.finish(json.dumps({\"result\": response}, cls=myJSONEncoder))", "def GetResourcesSample():\n client = CreateClient()\n # Get a feed and print it\n feed = client.GetResources()\n PrintFeed(feed)", "def test_get_api_resources(self):\n pass", "def _get_resource(self, *args, **kwargs):\r\n r = []\r\n if kwargs['resource'] in self.resources:\r\n r = self.get_list(\r\n api_endpoint=settings.GITHUB_SETTINGS['GITHUB_USER_REPO_API'].format(**kwargs), **kwargs\r\n )\r\n else:\r\n ValueError(\"{resource} - Resource Not Supported\".format(**kwargs)) \r\n return r", "def get(self):\n try:\n query_parameters = request.args\n logging.debug(f\" query params - {query_parameters}\")\n top, skip, expand_code, selects = parse_args(query_parameters)\n ds_list = Datastreams.return_page_with_expand(\n top, skip, expand_code, selects\n )\n response = jsonify(ds_list)\n response.status_code = 200\n except Exception as e:\n logging.warning(e)\n response = jsonify({\"message\": \"error\"})\n response.status_code = 400\n\n finally:\n return response", "def get_all(self, context, filters=None):\n try:\n db_resources_data = self.db_api.get_all_resources(\n context, **filters)\n\n _resources_data = []\n for db_resource_data in db_resources_data:\n 
_resources_data.append(_make_response(db_resource_data))\n except Exception as e:\n msg = (\"Error retrieving the 'resources' reason : %s\"\n % e.message)\n LOG.exception(msg)\n raise exception.RetrieveException(e.message)\n return _resources_data", "def get(self, request, *args, **kwargs):\n return self.list(request, *args, **kwargs)", "def get(self, request, *args, **kwargs):\n return self.list(request, *args, **kwargs)", "def get(self, request, *args, **kwargs):\n return self.list(request, *args, **kwargs)", "def get(self, request, *args, **kwargs):\n return self.list(request, *args, **kwargs)", "def get(self, request, *args, **kwargs):\n return self.list(request, *args, **kwargs)", "def get(self, url_or_path):\n return self.request.get(url_or_path).json()", "def get(self, data):\n pass", "def test_get_resource(self):\n for i in range(11):\n self.app.post(f'/v1/resource/{ResourceTypeName.get()}', data=json.dumps({'actions': ['tr:action1']}),\n headers=admin_headers)\n self._test_paging('/v1/resources', admin_headers, 10, 'resources')", "def get(self, context, id_):\n try:\n db_resource_data = self.db_api.get_resource(\n context, id_)\n _resource_data = _make_response(db_resource_data,\n meta_data=False)\n\n except exception.NotFound as e:\n raise e\n\n except Exception as e:\n msg = (\"Error retrieving the 'resource':%s. Reason: %s\"\n % (id_, e.message))\n LOG.exception(msg)\n raise exception.RetrieveException(e.message)\n\n LOG.info(\"Resource manager data is %s \" %\n logging.mask_password(_resource_data))\n return _resource_data", "def get(self):\n try:\n query_parameters = request.args\n logging.debug(f\" query params - {query_parameters}\")\n if query_parameters:\n thing = None\n sensor = None\n if \"thing\" in query_parameters:\n thing = request.args[\"thing\"]\n if \"sensor\" in query_parameters:\n sensor = request.args[\"sensor\"]\n logging.debug(f\"thing={thing},sensor={sensor}\")\n datastreams = Datastreams.filter_by_thing_sensor(thing, sensor)\n else:\n result = {\"message\": \"no known query parameters\"}\n response = jsonify(result)\n response.status_code = 400\n return response\n\n except Exception as e:\n logging.warning(e)\n result = {\"message\": \"error\"}\n response = jsonify(result)\n response.status_code = 400\n return response\n\n if datastreams:\n response = jsonify(datastreams)\n response.status_code = 200\n return response\n else:\n result = {\"message\": \"No datastreams found\"}\n response = jsonify(result)\n response.status_code = 200\n return response", "def get_resources(self, resource_url):\n response = self.response(resource_url)\n body = response[0]\n return ResourceParser.extract_resources(body)", "def get(owner, resource):\n resource = logic.resource.find(owner, resource)\n return jsonify(resource)", "def get(self):\r\n return http.Request('GET', self.get_url()), parsers.parse_json", "def get(self):\r\n return http.Request('GET', self.get_url()), parsers.parse_json", "async def get(self, filters: dict = None):\n\n res = self.__get_data(filters)\n\n return res", "def get(self, *path, **data):\n\t\treturn self.request('GET', *path, **data)", "def handle_get():\n for resource in resources:\n\n # acquire lock\n res_lock = resource.get(\"lock\")\n res_lock.acquire()\n\n # Get if available\n if resource.get(\"available\") == \"true\":\n # Available - acquire resource and return\n resource.update({\"available\": \"false\"})\n res_lock.release()\n return jsonify(resource.get(\"data\"))\n\n # Not available, release and continue\n res_lock.release()\n\n # All 
resources are taken\n return app.make_response(('No available resource', 500))", "def getResourceData(self, authenticationToken, guid):\r\n pass", "def list(self):\n url = self._resource_name\n return self._get(url)", "def get_data(self, url):\n return self.get(url).get('data', [])", "def get(self, *args):\n return self.docs.get(*args)", "def ResourceList(self):\n url = AddToUrl(self, 'https://api.spiget.org/v2/resources?')\n return ApiSearch(url)", "def get():\n\n # \\todo List of available data, fetched and processed\n\n return jsonify({'valid_resources': ['balance', 'balance_usd', 'trade_history', 'balance_norm_price_history', 'open_orders']})", "def get(self, prefix, paths, data_type, use_models):\n pass", "def _get(self, *args, **kwargs):\n return self._request('get', *args, **kwargs)", "def get(self):\n\n try:\n args = api_parameters.parse_args()\n limit = args.get(\"limit\")\n controller = self.controller()\n schema = self.schema(many=True)\n raw_data = controller.get_list(**args)\n\n if limit:\n items = raw_data.items\n items = schema.dump(items)\n data = ResponseHandler.get_section_paginate(raw_data, items)\n\n else:\n data = schema.dump(raw_data)\n\n return ResponseHandler.render_response(data=data)\n\n except Exception as ex:\n\n return ResponseHandler.render_response(status=ERR, message=traceback.format_exc())", "def resources(self):", "def _get(self, path):\n r = requests.get(self._url(path))\n assert r.status_code == 200\n return r.json", "def get_resources(resource_client) -> list:\n resource_list = []\n paginator = resource_client.get_paginator(BOTO3_LIST_FUNCTION)\n pages = paginator.paginate()\n for page in pages:\n # Your going to have to look through the response and append the correct value to the list\n resource = page[\"something\"]\n resource_list = resource_list + resource\n return resource_list", "def get(self, *args, **kwargs):\n return self._request('get', *args, **kwargs)", "def get(resource, lookup):\n\n documents = []\n response = {}\n etag = None\n req = parse_request(resource)\n embedded_fields = _resolve_embedded_fields(resource, req)\n\n # facilitate cached responses\n if req.if_modified_since:\n # client has made this request before, has it changed?\n # this request does not account for deleted documents!!! (issue #243)\n preflight_req = copy.copy(req)\n preflight_req.max_results = 1\n\n cursor = app.data.find(resource, preflight_req, lookup)\n if cursor.count() == 0:\n # make sure the datasource is not empty (#243).\n if not app.data.is_empty(resource):\n # the if-modified-since conditional request returned no\n # documents, we send back a 304 Not-Modified, which means that\n # the client already has the up-to-date representation of the\n # resultset.\n status = 304\n last_modified = None\n return response, last_modified, etag, status\n\n # continue processing the full request\n last_update = epoch()\n req.if_modified_since = None\n cursor = app.data.find(resource, req, lookup)\n\n for document in cursor:\n _build_response_document(document, resource, embedded_fields)\n documents.append(document)\n\n # build last update for entire response\n if document[config.LAST_UPDATED] > last_update:\n last_update = document[config.LAST_UPDATED]\n\n status = 200\n last_modified = last_update if last_update > epoch() else None\n\n # notify registered callback functions. 
Please note that, should the\n # functions modify the documents, the last_modified and etag won't be\n # updated to reflect the changes (they always reflect the documents\n # state on the database.)\n\n getattr(app, \"on_fetched_resource\")(resource, documents)\n getattr(app, \"on_fetched_resource_%s\" % resource)(documents)\n\n if config.DOMAIN[resource]['hateoas']:\n response[config.ITEMS] = documents\n response[config.LINKS] = _pagination_links(resource, req,\n cursor.count())\n else:\n response = documents\n\n # the 'extra' cursor field, if present, will be added to the response.\n # Can be used by Eve extensions to add extra, custom data to any\n # response.\n if hasattr(cursor, 'extra'):\n getattr(cursor, 'extra')(response)\n\n return response, last_modified, etag, status", "def get(self, request, resource=None, **kwargs):\n if resource is not None and resource != '':\n return self.to_simple(request, resource, **kwargs)\n\n return self.to_simple(request, self.collection, many=True, **kwargs)", "def get_resource(self, resource, method='get', data=None, headers=None, json=False, **kwargs): # nolint\r\n method = getattr(self.client, method)\r\n resource, headers, data = self.get_params(\r\n resource, headers, data, **kwargs)\r\n\r\n # Support JSON request\r\n if json:\r\n headers['content_type'] = 'application/json'\r\n data = simplejson.dumps(data)\r\n\r\n response = method(resource, data=data, **headers)\r\n return self._jsonify(response)", "def resource_bundle(resource_type, methods=[\"GET\"]):\n token = validate_auth()\n url = current_app.config.get('MAP_API') + resource_type\n params = {'_count': 1000}\n params.update(request.args)\n resp = requests.get(url, auth=BearerAuth(token), params=params)\n try:\n resp.raise_for_status()\n except requests.exceptions.HTTPError as err:\n abort(err.response.status_code, err)\n\n return jsonify(resp.json())", "def get_data(self, request, url):\n data = request.get(endpoint=url)\n return data[0], data[1]", "def _fetch_resource(self):\n\n qs = self._build_qs() # Build the query string\n url = self._build_url(qs) # Build the full url\n fp = self._api_call(url) # Fetch the data as a file pointer\n\n # Parse the list of dicts in to a dict generator\n return csv.DictReader(fp)", "def get_all(self):\n result_get = GetRest(function = self.function).performRequest()\n return result_get", "async def get_api_data(self, referer: str, params: list) -> list:\n return await asyncio.ensure_future(self.get_response(referer, params))", "def get(self, endpoint, data=None):\n if endpoint.startswith(\"http\"):\n url = endpoint\n elif endpoint.startswith(\"/\"):\n url = \"{}{}\".format(api_endpoint, endpoint)\n else:\n url = \"{}/{}\".format(api_endpoint, endpoint)\n\n response = requests.get(\n url,\n data=data,\n headers= {\n \"Content-Type\": \"application/json\",\n \"Accept\": \"application/json\",\n \"Authorization\": \"Bearer \" + self.token,\n }\n )\n\n if response.status_code == 200:\n return response.json()\n elif response.status_code == 404:\n print(\"404: Probably invalid endpoint\")\n else:\n print(\"ERROR IN REQUEST: {}\".format(response.content))\n return response", "def get(self, id):\n try:\n query_parameters = request.args\n logging.debug(f\" query params - {query_parameters}\")\n top, skip, expand_code, selects = parse_args(query_parameters)\n\n ds_list = Datastreams.filter_by_thing_id(\n id, top, skip, expand_code, selects\n )\n response = jsonify(ds_list)\n response.status_code = 200\n except Exception as e:\n logging.warning(e)\n response = 
jsonify({\"message\": \"error\"})\n response.status_code = 400\n return response\n\n finally:\n return response", "def list(self):\n return self.request(\"GET\")", "def get_api_resources(self, **kwargs):\n\n all_params = []\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method get_api_resources\" % key\n )\n params[key] = val\n del params['kwargs']\n\n\n resource_path = '/oapi/v1'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json', 'application/yaml'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type=None,\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def get(self, *args, **kwargs):\n return self._hit(\"GET\", *args, **kwargs)", "def get(self, *args, **kwargs) -> Any:\n return self.raw_data.get(*args, **kwargs)", "def get(self, *args, **kwargs):", "def get(self, request, **resources):\r\n\r\n instance = resources.get(self._meta.name)\r\n if not instance is None:\r\n return instance\r\n\r\n return self.paginate(\r\n request, self.get_collection(request, **resources))", "def get_rest_data_source(uri):\n def players_from_rest():\n response = requests.get(uri)\n response.raise_for_status()\n return parse_players_json(response.text)\n return players_from_rest", "async def get(self, path, params=None, json_data=None):\n response = await self.request('GET', path, params, json_data)\n return response", "def request_endpoints(self):\n\n endpoints_url = self.std[\"api\"]\n endpoints_paramd = {\n \"access_token\": self.std[\"access_token\"]\n }\n\n endpoints_response = requests.get(url=endpoints_url, params=endpoints_paramd)\n print endpoints_response\n self.endpointd = endpoints_response.json()[0]", "def getResource(self, authenticationToken, guid, withData, withRecognition, withAttributes, withAlternateData):\r\n pass", "def get(self, start=None, limit=None):\r\n params = base.get_params(None, locals())\r\n return http.Request('GET', self.get_url(), params), parsers.parse_json", "def get_data_by_uuid(request, uuid, hours):\n\tif request.method == 'GET':\n\t\treadings = get_data_by_uuid_helper(uuid, hours)\n\t\treturn Response(readings)", "def GetAllResourcesSample():\n client = CreateClient()\n # Unlike client.GetResources, this returns a list of resources\n for resource in client.GetAllResources():\n PrintResource(resource)", "async def get(self, **context):\n return {}", "def get(self, *args, **kwargs):\n url = urljoin(self.instance(), args[0])\n return self._requests_call(util.requests_get, url, *args[1:], **kwargs)", "def index():\n return make_json_response(ENDPOINT_LIST)", "def get(self):\r\n request = http.Request('GET', self.get_url())\r\n\r\n return request, parsers.parse_json", "def get(self):\r\n request = http.Request('GET', self.get_url())\r\n\r\n return 
request, parsers.parse_json", "def get(self):\r\n request = http.Request('GET', self.get_url())\r\n\r\n return request, parsers.parse_json", "def get(self):\r\n request = http.Request('GET', self.get_url())\r\n\r\n return request, parsers.parse_json", "def get(self):\r\n return get_all()", "def _get_all_data(self, resource):\n response = self._get_raising('{}{}?per_page=100&page=1'.format(\n self.GH_API_ENDPOINT, resource\n ))\n yield from response.json()\n while 'next' in response.links:\n response = self._get_raising(response.links['next']['url'])\n yield from response.json()", "def getResourceDef(url, user, pWd, resourceName):\n \n print(\"getting resource for catalog:-\" + url + \" resource=\" + resourceName +\n ' user=' + user)\n apiURL = url + '/access/1/catalog/resources/' + resourceName\n # print(\"\\turl=\" + apiURL)\n header = {\"Accept\": \"application/json\"} \n tResp = requests.get(apiURL, params={}, headers=header, auth=HTTPBasicAuth(user,pWd))\n print(\"\\tresponse=\" + str(tResp.status_code))\n if tResp.status_code == 200:\n # valid - return the jsom\n return tResp.status_code, json.loads(tResp.text)\n else:\n # not valid\n return tResp.status_code, None", "def _get(self):\n return self.request(method=\"get\", path=self.router.fields)", "def get(self, *args, **kwargs):\n pass", "def get(self, *args, **kwargs):\n pass", "def process_resource_listing_api(self, resources, listing_api, context):\n pass", "def get(cls, client, name=\"\", option_=\"\") :\n\t\ttry :\n\t\t\tif not name :\n\t\t\t\tobj = l3param()\n\t\t\t\tresponse = obj.get_resources(client, option_)\n\t\t\treturn response\n\t\texcept Exception as e :\n\t\t\traise e", "def get(self, embed=None, fields=None):\r\n params = base.get_params(None, locals())\r\n\r\n return http.Request('GET', self.get_url(), params), parsers.parse_json", "def get(self, format=None, page=None, per_page=None):\r\n url = self.get_url()\r\n params = base.get_params(('page', 'per_page'), locals())\r\n headers = resource.mimetype_accept(format)\r\n\r\n return http.Request('GET', url, params, headers), parsers.parse_json", "def get(self, Muted=None, Page=None, PageSize=None, AfterSid=None):\r\n params = resource.get_params(None, locals())\r\n request = http.Request('GET', self.get_url(), params)\r\n\r\n return request, parsers.parse_json", "def get(self, *args):", "def get(self, api_path, *args, **kwargs):\n\n\t\treturn self._do_operation(u'get', api_path, *args, **kwargs)", "def get_all_data():\n return jsonify(service.get_all_data())", "def get(self, path: str) -> Response:\n endpoint_ = checkEndpoint(\"GET\", path)\n if not endpoint_[\"method\"]:\n # If endpoint and Get method not supported in the API\n abort(endpoint_[\"status\"])\n return item_collection_get_response(path)", "def _get(self, path, params=None):\n return self._api.get_json(path, headers={\"Hawkular-Tenant\": self.tenant_id}, params=params)", "def resources(self, query):\n return _MockResponse()", "def resource_by_id(resource_type, resource_id, methods=[\"GET\"]):\n token = validate_auth()\n url = f\"{current_app.config.get('MAP_API')}{resource_type}/{resource_id}\"\n resp = requests.get(url, auth=BearerAuth(token))\n try:\n resp.raise_for_status()\n except requests.exceptions.HTTPError as err:\n abort(err.response.status_code, err)\n\n return jsonify(resp.json())", "def get_data(self):", "def get(self, *args, **kwargs):\n self.request(\"get\", *args, **kwargs)", "def getAll(self):\n result_get = GetRest(function = self.function).performRequest()\n return result_get", "def 
get_resources(genome_build, fasta_ref, data):\n config = tz.get_in([\"config\", CONFIG_KEY], data)\n find_fn = _find_file(config)\n def normalize(f):\n return _get_id_fname(f)[-1]\n return sret.get_resources(genome_build, fasta_ref, config,\n data, _open_remote, _list(config), find_fn, normalize)", "def get(log, session, args):\n url = \"{}datasets/{}\".format(\n http.get_api_url(args.url, args.project),\n args.id)\n log.debug('GET: {}'.format(url))\n response_json = http.get(session, url)\n log.print_json(response_json, \"dataset\", \"get\")" ]
[ "0.7040653", "0.7014533", "0.6972194", "0.6868909", "0.6711269", "0.67088366", "0.6694223", "0.6652421", "0.65987545", "0.6597155", "0.6576477", "0.6572885", "0.65307814", "0.6527663", "0.6498717", "0.6484466", "0.64421904", "0.64192057", "0.63806015", "0.63806015", "0.63806015", "0.63806015", "0.63806015", "0.63404465", "0.634041", "0.63244736", "0.63162327", "0.6315349", "0.63125116", "0.63095367", "0.62898207", "0.62898207", "0.6270105", "0.6263308", "0.6260993", "0.6241089", "0.6229589", "0.62243724", "0.62228745", "0.62014276", "0.6190981", "0.6175432", "0.6169815", "0.6157241", "0.61440367", "0.61310714", "0.6126089", "0.6111376", "0.6110776", "0.60908294", "0.60900164", "0.6089801", "0.60857785", "0.60846335", "0.6081765", "0.6078532", "0.6076053", "0.6060969", "0.6055173", "0.6053119", "0.60488534", "0.6045393", "0.6034385", "0.6031441", "0.60314035", "0.6027085", "0.60203844", "0.60096425", "0.60088027", "0.6005958", "0.6000206", "0.5995594", "0.5993712", "0.59908074", "0.59869915", "0.59869915", "0.59869915", "0.59869915", "0.5983409", "0.5978004", "0.59772706", "0.59753376", "0.5975232", "0.5975232", "0.5970855", "0.59611374", "0.5961012", "0.5948385", "0.5946955", "0.5942093", "0.5935907", "0.59342885", "0.5932361", "0.5929217", "0.59288013", "0.5919266", "0.591631", "0.5916145", "0.5914592", "0.59103477", "0.5907958" ]
0.0
-1
get data from projected bill endpoint
async def __getFromProjectedBill(self, account, premise, currentBillDate) -> dict:
        data = {}

        try:
            async with async_timeout.timeout(TIMEOUT):
                response = await self.session.get(
                    URL_RESOURCES_PROJECTED_BILL.format(
                        account=account,
                        premise=premise,
                        lastBillDate=currentBillDate.strftime("%m%d%Y"),
                    )
                )
                if response.status == 200:
                    projectedBillData = (await response.json())["data"]

                    billToDate = float(projectedBillData["billToDate"])
                    projectedBill = float(projectedBillData["projectedBill"])
                    dailyAvg = float(projectedBillData["dailyAvg"])
                    avgHighTemp = int(projectedBillData["avgHighTemp"])

                    data["bill_to_date"] = billToDate
                    data["projected_bill"] = projectedBill
                    data["daily_avg"] = dailyAvg
                    data["avg_high_temp"] = avgHighTemp

        except Exception as e:
            _LOGGER.error(e)

        return data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def __getBBL_async(self, account, projectedBillData) -> dict:\n _LOGGER.info(\"Getting budget billing data\")\n data = {}\n\n try:\n async with async_timeout.timeout(TIMEOUT):\n response = await self.session.get(\n URL_BUDGET_BILLING_PREMISE_DETAILS.format(account=account)\n )\n if response.status == 200:\n r = (await response.json())[\"data\"]\n dataList = r[\"graphData\"]\n\n # startIndex = len(dataList) - 1\n\n billingCharge = 0\n budgetBillDeferBalance = r[\"defAmt\"]\n\n projectedBill = projectedBillData[\"projected_bill\"]\n asOfDays = projectedBillData[\"as_of_days\"]\n\n for det in dataList:\n billingCharge += det[\"actuallBillAmt\"]\n\n calc1 = (projectedBill + billingCharge) / 12\n calc2 = (1 / 12) * (budgetBillDeferBalance)\n\n projectedBudgetBill = round(calc1 + calc2, 2)\n bbDailyAvg = round(projectedBudgetBill / 30, 2)\n bbAsOfDateAmt = round(projectedBudgetBill / 30 * asOfDays, 2)\n\n data[\"budget_billing_daily_avg\"] = bbDailyAvg\n data[\"budget_billing_bill_to_date\"] = bbAsOfDateAmt\n\n data[\"budget_billing_projected_bill\"] = float(projectedBudgetBill)\n\n async with async_timeout.timeout(TIMEOUT):\n response = await self.session.get(\n URL_BUDGET_BILLING_GRAPH.format(account=account)\n )\n if response.status == 200:\n r = (await response.json())[\"data\"]\n data[\"bill_to_date\"] = float(r[\"eleAmt\"])\n data[\"defered_amount\"] = float(r[\"defAmt\"])\n except Exception as e:\n _LOGGER.error(e)\n\n return data", "def get_data(self):", "def get_data():\n pass", "def get_data():\n return", "def fetch_data(self):", "def get_bill_details(request):\n\n print request\n\n context = request['context']\n print context\n try:\n telephone_number = first_entity_value(request['entities'], 'phone_number')\n with open(os.path.join(sys.path[0], \"app/wit/static/users.json\"), \"r\") as data_file:\n data = json.load(data_file)\n customer_billing = data[telephone_number]['last_month_billing']\n print customer_billing\n\n customer_type = data[telephone_number]['type_customer']\n if customer_type == 'postpaid':\n\n reply = \"Our Initial Investigation shows that you're a \" + data[telephone_number]['type_customer'] + \" Customer and currently using \" + data[telephone_number]['plan_details'] + \" plan type.\"\n if customer_billing['roaming'] == 'True':\n reply += \"You had used your cellphone while on roaming for which you were charged extra.\"\n elif customer_billing['data_exhaust'] == 'True':\n reply += \"You had used your data network after your allocated limit was exhausted. You were charged for these services\"\n elif customer_billing['subscribed'] == 'True':\n reply += \"You had subscribed to some promotional services for which you were charged in extra.\"\n else:\n reply = \"Our Initial Investigation shows that you're a \" + data[telephone_number]['type_customer'] + \". We believe that this might be a mistake from our side and would like you to speak to our customer care executives separately.\"\n\n\n except:\n telephone_number = None\n reply = \"Your number is not subscribed with Airtel. 
Please contact your network operator for your query\"\n\n\n print reply\n\n context['bill_details'] = reply\n\n return context", "def get_data(endpoint_name, arg=None,\n project_name=None, fields=None, size=get_setting_value('DEFAULT_SIZE'), page=0,\n data_category=None, query_args={}, verify=False, *args, **kwargs):\n endpoint = get_setting_value('GDC_API_ENDPOINT').format(endpoint=endpoint_name)\n if arg:\n endpoint = endpoint+'/{}'.format(arg)\n else:\n ## prep extra-params, including `from` param, as dict\n extra_params = {}\n if page>0:\n from_param = helpers.compute_start_given_page(page=page, size=size)\n extra_params.update({\n 'from': from_param,\n })\n if fields:\n extra_params.update({'fields': ','.join(helpers.convert_to_list(fields))})\n if dict(**kwargs):\n ## TODO check on whether this handles redundant param spec \n ## correctly\n extra_params.update(dict(**kwargs))\n params = _params.construct_parameters(project_name=project_name,\n size=size,\n data_category=data_category,\n query_args=query_args,\n verify=verify,\n **extra_params\n )\n # requests URL-encodes automatically\n log.info('submitting request for {endpoint} with params {params}'.format(endpoint=endpoint, params=params))\n response = requests_get(endpoint, params=params)\n log.info('url requested was: {}'.format(response.url))\n response.raise_for_status()\n return response", "def get(self, args):\n return Payment.query.offset(args['offset']).limit(args['limit'])", "def get_data():\n pass", "def get_data():\n pass", "def get_data():\n pass", "def get_data(self):\n pass", "def get_data(self):\n pass", "def get():\n\n # \\todo List of available data, fetched and processed\n\n return jsonify({'valid_resources': ['balance', 'balance_usd', 'trade_history', 'balance_norm_price_history', 'open_orders']})", "def get_data(self, exportformat: str, oidrange: list):\r\n querystr = f\"?where=objectid+>%3D+{oidrange[0]}+AND+objectid+<%3D+{oidrange[1]}&text=&objectIds=&time=&geometry=&geometryType=esriGeometryEnvelope&inSR=&spatialRel=esriSpatialRelIntersects&relationParam=&outFields=*&returnGeometry=true&returnTrueCurves=false&maxAllowableOffset=&geometryPrecision=&outSR=&returnIdsOnly=false&returnCountOnly=false&orderByFields=&groupByFieldsForStatistics=&outStatistics=&returnZ=false&returnM=false&gdbVersion=&returnDistinctValues=false&resultOffset=&resultRecordCount=&queryByDistance=&returnExtentsOnly=false&datumTransformation=&parameterValues=&rangeValues=&f={exportformat}\"\r\n req = requests.get(self.endpointurl + querystr)\r\n return req.json()", "def get_data(self):\r\n pass", "def plan_get(request):\n company = auth_api_key(request)\n plan = get_and_check_plan(request, company)\n return plan", "def _get_data(self):\n raise NotImplementedError()", "def get(self):\n # req = requests.get(\"\")\n # data = req.json()\n # print(data)\n query = Event.query.all()\n event_first_bill_pay(data=query, event_type=\"bill payment\")\n threading.Thread(target=event_notify_user, args=(\"Notify User\", query,)).start()\n threading.Thread(target=event_post_feedback, args=(\"Post Feeback\",)).start()\n return query", "def get(self, http, req_dict):\n\n\t\t# Turn account numbers into integers and remove duplicates\n\t\taccount_numbers = self.get_account_numbers( req_dict['account_numbers[]'] )\n\n\t\t# A primary did is a Physical line, with OFN disabled (e.g. 
not a softphone),\n\t\t# which is connected to a primary rate plan\n\n\t\tprimary_did_sql = \"\"\"\n\t\t\tselect distinct did.account_number,\n\t\t\t did.phone_number\n\t\t\t from did_numbers did\n\n\t\t\t join DID_BILLING_ITEM dbi\n\t\t\t on dbi.DID_NUMBER_ID = did.DID_NUMBER_ID\n\t\t\t and dbi.ACCOUNT_NUMBER = did.ACCOUNT_NUMBER\n\t\t\t and dbi.STATUS = 'A'\n\n\t\t\t join AP_REPORT.ITEM_MASTER_LU im\n\t\t\t on im.ITEM_CODE = dbi.SERVICE_ITEM_CODE\n\t\t\t and im.ITEM_TYPE = 'RATE_PLAN'\n\t\t\t and im.USE_CLASS = 'PRIMARY'\n\n\t\t\t where (did.date_end is null or did.date_end > SYSDATE + 90)\n\t\t\t and did.did_type_id = 'PHY'\n\t\t\t and did.ofn_enabled = 'N'\n\t\t\t and did.account_number in %s\n\t\t\t\"\"\" % self.account_sql_list\n\n\t\taccount_sql = \"\"\"\n\t\t\tselect\n\t\t\ta.ACCOUNT_NAME,\n\t\t\ta.STATUS,\n\t\t\tto_char(a.DATE_CREATED, 'yyyy-mm-dd hh24:mi:ss'),\n\t\t\ta.ACCOUNT_NUMBER,\n\t\t\tlower(au.USER_NAME),\n\t\t\tau.EMAIL_ADDRESS,\n\t\t\ta.PARTNER_ID,\n\t\t\tast.ACCOUNT_STATUS_TYPE,\n\t\t\tto_char(a.DATE_START, 'yyyy-mm-dd hh24:mi:ss'),\n\t\t\tto_char(a.DATE_END, 'yyyy-mm-dd hh24:mi:ss'),\n\t\t\tto_char(a.BILL_CYCLE_DATE_START, 'yyyy-mm-dd hh24:mi:ss'),\n\t\t\tto_char(a.BILL_CYCLE_DATE_END, 'yyyy-mm-dd hh24:mi:ss'),\n\t\t\tau.FIRST_NAME,\n\t\t\tau.LAST_NAME,\n\t\t\tat.ACCOUNT_TYPE,\n\t\t\tau.CONTACT_TELEPHONE,\n\t\t\ttz.TIMEZONE_ID,\n\t\t\ttz.TIMEZONE_NAME\n\t\t\tfrom\n\t\t\tACCOUNT a,\n\t\t\tACCOUNT_STATUS_TYPE ast,\n\t\t\tACCOUNT_TYPE at,\n\t\t\tACCOUNT_USER au,\n\t\t\tTIMEZONE tz\n\t\t\twhere a.ACCOUNT_NUMBER in %s\n\t\t\tand au.ACCESS_LEVEL = 'acctadmin'\n\t\t\tand ast.ACCOUNT_STATUS_TYPE_ID = a.STATUS\n\t\t\tand a.ACCOUNT_TYPE_ID = at.ACCOUNT_TYPE_ID\n\t\t\tand a.ACCOUNT_NUMBER = au.ACCOUNT_NUMBER\n\t\t\tand a.TIMEZONE_NUM = tz.TIMEZONE_NUM\n\t\t\torder by a.ACCOUNT_NUMBER\n\t\t\t\"\"\" % self.account_sql_list\n\n\t\tstatus_dates_sql = \"\"\"\n\t\t\tselect\n\t\t\tACCOUNT_NUMBER,\n\t\t\tto_char(COMPLETED_DATE, 'yyyy-mm-dd hh24:mi:ss'),\n\t\t\tACCOUNT_STATUS_TYPE_ID\n\t\t\tfrom account_status\n\t\t\tWHERE CURRENT_FLAG = 'Y'\n\t\t\tAND ACCOUNT_NUMBER in %s\n\t\t\t\"\"\" % self.account_sql_list\n\n\t\tlower_limit = 0\n\n\t\tret_obj = AccountResults()\n\n\t\twhile lower_limit < len(account_numbers):\n\t\t\t# Take self.account_sql_list_size (e.g. 
10) account numbers at a time.\n\n\t\t\tparams = {}\n\t\t\tfor idx in xrange(0, self.account_sql_list_size):\n\t\t\t\tkey = \"account_number\" + str(idx + 1)\n\t\t\t\tif (lower_limit + idx) >= len(account_numbers):\n\t\t\t\t\tparams[key] = None\n\t\t\t\telse:\n\t\t\t\t\tparams[key] = account_numbers[lower_limit + idx]\n\n\t\t\tlower_limit += self.account_sql_list_size\n\n\t\t\t# Find the primary dids for these account numbers seperately,\n\t\t\t# because the query is crazy.\n\n\t\t\tauth_file = 'ps_oracle_sqlr'\n\n\t\t\tcurs = self.execute_query(auth_file, primary_did_sql, params)\n\t\t\trows = curs.fetchall()\n\t\t\tcurs.close()\n\n\t\t\tprimary_dids = self.get_primary_dids(rows)\n\n\t\t\t# Find Status Updated Dates and Account Status\n\n\t\t\tcurs = self.execute_query(auth_file, status_dates_sql, params)\n\t\t\trows = curs.fetchall()\n\t\t\tcurs.close()\n\n\t\t\tstatus_dates = self.get_status_dates(rows)\n\n\t\t\taccount_status = self.get_status(rows)\n\n\t\t\t# Find the rest of the data.\n\n\t\t\tcurs = self.execute_query(auth_file, account_sql, params)\n\t\t\trows = curs.fetchall()\n\t\t\tcurs.close()\n\n\t\t\tif curs.rowcount < 1:\n\t\t\t\traise RestDatabaseNotFound(\"None of the accounts were found.\")\n\n\t\t\tfor row in rows:\n\t\t\t\taccount = Account()\n\t\t\t\taccount.account_name = row[0]\n\t\t\t\taccount.status_code = row[1]\n\t\t\t\taccount.date_created = row[2]\n\t\t\t\tif account.date_created is not None:\n\t\t\t\t\taccount.date_created = account.date_created + ' +0000'\n\t\t\t\taccount.account_number = row[3]\n\t\t\t\t# Get the Account Status from the Account_Status Table from VDV.\n\t\t\t\tif account_status.has_key(account.account_number):\n\t\t\t\t\taccount.status_code = account_status.get(account.account_number)\n\t\t\t\telse:\n\t\t\t\t\taccount.status_code = row[1]\n\t\t\t\t\t# Log the account number as not having an active status in the account_status table\n\t\t\t\t\tself.log.debug(\"Account does not have an Active Status in the accout_status table. 
Account Number:\"+ account.account_number)\n\t\t\t\taccount.username = row[4]\n\t\t\t\taccount.email = row[5]\n\t\t\t\taccount.partner = row[6]\n\t\t\t\taccount.status = row[7]\n\t\t\t\taccount.account_start_date = row[8]\n\t\t\t\tif account.account_start_date is not None:\n\t\t\t\t\taccount.account_start_date = account.account_start_date + ' +0000'\n\t\t\t\taccount.account_end_date = row[9]\n\t\t\t\tif account.account_end_date is not None:\n\t\t\t\t\taccount.account_end_date = account.account_end_date + ' +0000'\n\t\t\t\taccount.bill_cycle_start_date = row[10]\n\t\t\t\tif account.bill_cycle_start_date is not None:\n\t\t\t\t\taccount.bill_cycle_start_date = account.bill_cycle_start_date + ' +0000'\n\t\t\t\taccount.bill_cycle_end_date = row[11]\n\t\t\t\tif account.bill_cycle_end_date is not None:\n\t\t\t\t\taccount.bill_cycle_end_date = account.bill_cycle_end_date + ' +0000'\n\t\t\t\taccount.first_name = row[12]\n\t\t\t\taccount.last_name = row[13]\n\t\t\t\taccount.account_type = row[14]\n\t\t\t\taccount.contact_telephone = row[15]\n\t\t\t\taccount.time_zone = row[16]\n\t\t\t\taccount.time_zone_name = row[17]\n\t\t\t\taccount.primary_did_numbers.did = primary_dids.get(account.account_number, [])\n\t\t\t\taccount.status_updated_date = status_dates.get(account.account_number)\n\n\t\t\t\tself.db_free()\n\n\t\t\t\tret_obj.account_info.append(account)\n\n\t\treturn ret_obj.dumps(SERVICE_ATTRIBUTES)", "def getStockData():\n pass", "def fetchGeoData():\n if request.method ==\"POST\":\n result = {}\n if request.get_json():\n post_requests = request.get_json()\n print(post_requests)\n result = db.getmapdata(post_requests['attr']) \n return result", "def local_bonds_prices():\n url1 = \"https://api.invertironline.com/token\"\n\n data = {\n \"username\": usuario,\n \"password\": password,\n \"grant_type\": \"password\" \n }\n response = requests.post(url1, data=data)\n if response.status_code == 200:\n content = response.text\n access_key = token_key(content)\n\n url2 = f'https://api.invertironline.com/api/v2/Cotizaciones/Bonos/Merval/argentina'\n datos = requests.get(url2, headers={\n 'Authorization': 'Bearer '+access_key\n })\n datos = json.loads(datos.text)\n datos = datos['titulos']\n datos = clean_assets(datos)\n return datos", "def get_data( obj, prm, lev, date, timelevel=0 ):\n \n parameter = obj( name = prm, level = lev, dataDate = date )[ timelevel ]\n print( parameter.dataDate )\n \n #-----Checking grit type----------------------------------------------\n if parameter.gridType == \"sh\":\n lat, lon, data = sh( parameter.values )\n elif parameter.gridType == \"reduced_gg\":\n lat, lon = parameter.latlons() #very easy implementastion with a gg\n lon = lon - 180. #else it only draws on half the map\n data = parameter.values\n elif parameter.gridType == \"regular_gg\":\n lat, lon = parameter.latlons() #very easy implementastion with a gg\n lon = lon - 180. 
#else it only draws on half the map\n data = parameter.values\n else: \n print ( parameter.gridType )\n \n return lat, lon, data", "def plan_list_get(request):\n return list_by_company_guid(request, PlanModel)", "def get(self):\n return {'bills': [bill.json() for bill in BillModel.find_all()]}", "def _fetch_data(self):\n pass", "def _get_dapall_from_api(self):\n\n url = marvin.config.urlmap['api']['dapall']['url']\n\n url_full = url.format(name=self.plateifu,\n bintype=self.bintype.name,\n template=self.template.name)\n\n try:\n response = self._toolInteraction(url_full)\n except Exception as ee:\n raise MarvinError('found a problem while getting DAPall: {0}'.format(str(ee)))\n\n if response.results['error'] is not None:\n raise MarvinError('found a problem while getting DAPall: {}'\n .format(str(response.results['error'])))\n\n data = response.getData()\n\n return data['dapall_data']", "def get(self, request):\n if 'brokerage' in request.GET:\n portfolios = Portfolio.objects.filter(bdc_user=self.request.user, brokerage=request.GET['brokerage'])\n else:\n portfolios = Portfolio.objects.filter(bdc_user=self.request.user)\n orders = Order.objects.filter(portfolio__in=portfolios).order_by('-date')\n return Response(OrderSerializer(orders, many=True).data)", "def handle(self):\n self.validate()\n if self.errors:\n raise InvalidDataException(self.errors)\n\n phone_number = self.data.get('phone_number')\n month = self.data.get('month')\n year = self.data.get('year')\n\n # if a period was not informed, get the current last one\n if not month and not year:\n year, month = last_period()\n\n bill_data = Bill.data_by_number_period(phone_number, month, year)\n return bill_data", "def get_data_from_endpoint(self, from_, to_, endpoint):\n endpoint = self.make_endpoint(endpoint)\n from_, to_ = str(from_), str(to_)\n payload = {\n 'auth': self.auth_token,\n 'id': self.monitor_id,\n 'start': from_,\n 'end': to_,\n 'extendLimit': 'true',\n 'fullContents': 'true'\n }\n\n r = self.session.get(endpoint, params=payload)\n ratelimit_remaining = r.headers['X-RateLimit-Remaining']\n #print ('Remaining Ratelimit = ' + str(ratelimit_remaining))\n\n # If the header is empty or 0 then wait for a ratelimit refresh.\n if (not ratelimit_remaining) or (float(ratelimit_remaining) < 1):\n #print('Waiting for ratelimit refresh...')\n sleep(self.ratelimit_refresh)\n\n return r", "def get_data(self, state=None, request=None):\n raise NotImplementedError", "def fetch(self, grid):\n with self.session as s:\n # Recursively handle either a grid or a single bound\n if type(grid[0]) is list:\n with click.progressbar(grid, label='Fetching journal') as g:\n for bound in g:\n self.fetch(bound)\n elif type(grid[0]) is tuple:\n # Construct the web request from the coordinates\n poly = ''.join(['{};{};'.format(point[0], point[1]) for point in grid])\n payload = {\n 'qt': 'spatial',\n 'pts': poly,\n 'rad': 0,\n 'rights': 'B'\n }\n url = 'https://alta.registries.gov.ab.ca/SpinII/SearchTitlePrint.aspx'\n\n sleep(3)\n r = s.get(url, params=payload)\n soup = BeautifulSoup(r.content, 'html.parser')\n\n # Extract the table and load into a DataFrame\n try:\n table = soup.find('table', class_='bodyText')\n df = pd.read_html(str(table), index_col=0, header=0, parse_dates=False)[0]\n df['Registration Date'] = pd.to_datetime(df['Registration Date'], format='%d/%m/%Y')\n df['Change/Cancel Date'] = pd.to_datetime(df['Change/Cancel Date'], format='%d/%m/%Y')\n\n self.data.append(df)\n except:\n pass", "def getDataContaining(self, address: 
ghidra.program.model.address.Address) -> ghidra.program.model.listing.Data:\n ...", "def get(self):\n args = parser_hms.parse_args()\n if args['amount'] == 'all':\n return GenericGet().get_data(args,1,False)\n elif args['amount'] == 'nearest':\n return GenericGet().get_data(args,1,True)", "def get(self, request, p_name, conn_name):\n project = Project.objects.get(name=p_name)\n connector = project.connector_set.filter(name=conn_name)\n serializer = ConnectorSerializer(connector[0], many=False)\n # Not modifying this as it works in tandem with the Thingworx app.\n return Response(serializer.data)", "def calculate_demo(self, request, parent_lookup_client, pk, format=None):\n\n retirement_plan = self.get_object()\n tickers = Ticker.objects.filter(~Q(state=Ticker.State.CLOSED.value))\n portfolio = []\n projection = []\n for idx, ticker in enumerate(tickers[:10]):\n percent = 0\n if idx <= 9:\n # 10% each for first 10 tickers\n percent = 10\n portfolio.append([ticker.id, percent])\n # grab 50 evenly spaced time points between dob and current time\n today = timezone.now().date()\n last_day = retirement_plan.client.date_of_birth + relativedelta(years=retirement_plan.selected_life_expectancy)\n day_interval = (last_day - today) / 49\n income_start = 20000\n assets_start = 100000\n for i in range(50):\n income = income_start + (i * 50)\n assets = assets_start + (i * 1000)\n dt = today + i * day_interval\n projection.append([d2ed(dt), assets, income])\n return Response({'portfolio': portfolio, 'projection': projection})", "def object_get(self, request):\n _view = _object_view(self, request)\n queried = ObjectPostings(self.cdb_object_id, request.params.mixed()).query()\n postings = [request.view(obj) for obj in queried[0]]\n\n _view.update({\n \"postings\": postings,\n \"result_complete\": queried[1]\n })\n return _view", "def get_data(self, request, url):\n data = request.get(endpoint=url)\n return data[0], data[1]", "def get(self):\n args = parser_degree.parse_args()\n if args['amount'] == 'all':\n return GenericGet().get_data(args,0,False)\n elif args['amount'] == 'nearest':\n return GenericGet().get_data(args,0,True)", "def get_pronto_data():\n download_if_needed(\"https://s3.amazonaws.com/pronto-data/open_data_year_one.zip\",\n \"open_data_year_one.zip\")", "def get(self, bill_id):\n try:\n bill = BillModel.find_by_id(bill_id)\n except:\n return {\"message\": \"An error occurred finding the item.\"}, 500 # Internal server error\n\n if bill:\n return bill.json()\n return {'message': 'Bill not found'}, 404", "def _get_floorplans(self, url):\n \n try:\n jdict = self._load_json(url)\n floorplans_groups = jdict['props']['homeDetails']['floorPlans']['floorPlanGroups']\n address_data = list(self._get_address(jdict))\n rental_data = []\n \n # different floorplans, e.g. 
studio, 1 bedroom 1 bathroom etc.\n for floorplans in floorplans_groups:\n plans = floorplans['plans']\n for section in plans:\n # this is the header \n section_data = self._get_section_data(section)\n rental_data.append(address_data+section_data+[url])\n units = section['units']\n # these are all the units under that header \n for unit in units:\n unit_data = self._get_section_data(unit)\n rental_data.append(address_data+unit_data+[url])\n return rental_data\n except:\n return None", "def ori_data_fbi(request):\n http = urllib3.PoolManager()\n\n\n #base_url=fbi_url(request)\n logging.Logger(base_url)\n print('I am inside the main function')\n # New request url\n request_url = base_url\n logging.Logger(request_url)\n\n\n payload = http.request('GET',\n request_url,\n headers={\n 'Content-Type': 'application/json',\n 'x-api-key': creds\n },\n fields={\n 'API_KEY':creds\n }\n )\n\n #*** only changing it for testing ***\n #return request_url\n # print(f'the type of payload is\\n {type(payload.data)}')\n print(payload.data)\n return load_into_bq(payload.data)\n #return payload.data", "def get(request, pk=None):\n calls = Price.objects.filter(pk=int(pk))\n serializer = PriceSerializer(calls, many=True)\n return Response(serializer.data, status=status.HTTP_200_OK)", "def get_data_from_web():\n pass", "def data():\n return volumes_fetchers.get_json_data()", "def test_companies_company_id_data_bill_credit_notes_get(self):\n pass", "def _get_batch(self):\n url = self._base_url + urlConfig.URLS['Project'] + '/' + self._project_id + '/batch'\n response = apiCall.get(self._get_token(), url,self._proxy, {}, 10)\n logging.debug(response)\n return response", "def getDataAt(self, address: ghidra.program.model.address.Address) -> ghidra.program.model.listing.Data:\n ...", "async def get_all_record():\n # X_new = item.to_df()\n # item_str = item.to_string()\n # project_code = int(item_str[item_str.find('=')+1:])\n pg = PostgreSQL()\n return_json = pg.fetch_all_records()\n return return_json", "def connect_data_api(self, endpoint):\n\n url = 'https://api.gdax.com' + endpoint\n res = requests.get(url)\n\n if res.status_code == 200:\n return res.json()\n else:\n raise ValueError(res.content)", "def get_data(self):\n raise NotImplementedError(\"Not implemented!\")", "def get():", "def get():", "def bcp_get(self, **kwargs):\n pass", "def _get_consumption(self, url, start, end, aggregation):\n start = self._to_milliseconds(start)\n end = self._to_milliseconds(end)\n\n headers = {\"Authorization\": \"Bearer {}\".format(self.access_token)}\n params = {\n \"aggregation\": aggregation,\n \"from\": start,\n \"to\": end\n }\n r = requests.get(url, headers=headers, params=params)\n r.raise_for_status()\n return r.json()", "def get(self):\n return_status = None\n result = {}\n try:\n log.debug(\"Summary info : \")\n #get the payload to influx DB\n url = \"http://localhost:8086/query\"\n querystring = {\"pretty\": \"true\", \"db\": \"IOT\",\n \"q\": sql }\n response = requests.request(\"GET\", url, params=querystring)\n r_d=json.loads(response.text)\n result_d=[]\n for rec in r_d['results'][0]['series']:\n for element in rec['values']:\n temp_d={}\n temp_d.update(rec['tags'])\n temp_d.update(dict(zip(rec['columns'],element)))\n result_d.append(temp_d)\n result['status'] = 1\n result['message']=result_d\n return_status = 200\n except ValueError as e:\n result = {}\n log.exception('Value Exception while fetching aggregate data')\n result['status'] = 0\n return_status = 400\n result['message'] = e.args[0]\n except :\n result 
= {}\n log.exception('Exception while aggregating the data')\n return_status = 500\n result['status'] = 0\n result['message'] = 'Internal Error has occurred while fetching aggregate data'\n finally:\n resp = Response(json.dumps(result), status=return_status, mimetype=\"application/json\")\n return resp", "def get(self):\n return self.get_data()", "def get_data(self, url):\n return self.get(url).get('data', [])", "def get(cls, plan_id):\n return cls().requests.get(f\"plan/{plan_id}\")", "def get_invoice(payload):\n response = requests.post(url, data=payload)\n return response.json()", "def get(self, request):\n source = request.GET.get(\"source\", \"BLR\")\n destination = request.GET.get(\"destination\", \"DEL\")\n dateofdeparture = request.GET.get(\"date_of_departure\", \"20191027\")\n resp = get_flights(source, destination, dateofdeparture)\n return Response(resp)", "def get_estimate(start_lat, start_lon, end_lat, end_lon, token):\n endpoint = \"https://api.uber.com/v1/estimates/price\"\n params = {\n 'start_latitude': start_lat,\n 'start_longitude': start_lon,\n 'end_latitude': end_lat,\n 'end_longitude': end_lon,\n 'server_token': token\n }\n r = requests.get(endpoint, params=params)\n r.raise_for_status()\n return r.json().pop('prices', [])", "def get_raw_mercadopago_info():\n mercadopago_client_id = os.getenv('MERCADOPAGO_CLIENT_ID')\n mercadopago_client_secret = os.getenv('MERCADOPAGO_CLIENT_SECRET')\n\n mp = MP(mercadopago_client_id, mercadopago_client_secret)\n logger.debug('Connecting with mercadopago')\n\n filters = {'status': 'approved'}\n offset = 0\n results = []\n while True:\n response = mp.search_payment(filters, limit=LIMIT, offset=offset)\n assert response['status'] == 200\n logger.debug(\n 'Getting response from mercadopago, paging %s', response['response']['paging'])\n results.extend(response['response']['results'])\n if len(response['response']['results']) < LIMIT:\n break\n offset += LIMIT\n\n logger.info('Got response from mercadopago, %d items', len(results))\n return results", "def get_amount():\n conn = None\n try:\n params = config()\n conn = psycopg2.connect(**params)\n cur = conn.cursor()\n\n #tas\n raw={}\n raw['plan']={}\n raw['actual']={}\n\n cur.execute(\"SELECT slc.*,plc.amount as amount_plan,ppm.amount as price \\\n ,to_char((slc.amount*ppm.amount)/aco.amount_actual_coal,'999.999') as cost_actual \\\n , to_char((plc.amount*ppm.amount)/aco.amount_actual_coal,'999.999') as cost_plan \\\n ,aco.amount_actual_coal \\\n FROM vw_tta_cps_sum_land_clearing_now AS slc \\\n LEFT JOIN vw_tta_cps_price_per_month AS ppm \\\n ON slc.int_month=ppm.month \\\n AND slc.year_id=ppm.year_id \\\n LEFT JOIN vw_tta_cps_plan_land_clearing AS plc \\\n ON slc.periode_month_id=plc.periode_month_id \\\n AND slc.land_clearing_location_id=plc.land_clearing_location_id \\\n LEFT JOIN vw_tta_cps_sr_all_per_month_plan_actual as aco \\\n ON aco.int_month=slc.int_month \\\n WHERE ppm.jenis_price=10 \\\n AND aco.plan_id=1\")\n rowcount=cur.rowcount\n print(\"The number of row: \", rowcount)\n row = cur.fetchone()\n counter=0\n item={}\n\n if rowcount>0:\n f=open('../../data_cost_per_ton.csv','w')\n f.write('AREA,PLAN,ACTUAL\\n')\n while row is not None:\n #print(row)\n raw[\"plan\"]=row[11]\n raw[\"actual\"]=row[10]\n f.write(str('RAW')+','+str(raw[\"plan\"])+','+str(raw[\"actual\"])+\"\\n\")\n\n row = cur.fetchone()\n\n if rowcount>0:\n f.close()\n cur.close()\n\n print(str(datetime.datetime.now())+' '+str(rowcount)+' row updated')\n except (Exception, psycopg2.DatabaseError) as error:\n 
print(str(datetime.datetime.now())+' '+str(error))\n finally:\n if conn is not None:\n conn.close()", "def get_details(self):", "def get_data(self):\n data_obj = VerticaGetter()\n if self.context_name == 'PARTS':\n param_list = [self.start_date,self.end_date,self.param_name]\n param_list = [str(elem) for elem in param_list]\n res = data_obj.get_visitor_analysis(param_list)\n else:\n res = data_obj.get_visitor_analysis()\n return res", "def listings_data():\n\n stmt = db.session.query(nyc).statement\n df = pd.read_sql_query(stmt, db.session.bind)\n df[\"latitude\"] = pd.to_numeric(df[\"latitude\"])\n df[\"longitude\"] = pd.to_numeric(df[\"longitude\"])\n df[\"accommodates\"] = pd.to_numeric(df[\"accommodates\"])\n\n data = df.to_dict(orient='index')\n # Create a dictionary entry for each row of metadata information\n # data = {}\n # for result in results:\n #\n # data[\"ID\"] = result[0]\n # data[\"LISTING_URL\"] = result[1]\n # data[\"NAME\"] = result[2]\n # data[\"HOST_ID\"] = result[3]\n # data[\"NEIGHBORHOOD\"] = result[4]\n # data[\"NEIGHBORHOOD_GROUP\"] = result[5]\n # data[\"CITY\"] = result[6]\n # data[\"ZIPCODE\"] = result[7]\n # data[\"LAT\"] = float(result[8])\n # data[\"LON\"] = float(result[9])\n #\n # print(data)\n\n return jsonify(data)", "def getPanchayatsAccurateData(request):\n if request.method == 'GET':\n panchayat=request.GET.get('panchayat', '')\n ptid=request.GET.get('ptid', '')\n block = request.GET.get('block', '')\n bid = request.GET.get('bid', '')\n district = request.GET.get('district', '')\n state = request.GET.get('state', '')\n limit=request.GET.get('limit', '')\n if limit == '':\n limit=500\n else:\n limit=int(limit)\n #PTID we need to make it exact. Finyaer make it current finyear\n panchayats = PanchayatStat.objects.filter(panchayat__name__icontains = panchayat, panchayat__id__icontains = ptid, panchayat__block__id__icontains = bid, panchayat__block__name__icontains=block, panchayat__block__district__name__icontains = district, panchayat__block__district__state__name__icontains = state, workDaysAccuracyIndex__gte = 90, finyear = '17')\n\n\n panchayats = panchayats[:limit]\n serializer = PanchayatStatSerializer(panchayats, many=True)\n return JsonResponse(serializer.data, safe=False)", "def get_data(rics: list, fields: list):\n data, err = ek.get_data(rics, fields)\n if err:\n print(err)\n return data", "def get_monthly_prism_ppt_data(year,month, plotPPTBounds):\n \"\"\" It is in the form of grid \"\"\"\n \n if(month<10):\n prism_file_path = \"PRISM_ppt_stable_4kmM3_\"+str(year)+\"0\"+str(month)+\"_bil.bil\"\n else:\n prism_file_path = \"PRISM_ppt_stable_4kmM3_\"+str(year)+str(month)+\"_bil.bil\" \n \n ppt_data = read_prism_bil(join(cf.root, cf.prism_dir, prism_file_path))\n \n hdr_dict = read_prism_hdr(join(cf.root, cf.prism_dir, prism_file_path).replace('.bil', '.hdr'))\n \n hdr_dict[\"ULXMAP\"] = float(hdr_dict[\"ULXMAP\"])\n hdr_dict[\"ULYMAP\"] = float(hdr_dict[\"ULYMAP\"])\n hdr_dict['NROWS'] = int(hdr_dict['NROWS'])\n hdr_dict['NCOLS'] = int(hdr_dict['NCOLS'])\n hdr_dict['XDIM'] = float(hdr_dict['XDIM'])\n hdr_dict['YDIM'] = float(hdr_dict['YDIM'])\n \n p1 = (hdr_dict[\"ULXMAP\"] - (hdr_dict['XDIM']/2), \n hdr_dict[\"ULYMAP\"] + (hdr_dict['YDIM']/2))\n\n p2 = (p1[0] + (hdr_dict['NCOLS']*hdr_dict['XDIM']),\n p1[1])\n\n p3 = (p2[0],\n p2[1] - (hdr_dict['NROWS']*hdr_dict['YDIM']))\n\n p4 = (p1[0],\n p3[1])\n \n lon_point_list = (p1[0], p2[0], p3[0], p4[0])\n lat_point_list = (p1[1], p2[1], p3[1], p4[1])\n \n ppt_bounds = Polygon(zip(lon_point_list, 
lat_point_list))\n \n if(plotPPTBounds):\n crs = {'init': 'epsg:4326'}\n m = folium.Map(zoom_start=10, tiles='cartodbpositron')\n polygon = gpd.GeoDataFrame(index=[0], crs=crs, geometry=[ppt_bounds]) \n \n folium.GeoJson(polygon).add_to(m)\n folium.LatLngPopup().add_to(m)\n m.save(\"Prism Bounds.html\")\n\n return ppt_bounds, ppt_data, hdr_dict", "def query_api(location):\n #bearer_token = obtain_bearer_token(API_HOST, TOKEN_PATH)\n bearer_token ='SHdrjUqMJXqXBKUc7bGIplM8y6tnbwZbXXDbWPCd9wWMP8tX9PdJrC5MZHwJRhb7jMtLjXxT-hsWjNf2OkdiDWd30HsS84AVI5iRnrpxkak3HbWXAdUKvraQ_wgXWXYx'\n response = transaction_search(bearer_token,location)\n response = response.get('businesses')\n return response", "def _offset_call(self, url, params) -> Dict:\n response = self._get(url, params=params)\n raise_on_error(response)\n return response.json()", "def precipitation():\n # Calculate the date 1 year ago from the last data point in the database\n #Create our session (link) from Python to the DB\n session = Session(engine)\n\n # Use query from notebook. Get the last date in database, then calc a year before \n last_date = session.query(func.max(Measurement.date)).first() \n year_ago = dt.date(2017, 8, 23) - dt.timedelta(days=365)\n # filter to one year ago \n twelve_months_precip = session.query(Measurement.date, Measurement.prcp).filter(Measurement.date >= year_ago).all()\n\n session.close()\n\n # create a list for results to jsonify \n\n list_data = []\n for months in twelve_months_precip:\n data = {}\n data[\"date\"] = months[0]\n data[\"prcp\"] = months[1]\n list_data.append(data)\n\n # jsonify the results \n\n return jsonify(list_data)", "def getPTData(*args):\n return args[0].Data.PTData.pt_data", "def GetGeData(self, *args, **kwargs):\n pass", "async def get_record(item: Item):\n X_new = item.to_df()\n item_str = item.to_string()\n project_code = int(item_str[item_str.find('=')+1:])\n pg = PostgreSQL()\n return_json = pg.fetch_query_given_project(project_code)\n return return_json", "def getReport(request):\n\n\t#parameters needed for different REST API's\n\tparams = {\n\t\t'rid':-1,\n\t\t'year':-1,\n\t\t'con_num':-1,\n\t\t'assign_num':-1,\n\t\t'item_num':-1,\n\t\t'wtype': -1,\n\t\t'payno': -1,\n\t\t'snap': 0, #default is 0 for snapshots (for now)\n\t\t'issue_date': -1,\n\t}\n\n\t#loop over the parameters and set them if they appear in the api url\n\tfor p in params:\n\t\tif p in request.GET:\n\t\t\tparams[p] = request.GET[p]\n\n\n\t#get the request session and load data\n\ts = requests.Session()\n\tif not isinstance(rgen.ReportGenerator.get_url(params), dict):\n\t\tresponse = s.get(rgen.ReportGenerator.get_url(params))\n\n\t\t#set the iterator and the content\n\t\tit = json.loads(response.content)\n\t\tcontent = json.loads(response.content)\n\t\t\n\t\t#while a next page exists, parse the api\n\t\tpageNum = 1\n\t\twhile \"next\" in it:\n\t\t\tresponse = s.get(rgen.ReportGenerator.get_url(params) + '?page=' + str(pageNum))\n\t\t\tit = json.loads(response.content)\n\t\t\tcontent[\"items\"].extend(it[\"items\"])\n\t\t\tpageNum += 1\n\n\telse:\n\t\t#if the url is a list\n\t\tcontent = {}\n\t\tfor part in rgen.ReportGenerator.get_url(params):\n\t\t\tresponse = s.get(rgen.ReportGenerator.get_url(params)[part])\n\t\t\tit = json.loads(response.content)\n\t\t\t#content = {\"part1\":{\"items\":[]}, \"part2\":{\"items\":[]}, \"part3\":{\"items\":[]}}\n\t\t\t\n\t\t\tcontent[part] = {}\n\t\t\tcontent[part][\"items\"] = []\n\t\t\tcontent[part][\"items\"].extend(it[\"items\"])\n\n\t\t\tpageNum = 1\n\t\t\twhile \"next\" 
in it:\n\t\t\t\tresponse = s.get(rgen.ReportGenerator.get_url(params)[part] + '?page=' + str(pageNum))\n\t\t\t\tit = json.loads(response.content)\n\t\t\t\tcontent[part][\"items\"].extend(it[\"items\"])\n\t\t\t\tpageNum += 1\n\t\n\t#set the file object to be returned as a download\n\tfile = HttpResponse(rgen.ReportGenerator.formExcel(content, params), content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')\n\tif params[\"rid\"] == '70':\n\t\tfile['Content-Disposition'] = 'attachment; filename=' + rgen.r_dict[params[\"rid\"]][1] + ' No.' + params['issue_date'] + '.xlsx'\n\telse:\n\t\tfile['Content-Disposition'] = 'attachment; filename=' + rgen.r_dict[params[\"rid\"]][1] + '.xlsx'\n\ts.close()\n\treturn file", "def _get(self, path, params=None):\n return self._api.get_json(path, headers={\"Hawkular-Tenant\": self.tenant_id}, params=params)", "def get_data(self, lat=53.3498, lon=-6.2603):\n r = requests.get(\n f\"https://api.openweathermap.org/data/2.5/onecall?lat={lat}&lon={lon}&appid=92fb08f48a98e0f39b990060352ffebe\")\n return r.text", "def getData(self, local_cache):", "def get_floor_plan(port_id):\n url = 'https://api.archisketch.com/v1/public/projects/'\n response = requests.get(url + port_id + '/detail')\n response = response.json()['project']\n floor_plan = response['floorplans'][0]\n return floor_plan", "def get(self, request, unit_id):\n start_date = get_start_date(request)\n end_date = get_end_date(request)\n readings = MeterReading.objects.filter(\n unit=unit_id,\n date__gte=start_date,\n date__lte=end_date,\n reading_type='GAS'\n )\n serializer = MeterReadingSerializer(readings, many=True)\n return Response(serializer.data)", "def fetch_data(self):\n\n data_dict = {\n 'price': self.get_current_price(),\n }\n\n return self.save_data(data_dict)", "def data():\n columns = request.form.getlist('columns')\n regions = request.form.getlist('fieldsArr')\n #print(regions)\n #for the visualization step, a copy of the 'RAW' regions.\n raw_regions = regions\n\n regions = regions[0].split(',')\n\n #catch if region is empty, should not be if the client hasn't modified\n #client side code\n reg_len = len(regions)\n if reg_len == 1:\n regions = ['AUS_CODE_2016,036']\n raw_regions = 'AUS_CODE_2016,036'\n #regions are split into a list and columns are a list, time to enfore\n # limits! 
up to 100 columns by 1000 cells max!\n if len(columns) > 10:\n columns = columns[:10]\n\n #extra 1 for the regions code yet to be extracted\n if len(regions) > 1001:\n regions = regions[:1001]\n\n area = regions[0]\n\n #check for no slected data itme and return a total pop count\n col_len = len(columns)\n if col_len == 0:\n columns.append('Tot_P_P')\n\n # bigly work,\n r_data = query_proc.table_query(regions, columns)\n\n columns.insert(0, area)\n columns.insert(0, \"Census 2016 Name\")\n\n return render_template('data.html',\n title='Data from the map',\n data=r_data,\n columns=columns,\n raw_regions=raw_regions)", "def get(self, data):\n pass", "def mapping(self):\n return self.request('_mapping', pylastica.request.Request.GET).data", "def call(self, endpoint: str = \"\", query: dict = dict(), method: str = \"GET\",\n unit: bool = True) -> dict:\n query.update({'apikey': self.api_key, 'lat': self.coords[0], 'lon': self.coords[1]})\n if unit:\n query['unit_system'] = self.unit_system\n resp = requests.request(\n url=self.base_url + endpoint,\n method=method,\n params=query\n )\n if not resp.ok:\n raise e.ClimaAPIError(resp)\n return resp.json()", "def get_data(radius: int, user_zip_code: str, state_dict: dict,\n provider_filter: list) -> list:\n # zip_map_dict = create_zip_dict()\n active_set = active_providers(state_dict['metadata']['provider_brands'])\n in_range_lst = create_in_range_lst(state_dict['features'],\n user_zip_code, radius,\n active_set, False, provider_filter)\n return create_message_list(in_range_lst, user_zip_code)", "def index(self):\n args = index_reqparse.parse_args()\n args.api_key = \"8l3xbEmsQMq7AG7mXoSy3IuJAqehmWGRC754Otx7\"\n\n url = 'http://api.data.gov/gsa/fbopen/v0/opps?%s' % urlencode(query=args)\n\n return json.loads(urlopen(url=url).read())", "def get_entity_data(self, instance_id = None):\n # Typically, this url is better, becuase we can insert parameters into url in a proper way\n # original url is 'https://api.ci.ai.dynamics.com/v1/instances/{instanceId}/data/{relativePath}[?forceSearch][&proxy]'\n # but we don't need optional parameters, so we delete them from full url\n url_original = 'https://api.ci.ai.dynamics.com/v1/instances/{instanceId}/data/{relativePath}'\n # it seems {} only part of python sting format, so we only use string.format(value1, value2,..) 
to replace it \n url_parse = url_original.format(instanceId = self.INSTANCEID, relativePath = self.RELATIVEPATH)\n # it seems sometimes url with parameters not working, so we provide a fulfuiled url \n url_full = 'https://api.ci.ai.dynamics.com/v1/instances/c910b061-1008-4397-95f6-4e7b443b924a/data/RetailDemoData_RetailSystem_Contacts'\n \n # send get request with \n response = requests.get(url_parse, headers=self.HEADERS)\n # r2 = requests.get(url_full, headers=self.HEADERS)\n print(response)\n # for record in r3.json():\n # print(json.dumps(record))\n # according to json file structore, our data contain in ['value']\n # response.json()['value']\n # for row in r3.json()['value']:\n # print(json.dumps(row))\n # we can try to storage data into a local directory,\n # these only work for local python script, not on AZURE runbook\n # with open('03_data/32_entity.json', 'w') as outfile:\n # json.dump(r3.json()['value'], outfile)\n\n return response.json()['value']", "def get(self, base_url, observable, limit, credentials):\n\n url = url_join(base_url, self.filter(observable)) + f'&$top={limit}'\n\n response = get_data(url, credentials)\n\n return [\n self.sighting(observable, x) for x in response.get('value', [])\n ]", "def get(self):\n\n bill = {\n 'product': {\n 'name': self.order.product.name,\n 'price': self.order.product.price\n },\n 'order_date_of_creation': self.order.date_of_creation,\n 'bill_date_of_creation': timezone.now(),\n 'discounts': [],\n 'total': self.order.product.price\n }\n\n return self.add_discount(bill)", "def _get_data(self, url: str)->dict:\n data = None\n resp = self._get(url)\n if resp:\n data = resp.json()['data']\n return data", "def apicall():\r\n# try:\r\n print request.get_json()\r\n test_json = request.get_json()\r\n logger.info(\"input json object loaded\")\r\n logger.info(test_json)\r\n k=MetaData(test_json)\r\n int_res=k.getData()\r\n print '------------------------------'\r\n print int_res\r\n return jsonify(int_res)", "def get_data(ts_code: int, begin: str, end: str) -> List:\n\n url = (\n \"http://api.bcb.gov.br/dados/serie/bcdata.sgs.{}\"\n \"/dados?formato=json&dataInicial={}&dataFinal={}\"\n )\n request_url = url.format(ts_code, begin, end)\n response = requests.get(request_url)\n return response.json()", "def main():\n #bearer_token = obtain_bearer_token(API_HOST, TOKEN_PATH)\n bearer_token ='SHdrjUqMJXqXBKUc7bGIplM8y6tnbwZbXXDbWPCd9wWMP8tX9PdJrC5MZHwJRhb7jMtLjXxT-hsWjNf2OkdiDWd30HsS84AVI5iRnrpxkak3HbWXAdUKvraQ_wgXWXYx'\n response = transaction_search(bearer_token, '1910 Entrepreneur Dr, Raleigh, NC 27518')\n response = response.get('businesses')\n print(json.dumps(response, indent=4))", "def get(self, *args, **kwargs) -> Any:\n return self.raw_data.get(*args, **kwargs)", "def get_claim_bill(\n _db: Session = db, *, start: datetime = None, end: datetime = None,\n store_internal_id: int = None, owner_id: int = None,\n kind: models.ClaimKind = None, calculation: CalcType = \"avg\"\n):\n query = crud.claim.get_query(_db)\n if start and end:\n query = query.filter(models.Claim.created_at.between(start, end))\n if store_internal_id:\n query = query.filter(models.Claim.store_internal_id == store_internal_id)\n if owner_id:\n query = query.filter(models.Claim.owner_id == owner_id)\n if kind:\n query = query.filter(models.Claim.kind == kind)\n bill = query.with_entities(getattr(func, calculation)(models.Claim.bill).label(\"_bill\")).first()\n bill_round = round(bill[0], 2) if bill[0] is not None else 0.00\n return bill_round" ]
[ "0.6360357", "0.6014319", "0.59525716", "0.5819394", "0.5776328", "0.57017654", "0.5693053", "0.56899124", "0.5679887", "0.5679887", "0.5679887", "0.56725025", "0.56725025", "0.5663544", "0.5658554", "0.5646016", "0.56169564", "0.5605687", "0.5596649", "0.5570589", "0.5530671", "0.55145097", "0.5502198", "0.5497323", "0.548008", "0.5475051", "0.54735523", "0.545327", "0.54267776", "0.5414057", "0.53767276", "0.537473", "0.53490686", "0.5327744", "0.53263485", "0.5318045", "0.5315238", "0.53121483", "0.5309047", "0.5302879", "0.52997714", "0.5293017", "0.52893126", "0.5284566", "0.52675885", "0.5267583", "0.5244594", "0.52374876", "0.5236168", "0.52358997", "0.52304167", "0.5227862", "0.5225517", "0.5224484", "0.5224484", "0.5219283", "0.52080905", "0.51961976", "0.5192172", "0.51918435", "0.5190948", "0.5183728", "0.51824766", "0.5179246", "0.5161987", "0.51476425", "0.51469666", "0.51445967", "0.51399916", "0.51346385", "0.51323247", "0.51308113", "0.51221496", "0.51155484", "0.51108056", "0.51085526", "0.5107533", "0.5098758", "0.50983804", "0.5093868", "0.50929487", "0.50915706", "0.509095", "0.5089983", "0.5085026", "0.5081053", "0.5080668", "0.50795496", "0.50769264", "0.5075166", "0.50711375", "0.5068888", "0.5068731", "0.5064039", "0.50628066", "0.5060558", "0.5053803", "0.5052109", "0.5051208", "0.50510263" ]
0.59895027
2
Get budget billing data
async def __getBBL_async(self, account, projectedBillData) -> dict: _LOGGER.info("Getting budget billing data") data = {} try: async with async_timeout.timeout(TIMEOUT): response = await self.session.get( URL_BUDGET_BILLING_PREMISE_DETAILS.format(account=account) ) if response.status == 200: r = (await response.json())["data"] dataList = r["graphData"] # startIndex = len(dataList) - 1 billingCharge = 0 budgetBillDeferBalance = r["defAmt"] projectedBill = projectedBillData["projected_bill"] asOfDays = projectedBillData["as_of_days"] for det in dataList: billingCharge += det["actuallBillAmt"] calc1 = (projectedBill + billingCharge) / 12 calc2 = (1 / 12) * (budgetBillDeferBalance) projectedBudgetBill = round(calc1 + calc2, 2) bbDailyAvg = round(projectedBudgetBill / 30, 2) bbAsOfDateAmt = round(projectedBudgetBill / 30 * asOfDays, 2) data["budget_billing_daily_avg"] = bbDailyAvg data["budget_billing_bill_to_date"] = bbAsOfDateAmt data["budget_billing_projected_bill"] = float(projectedBudgetBill) async with async_timeout.timeout(TIMEOUT): response = await self.session.get( URL_BUDGET_BILLING_GRAPH.format(account=account) ) if response.status == 200: r = (await response.json())["data"] data["bill_to_date"] = float(r["eleAmt"]) data["defered_amount"] = float(r["defAmt"]) except Exception as e: _LOGGER.error(e) return data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def GetCampaignBudget(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def budget(self):\n return self._budget", "def billing_info(self):\r\n return BillingInfo(self)", "def view_budgets(self) -> None:\n Menu.prompt_view_budgets()\n for budget in self.user.budget_manager:\n print(f\"{budget}\\n\")", "def get_budgets(self) -> list:\n return self.budget_manager.get_budgets()", "def get_budgets(self) -> list:\n return list(self.budgets.values())", "def billing(self):\n return self._billing", "def billing_info(self):\n return self._billing_info", "def get_budget(self, category: BudgetCategory) -> Budget:\n return self.budgets.get(category, None)", "def get_spend_by_campaign_custom(self, budget_id, aw_account_id):\n try:\n budget = Budget.objects.get(id=budget_id)\n google_ads_account = DependentAccount.objects.get(id=aw_account_id)\n except (Budget.DoesNotExist, DependentAccount.DoesNotExist):\n return\n\n client = get_client()\n client.client_customer_id = google_ads_account.dependent_account_id\n\n aw_campaigns = budget.aw_campaigns.filter(account=google_ads_account)\n aw_campaign_ids = list(set([aw_campaign.campaign_id for aw_campaign in aw_campaigns]))\n\n report_downloader = client.GetReportDownloader(version=settings.API_VERSION)\n\n campaign_report_selector = {\n 'fields': ['Cost', 'CampaignId', 'CampaignStatus', 'CampaignName', 'Labels', 'Impressions'],\n 'predicates': [\n {\n 'field': 'Cost',\n 'operator': 'GREATER_THAN',\n 'values': '0'\n },\n {\n 'field': 'CampaignId',\n 'operator': 'IN',\n 'values': aw_campaign_ids\n }\n ]\n }\n\n campaign_report_query = {\n 'reportName': 'CAMPAIGN_PERFORMANCE_REPORT',\n 'dateRangeType': 'CUSTOM_DATE',\n 'reportType': 'CAMPAIGN_PERFORMANCE_REPORT',\n 'downloadFormat': 'CSV',\n 'selector': campaign_report_selector\n }\n\n start_date = budget.start_date\n end_date = budget.end_date\n\n campaign_report_selector['dateRange'] = {\n 'min': start_date.strftime('%Y%m%d'),\n 'max': end_date.strftime('%Y%m%d')\n }\n\n campaign_report = Reporting.parse_report_csv_new(report_downloader.DownloadReportAsString(campaign_report_query))\n for campaign_row in campaign_report:\n print(campaign_row)\n campaign_id = campaign_row['campaign_id']\n campaign, created = Campaign.objects.get_or_create(campaign_id=campaign_id, account=google_ads_account)\n campaign.campaign_name = campaign_row['campaign']\n campaign.save()\n campaign_spend_object, created = CampaignSpendDateRange.objects.get_or_create(campaign=campaign,\n start_date=start_date,\n end_date=end_date)\n\n campaign_spend_object.spend = int(campaign_row['cost']) / 1000000\n campaign_spend_object.save()\n\n yest_campaign_report_selector = {\n 'fields': ['Cost', 'CampaignId', 'CampaignStatus', 'CampaignName', 'Labels', 'Impressions'],\n 'predicates': [\n {\n 'field': 'Cost',\n 'operator': 'GREATER_THAN',\n 'values': '0'\n },\n {\n 'field': 'CampaignId',\n 'operator': 'IN',\n 'values': aw_campaign_ids\n }\n ]\n }\n\n yest_campaign_report_query = {\n 'reportName': 'CAMPAIGN_PERFORMANCE_REPORT',\n 'dateRangeType': 'CUSTOM_DATE',\n 'reportType': 'CAMPAIGN_PERFORMANCE_REPORT',\n 'downloadFormat': 'CSV',\n 'selector': yest_campaign_report_selector\n }\n\n start_date = budget.start_date\n yest_end_date = datetime.datetime.now() - datetime.timedelta(1)\n\n yest_campaign_report_selector['dateRange'] = {\n 'min': start_date.strftime('%Y%m%d'),\n 'max': yest_end_date.strftime('%Y%m%d')\n }\n\n 
campaign_report = Reporting.parse_report_csv_new(\n report_downloader.DownloadReportAsString(yest_campaign_report_query))\n for campaign_row in campaign_report:\n campaign_id = campaign_row['campaign_id']\n campaign, created = Campaign.objects.get_or_create(campaign_id=campaign_id, account=google_ads_account)\n campaign.campaign_name = campaign_row['campaign']\n campaign.save()\n campaign_spend_object, created = CampaignSpendDateRange.objects.get_or_create(campaign=campaign,\n start_date=start_date,\n end_date=end_date)\n\n campaign_spend_object.spend_until_yesterday = int(campaign_row['cost']) / 1000000\n campaign_spend_object.save()\n\n # try:\n # campaign_report = \\\n # Reporting.parse_report_csv_new(report_downloader.DownloadReportAsString(yest_campaign_report_query))[0]\n # except IndexError:\n # return\n #\n # campaign_spend_object, created = CampaignSpendDateRange.objects.get_or_create(campaign=campaign,\n # start_date=budget.start_date,\n # end_date=budget.end_date)\n #\n # campaign_spend_object.spend_until_yesterday = int(campaign_report['cost']) / 1000000\n # campaign_spend_object.save()\n\n return 'get_spend_by_campaign_custom'", "def get_expenses(budget):\n return sum(expense['bgt'] for expense in budget['spend'])", "def get(self):\n\n bill = {\n 'product': {\n 'name': self.order.product.name,\n 'price': self.order.product.price\n },\n 'order_date_of_creation': self.order.date_of_creation,\n 'bill_date_of_creation': timezone.now(),\n 'discounts': [],\n 'total': self.order.product.price\n }\n\n return self.add_discount(bill)", "def budget_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"budget_name\")", "async def future_budget(budget: Budget):\n\n # Get the JSON object from the request body and cast it to a dictionary\n input_dict = budget.to_dict()\n bank_account_id = input_dict['bank_account_id']\n monthly_savings_goal = input_dict['monthly_savings_goal']\n\n transactions = load_user_data(bank_account_id)\n\n # instantiate the user\n user = User(transactions)\n\n # predict budget using time series model\n pred_bud = user.predict_budget()\n\n # if a fatal error was encountered while generating the budget,\n # return no budget along with the warning list\n if user.warning == 2:\n return json.dumps([None, user.warning_list])\n\n # modify budget based on savings goal\n modified_budget = user.budget_modifier(\n pred_bud, monthly_savings_goal=monthly_savings_goal)\n\n # if a fatal error was encountered while modifying the budget,\n # return no budget along with the warning list\n if user.warning == 2:\n return json.dumps([None, user.warning_list])\n\n # if a non-fatal warning was encountered in predict_budget() or\n # budget_modifier(), return the budget along with the warning list\n elif user.warning == 1:\n return json.dumps([modified_budget, user.warning_list])\n\n return modified_budget", "def get_debt_state(member, limit_year, limit_month):\n if member.first_payment_year is None:\n # never paid! 
using registration date to start with\n yearmonths_paid = set()\n year_to_check = member.registration_date.year\n month_to_check = member.registration_date.month\n else:\n # build a set for the year/month of paid quotas\n quotas = Quota.objects.filter(member=member).all()\n yearmonths_paid = {(q.year, q.month) for q in quotas}\n\n year_to_check = member.first_payment_year\n month_to_check = member.first_payment_month\n\n # verify the limit is after member started paying\n if year_to_check == limit_year:\n if month_to_check > limit_month:\n return []\n elif year_to_check > limit_year:\n return []\n\n # build a set of all the year/month the member should have paid up to (including) the limit\n should_have_paid = set()\n while True:\n should_have_paid.add((year_to_check, month_to_check))\n year_to_check, month_to_check = increment_year_month(year_to_check, month_to_check)\n if year_to_check == limit_year:\n if month_to_check > limit_month:\n break\n elif year_to_check > limit_year:\n break\n\n return sorted(should_have_paid - yearmonths_paid)", "def plan_get(request):\n company = auth_api_key(request)\n plan = get_and_check_plan(request, company)\n return plan", "def budget_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"budget_name\")", "def get_budgets(budg_path, exp_path, dates=None):\n exp_budg = data_help.read_jsonFile(budg_path)\n exp_data = data_help.read_jsonFile(exp_path)\n if dates == None:\n dates = [util.get_current_month()]\n for date in dates:\n exp_budg_keys = exp_budg.keys()\n if date not in exp_budg_keys: # check for current month to find exp categories\n print(\n f\"I have detected some data with for the month {date} that has no budget set.\")\n print(\n \"Please set the budget for this month.. or delete the data and run the program again.\")\n if len(exp_budg) != 0:\n user_in = util.get_user_input_for_chars(\n \"Would you like to the whole thing (w) or create new (n)? 
\", ['w', 'n'])\n\n if user_in == 'w':\n key = util.select_dict_key_using_integer(\n exp_budg, \"Please select a budget to copy: \", print_children=True, print_vals=False, \n print_child_vals=True)\n exp_budg[date] = exp_budg[key]\n elif user_in == 'n':\n exp_budg[date] = declare_new_budget(date, exp_data)\n else:\n exp_budg[date] = declare_new_budget(date, exp_data)\n\n print(f\"Your budget is now saved for {date}.\")\n\n else:\n print(f\"Your monthly budget for {date} is: \")\n\n util.print_simple_dict(exp_budg[date], print_vals=True)\n\n data_help.write_to_jsonFile(budg_path, exp_budg)\n return", "def get_budget(self, names=None, zones=None, net=False, pivot=False):\n recarray = _get_budget(\n self._budget, self._zonenamedict, names=names, zones=zones, net=net\n )\n\n if pivot:\n recarray = _pivot_recarray(recarray)\n\n return recarray", "def get_period_budgets(cls, now):\n limits_dict = {}\n strategies = cls.objects_visible.filter(is_distributed_evenly=True)\n strategies = cls.running(strategies)\n\n for strategy in strategies:\n limits_dict[strategy.public_id] = strategy.period_budget(now)\n\n log.info('[SPENDINGS] Period budgets calculated (currency): {0}'.format(limits_dict))\n\n # Cast to budget precision used in Redis\n return {strategy: cast_CPM_to_dbbid(cast_currency_to_CPM(budget)) for strategy, budget in limits_dict.items()}", "def get_budget_from_api(type_of_thing: int, qty: int):\n payload = {\"multiplier\": qty}\n if type_of_thing == 1:\n payload[\"commodity\"] = \"tomatoes\"\n elif type_of_thing == 2:\n payload[\"commodity\"] = \"broiler-chickens\"\n else:\n return None\n\n cache_key = f\"{type_of_thing}-{qty}\"\n\n val_from_cache = cache.get(cache_key)\n if val_from_cache:\n return val_from_cache\n\n json_payload = json.dumps(payload)\n\n r = requests.post(BUDGET_API_URL, json_payload)\n\n if r.status_code == 200:\n result = r.json()['data']\n cache.set(\n cache_key,\n result,\n BUDGET_CACHE_DURATION\n )\n return result\n\n return None", "def get_debt(self):\n sum_import = self.invoice_set.filter(\n expiration_date__lte=date.today(),\n paid=False,\n debited=False,\n canceled=False,\n uncollectible=False,\n ).aggregate(Sum(\"amount\"))\n return sum_import.get(\"amount__sum\", None)", "def get_order_limit_data():\n\n chargeDB = ChargeDBHelper()\n order_limit_list = []\n\n rxcui_bundles = chargeDB.get_all_charge_bundles()\n clinic_count = clinic_cnt_for_days(chargeDB.get_days_spanned())\n for bundle in rxcui_bundles:\n order_limit_list.append(to_order_limit_row(bundle, clinic_count))\n\n\n\n chargeDB.close()\n return order_limit_list", "def get_billing_data_by_priority(self):\n result = {}\n product = self.get_first_product_by_priority()\n if product:\n sp = self.subscriptionproduct_set.filter(product=product).first()\n if sp.address:\n result = {\n \"route\": sp.route_id,\n \"order\": sp.order,\n \"address\": sp.address.address_1 or sp.subscription.contact.email,\n \"state\": sp.address.state,\n \"city\": sp.address.city,\n \"name\": self.get_billing_name(),\n }\n if not result:\n if getattr(settings, \"FORCE_DUMMY_MISSING_BILLING_DATA\", False):\n result = {}\n return result", "async def __getFromProjectedBill(self, account, premise, currentBillDate) -> dict:\n data = {}\n\n try:\n async with async_timeout.timeout(TIMEOUT):\n response = await self.session.get(\n URL_RESOURCES_PROJECTED_BILL.format(\n account=account,\n premise=premise,\n lastBillDate=currentBillDate.strftime(\"%m%d%Y\"),\n )\n )\n\n if response.status == 200:\n projectedBillData = (await 
response.json())[\"data\"]\n\n billToDate = float(projectedBillData[\"billToDate\"])\n projectedBill = float(projectedBillData[\"projectedBill\"])\n dailyAvg = float(projectedBillData[\"dailyAvg\"])\n avgHighTemp = int(projectedBillData[\"avgHighTemp\"])\n\n data[\"bill_to_date\"] = billToDate\n data[\"projected_bill\"] = projectedBill\n data[\"daily_avg\"] = dailyAvg\n data[\"avg_high_temp\"] = avgHighTemp\n\n except Exception as e:\n _LOGGER.error(e)\n\n return data", "def GetAllCostByAmountBandFromDB(lowerLimit, upperLimit):\n\n logs.logger.debug(\"Start to get back all Cost object from database\\\n based on amount band.\")\n try:\n searchedCostByAmountBandFromDB = session.query(\n Cost.Cost).filter(Cost.Cost.amount >= lowerLimit, Cost.Cost.amount <= upperLimit).all()\n logs.logger.info(\n \"Get back all Cost object from database based on amount band.\")\n return [item for item in searchedCostByAmountBandFromDB]\n except Exception as e:\n logs.logger.error(e, exc_info=True)", "def budget(self):\n\n budget = (_House.closing_cost*self.vars['after_repair_value']) - self.vars['purchase_price'] - self.vars['profit'] - _House.broker_fee\n return float(round(budget, 2))", "def getBudget(movieInfo):\n if \"budget\" in movieInfo:\n return int(movieInfo[\"budget\"])\n else:\n raise AttributeError(\"%s instance has no attribute budget\" % movieInfo)", "def budget_balance(self):\n budget_balance = round(self.budget() - self.total_spent(), 2)\n budget_balance_degree = round( (9000 * self.total_spent()) / (self.budget()), 4) #convert to degrees and round to four decimal places\n return (budget_balance, budget_balance_degree)", "def get(self):\n return {'bills': [bill.json() for bill in BillModel.find_all()]}", "def action_budget_create(self, cr, uid, ids, context=None):\n payment_term_obj = self.pool.get('account.payment.term')\n for porder in self.browse(cr, uid, ids, context=context):\n period = self.pool.get('account.period').find(cr,uid,porder.date_order, context = context)[0] \n result = []\n confirmation_dict={\n 'reference': porder.name,\n 'period_id': period,\n 'partner_id':porder.partner_id.id,\n 'amount': porder.amount_total,\n 'note':'',\n 'date':porder.date_order,\n 'type':'purchase'}\n\n for line in porder.order_line:\n confirmation_ids=[]\n account_id = self._choose_account_from_po_line(cr, uid, line, context=context)\n notes = _(\"Purchase Approval: %s \\nDescription: %s.\\nDate: %s \\nProducts: %s \") % (porder.name , porder.notes , porder.date_order , line.name )\n\n result= payment_term_obj.compute(cr, \n uid, porder.payment_term_id.id, line.price_subtotal,porder.date_order or False, context=context)\n for r in result:\n confirmation_dict.update(\n {'date':r[0],\n 'amount':r[1],\n 'note':notes,\n 'name':'/',\n 'general_account_id': account_id,\n 'account_analytic_id': line.account_analytic_id.id or False,\n })\n confirmation_id = self.pool.get('account.budget.confirmation').create(cr, uid, confirmation_dict)\n confirmation_ids.append(confirmation_id)\n line.write({'confirmation_ids':[(6, 0, confirmation_ids)] ,'state': 'waiting_budget'})\n self.write(cr, uid, ids, {'state': 'waiting_budget'})\n return True", "def budget_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"budget_name\")", "def billing_period(self) -> Optional[str]:\n return pulumi.get(self, \"billing_period\")", "async def __getDataFromBalance(self, account) -> dict:\n _LOGGER.info(\"Getting appliance usage data\")\n\n data = {}\n\n URL_BALANCE = API_HOST + 
\"/api/resources/account/{account}/balance?count=-1\"\n\n try:\n async with async_timeout.timeout(TIMEOUT):\n response = await self.session.get(URL_BALANCE.format(account=account))\n if response.status == 200:\n data = (await response.json())[\"data\"]\n\n indice = [i for i, x in enumerate(data) if x[\"details\"] == \"DEBT\"][\n 0\n ]\n\n deb = data[indice][\"amount\"]\n\n except Exception as e:\n _LOGGER.error(e)\n\n return {\"balance_data\": data}", "def get_pl_balances(self):\n\n\t\tdimension_fields = ['t1.cost_center']\n\n\t\tself.accounting_dimensions = get_accounting_dimensions()\n\t\tfor dimension in self.accounting_dimensions:\n\t\t\tdimension_fields.append('t1.{0}'.format(dimension))\n\n\t\treturn frappe.db.sql(\"\"\"\n\t\t\tselect\n\t\t\t\tt1.account, t2.account_currency, {dimension_fields},\n\t\t\t\tsum(t1.debit_in_account_currency) - sum(t1.credit_in_account_currency) as bal_in_account_currency,\n\t\t\t\tsum(t1.debit) - sum(t1.credit) as bal_in_company_currency\n\t\t\tfrom `tabGL Entry` t1, `tabAccount` t2\n\t\t\twhere t1.is_cancelled = 0 and t1.account = t2.name and t2.report_type = 'Profit and Loss'\n\t\t\tand t2.docstatus < 2 and t2.company = %s\n\t\t\tand t1.posting_date between %s and %s\n\t\t\tgroup by t1.account, {dimension_fields}\n\t\t\"\"\".format(dimension_fields = ', '.join(dimension_fields)), (self.company, self.get(\"year_start_date\"), self.posting_date), as_dict=1)", "def get_display_price_data(self, source, commitment):\n overage, included = self.get_price_data(source, commitment)\n if self.name == settings.BILLING_DEFAULT_PLAN_NAME:\n included = OFFICIAL_BUILDER_LIMITS[source]\n return overage, included", "def get_budget_fixture(thing_being_farmed: int):\n if thing_being_farmed == 1:\n path = os.path.join(BASE_DIR, 'fixtures', 'tomato-budget.json')\n elif thing_being_farmed == 2:\n path = os.path.join(BASE_DIR, 'fixtures', 'broiler-budget.json')\n else:\n return None\n\n with open(path, 'r') as budget_fixture:\n budget = json.load(budget_fixture)\n\n return budget", "def billing_account(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"billing_account\")", "def billing_account(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"billing_account\")", "def billing_account(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"billing_account\")", "def get_bill_details(request):\n\n print request\n\n context = request['context']\n print context\n try:\n telephone_number = first_entity_value(request['entities'], 'phone_number')\n with open(os.path.join(sys.path[0], \"app/wit/static/users.json\"), \"r\") as data_file:\n data = json.load(data_file)\n customer_billing = data[telephone_number]['last_month_billing']\n print customer_billing\n\n customer_type = data[telephone_number]['type_customer']\n if customer_type == 'postpaid':\n\n reply = \"Our Initial Investigation shows that you're a \" + data[telephone_number]['type_customer'] + \" Customer and currently using \" + data[telephone_number]['plan_details'] + \" plan type.\"\n if customer_billing['roaming'] == 'True':\n reply += \"You had used your cellphone while on roaming for which you were charged extra.\"\n elif customer_billing['data_exhaust'] == 'True':\n reply += \"You had used your data network after your allocated limit was exhausted. 
You were charged for these services\"\n elif customer_billing['subscribed'] == 'True':\n reply += \"You had subscribed to some promotional services for which you were charged in extra.\"\n else:\n reply = \"Our Initial Investigation shows that you're a \" + data[telephone_number]['type_customer'] + \". We believe that this might be a mistake from our side and would like you to speak to our customer care executives separately.\"\n\n\n except:\n telephone_number = None\n reply = \"Your number is not subscribed with Airtel. Please contact your network operator for your query\"\n\n\n print reply\n\n context['bill_details'] = reply\n\n return context", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n all_updates_rule: Optional[pulumi.Input[pulumi.InputType['BudgetAllUpdatesRuleArgs']]] = None,\n amount: Optional[pulumi.Input[pulumi.InputType['BudgetAmountArgs']]] = None,\n billing_account: Optional[pulumi.Input[str]] = None,\n budget_filter: Optional[pulumi.Input[pulumi.InputType['BudgetBudgetFilterArgs']]] = None,\n display_name: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n threshold_rules: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['BudgetThresholdRuleArgs']]]]] = None) -> 'Budget':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _BudgetState.__new__(_BudgetState)\n\n __props__.__dict__[\"all_updates_rule\"] = all_updates_rule\n __props__.__dict__[\"amount\"] = amount\n __props__.__dict__[\"billing_account\"] = billing_account\n __props__.__dict__[\"budget_filter\"] = budget_filter\n __props__.__dict__[\"display_name\"] = display_name\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"threshold_rules\"] = threshold_rules\n return Budget(resource_name, opts=opts, __props__=__props__)", "def _fetch_bills(self, options):\n bill_count = options['max'] or fetch.DEFAULT_BILL_COUNT\n return fetch.bills(per_page=bill_count)", "def create_budget(budget_category: BudgetCategory) -> Budget:\n amount = -1\n while amount <= 0:\n amount = float(input(f'Enter {budget_category.value} budget: '))\n if amount <= 0:\n print('Budget amount must be greater than 0! 
Please enter '\n 'again!')\n return Budget(budget_category, amount)", "def get_claim_bill(\n _db: Session = db, *, start: datetime = None, end: datetime = None,\n store_internal_id: int = None, owner_id: int = None,\n kind: models.ClaimKind = None, calculation: CalcType = \"avg\"\n):\n query = crud.claim.get_query(_db)\n if start and end:\n query = query.filter(models.Claim.created_at.between(start, end))\n if store_internal_id:\n query = query.filter(models.Claim.store_internal_id == store_internal_id)\n if owner_id:\n query = query.filter(models.Claim.owner_id == owner_id)\n if kind:\n query = query.filter(models.Claim.kind == kind)\n bill = query.with_entities(getattr(func, calculation)(models.Claim.bill).label(\"_bill\")).first()\n bill_round = round(bill[0], 2) if bill[0] is not None else 0.00\n return bill_round", "def declare_new_budget(date, exp_data):\n\n exp_list = exp_data[env.EXPENSE_DATA_KEY]\n local_budget = {}\n month_total = util.get_float_input(\n f\"Please input your total for the month ending {date}: \", force_pos=True)\n budg_remaining = month_total\n\n for i, exp in enumerate(exp_list):\n if i == len(exp_list) - 1:\n print(\"I got the last one for you :) MATH!\")\n budg_amnt = budg_remaining\n budg_remaining = 0\n\n elif budg_remaining == 0: # elif skips this condition if budget remaining is set above\n budg_amnt = 0\n local_budget[env.BUDGET_TOTAL_KEY] = month_total\n else:\n prompt = f\"Enter your budget for: [{exp}] - Total Budget Re. ${budg_remaining} - Exp's Re. [{len(exp_list) - i - 1}]: \"\n budg_amnt = prompt_for_budget_amnt(\n prompt, budg_remaining, exp_data)\n local_budget.update({exp: budg_amnt})\n budg_remaining = round(month_total - sum_budget(local_budget), 2)\n print(local_budget)\n return local_budget", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n all_updates_rule: Optional[pulumi.Input[pulumi.InputType['BudgetAllUpdatesRuleArgs']]] = None,\n amount: Optional[pulumi.Input[pulumi.InputType['BudgetAmountArgs']]] = None,\n billing_account: Optional[pulumi.Input[str]] = None,\n budget_filter: Optional[pulumi.Input[pulumi.InputType['BudgetBudgetFilterArgs']]] = None,\n display_name: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n threshold_rules: Optional[pulumi.Input[List[pulumi.Input[pulumi.InputType['BudgetThresholdRuleArgs']]]]] = None) -> 'Budget':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = dict()\n\n __props__[\"all_updates_rule\"] = all_updates_rule\n __props__[\"amount\"] = amount\n __props__[\"billing_account\"] = billing_account\n __props__[\"budget_filter\"] = budget_filter\n __props__[\"display_name\"] = display_name\n __props__[\"name\"] = name\n __props__[\"threshold_rules\"] = threshold_rules\n return Budget(resource_name, opts=opts, __props__=__props__)", "def get_bankcard_obj(self):\n kwargs = {\n 'card_number': self.cleaned_data['number'],\n 'expiry_date': self.cleaned_data['expiry_month'].strftime(\"%m/%y\"),\n 'ccv': self.cleaned_data['ccv_number'],\n }\n if self.cleaned_data['start_month']:\n kwargs['start_date'] = self.cleaned_data['start_month'].strftime(\"%m/%y\")\n return Bankcard(**kwargs)", "def budget(config, tag):\n\tif tag in config[\"budget\"].keys():\n\t\tprint(tag)\n\telse:\n\t\tprint(config[\"budget\"].keys())", "def get(self, args):\n return Payment.query.offset(args['offset']).limit(args['limit'])", "def budgetFilter(budget):\n # Query the database for all the movies\n movies = 
Movies.query.order_by(db.asc(Movies.budget)).filter(Movies.budget>=budget).limit(100).all()\n\n # Serialize the list of movies from our data\n movies_schema = MoviesSchema(many=True)\n data = movies_schema.dump(movies)\n return data", "def get_budget_by_name(self, budget_name):\n return next((budget for budget in self.budgets\n if budget.name.lower() == budget_name.lower()), None)", "def month():\n \n # get month entered by user - if no month entered default to current month\n month = request.args.get(\"month\", datetime.now().strftime(\"%Y-%m\"))\n \n # get budget data for month as a dictionary\n data = budget_data(month)\n \n return json.dumps(data)", "def handle(self):\n self.validate()\n if self.errors:\n raise InvalidDataException(self.errors)\n\n phone_number = self.data.get('phone_number')\n month = self.data.get('month')\n year = self.data.get('year')\n\n # if a period was not informed, get the current last one\n if not month and not year:\n year, month = last_period()\n\n bill_data = Bill.data_by_number_period(phone_number, month, year)\n return bill_data", "def get_config(self, budget):\n\t\traise NotImplementedError('This function needs to be overwritten in %s.'%(self.__class__.__name__))", "def _get_fwl_billing_item(self, firewall_id, dedicated=False):\r\n mask = ('mask[id,billingItem[id]]')\r\n if dedicated:\r\n fwl_svc = self.client['Network_Vlan_Firewall']\r\n else:\r\n fwl_svc = self.client['Network_Component_Firewall']\r\n return fwl_svc.getObject(id=firewall_id, mask=mask)", "def get_transfer_bid(self):\n api_uri = self._uri_dict.get('getTransferBid')\n data = {}\n r_data = self._post(api_uri, data)\n return r_data", "def get_bank(self):\n return self._bank", "def get_spend_by_campaign_this_month(self, account_id):\n try:\n account = DependentAccount.objects.get(id=account_id)\n except DependentAccount.DoesNotExist:\n return\n\n client = get_client()\n client.client_customer_id = account.dependent_account_id\n\n report_downloader = client.GetReportDownloader(version=settings.API_VERSION)\n\n campaign_report_selector = {\n 'fields': ['Cost', 'CampaignId', 'CampaignStatus', 'CampaignName', 'Labels', 'Impressions'],\n 'predicates': [\n {\n 'field': 'Cost',\n 'operator': 'GREATER_THAN',\n 'values': '0'\n }\n ]\n }\n\n campaign_report_query = {\n 'reportName': 'CAMPAIGN_PERFORMANCE_REPORT',\n 'dateRangeType': 'THIS_MONTH',\n 'reportType': 'CAMPAIGN_PERFORMANCE_REPORT',\n 'downloadFormat': 'CSV',\n 'selector': campaign_report_selector\n }\n\n in_use_ids = []\n\n campaign_report = Reporting.parse_report_csv_new(report_downloader.DownloadReportAsString(campaign_report_query))\n for campaign_row in campaign_report:\n print(campaign_row)\n campaign_id = campaign_row['campaign_id']\n in_use_ids.append(campaign_row['campaign_id'])\n campaign, created = Campaign.objects.get_or_create(campaign_id=campaign_id, account=account)\n # Update campaign name\n campaign.campaign_name = campaign_row['campaign']\n # This is the cost for this month\n cost = int(campaign_row['cost']) / 1000000\n campaign.campaign_cost = cost\n campaign.save()\n print('Campaign: ' + str(campaign) + ' now has a spend this month of $' + str(campaign.campaign_cost))\n\n today = datetime.datetime.today()\n\n if today.day != 1:\n yesterday = datetime.datetime.now() - datetime.timedelta(1)\n first_day_of_month = datetime.datetime(yesterday.year, yesterday.month, 1)\n\n campaign_report_selector = {\n 'fields': ['Cost', 'CampaignId', 'CampaignStatus', 'CampaignName', 'Labels', 'Impressions'],\n 'predicates': [\n {\n 
'field': 'Cost',\n 'operator': 'GREATER_THAN',\n 'values': '0'\n }\n ]\n }\n\n campaign_report_query = {\n 'reportName': 'CAMPAIGN_PERFORMANCE_REPORT',\n 'dateRangeType': 'CUSTOM_DATE',\n 'reportType': 'CAMPAIGN_PERFORMANCE_REPORT',\n 'downloadFormat': 'CSV',\n 'selector': campaign_report_selector\n }\n\n start_date = first_day_of_month\n end_date = yesterday\n\n campaign_report_selector['dateRange'] = {\n 'min': start_date.strftime('%Y%m%d'),\n 'max': end_date.strftime('%Y%m%d')\n }\n\n campaign_yest_report = Reporting.parse_report_csv_new(\n report_downloader.DownloadReportAsString(campaign_report_query))\n for campaign_row in campaign_yest_report:\n campaign_id = campaign_row['campaign_id']\n campaign, created = Campaign.objects.get_or_create(campaign_id=campaign_id, account=account)\n campaign.campaign_name = campaign_row['campaign']\n # This is the cost for this month until yesterday\n spend_until_yesterday = int(campaign_row['cost']) / 1000000\n campaign.spend_until_yesterday = spend_until_yesterday\n campaign.save()\n print(\n 'Campaign: ' + str(campaign) + ' has spend until yesterday of $' + str(campaign.spend_until_yesterday))\n\n return 'get_spend_by_campaign_this_month'", "def test_companies_company_id_data_bill_credit_notes_get(self):\n pass", "def billing_info_list(request):\n if request.method == 'GET':\n billing_infos = BillingInfo.objects.all()\n serializer = BillingInfoSerializer(billing_infos, many=True)\n return JSONResponse(serializer.data)\n\n elif request.method == 'POST':\n data = JSONParser().parse(request)\n serializer = BillingInfoSerializer(data=data)\n if serializer.is_valid():\n serializer.save()\n return JSONResponse(serializer.data, status=201)\n return JSONResponse(serializer.errors, status=400)", "async def fetch_balance(self, params={}):\n await self.load_markets()\n request = {\n 'currency': 'all',\n }\n response = await self.privateGetUserMargin(self.extend(request, params))\n #\n # [\n # {\n # \"account\":1455728,\n # \"currency\":\"XBt\",\n # \"riskLimit\":1000000000000,\n # \"prevState\":\"\",\n # \"state\":\"\",\n # \"action\":\"\",\n # \"amount\":263542,\n # \"pendingCredit\":0,\n # \"pendingDebit\":0,\n # \"confirmedDebit\":0,\n # \"prevRealisedPnl\":0,\n # \"prevUnrealisedPnl\":0,\n # \"grossComm\":0,\n # \"grossOpenCost\":0,\n # \"grossOpenPremium\":0,\n # \"grossExecCost\":0,\n # \"grossMarkValue\":0,\n # \"riskValue\":0,\n # \"taxableMargin\":0,\n # \"initMargin\":0,\n # \"maintMargin\":0,\n # \"sessionMargin\":0,\n # \"targetExcessMargin\":0,\n # \"varMargin\":0,\n # \"realisedPnl\":0,\n # \"unrealisedPnl\":0,\n # \"indicativeTax\":0,\n # \"unrealisedProfit\":0,\n # \"syntheticMargin\":null,\n # \"walletBalance\":263542,\n # \"marginBalance\":263542,\n # \"marginBalancePcnt\":1,\n # \"marginLeverage\":0,\n # \"marginUsedPcnt\":0,\n # \"excessMargin\":263542,\n # \"excessMarginPcnt\":1,\n # \"availableMargin\":263542,\n # \"withdrawableMargin\":263542,\n # \"timestamp\":\"2020-08-03T12:01:01.246Z\",\n # \"grossLastValue\":0,\n # \"commission\":null\n # }\n # ]\n #\n return self.parse_balance(response)", "def get_customer_balance_sheet(self):\n total = 0\n taxes = 0\n balances = 0\n un_paid_count = 0\n conflicts = 0\n unresolved_conflicts = 0\n projected_before_tax = 0\n\n invoice_list = Customer_Invoice.objects.all()\n count = len(invoice_list)\n for invoice in invoice_list:\n if invoice.invoice_quote.total_price_quoted:\n total += invoice.invoice_quote.total_price_quoted\n taxes += invoice.invoice_quote.tax_on_quote\n balances += 
invoice.get_balance_due()\n else:\n projected = invoice.get_cost()\n projected_before_tax += projected[1]\n if not invoice.paid_in_full:\n un_paid_count += 1\n for conflict in invoice.conflict.all():\n conflicts += 1\n if not conflict.conflict_resolution:\n unresolved_conflicts += 1\n profit = total - taxes\n\n return total, taxes, profit, balances, count, conflicts, unresolved_conflicts, projected_before_tax", "def default_billing(self):\n return self._default_billing", "def get_budget(\n self, f=None, names=None, zones=None, net=False, pivot=False\n ):\n aliases = None\n if self._zon is not None:\n aliases = self._zon.aliases\n\n if f is None and self._recarray is None:\n f = os.path.join(self._model_ws, f\"{self._name}.csv\")\n self._recarray = _read_zb_csv2(\n f, add_prefix=False, aliases=aliases\n )\n elif f is None:\n pass\n else:\n self._recarray = _read_zb_csv2(\n f, add_prefix=False, aliases=aliases\n )\n\n recarray = _get_budget(\n self._recarray,\n self._zon._zonenamedict,\n names=names,\n zones=zones,\n net=net,\n )\n\n if pivot:\n recarray = _pivot_recarray(recarray)\n\n return recarray", "def get_billing_document(self):\n if self.rut:\n return self.rut\n elif self.billing_id_doc:\n return self.billing_id_doc\n else:\n return self.contact.id_document", "def plan_list_get(request):\n return list_by_company_guid(request, PlanModel)", "def get_transactions_for_budget(self, budget_name=None):\n if len(self.budgets) > 1 and budget_name is None:\n self._logger.error('There are multiple budgets and no budget name was provided')\n raise MultipleBudgets\n if budget_name is None:\n self._logger.debug('No budget name provided returning the only budget registered')\n budget = self.budgets[0]\n else:\n self._logger.debug('Trying to retrieve budget with name \"%s\"', budget_name)\n budget = next((budget for budget in self.budgets\n if budget.name.lower() == budget_name.lower()), None)\n if not budget:\n return []\n return [YnabServerTransaction(transaction, transaction.account)\n for transaction in budget.transactions]", "def retrieve(self, request, *args, **kwargs):\n return super(BalanceBillsViewSet, self).retrieve(\n request,\n *args,\n **kwargs\n )", "def billing_account(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"billing_account\")", "def billing_account(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"billing_account\")", "def test_companies_company_id_data_bank_accounts_account_id_transactions_get(self):\n pass", "def GetAllCostByDateOfPaymentBandFromDB(startDate, endDate):\n\n logs.logger.debug(\n \"Start to get back all Cost object from database \"\n \"based on payment date band.\")\n try:\n searchedCostByDateOfPaymentBandFromDB = session.query(\n Cost.Cost).filter(Cost.Cost.dateOfPayment >= startDate, Cost.Cost.dateOfPayment <= endDate).all()\n logs.logger.info(\n \"Get back all Cost object from database \"\n \"based on payment date band.\")\n return [item for item in searchedCostByDateOfPaymentBandFromDB]\n except Exception as e:\n logs.logger.error(e, exc_info=True)", "def balance(self):\n balance = {'A': 0, 'B': 0}\n account_balances = AccountBalance.objects.filter(\n organization_id=self.id,\n expiration_date=None\n ).order_by('-id')\n\n credit_class = CreditClass.objects.filter(credit_class=\"A\").first()\n for account_balance in account_balances:\n if account_balance.credit_class_id == credit_class.id:\n balance['A'] = account_balance.balance\n else:\n balance['B'] = account_balance.balance\n return balance", "def test_retrieve_all_by_bank(self):\n swa = 
frontend.SupplyWinApi()\n query_dict = dict(\n dev=\"rrenaud\",\n targets=\"\",\n interaction=\"Bank\",\n unconditional=\"true\",\n )\n\n card_stats = swa.retrieve_data(query_dict)\n\n # Gets 288 entries back, because one for each of the 144\n # cards, plus the unconditioned version of each\n self.assertEquals(len(card_stats), 288)\n\n self.assertEquals(card_stats[0]['card_name'], 'Adventurer')\n\n json = swa.readable_json_card_stats(card_stats)\n self.assertEquals(json[0:14], '[{\"card_name\":')", "def test_get_virtual_account_beneficiary(self):\n pass", "def budget_filter(self) -> pulumi.Output['outputs.BudgetBudgetFilter']:\n return pulumi.get(self, \"budget_filter\")", "def getBudgetBalance(self, budgetName):\r\n assert budgetName in self.budgets, \"Specified budget doesn't exist\"\r\n return \"%.2f\" % float(self.budgets[budgetName])", "def billing_account(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"billing_account\")", "def billing_account(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"billing_account\")", "def fetch_balance(self, params={}):\n self.load_markets()\n response = self.privateGetAccountBalanceV2(params)\n #\n # {\n # \"AVAILABLE_NIS\": 0.0,\n # \"NIS\": 0.0,\n # \"LOCKED_NIS\": 0.0,\n # \"AVAILABLE_BTC\": 0.0,\n # \"BTC\": 0.0,\n # \"LOCKED_BTC\": 0.0,\n # \"AVAILABLE_ETH\": 0.0,\n # \"ETH\": 0.0,\n # \"LOCKED_ETH\": 0.0,\n # \"AVAILABLE_BCHSV\": 0.0,\n # \"BCHSV\": 0.0,\n # \"LOCKED_BCHSV\": 0.0,\n # \"AVAILABLE_BCHABC\": 0.0,\n # \"BCHABC\": 0.0,\n # \"LOCKED_BCHABC\": 0.0,\n # \"AVAILABLE_LTC\": 0.0,\n # \"LTC\": 0.0,\n # \"LOCKED_LTC\": 0.0,\n # \"AVAILABLE_ETC\": 0.0,\n # \"ETC\": 0.0,\n # \"LOCKED_ETC\": 0.0,\n # \"AVAILABLE_BTG\": 0.0,\n # \"BTG\": 0.0,\n # \"LOCKED_BTG\": 0.0,\n # \"AVAILABLE_GRIN\": 0.0,\n # \"GRIN\": 0.0,\n # \"LOCKED_GRIN\": 0.0,\n # \"Fees\": {\n # \"BtcNis\": {\"FeeMaker\": 1.0, \"FeeTaker\": 1.0},\n # \"EthNis\": {\"FeeMaker\": 1.0, \"FeeTaker\": 1.0},\n # \"BchabcNis\": {\"FeeMaker\": 1.0, \"FeeTaker\": 1.0},\n # \"LtcNis\": {\"FeeMaker\": 1.0, \"FeeTaker\": 1.0},\n # \"EtcNis\": {\"FeeMaker\": 1.0, \"FeeTaker\": 1.0},\n # \"BtgNis\": {\"FeeMaker\": 1.0, \"FeeTaker\": 1.0},\n # \"LtcBtc\": {\"FeeMaker\": 1.0, \"FeeTaker\": 1.0},\n # \"BchsvNis\": {\"FeeMaker\": 1.0, \"FeeTaker\": 1.0},\n # \"GrinNis\": {\"FeeMaker\": 1.0, \"FeeTaker\": 1.0}\n # }\n # }\n #\n return self.parse_balance(response)", "def get():\n\n # \\todo List of available data, fetched and processed\n\n return jsonify({'valid_resources': ['balance', 'balance_usd', 'trade_history', 'balance_norm_price_history', 'open_orders']})", "def get_campaign_cost(self, id):\n logger.info(\"Function call: get_campaign_cost: '{}'\".format(id, ))\n return self.__handle_error(\"Empty addressbook id\") if not id else self.__handle_result(self.__send_request('addressbooks/{}/cost'.format(id)))", "def test_get_virtual_account_beneficiaries(self):\n pass", "def get_amount():\n conn = None\n try:\n params = config()\n conn = psycopg2.connect(**params)\n cur = conn.cursor()\n\n #tas\n raw={}\n raw['plan']={}\n raw['actual']={}\n\n cur.execute(\"SELECT slc.*,plc.amount as amount_plan,ppm.amount as price \\\n ,to_char((slc.amount*ppm.amount)/aco.amount_actual_coal,'999.999') as cost_actual \\\n , to_char((plc.amount*ppm.amount)/aco.amount_actual_coal,'999.999') as cost_plan \\\n ,aco.amount_actual_coal \\\n FROM vw_tta_cps_sum_land_clearing_now AS slc \\\n LEFT JOIN vw_tta_cps_price_per_month AS ppm \\\n ON slc.int_month=ppm.month \\\n AND 
slc.year_id=ppm.year_id \\\n LEFT JOIN vw_tta_cps_plan_land_clearing AS plc \\\n ON slc.periode_month_id=plc.periode_month_id \\\n AND slc.land_clearing_location_id=plc.land_clearing_location_id \\\n LEFT JOIN vw_tta_cps_sr_all_per_month_plan_actual as aco \\\n ON aco.int_month=slc.int_month \\\n WHERE ppm.jenis_price=10 \\\n AND aco.plan_id=1\")\n rowcount=cur.rowcount\n print(\"The number of row: \", rowcount)\n row = cur.fetchone()\n counter=0\n item={}\n\n if rowcount>0:\n f=open('../../data_cost_per_ton.csv','w')\n f.write('AREA,PLAN,ACTUAL\\n')\n while row is not None:\n #print(row)\n raw[\"plan\"]=row[11]\n raw[\"actual\"]=row[10]\n f.write(str('RAW')+','+str(raw[\"plan\"])+','+str(raw[\"actual\"])+\"\\n\")\n\n row = cur.fetchone()\n\n if rowcount>0:\n f.close()\n cur.close()\n\n print(str(datetime.datetime.now())+' '+str(rowcount)+' row updated')\n except (Exception, psycopg2.DatabaseError) as error:\n print(str(datetime.datetime.now())+' '+str(error))\n finally:\n if conn is not None:\n conn.close()", "def get_all_spend_by_campaign_custom(self):\n budgets = Budget.objects.filter(has_adwords=True, is_monthly=False)\n for budget in budgets:\n for aw_account in budget.account.adwords.all():\n if settings.DEBUG:\n get_spend_by_campaign_custom(budget.id, aw_account.id)\n else:\n get_spend_by_campaign_custom.delay(budget.id, aw_account.id)\n\n return 'get_all_spend_by_campaign_custom'", "def sum_budget(monthly_budget: dict):\n sum = 0\n for k, v in monthly_budget.items():\n if v is not None and k != env.BUDGET_TOTAL_KEY:\n sum += v\n\n return sum", "def get_entity_contracts():\n url = 'http://www.base.gov.pt/base2/rest/contratos?' \\\n 'adjudicatariaid=%d' % entity.base_id\n\n response = requests.get(url, headers={'Range': 'items=0-1000000'})\n return json.loads(response.text)", "def FundingInfo(self, default=[{}]):\n tmp = self.data.get('metadata', {}).get('funding_info', default)\n return [HEP.FundingObject(i) for i in tmp]", "def complete(self, cr, uid, ids, context={}):\n budget_pool = self.pool.get('account.budget')\n budget_line_pool = self.pool.get('account.budget.lines')\n for r in self.browse(cr, uid, ids, context=context):\n if r.type=='transfer' and not r.line_ids:\n raise osv.except_osv(_('Error!'),_('You cannot complete Transfer Operations without any Budget line.'))\n if r.budget_type=='cash':\n budget_ids = budget_pool.search(cr, uid,[('analytic_account_id', '=', r.analytic_account_id.id), \n ('period_id', '=', r.period_id.id)], context=context)\n budget_line_id = budget_line_pool.search(cr, uid,[('general_account_id', '=', r.account_id.id), \n ('account_budget_id', 'in', tuple(budget_ids))], context=context)\n if budget_line_id:\n line=budget_line_pool.browse(cr, uid, budget_line_id, context=context)[0]\n if line.planned_amount+line.total_operation < line.cash_total_operation + r.amount:\n raise orm.except_orm(_('Error!'),\n _(\"Cash budget (%s) can't be more than planned budget (%s)!\") % \\\n ( line.cash_total_operation+ r.amount,line.planned_amount+line.total_operation ,))\n if line.cash_residual_balance + r.amount <=0:\n raise orm.except_orm(_('Error!'),\n _(\"The amount you try to transfer (%s) is more than %s residual (%s)!\") % \\\n (r.amount, line.name, line.cash_residual_balance,))\n for e in r.line_ids:\n if line.planned_amount+line.total_operation < line.cash_total_operation - r.amount:\n raise orm.except_orm(_('Error!'),\n _(\"Cash budget (%s) can't be more than planned budget (%s)!\") % \\\n ( e.cash_total_operation- 
r.amount,line.planned_amount+line.total_operation ,))\n if e.line_id.cash_residual_balance - e.amount <=0:\n raise orm.except_orm(_('Error!'),\n _(\"The amount you try to transfer (%s) is more than %s residual (%s)!\") % \\\n (e.amount, e.line_id.name, e.line_id.cash_residual_balance,))\n return self.write(cr, uid, ids,{'state':'complete','name': r.name == '/' and \n self.pool.get('ir.sequence').get(cr, uid, 'account.budget.operation') or \n r.name, 'amount': r.type=='increase' and r.amount or sum([l.amount for l in r.line_ids])}, context=context)\n \n return super(account_budget_operation, self).complete(cr, uid, ids, context=context)", "def get_business(api_key, business_id):\n\n business_path = BUSINESS_PATH + business_id\n\n\n return request(API_HOST, business_path, api_key)", "def test_companies_company_id_data_tax_rates_get(self):\n pass", "def bcp_get(self, **kwargs):\n pass", "def get_pending_orders(self):\n\n r = requests.get(build_api_call(self.base_url, ACCOUNTID, 'pending', ''), auth=HTTPBasicAuth(KEY, SECRET))\n\n if r.status_code == 200:\n return r.json()\n else:\n return 'error'", "def billing(self, account_id):\n from pureport_client.commands.accounts.billing import Command\n return Command(self.client, account_id)", "def add_budget(self, budget: Budget) -> None:\n self.budgets[budget.category] = budget", "def get_list() -> List[BankDetails]:\n from paynlsdk.client.transaction import Transaction\n return Transaction.get_banks().banks", "def get_broilers_budget(chickens: int = 1):\n budget = get_budget_fixture(thing_being_farmed=2)\n\n for i, segment in enumerate(budget['segments']):\n for i2, activity in enumerate(segment['activities']):\n for i3, item in enumerate(activity['inputs']):\n new_price = chickens * item['price']\n item['estimated_price'] = new_price\n item['price'] = new_price\n\n return budget", "def ListUserBillingCycles(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def payback_form_data(self):\n return {\n 'date': date.today(),\n 'who_from': 'Georgie',\n 'who_to': 'Tristan',\n 'amount': 50,\n 'currency': 'GBP',\n 'method': 'Bank_transfer'\n }" ]
[ "0.66888595", "0.6544355", "0.64032984", "0.6293646", "0.62567914", "0.625594", "0.62317514", "0.6158417", "0.6107173", "0.60221004", "0.59583", "0.586195", "0.58142555", "0.57493407", "0.56994355", "0.5688151", "0.5630534", "0.5616468", "0.56075585", "0.5590992", "0.5573143", "0.55689305", "0.5564759", "0.5552304", "0.5542231", "0.5503946", "0.5483847", "0.54810107", "0.5463703", "0.5454207", "0.54327214", "0.5428958", "0.5427901", "0.5408684", "0.54038906", "0.53833085", "0.5379731", "0.5376678", "0.5376678", "0.5376678", "0.5372952", "0.5372906", "0.5368391", "0.5355997", "0.53545624", "0.5341898", "0.5340229", "0.533204", "0.5324086", "0.5323478", "0.5316224", "0.5310702", "0.53087777", "0.5307988", "0.53072894", "0.5307256", "0.53048205", "0.5294596", "0.52943766", "0.52919644", "0.52812916", "0.52805215", "0.5272894", "0.52713424", "0.5268449", "0.52536476", "0.5226082", "0.5223371", "0.52058595", "0.5205518", "0.5205518", "0.520527", "0.52027977", "0.5192338", "0.51843125", "0.51392907", "0.51351047", "0.51324564", "0.5121087", "0.5121087", "0.5114291", "0.5113227", "0.5111747", "0.5104885", "0.50985074", "0.50979", "0.5093064", "0.5085721", "0.50799024", "0.5077532", "0.5069483", "0.50647444", "0.5062828", "0.5062258", "0.50568414", "0.50560707", "0.5049608", "0.5045234", "0.5042542", "0.5039526" ]
0.7448803
0
get data from appliance usage
async def __getDataFromApplianceUsage(self, account, lastBilledDate) -> dict: _LOGGER.info("Getting appliance usage data") JSON = {"startDate": str(lastBilledDate.strftime("%m%d%Y"))} data = {} try: async with async_timeout.timeout(TIMEOUT): response = await self.session.post( URL_APPLIANCE_USAGE.format(account=account), json=JSON ) if response.status == 200: electric = (await response.json())["data"]["electric"] full = 100 for e in electric: rr = round(float(e["percentageDollar"])) if rr < full: full = full - rr else: rr = full data[e["category"].replace(" ", "_")] = rr except Exception as e: _LOGGER.error(e) return {"energy_percent_by_applicance": data}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getUsageInfo(self):\n return self.jsonRequest(\"/api/v1/usage\", { \"apiKey\": self._apiKey })", "def get_application_api_usage_get(self, applicationId, end, start):\n # TODO: Assuming first server is good - need to make fallback logic\n return self.session.get_any(\"{base}{request_url}\".format(base=self.servers[0],\n request_url=F\"/App/ApiUsage/{applicationId}/\"))", "def get_usage_data(username, password):\n usage_req = XfinityUsage(username, password, browser_name=\"firefox-headless\")\n return usage_req.run()", "def test_getusage(self):\n ret = {\"message\": \"No Random.org api key or api version found.\", \"res\": False}\n self.assertDictEqual(random_org.getUsage(), ret)\n\n self.assertDictEqual(\n random_org.getUsage(api_key=\"peW\", api_version=\"1\"),\n {\n \"bitsLeft\": None,\n \"requestsLeft\": None,\n \"res\": True,\n \"totalBits\": None,\n \"totalRequests\": None,\n },\n )", "async def __getDataFromBalance(self, account) -> dict:\n _LOGGER.info(\"Getting appliance usage data\")\n\n data = {}\n\n URL_BALANCE = API_HOST + \"/api/resources/account/{account}/balance?count=-1\"\n\n try:\n async with async_timeout.timeout(TIMEOUT):\n response = await self.session.get(URL_BALANCE.format(account=account))\n if response.status == 200:\n data = (await response.json())[\"data\"]\n\n indice = [i for i, x in enumerate(data) if x[\"details\"] == \"DEBT\"][\n 0\n ]\n\n deb = data[indice][\"amount\"]\n\n except Exception as e:\n _LOGGER.error(e)\n\n return {\"balance_data\": data}", "def retr_devices_by_app( app ) :\n\n\t\t\t_logger.info( '...retr_devices_by_app...' )\n\t\t\toutput = []\n\t\t\ttry :\n\t\t\t\tdb = mongo.db.auth_devices\n\t\t\t\tfor device in db.find( { 'app_tags' : app } ) :\n\t\t\t\t\toutput.append({'moniker' : device['moniker'] ,\n\t\t\t\t\t\t\t\t 'description' : device['description'] ,\n\t\t\t\t\t\t\t\t 'active' : device['active'] ,\n\t\t\t\t\t\t\t\t 'device_id' : device['device_id'] ,\n\t\t\t\t\t\t\t\t 'enlisted' : device['enlisted'] ,\n\t\t\t\t\t\t\t\t 'last_kown_remote_ip' : device['last_known_remote_ip'] ,\n\t\t\t\t\t\t\t\t 'engaged' : device['engaged'] ,\n\t\t\t\t\t\t\t\t 'canononical_user' : device['canonical_user'] ,\n\t\t\t\t\t\t\t\t 'scope' : device['scope'] ,\n\t\t\t\t\t\t\t\t 'segment' : device['segment']\n\t\t\t\t\t})\n\t\t\texcept Exception as e :\n\t\t\t\t _logger.error( '...retr_devices_by_app %s' % e.message )\n\t\t\treturn jsonify({'result' : output})", "def get_data():\n pass", "def get_info():\n global PERF_APP\n archs = None\n best_arch = None\n cipher_algos = None\n hash_algos = None\n aead_algos = None\n\n cmd = PERF_APP + ' --print-info'\n\n try:\n res = subprocess.run(cmd, stdout=subprocess.PIPE, \\\n stderr=subprocess.STDOUT, \\\n env=ENVS, shell=True, check=True)\n output = res.stdout.decode('utf-8')\n except subprocess.CalledProcessError as e:\n print(\"Error (\" + str(e.returncode) + \")\")\n print(e.output.decode('utf-8'))\n sys.exit(1)\n\n lines = output.rstrip().split('\\n')\n try:\n for line in lines:\n info = line.split(':')\n if info[0] == 'Supported architectures':\n archs = info[1].split()\n if info[0] == 'Best architecture':\n best_arch = info[1].split()\n if info[0] == 'Supported cipher algorithms':\n cipher_algos = info[1].split()\n if info[0] == 'Supported hash algorithms':\n hash_algos = info[1].split()\n if info[0] == 'Supported aead algorithms':\n aead_algos = info[1].split()\n except:\n print(\"Error parsing --print-info output:\\n\" \\\n \"{}\".format(output), file=sys.stderr)\n\n if archs is None or best_arch is None or 
cipher_algos is None \\\n or hash_algos is None or aead_algos is None:\n print(\"Error parsing system and app information\", file=sys.stderr)\n sys.exit(1)\n\n return archs, best_arch, cipher_algos, hash_algos, aead_algos", "def usage(self, host):", "def getInfo():", "def print_app_data(self):\n print(\"===================================\")\n print(\"== RESULTS: ==\")\n print(\"===================================\")\n\n # Analog application results\n print(\"--------------------------\")\n print(\"-- Analog applications --\")\n print(\"--------------------------\")\n print(\"Number of analog application processed: {}\".format(len(self.analog_apps)))\n if (self.verbose):\n for app in self.analog_apps:\n print(\" Application data:\")\n print(\" - - - - - - - - - - - - -\")\n print(' - EPICS PREFIX: MPLN:{}:{}:{}'.format(app[\"link_node_area\"].upper(), app[\"link_node_location\"].upper(), app[\"card_index\"]))\n print(\" - App ID : {}\".format(app[\"app_id\"]))\n print(\" - Cpu name : {}\".format(app[\"cpu_name\"]))\n print(\" - Crate ID : {}\".format(app[\"crate_id\"]))\n print(\" - Slot number : {}\".format(app[\"slot_number\"]))\n print(\" - Link node name : {}\".format(app[\"link_node_name\"]))\n print(\" - Link node area : {}\".format(app[\"link_node_area\"]))\n print(\" - Link node location : {}\".format(app[\"link_node_location\"]))\n print(\" - Card index : {}\".format(app[\"card_index\"]))\n print(\" - Number of devices : {}\".format(len(app[\"devices\"])))\n for device in app[\"devices\"]:\n print(\" Device data:\")\n print(\" .....................\")\n print(\" - EPICS PREFIX: {}:{}:{}\".format(device[\"type_name\"], device[\"area\"], device[\"position\"]))\n print(\" - Type name : {}\".format(device[\"type_name\"]))\n print(\" - Bay number : {}\".format(device[\"bay_number\"]))\n print(\" - Channel number : {}\".format(device[\"channel_number\"]))\n print(\" - Area : {}\".format(device[\"area\"]))\n print(\" - Position : {}\".format(device[\"position\"]))\n print(\" - Number of faults : {}\".format(len(device[\"faults\"])))\n for fault_id,fault_data in device[\"faults\"].items():\n print(\" Fault data:\")\n print(\" . . . . . . . . . . . . \")\n print(\" - EPICS PREFIX: {}_T{}\".format(fault_data[\"name\"], fault_data[\"bit_positions\"][0]))\n print(\" - ID : {}\".format(fault_id))\n print(\" - Name : {}\".format(fault_data[\"name\"]))\n print(\" - Description : {}\".format(fault_data[\"description\"]))\n print(\" - Bit positions : {}\".format(fault_data[\"bit_positions\"]))\n print(\" . . . . . . . . . . . . 
\")\n print(\" .....................\")\n print(\" - - - - - - - - - - - - -\")\n print(\"\")\n print(\"--------------------------\")\n\n # Digital application result\n print(\"----------------------------\")\n print(\"-- Digital applications --\")\n print(\"----------------------------\")\n print(\"Number of digital application processed: {}\".format(len(self.digital_apps)))\n if (self.verbose):\n for app in self.digital_apps:\n print(\" Application data:\")\n print(\" - - - - - - - - - - - - -\")\n print(' - EPICS PREFIX: MPLN:{}:{}:{}'.format(app[\"link_node_area\"].upper(), app[\"link_node_location\"].upper(), app[\"card_index\"]))\n print(\" - App ID : {}\".format(app[\"app_id\"]))\n print(\" - Cpu name : {}\".format(app[\"cpu_name\"]))\n print(\" - Crate ID : {}\".format(app[\"crate_id\"]))\n print(\" - Slot number : {}\".format(app[\"slot_number\"]))\n print(\" - Link node name : {}\".format(app[\"link_node_name\"]))\n print(\" - Link node area : {}\".format(app[\"link_node_area\"]))\n print(\" - Link node location : {}\".format(app[\"link_node_location\"]))\n print(\" - Card index : {}\".format(app[\"card_index\"]))\n print(\" - Number of devices : {}\".format(len(app[\"devices\"])))\n for device in app[\"devices\"]:\n print(\" Device data:\")\n print(\" .....................\")\n print(\" - EPICS PREFIX: {}:{}:{}\".format(device[\"type_name\"], device[\"area\"], device[\"position\"]))\n print(\" - Type name : {}\".format(device[\"type_name\"]))\n print(\" - Area : {}\".format(device[\"area\"]))\n print(\" - Position : {}\".format(device[\"position\"]))\n print(\" - Number of inputs : {}\".format(len(device[\"inputs\"])))\n for input in device[\"inputs\"]:\n print(\" Input data:\")\n print(\" . . . . . . . . . . . . \")\n print(\" - EPICS PREFIX: {}\".format(input[\"name\"]))\n print(\" - Name : {}\".format(input[\"name\"]))\n print(\" - Bit position : {}\".format(input[\"bit_position\"]))\n print(\" - Zero name : {}\".format(input[\"zero_name\"]))\n print(\" - One name : {}\".format(input[\"one_name\"]))\n print(\" - Alarm state : {}\".format(input[\"alarm_state\"]))\n print(\" - Debounce : {}\".format(input[\"debounce\"]))\n print(\" . . . . . . . . . . . . 
\")\n print(\" .....................\")\n print(\" - - - - - - - - - - - - -\")\n print(\"\")\n print(\"----------------------------\")\n\n\n print(\"===================================\")\n\n print('Found {} link nodes:'.format(len(self.link_nodes)))\n for k,v in self.link_nodes.items():\n print('{}: {}'.format(k, v['type']))", "def get_apk(self):", "def get_discovery_summary():\n pass", "def usage_information(self):\n return self._usage_information", "def gather_metric(self):\n result = self._shell.run(self.ADB_COMMAND)\n stdout = result.stdout.splitlines()\n adb_version = stdout[0].split()[-1]\n # Revision information will always be in next line\n adb_revision = stdout[1].split()[1]\n\n response = {\n self.ADB_VERSION: adb_version,\n self.ADB_REVISION: adb_revision\n }\n return response", "def device_overview(self):\r\n data = {}\r\n\r\n # GET DATA\r\n token = request.headers.get('token')\r\n userid = request.headers.get('userid')\r\n vessel_id = request.args.get('vessel_id')\r\n epoch_format = request.args.get('format')\r\n\r\n # CHECK TOKEN\r\n if not self.validate_token(token, userid):\r\n data['alert'] = \"Invalid Token\"\r\n data['status'] = 'Failed'\r\n return self.return_data(data)\r\n\r\n alarm_types = self.get_alarm_types()\r\n\r\n ats = self.get_alarm_trigger()\r\n\r\n devices = self.couch_query.get_all_devices(vessel_id)\r\n\r\n standard_time = self.epoch_day(time.time())\r\n\r\n epoch_time = time.time()\r\n\r\n temp_data = []\r\n\r\n start_date = self.get_start_date(epoch_format)\r\n\r\n if not start_date and epoch_format not in [\"day\", \"hours\"]:\r\n\r\n data['alert'] = \"Invalid format!\"\r\n data['status'] = 'Failed'\r\n\r\n return self.return_data(data)\r\n\r\n for device in devices:\r\n\r\n if device['doc']['device'] in ['PARAMETERS', 'NTWCONF', 'NTWPERF1']:\r\n\r\n continue\r\n\r\n row = {}\r\n row['device'] = device['doc']['device']\r\n row['name'] = device['doc']['device']\r\n row['Alert'] = 0\r\n row['Critical'] = 0\r\n row['Warning'] = 0\r\n row['Info'] = 0\r\n row['Debug'] = 0\r\n for atrigger in ats:\r\n\r\n trigger_type = self.get_alarm_type_name(alarm_types, atrigger['alarm_type_id'])\r\n\r\n at_id = atrigger['alarm_trigger_id']\r\n device_id = device['id']\r\n\r\n datas = self.calc.calculate_trigger([at_id], standard_time,\r\n epoch_time, vessel_id=vessel_id,\r\n device_id=device_id)\r\n\r\n if not datas == \"No Alarm Trigger found.\":\r\n\r\n datas_index_0 = datas[0]\r\n len_datas = datas_index_0['results']\r\n if len_datas:\r\n\r\n row[trigger_type] = 1\r\n\r\n if epoch_format in ['week', 'month', \"quarter\", 'annual']:\r\n\r\n sql_str = \"SELECT COUNT(alarm_trigger_id) FROM alarm_data \"\r\n sql_str += \"WHERE device_id='{0}' \".format(device_id)\r\n sql_str += \"AND epoch_date > {0} \".format(start_date)\r\n sql_str += \"AND epoch_date < {0}\".format(epoch_time)\r\n\r\n res = self.postgres.query_fetch_one(sql_str)\r\n\r\n row[trigger_type] = row[trigger_type] + res['count']\r\n\r\n temp_data.append(row)\r\n\r\n final_data = {}\r\n final_data['data'] = temp_data\r\n final_data['status'] = 'ok'\r\n\r\n return self.return_data(final_data)", "def get_devices_summary():\n\n # This function was created to replace get_devices_information\n # because it wasn't detecting virtual systems in Palo Alto Virtual Systems\n global nipper_xml\n devices = {}\n headings = []\n\n # Add the table headings to a list\n for h in nipper_xml.findall(\"./summary/table/[@ref='SCOPE.AUDITDEVICELIST.TABLE']/headings/heading\"):\n if h not in headings:\n headings.append(h.text)\n\n 
for device in nipper_xml.findall(\"./summary/table/[@ref='SCOPE.AUDITDEVICELIST.TABLE']/tablebody/tablerow\"):\n values = []\n for i in device.findall('./tablecell/item'):\n if i not in values:\n values.append(i.text)\n if DEBUG:\n print \"\\t\" + note + \"%s: %s\" % (headings[headings.index('Name')], values[headings.index('Name')])\n print \"\\t\" + note + \"%s: %s\" % (headings[headings.index('Device')], values[headings.index('Device')])\n print \"\\t\" + note + \"%s: %s\" % (headings[headings.index('OS')], values[headings.index('OS')].split(\" \")[0])\n print \"\\t\" + note + \"%s: %s\" % (headings[headings.index('OS')], values[headings.index('OS')].split(\" \")[1])\n devices[values[headings.index('Name')]] = {'name': values[headings.index('Name')],\n 'type': values[headings.index('Device')],\n 'os': values[headings.index('OS')].split(' ')[0],\n 'osversion': values[headings.index('OS')].split(' ')[1]\n }\n\n if DEBUG:\n print info + \"Device Object:\"\n print devices\n raw_input(warn + \"Press enter to continue\")\n return devices", "def getEnergyUsage():\n energy_data = asyncio.run(plug.get_emeter_realtime())\n\n return energy_data", "def get_data():\n return", "def data_setup_appliances():\n appliance_list = []\n appliance_list.append(helper_setup_appliances(APPLIANCE_TYPE_NAME_BASIC, \"appliance1\", gpio_pin_id=None))\n appliance_list.append(helper_setup_appliances(APPLIANCE_TYPE_NAME_BASIC, \"appliance2\", gpio_pin_id=None))\n appliance_list.append(helper_setup_appliances(APPLIANCE_TYPE_NAME_BASIC, \"appliance3\", gpio_pin_id=None))\n appliance_list.append(helper_setup_appliances(APPLIANCE_TYPE_NAME_BASIC, \"appliance4\", gpio_pin_id=None))\n appliance_list.append(helper_setup_appliances(APPLIANCE_TYPE_NAME_BASIC, \"appliance5\", gpio_pin_id=None))\n appliance_list.append(helper_setup_appliances(APPLIANCE_TYPE_NAME_RELAY, \"gpio_appliance1\", gpio_pin_id=13))\n appliance_list.append(helper_setup_appliances(APPLIANCE_TYPE_NAME_RELAY, \"gpio_appliance2\", gpio_pin_id=15))\n return appliance_list", "def get_data(self):", "def _get_data(self):\n c = Connector(self.host, self.username, self.password)\n return c.getLanDevices()", "def get_data():\n pass", "def get_data():\n pass", "def get_data():\n pass", "def user_sends_get_call_to_the_devices():\n web_app.list_devices()", "def info_equipment_get():\n equipment = _equipment_by_group()\n return equipment, 200", "def retrieve_dial_data(app_name):\n # NOTE: the reference code store the file in the application folder, we read the file in our data folder\n # perhaps other changes will be needed to allow USE_ADDITIONAL_DATA feature to work\n file_path = 'dial_data/' + app_name + '.json'\n if not fileops.file_exists(file_path):\n return {}\n data = fileops.load_file_def(file_path)\n return json.loads(data)", "def get(self):\n try:\n log.debug(\"Device info : \")\n #get the payload to influx DB\n url = \"http://localhost:8086/query\"\n querystring = {\"pretty\": \"true\", \"db\": \"IOT\",\n \"q\":\"SELECT DISTINCT(deviceId) FROM(SELECT deviceId,q1 FROM \\\"ttd_devices\\\" ) \" }\n response = requests.request(\"GET\", url, params=querystring)\n r_d=json.loads(response.text)\n result_d=[]\n for rec in r_d['results'][0]['series']:\n for element in rec['values']:\n result_d.append(element[1])\n result={}\n result['status'] = 1\n result['message']=result_d\n return_status = 200\n except ValueError as e:\n result = {}\n log.exception('Value Exception while fetching device list')\n result['status'] = 0\n return_status = 400\n result['message'] = 
e.args[0]\n except :\n result = {}\n log.exception('Exception while fetching the device data')\n return_status = 500\n result['status'] = 0\n result['message'] = 'Internal Error has occurred while fetching devie data'\n finally:\n resp = Response(json.dumps(result), status=return_status, mimetype=\"application/json\")\n return resp", "def show(ctx, appeui):\n if '.' in appeui:\n appeui = str(hexStringInt(str(appeui)))\n \n # Form the url and payload\n server = ctx.obj['server']\n payload = {'token': ctx.obj['token']}\n url = 'http://{}/api/v{}'.format(server, str(version))\n url += '/apps' if appeui == 'all' else '/app/{}'.format(appeui)\n \n # Make the request\n data = restRequest(server, url, 'get', payload, 200)\n if data is None:\n return\n \n # Single application\n if appeui != 'all':\n a = data\n indent = ' ' * 10\n if a['appinterface_id'] == 0:\n a['appinterface_id'] = '-'\n if a['domain'] is None:\n a['domain'] = '-'\n click.echo('Application EUI: ' + euiString(a['appeui']))\n click.echo('{}name: {}'.format(indent, a['name']))\n click.echo('{}domain: {}'.format(indent, a['domain']))\n click.echo('{}fport: {}'.format(indent, a['fport']))\n click.echo('{}interface: {}'.format(indent, a['appinterface_id']))\n if a['appinterface_id'] != '-':\n click.echo('{}Properties:'.format(indent))\n properties = sorted(a['properties'].values(), key=lambda k: k['port'])\n for p in properties:\n click.echo('{} {} {}:{}'.format(indent, p['port'], p['name'], p['type']))\n return\n \n # All applications\n click.echo('{:14}'.format('Application') + \\\n '{:24}'.format('AppEUI') + \\\n '{:15}'.format('Domain') + \\\n '{:6}'.format('Fport') + \\\n '{:10}'.format('Interface'))\n for i,a in data.iteritems():\n if a['appinterface_id'] == 0:\n a['appinterface_id'] = '-'\n if a['domain'] is None:\n a['domain'] = '-'\n click.echo('{:13.13}'.format(a['name']) + ' ' + \\\n '{:23}'.format(euiString(a['appeui'])) + ' ' + \\\n '{:14.14}'.format(a['domain']) + ' ' + \\\n '{:5.5}'.format(str(a['fport'])) + ' ' + \\\n '{:10}'.format(str(a['appinterface_id'])))", "def get_usage_data(self):\n with self._lock:\n data_copy = self._data.copy()\n return data_copy", "def get_listing_information():\n\n #getting the guidebox_id variable from show_page.html\n guidebox_id = request.args.get(\"guidebox_id\")\n\n #get the show from the database\n show = Show.find_show_with_guidebox_id(guidebox_id)\n\n #get show title from Guidebox so it can be used in the OnConnect title search url \n show_title = str(show.title)\n\n #get OnConnect seriesId\n series_id = onconnect_search_series_id(show_title)\n\n #obtaining listing information for a 24 hour period from the current time\n airings = onconnect_search_airings(series_id)\n\n return jsonify(airings)", "def available(self, app):\n return self.xlist(\"testfor\", app)[0]", "def GetData(self):\r\n if self.Error == False:\r\n Extra = {}\r\n try:\r\n result = {}\r\n temp = self.ScrapeMainWebpage()\r\n if temp != None:\r\n result.update(temp)\r\n temp = self.ScrapeParameters1Webpage()\r\n if temp != None:\r\n result.update(temp)\r\n temp = self.ScrapeParameters2Webpage()\r\n if temp != None:\r\n result.update(temp)\r\n temp = self.ScrapeStatusWebpage()\r\n if temp != None:\r\n result.update(temp)\r\n sqlArray = {}\r\n sqlArray[self.deviceDescr] = {}\r\n sqlArray[self.deviceDescr][self.devNumber] = {}\r\n sqlArray[self.deviceDescr][self.devNumber][\"General\"] = result\r\n sqlArray[self.deviceDescr][self.devNumber][\"_ExtractInfo\"] = {}\r\n 
sqlArray[self.deviceDescr][self.devNumber][\"_ExtractInfo\"][\"ExtractTime\"] = time.time()\r\n sqlArray[\"ReadError\"] = False \r\n return sqlArray\r\n \r\n except Exception as e: \r\n self.log.printError(\"ERROR in Retreiving Seatel VSAT Data,%s Module Error\" % sys._getframe().f_code.co_name) \r\n self.log.printError( str(e))\r\n self.Error = True\r\n Extra[\"ReadError\"] = True\r\n return Extra\r\n else:\r\n self.log.printWarning(\"%s skipped due to previous failure\" % sys._getframe().f_code.co_name)\r\n return None", "def __get_data_from_store(term):\n url_search = PLAY_STORE_URL + \"/search\"\n response = requests.get(url_search, {'c': 'apps', 'q': term})\n soup = BeautifulSoup(response.content, \"html.parser\")\n apps = soup.find_all(\"div\", {\"class\": \"card no-rationale square-cover apps small\"})\n\n result = []\n print(result)\n for i, app in enumerate(apps):\n app_details_basic = app.find(\"div\", {\"class\": \"details\"})\n app_id = app['data-docid']\n app_data = {\n 'uid': app_id,\n 'name': app_details_basic.find(\"a\", {\"class\": \"title\"})['title'].strip().encode('utf-8'),\n 'dev_name': app_details_basic.find(\"a\", {\"class\": \"subtitle\"})['title'].strip(),\n 'icon_url': \"http://\" + app.find(\n \"div\", {\"class\": \"cover-inner-align\"}).img['data-cover-large'].strip(\"//\")\n }\n\n url_app_detail = PLAY_STORE_URL + \"/apps/details\"\n response = requests.get(url_app_detail, {'id': app_id})\n soup = BeautifulSoup(response.content, \"html.parser\")\n\n app_data.update({\n 'category': soup.find(\"a\", {\"itemprop\": \"genre\"}).text,\n 'description': soup.find(\"div\", {\"itemprop\": \"description\"}).text.strip().encode('utf-8'),\n \n })\n\n \n dev_links = soup.find_all(\"a\", {\"class\": \"dev-link\", \"rel\": \"nofollow\"})\n if dev_links:\n for dev_link in dev_links:\n if \"mailto\" in dev_link['href']:\n app_data['dev_email'] = dev_link['href'].replace(\"mailto:\", \"\")\n break\n\n result.append(app_data)\n\n if i + 1 == SEARCH_RESULT_COUNT:\n break\n print(result)\n return result", "def get_usage(self):\r\n return self.box_usage", "async def get_device_data(self):\n pass", "def getatt(self):\n err = True\n retry = 0\n while err and retry<2:\n try:\n r = requests.get(self.url+'ATT?\\n')\n res = float(r.text)\n err = False\n except:\n err = True\n time.sleep(1)\n retry += 1\n res = nan\n return res", "def get_data(self):\n pass", "def get_data(self):\n pass", "def application_list(p_engine, p_username, format, appname):\n\n ret = 0\n\n enginelist = get_list_of_engines(p_engine, p_username)\n\n if enginelist is None:\n return 1\n\n data = DataFormatter()\n data_header = [\n (\"Engine name\", 30),\n (\"Application name\", 30),\n ]\n data.create_header(data_header)\n data.format_type = format\n for engine_tuple in enginelist:\n engine_obj = DxMaskingEngine(engine_tuple)\n if engine_obj.get_session():\n continue\n applist = DxApplicationList()\n # load all objects\n applist.LoadApplications()\n\n if appname is None:\n applications = applist.get_allref()\n else:\n applications = applist.get_applicationId_by_name(appname)\n if len(applications) == 0:\n ret = ret + 1\n\n for appref in applications:\n appobj = applist.get_by_ref(appref)\n data.data_insert(\n engine_tuple[0],\n appobj.application_name\n )\n\n print(\"\")\n print (data.data_output(False))\n print(\"\")\n \n \n return ret", "def _get_dapall_from_api(self):\n\n url = marvin.config.urlmap['api']['dapall']['url']\n\n url_full = url.format(name=self.plateifu,\n bintype=self.bintype.name,\n 
template=self.template.name)\n\n try:\n response = self._toolInteraction(url_full)\n except Exception as ee:\n raise MarvinError('found a problem while getting DAPall: {0}'.format(str(ee)))\n\n if response.results['error'] is not None:\n raise MarvinError('found a problem while getting DAPall: {}'\n .format(str(response.results['error'])))\n\n data = response.getData()\n\n return data['dapall_data']", "def get_usage(self, start=None, end=None):\n return self.manager.get_usage(self, start=start, end=end)", "def usage():\n return _usage", "def summary(app):\n click.echo(get_summary(app))", "def test_duo_application_get(self):\n pass", "def get_data(self):\r\n pass", "def usage(where=sys.stdout):\n print('Gather Holding IDs via REST using a list of MMS IDs.',\n file=where) \n print('Usage:', file=where)\n print(' west2_gather_Holding_IDs.py <file.txt> <APIKEY>', file=where)\n print('Where:', file=where)\n print(' file.txt List of MSS IDs (one / line)',\n file=where)\n print(' APIKEY API key for accessing Alma REST APIs',\n file=where)\n print('Output:', file=where)\n print(' Generates a datestamped text file: holding-and-mss-ids.<date>.txt',\n file=where)\n print(' consisting of lines holding_id<tab>mms_id', file=where)", "def get_appliances(self):\n if self.appliances is not None:\n return self.appliances\n self.appliances = Appliance.objects.filter(user=self.request.user)\n return self.appliances", "def get_usages(self):\n return self.client._perform_json(\"GET\", \"/projects/%s/managedfolders/%s/usages\" % (self.project_key, self.odb_id))", "def get_usage(self):\n return self.box_usage", "def AcquiredData (self) :\n\t\treturn self.run(\"AcquiredData\")", "def usage():\r\n print 'Usage: collect_logs.py testrun \"env1, env2,...\" \"oscounters, applogs, gclogs, traces\"'\r\n print \"testrun = Name of the test execution e.g. 
12032713\"\r\n print \"environments = Name of the environment defined in the environments.ini file\"\r\n print \"logtypes = List types of logfiles to collect\"", "def get(category, page=1, per_page=5):\r\n\r\n count = n_count(category)\r\n\r\n sql = text('''SELECT app.id, app.name, app.short_name, app.description,\r\n app.info, app.created, app.category_id, \"user\".fullname AS owner,\r\n featured.app_id as featured\r\n FROM \"user\", task, app\r\n LEFT OUTER JOIN category ON app.category_id=category.id\r\n LEFT OUTER JOIN featured ON app.id=featured.app_id\r\n WHERE\r\n category.short_name=:category\r\n AND app.hidden=0\r\n AND \"user\".id=app.owner_id\r\n AND app.info LIKE('%task_presenter%')\r\n AND task.app_id=app.id\r\n GROUP BY app.id, \"user\".id, featured.app_id ORDER BY app.name\r\n OFFSET :offset\r\n LIMIT :limit;''')\r\n\r\n offset = (page - 1) * per_page\r\n results = db.engine.execute(sql, category=category, limit=per_page, offset=offset)\r\n apps = []\r\n for row in results:\r\n app = dict(id=row.id,\r\n name=row.name, short_name=row.short_name,\r\n created=row.created,\r\n description=row.description,\r\n owner=row.owner,\r\n featured=row.featured,\r\n last_activity=pretty_date(last_activity(row.id)),\r\n last_activity_raw=last_activity(row.id),\r\n overall_progress=overall_progress(row.id),\r\n info=dict(json.loads(row.info)))\r\n apps.append(app)\r\n return apps, count", "def getStockData():\n pass", "def extract_programs():\n if settings.XPRO_CATALOG_API_URL:\n return requests.get(settings.XPRO_CATALOG_API_URL, timeout=20).json()\n return []", "def data():\n return volumes_fetchers.get_json_data()", "def get_antivirus_info():\n query = {\n \"type\": \"op\",\n \"cmd\": \"<request><anti-virus><upgrade><info></info></upgrade></anti-virus></request>\",\n }\n\n return __proxy__[\"panos.call\"](query)", "def test_mock_datasource_usages(self):\n account1 = self.test_data.accounts[0]\n meter = account1.meters[0]\n usage = meter.usages[0]\n self.assertIsInstance(usage, Usage)\n self.assertEqual(usage.PK, 6)\n self.assertEqual(usage.UsageActualName, \"test_usage\")\n self.assertEqual(usage.UsageAmount, Decimal(50.0))\n self.assertEqual(usage.RateComponent, \"test_rate_component\")\n self.assertEqual(usage.EnergyUnit, \"test_energy_unit\")\n self.assertEqual(usage.IntervalStart, date(2016, 1, 1))\n self.assertEqual(usage.IntervalEnd, date(2016, 2, 1))", "def get_data_qos(args):\n diff_data(args, \"qos\")", "def getAppInfo(self):\n data = self._client.Application.find(self.app_id)\n return data", "def get_device_info(target_project_arn):\n try:\n device_info = device_farm.list_devices(\n arn=target_project_arn,\n filters=[\n {\n \"attribute\": \"PLATFORM\",\n \"operator\": \"EQUALS\",\n \"values\": ['ANDROID', ]\n },\n {\n \"attribute\": \"OS_VERSION\",\n \"operator\": \"GREATER_THAN_OR_EQUALS\",\n \"values\": ['9', ]\n },\n {\n \"attribute\": \"MANUFACTURER\",\n \"operator\": \"EQUALS\",\n \"values\": ['Google', ]\n },\n {\n \"attribute\": \"AVAILABILITY\",\n \"operator\": \"EQUALS\",\n \"values\": ['HIGHLY_AVAILABLE', ]\n },\n {\n \"attribute\": \"FLEET_TYPE\",\n \"operator\": \"EQUALS\",\n \"values\": ['PUBLIC', ]\n }\n ])['devices']\n\n if device_info is not None:\n device_arn = device_info[0]['arn']\n device_name = device_info[0]['name']\n device_manufacture = device_info[0]['manufacturer']\n device_model = device_info[0]['model']\n device_model_id = device_info[0]['modelId']\n device_type = device_info[0]['formFactor']\n device_platform = device_info[0]['platform']\n device_os 
= device_info[0]['os']\n device_visibility = device_info[0]['fleetType']\n device_availability = device_info[0]['availability']\n\n print('Device Name - {} with Manufacture {}, model {}, modelId {} & type {}'.format(\n device_name,\n device_manufacture,\n device_model,\n device_model_id,\n device_type\n )\n )\n print('Device Platform {} with OS {}, visibility {} & availability - {} '.format(\n device_platform,\n device_os,\n device_visibility,\n device_availability\n )\n )\n\n if device_availability == TARGET_AVAILABILITY:\n print('AWS setup is complete')\n else:\n print('Problem, device is not available')\n else:\n print('Problem finding device info')\n\n except IndexError:\n print('Problem finding device from pool {}'.format(device_info))", "def access():", "def stats(self):\r\n\t\tdata = self._get('global/', query=None)\r\n\t\treturn data", "def main():\n results = []\n results.extend(check_mounts())\n results.extend(diskusage())\n return results", "def query(self):\n self._measurements[self.KEY_USAGE].df = self.fetch_data_usage()", "def get_app(self, app_name, ns_name):\n\n status, _ = self.helm_client.status(app_name, namespace=ns_name)\n values, _ = self.helm_client.get_values(app_name, namespace=ns_name)\n release_data = {\"status\": status, \"values\": values}\n\n schema_path = Path(\"%s/%s/values.schema.json\"\n % (self._get_ns_dir(ns_name), app_name))\n if schema_path.exists():\n schema = json.loads(schema_path.read_text())\n release_data[\"schema\"] = schema\n\n return release_data", "def diagnostics(self, oid):\n path = '/servers/%s/diagnostics' % oid\n res = self.client.call(path, 'GET', data='', token=self.manager.identity.token)\n self.logger.debug('Shows basic usage data for server %s: %s' % \n (oid, truncate(res)))\n return res[0]", "def test_get_us_daily_data(self):\n dag = self.dagbag.get_dag(self.dag_id)\n extract_task = dag.get_task('extract')\n resp = self.extract.getDailyUSDataFromAPI()\n self.assertIsNotNone(resp)\n self.assertEqual(type(resp), list)", "def getDeviceInfo():\n url = \"https://api.roblox.com/reference/deviceinfo\"\n r = requests.get(url)\n j = json.loads(r.text)\n return j", "def who():\n cleanup()\n return {'available': userlist(), 'eta': data['etas'], 'etd': data['etds'], 'lastlocation': data['lastlocation'], 'ceitloch': ceitloch(), 'reminder': data['reminder']}", "def get_app_info(self, name):\n with hide(\"output\", \"running\"):\n result = local(\"redis-cli -h {host} -p 6379 -n {db} hgetall {name}\".format(\n host=self.host, name=name, db=REDIS_APPLICATION_DB_NUM), capture=True)\n\n if len(result.stdout) > 0:\n splits = result.stdout.split(\"\\n\")\n fmt_result = dict([(splits[i], splits[i+1])\n for i in range(0, len(splits), 2)])\n pp = pprint.PrettyPrinter(indent=2)\n pp.pprint(fmt_result)\n return fmt_result\n else:\n warn(\"Application \\\"%s\\\" not found\" % name)\n return None", "def get_patient_status():\n r = requests.get(\"http://vcm-7474.vm.duke.edu:5000/api/heart_rate/3\")\n print(r.text)", "def usage(self):\r\n return usage.Usage(self)", "def get_info(self):\n pass", "def get_info(self):\n pass", "def getTheData(self, dev):\n if self.debugLevel >= 2 and self.debug:\n self.debugLog(u\"getTheData FrontViewAPI method called.\")\n\n # dev.updateStateOnServer('deviceIsOnline', value=True, uiValue=\"Download\")\n try:\n url = 'http://' + dev.pluginProps['sourceXML'] + '/FrontView'\n r = requests.get(url,timeout=5)\n result = r.json()\n if self.debugLevel >= 2 and self.debug:\n self.debugLog(u\"Result:\" + unicode(result))\n 
self.WaitInterval = 1\n dev.updateStateOnServer('deviceIsOnline', value=True, uiValue=\"Online\")\n dev.setErrorStateOnServer(None)\n # dev.updateStateOnServer('deviceTimestamp', value=t.time())\n return result\n\n except Exception as error:\n\n indigo.server.log(u\"Error connecting to Device:\" + dev.name)\n self.WaitInterval = 60\n if self.debugLevel >= 2 and self.debug:\n self.debugLog(u\"Device is offline. No data to return. \")\n dev.updateStateOnServer('deviceIsOnline', value=False, uiValue=\"Offline\")\n # dev.updateStateOnServer('deviceTimestamp', value=t.time())\n dev.setErrorStateOnServer(u'Offline')\n result = \"\"\n return result", "def device_count():\n apipath = \"/targets/devices\"\n url = SERVER + apipath\n params = {\n 'q': '(deviceType:ASA)',\n 'agg': 'count'}\n headers = {\n 'Accept': \"application/json\",\n 'Content-Type': \"application/json\",\n 'Authorization': \"bearer {}\".format(token)}\n response = requests.get(url, verify=False, stream=True, headers=headers, params=params)\n getstatuscode = response.status_code\n getresponse = response.json()\n if getstatuscode == 200:\n return getresponse\n else:\n response.raise_for_status()", "def appdata(appname):\n z = Zap(appname)\n z.appdata(stdout=True)", "def test_retrieve_1_by_all(self):\n swa = frontend.SupplyWinApi()\n query_dict = dict(\n dev=\"rrenaud\",\n targets=\"Council Room\",\n interaction=\"\",\n unconditional=\"true\",\n )\n\n card_stats = swa.retrieve_data(query_dict)\n\n self.assertEquals(len(card_stats), 1)\n\n self.assertEquals(card_stats[0]['card_name'], 'Council Room')\n self.assertEquals(len(card_stats[0]['condition']), 0)\n\n json = swa.readable_json_card_stats(card_stats)\n self.assertEquals(json[0:14], '[{\"card_name\":')", "def games_usage(parsed_args):\n if parsed_args.verb == \"GET\":\n filter_dict = {\n \"game_type\": parsed_args.game_type,\n \"genre\": parsed_args.genre,\n \"keywords\": parsed_args.keywords,\n \"mechanic\": parsed_args.mechanic\n }\n df = get_games(parsed_args.id, filter_dict)\n if parsed_args.function == \"FILTERS\":\n df = get_game_filters(df)\n else:\n df = post_games(df, parsed_args.sort_by, parsed_args.weighting)\n return df", "def get_data(self, label):\n self.application.get_data(label)", "def get_usage_stats(self) -> UsageStats:\n return self._usage", "def get(cls, service, name=\"\", option_=\"\") :\n\t\ttry :\n\t\t\tobj = audit_stats()\n\t\t\tif not name :\n\t\t\t\tresponse = obj.stat_resources(service, option_)\n\t\t\treturn response\n\t\texcept Exception as e:\n\t\t\traise e", "def get(self, request, *args, **kwargs):\n device = Device.objects.get(name=kwargs[\"device_name\"])\n global_settings = GoldenConfigSettings.objects.get(id=\"aaaaaaaa-0000-0000-0000-000000000001\")\n status_code, data = graph_ql_query(request, device, global_settings.sot_agg_query)\n data = json.loads(json.dumps(data))\n return Response(GraphQLSerializer(data=data).initial_data, status=status_code)", "def _get_values(self, app_name, chart_dir):\n\n raw, _ = self.helm_client.show_info(app_name, \"values\",\n chart_dir=chart_dir)\n return yaml.load(raw, yaml.SafeLoader)", "def getAPData(ap, timePerRange=3*settings.SNMPAPLAP, \n\tstartTime=None,\n\tendTime=None):\n\n\tCOUNTERTOSPEED = ['ethernetRxTotalBytes','ethernetTxTotalBytes']\n\tGETMAX = []\n\t\n\tresult = []\n\ttry:\n\n\t\tif startTime == None:\n\t\t\tstartTime = APSnapshot.objects.aggregate(Min(\"date\"))[\"date__min\"]\n\t\tif endTime == None:\n\t\t\tendTime = 
APSnapshot.objects.aggregate(Max(\"date\"))[\"date__max\"]\n\n\n\t\tsnapshots = APSnapshot.objects.filter(ap=ap, date__gte=startTime, date__lte=endTime).order_by('date')\n\t\tstartAt = snapshots[0].date\n\t\t\n\t\tvalues = {}\n\t\tfor data in snapshots[0].apsnapshotdata_set.all():\n\t\t\tvalues[data.name] = [data.value]\n\n\t\tfor snap in snapshots[1:]:\n\t\t\t# Get the data of the period\n\t\t\tif snap.date < (startAt + timePerRange):\n\t\t\t\tfor data in snap.apsnapshotdata_set.all():\n\t\t\t\t\tif data.name in values:\n\t\t\t\t\t\tvalues[data.name].append(data.value)\n\n\t\t\t# Aggregate and reset the period\n\t\t\telse:\n\t\t\t\tdata = {}\n\t\t\t\tfor attr, value in values.items():\n\t\t\t\t\tif attr in COUNTERTOSPEED:\n\t\t\t\t\t\tdata[attr] = getSpeed(value[0], value[-1], timePerRange)\n\t\t\t\t\telif attr in GETMAX:\n\t\t\t\t\t\tdata[attr] = max(value)\n\t\t\t\t\telse:\n\t\t\t\t\t\tdata[attr] = sum(value)/float(len(value))\n\n\t\t\t\tresult.append({'date':timezone.localtime(startAt + timePerRange), 'data': data})\n\n\t\t\t\t# Start new period\n\t\t\t\tstartAt = snap.date\n\t\t\t\tvalues = {}\n\t\t\t\tfor data in snap.apsnapshotdata_set.all():\n\t\t\t\t\tvalues[data.name] = [data.value]\n\n\texcept Exception as e:\n\t\tOperationalError(source=\"getAPData\", error=str(e)).save()\n\t\traise e\n\n\treturn result", "def healthcare():", "def get_patient_average():\n r = requests.get(\"http://vcm-7474.vm.duke.edu:5000/api/heart_rate/average/2\")\n print(r.text)", "def measurements_lookup(client, database):\n client.switch_database(database)\n mlist_dict = client.get_list_measurements()\n # print(\"def measurements_lookup 010:\", mlist_dict[:10])\n return mlist_dict", "def get_meter_info(apt_no):\n if apt_no in ['102A', 102]:\n apt_no = '102A'\n payload = (\"select uuid, Metadata/Instrument/SupplyType \"\n \"where Metadata/LoadLocation/FlatNumber ='\" + str(apt_no) + \"' and \"\n \"Metadata/Extra/PhysicalParameter='Power'\")\n\n r = requests.post(url, data=payload)\n # logger.debug (\"%s\",r)\n payload_body = r.json()\n # logger.debug (\"Payload:\\n%s\", payload_body)\n\n meters = []\n for i in range(0, len(payload_body)):\n meter = payload_body[i]\n\n meters.append({'uuid': meter['uuid'], 'type': meter[\n 'Metadata']['Instrument']['SupplyType']})\n\n return meters", "def _get_data(self):\n devices = []\n try:\n if not self.router_client.login():\n self.hass.states.set(f\"{DOMAIN}.statusmsg\", self.router_client.statusmsg)\n _LOGGER.warning(\"Login failed: {0}:{1}@{2}\".format(self.router_client.username, self.router_client.password,self.router_client.host))\n self.router_client.logout()\n return devices\n\n devices_json = self.router_client.get_devices_response()\n finally:\n self.router_client.logout()\n\n self.hass.states.set(f\"{DOMAIN}.scanning\", devices_json != False)\n\n if devices_json != False:\n for device in devices_json:\n # _LOGGER.debug(\"Device: {0}\".format(device))\n dev = Device(\n device['HostName'].replace('未知设备', 'Unknown'),\n device['IPAddress'],\n device['MACAddress'],\n device['Active'],\n ICONS.get(device['IconType'])\n )\n # _LOGGER.debug(\"Device: {0}\".format(dev))\n devices.append(dev)\n return devices\n else:\n return []", "def get_inventory(self, context):\n # See below some example code demonstrating how to return the resource structure\n # and attributes. 
In real life, of course, if the actual values are not static,\n # this code would be preceded by some SNMP/other calls to get the actual resource information\n '''\n # Add sub resources details\n sub_resources = [ AutoLoadResource(model ='Generic Chassis',name= 'Chassis 1', relative_address='1'),\n AutoLoadResource(model='Generic Module',name= 'Module 1',relative_address= '1/1'),\n AutoLoadResource(model='Generic Port',name= 'Port 1', relative_address='1/1/1'),\n AutoLoadResource(model='Generic Port', name='Port 2', relative_address='1/1/2'),\n AutoLoadResource(model='Generic Power Port', name='Power Port', relative_address='1/PP1')]\n\n\n attributes = [ AutoLoadAttribute(relative_address='', attribute_name='Location', attribute_value='Santa Clara Lab'),\n AutoLoadAttribute('', 'Model', 'Catalyst 3850'),\n AutoLoadAttribute('', 'Vendor', 'Cisco'),\n AutoLoadAttribute('1', 'Serial Number', 'JAE053002JD'),\n AutoLoadAttribute('1', 'Model', 'WS-X4232-GB-RJ'),\n AutoLoadAttribute('1/1', 'Model', 'WS-X4233-GB-EJ'),\n AutoLoadAttribute('1/1', 'Serial Number', 'RVE056702UD'),\n AutoLoadAttribute('1/1/1', 'MAC Address', 'fe80::e10c:f055:f7f1:bb7t16'),\n AutoLoadAttribute('1/1/1', 'IPv4 Address', '192.168.10.7'),\n AutoLoadAttribute('1/1/2', 'MAC Address', 'te67::e40c:g755:f55y:gh7w36'),\n AutoLoadAttribute('1/1/2', 'IPv4 Address', '192.168.10.9'),\n AutoLoadAttribute('1/PP1', 'Model', 'WS-X4232-GB-RJ'),\n AutoLoadAttribute('1/PP1', 'Port Description', 'Power'),\n AutoLoadAttribute('1/PP1', 'Serial Number', 'RVE056702UD')]\n\n return AutoLoadDetails(sub_resources,attributes)\n '''\n\n self._log(context, 'Begin autoload')\n resources = []\n attributes = []\n\n\n attributes.append(AutoLoadAttribute('', 'replication_address', self.get_replication_address(context)))\n attributes.append(AutoLoadAttribute('', 'connection_key', self.get_connection_key(context)))\n\n networks = self._get_newtork_interfaces(context)\n self._log(context, 'got networks')\n\n controllers = self._get_controllers(context)\n self._log(context, 'got controllers')\n ports = self._get_ports(context)\n\n model = None\n for controller in controllers:\n self._log(context, 'Processing ctrlt: ' + controller['name'] + ':' + controller['model'])\n resources.append(AutoLoadResource(model='Generic Storage Controller', name=controller['name'],\n relative_address=controller['name']))\n if model is None:\n model = controller['model']\n\n attributes.append(AutoLoadAttribute('', 'Model', model))\n\n for network in networks:\n self._log(context, 'Processing netwk: ' + network['name'] + ':' + str(network['address']))\n net_name = network['name']\n controller = net_name.split('.')[0]\n if 'vir0' in controller or 'vir1' in controller:\n attributes.append(AutoLoadAttribute('',str(controller + '_address'), str(network['address'])))\n continue\n if 'vir' in controller:\n continue\n if 'management' not in network['services']:\n continue\n resources.append(AutoLoadResource(model='Storage Network Port', name=net_name,\n relative_address=controller.upper() + '/' + str(network['address'])))\n\n for port in ports:\n if port['iqn'] is not None:\n port_name = port['name']\n controller = port_name.split('.')[0]\n resources.append(AutoLoadResource(model='iSCSI Storage Port', name=port['name'],\n relative_address=controller + '/' + port['portal']))\n attributes.append(AutoLoadAttribute(controller + '/' + port['portal'], 'iqn', port['iqn']))\n elif port['wwn'] is not None:\n port_name = port['name']\n controller = port_name.split('.')[0]\n 
resources.append(AutoLoadResource(model='FC Storage Port', name=port['name'],\n relative_address=controller + '/' + port['name'].split('.')[1]))\n attributes.append(AutoLoadAttribute(controller + '/' + port['name'].split('.')[1], 'wwn', port['wwn']))\n\n return AutoLoadDetails(resources, attributes)", "def amtool_brief(self, mess, args):\n helper = AmtoolHelper(\n alertmanager_address=self.config['server_address'])\n result = helper.get_alerts()\n return result", "def getData(language=None):", "def main(self, name):\n\t\tapi_results = [] \n\t\tparams = self.get_search_parameters(name)\n\t\tapi_results.append(self.api_connect(params))\n\t\ttime.sleep(1.0)\n\t\tkey = api_results[0]['businesses'][0]\n\t\tbusiness_information = [key['name'], self.phone_number_organizer(key), key['rating'],\\\n\t\tkey['review_count']]\n\t\treturn business_information", "def _get_network_utilization(self):\n options = self.scenario_cfg[\"options\"]\n interval = options.get('interval', 1)\n count = options.get('count', 1)\n\n cmd = \"sudo sar -n DEV %d %d\" % (interval, count)\n\n raw_result = self._execute_command(cmd)\n result = self._filtrate_result(raw_result)\n\n return result", "def get_oauth_data():", "def probe_api():\n\n info = loads(get(url).text)\n return info", "def get():" ]
[ "0.6542614", "0.6461803", "0.6203412", "0.6135864", "0.5930974", "0.5910394", "0.5888209", "0.5844207", "0.58017164", "0.5787487", "0.57435703", "0.5714978", "0.5709852", "0.564539", "0.5628239", "0.5616134", "0.5614402", "0.56101", "0.5602017", "0.55986047", "0.5594599", "0.5585295", "0.555319", "0.555319", "0.555319", "0.55047464", "0.54400617", "0.5439377", "0.54390097", "0.5437", "0.54108024", "0.5409253", "0.53943783", "0.539259", "0.5366763", "0.5348971", "0.5336694", "0.53354645", "0.5322523", "0.5322523", "0.53214484", "0.5306477", "0.5306357", "0.5293695", "0.5292136", "0.5289622", "0.5282176", "0.52788544", "0.52651113", "0.5264503", "0.52639383", "0.5257026", "0.52563804", "0.5228213", "0.52273536", "0.52263695", "0.5222667", "0.52185977", "0.5213061", "0.5211865", "0.52102023", "0.520823", "0.52043957", "0.5201907", "0.51920867", "0.5189327", "0.51883113", "0.5185219", "0.5154445", "0.5149091", "0.51479435", "0.5117948", "0.5117097", "0.51147753", "0.51086634", "0.51086634", "0.51074755", "0.51065046", "0.5106256", "0.51035696", "0.51008266", "0.5094939", "0.5090189", "0.5080007", "0.5076841", "0.5074381", "0.5072463", "0.5072011", "0.5064217", "0.5062383", "0.5061672", "0.50547254", "0.5049254", "0.5046863", "0.50416464", "0.5041614", "0.50342315", "0.5031484", "0.50244623", "0.50229454" ]
0.6843256
0
get data from appliance usage
async def __getDataFromBalance(self, account) -> dict: _LOGGER.info("Getting appliance usage data") data = {} URL_BALANCE = API_HOST + "/api/resources/account/{account}/balance?count=-1" try: async with async_timeout.timeout(TIMEOUT): response = await self.session.get(URL_BALANCE.format(account=account)) if response.status == 200: data = (await response.json())["data"] indice = [i for i, x in enumerate(data) if x["details"] == "DEBT"][ 0 ] deb = data[indice]["amount"] except Exception as e: _LOGGER.error(e) return {"balance_data": data}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def __getDataFromApplianceUsage(self, account, lastBilledDate) -> dict:\n _LOGGER.info(\"Getting appliance usage data\")\n\n JSON = {\"startDate\": str(lastBilledDate.strftime(\"%m%d%Y\"))}\n data = {}\n\n try:\n async with async_timeout.timeout(TIMEOUT):\n response = await self.session.post(\n URL_APPLIANCE_USAGE.format(account=account), json=JSON\n )\n if response.status == 200:\n electric = (await response.json())[\"data\"][\"electric\"]\n\n full = 100\n for e in electric:\n rr = round(float(e[\"percentageDollar\"]))\n if rr < full:\n full = full - rr\n else:\n rr = full\n data[e[\"category\"].replace(\" \", \"_\")] = rr\n except Exception as e:\n _LOGGER.error(e)\n\n return {\"energy_percent_by_applicance\": data}", "def getUsageInfo(self):\n return self.jsonRequest(\"/api/v1/usage\", { \"apiKey\": self._apiKey })", "def get_application_api_usage_get(self, applicationId, end, start):\n # TODO: Assuming first server is good - need to make fallback logic\n return self.session.get_any(\"{base}{request_url}\".format(base=self.servers[0],\n request_url=F\"/App/ApiUsage/{applicationId}/\"))", "def get_usage_data(username, password):\n usage_req = XfinityUsage(username, password, browser_name=\"firefox-headless\")\n return usage_req.run()", "def test_getusage(self):\n ret = {\"message\": \"No Random.org api key or api version found.\", \"res\": False}\n self.assertDictEqual(random_org.getUsage(), ret)\n\n self.assertDictEqual(\n random_org.getUsage(api_key=\"peW\", api_version=\"1\"),\n {\n \"bitsLeft\": None,\n \"requestsLeft\": None,\n \"res\": True,\n \"totalBits\": None,\n \"totalRequests\": None,\n },\n )", "def retr_devices_by_app( app ) :\n\n\t\t\t_logger.info( '...retr_devices_by_app...' )\n\t\t\toutput = []\n\t\t\ttry :\n\t\t\t\tdb = mongo.db.auth_devices\n\t\t\t\tfor device in db.find( { 'app_tags' : app } ) :\n\t\t\t\t\toutput.append({'moniker' : device['moniker'] ,\n\t\t\t\t\t\t\t\t 'description' : device['description'] ,\n\t\t\t\t\t\t\t\t 'active' : device['active'] ,\n\t\t\t\t\t\t\t\t 'device_id' : device['device_id'] ,\n\t\t\t\t\t\t\t\t 'enlisted' : device['enlisted'] ,\n\t\t\t\t\t\t\t\t 'last_kown_remote_ip' : device['last_known_remote_ip'] ,\n\t\t\t\t\t\t\t\t 'engaged' : device['engaged'] ,\n\t\t\t\t\t\t\t\t 'canononical_user' : device['canonical_user'] ,\n\t\t\t\t\t\t\t\t 'scope' : device['scope'] ,\n\t\t\t\t\t\t\t\t 'segment' : device['segment']\n\t\t\t\t\t})\n\t\t\texcept Exception as e :\n\t\t\t\t _logger.error( '...retr_devices_by_app %s' % e.message )\n\t\t\treturn jsonify({'result' : output})", "def get_data():\n pass", "def get_info():\n global PERF_APP\n archs = None\n best_arch = None\n cipher_algos = None\n hash_algos = None\n aead_algos = None\n\n cmd = PERF_APP + ' --print-info'\n\n try:\n res = subprocess.run(cmd, stdout=subprocess.PIPE, \\\n stderr=subprocess.STDOUT, \\\n env=ENVS, shell=True, check=True)\n output = res.stdout.decode('utf-8')\n except subprocess.CalledProcessError as e:\n print(\"Error (\" + str(e.returncode) + \")\")\n print(e.output.decode('utf-8'))\n sys.exit(1)\n\n lines = output.rstrip().split('\\n')\n try:\n for line in lines:\n info = line.split(':')\n if info[0] == 'Supported architectures':\n archs = info[1].split()\n if info[0] == 'Best architecture':\n best_arch = info[1].split()\n if info[0] == 'Supported cipher algorithms':\n cipher_algos = info[1].split()\n if info[0] == 'Supported hash algorithms':\n hash_algos = info[1].split()\n if info[0] == 'Supported aead algorithms':\n aead_algos = info[1].split()\n except:\n 
print(\"Error parsing --print-info output:\\n\" \\\n \"{}\".format(output), file=sys.stderr)\n\n if archs is None or best_arch is None or cipher_algos is None \\\n or hash_algos is None or aead_algos is None:\n print(\"Error parsing system and app information\", file=sys.stderr)\n sys.exit(1)\n\n return archs, best_arch, cipher_algos, hash_algos, aead_algos", "def usage(self, host):", "def getInfo():", "def print_app_data(self):\n print(\"===================================\")\n print(\"== RESULTS: ==\")\n print(\"===================================\")\n\n # Analog application results\n print(\"--------------------------\")\n print(\"-- Analog applications --\")\n print(\"--------------------------\")\n print(\"Number of analog application processed: {}\".format(len(self.analog_apps)))\n if (self.verbose):\n for app in self.analog_apps:\n print(\" Application data:\")\n print(\" - - - - - - - - - - - - -\")\n print(' - EPICS PREFIX: MPLN:{}:{}:{}'.format(app[\"link_node_area\"].upper(), app[\"link_node_location\"].upper(), app[\"card_index\"]))\n print(\" - App ID : {}\".format(app[\"app_id\"]))\n print(\" - Cpu name : {}\".format(app[\"cpu_name\"]))\n print(\" - Crate ID : {}\".format(app[\"crate_id\"]))\n print(\" - Slot number : {}\".format(app[\"slot_number\"]))\n print(\" - Link node name : {}\".format(app[\"link_node_name\"]))\n print(\" - Link node area : {}\".format(app[\"link_node_area\"]))\n print(\" - Link node location : {}\".format(app[\"link_node_location\"]))\n print(\" - Card index : {}\".format(app[\"card_index\"]))\n print(\" - Number of devices : {}\".format(len(app[\"devices\"])))\n for device in app[\"devices\"]:\n print(\" Device data:\")\n print(\" .....................\")\n print(\" - EPICS PREFIX: {}:{}:{}\".format(device[\"type_name\"], device[\"area\"], device[\"position\"]))\n print(\" - Type name : {}\".format(device[\"type_name\"]))\n print(\" - Bay number : {}\".format(device[\"bay_number\"]))\n print(\" - Channel number : {}\".format(device[\"channel_number\"]))\n print(\" - Area : {}\".format(device[\"area\"]))\n print(\" - Position : {}\".format(device[\"position\"]))\n print(\" - Number of faults : {}\".format(len(device[\"faults\"])))\n for fault_id,fault_data in device[\"faults\"].items():\n print(\" Fault data:\")\n print(\" . . . . . . . . . . . . \")\n print(\" - EPICS PREFIX: {}_T{}\".format(fault_data[\"name\"], fault_data[\"bit_positions\"][0]))\n print(\" - ID : {}\".format(fault_id))\n print(\" - Name : {}\".format(fault_data[\"name\"]))\n print(\" - Description : {}\".format(fault_data[\"description\"]))\n print(\" - Bit positions : {}\".format(fault_data[\"bit_positions\"]))\n print(\" . . . . . . . . . . . . 
\")\n print(\" .....................\")\n print(\" - - - - - - - - - - - - -\")\n print(\"\")\n print(\"--------------------------\")\n\n # Digital application result\n print(\"----------------------------\")\n print(\"-- Digital applications --\")\n print(\"----------------------------\")\n print(\"Number of digital application processed: {}\".format(len(self.digital_apps)))\n if (self.verbose):\n for app in self.digital_apps:\n print(\" Application data:\")\n print(\" - - - - - - - - - - - - -\")\n print(' - EPICS PREFIX: MPLN:{}:{}:{}'.format(app[\"link_node_area\"].upper(), app[\"link_node_location\"].upper(), app[\"card_index\"]))\n print(\" - App ID : {}\".format(app[\"app_id\"]))\n print(\" - Cpu name : {}\".format(app[\"cpu_name\"]))\n print(\" - Crate ID : {}\".format(app[\"crate_id\"]))\n print(\" - Slot number : {}\".format(app[\"slot_number\"]))\n print(\" - Link node name : {}\".format(app[\"link_node_name\"]))\n print(\" - Link node area : {}\".format(app[\"link_node_area\"]))\n print(\" - Link node location : {}\".format(app[\"link_node_location\"]))\n print(\" - Card index : {}\".format(app[\"card_index\"]))\n print(\" - Number of devices : {}\".format(len(app[\"devices\"])))\n for device in app[\"devices\"]:\n print(\" Device data:\")\n print(\" .....................\")\n print(\" - EPICS PREFIX: {}:{}:{}\".format(device[\"type_name\"], device[\"area\"], device[\"position\"]))\n print(\" - Type name : {}\".format(device[\"type_name\"]))\n print(\" - Area : {}\".format(device[\"area\"]))\n print(\" - Position : {}\".format(device[\"position\"]))\n print(\" - Number of inputs : {}\".format(len(device[\"inputs\"])))\n for input in device[\"inputs\"]:\n print(\" Input data:\")\n print(\" . . . . . . . . . . . . \")\n print(\" - EPICS PREFIX: {}\".format(input[\"name\"]))\n print(\" - Name : {}\".format(input[\"name\"]))\n print(\" - Bit position : {}\".format(input[\"bit_position\"]))\n print(\" - Zero name : {}\".format(input[\"zero_name\"]))\n print(\" - One name : {}\".format(input[\"one_name\"]))\n print(\" - Alarm state : {}\".format(input[\"alarm_state\"]))\n print(\" - Debounce : {}\".format(input[\"debounce\"]))\n print(\" . . . . . . . . . . . . 
\")\n print(\" .....................\")\n print(\" - - - - - - - - - - - - -\")\n print(\"\")\n print(\"----------------------------\")\n\n\n print(\"===================================\")\n\n print('Found {} link nodes:'.format(len(self.link_nodes)))\n for k,v in self.link_nodes.items():\n print('{}: {}'.format(k, v['type']))", "def get_apk(self):", "def get_discovery_summary():\n pass", "def usage_information(self):\n return self._usage_information", "def gather_metric(self):\n result = self._shell.run(self.ADB_COMMAND)\n stdout = result.stdout.splitlines()\n adb_version = stdout[0].split()[-1]\n # Revision information will always be in next line\n adb_revision = stdout[1].split()[1]\n\n response = {\n self.ADB_VERSION: adb_version,\n self.ADB_REVISION: adb_revision\n }\n return response", "def device_overview(self):\r\n data = {}\r\n\r\n # GET DATA\r\n token = request.headers.get('token')\r\n userid = request.headers.get('userid')\r\n vessel_id = request.args.get('vessel_id')\r\n epoch_format = request.args.get('format')\r\n\r\n # CHECK TOKEN\r\n if not self.validate_token(token, userid):\r\n data['alert'] = \"Invalid Token\"\r\n data['status'] = 'Failed'\r\n return self.return_data(data)\r\n\r\n alarm_types = self.get_alarm_types()\r\n\r\n ats = self.get_alarm_trigger()\r\n\r\n devices = self.couch_query.get_all_devices(vessel_id)\r\n\r\n standard_time = self.epoch_day(time.time())\r\n\r\n epoch_time = time.time()\r\n\r\n temp_data = []\r\n\r\n start_date = self.get_start_date(epoch_format)\r\n\r\n if not start_date and epoch_format not in [\"day\", \"hours\"]:\r\n\r\n data['alert'] = \"Invalid format!\"\r\n data['status'] = 'Failed'\r\n\r\n return self.return_data(data)\r\n\r\n for device in devices:\r\n\r\n if device['doc']['device'] in ['PARAMETERS', 'NTWCONF', 'NTWPERF1']:\r\n\r\n continue\r\n\r\n row = {}\r\n row['device'] = device['doc']['device']\r\n row['name'] = device['doc']['device']\r\n row['Alert'] = 0\r\n row['Critical'] = 0\r\n row['Warning'] = 0\r\n row['Info'] = 0\r\n row['Debug'] = 0\r\n for atrigger in ats:\r\n\r\n trigger_type = self.get_alarm_type_name(alarm_types, atrigger['alarm_type_id'])\r\n\r\n at_id = atrigger['alarm_trigger_id']\r\n device_id = device['id']\r\n\r\n datas = self.calc.calculate_trigger([at_id], standard_time,\r\n epoch_time, vessel_id=vessel_id,\r\n device_id=device_id)\r\n\r\n if not datas == \"No Alarm Trigger found.\":\r\n\r\n datas_index_0 = datas[0]\r\n len_datas = datas_index_0['results']\r\n if len_datas:\r\n\r\n row[trigger_type] = 1\r\n\r\n if epoch_format in ['week', 'month', \"quarter\", 'annual']:\r\n\r\n sql_str = \"SELECT COUNT(alarm_trigger_id) FROM alarm_data \"\r\n sql_str += \"WHERE device_id='{0}' \".format(device_id)\r\n sql_str += \"AND epoch_date > {0} \".format(start_date)\r\n sql_str += \"AND epoch_date < {0}\".format(epoch_time)\r\n\r\n res = self.postgres.query_fetch_one(sql_str)\r\n\r\n row[trigger_type] = row[trigger_type] + res['count']\r\n\r\n temp_data.append(row)\r\n\r\n final_data = {}\r\n final_data['data'] = temp_data\r\n final_data['status'] = 'ok'\r\n\r\n return self.return_data(final_data)", "def get_devices_summary():\n\n # This function was created to replace get_devices_information\n # because it wasn't detecting virtual systems in Palo Alto Virtual Systems\n global nipper_xml\n devices = {}\n headings = []\n\n # Add the table headings to a list\n for h in nipper_xml.findall(\"./summary/table/[@ref='SCOPE.AUDITDEVICELIST.TABLE']/headings/heading\"):\n if h not in headings:\n headings.append(h.text)\n\n 
for device in nipper_xml.findall(\"./summary/table/[@ref='SCOPE.AUDITDEVICELIST.TABLE']/tablebody/tablerow\"):\n values = []\n for i in device.findall('./tablecell/item'):\n if i not in values:\n values.append(i.text)\n if DEBUG:\n print \"\\t\" + note + \"%s: %s\" % (headings[headings.index('Name')], values[headings.index('Name')])\n print \"\\t\" + note + \"%s: %s\" % (headings[headings.index('Device')], values[headings.index('Device')])\n print \"\\t\" + note + \"%s: %s\" % (headings[headings.index('OS')], values[headings.index('OS')].split(\" \")[0])\n print \"\\t\" + note + \"%s: %s\" % (headings[headings.index('OS')], values[headings.index('OS')].split(\" \")[1])\n devices[values[headings.index('Name')]] = {'name': values[headings.index('Name')],\n 'type': values[headings.index('Device')],\n 'os': values[headings.index('OS')].split(' ')[0],\n 'osversion': values[headings.index('OS')].split(' ')[1]\n }\n\n if DEBUG:\n print info + \"Device Object:\"\n print devices\n raw_input(warn + \"Press enter to continue\")\n return devices", "def getEnergyUsage():\n energy_data = asyncio.run(plug.get_emeter_realtime())\n\n return energy_data", "def get_data():\n return", "def data_setup_appliances():\n appliance_list = []\n appliance_list.append(helper_setup_appliances(APPLIANCE_TYPE_NAME_BASIC, \"appliance1\", gpio_pin_id=None))\n appliance_list.append(helper_setup_appliances(APPLIANCE_TYPE_NAME_BASIC, \"appliance2\", gpio_pin_id=None))\n appliance_list.append(helper_setup_appliances(APPLIANCE_TYPE_NAME_BASIC, \"appliance3\", gpio_pin_id=None))\n appliance_list.append(helper_setup_appliances(APPLIANCE_TYPE_NAME_BASIC, \"appliance4\", gpio_pin_id=None))\n appliance_list.append(helper_setup_appliances(APPLIANCE_TYPE_NAME_BASIC, \"appliance5\", gpio_pin_id=None))\n appliance_list.append(helper_setup_appliances(APPLIANCE_TYPE_NAME_RELAY, \"gpio_appliance1\", gpio_pin_id=13))\n appliance_list.append(helper_setup_appliances(APPLIANCE_TYPE_NAME_RELAY, \"gpio_appliance2\", gpio_pin_id=15))\n return appliance_list", "def get_data(self):", "def _get_data(self):\n c = Connector(self.host, self.username, self.password)\n return c.getLanDevices()", "def get_data():\n pass", "def get_data():\n pass", "def get_data():\n pass", "def user_sends_get_call_to_the_devices():\n web_app.list_devices()", "def info_equipment_get():\n equipment = _equipment_by_group()\n return equipment, 200", "def get(self):\n try:\n log.debug(\"Device info : \")\n #get the payload to influx DB\n url = \"http://localhost:8086/query\"\n querystring = {\"pretty\": \"true\", \"db\": \"IOT\",\n \"q\":\"SELECT DISTINCT(deviceId) FROM(SELECT deviceId,q1 FROM \\\"ttd_devices\\\" ) \" }\n response = requests.request(\"GET\", url, params=querystring)\n r_d=json.loads(response.text)\n result_d=[]\n for rec in r_d['results'][0]['series']:\n for element in rec['values']:\n result_d.append(element[1])\n result={}\n result['status'] = 1\n result['message']=result_d\n return_status = 200\n except ValueError as e:\n result = {}\n log.exception('Value Exception while fetching device list')\n result['status'] = 0\n return_status = 400\n result['message'] = e.args[0]\n except :\n result = {}\n log.exception('Exception while fetching the device data')\n return_status = 500\n result['status'] = 0\n result['message'] = 'Internal Error has occurred while fetching devie data'\n finally:\n resp = Response(json.dumps(result), status=return_status, mimetype=\"application/json\")\n return resp", "def retrieve_dial_data(app_name):\n # NOTE: the reference code 
store the file in the application folder, we read the file in our data folder\n # perhaps other changes will be needed to allow USE_ADDITIONAL_DATA feature to work\n file_path = 'dial_data/' + app_name + '.json'\n if not fileops.file_exists(file_path):\n return {}\n data = fileops.load_file_def(file_path)\n return json.loads(data)", "def show(ctx, appeui):\n if '.' in appeui:\n appeui = str(hexStringInt(str(appeui)))\n \n # Form the url and payload\n server = ctx.obj['server']\n payload = {'token': ctx.obj['token']}\n url = 'http://{}/api/v{}'.format(server, str(version))\n url += '/apps' if appeui == 'all' else '/app/{}'.format(appeui)\n \n # Make the request\n data = restRequest(server, url, 'get', payload, 200)\n if data is None:\n return\n \n # Single application\n if appeui != 'all':\n a = data\n indent = ' ' * 10\n if a['appinterface_id'] == 0:\n a['appinterface_id'] = '-'\n if a['domain'] is None:\n a['domain'] = '-'\n click.echo('Application EUI: ' + euiString(a['appeui']))\n click.echo('{}name: {}'.format(indent, a['name']))\n click.echo('{}domain: {}'.format(indent, a['domain']))\n click.echo('{}fport: {}'.format(indent, a['fport']))\n click.echo('{}interface: {}'.format(indent, a['appinterface_id']))\n if a['appinterface_id'] != '-':\n click.echo('{}Properties:'.format(indent))\n properties = sorted(a['properties'].values(), key=lambda k: k['port'])\n for p in properties:\n click.echo('{} {} {}:{}'.format(indent, p['port'], p['name'], p['type']))\n return\n \n # All applications\n click.echo('{:14}'.format('Application') + \\\n '{:24}'.format('AppEUI') + \\\n '{:15}'.format('Domain') + \\\n '{:6}'.format('Fport') + \\\n '{:10}'.format('Interface'))\n for i,a in data.iteritems():\n if a['appinterface_id'] == 0:\n a['appinterface_id'] = '-'\n if a['domain'] is None:\n a['domain'] = '-'\n click.echo('{:13.13}'.format(a['name']) + ' ' + \\\n '{:23}'.format(euiString(a['appeui'])) + ' ' + \\\n '{:14.14}'.format(a['domain']) + ' ' + \\\n '{:5.5}'.format(str(a['fport'])) + ' ' + \\\n '{:10}'.format(str(a['appinterface_id'])))", "def get_usage_data(self):\n with self._lock:\n data_copy = self._data.copy()\n return data_copy", "def get_listing_information():\n\n #getting the guidebox_id variable from show_page.html\n guidebox_id = request.args.get(\"guidebox_id\")\n\n #get the show from the database\n show = Show.find_show_with_guidebox_id(guidebox_id)\n\n #get show title from Guidebox so it can be used in the OnConnect title search url \n show_title = str(show.title)\n\n #get OnConnect seriesId\n series_id = onconnect_search_series_id(show_title)\n\n #obtaining listing information for a 24 hour period from the current time\n airings = onconnect_search_airings(series_id)\n\n return jsonify(airings)", "def available(self, app):\n return self.xlist(\"testfor\", app)[0]", "def GetData(self):\r\n if self.Error == False:\r\n Extra = {}\r\n try:\r\n result = {}\r\n temp = self.ScrapeMainWebpage()\r\n if temp != None:\r\n result.update(temp)\r\n temp = self.ScrapeParameters1Webpage()\r\n if temp != None:\r\n result.update(temp)\r\n temp = self.ScrapeParameters2Webpage()\r\n if temp != None:\r\n result.update(temp)\r\n temp = self.ScrapeStatusWebpage()\r\n if temp != None:\r\n result.update(temp)\r\n sqlArray = {}\r\n sqlArray[self.deviceDescr] = {}\r\n sqlArray[self.deviceDescr][self.devNumber] = {}\r\n sqlArray[self.deviceDescr][self.devNumber][\"General\"] = result\r\n sqlArray[self.deviceDescr][self.devNumber][\"_ExtractInfo\"] = {}\r\n 
sqlArray[self.deviceDescr][self.devNumber][\"_ExtractInfo\"][\"ExtractTime\"] = time.time()\r\n sqlArray[\"ReadError\"] = False \r\n return sqlArray\r\n \r\n except Exception as e: \r\n self.log.printError(\"ERROR in Retreiving Seatel VSAT Data,%s Module Error\" % sys._getframe().f_code.co_name) \r\n self.log.printError( str(e))\r\n self.Error = True\r\n Extra[\"ReadError\"] = True\r\n return Extra\r\n else:\r\n self.log.printWarning(\"%s skipped due to previous failure\" % sys._getframe().f_code.co_name)\r\n return None", "def __get_data_from_store(term):\n url_search = PLAY_STORE_URL + \"/search\"\n response = requests.get(url_search, {'c': 'apps', 'q': term})\n soup = BeautifulSoup(response.content, \"html.parser\")\n apps = soup.find_all(\"div\", {\"class\": \"card no-rationale square-cover apps small\"})\n\n result = []\n print(result)\n for i, app in enumerate(apps):\n app_details_basic = app.find(\"div\", {\"class\": \"details\"})\n app_id = app['data-docid']\n app_data = {\n 'uid': app_id,\n 'name': app_details_basic.find(\"a\", {\"class\": \"title\"})['title'].strip().encode('utf-8'),\n 'dev_name': app_details_basic.find(\"a\", {\"class\": \"subtitle\"})['title'].strip(),\n 'icon_url': \"http://\" + app.find(\n \"div\", {\"class\": \"cover-inner-align\"}).img['data-cover-large'].strip(\"//\")\n }\n\n url_app_detail = PLAY_STORE_URL + \"/apps/details\"\n response = requests.get(url_app_detail, {'id': app_id})\n soup = BeautifulSoup(response.content, \"html.parser\")\n\n app_data.update({\n 'category': soup.find(\"a\", {\"itemprop\": \"genre\"}).text,\n 'description': soup.find(\"div\", {\"itemprop\": \"description\"}).text.strip().encode('utf-8'),\n \n })\n\n \n dev_links = soup.find_all(\"a\", {\"class\": \"dev-link\", \"rel\": \"nofollow\"})\n if dev_links:\n for dev_link in dev_links:\n if \"mailto\" in dev_link['href']:\n app_data['dev_email'] = dev_link['href'].replace(\"mailto:\", \"\")\n break\n\n result.append(app_data)\n\n if i + 1 == SEARCH_RESULT_COUNT:\n break\n print(result)\n return result", "def get_usage(self):\r\n return self.box_usage", "async def get_device_data(self):\n pass", "def getatt(self):\n err = True\n retry = 0\n while err and retry<2:\n try:\n r = requests.get(self.url+'ATT?\\n')\n res = float(r.text)\n err = False\n except:\n err = True\n time.sleep(1)\n retry += 1\n res = nan\n return res", "def get_data(self):\n pass", "def get_data(self):\n pass", "def application_list(p_engine, p_username, format, appname):\n\n ret = 0\n\n enginelist = get_list_of_engines(p_engine, p_username)\n\n if enginelist is None:\n return 1\n\n data = DataFormatter()\n data_header = [\n (\"Engine name\", 30),\n (\"Application name\", 30),\n ]\n data.create_header(data_header)\n data.format_type = format\n for engine_tuple in enginelist:\n engine_obj = DxMaskingEngine(engine_tuple)\n if engine_obj.get_session():\n continue\n applist = DxApplicationList()\n # load all objects\n applist.LoadApplications()\n\n if appname is None:\n applications = applist.get_allref()\n else:\n applications = applist.get_applicationId_by_name(appname)\n if len(applications) == 0:\n ret = ret + 1\n\n for appref in applications:\n appobj = applist.get_by_ref(appref)\n data.data_insert(\n engine_tuple[0],\n appobj.application_name\n )\n\n print(\"\")\n print (data.data_output(False))\n print(\"\")\n \n \n return ret", "def get_usage(self, start=None, end=None):\n return self.manager.get_usage(self, start=start, end=end)", "def _get_dapall_from_api(self):\n\n url = 
marvin.config.urlmap['api']['dapall']['url']\n\n url_full = url.format(name=self.plateifu,\n bintype=self.bintype.name,\n template=self.template.name)\n\n try:\n response = self._toolInteraction(url_full)\n except Exception as ee:\n raise MarvinError('found a problem while getting DAPall: {0}'.format(str(ee)))\n\n if response.results['error'] is not None:\n raise MarvinError('found a problem while getting DAPall: {}'\n .format(str(response.results['error'])))\n\n data = response.getData()\n\n return data['dapall_data']", "def usage():\n return _usage", "def summary(app):\n click.echo(get_summary(app))", "def test_duo_application_get(self):\n pass", "def get_data(self):\r\n pass", "def usage(where=sys.stdout):\n print('Gather Holding IDs via REST using a list of MMS IDs.',\n file=where) \n print('Usage:', file=where)\n print(' west2_gather_Holding_IDs.py <file.txt> <APIKEY>', file=where)\n print('Where:', file=where)\n print(' file.txt List of MSS IDs (one / line)',\n file=where)\n print(' APIKEY API key for accessing Alma REST APIs',\n file=where)\n print('Output:', file=where)\n print(' Generates a datestamped text file: holding-and-mss-ids.<date>.txt',\n file=where)\n print(' consisting of lines holding_id<tab>mms_id', file=where)", "def get_usages(self):\n return self.client._perform_json(\"GET\", \"/projects/%s/managedfolders/%s/usages\" % (self.project_key, self.odb_id))", "def get_usage(self):\n return self.box_usage", "def get_appliances(self):\n if self.appliances is not None:\n return self.appliances\n self.appliances = Appliance.objects.filter(user=self.request.user)\n return self.appliances", "def usage():\r\n print 'Usage: collect_logs.py testrun \"env1, env2,...\" \"oscounters, applogs, gclogs, traces\"'\r\n print \"testrun = Name of the test execution e.g. 
12032713\"\r\n print \"environments = Name of the environment defined in the environments.ini file\"\r\n print \"logtypes = List types of logfiles to collect\"", "def AcquiredData (self) :\n\t\treturn self.run(\"AcquiredData\")", "def get(category, page=1, per_page=5):\r\n\r\n count = n_count(category)\r\n\r\n sql = text('''SELECT app.id, app.name, app.short_name, app.description,\r\n app.info, app.created, app.category_id, \"user\".fullname AS owner,\r\n featured.app_id as featured\r\n FROM \"user\", task, app\r\n LEFT OUTER JOIN category ON app.category_id=category.id\r\n LEFT OUTER JOIN featured ON app.id=featured.app_id\r\n WHERE\r\n category.short_name=:category\r\n AND app.hidden=0\r\n AND \"user\".id=app.owner_id\r\n AND app.info LIKE('%task_presenter%')\r\n AND task.app_id=app.id\r\n GROUP BY app.id, \"user\".id, featured.app_id ORDER BY app.name\r\n OFFSET :offset\r\n LIMIT :limit;''')\r\n\r\n offset = (page - 1) * per_page\r\n results = db.engine.execute(sql, category=category, limit=per_page, offset=offset)\r\n apps = []\r\n for row in results:\r\n app = dict(id=row.id,\r\n name=row.name, short_name=row.short_name,\r\n created=row.created,\r\n description=row.description,\r\n owner=row.owner,\r\n featured=row.featured,\r\n last_activity=pretty_date(last_activity(row.id)),\r\n last_activity_raw=last_activity(row.id),\r\n overall_progress=overall_progress(row.id),\r\n info=dict(json.loads(row.info)))\r\n apps.append(app)\r\n return apps, count", "def extract_programs():\n if settings.XPRO_CATALOG_API_URL:\n return requests.get(settings.XPRO_CATALOG_API_URL, timeout=20).json()\n return []", "def getStockData():\n pass", "def data():\n return volumes_fetchers.get_json_data()", "def get_antivirus_info():\n query = {\n \"type\": \"op\",\n \"cmd\": \"<request><anti-virus><upgrade><info></info></upgrade></anti-virus></request>\",\n }\n\n return __proxy__[\"panos.call\"](query)", "def test_mock_datasource_usages(self):\n account1 = self.test_data.accounts[0]\n meter = account1.meters[0]\n usage = meter.usages[0]\n self.assertIsInstance(usage, Usage)\n self.assertEqual(usage.PK, 6)\n self.assertEqual(usage.UsageActualName, \"test_usage\")\n self.assertEqual(usage.UsageAmount, Decimal(50.0))\n self.assertEqual(usage.RateComponent, \"test_rate_component\")\n self.assertEqual(usage.EnergyUnit, \"test_energy_unit\")\n self.assertEqual(usage.IntervalStart, date(2016, 1, 1))\n self.assertEqual(usage.IntervalEnd, date(2016, 2, 1))", "def get_data_qos(args):\n diff_data(args, \"qos\")", "def getAppInfo(self):\n data = self._client.Application.find(self.app_id)\n return data", "def get_device_info(target_project_arn):\n try:\n device_info = device_farm.list_devices(\n arn=target_project_arn,\n filters=[\n {\n \"attribute\": \"PLATFORM\",\n \"operator\": \"EQUALS\",\n \"values\": ['ANDROID', ]\n },\n {\n \"attribute\": \"OS_VERSION\",\n \"operator\": \"GREATER_THAN_OR_EQUALS\",\n \"values\": ['9', ]\n },\n {\n \"attribute\": \"MANUFACTURER\",\n \"operator\": \"EQUALS\",\n \"values\": ['Google', ]\n },\n {\n \"attribute\": \"AVAILABILITY\",\n \"operator\": \"EQUALS\",\n \"values\": ['HIGHLY_AVAILABLE', ]\n },\n {\n \"attribute\": \"FLEET_TYPE\",\n \"operator\": \"EQUALS\",\n \"values\": ['PUBLIC', ]\n }\n ])['devices']\n\n if device_info is not None:\n device_arn = device_info[0]['arn']\n device_name = device_info[0]['name']\n device_manufacture = device_info[0]['manufacturer']\n device_model = device_info[0]['model']\n device_model_id = device_info[0]['modelId']\n device_type = 
device_info[0]['formFactor']\n device_platform = device_info[0]['platform']\n device_os = device_info[0]['os']\n device_visibility = device_info[0]['fleetType']\n device_availability = device_info[0]['availability']\n\n print('Device Name - {} with Manufacture {}, model {}, modelId {} & type {}'.format(\n device_name,\n device_manufacture,\n device_model,\n device_model_id,\n device_type\n )\n )\n print('Device Platform {} with OS {}, visibility {} & availability - {} '.format(\n device_platform,\n device_os,\n device_visibility,\n device_availability\n )\n )\n\n if device_availability == TARGET_AVAILABILITY:\n print('AWS setup is complete')\n else:\n print('Problem, device is not available')\n else:\n print('Problem finding device info')\n\n except IndexError:\n print('Problem finding device from pool {}'.format(device_info))", "def access():", "def stats(self):\r\n\t\tdata = self._get('global/', query=None)\r\n\t\treturn data", "def main():\n results = []\n results.extend(check_mounts())\n results.extend(diskusage())\n return results", "def get_app(self, app_name, ns_name):\n\n status, _ = self.helm_client.status(app_name, namespace=ns_name)\n values, _ = self.helm_client.get_values(app_name, namespace=ns_name)\n release_data = {\"status\": status, \"values\": values}\n\n schema_path = Path(\"%s/%s/values.schema.json\"\n % (self._get_ns_dir(ns_name), app_name))\n if schema_path.exists():\n schema = json.loads(schema_path.read_text())\n release_data[\"schema\"] = schema\n\n return release_data", "def query(self):\n self._measurements[self.KEY_USAGE].df = self.fetch_data_usage()", "def diagnostics(self, oid):\n path = '/servers/%s/diagnostics' % oid\n res = self.client.call(path, 'GET', data='', token=self.manager.identity.token)\n self.logger.debug('Shows basic usage data for server %s: %s' % \n (oid, truncate(res)))\n return res[0]", "def test_get_us_daily_data(self):\n dag = self.dagbag.get_dag(self.dag_id)\n extract_task = dag.get_task('extract')\n resp = self.extract.getDailyUSDataFromAPI()\n self.assertIsNotNone(resp)\n self.assertEqual(type(resp), list)", "def getDeviceInfo():\n url = \"https://api.roblox.com/reference/deviceinfo\"\n r = requests.get(url)\n j = json.loads(r.text)\n return j", "def who():\n cleanup()\n return {'available': userlist(), 'eta': data['etas'], 'etd': data['etds'], 'lastlocation': data['lastlocation'], 'ceitloch': ceitloch(), 'reminder': data['reminder']}", "def get_app_info(self, name):\n with hide(\"output\", \"running\"):\n result = local(\"redis-cli -h {host} -p 6379 -n {db} hgetall {name}\".format(\n host=self.host, name=name, db=REDIS_APPLICATION_DB_NUM), capture=True)\n\n if len(result.stdout) > 0:\n splits = result.stdout.split(\"\\n\")\n fmt_result = dict([(splits[i], splits[i+1])\n for i in range(0, len(splits), 2)])\n pp = pprint.PrettyPrinter(indent=2)\n pp.pprint(fmt_result)\n return fmt_result\n else:\n warn(\"Application \\\"%s\\\" not found\" % name)\n return None", "def get_patient_status():\n r = requests.get(\"http://vcm-7474.vm.duke.edu:5000/api/heart_rate/3\")\n print(r.text)", "def usage(self):\r\n return usage.Usage(self)", "def get_info(self):\n pass", "def get_info(self):\n pass", "def getTheData(self, dev):\n if self.debugLevel >= 2 and self.debug:\n self.debugLog(u\"getTheData FrontViewAPI method called.\")\n\n # dev.updateStateOnServer('deviceIsOnline', value=True, uiValue=\"Download\")\n try:\n url = 'http://' + dev.pluginProps['sourceXML'] + '/FrontView'\n r = requests.get(url,timeout=5)\n result = r.json()\n if 
self.debugLevel >= 2 and self.debug:\n self.debugLog(u\"Result:\" + unicode(result))\n self.WaitInterval = 1\n dev.updateStateOnServer('deviceIsOnline', value=True, uiValue=\"Online\")\n dev.setErrorStateOnServer(None)\n # dev.updateStateOnServer('deviceTimestamp', value=t.time())\n return result\n\n except Exception as error:\n\n indigo.server.log(u\"Error connecting to Device:\" + dev.name)\n self.WaitInterval = 60\n if self.debugLevel >= 2 and self.debug:\n self.debugLog(u\"Device is offline. No data to return. \")\n dev.updateStateOnServer('deviceIsOnline', value=False, uiValue=\"Offline\")\n # dev.updateStateOnServer('deviceTimestamp', value=t.time())\n dev.setErrorStateOnServer(u'Offline')\n result = \"\"\n return result", "def device_count():\n apipath = \"/targets/devices\"\n url = SERVER + apipath\n params = {\n 'q': '(deviceType:ASA)',\n 'agg': 'count'}\n headers = {\n 'Accept': \"application/json\",\n 'Content-Type': \"application/json\",\n 'Authorization': \"bearer {}\".format(token)}\n response = requests.get(url, verify=False, stream=True, headers=headers, params=params)\n getstatuscode = response.status_code\n getresponse = response.json()\n if getstatuscode == 200:\n return getresponse\n else:\n response.raise_for_status()", "def appdata(appname):\n z = Zap(appname)\n z.appdata(stdout=True)", "def test_retrieve_1_by_all(self):\n swa = frontend.SupplyWinApi()\n query_dict = dict(\n dev=\"rrenaud\",\n targets=\"Council Room\",\n interaction=\"\",\n unconditional=\"true\",\n )\n\n card_stats = swa.retrieve_data(query_dict)\n\n self.assertEquals(len(card_stats), 1)\n\n self.assertEquals(card_stats[0]['card_name'], 'Council Room')\n self.assertEquals(len(card_stats[0]['condition']), 0)\n\n json = swa.readable_json_card_stats(card_stats)\n self.assertEquals(json[0:14], '[{\"card_name\":')", "def games_usage(parsed_args):\n if parsed_args.verb == \"GET\":\n filter_dict = {\n \"game_type\": parsed_args.game_type,\n \"genre\": parsed_args.genre,\n \"keywords\": parsed_args.keywords,\n \"mechanic\": parsed_args.mechanic\n }\n df = get_games(parsed_args.id, filter_dict)\n if parsed_args.function == \"FILTERS\":\n df = get_game_filters(df)\n else:\n df = post_games(df, parsed_args.sort_by, parsed_args.weighting)\n return df", "def get_data(self, label):\n self.application.get_data(label)", "def get_usage_stats(self) -> UsageStats:\n return self._usage", "def get(cls, service, name=\"\", option_=\"\") :\n\t\ttry :\n\t\t\tobj = audit_stats()\n\t\t\tif not name :\n\t\t\t\tresponse = obj.stat_resources(service, option_)\n\t\t\treturn response\n\t\texcept Exception as e:\n\t\t\traise e", "def get(self, request, *args, **kwargs):\n device = Device.objects.get(name=kwargs[\"device_name\"])\n global_settings = GoldenConfigSettings.objects.get(id=\"aaaaaaaa-0000-0000-0000-000000000001\")\n status_code, data = graph_ql_query(request, device, global_settings.sot_agg_query)\n data = json.loads(json.dumps(data))\n return Response(GraphQLSerializer(data=data).initial_data, status=status_code)", "def _get_values(self, app_name, chart_dir):\n\n raw, _ = self.helm_client.show_info(app_name, \"values\",\n chart_dir=chart_dir)\n return yaml.load(raw, yaml.SafeLoader)", "def healthcare():", "def getAPData(ap, timePerRange=3*settings.SNMPAPLAP, \n\tstartTime=None,\n\tendTime=None):\n\n\tCOUNTERTOSPEED = ['ethernetRxTotalBytes','ethernetTxTotalBytes']\n\tGETMAX = []\n\t\n\tresult = []\n\ttry:\n\n\t\tif startTime == None:\n\t\t\tstartTime = APSnapshot.objects.aggregate(Min(\"date\"))[\"date__min\"]\n\t\tif 
endTime == None:\n\t\t\tendTime = APSnapshot.objects.aggregate(Max(\"date\"))[\"date__max\"]\n\n\n\t\tsnapshots = APSnapshot.objects.filter(ap=ap, date__gte=startTime, date__lte=endTime).order_by('date')\n\t\tstartAt = snapshots[0].date\n\t\t\n\t\tvalues = {}\n\t\tfor data in snapshots[0].apsnapshotdata_set.all():\n\t\t\tvalues[data.name] = [data.value]\n\n\t\tfor snap in snapshots[1:]:\n\t\t\t# Get the data of the period\n\t\t\tif snap.date < (startAt + timePerRange):\n\t\t\t\tfor data in snap.apsnapshotdata_set.all():\n\t\t\t\t\tif data.name in values:\n\t\t\t\t\t\tvalues[data.name].append(data.value)\n\n\t\t\t# Aggregate and reset the period\n\t\t\telse:\n\t\t\t\tdata = {}\n\t\t\t\tfor attr, value in values.items():\n\t\t\t\t\tif attr in COUNTERTOSPEED:\n\t\t\t\t\t\tdata[attr] = getSpeed(value[0], value[-1], timePerRange)\n\t\t\t\t\telif attr in GETMAX:\n\t\t\t\t\t\tdata[attr] = max(value)\n\t\t\t\t\telse:\n\t\t\t\t\t\tdata[attr] = sum(value)/float(len(value))\n\n\t\t\t\tresult.append({'date':timezone.localtime(startAt + timePerRange), 'data': data})\n\n\t\t\t\t# Start new period\n\t\t\t\tstartAt = snap.date\n\t\t\t\tvalues = {}\n\t\t\t\tfor data in snap.apsnapshotdata_set.all():\n\t\t\t\t\tvalues[data.name] = [data.value]\n\n\texcept Exception as e:\n\t\tOperationalError(source=\"getAPData\", error=str(e)).save()\n\t\traise e\n\n\treturn result", "def get_patient_average():\n r = requests.get(\"http://vcm-7474.vm.duke.edu:5000/api/heart_rate/average/2\")\n print(r.text)", "def measurements_lookup(client, database):\n client.switch_database(database)\n mlist_dict = client.get_list_measurements()\n # print(\"def measurements_lookup 010:\", mlist_dict[:10])\n return mlist_dict", "def get_meter_info(apt_no):\n if apt_no in ['102A', 102]:\n apt_no = '102A'\n payload = (\"select uuid, Metadata/Instrument/SupplyType \"\n \"where Metadata/LoadLocation/FlatNumber ='\" + str(apt_no) + \"' and \"\n \"Metadata/Extra/PhysicalParameter='Power'\")\n\n r = requests.post(url, data=payload)\n # logger.debug (\"%s\",r)\n payload_body = r.json()\n # logger.debug (\"Payload:\\n%s\", payload_body)\n\n meters = []\n for i in range(0, len(payload_body)):\n meter = payload_body[i]\n\n meters.append({'uuid': meter['uuid'], 'type': meter[\n 'Metadata']['Instrument']['SupplyType']})\n\n return meters", "def _get_data(self):\n devices = []\n try:\n if not self.router_client.login():\n self.hass.states.set(f\"{DOMAIN}.statusmsg\", self.router_client.statusmsg)\n _LOGGER.warning(\"Login failed: {0}:{1}@{2}\".format(self.router_client.username, self.router_client.password,self.router_client.host))\n self.router_client.logout()\n return devices\n\n devices_json = self.router_client.get_devices_response()\n finally:\n self.router_client.logout()\n\n self.hass.states.set(f\"{DOMAIN}.scanning\", devices_json != False)\n\n if devices_json != False:\n for device in devices_json:\n # _LOGGER.debug(\"Device: {0}\".format(device))\n dev = Device(\n device['HostName'].replace('未知设备', 'Unknown'),\n device['IPAddress'],\n device['MACAddress'],\n device['Active'],\n ICONS.get(device['IconType'])\n )\n # _LOGGER.debug(\"Device: {0}\".format(dev))\n devices.append(dev)\n return devices\n else:\n return []", "def get_inventory(self, context):\n # See below some example code demonstrating how to return the resource structure\n # and attributes. 
In real life, of course, if the actual values are not static,\n # this code would be preceded by some SNMP/other calls to get the actual resource information\n '''\n # Add sub resources details\n sub_resources = [ AutoLoadResource(model ='Generic Chassis',name= 'Chassis 1', relative_address='1'),\n AutoLoadResource(model='Generic Module',name= 'Module 1',relative_address= '1/1'),\n AutoLoadResource(model='Generic Port',name= 'Port 1', relative_address='1/1/1'),\n AutoLoadResource(model='Generic Port', name='Port 2', relative_address='1/1/2'),\n AutoLoadResource(model='Generic Power Port', name='Power Port', relative_address='1/PP1')]\n\n\n attributes = [ AutoLoadAttribute(relative_address='', attribute_name='Location', attribute_value='Santa Clara Lab'),\n AutoLoadAttribute('', 'Model', 'Catalyst 3850'),\n AutoLoadAttribute('', 'Vendor', 'Cisco'),\n AutoLoadAttribute('1', 'Serial Number', 'JAE053002JD'),\n AutoLoadAttribute('1', 'Model', 'WS-X4232-GB-RJ'),\n AutoLoadAttribute('1/1', 'Model', 'WS-X4233-GB-EJ'),\n AutoLoadAttribute('1/1', 'Serial Number', 'RVE056702UD'),\n AutoLoadAttribute('1/1/1', 'MAC Address', 'fe80::e10c:f055:f7f1:bb7t16'),\n AutoLoadAttribute('1/1/1', 'IPv4 Address', '192.168.10.7'),\n AutoLoadAttribute('1/1/2', 'MAC Address', 'te67::e40c:g755:f55y:gh7w36'),\n AutoLoadAttribute('1/1/2', 'IPv4 Address', '192.168.10.9'),\n AutoLoadAttribute('1/PP1', 'Model', 'WS-X4232-GB-RJ'),\n AutoLoadAttribute('1/PP1', 'Port Description', 'Power'),\n AutoLoadAttribute('1/PP1', 'Serial Number', 'RVE056702UD')]\n\n return AutoLoadDetails(sub_resources,attributes)\n '''\n\n self._log(context, 'Begin autoload')\n resources = []\n attributes = []\n\n\n attributes.append(AutoLoadAttribute('', 'replication_address', self.get_replication_address(context)))\n attributes.append(AutoLoadAttribute('', 'connection_key', self.get_connection_key(context)))\n\n networks = self._get_newtork_interfaces(context)\n self._log(context, 'got networks')\n\n controllers = self._get_controllers(context)\n self._log(context, 'got controllers')\n ports = self._get_ports(context)\n\n model = None\n for controller in controllers:\n self._log(context, 'Processing ctrlt: ' + controller['name'] + ':' + controller['model'])\n resources.append(AutoLoadResource(model='Generic Storage Controller', name=controller['name'],\n relative_address=controller['name']))\n if model is None:\n model = controller['model']\n\n attributes.append(AutoLoadAttribute('', 'Model', model))\n\n for network in networks:\n self._log(context, 'Processing netwk: ' + network['name'] + ':' + str(network['address']))\n net_name = network['name']\n controller = net_name.split('.')[0]\n if 'vir0' in controller or 'vir1' in controller:\n attributes.append(AutoLoadAttribute('',str(controller + '_address'), str(network['address'])))\n continue\n if 'vir' in controller:\n continue\n if 'management' not in network['services']:\n continue\n resources.append(AutoLoadResource(model='Storage Network Port', name=net_name,\n relative_address=controller.upper() + '/' + str(network['address'])))\n\n for port in ports:\n if port['iqn'] is not None:\n port_name = port['name']\n controller = port_name.split('.')[0]\n resources.append(AutoLoadResource(model='iSCSI Storage Port', name=port['name'],\n relative_address=controller + '/' + port['portal']))\n attributes.append(AutoLoadAttribute(controller + '/' + port['portal'], 'iqn', port['iqn']))\n elif port['wwn'] is not None:\n port_name = port['name']\n controller = port_name.split('.')[0]\n 
resources.append(AutoLoadResource(model='FC Storage Port', name=port['name'],\n relative_address=controller + '/' + port['name'].split('.')[1]))\n attributes.append(AutoLoadAttribute(controller + '/' + port['name'].split('.')[1], 'wwn', port['wwn']))\n\n return AutoLoadDetails(resources, attributes)", "def amtool_brief(self, mess, args):\n helper = AmtoolHelper(\n alertmanager_address=self.config['server_address'])\n result = helper.get_alerts()\n return result", "def getData(language=None):", "def main(self, name):\n\t\tapi_results = [] \n\t\tparams = self.get_search_parameters(name)\n\t\tapi_results.append(self.api_connect(params))\n\t\ttime.sleep(1.0)\n\t\tkey = api_results[0]['businesses'][0]\n\t\tbusiness_information = [key['name'], self.phone_number_organizer(key), key['rating'],\\\n\t\tkey['review_count']]\n\t\treturn business_information", "def _get_network_utilization(self):\n options = self.scenario_cfg[\"options\"]\n interval = options.get('interval', 1)\n count = options.get('count', 1)\n\n cmd = \"sudo sar -n DEV %d %d\" % (interval, count)\n\n raw_result = self._execute_command(cmd)\n result = self._filtrate_result(raw_result)\n\n return result", "def get_oauth_data():", "def probe_api():\n\n info = loads(get(url).text)\n return info", "def get():" ]
[ "0.68421215", "0.6543652", "0.64636284", "0.6203195", "0.61375993", "0.5911429", "0.58880204", "0.5845489", "0.5802097", "0.57885844", "0.57432395", "0.57148707", "0.5710095", "0.5646816", "0.56286925", "0.5616645", "0.5614922", "0.56103456", "0.56019914", "0.55968064", "0.55946743", "0.5585468", "0.55528677", "0.55528677", "0.55528677", "0.55059457", "0.5441368", "0.54404444", "0.5439524", "0.5438029", "0.54107", "0.5409626", "0.5395171", "0.5392969", "0.5366013", "0.53489035", "0.53362995", "0.5335913", "0.5322384", "0.5322384", "0.5321443", "0.5308198", "0.53058976", "0.52944", "0.5292505", "0.52906495", "0.52821016", "0.5280711", "0.52647436", "0.5263888", "0.52638495", "0.5257619", "0.5257518", "0.5228837", "0.52274376", "0.5226846", "0.5221803", "0.5218312", "0.52136624", "0.521233", "0.5211371", "0.52088434", "0.52060896", "0.52023757", "0.51923317", "0.51891965", "0.5188599", "0.518666", "0.5153769", "0.5150233", "0.5148587", "0.51196384", "0.51187587", "0.5115817", "0.51095563", "0.51095563", "0.5106701", "0.51058", "0.5105605", "0.51042444", "0.5100568", "0.5095127", "0.5090872", "0.508169", "0.50781566", "0.5074861", "0.5072838", "0.5071839", "0.50645113", "0.5062994", "0.5061628", "0.5054738", "0.50506854", "0.50483835", "0.5042703", "0.50411904", "0.5033789", "0.5032017", "0.50249046", "0.5024781" ]
0.5929427
5
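Illustrative aside, not part of the dataset: each row's metadata (visible again in the next row below) declares a triplet objective over (query, document, negatives), so a row like the one ending here can be unpacked into training triples. The loader below is only a minimal sketch under that assumption; the field names simply mirror the columns of this dump, and row_to_triplets is a hypothetical helper, not an official API of the dataset.

# Minimal sketch (assumption: 'row' is a dict keyed by the columns of this dump).
# Pairs the query with its positive document and each mined negative, keeping the
# retrieval score so weak negatives can be filtered out if desired.
def row_to_triplets(row, min_negative_score=0.0):
    query = row["query"]
    positive = row["document"]
    triples = []
    for negative, score in zip(row["negatives"], row["negative_scores"]):
        if float(score) >= min_negative_score:
            triples.append((query, positive, negative))
    return triples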
Return the default form class used for user registration.
def get_form_class(self, request):
		return RegistrationForm
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_form_class(self, request):\n\t\treturn RegistrationForm", "def get_form_class(self):\n return self.form_class", "def get_form_class(self):\n if self.form_class:\n return self.form_class\n else:\n raise ImproperlyConfigured(\n \"在定义类视图%s的时候,你必须明确指定一个form_class.\"%self.__class__.__name__)", "def get_form_class(self):\r\n return modelform_factory(self.model)", "def get_form(self, form_class=None):\n\t\tif form_class is None:\n\t\t\tform_class = self.get_form_class()\n\t\treturn form_class(self.request.user, **self.get_form_kwargs())", "def get_token_form_class(self):\n from two_factor.forms import AuthenticationTokenForm\n\n return AuthenticationTokenForm", "def get_form_class():\n return RazorPaymentForm", "def get_form_class(self):\n form_options = self.get_form_options()\n # If a custom form class was passed to the EditHandler, use it.\n # Otherwise, use the base_form_class from the model.\n # If that is not defined, use WagtailAdminModelForm.\n model_form_class = getattr(self.model, \"base_form_class\", WagtailAdminModelForm)\n base_form_class = self.base_form_class or model_form_class\n\n return get_form_for_model(\n self.model,\n form_class=base_form_class,\n **form_options,\n )", "def get_form():\n global form_class\n from fluent_comments import appsettings\n\n if form_class is None:\n if appsettings.FLUENT_COMMENTS_FORM_CLASS:\n from django.utils.module_loading import import_string\n\n form_class = import_string(appsettings.FLUENT_COMMENTS_FORM_CLASS)\n else:\n from fluent_comments.forms import FluentCommentForm\n\n form_class = FluentCommentForm\n\n return form_class", "def get_form_class(self, form_key):\n return self.get_form_classes()[form_key]", "def get_form_class(self):\n\t\treturn formset_factory(super(FormsetMixin, self).get_form_class(), **self.get_formset_kwargs())", "def get_default_form(self, display=False):\n form_selector = display_form_selector if display else county_form_selector\n return form_selector.get_combined_form_class(counties=[self.county.slug])", "def get_form_class(self):\n return get_review_form(review=self.get_object(), user=self.request.user)", "def get_form_classes(self):\n return {\n **self.form_classes\n }", "def get_form(self, form_class):\n return form_class(**self.get_form_kwargs())", "def get_form(self, form_class=None):\n if form_class is None:\n form_class = self.get_form_class()\n return form_class(\n token=self.request.session.get('token', False),\n aiid=self.kwargs['aiid'],\n **self.get_form_kwargs()\n )", "def get_form(self):\n kwargs = {\n \"instance\": self.profile if self.form_object == \"profile\" else self.user,\n \"prefix\": self.name,\n }\n\n if self.request.method == \"POST\":\n return self.form_class(self.request.POST, self.request.FILES, **kwargs)\n else:\n return self.form_class(**kwargs)", "def signup_form(request):\n return {'signup_form': UserForm()}", "def _get_bulk_change_form_class(self):\n return BulkChangeFormWizardHandlerPluginsForm", "def _get_bulk_change_form_class(self):\n return BulkChangeFormElementPluginsForm", "def get_form_class(self):\n login_try_count = self.request.session.get('login_try_count', 0)\n\n # If the form has been submitted...\n if self.request.method == \"POST\":\n self.request.session['login_try_count'] = login_try_count + 1\n\n if login_try_count >= 20:\n return CaptchaAuthenticationForm\n\n return super(LoginView, self).get_form_class()", "def register_form(self):\n f = Form()\n self.forms = f\n return f", "def _get_bulk_change_form_class(self):\n return 
BulkChangeFormHandlerPluginsForm", "def get_form_class(self):\n \n \"\"\"\n Construct a form class that has all the fields and formsets named in\n the children of this edit handler. \n \"\"\"\n if not hasattr(self, 'model'):\n raise AttributeError(\n '%s is not bound to a model yet. Use `.bind_to(model=model)` '\n 'before using this method.' % self.__class__.__name__)\n # If a custom form class was passed to the EditHandler, use it.\n # Otherwise, use the rai_base_form_class from the model.\n # If that is not defined, use RAIAdminModelForm.\n model_form_class = getattr(self.model, 'rai_base_form_class',\n RAIAdminModelForm)\n base_form_class = self.base_form_class or model_form_class\n\n formsets = self.required_formsets()\n\n form_class = rai_modelform_factory(\n self.decorator.get_rai_model(),\n form_class=base_form_class,\n fields=self.required_internal_fields(),\n formsets=formsets,\n widgets=self.widget_overrides())\n form_class.readonly_fields = self.readonly_fields()\n return form_class", "def name(self) -> Text:\n\n return \"user_form\"", "def get_form_class(self):\n if self.survey.get_requires_payment():\n return AuthorizenetSurveyPurchaseForm\n return super(AuthorizenetSurveyPurchaseCreate, self).get_form_class()", "def get_form(self, form_class):\n if self.get_locked_form(form_class):\n return None\n return form_class(**self.get_form_kwargs())", "def get_basic_form(self):\n return self.basic_form", "def _form_for_type(request, C, defn, add_id_and_rev=False):\n form = build(defn, C, add_id_and_rev=add_id_and_rev,\n widget_registry=_widget_registry(request))\n form.renderer = request.environ['restish.templating'].renderer\n return form", "def get_form(self, form_class=None):\n # 设置初始值\n if self.request.method == \"GET\":\n return SecondMenuModelForm(initial={'menu': self.menu_obj})\n else:\n # post提交的时候,不要忘记设置data\n return SecondMenuModelForm(data=self.request.POST)", "def _create_forms_py(self, form_class_name):\n return '''\n \"\"\"Configuration forms for the extension.\"\"\"\n\n from django import forms\n from djblets.extensions.forms import SettingsForm\n\n\n class %(form_class_name)s(SettingsForm):\n my_field_1 = forms.CharField()\n my_field_2 = forms.BooleanField()\n ''' % {\n 'form_class_name': form_class_name,\n }", "def get_form(self):\n return self.form", "def get_form(self):\n return self.form", "def get_form(self, form_class=None):\n if self._form_instance:\n return self._form_instance\n self._form_instance = super().get_form(form_class)\n return self.get_form(form_class)", "def get_form(self, request, obj=None, **kwargs):\n defaults = {}\n if obj is None:\n defaults['form'] = self.add_form\n defaults.update(kwargs)\n return super(UserAdmin, self).get_form(request, obj, **defaults)", "def formfield(self, form_class=RadioSelectField, **kwargs):\n defaults = {\n 'required': not self.blank,\n 'label': capfirst(self.verbose_name),\n 'help_text': self.help_text,\n 'html_attrs': self.html_attrs\n }\n\n if self.has_default():\n defaults['initial'] = self.default\n\n if self.choices:\n # Fields with choices get special treatment.\n include_blank = (self.blank or\n not (self.has_default() or 'initial' in kwargs))\n defaults['choices'] = self.get_choices(include_blank=include_blank)\n defaults['coerce'] = self.to_python\n if self.null:\n defaults['empty_value'] = \"\"\n defaults.update(kwargs)\n return form_class(**defaults)", "def _wrap_form(self, parent_form_class):\n steptitle = pd_mf(u'Add ${name}',\n mapping={'name': self.fti.Title()})\n\n form_class = 
self._create_form_class(parent_form_class, steptitle)\n\n form_class.__name__ = 'WizardForm: %s' % parent_form_class.__name__\n return form_class", "def model_form_factory(base=Form, meta=ModelFormMeta, **defaults):\n\n class ModelForm(six.with_metaclass(meta, base)):\n \"\"\"\n A function that returns SQLAlchemy session. This should be\n assigned if you wish to use Unique validator. If you are using\n Flask-SQLAlchemy along with WTForms-Alchemy you don't need to\n set this.\n \"\"\"\n get_session = None\n\n class Meta:\n model = None\n\n default = None\n\n #: Whether or not to skip unknown types. If this is set to True,\n #: fields with types that are not present in FormGenerator type map\n #: will be silently excluded from the generated form.\n #:\n #: By default this is set to False, meaning unknown types throw\n #: exceptions when encountered.\n skip_unknown_types = defaults.pop('skip_unknown_types', False)\n\n #: Whether or not to assign all fields as optional, useful when\n #: creating update forms for patch requests\n all_fields_optional = defaults.pop('all_fields_optional', False)\n\n validators = defaults.pop('validators', {})\n\n #: A dict with keys as field names and values as field arguments.\n field_args = defaults.pop('field_args', {})\n\n #: A dict with keys as field names and values as widget options.\n widget_options = defaults.pop('widget_options', {})\n\n #: Whether or not to include only indexed fields.\n only_indexed_fields = defaults.pop('only_indexed_fields', False)\n\n #: Whether or not to include primary keys.\n include_primary_keys = defaults.pop('include_primary_keys', False)\n\n #: Whether or not to include foreign keys. By default this is False\n #: indicating that foreign keys are not included in the generated\n #: form.\n include_foreign_keys = defaults.pop('include_foreign_keys', False)\n\n #: Whether or not to strip string fields\n strip_string_fields = defaults.pop('strip_string_fields', False)\n\n #: Whether or not to include datetime columns that have a default\n #: value. A good example is created_at column which has a default\n #: value of datetime.utcnow.\n include_datetimes_with_default = defaults.pop(\n 'include_datetimes_with_default', False\n )\n\n #: The default validator to be used for not nullable columns. Set\n #: this to `None` if you wish to disable it.\n not_null_validator = defaults.pop(\n 'not_null_validator',\n InputRequired()\n )\n\n #: A dictionary that overrides not null validation on type level.\n #: Keys should be valid SQLAlchemy types and values should be valid\n #: WTForms validators.\n not_null_validator_type_map = defaults.pop(\n 'not_null_validator_type_map',\n ClassMap(\n [(sa.String, [InputRequired(), DataRequired()])]\n )\n )\n\n #: Default email validator\n email_validator = Email\n\n #: Default length validator\n length_validator = Length\n\n #: Default unique validator\n unique_validator = Unique\n\n #: Default number range validator\n number_range_validator = NumberRange\n\n #: Default date range validator\n date_range_validator = DateRange\n\n #: Default time range validator\n time_range_validator = TimeRange\n\n #: Default optional validator\n optional_validator = Optional\n\n #: Which form generator to use. 
Only override this if you have a\n #: valid form generator which you want to use instead of the\n #: default one.\n form_generator = defaults.pop(\n 'form_generator', FormGenerator\n )\n\n #: Default date format\n date_format = defaults.pop('date_format', '%Y-%m-%d')\n\n #: Default datetime format\n datetime_format = defaults.pop(\n 'datetime_format', '%Y-%m-%d %H:%M:%S'\n )\n\n #: Dictionary of SQLAlchemy types as keys and WTForms field classes\n #: as values. The key value pairs of this dictionary override\n #: the key value pairs of FormGenerator.TYPE_MAP.\n #:\n #: Using this configuration option one can easily configure the\n #: type conversion in class level.\n type_map = defaults.pop('type_map', ClassMap())\n\n #: Whether or not to raise InvalidAttributExceptions when invalid\n #: attribute names are given for include / exclude or only\n attr_errors = defaults.pop('attr_errors', True)\n\n #: Additional fields to include in the generated form.\n include = defaults.pop('include', [])\n\n #: List of fields to exclude from the generated form.\n exclude = defaults.pop('exclude', [])\n\n #: List of fields to only include in the generated form.\n only = defaults.pop('only', [])\n\n def __init__(self, *args, **kwargs):\n \"\"\"Sets object as form attribute.\"\"\"\n\n self._obj = kwargs.get('obj', None)\n super(ModelForm, self).__init__(*args, **kwargs)\n\n if defaults:\n raise UnknownConfigurationOption(\n list(defaults.keys())[0]\n )\n\n return ModelForm", "def get_form_class(self):\n\t\treturn inlineformset_factory(self.get_model(), self.get_related_model(), **self.get_formset_kwargs())", "def _get_bulk_change_form_class(self):\n raise NotImplementedError(\n \"You should implement `get_bulk_change_form_class`\"\n )", "def get_job_form_class(self, job_name, request=None, object_id=None, view_name=None, extra_context=None):\n return None", "def _get_user_class(self, name):\r\n self._user_classes.setdefault(name, _make_user_class(self, name))\r\n return self._user_classes[name]", "def _class_default(self):\n if self.auto_create:\n return self.instance_class\n return utils.undefined", "def formfield(self, form_class=MultipleSelectField, **kwargs):\n defaults = {\n 'required': not self.blank,\n 'label': capfirst(self.verbose_name),\n 'help_text': self.help_text,\n 'html_attrs': self.html_attrs\n }\n\n if self.has_default():\n defaults['initial'] = self.default\n\n if self.choices:\n # Fields with choices get special treatment.\n include_blank = (self.blank or\n not (self.has_default() or 'initial' in kwargs))\n defaults['choices'] = self.get_choices(include_blank=include_blank)\n defaults['coerce'] = self.to_python\n if self.null:\n defaults['empty_value'] = \"\"\n defaults.update(kwargs)\n return form_class(**defaults)", "def formfield(self, **kwargs): # pylint:disable=arguments-differ\n from .forms import CompositeTypeField\n\n defaults = {\n \"form_class\": CompositeTypeField,\n \"model\": self.Meta.model,\n }\n defaults.update(kwargs)\n\n return super().formfield(**defaults)", "def input_class(field):\r\n return field.field.widget.__class__.__name__.lower()", "def show_new_user_form():\r\n return render_template('user-form.html')", "def login_form(request):\n return {'login_form': LoginForm()}", "def form(self):\n\t\treturn self._form", "def get_form(self):\n return QuestionForm()", "def create_form(self, resource=None, edit_form=False):\n form = UserForm(self.config_models, obj=resource)\n\n form.totp_enabled = self.totp_enabled\n\n session = self.session()\n 
self.update_form_collection(\n resource, edit_form, form.groups, self.Group, 'sorted_groups',\n 'id', 'name', session\n )\n self.update_form_collection(\n resource, edit_form, form.roles, self.Role, 'sorted_roles', 'id',\n 'name', session\n )\n session.close()\n\n return form", "def get_validator_class(self):\n return self.validator_class", "def registration_form():\n\n return render_template(\"/registration_form.html\")", "def special_class(self, form):\n if not len(form) >= 3:\n raise SyntaxError(\"Not enough forms\")\n bases = form[2]\n doc, code = self._getDocAndCode(form[3:])\n return ast.Class(form[1].name, self.compileForms(bases), doc, code)", "def formfield(self, form_class=HstoreCheckboxInput, **kwargs):\n defaults = {\n 'required': not self.blank,\n 'label': capfirst(self.verbose_name),\n 'help_text': self.help_text,\n 'html_attrs': self.html_attrs,\n }\n\n if self.has_default():\n defaults['initial'] = self.default\n\n if self.choices:\n # Fields with choices get special treatment.\n include_blank = (self.blank or\n not (self.has_default() or 'initial' in kwargs))\n defaults['choices'] = self.get_choices(include_blank=include_blank)\n defaults['coerce'] = self.to_python\n if self.null:\n defaults['empty_value'] = \"\"\n\n defaults.update(kwargs)\n formfield = form_class(**defaults)\n\n if self.html_attrs:\n formfield.widget.build_attrs(self.html_attrs)\n\n return formfield", "def get_model_cls() -> t.Any:\n return SignupRequest", "def get_form(self, request, obj=None, **kwargs):\n defaults = {}\n if obj is None:\n defaults['form'] = self.add_form\n defaults.update(kwargs)\n return super(ProfileAdmin, self).get_form(request, obj, **defaults)", "def _get_form_creator(self) -> MapsFormHandler:\n return self._form_creator(columns=self._working_context_service.get_file_columns(),\n shapes=self._document_service.get_supported_shapes(),\n shape_keys=self._document_service.get_supported_shape_keys(),\n colour_palette=self._document_service.get_supported_colours(),\n tiles=self._document_service.get_supported_tiles())", "def get_form(self, request, obj=None, **kwargs):\n if not obj:\n kwargs['form'] = VPNCreationForm\n else:\n kwargs['form'] = VPNUpdateForm\n return super().get_form(request, obj, **kwargs)", "def __init__(self, *args, **kwargs):\n super(RegisterForm, self).__init__(*args, **kwargs)\n\n self.fields['nick'].widget.attrs.update({\n 'label': 'Přezdívka',\n 'placeholder': 'Mirek'\n })\n\n self.fields['name'].widget.attrs.update({\n 'label': 'Jméno',\n 'placeholder': 'Mirek'\n })\n\n self.fields['surname'].widget.attrs.update({\n 'label': 'Příjmení',\n 'placeholder': 'Dušín'\n })\n\n self.fields['email'].widget.attrs.update({\n 'label': 'E-mail',\n 'placeholder': 'mirek@rychlesipy.cz'\n })\n\n self.fields['age'].widget.attrs.update({'label': 'Věk'})\n self.fields['age'].initial = 18\n\n self.fields['race'].widget.attrs.update({'label': 'Rasa'})\n self.fields['race'].queryset = Race.objects.filter(\n active=True).only('id', 'name')\n\n self.fields['group'].widget.attrs.update({\n 'label': 'Skupina',\n 'placeholder': 'Rychlé Šípy'\n })\n\n for field in self.fields.keys():\n self.fields[field].widget.attrs.update({\n 'required': self.fields[field].required,\n 'title': '',\n 'class': 'form-control'\n })", "def get_formset_form(formset: forms.BaseFormSet) -> forms.BaseForm:\n return formset._construct_form(0, **formset.get_form_kwargs(0)) # type: ignore", "def test_get_form_class(self):\n # Single label\n self.view.learning_model = TestSingleLabelClassifierModel()\n 
self.assertEqual(self.view.get_form_class(), SingleLabelClassifierForm)\n\n # Multi label\n self.view.learning_model = TestMultiLabelClassifierModel()\n self.assertEqual(self.view.get_form_class(), MultiLabelClassifierForm)", "def get_default(cls):\n return cls.USER", "def register_form():\n\n return render_template(\"register-form.html\")", "def get_app_form():\n\t\n\treturn render_template(\"application-form.html\")", "def register_form():\n\n return render_template(\"register.html\")", "def __init__(self, *args, **kwargs):\n super(UserCreationForm, self).__init__(*args, **kwargs)\n self.fields[\"first_name\"].required = True\n self.fields[\"last_name\"].required = True\n self.fields[\"email\"].required = True", "def form_for_request(request, FormClass, *args, **kwargs):\n return FormClass(request.POST if request.method == 'POST' else None, *args, **kwargs)", "def forms(self):\n from hubspot3.forms import FormsClient\n\n return FormsClient(**self.auth, **self.options)", "def QuestionForm(user, *args, **kwargs):\n\n if user.is_anonymous():\n if not settings.ALLOW_ANONYMOUS:\n return None\n else:\n selected_fields = ['name', 'email', 'title', 'body', 'phone_number']\n else:\n selected_fields = ['user', 'title', 'body', 'status', 'phone_number']\n\n if settings.ALERTS:\n selected_fields += ['alert']\n\n class _QuestionForm(forms.ModelForm):\n def __init__(self, *args, **kwargs):\n super(_QuestionForm, self).__init__(*args, **kwargs)\n\n for key in self.fields:\n if not key in OPTIONAL_FIELDS:\n self.fields[key].required = True\n\n # hide the internal status for non-staff\n qf = self.fields.get('status', None)\n if qf and not user.is_staff:\n choices = list(qf.choices)\n choices.remove(('internal', _('Internal')))\n qf.choices = choices\n\n # a bit of a hack...\n # hide a field, and use clean to force\n # a specific value of ours\n for key in ['user']:\n qf = self.fields.get(key, None)\n if qf:\n qf.widget = qf.hidden_widget()\n qf.required = False\n\n # honey pot!\n #phone_number = forms.CharField(label=_('Phone number'), required=False)\n # if user.is_anonymous():\n # captcha = ReCaptchaField(attrs={'theme': 'clean', 'lang': 'ru'})\n\n def clean_user(self):\n return user\n\n class Meta:\n model = Question\n fields = selected_fields\n\n return _QuestionForm(*args, **kwargs)", "def create_form_user(self, **kwargs):\n user = User.objects.create_user(\n **kwargs\n )\n return user", "def get_validator_class(self):\n validator_class = self.oauth_validator_class\n if validator_class is not None:\n return validator_class\n return oauth_api_settings.DEFAULT_VALIDATOR_CLASS", "def formfield(self, **kwargs):\n form_class = kwargs.get('form_class', BitOptionsForm)\n if issubclass(form_class, BitOptionsForm):\n defaults = {\n 'form_class': form_class,\n 'min_value': 0,\n 'max_value': self.options.maximum_value,\n 'options': list(self.options)\n }\n kwargs.pop('widget', None)\n else:\n defaults = {}\n defaults.update(kwargs)\n return super(SimpleBitOptionsField, self).formfield(**defaults)", "def make_form(self):", "def get_form(self, form_id):\n\t\treturn Form(form_id, self.user_id, self.site_id)", "def create_form(data, form_class, instance):\n if instance:\n form = form_class(instance=instance)\n if data:\n form = form_class(data, instance=instance)\n else:\n form = form_class()\n if data:\n form = form_class(data)\n return form", "def __init__(self, request=None, *args, **kwargs):\n # self.request = request\n\n super(RegistrationForm, self).__init__(*args, **kwargs)", "def register(self, form):\n 
new_user = self.create_inactive_user(form)\n signals.user_registered.send(\n sender=self.__class__, user=new_user, request=self.request\n )\n return new_user", "def get_formset(self, formset_class=None):\n if formset_class is None:\n formset_class = self.get_formset_class()\n return formset_class(**self.get_formset_kwargs())", "def formfield(self, **kwargs):\n defaults = {\n 'form_class': LocalizedIntegerFieldForm\n }\n\n defaults.update(kwargs)\n return super().formfield(**defaults)", "def user_cls(self):\n return self.get_entity_cls('user')", "def get_form(self, request, obj=None, **kwargs):\n if not obj:\n kwargs['form'] = VPNClientCreationForm\n return super().get_form(request, obj, **kwargs)", "def name(self): \n return \"search_form\"", "def make_new_user():\n return render_template('users/new_user_form.html')", "def form_tweaks(self):\n pass", "def __init__(self, *args, **kwargs):\n super(SignupForm, self).__init__(*args, **kwargs)\n self.fields['email'].required = True\n self.fields['first_name'].required = True\n self.fields['password'].widget = forms.PasswordInput() \n\n for field in self.fields:\n self.fields[field].widget.attrs.update(\n {\n 'class': 'form-control',\n }\n )", "def __init__(self, *args, **kw):\n super(SignupFormExtra, self).__init__(*args, **kw)", "def __init__(self, request=None, *args, **kwargs):\n self.request = request\n self.user_cache = None\n super(MyAuthenticationForm, self).__init__(*args, **kwargs)\n\n # Set the label for the \"username\" field.\n UserModel = get_user_model()\n self.email_field = UserModel._meta.get_field(UserModel.USERNAME_FIELD)\n if self.fields['email'].label is None:\n self.fields['email'].label = capfirst(self.email_field.verbose_name)", "def get_email_class():\n fields = {\n 'name': models.CharField(max_length=256),\n '__unicode__': lambda self: self.name\n }\n options = {\n 'ordering': ['name'],\n }\n for language_key, language_name in settings.LANGUAGES:\n fields['html_{}'.format(language_key)] = models.TextField(blank=True, null=True)\n return create_model('Email', fields, 'wedding', options=options)", "def boots_form(obj):\n\n if isinstance(obj, BaseForm):\n return form(obj)\n\n elif isinstance(obj, BoundField):\n return boots_field(obj)\n\n else:\n raise Exception, 'Bootstrap template tag recieved a non form or field object'", "def __init__(self, *args, **kwargs):\n super(UserForm, self).__init__(*args, **kwargs)\n\n self.fields[\"groups\"].label = \"roles\"", "def get_class(self):\n\t\treturn self.CLASS", "def __init__(self, *args, **kwargs):\n user = None\n if 'user' in kwargs:\n user = kwargs.pop('user')\n super(PersonForm, self).__init__(*args, **kwargs)\n if user:\n self.fields['username'].initial = user.username\n self.fields['first_name'].initial = user.first_name\n self.fields['last_name'].initial = user.last_name\n self.fields['email_address'].initial = user.email\n self.fields.keyOrder = [\n 'id', 'username', 'first_name', 'middle_name', 'last_name',\n 'email_address', 'gender',\n 'new_password', 'confirm_new_password', 'signature',\n 'signature_html', 'time_zone', 'language', 'show_signatures',\n 'avatar', 'autosubscribe', 'comment'\n ]", "def form(self):\n return security.get_field_storage()", "def class_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"class_name\")", "def get_form_for_model(\n model,\n form_class=WagtailAdminModelForm,\n **kwargs,\n):\n\n # This is really just Django's modelform_factory, tweaked to accept arbitrary kwargs.\n\n meta_class_attrs = kwargs\n 
meta_class_attrs[\"model\"] = model\n\n # The kwargs passed here are expected to come from EditHandler.get_form_options, which collects\n # them by descending the tree of child edit handlers. If there are no edit handlers that\n # specify form fields, this can legitimately result in both 'fields' and 'exclude' being\n # absent, which ModelForm doesn't normally allow. In this case, explicitly set fields to [].\n if \"fields\" not in meta_class_attrs and \"exclude\" not in meta_class_attrs:\n meta_class_attrs[\"fields\"] = []\n\n # Give this new form class a reasonable name.\n class_name = model.__name__ + \"Form\"\n bases = (form_class.Meta,) if hasattr(form_class, \"Meta\") else ()\n Meta = type(\"Meta\", bases, meta_class_attrs)\n form_class_attrs = {\"Meta\": Meta}\n\n metaclass = type(form_class)\n return metaclass(class_name, (form_class,), form_class_attrs)", "def listfield_formfield_method(self, **kwargs):\r\n #Largely lifted straight from Field.formfield() in django.models.__init__.py\r\n defaults = {'required': not self.blank, 'label': capfirst(self.verbose_name), 'help_text': self.help_text}\r\n if self.has_default(): #No idea what this does\r\n if callable(self.default):\r\n True\r\n defaults['initial'] = self.default\r\n defaults['show_hidden_initial'] = True\r\n else:\r\n defaults['initial'] = self.get_default()\r\n #if self.choices:\r\n if self.choices:\r\n form_field_class = forms.MultipleChoiceField\r\n defaults['choices'] = self.choices\r\n else:\r\n form_field_class = ListFormField\r\n defaults.update(**kwargs)\r\n return form_field_class(**defaults)", "def show_register_form():\n return render_template(\"register-form.html\")", "def __init__(self, request=None, *args, **kwargs):\r\n self.request = request\r\n self.user_cache = None\r\n super(CustomizeAuthenticationForm, self).__init__(*args, **kwargs)\r\n\r\n # Set the label for the \"username\" field.\r\n UserModel = get_user_model()\r\n self.username_field = UserModel._meta.get_field(UserModel.USERNAME_FIELD)\r\n if self.fields['username'].label is None:\r\n self.fields['username'].label = capfirst(self.username_field.verbose_name)", "def registrationView(request):\n \n form = SmellerModelForm()\n form2 = SmellerModelForm2()\n \n return render(request, 'SmellGuessTemplate/registration.html', {'current_date': datetime.now(), 'form': form, 'form2': form2})" ]
[ "0.8111784", "0.770102", "0.74282926", "0.72233987", "0.71631217", "0.7097246", "0.7082445", "0.696311", "0.692484", "0.6767896", "0.67418265", "0.66456175", "0.66020036", "0.64767134", "0.64665145", "0.64210325", "0.63494134", "0.6314577", "0.62687373", "0.6240669", "0.6233663", "0.6218351", "0.6175523", "0.6134979", "0.6127915", "0.611254", "0.5986515", "0.5926572", "0.58976483", "0.5886312", "0.5872859", "0.58338857", "0.58338857", "0.5832761", "0.5804261", "0.5803808", "0.57744175", "0.5755462", "0.5754948", "0.57470006", "0.5722564", "0.5707954", "0.5704913", "0.5694639", "0.5609122", "0.5601415", "0.55768293", "0.556893", "0.55426854", "0.5536171", "0.5517594", "0.5489994", "0.5458909", "0.5457085", "0.5422828", "0.54221654", "0.5414593", "0.54130596", "0.53964347", "0.53803694", "0.5366187", "0.5362144", "0.5358248", "0.5346385", "0.5309946", "0.53081924", "0.5306183", "0.5282719", "0.52780885", "0.5271509", "0.5253488", "0.5252767", "0.5246927", "0.52323854", "0.5224555", "0.52168226", "0.5215213", "0.521193", "0.5209969", "0.5206189", "0.5187087", "0.51803064", "0.5173953", "0.51705986", "0.51682484", "0.5149056", "0.5149011", "0.5142962", "0.5134487", "0.5130868", "0.51293796", "0.51283216", "0.5118006", "0.5114531", "0.5091923", "0.5070309", "0.506654", "0.5060644", "0.50499016", "0.5047768" ]
0.7956627
1
Creates the sum tree data structure for the given replay capacity.
def __init__(self, capacity):
    assert isinstance(capacity, int)
    if capacity <= 0:
        raise ValueError(
            'Sum tree capacity should be positive. Got: {}'.format(capacity))

    self.nodes = []
    self.depth = int(np.ceil(np.log2(capacity)))
    self.low_idx = (2**self.depth) - 1  # pri_idx + low_idx -> tree_idx
    self.high_idx = capacity + self.low_idx
    self.nodes = np.zeros(2**(self.depth + 1) - 1)  # Double precision.
    self.capacity = capacity
    self.highest_set = 0
    self.max_recorded_priority = 1.0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, capacity, tuple, alpha=0.6, beta=0.4):\n self.tree = SumTree(capacity)\n self.capacity = capacity\n self.alpha = alpha\n self.beta = beta\n self.tuple = tuple", "def __init__(self, memory_size, batch_size, alpha):\n self.tree = sum_tree.SumTree(memory_size)\n self.memory_size = memory_size\n self.batch_size = batch_size\n self.alpha = alpha", "def __init__(self, capacity=100):\n \n self.capacity = capacity\n self.size = 0\n self._keys = []\n self._entry = [[] for _ in range(capacity)]", "def __init__(self, capacity=4):\n self.capacity = capacity\n self.size = 0\n self.table = [None] * capacity", "def __init__(self, capacity: int):\n self._pax_with_carry_on = PaxStack()\n self._pax_without_carry_on = PaxStack()\n self._capacity = capacity\n self._current_pax = 0", "def __init__(self, capacity):\n self.capacity = capacity\n self.map = {}\n self.head = self.Node(0, 0)\n self.tail = self.Node(0, 0)\n self.head.next = self.tail\n self.tail.pre = self.head\n self.cnt = 0", "def __init__(self, memory_size, alpha):\n self.tree = SumTree(memory_size)\n self.memory_size = memory_size\n self.alpha = alpha\n self.bonus_priority = 999 # add bonus priority for transitions that were never sampled\n self.epsilon_priority = 0.000001\n if self.alpha == 0: # revert to full uniform\n self.bonus_priority = 0", "def __init__(self, capacity):\n self.experiences = RingBuf(capacity)", "def __init__(self, size):\n self.size = size\n self.queue = []\n self.sum = 0", "def __init__(self, size):\n\n self._root = Node()\n size_left = int(size/2)\n # Initialization of the tree\n self._root.left = self._createSubtree(self._root, 0, size_left) # [a,b[\n self._root.right = self._createSubtree(self._root, size_left, size)\n self._max_priority = 1", "def __init__(self, capacity=4):\n self.capacity = capacity\n self.size = 0\n self.data = [None] * capacity\n self.head = 0\n self.tail = 0", "def __init__(self, size: int):\n self.size = size\n self.queue = deque()\n self.widowSum = 0", "def __init__(self, size):\n self.sum = 0\n self.nums = 0\n self.size = size\n self.deq = collections.deque()", "def __init__(self, memory_size, batch_size, alpha, mu, seed):\n self.tree = SumTree(memory_size)\n self.memory_size = memory_size\n self.batch_size = batch_size\n self.alpha = alpha\n self.__e = 0.01\n self.__mu = mu\n np.random.seed(seed)", "def __init__(self, size: int):\n self.size = size \n self.tracker = deque()\n self.sum = 0", "def __init__(self, size):\n self.size = size\n self.queue = deque([])\n self.cur_sum = 0", "def __init__ (self, size: int):\n self.size = size\n self.queue = []\n self.sum = 0", "def __init__(self, capacity=2):\r\n self._capacity = capacity\r\n self._data = [0] * self._capacity\r\n self._size = 0", "def nocache_create_equal_size_subtrees():\n N = len(self)\n subTrees = [set(range(i, N, numSubTrees)) for i in range(numSubTrees)]\n totalCost = N\n return subTrees, totalCost", "def __init__(self, buffer_size, batch_size, random_seed=1234):\n self.tree = PER.sum_tree.SumTree(buffer_size)\n self.batch_size = batch_size\n self.s_prev, self.s_ori_prev, self.a_prev, self.r_prev = None, None, None, None\n\n # p_i = (p + e)^a\n self.e = 0.00000001\n self.a = 0.6 # values suggested by authors\n self.beta = 0.4 # to 1 - values suggested by authors\n\n self.previous_index = None # TODO\n self.prevQ_s_t_a_t_ = None\n random.seed(random_seed)", "def __init__(self, capacity, initial):\n\t\tself.capacity = capacity\n\t\tself.amount = initial", "def create_subtrees(maxCost, maxCostRate=0, 
costMetric=\"size\"):\n\n if costMetric == \"applys\":\n def cost_fn(rem): return len(rem) # length of remainder = #-apply ops needed\n elif costMetric == \"size\":\n def cost_fn(rem): return 1 # everything costs 1 in size of tree\n else: raise ValueError(\"Uknown cost metric: %s\" % costMetric)\n\n subTrees = []\n curSubTree = set([evalOrder[0]])\n curTreeCost = cost_fn(self[evalOrder[0]][1]) # remainder length of 0th evaluant\n totalCost = 0\n cacheIndices = [None] * self.cache_size()\n\n for k in evalOrder:\n iStart, remainder, iCache = self[k]\n\n if iCache is not None:\n cacheIndices[iCache] = k\n\n #compute the cost (additional #applies) which results from\n # adding this element to the current tree.\n cost = cost_fn(remainder)\n inds = set([k])\n\n if iStart is not None and cacheIndices[iStart] not in curSubTree:\n #we need to add the tree elements traversed by\n #following iStart\n j = iStart # index into cache\n while j is not None:\n iStr = cacheIndices[j] # cacheIndices[ iStart ]\n inds.add(iStr)\n cost += cost_fn(self[iStr][1]) # remainder\n j = self[iStr][0] # iStart\n\n if curTreeCost + cost < maxCost:\n #Just add current string to current tree\n curTreeCost += cost\n curSubTree.update(inds)\n else:\n #End the current tree and begin a new one\n #print(\"cost %d+%d exceeds %d\" % (curTreeCost,cost,maxCost))\n subTrees.append(curSubTree)\n curSubTree = set([k])\n\n cost = cost_fn(remainder); j = iStart\n while j is not None: # always traverse back iStart\n iStr = cacheIndices[j]\n curSubTree.add(iStr)\n cost += cost_fn(self[iStr][1]) # remainder\n j = self[iStr][0] # iStart\n totalCost += curTreeCost\n curTreeCost = cost\n #print(\"Added new tree w/initial cost %d\" % (cost))\n\n maxCost += maxCostRate\n\n subTrees.append(curSubTree)\n totalCost += curTreeCost\n return subTrees, totalCost", "def create_subtrees(maxCost, maxCostRate=0, costMetric=\"size\"):\n\n if costMetric == \"applys\":\n def cost_fn(rem): return len(rem) # length of remainder = #-apply ops needed\n elif costMetric == \"size\":\n def cost_fn(rem): return 1 # everything costs 1 in size of tree\n else: raise ValueError(\"Uknown cost metric: %s\" % costMetric)\n\n subTrees = []\n curSubTree = set([evalOrder[0]])\n curTreeCost = cost_fn(self[evalOrder[0]][1]) # remainder length of 0th evaluant\n totalCost = 0\n cacheIndices = [None] * self.cache_size()\n\n for k in evalOrder:\n iStart, remainder, iCache = self[k]\n\n if iCache is not None:\n cacheIndices[iCache] = k\n\n #compute the cost (additional #applies) which results from\n # adding this element to the current tree.\n cost = cost_fn(remainder)\n inds = set([k])\n\n if iStart is not None and cacheIndices[iStart] not in curSubTree:\n #we need to add the tree elements traversed by\n #following iStart\n j = iStart # index into cache\n while j is not None:\n iStr = cacheIndices[j] # cacheIndices[ iStart ]\n inds.add(iStr)\n cost += cost_fn(self[iStr][1]) # remainder\n j = self[iStr][0] # iStart\n\n if curTreeCost + cost < maxCost:\n #Just add current string to current tree\n curTreeCost += cost\n curSubTree.update(inds)\n else:\n #End the current tree and begin a new one\n #print(\"cost %d+%d exceeds %d\" % (curTreeCost,cost,maxCost))\n subTrees.append(curSubTree)\n curSubTree = set([k])\n\n cost = cost_fn(remainder); j = iStart\n while j is not None: # always traverse back iStart\n iStr = cacheIndices[j]\n curSubTree.add(iStr)\n cost += cost_fn(self[iStr][1]) # remainder\n j = self[iStr][0] # iStart\n totalCost += curTreeCost\n curTreeCost = cost\n 
#print(\"Added new tree w/initial cost %d\" % (cost))\n\n maxCost += maxCostRate\n\n subTrees.append(curSubTree)\n totalCost += curTreeCost\n return subTrees, totalCost", "def grow_tree(self):\n\n decision_node = self.root\n internal_env = copy.copy(self.env)\n\n while (not decision_node.is_final) and decision_node.visits > 1:\n\n a = self.select(decision_node)\n\n new_random_node = decision_node.next_random_node(a, self._hash_action)\n\n (new_decision_node, r) = self.select_outcome(internal_env, new_random_node)\n\n new_decision_node = self.update_decision_node(new_decision_node, new_random_node, self._hash_space)\n\n new_decision_node.reward = r\n new_random_node.reward = r\n\n decision_node = new_decision_node\n\n decision_node.visits += 1\n cumulative_reward = self.evaluate(internal_env)\n\n while not decision_node.is_root:\n random_node = decision_node.father\n cumulative_reward += random_node.reward\n random_node.cumulative_reward += cumulative_reward\n random_node.visits += 1\n decision_node = random_node.father\n decision_node.visits += 1", "def __init__(self):\n self.root = RadixTreeNode()\n self.root.key = \"\"\n self.size = 0", "def __init__(self):\n self.capacity = 10000\n self.table = [[] for _ in range(self.capacity)]", "def __init__(self):\n self._size = 0\n self._array = [None] * BinaryTree.DEFAULT_CAPACITY", "def __init__(self, capacity):\n self.memory = deque([], maxlen=capacity)", "def __init__(self, size: int):\n self.__data = []\n for i in range(0, size):\n self.__data.append(self.Node(pre_index=i))", "def __init__(self, size, alpha):\n super(PrioritizedReplayBuffer, self).__init__(size)\n assert alpha >= 0\n self._alpha = alpha\n\n it_capacity = 1\n while it_capacity < size:\n it_capacity *= 2\n\n self._it_sum = SumSegmentTree(it_capacity)\n self._it_min = MinSegmentTree(it_capacity)\n self._max_priority = 1.0", "def __init__(self, capacity: int, function) -> None:\n self.buckets = DynamicArray()\n for _ in range(capacity):\n self.buckets.append(LinkedList())\n self.capacity = capacity\n self.hash_function = function\n self.size = 0", "def __init__(self, capacity: int, storage_unit: str = \"timesteps\", **kwargs):\n super().__init__(capacity=capacity, storage_unit=\"timesteps\", **kwargs)\n self.replay_batches = []\n self.replay_index = 0", "def __init__(self, capacity=10):\n\n self._board = [None] * capacity # list of 10 None elements\n self._n = 0 # number of actual entries", "def __init__(self, buffer_size, batch_size, random_seed=1234):\n self.tree = PER.sum_tree.SumTree(buffer_size)\n self.batch_size = batch_size\n self.episode = []\n self.s_prev, self.s_ori_prev, self.a_prev, self.r_prev, self.v_prev, self.distribution, self.mask = \\\n None, None, None, None, None, None, None\n\n # p_i = (p + e)^a\n self.e = 0.00000001\n self.a = 0.6 # values suggested by authors\n self.beta = 0.4 # to 1 - values suggested by authors\n\n self.previous_index = None # TODO\n random.seed(random_seed)", "def expand(self, policy):\n if self.children != {}: return\n actionWeights = policy(self.state)\n for action in actionWeights:\n succ = self.state.succ(self.state.player, action)\n self.children[action] = TreeNode(succ, actionWeights[action], self)", "def __init__(self, capacity):\n self.capacity = capacity # Number of buckets in the hash table\n self.storage = [None] * capacity\n self.key_count = 0", "def __init__(self, action_size, buffer_size, batch_size, buf_alpha, seed, device):\n\n self.action_size = action_size\n self.memory = []#deque(maxlen=buffer_size)\n self.buffer_size 
= buffer_size\n self.batch_size = batch_size\n self.next_ndx = 0\n\n assert buf_alpha >= 0\n self.alpha = buf_alpha\n #self.experience =namedtuple(\"Experience\", field_names=[\"state\", \"action\", \"reward\", \"next_state\", \"done\"])\n self.seed =random.seed(seed)\n self.device = device\n\n # Num of nodes of a tree, should be in orders of '2'\n iter_capacity = 1\n while iter_capacity < buffer_size:\n iter_capacity *= 2\n\n self.iter_sum = SumSegmentTree(iter_capacity)\n self.iter_min = MinSegmentTree(iter_capacity)\n self.max_p = 1.0", "def __init__(self, parent):\r\n self.parent = parent[:]\r\n self.size = len(self.parent)\r\n self.children = {}\r\n self.data = {}\r\n for i in xrange(self.size):\r\n self.children[i] = []\r\n self.nchildren = [1] * self.size\r\n # iya\r\n for i in xrange(1, self.size):\r\n self.children[self.parent[i]].append(i)\r\n for i in xrange(self.size - 1, 0, -1):\r\n self.nchildren[self.parent[i]] += self.nchildren[i]", "def __init__(self, size):\n self.size = size\n self.q = collections.deque()\n self.sum_ = 0", "def __init__(self, size = 100):\n\t\tself.__parent = [i for i in range(size)]\n\t\tself.__rank = [0 for _ in range(size)]", "def __init__(self, size: int):\n self.size = size\n self.queue = [0] * self.size # queue w/ len of size\n self.head = self.window_sum = 0\n self.count = 0", "def knapsack(items, capacity):\r\n pass", "def __init__(self, size):\n self.moveSize = size\n self.items = deque()\n self.sum = 0", "def __init__(self, capacity, units=0):\n self.capacity = capacity\n self.units = units", "def __init__(self, size: int, values: List[int]=None) -> int:\n self.tree = [el for el in values] if values else ([0] * (size + 1))\n self.size = (len(values)) if values else (size + 1)\n\n if values:\n for i in range(1, self.size):\n parent = i + self._lsb(i)\n if parent < self.size:\n self.tree[parent] += self.tree[i]", "def __init__(self, size: int):\n self.q = deque()\n self.max_size = size\n self.sum = 0.0", "def __init__(self, aggregation_depth, include_bytes=True):\n\n self._prev_stats = {}\n self._aggregation_depth = aggregation_depth\n self._include_bytes = include_bytes\n\n self.init_cur_stats()", "def __init__(self):\n INIT_CAPACITY = 8\n LOAD_FACTOR = 2 / 3\n self.capacity = INIT_CAPACITY\n self.size = 0\n self.slots = [None] * INIT_CAPACITY\n self.load_factor = LOAD_FACTOR", "def __init__(self, size: int):\n self.nums = deque()\n self.size = size \n self.rsm = 0", "def build(self, block_size):", "def __init__(self):\n Node.__init__(self)\n self.__counts = 0\n self.__children = dict()\n self.__children_counts = dict()", "def __init__(self, size: int, alpha: float):\n super(PrioritizedReplayBuffer, self).__init__(size)\n assert alpha > 0\n self._alpha = alpha\n\n it_capacity = 1\n while it_capacity < size:\n it_capacity *= 2\n\n self._it_sum = SumSegmentTree(it_capacity)\n self._it_min = MinSegmentTree(it_capacity)\n self._max_priority = 1.0\n self._prio_change_stats = WindowStat(\"reprio\", 1000)", "def __init__(self,size=100):\n self.__parent = [ i for i in range(size) ]\n self.__size = [ 1 for i in range(size) ]\n self.__rank = [ 0 for i in range(size) ]", "def __init__(self, k):\n self.capacity = k\n self.head = None\n self.tail = None\n self.count = 0", "def __init__(self, k: int):\n self.front = 0\n self.rear = 0\n self.capacity = k + 1\n self.arr = [0 for _ in range(self.capacity)]", "def __init__(self, capacity, fillValue = None):\n \n self._items = list() \n self._fillValue = fillValue\n self._DEFAULT_CAPACITY = capacity\n 
self._logicalSize = 0 #as required by exercise 1\n \n \n for count in xrange(capacity):\n self._items.append(self._fillValue)", "def resize(self, new_capacity):\n # Your code here\n self.capacity = new_capacity\n\n # make new array to store the current self.hash_table\n # update self.hash_table to be array of size new_capacity\n # for each item in our copy array\n # self.put(item) in our newly size self.hash_table\n # if item.next is not None\n # make sure to self.put(item.next) to get all chained nodes\n\n old_storage = self.hash_table\n self.hash_table = [None] * new_capacity\n\n for i, el in enumerate(old_storage):\n if el is not None:\n self.put(el.key, el.value)\n\n curr_node = el\n\n if curr_node is not None:\n # add all chained nodes\n while curr_node.next is not None:\n curr_node = curr_node.next\n if curr_node is not None:\n self.put(curr_node.key, curr_node.value)", "def initial_population(self, size):\n return [self.target_tree] + \\\n [self.mutator.mutate(copy.deepcopy(self.target_tree))\n for i in range(size - 1)]", "def __init__(self, capacity, fillValue=None):\r\n self._items = list()\r\n self._logicalSize = 0\r\n # Track the capacity and fill value for adjustments later\r\n self._capacity = capacity\r\n self._fillValue = fillValue\r\n for count in range(capacity):\r\n self._items.append(fillValue)", "def update(self, probs: torch.Tensor):\n tree, capacity = self._create_tree(probs, self.tree)\n self.tree = tree\n self.capacity = capacity", "def _gen_test_tree_4():\n tree = BinaryNode(5)\n tree.left = BinaryNode(3)\n tree.left.left = BinaryNode(2)\n tree.left.right = BinaryNode(10)\n tree.right = BinaryNode(9)\n tree.right.left = BinaryNode(6)\n tree.right.right = BinaryNode(8)\n return tree", "def __init__(self, tree):\n self._tree = tree\n self._size = len(tree)\n\n self._tree.reindex()\n self._preprocess()", "def __init__(self, estimated_nodes=0, estimated_edges=0, capacity_type=np.int32, jit_build=True):\n flow_type = capacity_type if np.issubdtype(capacity_type, np.floating) else np.int32\n super().__init__(\n estimated_nodes=estimated_nodes,\n estimated_edges=estimated_edges,\n flow_type=flow_type,\n capacity_type=capacity_type,\n arc_index_type=np.int64,\n node_index_type=np.int32,\n jit_build=jit_build,\n )", "def __init__(self):\r\n self.bucket = []\r\n for i in range(4096):\r\n self.bucket.append(Node(0,0))", "def __init__(self, state, parent, action, path_cost):\n self.state = state\n self.parent = parent\n self.action = action\n self.path_cost = path_cost\n self.depth = 0\n if parent:\n self.depth = parent.depth + 1", "def create_binary_tree(depth=3):\n # The only node is root\n max_nodes, nodes = 2 ** depth - 1, 1\n queue = Queue()\n val = IntGenerator()\n root = Node(val=next(val))\n\n queue.enqueue(root)\n while nodes + 2 <= max_nodes:\n current = queue.dequeue()\n\n # Create child nodes until total num is le max_nodes\n # for specified length in binary tree\n if nodes + 2 <= max_nodes: # tree will have +2 nodes after creating children\n current.left = Node(val=next(val))\n current.right = Node(val=next(val))\n queue.enqueue(current.left)\n queue.enqueue(current.right)\n nodes += 2\n\n return root", "def build(self):\n #print('ntimes=%s nelements=%s ntotal=%s' % (self.ntimes, self.nelements, self.ntotal))\n #print('self.IDs', self.data)\n self.itime = 0\n self.ielement = 0\n self.itotal = 0\n\n assert self.ntimes > 0, 'ntimes=%s' % self.ntimes\n assert self.nelements > 0, 'nelements=%s' % self.nelements\n assert self.ntotal > 0, 'ntotal=%s' % self.ntotal\n 
self.nelements //= self.ntimes\n\n dtype, idtype, fdtype = get_times_dtype(self.nonlinear_factor, self.size, self.analysis_fmt)\n self.node = np.zeros(self.ntotal, dtype=idtype)\n #lxa, lxb, lxc, lya, lyb, lyc, lza, lzb, lzc, sa, sb, sc, epr, ovm\n self.data = np.zeros((self.ntimes, self.ntotal, 14), dtype=fdtype)\n self.location = np.empty(self.ntotal, dtype='U8')\n\n self._times = np.zeros(self.ntimes, dtype=dtype)", "def __init__(self, degree):\r\n self.root = Node([], [])\r\n self.min_num_keys = degree - 1 \r\n self.max_num_keys = 2*degree - 1", "def __init__(self, k: int):\n self.capacity = k\n self.head = None\n self.tail = None\n self.count = 0", "def build(self):\n #print('ntimes=%s nelements=%s ntotal=%s' % (self.ntimes, self.nelements, self.ntotal))\n #print('self.IDs', self.data)\n self.itime = 0\n self.ielement = 0\n self.itotal = 0\n\n assert self.ntimes > 0, 'ntimes=%s' % self.ntimes\n assert self.nelements > 0, 'nelements=%s' % self.nelements\n assert self.ntotal > 0, 'ntotal=%s' % self.ntotal\n #self.names = []\n self.nelements //= self.ntimes\n\n self.node = np.zeros(self.ntotal, dtype='int32')\n #oxx, oyy, ozz, txy, pressure\n self.data = np.zeros((self.ntimes, self.ntotal, 5), dtype='float32')\n self.location = np.empty(self.ntotal, dtype='U8')\n dtype, idtype, fdtype = get_times_dtype(self.nonlinear_factor, self.size, self.analysis_fmt)\n\n self._times = np.zeros(self.ntimes, dtype=dtype)", "def heap_update(self):\n print 'SumTree pre-update:', self.replay.tree[0].sum\n last_ixs = self.replay.last_ixs(True)\n while True:\n if len(last_ixs) == 0:\n break\n if len(last_ixs) < 10000:\n ixs = last_ixs\n last_ixs = []\n else:\n ixs = last_ixs[:10000]\n last_ixs = last_ixs[10000:]\n batch = [self.replay.tree[ix].pointer for ix in ixs]\n delta = self.get_delta(batch)\n self.get_p_weights(delta, batch, ixs)\n print 'SumTree post-update:', self.replay.tree[0].sum\n print 'SumTree updated'", "def __init__(self, k: int):\r\n self.capacity = k\r\n self.frontIndex = 0\r\n self.lastIndex = 1\r\n self.deque = [0] * self.capacity\r\n self.size = 0 # current size\r", "def capacity_enlarge(self, k):\n count = 0\n idx = self.capacity - 1\n while count < k:\n left = self.tree[idx]\n right = priorityNode(0, None)\n insert_pos = self.tree.shape[0]\n self.tree = np.insert(self.tree, insert_pos, [left,right])\n idx += 1\n count += 1\n\n self.last_capacity = self.capacity # mark down the last capacity for adding operation\n self.capacity += k # Update the value of capacity", "def __init__(self):\n self.memory = {}\n self.sum_ = {}", "def _new_node(self):\n self._size += 1\n return self._node_factory()", "def __init__(self, max_node_capacity=16):\n assert isinstance(max_node_capacity, int)\n assert max_node_capacity > 0\n self.max_node_capacity = max_node_capacity\n self.length = 0\n self.head = None\n self.tail = None", "def __init__(self, affinity, game_type, game_space, search_depth, opponent=None):\n\n super().__init__(affinity, game_type, game_space, opponent)\n self.__search_depth = search_depth\n self.nodes_expanded = 0", "def __init__(self, affinity, game_type, game_space, search_depth, opponent=None):\n\n super().__init__(affinity, game_type, game_space, opponent)\n self.__search_depth = search_depth\n self.nodes_expanded = 0", "def _grow(self):\n self.capacity *= self.factor\n temp = [None] * self.capacity\n for i in range(self.size):\n temp[i] = self.store[i]\n self.store = temp", "def _dfs_assign(self, filetree):\n stack = [filetree]\n while stack:\n node = stack.pop()\n if 
isinstance(node, tuple) and node[0][\"packmode\"] is None:\n # all children have been seen already, assing packmode\n node = node[0] # unpack the actual node\n weights = defaultdict(int)\n for child in node[\"children\"].values():\n weights[child[\"packmode\"]] += child[\"weight\"]\n packmode, weight = max(weights.items(), key=lambda x: x[1])\n node[\"weight\"] = weight\n node[\"packmode\"] = packmode\n elif node[\"children\"]:\n # schedule that node for computation\n stack.append((node,))\n # visit all children first\n for child in node[\"children\"].values():\n stack.append(child)", "def __init__(self, k):\n self.queue = [0]*k\n self.headIndex = 0\n self.count = 0\n self.capacity = k", "def _make_array(self, capacity):\n return (capacity * ctypes.py_object)()", "def new_capacity_rule(mod, prj, prd):\n return 0", "def _gen_test_tree_6():\n tree = BinaryNode(20)\n tree.left = BinaryNode(10)\n tree.right = BinaryNode(30)\n tree.left.right = BinaryNode(25)\n return tree", "def _gen_test_tree_5():\n tree = BinaryNode(30)\n tree.right = BinaryNode(30)\n return tree", "def __init__(self):\n self._root = None\n self._size = 0\n self._my_hash = {'preorder':self.preorder, 'postorder': self.postorder, 'inorder': self.inorder,\n \"breadthfirst\": self.breadthfirst}", "def grow(self):\n self.capacity = self.capacity * 2\n self.rehash()", "def __init__(self, capacity):\n self.capacity = capacity #this is example for list implementation\n self.head = [None] * capacity #this is example for list implementation\n self.num_items = 0 #this is example for list implementation", "def _build_replay_buffer(self):\n return prioritized_replay_buffer.OutOfGraphPrioritizedReplayBuffer(\n observation_shape=self.observation_shape,\n stack_size=self.stack_size,\n update_horizon=self.update_horizon,\n gamma=self.gamma,\n observation_dtype=self.observation_dtype)", "def fullIntroduceNew (self, networkSize, numNodes, time) :\n\t\tnodeList = self.createNodes(networkSize)\n\t\tself.addAppRecordDiff(nodeList)\n\t\tsessionInfo = self.sessionsFull(nodeList)\n\t\ttotal = 0\n\t\twhile self.endConditionData(nodeList) :\n\t\t\tif total == time :\n\t\t\t\tfor i in range(networkSize, networkSize + numNodes):\n\t\t\t\t\tnode = Node(str(i))\n\t\t\t\t\tnodeList.append(node)\n\t\t\t\t\tnode.addAppData(\"record\"+str(i),\"data\" + str(i), Node.ALL, Node.ALL )\n \t\t\tnode.serialize((Node.ALL, Node.ALL))\n\t\t\t\t\tsessionInfo = self.sessionsFull(nodeList)\n\t\t\tindex = random.randint(0, len(sessionInfo)-1)\n\t\t\tclient = sessionInfo[index][0]\n\t\t\tserver = sessionInfo[index][1]\n\t\t\tself.fullDBReplication(nodeList[client], sessionInfo[index][2])\n\t\t\ttotal = total + 1\n\t\treturn total", "def __init__(self):\n self._root = None\n self._size = 0\n self._curr_idx = 0\n self._depths, self._heights = None, None", "def __init__(self):\n self._root = None\n self._size = 0", "def __init__(self):\n self._root = None\n self._size = 0", "def __init__(self):\n self._root = None\n self._size = 0", "def __init__(self):\n self._root = None\n self._size = 0", "def __init__(self):\n self._root = None\n self._size = 0", "def build(self):\n #print('ntimes=%s nelements=%s ntotal=%s' % (self.ntimes, self.nelements, self.ntotal))\n #print('self.IDs', self.data)\n self.itime = 0\n self.ielement = 0\n self.itotal = 0\n\n assert self.ntimes > 0, 'ntimes=%s' % self.ntimes\n assert self.nelements > 0, 'nelements=%s' % self.nelements\n assert self.ntotal > 0, 'ntotal=%s' % self.ntotal\n self.nelements //= self.ntimes\n\n dtype, idtype, fdtype = 
get_times_dtype(self.nonlinear_factor, self.size, self.analysis_fmt)\n self.node = np.zeros(self.ntotal, dtype=idtype)\n #oxx, oyy, txy, angle, major, minor, ovm\n self.data = np.zeros((self.ntimes, self.ntotal, 8), dtype=fdtype)\n self.location = np.empty(self.ntotal, dtype='U8')\n self._times = np.zeros(self.ntimes, dtype=dtype)", "def __init__(self, k: int):\n self.queue = [0]*k\n self.headIndex = 0\n self.count = 0\n self.capacity = k", "def __init__(self, state, parent=None, action=None, path_cost=0):\n self.state = state\n self.parent = parent\n self.action = action\n self.path_cost = path_cost\n self.depth = 0\n if parent:\n self.depth = parent.depth + 1", "def __init__(self, initial, costfn=lambda node: node.path_cost):\n self.heap = []\n self.states = {}\n self.costfn = costfn\n self.add(initial)" ]
[ "0.6020408", "0.5820842", "0.5724238", "0.55728537", "0.55458015", "0.5542693", "0.5513752", "0.5476557", "0.53817534", "0.53465384", "0.53336674", "0.53084284", "0.5296613", "0.5293262", "0.5262831", "0.5256954", "0.5208593", "0.519735", "0.5179887", "0.51658785", "0.51624656", "0.5124032", "0.5124032", "0.51225233", "0.51113254", "0.51065004", "0.5095909", "0.50948924", "0.5094026", "0.50869215", "0.5079532", "0.5079528", "0.5072277", "0.5051862", "0.5044467", "0.5039624", "0.50225544", "0.5008621", "0.4997842", "0.49854472", "0.49834117", "0.4982999", "0.49746028", "0.49639958", "0.49519968", "0.49395713", "0.49366853", "0.49109983", "0.4910131", "0.49088165", "0.49000874", "0.4888422", "0.48638195", "0.4849619", "0.484711", "0.48464805", "0.48333806", "0.48227507", "0.48192292", "0.4810541", "0.48081967", "0.4796131", "0.47940937", "0.4790346", "0.47896418", "0.47845325", "0.4765943", "0.4758157", "0.4757919", "0.47562143", "0.4755983", "0.47529817", "0.47463545", "0.4739974", "0.4734465", "0.47251427", "0.47241658", "0.47241658", "0.4716732", "0.47135222", "0.46988487", "0.4694798", "0.46884847", "0.46814907", "0.46810135", "0.46787104", "0.46783796", "0.4674046", "0.46726173", "0.46712068", "0.4669191", "0.46691796", "0.46691796", "0.46691796", "0.46691796", "0.4661859", "0.46616843", "0.4661375", "0.4656472", "0.46548703" ]
0.70326006
0
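For readers skimming the sum-tree record above: the flat array built by that constructor (internal nodes first, leaves starting at low_idx) is what enables O(log capacity) priority updates and proportional sampling in a prioritized replay buffer. Below is a minimal, self-contained sketch of that pattern in Python; the class name, the set/sample methods, and the usage priorities are illustrative assumptions and are not taken from the record's source.

import numpy as np

class TinySumTree:
    """Illustrative sum tree; mirrors the flat-array layout of the record above."""

    def __init__(self, capacity):
        self.depth = int(np.ceil(np.log2(capacity)))
        self.low_idx = (2 ** self.depth) - 1       # first leaf slot in the flat array
        self.nodes = np.zeros(2 ** (self.depth + 1) - 1)
        self.capacity = capacity

    def set(self, index, value):
        # Update one leaf, then propagate the change up to the root: O(log capacity).
        node = self.low_idx + index
        delta = value - self.nodes[node]
        while True:
            self.nodes[node] += delta
            if node == 0:
                break
            node = (node - 1) // 2

    def sample(self):
        # Proportional sampling: descend from the root, steering by subtree sums.
        target = np.random.uniform(0.0, self.nodes[0])
        node = 0
        while node < self.low_idx:                 # stop once a leaf is reached
            left = 2 * node + 1
            if target < self.nodes[left]:
                node = left
            else:
                target -= self.nodes[left]
                node = left + 1
        return node - self.low_idx

# Hypothetical usage: the priorities here are made-up numbers.
tree = TinySumTree(capacity=8)
for i, priority in enumerate([0.1, 0.5, 0.2, 1.0]):
    tree.set(i, priority)
print(tree.sample())  # index drawn with probability proportional to its priority

In a prioritized replay buffer, set() would typically be called with each transition's TD-error-derived priority, and sample() would choose which stored transition to replay.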