Dataset schema (one row per source file; parenthesized values are the observed min/max lengths or class counts):

blob_id: string (40)
directory_id: string (40)
path: string (3-281)
content_id: string (40)
detected_licenses: list (0-57 items)
license_type: string (2 classes)
repo_name: string (6-116)
snapshot_id: string (40)
revision_id: string (40)
branch_name: string (313 classes)
visit_date: timestamp[us]
revision_date: timestamp[us]
committer_date: timestamp[us]
github_id: int64 (18.2k-668M, nullable)
star_events_count: int64 (0-102k)
fork_events_count: int64 (0-38.2k)
gha_license_id: string (17 classes)
gha_event_created_at: timestamp[us]
gha_created_at: timestamp[us]
gha_language: string (107 classes)
src_encoding: string (20 classes)
language: string (1 class)
is_vendor: bool
is_generated: bool
length_bytes: int64 (4-6.02M)
extension: string (78 classes)
content: string (2-6.02M)
authors: list (1 item)
author: string (0-175)
blob_id: 94b7cafb4bb13b099c3d093549c75e172d5d9a29
directory_id: 6df6c9ff29fe7aed0972f935cd53337d95f6ad40
path: /ecommerce/migrations/0025_auto_20210520_1832.py
content_id: bf1785363566bde6081088d96b406dd367b45628
detected_licenses: []
license_type: no_license
repo_name: brijesh681/rentel_website
snapshot_id: 46acae1ceb586c2bfcc795a519061a52ffad58a2
revision_id: ef205304c6f5f9f6ff981a8244f4237c59bc52bb
branch_name: refs/heads/master
visit_date: 2023-06-11T21:39:24.715466
revision_date: 2021-07-05T11:44:20
committer_date: 2021-07-05T11:44:20
github_id: 383,115,482
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 967
extension: py
content:
# Generated by Django 3.1.4 on 2021-05-20 13:02
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('ecommerce', '0024_dealsofday'),
]
operations = [
migrations.RenameField(
model_name='offer',
old_name='today_selling_mrp',
new_name='discount_percent',
),
migrations.RemoveField(
model_name='offer',
name='today_discount_percent',
),
migrations.AlterField(
model_name='offer',
name='offer_type',
field=models.CharField(choices=[('Deals Of The Day', 'Deals Of The Day'), ('Festive Special', 'Festive Special'), ('Summer Collection', 'Summer Collection'), ('Winter Collection', 'Winter Collection'), ('As Seen Your Favourite', 'As Seen Your Favourite')], max_length=25),
),
migrations.DeleteModel(
name='DealsOfDay',
),
]
authors: ["80308508+brijesh681@users.noreply.github.com"]
author: 80308508+brijesh681@users.noreply.github.com

blob_id: 21199e34f98bf0e139f0ff9d121b283d51ab5daf
directory_id: ddd80f5cff588f6dcd72da90e90dccac3e545397
path: /projects/cam/camloop.py
content_id: eb953eab4d56292c11cc71f04143c27bcf81b902
detected_licenses: []
license_type: no_license
repo_name: HTaylor7486/projects
snapshot_id: cd5ade1919f71f5e6f1e48d016505364dc919af5
revision_id: 8717c8a12237c095f6b9709242f7be27ac4132d1
branch_name: refs/heads/master
visit_date: 2020-05-12T16:36:43.137135
revision_date: 2014-12-17T13:13:50
committer_date: 2014-12-17T13:13:50
github_id: null
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 1,496
extension: py
content:
import picamera,time
def getpic(name="null"):
try:
with picamera.PiCamera() as camera:
q = "n"
while q == "n":
camera.start_preview()
time.sleep(3)
camera.capture("{0}.jpeg".format (name))
camera.stop_preview()
q = input("is the image okay? (y/n) ")
filename = ("{0}.jepg".format (name))
print("Your file is called {0}.jpeg".format (name))
return filename
except picamera.exc.PicameraMMALError:
print("Your camera is not working please connect and restart the program")
def getchar():
name = ""
while name == "":
name = input("what is your name?")
hair = ""
while not hair in ["blonde","brown","ginger","no hair"]:
hair = input ("what hair colour do you have? (blonde/brown/ginger/no hair)")
hat = ""
while not hat in ["y","n"]:
hat = input("do you have a hat? (y/n)")
eye = ""
while not eye in ["green","brown","blue"] :
eye = input("what is your eye colour")
gender = ""
while not gender in ["m","f"] :
gender = input("what is your gender?(m/f)")
fhair = ""
while not fhair in ["y","n"]:
fhair = input("do you have facial hair?(y/n)")
glass = ""
while not glass in ["y","n"]:
glass = input("do you have glasses?(y/n)")
charprof = [name,hair,hat,eye,gender,fhair,glass]
getpic(name)
return charprof
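# --- Usage sketch (not part of the original file) ---
# A minimal, hypothetical way this module might be driven on a Raspberry Pi
# with the picamera package installed (module name "camloop" assumed):
#
#   import camloop
#   profile = camloop.getchar()          # prompts for name, hair, hat, eyes, gender, facial hair, glasses
#   print("Captured profile:", profile)  # e.g. ['Alex', 'brown', 'n', 'blue', 'm', 'n', 'y']
#
# getchar() calls getpic(name) at the end, so a photo named "<name>.jpeg" is
# also written to the working directory once the user approves the preview.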
authors: ["htaylor7486@sohamcollege.org.uk"]
author: htaylor7486@sohamcollege.org.uk

blob_id: 67b04f877b89acc6dfc9c7553fba773c72403679
directory_id: 5fcefc2dff07daa503c95553ee2273403a57a2e2
path: /catkin_ws/build/my_turtle_whitcomb/catkin_generated/pkg.develspace.context.pc.py
content_id: a59f8f82da051a204e8bbeeee1c1ea927f6fcba9
detected_licenses: []
license_type: no_license
repo_name: StephenWhit/TurtleBot
snapshot_id: a5d5d6bb3fb082e2afff90b3fe7df005ef650bb8
revision_id: 61640d480f69b9f3656fc1cba257a073ed8ef3e5
branch_name: refs/heads/master
visit_date: 2020-04-01T02:52:51.074586
revision_date: 2018-10-12T19:45:15
committer_date: 2018-10-12T19:45:15
github_id: 152,799,392
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 627
extension: py
content:
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/viki/catkin_ws/devel/include;/home/viki/catkin_ws/src/my_turtle_whitcomb/include".split(';') if "/home/viki/catkin_ws/devel/include;/home/viki/catkin_ws/src/my_turtle_whitcomb/include" != "" else []
PROJECT_CATKIN_DEPENDS = "message_runtime;roscpp;rospy;std_msgs".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-lmy_turtle_whitcomb".split(';') if "-lmy_turtle_whitcomb" != "" else []
PROJECT_NAME = "my_turtle_whitcomb"
PROJECT_SPACE_DIR = "/home/viki/catkin_ws/devel"
PROJECT_VERSION = "0.0.0"
authors: ["stephenwhit897@gmail.com"]
author: stephenwhit897@gmail.com

blob_id: 6d440df45f5272e972930a42b1a331ba016a59be
directory_id: c38ad398d5909eade726fa1c2849b0cd124ef9b7
path: /rltime/env_wrappers/switching_env_wrapper.py
content_id: f34a2bed13d750ddcd9154f6f00f40a58290fdd5
detected_licenses: ["Apache-2.0"]
license_type: permissive
repo_name: frederikschubert/rltime
snapshot_id: 612318e9ff8702e6775193b6261ea6a83b1d38fd
revision_id: d1722ffd4cf7b4599655b8d9c64abc243919afc9
branch_name: refs/heads/master
visit_date: 2020-09-11T02:30:51.074875
revision_date: 2019-12-16T10:44:23
committer_date: 2019-12-16T10:44:23
github_id: 221,911,500
star_events_count: 0
fork_events_count: 0
gha_license_id: Apache-2.0
gha_event_created_at: 2019-11-15T11:34:33
gha_created_at: 2019-11-15T11:34:33
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 547
extension: py
content:
from typing import List
import gym
import numpy as np
from gym import spaces
class SwitchingWrapper(gym.Wrapper):
def __init__(self, env: gym.Env, env_index: int):
super().__init__(env)
self.env_index = env_index
def reset(self, **kwargs):
return self.env.reset(**kwargs)
def step(self, action):
observation, reward, done, info = self.env.step(action)
return (
observation,
reward,
done,
{**info, **{"env_index": self.env_index}},
)
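# --- Usage sketch (not part of the original file) ---
# A minimal, hypothetical example of wrapping a registered gym environment
# (here "CartPole-v1") so every step's info dict carries the wrapper's index:
#
#   env = SwitchingWrapper(gym.make("CartPole-v1"), env_index=3)
#   obs = env.reset()
#   obs, reward, done, info = env.step(env.action_space.sample())
#   assert info["env_index"] == 3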
authors: ["frederik.schubert@mailbox.org"]
author: frederik.schubert@mailbox.org

blob_id: 0176c8e8f9f456d2c8194d846412d68db7679af2
directory_id: e9552e0e7960a8b04ec2f3e4889d51ffb1e5318c
path: /td/client.py
content_id: 5732f13556e75132f0fc37d1b75f711f90dcd8fc
detected_licenses: []
license_type: no_license
repo_name: webclinic017/ethan_trade_bot
snapshot_id: 32d76270f18f339c7a116b83128a4954669711f6
revision_id: 9b78e216be38dc9dd709d5e0bcc936ea8886b751
branch_name: refs/heads/main
visit_date: 2023-06-14T19:45:38.614865
revision_date: 2021-07-03T07:10:34
committer_date: 2021-07-03T07:10:34
github_id: null
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 76,843
extension: py
content:
import os
import time
import json
import datetime
import requests
import urllib.parse
import dateutil.parser
from td.stream import TDStreamerClient
class TDClient():
'''
TD Ameritrade API Client Class.
Implements OAuth 2.0 Authorization Code Grant workflow, handles configuration
and state management, adds token for authenticated calls, and performs request
to the TD Ameritrade API.
'''
def __init__(self, **kwargs):
'''
Initializes the session with default values and any user-provided overrides.
The following arguments MUST be specified at runtime or else initialization
will fail.
NAME: consumer_id
DESC: The Consumer ID assigned to you during the App registration. This can
be found at the app registration portal.
NAME: account_number
DESC: This is the account number for your main TD Ameritrade Account.
NAME: account_password
DESC: This is the account password for your main TD Ameritrade Account.
NAME: redirect_uri
DESC: This is the redirect URL that you specified when you created your
TD Ameritrade Application.
'''
# define the configuration settings.
self.config = {'consumer_id': None,
'account_number': None,
'account_password': None,
'redirect_uri': None,
'resource':'https://api.tdameritrade.com',
'api_version':'/v1',
'cache_state': True,
'authenticaiton_url':'https://auth.tdameritrade.com',
'auth_endpoint':'https://auth.tdameritrade.com' + '/auth?',
'token_endpoint':'https://api.tdameritrade.com' + '/v1' + '/oauth2/token',
'refresh_enabled': True}
# This serves as a mechanism to validate input parameters for the different endpoint arguments.
self.endpoint_arguments = {
'search_instruments':{'projection': ['symbol-search', 'symbol-regex', 'desc-search', 'desc-regex', 'fundamental']},
'get_market_hours':{'markets':['EQUITY', 'OPTION', 'FUTURE', 'BOND', 'FOREX']},
'get_movers':{'market':['$DJI', '$COMPX', '$SPX.X'],
'direction':['up','down'],
'change':['value','percent']},
'get_user_principals':{'fields':['streamerSubscriptionKeys', 'streamerConnectionInfo', 'preferences', 'surrogateIds']}
}
# loop through the key word arguments.
for key in kwargs:
# there is a chance an unknown argument was passed through. Print a warning if this is the case.
if key not in self.config:
print("WARNING: The argument, {} is an unknown argument.".format(key))
raise KeyError('Invalid Argument Name.')
# update the configuration settings so they now contain the passed through value.
self.config.update(kwargs.items())
# call the state_manager method and update the state to init (initialized)
self.state_manager('init')
# define a new attribute called 'authstate' and initialize it to False. This will be used by our login function.
self.authstate = False
# Initialize the client with no streaming session.
self.streaming_session = None
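# --- Usage sketch (not part of the original file) ---
# A hypothetical instantiation with the four required arguments named in the
# docstring above (values are placeholders, not real credentials):
#
#   td_session = TDClient(consumer_id='MY_CONSUMER_ID',
#                         account_number='MY_ACCOUNT_NUMBER',
#                         account_password='MY_ACCOUNT_PASSWORD',
#                         redirect_uri='https://localhost/callback')
#   td_session.login()   # walks through the OAuth flow defined further below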
def __repr__(self):
'''
Defines the string representation of our TD Ameritrade Class instance.
RTYPE: String
'''
# grab the logged in state.
if self.state['loggedin']:
logged_in_state = 'True'
else:
logged_in_state = 'False'
# define the string representation
str_representation = '<TDAmeritrade Client (logged_in = {}, authorized = {})>'.format(logged_in_state, self.authstate)
return str_representation
def headers(self, mode = None):
'''
Returns a dictionary of default HTTP headers for calls to the TD Ameritrade API;
the headers carry the Authorization bearer token built from the current access token.
NAME: mode
DESC: Defines the content-type for the headers dictionary.
TYPE: String
'''
# grab the access token
token = self.state['access_token']
# create the headers dictionary
headers = {'Authorization' : f'Bearer {token}'}
if mode == 'application/json':
headers['Content-type'] = 'application/json'
return headers
def api_endpoint(self, url):
'''
Convert relative endpoint (e.g., 'quotes') to full API endpoint.
NAME: url
DESC: The URL that needs conversion to a full endpoint URL.
TYPE: String
RTYPE: String
'''
# if they pass through a valid url then, just use that.
if urllib.parse.urlparse(url).scheme in ['http', 'https']:
return url
# otherwise build the URL
return urllib.parse.urljoin( self.config['resource'] + self.config['api_version'] + "/", url.lstrip('/'))
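# --- Example (not part of the original file) ---
# Given the default config above, a relative endpoint is expanded like so:
#
#   self.api_endpoint('quotes')
#   # -> 'https://api.tdameritrade.com/v1/quotes'
#
# while a fully qualified 'http(s)://...' URL is returned unchanged.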
def state_manager(self, action):
'''
Manages the self.state dictionary. Initialize State will set
the properties to their default values. Save will save the
current state if 'cache_state' is set to TRUE.
NAME: action
DESC: The action argument must be one of the following:
'init' -- Initialize State.
'save' -- Save the current state.
TYPE: String
'''
# define the initialized state; these are the default values.
initialized_state = {'access_token': None,
'refresh_token': None,
'access_token_expires_at': 0,
'refresh_token_expires_at':0,
'authorization_url': None,
'redirect_code': None,
'token_scope': '',
'loggedin': False}
# Grab the current directory of the client file, that way we can store the JSON file in the same folder.
# dir_path = os.path.expanduser("~")
# filename = 'TDAmeritradeState.json'
dir_path = os.path.dirname(os.path.realpath(__file__))
filename = 'TDAmeritradeState.json'
file_path = os.path.join(dir_path, filename)
# if the action is 'init', initialize the state.
if action == 'init':
self.state = initialized_state
# if they allowed for caching and the file exists, load the file.
if self.config['cache_state'] and os.path.isfile(file_path):
with open(file_path, 'r') as fileHandle:
self.state.update(json.load(fileHandle))
# if they didn't allow for caching, delete the file.
elif not self.config['cache_state'] and os.path.isfile(os.path.join(dir_path, filename)):
os.remove(file_path)
# if they want to save it and have allowed for caching, then save the file.
elif action == 'save' and self.config['cache_state']:
with open(file_path, 'w') as fileHandle:
# build JSON string using dictionary comprehension.
json_string = {key:self.state[key] for key in initialized_state}
json.dump(json_string, fileHandle)
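# --- Note (not part of the original file) ---
# When cache_state is True, the 'save' action writes TDAmeritradeState.json
# next to this file with exactly the keys of initialized_state, e.g.:
#
#   {"access_token": "...", "refresh_token": "...",
#    "access_token_expires_at": 0, "refresh_token_expires_at": 0,
#    "authorization_url": null, "redirect_code": null,
#    "token_scope": "", "loggedin": false}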
def login(self):
'''
Ask the user to authenticate themselves via the TD Ameritrade Authentication Portal. This will
create a URL, display it for the user to go to, and request that they paste the final URL into
the command window.
Once the user is authenticated the API key is valid for 90 days, so refresh tokens may be used
from this point, up to the 90 days.
'''
# if caching is enabled then attempt silent authentication.
if self.config['cache_state']:
# if it was successful, the user is authenticated.
if self.silent_sso():
# update the authentication state
self.authstate = 'Authenticated'
return True
# update the authentication state
self.authstate = 'Authenticated'
# prepare the payload to login
data = {'response_type': 'code',
'redirect_uri': self.config['redirect_uri'],
'client_id': self.config['consumer_id'] + '@AMER.OAUTHAP'}
# url encode the data.
params = urllib.parse.urlencode(data)
# build the full URL for the authentication endpoint.
url = self.config['auth_endpoint'] + params
# set the newly created 'authorization_url' key to the newly created url
self.state['authorization_url'] = url
# ask the user to go to the URL provided, where they will be prompted to authenticate themselves.
print('Please go to the URL provided and authorize your account: {}'.format(self.state['authorization_url']))
# ask the user to take the final URL after authentication and paste here so we can parse.
my_response = input('Paste the full URL redirect here: ')
# store the redirect URL
self.state['redirect_code'] = my_response
# this will complete the final part of the authentication process.
self.grab_access_token()
def logout(self):
'''
Clears the current TD Ameritrade Connection state.
'''
# change state to initialized so they will have to either get a
# new access token or refresh token next time they use the API
self.state_manager('init')
def grab_access_token(self):
'''
Access token handler for AuthCode Workflow. This takes the
authorization code parsed from the auth endpoint to call the
token endpoint and obtain an access token.
'''
# Parse the URL
url_dict = urllib.parse.parse_qs(self.state['redirect_code'])
# Convert the values to a list.
url_values = list(url_dict.values())
# Grab the Code, which is stored in a list.
url_code = url_values[0][0]
# define the parameters of our access token post.
data = {'grant_type':'authorization_code',
'client_id':self.config['consumer_id'],
'access_type':'offline',
'code':url_code,
'redirect_uri':self.config['redirect_uri']}
# post the data to the token endpoint and store the response.
token_response = requests.post(url = self.config['token_endpoint'], data = data, verify = True)
# call the save_token method to save the access token.
self.token_save(token_response)
# update the state if the request was successful.
if token_response and token_response.ok:
self.state_manager('save')
def silent_sso(self):
'''
Attempt a silent authentication, by checking whether current access token
is valid and/or attempting to refresh it. Returns True if we have successfully
stored a valid access token.
RTYPE: Boolean
'''
# if the current access token is not expired then we are still authenticated.
if self.token_seconds(token_type = 'access_token') > 0:
return True
# if the refresh token is expired then you have to do a full login.
elif self.token_seconds(token_type = 'refresh_token') <= 0:
return False
# if the current access token is expired then try and refresh access token.
elif self.state['refresh_token'] and self.token_refresh():
return True
# More than likely a first-time login, so we can't do silent authentication.
return False
def token_refresh(self):
'''
Refreshes the current access token.
RTYPE: Boolean
'''
# build the parameters of our request
data = {'client_id': self.config['consumer_id'] + '@AMER.OAUTHAP',
'grant_type':'refresh_token',
'access_type':'offline',
'refresh_token': self.state['refresh_token']}
# make a post request to the token endpoint
response = requests.post(self.config['token_endpoint'], data=data, verify=True)
# if there was an error go through the full authentication
if response.status_code == 401:
print('The Credentials you passed through are invalid.')
return False
elif response.status_code == 400:
print('Validation was unsuccessful.')
return False
elif response.status_code == 500:
print('The TD Server is experiencing an error, please try again later.')
return False
elif response.status_code == 403:
print("You don't have access to this resource, cannot authenticate.")
return False
elif response.status_code == 503:
print("The TD Server can't respond, please try again later.")
return False
else:
# save the token and the state, since we now have a new access token that has a new expiration date.
self.token_save(response)
self.state_manager('save')
return True
def token_save(self, response):
'''
Parses an access token from the response of a POST request and saves it
in the state dictionary for future use. Additionally, it will store the
expiration time and the refresh token.
NAME: response
DESC: A response object received from the `token_refresh` or `grab_access_token`
methods.
TYPE: requests.Response
RTYPE: Boolean
'''
# parse the data.
json_data = response.json()
# make sure there is an access token before proceeding.
if 'access_token' not in json_data:
self.logout()
return False
# save the access token and refresh token
self.state['access_token'] = json_data['access_token']
self.state['refresh_token'] = json_data['refresh_token']
# and the logged in status
self.state['loggedin'] = True
# store token expiration time
self.state['access_token_expires_at'] = time.time() + int(json_data['expires_in'])
self.state['refresh_token_expires_at'] = time.time() + int(json_data['refresh_token_expires_in'])
return True
def token_seconds(self, token_type = 'access_token'):
'''
Return the number of seconds until the current access token or refresh token
will expire. The default value is access token because this is the most commonly used
token during requests.
NAME: token_type
DESC: The type of token you would like to determine lifespan for. Possible values are:
'access_token'
'refresh_token'
TYPE: String
RTYPE: Integer
'''
# if needed check the access token.
if token_type == 'access_token':
# if the time to expiration is less than or equal to 0, return 0.
if not self.state['access_token'] or time.time() >= self.state['access_token_expires_at']:
return 0
# else return the number of seconds until expiration.
token_exp = int(self.state['access_token_expires_at'] - time.time())
# if needed check the refresh token.
elif token_type == 'refresh_token':
# if the time to expiration is less than or equal to 0, return 0.
if not self.state['refresh_token'] or time.time() >= self.state['refresh_token_expires_at']:
return 0
# else return the number of seconds until expiration.
token_exp = int(self.state['refresh_token_expires_at'] - time.time())
return token_exp
def token_validation(self, nseconds = 5):
'''
Verify the current access token is valid for at least N seconds, and
if not then attempt to refresh it. Can be used to assure a valid token
before making a call to the TD Ameritrade API.
PARA: nseconds
TYPE: integer
DESC: The minimum number of seconds the token has to be valid for before
attempting to get a refresh token.
'''
if self.token_seconds(token_type = 'access_token') < nseconds and self.config['refresh_enabled']:
self.token_refresh()
'''
----------------------------------------------------------------------------------------------------------------------------
----------------------------------------------------------------------------------------------------------------------------
THIS BEGINS THE ALL ENDPOINTS PORTION.
----------------------------------------------------------------------------------------------------------------------------
----------------------------------------------------------------------------------------------------------------------------
'''
def validate_arguments(self, endpoint = None, parameter_name = None, parameter_argument = None):
'''
This will validate an argument for the specified endpoint and raise an error if the argument
is not valid. Can take both a list of arguments or a single argument.
NAME: endpoint
DESC: This is the endpoint name, and should line up exactly with the TD Ameritrade Client library.
TYPE: String
NAME: parameter_name
DESC: An endpoint can have a parameter that needs to be passed through, this represents the name of
that parameter.
TYPE: String
NAME: parameter_argument
DESC: The arguments being validated for the particular parameter name. This can either be a single value
or a list of values.
TYPE: List<Strings> OR String
EXAMPLES:
WITH NO LIST:
------------------------------------------------------------
api_endpoint = 'search_instruments'
para_name = 'projection'
para_args = 'fundamental'
self.validate_arguments(endpoint = api_endpoint,
parameter_name = para_name,
parameter_argument = para_args)
WITH LIST:
------------------------------------------------------------
api_endpoint = 'get_market_hours'
para_name = 'markets'
para_args = ['FOREX', 'EQUITY']
self.validate_arguments(endpoint = api_endpoint,
parameter_name = para_name,
parameter_argument = para_args)
'''
# grab the possible parameters for the endpoint.
parameters_dictionary = self.endpoint_arguments[endpoint]
# grab the parameter arguments, for the specified parameter name.
parameter_possible_arguments = parameters_dictionary[parameter_name]
# if it's a list then see if it matches any of the possible values.
if type(parameter_argument) is list:
# build the validation result list.
validation_result = [argument not in parameter_possible_arguments for argument in parameter_argument]
# if any of the results are FALSE then raise an error.
if any(validation_result):
print('\nThe value you passed through is not valid, please choose one of the following valid values: {} \n'.format(' ,'.join(parameter_possible_arguments)))
raise ValueError('Invalid Value.')
elif not any(validation_result):
return True
# if the argument isn't in the list of possible values, raise an error.
elif parameter_argument not in parameter_possible_arguments:
print('\nThe value you passed through is not valid, please choose one of the following valid values: {} \n'.upper().format(' ,'.join(parameter_possible_arguments)))
raise ValueError('Invalid Value.')
elif parameter_argument in parameter_possible_arguments:
return True
def prepare_arguments_list(self, parameter_list = None):
'''
Some endpoints can take multiple values for a parameter, this
method takes that list and creates a valid string that can be
used in an API request. The list can have either one index or
multiple indexes.
NAME: parameter_list
DESC: A list of parameter values assigned to an argument.
TYPE: List
EXAMPLE:
SessionObject.prepare_arguments_list(parameter_list = ['MSFT', 'SQ'])
'''
# validate it's a list.
if type(parameter_list) is list:
# specify the delimeter and join the list.
delimeter = ','
parameter_list = delimeter.join(parameter_list)
return parameter_list
def get_quotes(self, instruments = None):
'''
Serves as the mechanism to make a request to the Get Quote and Get Quotes Endpoint.
If one item is provided a Get Quote request will be made and if more than one item
is provided then a Get Quotes request will be made.
Documentation Link: https://developer.tdameritrade.com/quotes/apis
NAME: instruments
DESC: A list of different financial instruments.
TYPE: List
EXAMPLES:
SessionObject.get_quotes(instruments = ['MSFT'])
SessionObject.get_quotes(instruments = ['MSFT','SQ'])
'''
# first make sure that the token is still valid.
self.token_validation()
# grab the original headers we have stored.
merged_headers = self.headers()
# because we have a list argument, prep it for the request.
instruments = self.prepare_arguments_list(parameter_list = instruments)
# build the params dictionary
data = {'apikey':self.config['consumer_id'],
'symbol':instruments}
# define the endpoint
endpoint = '/marketdata/quotes'
# build the url
url = self.api_endpoint(endpoint)
# return the response of the get request.
return requests.get(url = url, headers = merged_headers, params=data, verify = True).json()
def get_price_history(self, symbol = None, periodType = None, period = None, startDate = None, endDate = None,
frequencyType = None, frequency = None, needExtendedHoursData = None):
'''
STILL BUILDING
NAME: symbol
DESC: The ticker symbol to request data for.
TYPE: String
NAME: periodType
DESC: The type of period to show. Valid values are day, month, year, or ytd (year to date). Default is day.
TYPE: String
NAME: period
DESC: The number of periods to show.
TYPE: Integer
NAME: startDate
DESC: Start date as milliseconds since epoch.
TYPE: Integer
NAME: endDate
DESC: End date as milliseconds since epoch.
TYPE: Integer
NAME: frequencyType
DESC: The type of frequency with which a new candle is formed.
TYPE: String
NAME: frequency
DESC: The number of the frequencyType to be included in each candle.
TYPE: Integer
NAME: needExtendedHoursData
DESC: True to return extended hours data, false for regular market hours only. Default is true
TYPE: Boolean
'''
# Validator function for get_price_history
def validate(data):
# Valid periods by periodType
valid_periods = {
'day': [1, 2, 3, 4, 5, 10],
'month': [1, 2, 3, 6],
'year': [1, 2, 3, 5, 10, 15, 20],
'ytd': [1],
}
# Valid frequencyType by period
valid_frequency_types = {
'day': ['minute'],
'month': ['daily', 'weekly'],
'year': ['daily', 'weekly', 'monthly'],
'ytd': ['daily', 'weekly'],
}
# Valid frequency by frequencyType
valid_frequencies = {
'minute': [1, 5, 10, 15, 30],
'daily': [1],
'weekly': [1],
'monthly': [1]
}
# check data to confirm that either period or date range is provided
if (data['startDate'] and data['endDate'] and not data['period']) or (not data['startDate'] and not data['endDate'] and data['period']):
# Validate periodType
if data['periodType'] not in valid_periods.keys():
print('Period Type: {} is not valid. Valid values are {}'.format(data['periodType'], valid_periods.keys()))
raise ValueError('Invalid Value')
# Validate period
if data['period'] and data['period'] not in valid_periods[data['periodType']]:
print('Period: {} is not valid. Valid values are {}'.format(data['period'], valid_periods[data['periodType']]))
raise ValueError('Invalid Value')
# Validate frequencyType
if data['frequencyType'] not in valid_frequencies.keys():
print('frequencyType: {} is not valid. Valid values are {}'.format(data['frequencyType'], valid_frequencies.keys()))
raise ValueError('Invalid Value')
# Validate frequencyType by periodType
if data['frequencyType'] not in valid_frequency_types[data['periodType']]:
print('frequencyType: {} is not valid. Valid values for period: {} are {}'.format(data['frequencyType'], data['periodType'], valid_frequency_types[data['periodType']]))
raise ValueError('Invalid Value')
# Validate frequency by frequencyType
if data['frequency'] not in valid_frequencies[data['frequencyType']]:
print('frequency: {} is not valid. Valid values are {}'.format(data['frequency'], valid_frequencies[data['frequencyType']]))
raise ValueError('Invalid Value')
# TODO Validate startDate and endDate
# Recompute payload dictionary and remove any None values
return({k: v for k, v in data.items() if v is not None})
else:
print('Either startDate/endDate or period must be provided exclusively.')
raise ValueError('Invalid Value')
# first make sure that the token is still valid.
self.token_validation()
# grab the original headers we have stored.
merged_headers = self.headers()
# build the params dictionary
data = {'apikey':self.config['consumer_id'],
'period':period,
'periodType':periodType,
'startDate':startDate,
'endDate':endDate,
'frequency':frequency,
'frequencyType':frequencyType,
'needExtendedHoursData':needExtendedHoursData}
# define the endpoint
endpoint = '/marketdata/{}/pricehistory'.format(symbol)
# validate the data
data = validate(data)
# build the url
url = self.api_endpoint(endpoint)
# return the response of the get request.
return requests.get(url = url, headers = merged_headers, params=data, verify = True).json()
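# --- Usage sketch (not part of the original file) ---
# A hypothetical call that satisfies the validation tables above: a
# period-based request for five days of five-minute candles.
#
#   SessionObject.get_price_history(symbol='MSFT',
#                                   periodType='day', period=5,
#                                   frequencyType='minute', frequency=5,
#                                   needExtendedHoursData=False)
#
# Either period or the startDate/endDate pair may be supplied, never both.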
def search_instruments(self, symbol = None, projection = 'symbol-search'):
'''
Search or retrieve instrument data, including fundamental data.
Documentation Link: https://developer.tdameritrade.com/instruments/apis/get/instruments
NAME: symbol
DESC: The symbol of the financial instrument you would like to search.
TYPE: string
NAME: projection
DESC: The type of request, default is "symbol-search". The type of request include the following:
1. symbol-search
Retrieve instrument data of a specific symbol or cusip
2. symbol-regex
Retrieve instrument data for all symbols matching regex.
Example: symbol=XYZ.* will return all symbols beginning with XYZ
3. desc-search
Retrieve instrument data for instruments whose description contains
the word supplied. Example: symbol=FakeCompany will return all
instruments with FakeCompany in the description
4. desc-regex
Search description with full regex support. Example: symbol=XYZ.[A-C]
returns all instruments whose descriptions contain a word beginning
with XYZ followed by a character A through C
5. fundamental
Returns fundamental data for a single instrument specified by exact symbol.
TYPE: string
EXAMPLES:
SessionObject.search_instrument(symbol = 'XYZ', projection = 'symbol-search')
SessionObject.search_instrument(symbol = 'XYZ.*', projection = 'symbol-regex')
SessionObject.search_instrument(symbol = 'FakeCompany', projection = 'desc-search')
SessionObject.search_instrument(symbol = 'XYZ.[A-C]', projection = 'desc-regex')
SessionObject.search_instrument(symbol = 'XYZ.[A-C]', projection = 'fundamental')
'''
# first make sure that the token is still valid.
self.token_validation()
# validate argument
self.validate_arguments(endpoint = 'search_instruments', parameter_name = 'projection', parameter_argument = projection)
# grab the original headers we have stored.
merged_headers = self.headers()
# build the params dictionary
data = {'apikey':self.config['consumer_id'],
'symbol':symbol,
'projection':projection}
# define the endpoint
endpoint = '/instruments'
# build the url
url = self.api_endpoint(endpoint)
# return the response of the get request.
return requests.get(url = url, headers=merged_headers, params=data, verify = True).json()
def get_instruments(self, cusip = None):
'''
Get an instrument by CUSIP (Committee on Uniform Securities Identification Procedures) code.
Documentation Link: https://developer.tdameritrade.com/instruments/apis/get/instruments/%7Bcusip%7D
NAME: cusip
DESC: The CUSIP code of a given financial instrument.
TYPE: string
EXAMPLES:
SessionObject.get_instruments(cusip = 'SomeCUSIPNumber')
'''
# first make sure that the token is still valid.
self.token_validation()
# grab the original headers we have stored.
merged_headers = self.headers()
# build the params dictionary
data = {'apikey':self.config['consumer_id']}
# define the endpoint
endpoint = '/instruments'
# build the url
url = self.api_endpoint(endpoint) + "/" + cusip
# return the response of the get request.
return requests.get(url = url, headers=merged_headers, params=data, verify = True).json()
def get_market_hours(self, markets = None, date = None):
'''
Serves as the mechanism to make a request to the "Get Hours for Multiple Markets" and
"Get Hours for Single Markets" Endpoint. If one market is provided a "Get Hours for Single Markets"
request will be made and if more than one item is provided then a "Get Hours for Multiple Markets"
request will be made.
Documentation Link: https://developer.tdameritrade.com/market-hours/apis
NAME: markets
DESC: The markets for which you're requesting market hours, comma-separated.
Valid markets are EQUITY, OPTION, FUTURE, BOND, or FOREX.
TYPE: List<Strings>
NAME: date
DESC: The date you wish to receive market hours for. Valid ISO-8601 formats
are: yyyy-MM-dd and yyyy-MM-dd'T'HH:mm:ssz
TYPE: String
EXAMPLES:
SessionObject.get_market_hours(markets = ['EQUITY'], date = '2019-10-19')
SessionObject.get_market_hours(markets = ['EQUITY','FOREX'], date = '2019-10-19')
'''
# first make sure that the token is still valid.
self.token_validation()
# validate argument
self.validate_arguments(endpoint = 'get_market_hours', parameter_name = 'markets', parameter_argument = markets)
# because we have a list argument, prep it for the request.
markets = self.prepare_arguments_list(parameter_list = markets)
# grab the original headers we have stored.
merged_headers = self.headers()
# build the params dictionary
data = {'apikey':self.config['consumer_id'],
'markets':markets,
'date':date}
# define the endpoint
endpoint = '/marketdata/hours'
# build the url
url = self.api_endpoint(endpoint)
# return the response of the get request.
return requests.get(url = url, headers=merged_headers, params=data, verify = True).json()
def get_movers(self, market = None, direction = None, change = None):
'''
Top 10 (up or down) movers by value or percent for a particular market.
Documentation Link: https://developer.tdameritrade.com/movers/apis/get/marketdata
NAME: market
DESC: The index symbol to get movers for. Can be $DJI, $COMPX, or $SPX.X.
TYPE: String
NAME: direction
DESC: To return movers with the specified directions of up or down. Valid values
are up or down
TYPE: String
NAME: change
DESC: To return movers with the specified change types of percent or value. Valid
values are percent or value.
TYPE: String
EXAMPLES:
SessionObject.get_movers(market = '$DJI', direction = 'up', change = 'value')
SessionObject.get_movers(market = '$COMPX', direction = 'down', change = 'percent')
'''
# grabs a dictionary representation of our arguments and their inputs.
local_args = locals()
# we don't need the 'self' key
del local_args['self']
# first make sure that the token is still valid.
self.token_validation()
# validate arguments, before making request.
for key, value in local_args.items():
self.validate_arguments(endpoint = 'get_movers', parameter_name = key, parameter_argument = value)
# grab the original headers we have stored.
merged_headers = self.headers()
# build the params dictionary
data = {'apikey':self.config['consumer_id'],
'direction':direction,
'change':change}
# define the endpoint
endpoint = '/marketdata/{}/movers'.format(market)
# build the url
url = self.api_endpoint(endpoint)
# return the response of the get request.
return requests.get(url = url, headers=merged_headers, params=data, verify = True).json()
def get_options_chain(self, option_chain = None, args_dictionary = None):
'''
Get option chain for an optionable Symbol using one of two methods. Either,
use the OptionChain object which is a built-in object that allows for easy creation of the
POST request. Otherwise, can pass through a dictionary of all the arguments needed.
Documentation Link: https://developer.tdameritrade.com/option-chains/apis/get/marketdata/chains
NAME: option_chain
DESC: Represents a single OptionChainObject.
TYPE: TDAmeritrade.OptionChainObject
EXAMPLE:
from td.option_chain import OptionChain
option_chain_1 = OptionChain(args)
SessionObject.get_options_chain( option_chain = option_chain_1)
'''
# first make sure that the token is still valid.
self.token_validation()
# grab the original headers we have stored.
merged_headers = self.headers()
# define the endpoint
endpoint = '/marketdata/chains'
# build the url
url = self.api_endpoint(endpoint)
# Grab the items needed for the request.
if option_chain is not None:
# this request requires an API key, so let's add that.
option_chain.add_chain_key(key_name = 'apikey', key_value = self.config['consumer_id'])
# take the JSON representation of the string
data = option_chain._get_query_parameters()
else:
# otherwise take the args dictionary.
data = args_dictionary
# return the response of the get request.
return requests.get(url = url, headers = merged_headers, params = data, verify = True).json()
'''
----------------------------------------------------------------------------------------------------------------------------
----------------------------------------------------------------------------------------------------------------------------
THIS BEGINS THE ACCOUNTS ENDPOINTS PORTION.
----------------------------------------------------------------------------------------------------------------------------
----------------------------------------------------------------------------------------------------------------------------
'''
def get_accounts(self, account = 'all', fields = None):
'''
Serves as the mechanism to make a request to the "Get Accounts" and "Get Account" Endpoint.
If one account is provided a "Get Account" request will be made and if more than one account
is provided then a "Get Accounts" request will be made.
Documentation Link: https://developer.tdameritrade.com/account-access/apis
NAME: account
DESC: The account number you wish to receive data on. Default value is 'all'
which will return all accounts of the user.
TYPE: String
NAME: fields
DESC: Balances displayed by default, additional fields can be added here by
adding positions or orders.
TYPE: List<String>
EXAMPLES:
SessionObject.get_accounts(account = 'all', fields = ['orders'])
SessionObject.get_accounts(account = 'MyAccountNumber', fields = ['orders','positions'])
'''
# first make sure that the token is still valid.
self.token_validation()
# grab the original headers we have stored.
merged_headers = self.headers()
# because we have a list argument, prep it for the request.
fields = self.prepare_arguments_list(parameter_list = fields)
# build the params dictionary
data = {'apikey':self.config['consumer_id'],
'fields':fields}
# if all use '/accounts' else pass through the account number.
if account == 'all':
endpoint = '/accounts'
else:
endpoint = '/accounts/{}'.format(account)
# build the url
url = self.api_endpoint(endpoint)
# return the response of the get request.
return requests.get(url = url, headers=merged_headers, params=data, verify = True).json()
'''
----------------------------------------------------------------------------------------------------------------------------
----------------------------------------------------------------------------------------------------------------------------
THIS BEGINS THE TRANSACTIONS ENDPOINTS PORTION.
----------------------------------------------------------------------------------------------------------------------------
----------------------------------------------------------------------------------------------------------------------------
'''
def get_transactions(self, account = None, transaction_type = None, symbol = None,
start_date = None, end_date = None, transaction_id = None):
'''
Serves as the mechanism to make a request to the "Get Transactions" and "Get Transaction" Endpoint.
If one `transaction_id` is provided a "Get Transaction" request will be made and if it is not provided
then a "Get Transactions" request will be made.
Documentation Link: https://developer.tdameritrade.com/transaction-history/apis
NAME: account
DESC: The account number you wish to receive transactions for.
TYPE: String
NAME: transaction_type
DESC: The type of transaction. Only transactions with the specified type will be returned. Valid
values are the following: ALL, TRADE, BUY_ONLY, SELL_ONLY, CASH_IN_OR_CASH_OUT, CHECKING,
DIVIDEND, INTEREST, OTHER, ADVISOR_FEES
TYPE: String
NAME: symbol
DESC: The symbol in the specified transaction. Only transactions with the specified
symbol will be returned.
TYPE: String
NAME: start_date
DESC: Only transactions after the Start Date will be returned. Note: The maximum date range is
one year. Valid ISO-8601 formats are: yyyy-MM-dd.
TYPE: String
NAME: end_date
DESC: Only transactions before the End Date will be returned. Note: The maximum date range is
one year. Valid ISO-8601 formats are: yyyy-MM-dd.
TYPE: String
NAME: transaction_id
DESC: The transaction ID you wish to search. If this is specified a "Get Transaction" request is
made. Should only be used if you wish to return one transaction.
TYPE: String
EXAMPLES:
SessionObject.get_transactions(account = 'MyAccountNumber', transaction_type = 'ALL', start_date = '2019-01-31', end_date = '2019-04-28')
SessionObject.get_transactions(account = 'MyAccountNumber', transaction_type = 'ALL', start_date = '2019-01-31')
SessionObject.get_transactions(account = 'MyAccountNumber', transaction_type = 'TRADE')
SessionObject.get_transactions(transaction_id = 'MyTransactionID')
'''
# first make sure that the token is still valid.
self.token_validation()
# default to a "Get Transaction" Request if anything else is passed through along with the transaction_id.
if transaction_id != None:
# only the account number and transaction ID are needed, so null out the other filters.
transaction_type = None
start_date = None
end_date = None
# if the request type they made isn't valid print an error and return nothing.
else:
if transaction_type not in ['ALL', 'TRADE', 'BUY_ONLY', 'SELL_ONLY', 'CASH_IN_OR_CASH_OUT', 'CHECKING','DIVIDEND', 'INTEREST', 'OTHER', 'ADVISOR_FEES']:
print('The type of transaction type you specified is not valid.')
return False
# grab the original headers we have stored.
merged_headers = self.headers()
# if transaction_id is not none, it means we need to make a request to the get_transaction endpoint.
if transaction_id:
# define the endpoint
endpoint = '/accounts/{}/transactions/{}'.format(account, transaction_id)
# build the url
url = self.api_endpoint(endpoint)
# return the response of the get request.
return requests.get(url = url, headers=merged_headers, verify = True).json()
# if it isn't then we need to make a request to the get_transactions endpoint.
else:
# build the params dictionary
data = {'type':transaction_type,
'symbol':symbol,
'startDate':start_date,
'endDate':end_date}
# define the endpoint
endpoint = '/accounts/{}/transactions'.format(account)
# build the url
url = self.api_endpoint(endpoint)
# return the response of the get request.
return requests.get(url = url, headers=merged_headers, params=data, verify = True).json()
'''
----------------------------------------------------------------------------------------------------------------------------
----------------------------------------------------------------------------------------------------------------------------
THIS BEGINS THE USER INFOS & PREFERENCES ENDPOINTS PORTION.
----------------------------------------------------------------------------------------------------------------------------
----------------------------------------------------------------------------------------------------------------------------
'''
def get_preferences(self, account = None):
'''
Get's User Preferences for a specific account.
Documentation Link: https://developer.tdameritrade.com/user-principal/apis/get/accounts/%7BaccountId%7D/preferences-0
NAME: account
DESC: The account number you wish to receive preference data for.
TYPE: String
EXAMPLES:
SessionObject.get_preferences(account = 'MyAccountNumber')
'''
# first make sure that the token is still valid.
self.token_validation()
# grab the original headers we have stored.
merged_headers = self.headers()
# define the endpoint
endpoint = '/accounts/{}/preferences'.format(account)
# build the url
url = self.api_endpoint(endpoint)
# return the response of the get request.
return requests.get(url = url, headers=merged_headers, verify = True).json()
def get_streamer_subscription_keys(self, accounts = None):
'''
SubscriptionKey for provided accounts or default accounts.
Documentation Link: https://developer.tdameritrade.com/user-principal/apis/get/userprincipals/streamersubscriptionkeys-0
NAME: account
DESC: A list of account numbers you wish to receive a streamer key for.
TYPE: List<String>
EXAMPLES:
SessionObject.get_streamer_subscription_keys(account = ['MyAccountNumber'])
SessionObject.get_streamer_subscription_keys(account = ['MyAccountNumber1', 'MyAccountNumber2'])
'''
# first make sure that the token is still valid.
self.token_validation()
# grab the original headers we have stored.
merged_headers = self.headers()
# because we have a list argument, prep it for the request.
accounts = self.prepare_arguments_list(parameter_list = accounts)
# define the endpoint
endpoint = '/userprincipals/streamersubscriptionkeys'
# build the params dictionary
data = {'accountIds':accounts}
# build the url
url = self.api_endpoint(endpoint)
# return the response of the get request.
return requests.get(url = url, headers=merged_headers, params = data, verify = True).json()
def get_user_principals(self, fields = None):
'''
Returns User Principal details.
Documentation Link: https://developer.tdameritrade.com/user-principal/apis/get/userprincipals-0
NAME: fields
DESC: A comma separated String which allows one to specify additional fields to return. None of
these fields are returned by default. Possible values in this String can be:
1. streamerSubscriptionKeys
2. streamerConnectionInfo
3. preferences
4. surrogateIds
TYPE: List<String>
EXAMPLES:
SessionObject.get_user_principals(fields = ['preferences'])
SessionObject.get_user_principals(fields = ['preferences', 'streamerConnectionInfo'])
'''
# first make sure that the token is still valid.
self.token_validation()
# validate arguments
self.validate_arguments(endpoint = 'get_user_principals', parameter_name = 'fields', parameter_argument = fields)
# grab the original headers we have stored.
merged_headers = self.headers()
# because we have a list argument, prep it for the request.
fields = self.prepare_arguments_list(parameter_list = fields)
# define the endpoint
endpoint = '/userprincipals'
# build the params dictionary
data = {'fields':fields}
# build the url
url = self.api_endpoint(endpoint)
# return the response of the get request.
return requests.get(url = url, headers=merged_headers, params = data, verify = True).json()
def update_preferences(self, account = None, dataPayload = None):
'''
Update preferences for a specific account. Please note that the directOptionsRouting and
directEquityRouting values cannot be modified via this operation.
Documentation Link: https://developer.tdameritrade.com/user-principal/apis/put/accounts/%7BaccountId%7D/preferences-0
NAME: account
DESC: The account number you wish to update preferences for.
TYPE: String
NAME: dataPayload
DESC: A dictionary that provides all the keys you wish to update. It must contain the following keys to be valid.
1. expressTrading
2. directOptionsRouting
3. directEquityRouting
4. defaultEquityOrderLegInstruction
5. defaultEquityOrderType
6. defaultEquityOrderPriceLinkType
7. defaultEquityOrderDuration
8. defaultEquityOrderMarketSession
9. defaultEquityQuantity
10. mutualFundTaxLotMethod
11. optionTaxLotMethod
12. equityTaxLotMethod
13. defaultAdvancedToolLaunch
14. authTokenTimeout
TYPE: dictionary
EXAMPLES:
SessionObject.update_preferences(account = 'MyAccountNumber', dataPayload = <Dictionary>)
'''
# first make sure that the token is still valid.
self.token_validation()
# grab the original headers we have stored.
merged_headers = self.headers()
merged_headers['Content-Type'] = 'application/json'
# define the endpoint
endpoint = '/accounts/{}/preferences'.format(account)
# build the url
url = self.api_endpoint(endpoint)
# make the request
response = requests.put(url = url, headers = merged_headers, data = json.dumps(dataPayload), verify = True)
if response.status_code == 204:
return "Data successfully updated."
else:
return response.content
'''
----------------------------------------------------------------------------------------------------------------------------
----------------------------------------------------------------------------------------------------------------------------
THIS BEGINS THE WATCHLISTS ENDPOINTS PORTION.
----------------------------------------------------------------------------------------------------------------------------
----------------------------------------------------------------------------------------------------------------------------
'''
def create_watchlist(self, account = None, name = None, watchlistItems = None):
'''
Create watchlist for specific account. This method does not verify that the symbol or asset type are valid.
Documentation Link: https://developer.tdameritrade.com/watchlist/apis/post/accounts/%7BaccountId%7D/watchlists-0
NAME: account
DESC: The account number you wish to create the watchlist for.
TYPE: String
NAME: name
DESC: The name you want to give your watchlist.
TYPE: String
NAME: watchlistItems
DESC: A list of WatchListItems object.
TYPE: List<WatchListItems>
EXAMPLES:
WatchListItem1 = WatchListItem()
WatchListItem2 = WatchListItem()
SessionObject.create_watchlist(account = 'MyAccountNumber',
name = 'MyWatchlistName',
watchlistItems = [ WatchListItem1, WatchListItem2 ])
'''
# first make sure that the token is still valid.
self.token_validation()
# grab the original headers we have stored.
merged_headers = self.headers()
merged_headers['Content-Type'] = 'application/json'
# define the endpoint
endpoint = '/accounts/{}/watchlists'.format(account)
# define the payload
payload = {"name": name, "watchlistItems": watchlistItems}
# build the url
url = self.api_endpoint(endpoint)
# make the request
response = requests.post(url = url, headers = merged_headers, data = json.dumps(payload) , verify = True)
if response.status_code == 201:
return "Watchlist {} was successfully created.".format(name)
else:
return response.content
def get_watchlist_accounts(self, account = 'all'):
'''
Serves as the mechanism to make a request to the "Get Watchlist for Single Account" and
"Get Watchlist for Multiple Accounts" Endpoint. If one account is provided a
"Get Watchlist for Single Account" request will be made and if 'all' is provided then a
"Get Watchlist for Multiple Accounts" request will be made.
Documentation Link: https://developer.tdameritrade.com/watchlist/apis
NAME: account
DESC: The account number you wish to pull watchlists from. Default value is 'all'
TYPE: String
EXAMPLES:
SessionObject.get_watchlist_accounts(account = 'all')
SessionObject.get_watchlist_accounts(account = 'MyAccount1')
'''
# first make sure that the token is still valid.
self.token_validation()
# grab the original headers we have stored.
merged_headers = self.headers()
# define the endpoint
if account == 'all':
endpoint = '/accounts/watchlists'
else:
endpoint = '/accounts/{}/watchlists'.format(account)
# build the url
url = self.api_endpoint(endpoint)
# make the request
return requests.get(url = url, headers = merged_headers, verify = True).json()
def get_watchlist(self, account = None, watchlist_id = None):
'''
Returns a specific watchlist for a specific account.
Documentation Link: https://developer.tdameritrade.com/watchlist/apis/get/accounts/%7BaccountId%7D/watchlists/%7BwatchlistId%7D-0
NAME: account
DESC: The account number you wish to pull watchlists from.
TYPE: String
NAME: watchlist_id
DESC: The ID of the watchlist you wish to return.
TYPE: String
EXAMPLES:
SessionObject.get_watchlist(account = 'MyAccount1', watchlist_id = 'MyWatchlistId')
'''
# first make sure that the token is still valid.
self.token_validation()
# grab the original headers we have stored.
merged_headers = self.headers()
# define the endpoint
endpoint = '/accounts/{}/watchlists/{}'.format(account, watchlist_id)
# build the url
url = self.api_endpoint(endpoint)
# make the request
return requests.get(url = url, headers = merged_headers, verify = True).json()
def delete_watchlist(self, account = None, watchlist_id = None):
'''
Deletes a specific watchlist for a specific account.
Documentation Link: https://developer.tdameritrade.com/watchlist/apis/delete/accounts/%7BaccountId%7D/watchlists/%7BwatchlistId%7D-0
NAME: account
DESC: The account number you wish to delete the watchlist from.
TYPE: String
NAME: watchlist_id
DESC: The ID of the watchlist you wish to delete.
TYPE: String
EXAMPLES:
SessionObject.delete_watchlist(account = 'MyAccount1', watchlist_id = 'MyWatchlistId')
'''
# first make sure that the token is still valid.
self.token_validation()
# grab the original headers we have stored.
merged_headers = self.headers()
# define the endpoint
endpoint = '/accounts/{}/watchlists/{}'.format(account, watchlist_id)
# build the url
url = self.api_endpoint(endpoint)
# make the request
return requests.delete(url = url, headers = merged_headers, verify = True).status_code
def update_watchlist(self, account = None, watchlist_id = None, name = None, watchlistItems = None):
'''
Partially update watchlist for a specific account: change watchlist name, add to the beginning/end of a
watchlist, update or delete items in a watchlist. This method does not verify that the symbol or asset
type are valid.
Documentation Link: https://developer.tdameritrade.com/watchlist/apis/patch/accounts/%7BaccountId%7D/watchlists/%7BwatchlistId%7D-0
NAME: account
DESC: The account number that contains the watchlist you wish to update.
TYPE: String
NAME: watchlist_id
DESC: The ID of the watchlist you wish to update.
TYPE: String
NAME: watchlistItems
DESC: A list of the original watchlist items you wish to update and their modified keys.
TYPE: List<WatchListItems>
EXAMPLES:
WatchListItem1 = WatchListItem()
WatchListItem2 = WatchListItem()
SessionObject.update_watchlist(account = 'MyAccountNumber',
watchlist_id = 'WatchListID',
watchlistItems = [ WatchListItem1, WatchListItem2 ])
'''
# first make sure that the token is still valid.
self.token_validation()
# grab the original headers we have stored.
merged_headers = self.headers()
merged_headers['Content-Type'] = 'application/json'
# define the payload
payload = {"name": name, "watchlistItems": watchlistItems}
# define the endpoint
endpoint = '/accounts/{}/watchlists/{}'.format(account, watchlist_id)
# build the url
url = self.api_endpoint(endpoint)
# make the request
return requests.patch(url = url, headers = merged_headers, data = json.dumps(payload), verify = True).status_code
def replace_watchlist(self, account = None, watchlist_id_new = None, watchlist_id_old = None, name_new = None, watchlistItems_new = None):
'''
STILL BUILDING
Replace watchlist for a specific account. This method does not verify that the symbol or asset type are valid.
Documentation Link: https://developer.tdameritrade.com/watchlist/apis/put/accounts/%7BaccountId%7D/watchlists/%7BwatchlistId%7D-0
NAME: account
DESC: The account number that contains the watchlist you wish to replace.
TYPE: String
NAME: watchlist_id_new
DESC: The ID of the watchlist you wish to replace with the old one.
TYPE: String
NAME: watchlist_id_old
DESC: The ID of the watchlist you wish to replace.
TYPE: String
NAME: name_new
DESC: The name of the new watchlist.
TYPE: String
NAME: watchlistItems_New
DESC: The new watchlist items you wish to add to the watchlist.
TYPE: List<WatchListItems>
EXAMPLES:
WatchListItem1 = WatchListItem()
WatchListItem2 = WatchListItem()
SessionObject.replace_watchlist(account = 'MyAccountNumber',
watchlist_id_new = 'WatchListIDNew',
watchlist_id_old = 'WatchListIDOld',
name_new = 'MyNewName',
watchlistItems_new = [ WatchListItem1, WatchListItem2 ])
'''
# first make sure that the token is still valid.
self.token_validation()
# grab the original headers we have stored.
merged_headers = self.headers()
merged_headers['Content-Type'] = 'application/json'
# define the payload
payload = {"name": name_new, "watchlistId": watchlist_id_new, "watchlistItems": watchlistItems_new}
# define the endpoint
endpoint = '/accounts/{}/watchlists/{}'.format(account, watchlist_id_old)
# build the url
url = self.api_endpoint(endpoint)
# make the request
return requests.put(url = url, headers = merged_headers, data = json.dumps(payload), verify = True).status_code
'''
----------------------------------------------------------------------------------------------------------------------------
----------------------------------------------------------------------------------------------------------------------------
THIS BEGINS THE ORDERS ENDPOINTS PORTION.
----------------------------------------------------------------------------------------------------------------------------
----------------------------------------------------------------------------------------------------------------------------
'''
def get_orders_path(self, account = None, max_results = None, from_entered_time = None, to_entered_time = None, status = None):
'''
Returns the orders for a specific account.
Documentation Link: https://developer.tdameritrade.com/account-access/apis/get/accounts/%7BaccountId%7D/orders-0
NAME: account
DESC: The account number that you want to query for orders.
TYPE: String
NAME: max_results
DESC: The maximum number of orders to retrieve.
TYPE: integer
NAME: from_entered_time
DESC: Specifies that no orders entered before this time should be returned. Valid ISO-8601 formats are:
yyyy-MM-dd and yyyy-MM-dd'T'HH:mm:ssz. Date must be within 60 days from today's date. 'to_entered_time'
must also be set.
TYPE: String
NAME: to_entered_time
DESC: Specifies that no orders entered after this time should be returned. Valid ISO-8601 formats are:
yyyy-MM-dd and yyyy-MM-dd'T'HH:mm:ssz. 'from_entered_time' must also be set.
TYPE: String
NAME: status
DESC: Specifies that only orders of this status should be returned. Possible Values are:
1. AWAITING_PARENT_ORDER
2. AWAITING_CONDITION
3. AWAITING_MANUAL_REVIEW
4. ACCEPTED
5. AWAITING_UR_NOT
6. PENDING_ACTIVATION
7. QUEUED
8. WORKING
9. REJECTED
10. PENDING_CANCEL
11. CANCELED
12. PENDING_REPLACE
13. REPLACED
14. FILLED
15. EXPIRED
EXAMPLES:
SessionObject.get_orders_query(account = 'MyAccountID', max_results = 6, from_entered_time = '2019-10-01', to_entered_time = '2019-10-10', status = 'FILLED')
SessionObject.get_orders_query(account = 'MyAccountID', max_results = 6, status = 'EXPIRED')
SessionObject.get_orders_query(account = 'MyAccountID', status = 'REJECTED')
SessionObject.get_orders_query(account = 'MyAccountID')
'''
# first make sure that the token is still valid.
self.token_validation()
# grab the original headers we have stored.
merged_headers = self.headers()
# define the payload
data = {"maxResults": max_results, "fromEnteredTime": from_entered_time, "toEnteredTime": to_entered_time, "status": status}
# define the endpoint
endpoint = '/accounts/{}/orders'.format(account)
# build the url
url = self.api_endpoint(endpoint)
# make the request
return requests.get(url = url, headers = merged_headers, params = data, verify = True).json()
def get_orders_query(self, account = None, max_results = None, from_entered_time = None, to_entered_time = None, status = None):
'''
All orders for a specific account or, if account ID isn't specified, orders will be returned for all linked accounts
Documentation Link: https://developer.tdameritrade.com/account-access/apis/get/orders-0
NAME: account
DESC: The account number that you want to query for orders, or if none provided will query all.
TYPE: String
NAME: max_results
DESC: The maximum number of orders to retrieve.
TYPE: integer
NAME: from_entered_time
DESC: Specifies that no orders entered before this time should be returned. Valid ISO-8601 formats are:
yyyy-MM-dd and yyyy-MM-dd'T'HH:mm:ssz. Date must be within 60 days from today's date. 'to_entered_time'
must also be set.
TYPE: String
NAME: to_entered_time
DESC: Specifies that no orders entered after this time should be returned. Valid ISO-8601 formats are:
yyyy-MM-dd and yyyy-MM-dd'T'HH:mm:ssz. 'from_entered_time' must also be set.
TYPE: String
NAME: status
DESC: Specifies that only orders of this status should be returned. Possible Values are:
1. AWAITING_PARENT_ORDER
2. AWAITING_CONDITION
3. AWAITING_MANUAL_REVIEW
4. ACCEPTED
5. AWAITING_UR_NOT
6. PENDING_ACTIVATION
7. QUEUED
8. WORKING
9. REJECTED
10. PENDING_CANCEL
11. CANCELED
12. PENDING_REPLACE
13. REPLACED
14. FILLED
15. EXPIRED
EXAMPLES:
SessionObject.get_orders_query(account = 'MyAccountID', max_results = 6, from_entered_time = '2019-10-01', to_entered_time = '2019-10-10', status = 'FILLED')
SessionObject.get_orders_query(account = 'MyAccountID', max_results = 6, status = 'EXPIRED')
SessionObject.get_orders_query(account = 'MyAccountID', status = 'REJECTED')
SessionObject.get_orders_query(account = None)
'''
# first make sure that the token is still valid.
self.token_validation()
# grab the original headers we have stored.
merged_headers = self.headers()
# define the payload
data = {"accountId": account,
"maxResults": max_results,
"fromEnteredTime": from_entered_time,
"toEnteredTime": to_entered_time,
"status": status}
# define the endpoint
endpoint = '/orders'
# build the url
url = self.api_endpoint(endpoint)
# make the request
return requests.get(url = url, headers = merged_headers, params = data, verify = True).json()
def get_order(self, account = None, order_id = None):
'''
Gets a single order for a specific account, identified by its order ID.
Documentation Link: https://developer.tdameritrade.com/account-access/apis/get/orders-0
NAME: account
DESC: The account number that you want to query the order for.
TYPE: String
NAME: order_id
DESC: The order id.
TYPE: integer
EXAMPLES:
SessionObject.get_order(account = 'MyAccountID', order_id = 'MyOrderID')
'''
# first make sure that the token is still valid.
self.token_validation()
# grab the original headers we have stored.
merged_headers = self.headers()
# define the endpoint
endpoint = 'accounts/{}/orders/{}'.format(account, order_id)
# build the url
url = self.api_endpoint(endpoint)
# make the request
return requests.get(url = url, headers = merged_headers, verify = True).json()
def cancel_order(self, account = None, order_id = None):
'''
Cancel a specific order for a specific account.
Documentation Link: https://developer.tdameritrade.com/account-access/apis/delete/accounts/%7BaccountId%7D/orders/%7BorderId%7D-0
NAME: account
DESC: The account number that you want to query the order for.
TYPE: String
NAME: order_id
DESC: The order id.
TYPE: integer
EXAMPLES:
SessionObject.cancel_order(account = 'MyAccountID', order_id = 'MyOrderID')
'''
# first make sure that the token is still valid.
self.token_validation()
# grab the original headers we have stored.
merged_headers = self.headers()
# define the endpoint
endpoint = 'accounts/{}/orders/{}'.format(account, order_id)
# build the url
url = self.api_endpoint(endpoint)
# make the request
return requests.delete(url = url, headers = merged_headers, verify = True).json()
def place_order(self, account = None, order = None):
'''
Places an order for a specific account.
Documentation Link: https://developer.tdameritrade.com/account-access/apis/delete/accounts/%7BaccountId%7D/orders/%7BorderId%7D-0
NAME: account
DESC: The account number that you want to place the order for.
TYPE: String
NAME: order
DESC: Either a JSON string or a TDOrder object that contains the info needed for an order placement.
TYPE: String | Order
EXAMPLES:
SessionObject.place_order(account = 'MyAccountID', order = {'orderKey':'OrderValue'})
SessionObject.place_order(account = 'MyAccountID', order = <Order>)
'''
# first make sure that the token is still valid.
self.token_validation()
# grab the original headers we have stored.
merged_headers = self.headers(mode = 'application/json')
# define the endpoint
endpoint = 'accounts/{}/orders'.format(account)
# build the url
url = self.api_endpoint(endpoint)
# make the request
response = requests.post(url = url, headers = merged_headers, data = json.dumps(order), verify = True)
if response.status_code == 201:
return "Order was successfully placed."
else:
return response.json()
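# Illustrative sketch, not part of the original client: a minimal equity market-order
# payload in the JSON shape described by the public TD Ameritrade order spec. The
# symbol and account number below are placeholders.
#
#   simple_order = {
#       "orderType": "MARKET",
#       "session": "NORMAL",
#       "duration": "DAY",
#       "orderStrategyType": "SINGLE",
#       "orderLegCollection": [
#           {"instruction": "BUY",
#            "quantity": 1,
#            "instrument": {"symbol": "MSFT", "assetType": "EQUITY"}}
#       ]
#   }
#   SessionObject.place_order(account = 'MyAccountNumber', order = simple_order)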
def _create_token_timestamp(self, token_timestamp = None):
'''
Takes the token timestamp and converts it to the proper format
needed for the streaming API.
NAME: token_timestamp
DESC: The timestamp returned from the get_user_principals endpoint.
TYPE: String.
RTYPE: TDStream Object
'''
# First parse the date.
token_timestamp = dateutil.parser.parse(token_timestamp, ignoretz = True)
# Grab the starting point, so time '0'
epoch = datetime.datetime.utcfromtimestamp(0)
return int((token_timestamp - epoch).total_seconds() * 1000.0)
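# Illustrative example, not part of the original source: the conversion above turns the
# ISO-8601 token timestamp returned by get_user_principals into milliseconds since the
# Unix epoch, which is what the streaming login expects, e.g.
#
#   self._create_token_timestamp('2020-01-01T00:00:00+0000')  # -> 1577836800000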
def message_key(self, account_id = None):
# first make sure that the token is still valid.
self.token_validation()
# grab the original headers we have stored.
merged_headers = self.headers()
# define the endpoint
endpoint = 'MessageKey'
url = r'https://apis.tdameritrade.com/apps/100/MessageKey?source={}'.format()
# build the url
# url = self.api_endpoint(endpoint)
# print(url)
# make the request
response = requests.get(url = url, headers = merged_headers, verify = True)
print(response.url)
def create_streaming_session(self):
'''
Creates a new streaming session that can be used to stream different data sources.
RTYPE: TDStream Object
'''
# Grab the Subscription Key
sub_key = self.get_streamer_subscription_keys()['keys'][0]['key']
# Grab the Streamer Info.
userPrincipalsResponse = self.get_user_principals(fields = ['streamerConnectionInfo'])
# Grab the timestamp.
tokenTimeStamp = userPrincipalsResponse['streamerInfo']['tokenTimestamp']
# Grab socket
socket_url = userPrincipalsResponse['streamerInfo']['streamerSocketUrl']
# Parse the token timestamp.
tokenTimeStampAsMs = self._create_token_timestamp(token_timestamp = tokenTimeStamp)
# Define our Credentials Dictionary used for authentication.
credentials = {"userid": userPrincipalsResponse['accounts'][0]['accountId'],
"token": userPrincipalsResponse['streamerInfo']['token'],
"company": userPrincipalsResponse['accounts'][0]['company'],
"segment": userPrincipalsResponse['accounts'][0]['segment'],
"cddomain": userPrincipalsResponse['accounts'][0]['accountCdDomainId'],
"usergroup": userPrincipalsResponse['streamerInfo']['userGroup'],
"accesslevel":userPrincipalsResponse['streamerInfo']['accessLevel'],
"authorized": "Y",
"timestamp": tokenTimeStampAsMs,
"appid": userPrincipalsResponse['streamerInfo']['appId'],
"acl": userPrincipalsResponse['streamerInfo']['acl']}
# Create the session
streaming_session = TDStreamerClient(websocket_url = socket_url, user_principal_data = userPrincipalsResponse, credentials = credentials)
return streaming_session
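# Illustrative usage, not part of the original source:
#
#   stream_session = SessionObject.create_streaming_session()
#   # stream_session is a TDStreamerClient, ready to subscribe to streaming data services.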
|
[
"lovetrading09@yahoo.com"
] |
lovetrading09@yahoo.com
|
1be87f33c8660ad3c54efa5eb9f2ada26d9a1e6b
|
8afb5afd38548c631f6f9536846039ef6cb297b9
|
/_PYTHON/DATA_STRUC_PYTHON_NOTES/python-prac/mini-scripts/python_NumPy_Products_1.txt.py
|
d720b0dffc122f94e2aecfe055e2ab67998d23f1
|
[
"MIT"
] |
permissive
|
bgoonz/UsefulResourceRepo2.0
|
d87588ffd668bb498f7787b896cc7b20d83ce0ad
|
2cb4b45dd14a230aa0e800042e893f8dfb23beda
|
refs/heads/master
| 2023-03-17T01:22:05.254751
| 2022-08-11T03:18:22
| 2022-08-11T03:18:22
| 382,628,698
| 10
| 12
|
MIT
| 2022-10-10T14:13:54
| 2021-07-03T13:58:52
| null |
UTF-8
|
Python
| false
| false
| 77
|
py
|
import numpy as np
arr = np.array([1, 2, 3, 4])
x = np.prod(arr)
print(x)
|
[
"bryan.guner@gmail.com"
] |
bryan.guner@gmail.com
|
a11539354e10e7fa486f5db387c8ccfbc7df1177
|
9ae4e7db77c3437331aeb95bfb58e6ab7762b3bd
|
/reside/wsgi.py
|
a957ccf1c84796bfbddab75e8747d1f4dce9e934
|
[] |
no_license
|
ShijuKAbraham/RESIDE
|
7ff190a5110f18a18805a2a636b30d7999309624
|
b67024aab94c3f218dc9bc03f727db8fde68fa9e
|
refs/heads/master
| 2022-01-23T13:12:38.499092
| 2019-07-18T17:07:44
| 2019-07-18T17:07:44
| 197,243,928
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 389
|
py
|
"""
WSGI config for reside project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'reside.settings')
application = get_wsgi_application()
|
[
"k.ashiju10@gmail.com"
] |
k.ashiju10@gmail.com
|
a0bcf1146515c5da0c64441490de32599b91f02e
|
54f352a242a8ad6ff5516703e91da61e08d9a9e6
|
/Source Codes/CodeJamData/12/01/14.py
|
fd0cb48a6c9d059526138c98e8ba82d309f6802b
|
[] |
no_license
|
Kawser-nerd/CLCDSA
|
5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb
|
aee32551795763b54acb26856ab239370cac4e75
|
refs/heads/master
| 2022-02-09T11:08:56.588303
| 2022-01-26T18:53:40
| 2022-01-26T18:53:40
| 211,783,197
| 23
| 9
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,517
|
py
|
#!/usr/bin/python2
### Google Code Jam template
# Futures
from __future__ import division
from __future__ import with_statement
from __future__ import print_function
## Library
# @memoized
def memoized(func):
mem = {}
def wrapped(*args):
if args not in mem:
mem[args] = func(*args)
return mem[args]
return wrapped
## Setup
from os.path import basename, splitext
# Task letter
TASK=splitext(basename(__file__))[0]
print("Task {}".format(TASK))
## Input templates
# Line as int
#int(infile.readline())
# Line as many ints
#(int(s) for s in infile.readline().split())
## Precalculation
print("Precalculation...")
from string import maketrans
src = """aozq
ejp mysljylc kd kxveddknmc re jsicpdrysi
rbcpc ypc rtcsra dkh wyfrepkym veddknkmkrkcd
de kr kd eoya kw aej tysr re ujdr lkgc jv"""
dst = """yeqz
our language is impossible to understand
there are twenty six factorial possibilities
so it is okay if you want to just give up"""
table = maketrans(src, dst)
print("Precalculation done.")
## Calculation
print("Calculation...")
with open(TASK+".in") as infile:
with open(TASK+".out",mode="wt") as outfile:
cases = int(infile.readline())
for ncase in range(cases):
print("Case #{nc}".format(nc=ncase+1))
# Perform all necessary calculations
text = infile.readline().strip()
data = text.translate(table)
outfile.write("Case #{nc}: {data}\n".format(nc=ncase+1,data=data))
print("Calculation done.")
|
[
"kwnafi@yahoo.com"
] |
kwnafi@yahoo.com
|
820296291f92a6cd4a69811553d480b213a822b8
|
3da80755af10c31ded473ad998efb288cd8ca662
|
/python/列表.py
|
a8e28c9139da53847c2aebe49a4c766861c67d95
|
[] |
no_license
|
xiu-du-du/akk8
|
278c2496213b9ccf9aa28695a9c78b782690c9ff
|
115ea1712d7342d9ed92d5454502d056420cfae9
|
refs/heads/master
| 2023-08-25T16:38:05.193139
| 2021-09-20T20:05:09
| 2021-09-20T20:05:09
| 403,110,827
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,811
|
py
|
# Current time: 2021/9/13 18:57
"""
Creating lists
"""
lst1=['hello','world',899] # created directly with []
lst2=list(['hello','world',899,'hello']) # created with the built-in list() function
"""
Characteristics of lists
1. List elements are stored in insertion order
2. Each index maps to exactly one item
3. Lists can store duplicate data
4. Elements of any data type can be mixed
5. Memory is allocated and reclaimed dynamically as needed
"""
print(id(lst2[0]))
print(id(lst2[-4]))
print(lst2.index('hello'))
print(lst2.index('hello',1))
'''
Querying lists
1. Get the index of a given element with index()
a. If the list contains N identical elements, only the index of the first one is returned
b. If the element is not in the list, an error is raised
c. The search can be restricted to a given start and end range
2. Get a single element from the list
a. Forward indexing runs from 0 to N-1
b. Reverse indexing runs from -N to -1
c. If the given index does not exist, an error is raised
3. Get multiple elements from the list
'''
# Slicing
# The slice lst4 is a new list object
lst3=[10,20,30,40,50,60,70,80]
lst4=lst3[1:6:1]
print(id(lst3))
print(id(lst4))
print(lst3[::-1])
print(lst3[::2])
# in tests whether an element is present
# not in tests whether an element is absent
print('o' in 'python')
print('s' in 'python')
print(10 in lst3)
print(100 in lst3)
print(10 not in lst3)
print(100 not in lst3)
# Iterate over the list
for i in lst3:
print(i)
# Adding, removing, and modifying list elements
# append() adds one element at the end of the list
lst3.append(22)
lst5=lst3
print(lst3)
print(id(lst3))
print(lst5)
print(id(lst5))
# Append the whole list as a single element
lst3.append(lst2)
print(lst3)
# extend() appends the elements of another list
lst3.extend(lst2)
print(lst3)
# insert() adds an element at the given position
lst3.insert(1,90)
print(lst3)
# Slicing: cut from the given position, then add elements
lst3[1:]=lst2
print(lst3)
# Removing elements from a list
# remove() removes one element; with duplicates only the first is removed; raises an error if the element is not found
lst3.remove(10)
print(lst3)
# pop() removes the element at the given index; without an index it removes the last element; raises an error if the index does not exist
lst3.pop(1)
print(lst3)
# Slicing
# produces a new object
lst6=lst3[1:2]
print(id(lst3))
print(id(lst6))
lst3[1:2]=[]
print(lst3)
# clear() empties the list
lst3.clear()
print(lst3)
lst3=[10,20,30,40,50,60,70,80]
# del deletes the list object
# it has to be redefined after deletion
del lst3
# print(lst3)
# Modifying lists
# modify the element at a given index
lst3=[10,20,30,40]
lst3[2]=500
print(lst3)
# Slicing
# replace N elements at once
lst3[1:3]=[300,500,800,900]
print(lst3)
# Sorting list elements
# ascending order
lst3.sort()
print(lst3) # sorted from smallest to largest
lst3.sort(reverse=False)
print(lst3) # sorted from smallest to largest
# descending order
lst3.sort(reverse=True)
print(lst3) # largest to smallest
print("===="*20)
print(lst3)
lst7=sorted(lst3) # ascending
print(lst7)
lst8=sorted(lst3,reverse=True) # descending
print(lst8)
# List comprehension
lst=[i*2 for i in range(1,6)]
print(lst)
|
[
"cnimmor@gmail.com"
] |
cnimmor@gmail.com
|
0855f982056b6cfbc3bce6a7b99b54ca6eddc19d
|
3e367ddabaecca6b3b932c09a933f1f1c3a42190
|
/Province_data/deal_lasa_data.py
|
8244f9ad0088e2b128acd9d04dba7b2c490400b8
|
[] |
no_license
|
13033266796/Air
|
03c8c231f412888aa72c56a06ae588c675ac57f4
|
d1c1fbe4ea86280cf33d12ce07b5b387699c7a11
|
refs/heads/master
| 2022-12-15T05:28:48.027159
| 2020-05-20T03:51:25
| 2020-05-20T03:51:25
| 248,926,154
| 0
| 0
| null | 2022-12-08T04:28:45
| 2020-03-21T07:16:57
|
HTML
|
UTF-8
|
Python
| false
| false
| 854
|
py
|
import os
import re
with open(r"F:\Python\pyCollect\Air_predict\Provice_data/original/ShangHai_2_year_data.txt","r",encoding="utf-8") as f:
data = f.readline()
line = 0
text = ""
while data:
line += 1
if line == 4:
res = re.findall(r"\S+", text)
with open(r"./original/上海_2year_data.csv","a",encoding="utf-8") as t:
t.write("上海,"+",".join(res)+"\n")
# print("***", text, "***")
text = ""
line = 1
data = data.strip()
text += " " + data
data = f.readline()
# data = data.strip()
# print(data)
# data = f.readline()
# data = data.strip()
# print(data)
# data = f.readline()
# data = data.strip()
# print(data)
# print(text)
# res = re.findall(r"\S+",text)
# print(res)
|
[
"jiechen@webot.co"
] |
jiechen@webot.co
|
e5121a53283e18ff7eca054f670b9368256d0405
|
d9684e7c80f0bab12497e3a14889ffb45e9a41e0
|
/mlshim/_version.py
|
a4f2e2fa66d2fc39b64d70bc046071977bf62b08
|
[
"MIT"
] |
permissive
|
dapperfu/mlshim
|
df9206daa592e01b50c16ddf7a6a92bd1771a802
|
4a1eea0c5ce8d973ada5609c48a4942033b2fbdc
|
refs/heads/master
| 2022-06-22T03:49:46.436570
| 2022-05-18T15:42:39
| 2022-05-18T15:42:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 18,580
|
py
|
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by github's download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.18 (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
from typing import Dict
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "$Format:%d$"
git_full = "$Format:%H$"
git_date = "$Format:%ci$"
keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
return keywords
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "pep440"
cfg.tag_prefix = "None"
cfg.parentdir_prefix = "None"
cfg.versionfile_source = "mlshim/_version.py"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY: Dict[str, str] = {}
HANDLERS: Dict[str, Dict[str, str]] = {}
def register_vcs_handler(vcs, method): # decorator
"""Decorator to mark a method as the handler for a particular VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(
commands, args, cwd=None, verbose=False, hide_stderr=False, env=None
):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen(
[c] + args,
cwd=cwd,
env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr else None),
)
break
except OSError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None, None
else:
if verbose:
print(f"unable to find command, tried {commands}")
return None, None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
print("stdout was %s" % stdout)
return None, p.returncode
return stdout, p.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {
"version": dirname[len(parentdir_prefix) :],
"full-revisionid": None,
"dirty": False,
"error": None,
"date": None,
}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print(
"Tried directories %s but none started with prefix %s"
% (str(rootdirs), parentdir_prefix)
)
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
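# Illustrative example, not part of the original source: unpacking a release tarball
# such as mlshim-1.2.3.tar.gz produces a directory named "mlshim-1.2.3", so with a
# parentdir_prefix of "mlshim-" the version above would resolve to "1.2.3".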
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except OSError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = {r.strip() for r in refnames.strip("()").split(",")}
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = {r[len(TAG) :] for r in refs if r.startswith(TAG)}
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = {r for r in refs if re.search(r"\d", r)}
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix) :]
if verbose:
print("picking %s" % r)
return {
"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False,
"error": None,
"date": date,
}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {
"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False,
"error": "no suitable tags",
"date": None,
}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(
GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=True
)
if rc != 0:
if verbose:
print("Directory %s not under git control" % root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(
GITS,
[
"describe",
"--tags",
"--dirty",
"--always",
"--long",
"--match",
"%s*" % tag_prefix,
],
cwd=root,
)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[: git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r"^(.+)-(\d+)-g([0-9a-f]+)$", git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = (
"unable to parse git-describe output: '%s'" % describe_out
)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = "tag '{}' doesn't start with prefix '{}'".format(
full_tag, tag_prefix
)
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix) :]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(
GITS, ["rev-list", "HEAD", "--count"], cwd=root
)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[
0
].strip()
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
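# Illustrative examples, not part of the original source, of the rendering above:
#
#   render_pep440({"closest-tag": "1.2.0", "distance": 3,
#                  "short": "abc1234", "dirty": True})   # -> "1.2.0+3.gabc1234.dirty"
#   render_pep440({"closest-tag": None, "distance": 7,
#                  "short": "abc1234", "dirty": False})  # -> "0+untagged.7.gabc1234"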
def render_pep440_pre(pieces):
"""TAG[.post.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%d" % pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
Like 'git describe --tags --dirty --always --long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {
"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None,
}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {
"version": rendered,
"full-revisionid": pieces["long"],
"dirty": pieces["dirty"],
"error": None,
"date": pieces.get("date"),
}
def get_versions():
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(
get_keywords(), cfg.tag_prefix, verbose
)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split("/"):
root = os.path.dirname(root)
except NameError:
return {
"version": "0+unknown",
"full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree",
"date": None,
}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {
"version": "0+unknown",
"full-revisionid": None,
"dirty": None,
"error": "unable to compute version",
"date": None,
}
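# Illustrative usage, not part of the original source: packages built with versioneer
# typically expose the computed version like
#
#   from mlshim._version import get_versions
#   __version__ = get_versions()["version"]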
|
[
"engineer@example.org"
] |
engineer@example.org
|
449a5b4d464ce12c138b35ee87635fe1817540fc
|
13d3a44447f6a7d8b0d61c2fb445fa6aa76c2f95
|
/stackdio/core/viewsets.py
|
3708da69f32348e2a5e6effb26d7be236dfe77f5
|
[
"Apache-2.0"
] |
permissive
|
stackdio/stackdio
|
6ba4ad6c2ef10a323cbd955e6d6d5bd7917c17c2
|
84be621705031d147e104369399b872d5093ef64
|
refs/heads/master
| 2021-04-09T16:36:38.220557
| 2018-08-13T18:25:29
| 2018-08-13T18:25:29
| 17,679,603
| 9
| 11
|
Apache-2.0
| 2020-03-19T17:21:45
| 2014-03-12T19:02:06
|
Python
|
UTF-8
|
Python
| false
| false
| 13,461
|
py
|
# -*- coding: utf-8 -*-
# Copyright 2017, Digital Reasoning
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import unicode_literals
import logging
from django.conf import settings
from django.contrib.auth.models import Group
from django.http import Http404
from guardian.shortcuts import get_groups_with_perms, get_users_with_perms, remove_perm
from rest_framework import viewsets
from rest_framework.serializers import ListField, SlugRelatedField, ValidationError
from stackdio.api.users.models import get_user_queryset
from stackdio.core import fields, mixins, serializers
from stackdio.core.config import StackdioConfigException
from stackdio.core.permissions import StackdioPermissionsModelPermissions
from stackdio.core.shortcuts import get_groups_with_model_perms, get_users_with_model_perms
try:
from django_auth_ldap.backend import LDAPBackend
except ImportError:
LDAPBackend = None
logger = logging.getLogger(__name__)
def _filter_perms(available_perms, perms):
ret = []
for perm in perms:
if perm in available_perms:
ret.append(perm)
return ret
class UserSlugRelatedField(SlugRelatedField):
def to_internal_value(self, data):
try:
return super(UserSlugRelatedField, self).to_internal_value(data)
except ValidationError:
if settings.LDAP_ENABLED:
if LDAPBackend is None:
raise StackdioConfigException('LDAP is enabled, but django_auth_ldap isn\'t '
'installed. Please install django_auth_ldap')
# Grab the ldap user and try again
user = LDAPBackend().populate_user(data)
if user is not None:
return super(UserSlugRelatedField, self).to_internal_value(data)
# Nothing worked, just re-raise the exception
raise
class StackdioBasePermissionsViewSet(mixins.BulkUpdateModelMixin, viewsets.ModelViewSet):
"""
Viewset for creating permissions endpoints
"""
user_or_group = None
model_or_object = None
lookup_value_regex = r'[\w.@+-]+'
parent_lookup_field = 'pk'
parent_lookup_url_kwarg = None
def get_model_name(self):
raise NotImplementedError('`get_model_name()` must be implemented.')
def get_app_label(self):
raise NotImplementedError('`get_app_label()` must be implemented.')
def get_serializer_class(self):
user_or_group = self.get_user_or_group()
model_or_object = self.get_model_or_object()
model_name = self.get_model_name()
app_label = self.get_app_label()
super_cls = self.switch_model_object(serializers.StackdioModelPermissionsSerializer,
serializers.StackdioObjectPermissionsSerializer)
default_parent_lookup_url_kwarg = 'parent_{}'.format(self.parent_lookup_field)
url_field_kwargs = {
'view_name': 'api:{0}:{1}-{2}-{3}-permissions-detail'.format(
app_label,
model_name,
model_or_object,
user_or_group
),
'permission_lookup_field': self.lookup_field,
'permission_lookup_url_kwarg': self.lookup_url_kwarg or self.lookup_field,
'lookup_field': self.parent_lookup_field,
'lookup_url_kwarg': self.parent_lookup_url_kwarg or default_parent_lookup_url_kwarg,
}
url_field_cls = self.switch_model_object(
fields.HyperlinkedModelPermissionsField,
fields.HyperlinkedObjectPermissionsField,
)
# Create a class
class StackdioUserPermissionsSerializer(super_cls):
user = UserSlugRelatedField(slug_field='username', queryset=get_user_queryset())
url = url_field_cls(**url_field_kwargs)
permissions = ListField()
class Meta(super_cls.Meta):
update_lookup_field = 'user'
class StackdioGroupPermissionsSerializer(super_cls):
group = SlugRelatedField(slug_field='name', queryset=Group.objects.all())
url = url_field_cls(**url_field_kwargs)
permissions = ListField()
class Meta(super_cls.Meta):
update_lookup_field = 'group'
return self.switch_user_group(StackdioUserPermissionsSerializer,
StackdioGroupPermissionsSerializer)
def get_user_or_group(self):
assert self.user_or_group in ('user', 'group'), (
"'%s' should include a `user_or_group` attribute that is one of 'user' or 'group'."
% self.__class__.__name__
)
return self.user_or_group
def switch_user_group(self, if_user, if_group):
return {
'user': if_user,
'group': if_group,
}.get(self.get_user_or_group())
def get_model_or_object(self):
assert self.model_or_object in ('model', 'object'), (
"'%s' should include a `model_or_object` attribute that is one of 'model' or 'object'."
% self.__class__.__name__
)
return self.model_or_object
def switch_model_object(self, if_model, if_object):
return {
'model': if_model,
'object': if_object,
}.get(self.get_model_or_object())
def _transform_perm(self, model_name):
def do_tranform(item):
# pylint: disable=unused-variable
perm, sep, empty = item.partition('_' + model_name)
return perm
return do_tranform
def get_object(self):
queryset = self.get_queryset()
url_kwarg = self.lookup_url_kwarg or self.lookup_field
name_attr = self.switch_user_group('username', 'name')
for obj in queryset:
auth_obj = obj[self.get_user_or_group()]
if self.kwargs[url_kwarg] == getattr(auth_obj, name_attr):
return obj
raise Http404('No permissions found for %s' % self.kwargs[url_kwarg])
class StackdioModelPermissionsViewSet(StackdioBasePermissionsViewSet):
model_cls = None
model_or_object = 'model'
permission_classes = (StackdioPermissionsModelPermissions,)
def get_model_cls(self):
assert self.model_cls, (
"'%s' should include a `model_cls` attribute or override the `get_model_cls()` method."
% self.__class__.__name__
)
return self.model_cls
def get_model_name(self):
return self.get_model_cls()._meta.model_name
def get_app_label(self):
ret = self.get_model_cls()._meta.app_label
if ret == 'auth':
# one-off thing, since users/groups are in the `users` app, not `auth`
return 'users'
return ret
def get_model_permissions(self):
return getattr(self.get_model_cls(),
'model_permissions',
getattr(self, 'model_permissions', ()))
def get_permissions(self):
"""
Instantiates and returns the list of permissions that this view requires.
"""
ret = []
for permission_cls in self.permission_classes:
permission = permission_cls()
# Inject our model_cls into the permission
if isinstance(permission, StackdioPermissionsModelPermissions) \
and permission.model_cls is None:
permission.model_cls = self.model_cls
ret.append(permission)
return ret
def get_queryset(self): # pylint: disable=method-hidden
model_cls = self.get_model_cls()
model_name = model_cls._meta.model_name
model_perms = self.get_model_permissions()
# Grab the perms for either the users or groups
perm_map_func = self.switch_user_group(
lambda: get_users_with_model_perms(model_cls, attach_perms=True,
with_group_users=False),
lambda: get_groups_with_model_perms(model_cls, attach_perms=True),
)
# Do this as a function so we don't fetch both the user AND group permissions on each
# request
perm_map = perm_map_func()
ret = []
sorted_perms = sorted(perm_map.items(), key=lambda x: getattr(x[0], self.lookup_field))
for auth_obj, perms in sorted_perms:
new_perms = [self._transform_perm(model_name)(perm) for perm in perms]
ret.append({
self.get_user_or_group(): auth_obj,
'permissions': _filter_perms(model_perms, new_perms),
})
return ret
def list(self, request, *args, **kwargs):
response = super(StackdioModelPermissionsViewSet, self).list(request, *args, **kwargs)
# add available permissions to the response
response.data['available_permissions'] = sorted(self.get_model_permissions())
return response
def perform_create(self, serializer):
serializer.save(model_cls=self.get_model_cls())
def perform_update(self, serializer):
serializer.save(model_cls=self.get_model_cls())
def perform_destroy(self, instance):
model_cls = self.get_model_cls()
app_label = model_cls._meta.app_label
model_name = model_cls._meta.model_name
for perm in instance['permissions']:
remove_perm('%s.%s_%s' % (app_label, perm, model_name),
instance[self.get_user_or_group()])
class StackdioModelUserPermissionsViewSet(StackdioModelPermissionsViewSet):
user_or_group = 'user'
lookup_field = 'username'
lookup_url_kwarg = 'username'
class StackdioModelGroupPermissionsViewSet(StackdioModelPermissionsViewSet):
user_or_group = 'group'
lookup_field = 'name'
lookup_url_kwarg = 'groupname'
class StackdioObjectPermissionsViewSet(StackdioBasePermissionsViewSet):
"""
Viewset for creating permissions endpoints
"""
model_or_object = 'object'
def get_permissioned_object(self):
raise NotImplementedError('`get_permissioned_object()` must be implemented.')
def get_model_name(self):
return self.get_permissioned_object()._meta.model_name
def get_app_label(self):
ret = self.get_permissioned_object()._meta.app_label
if ret == 'auth':
# one-off thing, since users/groups are in the `users` app, not `auth`
return 'users'
return ret
def get_object_permissions(self):
return getattr(self.get_permissioned_object(),
'object_permissions',
getattr(self, 'object_permissions', ()))
def get_queryset(self): # pylint: disable=method-hidden
obj = self.get_permissioned_object()
model_name = obj._meta.model_name
object_perms = self.get_object_permissions()
# Grab the perms for either the users or groups
perm_map_func = self.switch_user_group(
lambda: get_users_with_perms(obj, attach_perms=True,
with_superusers=False, with_group_users=False),
lambda: get_groups_with_perms(obj, attach_perms=True),
)
perm_map = perm_map_func()
ret = []
sorted_perms = sorted(perm_map.items(), key=lambda x: getattr(x[0], self.lookup_field))
for auth_obj, perms in sorted_perms:
new_perms = [self._transform_perm(model_name)(perm) for perm in perms]
ret.append({
self.get_user_or_group(): auth_obj,
'permissions': _filter_perms(object_perms, new_perms),
})
return ret
def list(self, request, *args, **kwargs):
response = super(StackdioObjectPermissionsViewSet, self).list(request, *args, **kwargs)
# add available permissions to the response
response.data['available_permissions'] = sorted(self.get_object_permissions())
return response
def perform_create(self, serializer):
serializer.save(object=self.get_permissioned_object())
def perform_update(self, serializer):
serializer.save(object=self.get_permissioned_object())
def perform_destroy(self, instance):
obj = self.get_permissioned_object()
app_label = obj._meta.app_label
model_name = obj._meta.model_name
for perm in instance['permissions']:
remove_perm('%s.%s_%s' % (app_label, perm, model_name),
instance[self.get_user_or_group()],
obj)
# pylint: disable=abstract-method
class StackdioObjectUserPermissionsViewSet(StackdioObjectPermissionsViewSet):
user_or_group = 'user'
lookup_field = 'username'
lookup_url_kwarg = 'username'
class StackdioObjectGroupPermissionsViewSet(StackdioObjectPermissionsViewSet):
user_or_group = 'group'
lookup_field = 'name'
lookup_url_kwarg = 'groupname'
|
[
"clark.perkins@digitalreasoning.com"
] |
clark.perkins@digitalreasoning.com
|
5efd766bb70d94a197cb80cb858d7211c005cb27
|
4de2b914e4607dd0ca7eec60b21026af6b6c4797
|
/Old_work/valdambrini_cheli_papallo_tarmati/catkin_ws/build/navigation/clear_costmap_recovery/catkin_generated/pkg.develspace.context.pc.py
|
cb8deb76dfb119ed5c90cb0df8ac2a426a6fc434
|
[] |
no_license
|
ABiondi12/project_sgn
|
5203d21f2753dcdf7c53b153324dc75bc1221549
|
570b7be0b01e7c83cb927945e532d6a2213ebf65
|
refs/heads/main
| 2023-06-18T12:59:18.337096
| 2021-07-21T10:27:08
| 2021-07-21T10:27:08
| 307,121,028
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 551
|
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/robot/catkin_ws/src/navigation/map_server/include".split(';') if "/home/robot/catkin_ws/src/navigation/map_server/include" != "" else []
PROJECT_CATKIN_DEPENDS = "roscpp;nav_msgs;tf2".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-lmap_server_image_loader".split(';') if "-lmap_server_image_loader" != "" else []
PROJECT_NAME = "map_server"
PROJECT_SPACE_DIR = "/home/robot/catkin_ws/devel"
PROJECT_VERSION = "1.16.2"
|
[
"petracci.francesco@gmail.com"
] |
petracci.francesco@gmail.com
|
3e8c182c30acda216edcbfacad704a01282ee8c7
|
0ea7cdf05adf5ae281d02f7e3ee8c71a5cee5cc4
|
/moblife/app_config.py
|
b0ef42f234f1809f2d99ef69351293c1b08c2ffe
|
[] |
no_license
|
groverc85/Mobile-Assistant
|
df4d4cd6ce4d3a454526536cfd8c8c77348d0eb7
|
7667e7143ca2ff6b37085f5bfea342ee09f78db0
|
refs/heads/master
| 2021-01-19T04:25:05.822978
| 2017-04-22T17:43:24
| 2017-04-22T17:43:24
| 87,370,233
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,267
|
py
|
class Config(object):
DEBUG = False
SQLALCHEMY_DATABASE_URI = 'sqlite:///:memory:'
class LocalDevConfig(Config):
from datetime import timedelta
DEBUG = True
SQLALCHEMY_DATABASE_URI = 'mysql://root:root@192.168.0.101/xms'
SQLALCHEMY_BINDS = {
'xms': 'mysql://root:root@192.168.0.101/xms',
}
SQLALCHEMY_POOL_RECYCLE = 5
SESSION_TYPE = 'filesystem'
PERMANENT_SESSION_LIFETIME = timedelta(minutes=60)
SECRET_KEY = '\x82\xe2/\x1fq\xc4\xa1\xfcw\x11\xdf\xbe\x12'
# for Flask-WTF
CSRF_ENABLED = True
class RemoteDevConfig(Config):
from datetime import timedelta
DEBUG = True
SERVER_NAME = 'xms.ok-api.cn'
SQLALCHEMY_DATABASE_URI = 'mysql://xms:xmsdbroot@101.71.0.22/xms'
SQLALCHEMY_BINDS = {
'xms': 'mysql://xms:xmsdbroot@101.71.0.22/xms',
}
SQLALCHEMY_POOL_RECYCLE = 5
SESSION_TYPE = 'filesystem'
PERMANENT_SESSION_LIFETIME = timedelta(minutes=60)
SECRET_KEY = '\x82\xe2/\x1fq\xc4\xa1\xfcw\x11\xdf\xbe\x12'
# for Flask-WTF
CSRF_ENABLED = True
class ProductionConfig(Config):
SQLALCHEMY_DATABASE_URI = 'mysql://stMWFmP5H8P8j23LhDzzvSQV:ScOHl8ATsrDw00iXzT5u2qCyEhyMSpf7@sqld.duapp.com:4050/pRtvtvsLEvRpREHGnHOA'
SQLALCHEMY_POOL_RECYCLE = 5
|
[
"groverchen1994@gmail.com"
] |
groverchen1994@gmail.com
|
5370e171697af7ddf7f47b4300bf5ee5e89e9d8b
|
8acbaac388f586fb31ebc62fae5b0f262c6d6892
|
/xcheck/boolcheck.py
|
72808ac342432cbf7bb05eed3e1baa6e645d3357
|
[] |
no_license
|
JoshuaEnglish/pyxmlcheck
|
54e146fa095016557b5c86670d0ef3000e83fc7c
|
8e96b557f506cf16bf533b6744c03c1d34fe0b89
|
refs/heads/master
| 2021-01-10T16:37:30.399116
| 2014-04-08T19:09:56
| 2014-04-08T19:09:56
| 44,216,185
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,828
|
py
|
from core import XCheck, ET
class BoolCheck(XCheck):
"""BoolCheck(name, **kwargs)
Checks a various number of things that could be interpreted as True.
These check as True:
True, true, 1, Yes, yes, T, t, Y, y
These check as False:
False, false, 0, No, no, N, n, F, f
Attributes:
none_is_false [default True] -- allows None or NoneType to be
accepted for False.
Returns a boolean if normalized
"""
def __init__(self, name, **kwargs):
self.none_is_false = kwargs.pop('none_is_false', True)
XCheck.__init__(self, name, **kwargs)
self._object_atts.append('none_is_false')
self.as_string = False
def check_content(self, item):
ok = None
if str(item).lower() in ['true', 'yes', '1', 't', 'y']:
ok = True
self.normalize_content(True)
if str(item).lower() in ['false', 'no', '0', 'f', 'n']:
ok = True
self.normalize_content(False)
if item is None or str(item).lower().strip() == 'none':
if self.none_is_false:
ok = True
self.normalize_content(False)
else:
ok = False
raise self.error, "BoolCheck cannot accept None"
if ok is None:
ok = False
raise self.error, "Boolean checker cannot check %s" % item
return ok
def normalize_content(self, item):
if str(item).lower() in ['true', 'yes', '1', 't', 'y']:
self._normalized_value = True
if str(item).lower() in ['false', 'no', '0', 'f', 'n']:
self._normalized_value = False
if self.as_string:
self._normalized_value = str(self._normalized_value)
def __call__(self, item, **kwargs):
self.as_string = kwargs.pop('as_string', False)
if self.as_string:
kwargs['normalize'] = True
return XCheck.__call__(self, item, **kwargs)
def dummy_value(self):
return 'False'
import unittest
class BoolCheckTC(unittest.TestCase):
def setUp(self):
self.b = BoolCheck('flag')
def tearDown(self):
del self.b
def test_valid_true(self):
"BoolCheck() accepts several values for 'true'"
for x in [True, 'true', 'True', 'TRUE', 't','T',
'1',1,'y','Y','YES','yes','Yes']:
self.assertTrue(self.b(x))
def test_valid_false(self):
"Boolcheck() accepts several values for 'false'"
for x in [False, 'false','False','FALSE',
'no','n','NO','N','f','F',0,'0']:
self.assertTrue(self.b(x))
def test_normalized_true(self):
for x in [True, 'true', 'True', 'TRUE', 't','T',
'1',1,'y','Y','YES','yes','Yes']:
self.assertTrue(self.b(x, normalize=True))
def test_normalized_false(self):
for x in [False, 'false','False','FALSE',
'no','n','NO','N','f','F',0,'0']:
self.assertFalse(self.b(x, normalize=True))
def test_none_as_false(self):
"BoolCheck() accepts NoneType if none_is_false is True"
self.b.none_is_false=True
for x in [None, 'none','None','NONE']:
self.assertTrue(self.b(x))
self.assertFalse(self.b(x, normalize=True))
def test_fail_without_none_as_false(self):
"BoolCheck() fails if NoneType and none_is_false is False"
self.b.none_is_false = False
for x in [None, 'none','None','NONE']:
self.assertRaises(self.b.error, self.b, x)
def testPassWithValidString(self):
"BoolCheck() accepts a variety of positive and negative strings"
for x in ['true','yes','1','t','y','false','no','0','f','n']:
self.assertTrue(self.b(x))
self.assertTrue(self.b(x.upper()))
self.assertTrue(self.b(x.title()))
def testPassWithXMLText(self):
"BoolCheck() accepts xml-formatting string"
for x in ['true','yes','1','t','y','false','no','0','f','n']:
self.assertTrue(self.b('<flag>%s</flag>' % x))
def testPassWithElement(self):
"BoolCheck() accepts xml-formatting string"
for x in ['true','yes','1','t','y','false','no','0','f','n']:
self.assertTrue(self.b(ET.fromstring('<flag>%s</flag>' % x) ) )
def test_as_string(self):
for x in ['true','yes','1','t','y', 'TRUE', 'YES', 'Y', True]:
self.assertEqual(self.b(x, as_string=True), 'True')
for x in ['false','no','0','f','n','FALSE', 'F','N','NO', False]:
self.assertEqual(self.b(x, as_string=True), 'False')
if __name__=='__main__':
unittest.main(verbosity=1)
|
[
"joshua.r.english@gmail.com"
] |
joshua.r.english@gmail.com
|
85f1ce4737c8836e7c1b35cc884dbb17e7877313
|
9a28f6bf8ba177ced8c498d04d3115d8f5ccdf7b
|
/ArkalysOS/urls.py
|
ba21fe26978e15c9603a83ec0bf4721cfaf8bbea
|
[] |
no_license
|
owinckle/Arkalys-OS
|
0fabc396a4d91e0267a0beac762e519c47fe312b
|
964de2005bb40fe8bfb37fbafeb1028047d99b02
|
refs/heads/main
| 2023-06-25T01:41:19.322400
| 2021-07-19T17:28:25
| 2021-07-19T17:28:25
| 384,285,954
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 423
|
py
|
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
# Frontend
path('dashboard/', include('Ark.urls')),
# Modules
path('api/user-settings/', include('UserSettings.urls')),
path('api/contacts/', include('Contacts.urls')),
path('api/invoicing/', include('Invoicing.urls')),
path('api/calendar/', include('Calendar.urls')),
]
|
[
"oceanwinckler.rem@gmail.com"
] |
oceanwinckler.rem@gmail.com
|
eb8c6bcb6bb79cc466e64cbc75daf73337c0fc54
|
e35c571a240b6483c1b66ebd8bd2a587bec873de
|
/server/send_file.py
|
2e172ee9cc61c6734b5523c03afbeac111316e3f
|
[] |
no_license
|
agdhruv/distributed-file-system
|
edac1527719368c7ca308129785d2d11b4718025
|
fb89a7cc2d1816869518c52ec3ab143c35331603
|
refs/heads/master
| 2020-03-14T17:52:18.956573
| 2018-05-05T18:35:26
| 2018-05-05T18:35:26
| 131,729,964
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,168
|
py
|
import socket # Import socket module
import os
s = socket.socket() # Create a socket object
host = socket.gethostname() # Get local machine name
port = 12346 # Reserve a port for your service.
s.bind((host, port)) # Bind to the port
s.listen(5) # Now wait for client connection.
print 'Server listening...'
while True:
c, addr = s.accept() # Establish connection with client.
print '\n[+] Got connection from', addr
c.send('Hello client!')
request = c.recv(1)
if request == '1':
with open("myIndex/index.txt", 'rb') as f:
c.send("\nYour files on the server are ('/' means updating):\n")
l = f.read(1024)
while (l):
c.send(l)
print "Sent", repr(l)
l = f.read(1024)
elif request == '0':
header = ''
# create header--basically the filename from the source
while True:
data = c.recv(1)
if data == '\n':
break
header += data
file_exists = 'no'
with open("myIndex/index.txt", 'r') as f:
for line in f:
if line.rstrip('\n') == header:
file_exists = 'yes'
break
elif line.rstrip('\n').rstrip('/') == header:
file_exists = 'updating'
break
c.send(file_exists)
if file_exists == 'yes':
# create connection with the node to fetch the file
s2 = socket.socket()
host2 = '10.1.21.202'
port2 = 9004
# decide folder in node on the basis of file extension
file_extension = os.path.splitext(header)[1]
file_extension = file_extension[1:] # removing the dot from the file extension
supported_mappings = ['mp3', 'txt', 'pdf']
if file_extension not in supported_mappings:
file_extension = 'others'
s2.connect((host2, port2))
print "Now connected to the node server"
data = s2.recv(1024)
print "Message from node:", repr(data)
# send file name to the node to fetch it
s2.send(file_extension + '/' + header + '\n')
# read the file that was sent by the node and write to file with the same file name
with open(header, 'wb') as f:
print header, 'opened for writing'
while True:
data = s2.recv(1024)
if not data:
break
f.write(data)
s2.close()
print "Now disconnected from the node server"
# at this point, I have the file on this computer
# send the file to the client
with open(header, 'rb') as f:
l = f.read(1024)
while (l):
c.send(l)
l = f.read(1024)
# remove the file from my system
os.remove(header)
c.close()
print '[-] Connection closed with', addr
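# Illustrative client-side sketch, not part of the original source, matching the
# protocol above: send '1' to receive the file index, or '0' followed by a
# newline-terminated file name to fetch a file. The host below is a placeholder.
#
#   c = socket.socket()
#   c.connect((socket.gethostname(), 12346))
#   print c.recv(1024)            # greeting from the server
#   c.send('1')                   # ask for the index of my files
#   data = c.recv(1024)
#   while data:
#       print data,
#       data = c.recv(1024)
#   c.close()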
|
[
"dhruv_agarwal@live.com"
] |
dhruv_agarwal@live.com
|
47a9c6d4f6944ae56920c638179095749d695929
|
36b167e8a162e3bb9c21503592adfadcba024290
|
/fuxi/lib/Rete/CommandLine.py
|
901c8988e1e97740a4e5d234f12ffbf10100c068
|
[] |
no_license
|
studioevoque/python-dlp
|
91d7b9f910ca4d741c953da3101a94806f0a2eff
|
4754cadbcb1fe38112bf01885f84744f46dcb178
|
refs/heads/master
| 2016-09-06T11:00:23.887749
| 2013-07-01T14:21:51
| 2013-07-01T14:21:51
| 33,859,595
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 15,622
|
py
|
#!/usr/bin/env python
"""USAGE: Fuxi [options] factFile1 factFile2 ...
Options:
--closure If present, the inferred triples are serialized
along with the original triples if asked for. Otherwise
(the default behavior), only the inferred triples
are serialized
--output=OUT Determines whether to serialize the inferred triples
to STDOUT using the specified RDF syntax ('xml','pretty-xml',
'nt','turtle',or 'n3') or to print a summary of the conflict set
(from the RETE network) if the value of this option is
'conflict'. If the DLP mechanism is invoked (via --dlp) then
a value of 'rif' will cause the generated ruleset to be rendered
in the RIF format. If the proof generation mechanism is
activated then a value of 'pml' will trigger a serialization
of the proof in PML.
--man-owl If present, either the closure (or just the inferred triples) are serialized
using an extension of the Manchester OWL syntax
with indications for ontology normalization
(http://www.cs.man.ac.uk/~rector/papers/rector-modularisation-kcap-2003-distrib.pdf)
--class Used in combination with --man-owl and --extract to determine which specific class is serialized / extracted
--property Used in combination with --man-owl and --extract to determine which specific property is serialized / extracted
--extract The identified properties and classes will be extracted from the factfiles
--normalize Will attempt to determine if the ontology is 'normalized' [Rector, A. 2003]
--help
--input-format=<FORMAT> Determines the format of the RDF document(s) which
serve as the initial facts for the RETE network.
One of 'xml','n3','trix', 'nt', or 'rdfa'. The default
is 'xml'.
--pDSemantics Add pD semantics ruleset?
--optimize Suggest inefficiencies in the ruleset and exit
--stdin Parse STDIN as an RDF graph to contribute to the
initial facts for the RETE network using the
specified format
--ns=PREFIX=NSURI Register a namespace binding (QName prefix to a
base URI). This can be used more than once
--graphviz-out=<FILE> A filename to write a graphviz diagram of the RETE
network to
--rules=FILE1,FILE2,.. The Notation 3 documents to use as rulesets for the
RETE network
--ruleFacts Determines whether or not to attempt to parse
initial facts from the rule graph. Off by default
--complementExpand Perform a closed-world expansion of all use of owl:complementOf
--dlp This switch turns on Description Logic Programming
(DLP) inference. In this mode, the input document
is considered an OWL ontology mostly comprised of
Description Horn Logic (DHL) axioms. An
additional ruleset is included to capture those
semantics outside DHL but which can be expressed in
definite Datalog Logic Programming. The DHL-compiled
ruleset and the extensions are mapped into a RETE-UL
Network for evaluation.
--proove An N3 string consisting of a single RDF assertion to prove
against the rules and facts provided. Depending on the
--output switch, the proof can be rendered as a Graphviz dot
graph, as a PML proof document, or in a human-readable printout
proof service:
goalGraph=Graph()
goalGraph.parse(StringIO(proove),format='n3')
print proove,len(goalGraph)
assert len(goalGraph),"Empty goal!"
goal=list(goalGraph)[0]
builder,proof=GenerateProof(network,goal)
if outMode == 'dot':
builder.renderProof(proof).write_graphviz('proof.dot')
elif outMode == 'pml':
proofGraph=Graph()
proofGraph.namespace_manager = namespace_manager
builder.serialize(proof,proofGraph)
print proofGraph.serialize(format='pretty-xml')
else:
for step in builder.trace:
print step
"""
from pprint import pprint
from sets import Set
from FuXi.Rete.Proof import GenerateProof
from FuXi.Rete import ReteNetwork
from FuXi.Rete.AlphaNode import SUBJECT,PREDICATE,OBJECT,VARIABLE
from FuXi.Rete.BetaNode import PartialInstanciation, LEFT_MEMORY, RIGHT_MEMORY
from FuXi.Rete.RuleStore import N3RuleStore, SetupRuleStore
from FuXi.Rete.Util import renderNetwork,generateTokenSet, xcombine
from FuXi.DLP import MapDLPtoNetwork, non_DHL_OWL_Semantics
from FuXi.Horn import ComplementExpansion
from FuXi.Horn.HornRules import HornFromN3, Ruleset
from FuXi.Syntax.InfixOWL import *
from rdflib.Namespace import Namespace
from rdflib import plugin,RDF,RDFS,URIRef,Literal,Variable
from rdflib.store import Store
from cStringIO import StringIO
from rdflib.Graph import Graph,ReadOnlyGraphAggregate,ConjunctiveGraph
from rdflib.syntax.NamespaceManager import NamespaceManager
import unittest, time, warnings,sys
def main():
from optparse import OptionParser
op = OptionParser('usage: %prog [options] factFile1 factFile2 ... factFileN')
op.add_option('--closure',
action='store_true',
default=False,
help = 'Whether or not to serialize the inferred triples'+
' along with the original triples. Otherwise '+
'(the default behavior), serialize only the inferred triples')
op.add_option('--output',
default='n3',
metavar='RDF_FORMAT',
choices = ['xml',
'TriX',
'n3',
'nt',
'rif',
'rif-xml',
'conflict',
'man-owl'],
help = "Serialize the inferred triples and/or original RDF triples to STDOUT "+
"using the specified RDF syntax ('xml','pretty-xml','nt','turtle', "+
"or 'n3') or to print a summary of the conflict set (from the RETE "+
"network) if the value of this option is 'conflict'. If the the "+
" value is 'rif' or 'rif-xml', Then the rules used for inference "+
"will be serialized as RIF. Finally if the value is 'man-owl', then "+
"the RDF facts are assumed to be OWL/RDF and serialized via Manchester OWL "+
"syntax. The default is %default")
op.add_option('--class',
dest='classes',
action='append',
default=[],
metavar='QNAME',
help = 'Used with --output=man-owl to determine which '+
                  'classes within the entire OWL/RDF are targeted for serialization'+
'. Can be used more than once')
op.add_option('--property',
action='append',
dest='properties',
default=[],
metavar='QNAME',
help = 'Used with --output=man-owl or --extract to determine which '+
'properties are serialized / extracted. Can be used more than once')
op.add_option('--normalize',
action='store_true',
default=False,
help = "Used with --output=man-owl to attempt to determine if the ontology is 'normalized' [Rector, A. 2003]"+
"The default is %default")
op.add_option('--input-format',
default='xml',
dest='inputFormat',
metavar='RDF_FORMAT',
choices = ['xml', 'trix', 'n3', 'nt', 'rdfa'],
help = "The format of the RDF document(s) which serve as the initial facts "+
" for the RETE network. One of 'xml','n3','trix', 'nt', "+
"or 'rdfa'. The default is %default")
op.add_option('--pDSemantics',
action='store_true',
default=False,
help = 'Used with --dlp to add pD semantics ruleset for semantics not covered '+
'by DLP but can be expressed in definite Datalog Logic Programming'+
' The default is %default')
op.add_option('--stdin',
action='store_true',
default=False,
help = 'Parse STDIN as an RDF graph to contribute to the initial facts. The default is %default ')
op.add_option('--ns',
action='append',
default=[],
metavar="PREFIX=URI",
help = 'Register a namespace binding (QName prefix to a base URI). This '+
'can be used more than once')
op.add_option('--rules',
                  action='append',
                  default=[],
metavar='PATH_OR_URI',
help = 'The Notation 3 documents to use as rulesets for the RETE network'+
'. Can be specified more than once')
op.add_option('--ruleFacts',
action='store_true',
default=False,
help = "Determines whether or not to attempt to parse initial facts from "+
"the rule graph. The default is %default")
op.add_option('--dlp',
action='store_true',
default=False,
help = 'Use Description Logic Programming (DLP) to extract rules from OWL/RDF. The default is %default')
(options, facts) = op.parse_args()
nsBinds = {'iw':'http://inferenceweb.stanford.edu/2004/07/iw.owl#'}
for nsBind in options.ns:
pref,nsUri = nsBind.split('=')
nsBinds[pref]=nsUri
namespace_manager = NamespaceManager(Graph())
factGraph = Graph()
ruleSet = Ruleset()
for fileN in options.rules:
if options.ruleFacts:
factGraph.parse(fileN,format='n3')
print >>sys.stderr,"Parsing RDF facts from ", fileN
rs = HornFromN3(fileN)
nsBinds.update(rs.nsMapping)
ruleSet.formulae.extend(rs)
#ruleGraph.parse(fileN,format='n3')
ruleSet.nsMapping = nsBinds
for prefix,uri in nsBinds.items():
namespace_manager.bind(prefix, uri, override=False)
closureDeltaGraph = Graph()
closureDeltaGraph.namespace_manager = namespace_manager
factGraph.namespace_manager = namespace_manager
for fileN in facts:
factGraph.parse(fileN,format=options.inputFormat)
if options.stdin:
factGraph.parse(sys.stdin,format=options.inputFormat)
workingMemory = generateTokenSet(factGraph)
rule_store, rule_graph, network = SetupRuleStore(makeNetwork=True)
network.inferredFacts = closureDeltaGraph
network.nsMap = nsBinds
if options.dlp:
dlp=setupDescriptionLogicProgramming(factGraph,
addPDSemantics=options.pDSemantics,
constructNetwork=False)
ruleSet.formulae.extend(dlp)
if options.output == 'rif':
for rule in ruleSet:
print rule
elif options.output == 'man-owl':
cGraph = network.closureGraph(factGraph,readOnly=False)
cGraph.namespace_manager = namespace_manager
Individual.factoryGraph = cGraph
if options.classes:
mapping = dict(namespace_manager.namespaces())
for c in options.classes:
pref,uri=c.split(':')
print Class(URIRef(mapping[pref]+uri)).__repr__(True)
elif options.properties:
mapping = dict(namespace_manager.namespaces())
for p in options.properties:
pref,uri=p.split(':')
print Property(URIRef(mapping[pref]+uri))
else:
for p in AllProperties(cGraph):
print p.identifier
print repr(p)
for c in AllClasses(cGraph):
if options.normalize:
if c.isPrimitive():
primAnc = [sc for sc in c.subClassOf if sc.isPrimitive()]
if len(primAnc)>1:
warnings.warn("Branches of primitive skeleton taxonomy"+
" should form trees: %s has %s primitive parents: %s"%(
c.qname,len(primAnc),primAnc),UserWarning,1)
children = [desc for desc in c.subSumpteeIds()]
for child in children:
for otherChild in [o for o in children if o is not child]:
if not otherChild in [c.identifier
for c in Class(child).disjointWith]:# and\
warnings.warn("Primitive children (of %s) "+
"must be mutually disjoint: %s and %s"%(
c.qname,
Class(child).qname,
Class(otherChild).qname),UserWarning,1)
if not isinstance(c.identifier,BNode):
print c.__repr__(True)
for rule in ruleSet:
network.buildNetworkFromClause(rule)
start = time.time()
network.feedFactsToAdd(workingMemory)
sTime = time.time() - start
if sTime > 1:
sTimeStr = "%s seconds"%sTime
else:
sTime = sTime * 1000
sTimeStr = "%s milli seconds"%sTime
print >>sys.stderr,"Time to calculate closure on working memory: ",sTimeStr
print >>sys.stderr, network
if options.output == 'conflict':
network.reportConflictSet()
elif options.output not in ['rif','rif-xml','man-owl']:
if options.closure:
cGraph = network.closureGraph(factGraph)
cGraph.namespace_manager = namespace_manager
print cGraph.serialize(destination=None,
format=options.output,
base=None)
else:
print network.inferredFacts.serialize(destination=None,
format=options.output,
base=None)
|
[
"chimezie@d0ad5f6e-b329-0410-b51c-492c9c4f233d"
] |
chimezie@d0ad5f6e-b329-0410-b51c-492c9c4f233d
|
f5d815fb41987350c9c77f6935910b1ecb795776
|
1872ae3b555e905d215431ff5b391647d8628de5
|
/chapter-2-state-driven-agent-design/west_world_with_a_woman/wife.py
|
543c40feb6433f7b94985a89da639107e61dad94
|
[] |
no_license
|
malmyros/programming-game-ai-by-example
|
90d76e2bc18ced8a2080efe71ec4ee1f90d1cbf6
|
9a1cd5689e062250e3833e55aa4680b9e03c0865
|
refs/heads/master
| 2020-05-14T20:46:05.443566
| 2019-04-18T12:53:00
| 2019-04-18T12:53:00
| 181,950,433
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 972
|
py
|
from base_entity import BaseEntity
class Wife(BaseEntity):
def __init__(self, current_state=None, location=None):
super().__init__()
self.current_state = current_state
self.previous_state = None
self.location = location
self.is_cooking = False
self.stress_level = 0
self.STRESS_THRESHOLD = 3
def update(self):
self.stress_level += 1
if self.current_state:
self.current_state.execute(self)
    def change_state(self, new_state):
        # Guard against a missing current or new state, then remember the
        # previous state so revert_to_previous_state() can restore it.
        if not self.current_state or not new_state:
            return
        self.previous_state = self.current_state
        self.current_state.exit(self)
        self.current_state = new_state
        self.current_state.enter(self)
def revert_to_previous_state(self):
self.current_state = self.previous_state
def set_is_cooking(self, is_cooking):
self.is_cooking = is_cooking
def is_stressed(self):
return self.stress_level >= self.STRESS_THRESHOLD
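# Usage sketch (DoHouseWork and VisitBathroom are hypothetical state classes
# with the enter/execute/exit interface expected by update/change_state above):
# elsa = Wife(current_state=DoHouseWork(), location='kitchen')
# elsa.update()                       # runs DoHouseWork.execute(elsa)
# elsa.change_state(VisitBathroom())  # exits DoHouseWork, enters VisitBathroom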
|
[
"michail.almyros@holidayextras.com"
] |
michail.almyros@holidayextras.com
|
d1a37f55af2498bbddef30e64ab5cf173cdc0d1e
|
7f2612e5132e1583e5ba9758f299a8f301f0dc70
|
/FB/257-binary-tree-paths.py
|
eada330e2dcde3095e5ceb1523f68ee52d0cba47
|
[] |
no_license
|
taeheechoi/coding-practice
|
380e263a26ed4de9e542c51e3baa54315127ae4f
|
9528b5e85b0ea2960c994ffea62b5be86481dc38
|
refs/heads/main
| 2022-07-09T11:22:18.619712
| 2022-06-28T14:55:51
| 2022-06-28T14:55:51
| 447,082,854
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 550
|
py
|
# https://leetcode.com/problems/binary-tree-paths/
# Input: root = [1,2,3,null,5]
# Output: ["1->2->5","1->3"]
# Example 2:
# Input: root = [1]
# Output: ["1"]
class Solution:
    def binaryTreePaths(self, root):
        elements = []

        def dfs(node, s):
            if not node:
                return
            # Leaf node: record the completed root-to-leaf path.
            if node.left is None and node.right is None:
                elements.append(s + str(node.val))
                return
            s += str(node.val) + '->'
            dfs(node.left, s)
            dfs(node.right, s)

        dfs(root, '')
        return elements
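# Quick check (assumes the usual LeetCode TreeNode definition, which is not part
# of the original snippet):
# class TreeNode:
#     def __init__(self, val=0, left=None, right=None):
#         self.val, self.left, self.right = val, left, right
#
# root = TreeNode(1, TreeNode(2, None, TreeNode(5)), TreeNode(3))
# print(Solution().binaryTreePaths(root))  # ['1->2->5', '1->3']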
|
[
"dadac76@hotmail.com"
] |
dadac76@hotmail.com
|
0a948854e027da6e1d313f2c60f11f0684e5b0f2
|
e7e497b20442a4220296dea1550091a457df5a38
|
/main_project/AdHot/monitorsystem/monitorsystem/controllers/zoom_graph.py
|
4e70d23eb63430c0430abba719d0b4142562c92e
|
[] |
no_license
|
gunner14/old_rr_code
|
cf17a2dedf8dfcdcf441d49139adaadc770c0eea
|
bb047dc88fa7243ded61d840af0f8bad22d68dee
|
refs/heads/master
| 2021-01-17T18:23:28.154228
| 2013-12-02T23:45:33
| 2013-12-02T23:45:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,518
|
py
|
import logging
import rrdtool
from pylons import request, response, session, tmpl_context as c, url
from pylons.controllers.util import abort, redirect
from monitorsystem.lib.base import BaseController, render
from monitorsystem.lib.app_globals import Globals as g
from monitorsystem.lib.get_chart_info import GetAllCatagory
log = logging.getLogger(__name__)
class ZoomGraphController(BaseController):
def index(self):
# Return a rendered template
#return render('/zoom_graph.mako')
# or, return a string
return 'Hello World'
def zoomin(self, dbname, datasource, resolution, title, points, limit, description):
limit = int(limit)
img_path = "/data/xce/pylons/monitorsystem/monitorsystem/public/img/" + str(dbname) + "_big.png"
rrd_path = "/data/xce/monitor/data/" + str(dbname) + ".rrd";
title = str(title);
font = "TITLE:10:/data/xce/monitor/fonts/simhei.ttf"
passed_time = 60 * int(points);
start = "now-" + str(passed_time)
datasource = str(datasource)
resolution = str(resolution)
rra1_points = 1200;
ds_def_1 = "DEF:value1=" + rrd_path + ":" + datasource + ":AVERAGE"
ds_def_2 = "DEF:value2=" + rrd_path + ":" + datasource + ":MAX"
if(limit > 0):
c_def_1 = "CDEF:value3=value1," + str(limit) + ",GT,value1,UNKN,IF"
c_def_2 = "CDEF:value4=value2," + str(limit) + ",GT,value2,UNKN,IF"
elif(limit < 0):
c_def_1 = "CDEF:value3=value1," + str(-limit) + ",LT,value1,UNKN,IF"
c_def_2 = "CDEF:value4=value2," + str(-limit) + ",LT,value2,UNKN,IF"
graph_def_1 = "AREA:value1#00FF00:average"
graph_def_2 = "LINE1:value2#0000FF:max"
graph_def_3 = "AREA:value3#FF0000:warning "
graph_def_4 = "LINE1:value4#FF0000"
width = "500"
height = "400"
comments = "COMMENT:Average--------------MAX--------------MIN-------------- "
g_print_1 = "GPRINT:value1:AVERAGE:%1.2lf"
g_print_2 = "GPRINT:value1:MAX:%18.2lf"
g_print_3 = "GPRINT:value1:MIN:%15.2lf"
if(limit == 0):
if(int(points) <= rra1_points):
ret = rrdtool.graph(img_path, "-w", width, "-h", height, "-n", font, "-t", title, "-S", resolution, "--start", start, ds_def_1, graph_def_1, comments, g_print_1, g_print_2, g_print_3)
else:
ret = rrdtool.graph(img_path, "-w", width, "-h", height, "-n", font, "-t", title, "-S", resolution, "--start", start, "--vertical-label=", ds_def_1, ds_def_2, graph_def_1, graph_def_2, comments, g_print_1, g_print_2, g_print_3)
else:
if(int(points) <= rra1_points):
ret = rrdtool.graph(img_path, "-w", width, "-h", height, "-n", font, "-t", title, "-S", resolution, "--start", start, ds_def_1, c_def_1, graph_def_1, graph_def_3, comments, g_print_1, g_print_2, g_print_3)
else:
ret = rrdtool.graph(img_path, "-w", width, "-h", height, "-n", font, "-t", title, "-S", resolution, "--start", start, "--vertical-label=", ds_def_1, ds_def_2, c_def_1, c_def_2, graph_def_1, graph_def_2, graph_def_3, graph_def_4)
c.img_path = "img/" + str(dbname) + "_big.png"
c.description = description
c.catalist = GetAllCatagory();
return render('/zoom.mako')
# return "Viewing " + str(dbname) + " " + str(resolution) + " " + str(points) + " " + str(limit)
|
[
"liyong19861014@gmail.com"
] |
liyong19861014@gmail.com
|
6509a48313412b962480842b1fb0d090d2edd745
|
3fc3ba3f30d60bcbc0baed660327d575adf6932f
|
/ex91.py
|
53a4f3d6f54b43a2102bcfcb3dd4c3b05ec84ff1
|
[] |
no_license
|
User9000/100python
|
44ef0b885f0a3c411fa55a959aea8c5ae75f11f3
|
1c757c3c76d67eea81fb928519eb7349db9fa175
|
refs/heads/master
| 2021-01-11T20:47:31.545133
| 2017-07-01T15:48:16
| 2017-07-01T15:48:16
| 79,184,295
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 302
|
py
|
import sqlite3
import pandas
conn = sqlite3.connect("database.db")
cur= conn.cursor()
data = pandas.read_csv('ten-more-countries.txt')
for index, row in data.iterrows():
cur.execute("INSERT INTO countries VALUES (NULL,?,?,NULL)", (row["Country"], row["Area"]))
conn.commit()
conn.close()
|
[
"Carlo@Carlos-MacBook-Pro.local"
] |
Carlo@Carlos-MacBook-Pro.local
|
c5284396fc5399b0f66bc16ba674ff9cce0fcd00
|
384d58e0226ac0b25b132b9a226032bd2366aba8
|
/lib/python3.6/hmac.py
|
4ef9d5da89b523a0d728eb1bd5831f847ad98f17
|
[
"MIT"
] |
permissive
|
scottyadean/picbiz
|
482ed9f677301129302175edd676602cd5208958
|
e14bb7456936ee43d8124279456f059affd0fa16
|
refs/heads/master
| 2020-03-17T23:16:57.848971
| 2018-06-26T17:00:48
| 2018-06-26T17:00:48
| 134,038,191
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 56
|
py
|
/home/scotty/.pyenv/versions/3.6.1/lib/python3.6/hmac.py
|
[
"scott.dean@marketingevolution.com"
] |
scott.dean@marketingevolution.com
|
ebe6ba66f1743f17d66488c547d62eb1dd646dc3
|
a972c5de4144940d1c5903bb5636df4bcaf4b283
|
/ejerciciokenwin/__init__.py
|
bcbe09dd494756b2f4afdb3392ceb03bc3b19d99
|
[] |
no_license
|
apocalipsys/ejerciciopyramid-2020
|
5dafe2926bb78338eb1eca17d2be8f6ef2eba8fa
|
2411601f4e2e0dd9aa49951251f9acfe73d43777
|
refs/heads/master
| 2020-12-21T00:51:38.700245
| 2020-02-07T07:24:51
| 2020-02-07T07:24:51
| 236,258,661
| 3
| 1
| null | 2020-02-05T06:29:14
| 2020-01-26T02:42:08
|
Python
|
UTF-8
|
Python
| false
| false
| 802
|
py
|
# This is a config file, necessary to include the views, modules, database models, and so on.
from pyramid.config import Configurator
from pyramid.session import SignedCookieSessionFactory
import os
static_dir = os.path.abspath(os.path.dirname(__file__))
def main(global_config, **settings):
my_session_factory = SignedCookieSessionFactory(
'itsaseekreet')
with Configurator(settings=settings,session_factory=my_session_factory) as config:
config.include('.models')
config.include('pyramid_jinja2')
#config.add_jinja2_renderer('.html')
config.include('.security')
config.include('.routes')
config.scan('.views')
return config.make_wsgi_app()
|
[
"martinvargas82@gmail.com"
] |
martinvargas82@gmail.com
|
90afbb7dcfc470cf63d3d7377d986165be786cbe
|
7eeeb635c08475c68f137fcb44db98177ce1ff4c
|
/drfx/users/serializers.py
|
772e049950979dc085ffb3595c0aa440353ca48b
|
[] |
no_license
|
Mr-YYM/Python_web_learning
|
f2017ce795e598017c85f24e97c17220930fb177
|
62095f36c35858a5be4b15f454de555bc441a9fb
|
refs/heads/master
| 2022-12-11T09:58:46.833562
| 2018-11-03T05:10:07
| 2018-11-03T05:10:07
| 152,242,456
| 0
| 0
| null | 2022-12-08T01:18:29
| 2018-10-09T11:52:10
|
Python
|
UTF-8
|
Python
| false
| false
| 202
|
py
|
from rest_framework import serializers
from . import models
class UserSerializer(serializers.ModelSerializer):
class Meta:
model = models.CustomUser
fields = ('email', 'username',)
|
[
"492851378@qq.com"
] |
492851378@qq.com
|
4331f303b88abc1007c44aedec54876888a6b860
|
1a74a9ec3e51e7776e5c15e98c66b4cb5a9f8144
|
/source/webapp/views/base_views.py
|
3bf9e6278427c473397fb2e32f09ab53e41e9079
|
[] |
no_license
|
Aitmatow/issue_tracker
|
d66e47a7f633a455e28a1921c5220c60a4c5907f
|
96f482be1251d9c557446bc0bfa0e949cc3129d9
|
refs/heads/master
| 2022-11-26T19:59:12.929073
| 2019-12-09T12:52:13
| 2019-12-09T12:52:13
| 211,033,057
| 0
| 0
| null | 2022-11-22T04:47:23
| 2019-09-26T07:57:27
|
Python
|
UTF-8
|
Python
| false
| false
| 4,098
|
py
|
from django.db.models import Q
from django.shortcuts import get_object_or_404, render, redirect
from django.utils.http import urlencode
from django.views import View
from django.views.generic import TemplateView, ListView
class DetailView(TemplateView):
context_key = 'objects'
model = None
def get_context_data(self, **kwargs):
pk = kwargs.get('pk')
context = super().get_context_data(**kwargs)
context[self.context_key] = get_object_or_404(self.model, pk=pk)
return context
def get_objects(self):
return self.model.objects.all()
class UpdateView(View):
form_class = None
template_name = None
redirect_url = ''
model = None
key_kwarg = 'pk'
context_key = 'object'
def get(self, request, *args, **kwargs):
self.object = self.get_object()
form = self.form_class(initial=self.get_form_initial())
context = self.make_context(form)
return render(request, self.template_name, context=context)
def get_form_initial(self):
model_fields = [field.name for field in self.model._meta.fields]
initial = {}
for field in model_fields:
initial[field] = getattr(self.object, field)
print(initial)
return initial
def post(self, request, *args, **kwargs):
form = self.form_class(data=request.POST)
if form.is_valid():
return self.form_valid(form)
else:
return self.form_invalid(form)
def form_valid(self, form):
self.object = self.get_object()
for field, value in form.cleaned_data.items():
setattr(self.object, field, value)
self.object.save()
return redirect(self.get_redirect_url())
def form_invalid(self, form):
context = self.make_context(form)
return render(self.request, self.template_name, context=context)
def get_object(self):
pk = self.kwargs.get(self.key_kwarg)
return get_object_or_404(self.model, pk=pk)
def make_context(self, form):
return {
'form': form,
self.context_key: self.object
}
def get_redirect_url(self):
return self.redirect_url
class DeleteView(View):
template_name = None
model = None
redirect_url = None
confirmation_for_delete = None
def get(self, request, *args, **kwargs):
object = get_object_or_404(self.model, pk=kwargs.get('pk'))
        if self.confirmation_for_delete:
context = {'object': object}
return render(self.request, self.template_name, context)
else:
object.delete()
return redirect(self.get_redirect_url())
def post(self, request, *args, **kwargs):
object = get_object_or_404(self.model, pk = kwargs.get('pk'))
object.delete()
return redirect(self.get_redirect_url())
def get_redirect_url(self):
return self.redirect_url
class SearchView(ListView):
template_name = None
model = None
paginate_by = 10
paginate_orphans = 1
page_kwarg = 'page'
form = None
def get(self, request, *args, **kwargs):
self.form = self.get_search_form()
self.search_value = self.get_search_value()
return super().get(request, *args, **kwargs)
def get_search_form(self):
return self.form(data=self.request.GET)
def get_search_value(self):
if self.form.is_valid():
return self.form.cleaned_data['search']
return None
def get_queryset(self):
queryset = super().get_queryset()
if self.search_value:
queryset = queryset.filter(
self.get_query()
)
return queryset
def get_query(self):
pass
def get_context_data(self, *, object_list=None, **kwargs):
context = super().get_context_data(object_list=object_list, **kwargs)
context['form'] = self.form
if self.search_value:
context['query'] = urlencode({'search' : self.search_value})
return context
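# Usage sketch (Issue and IssueSearchForm are hypothetical, shown only to
# illustrate how these generic base views are meant to be subclassed):
# class IssueSearchView(SearchView):
#     template_name = 'issues/list.html'
#     model = Issue
#     form = IssueSearchForm
#
#     def get_query(self):
#         return Q(summary__icontains=self.search_value) | \
#                Q(description__icontains=self.search_value)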
|
[
"aitmarowd@gmail.com"
] |
aitmarowd@gmail.com
|
104efcdb045eb9a95054153f994edc345d127dac
|
3a6633d1af0c6cbd30bcec40c0fd2dac84468880
|
/misc.py
|
e565342b8f0068e2ce352a82ce9f6d75c392b58d
|
[] |
no_license
|
SiyuLiu0329/cyclegan-example
|
0813179ae81ce8886bddc4ebed1b3920d53cfd56
|
bb89e908dded832b914685864ae0d5ad886040ea
|
refs/heads/main
| 2022-12-21T07:01:46.536591
| 2020-10-10T11:55:36
| 2020-10-10T11:55:36
| 302,883,682
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,214
|
py
|
import tensorflow as tf
import os
import sys
import random
import numpy as np
import cv2
from PIL import Image
def load_and_resize_img(img, size):
h, w = size
img = Image.open(img)
img = img.convert('RGB')
img = np.array(img)
x, y, _ = img.shape
if h is not None and w is not None:
img = cv2.resize(img, (w, h))
return img
class CelebaAttrDict:
def __init__(self, anno_path):
with open(anno_path, "r") as fd:
content = fd.readlines()
self.attribute_names = self._process_line(content[1])
self.d = self._process(content[2:])
self.domains = None
def _process_line(self, l):
return list(filter(lambda x: x not in [''], l.rstrip('\n').split(' ')))
def _process(self, content):
d = {}
for l in content:
l = self._process_line(l)
file_id, attribute_values = l[0].split('.')[0], l[1:]
attr_dict = {}
for i in range(len(self.attribute_names)):
name, value = self.attribute_names[i], int(attribute_values[i]) == 1
attr_dict[name] = value
d[file_id] = attr_dict
return d
def split_by_domain(self, domains):
self.domains = [{} for _ in domains]
for k, v in self.d.items():
for i in range(len(domains)):
domain_attr = domains[i]
belong = domain_attr(v)
if belong:
self.domains[i][k] = v
                    # found a domain to which the img belongs
break
def find(self, file_id):
return self.d[file_id]
def sample_by_domain(self, batch_size):
sampled = []
for d in self.domains:
sampled_keys = random.sample(d.keys(), batch_size)
sampled.append(sampled_keys)
return sampled
def save_batch(batch, prefix):
for i in range(batch.shape[0]):
img = batch[i]
img = cv2.normalize(img, None, alpha = 0, beta = 255, norm_type = cv2.NORM_MINMAX, dtype = cv2.CV_32F)
pil_img = Image.fromarray(img.astype('uint8'))
pil_img.save(os.path.join('out', "%s_%d.jpg" % (prefix, i)))
def sample_celeba_with_attributes(data_dir, anno_dir, batch_size, size, attribute_domains):
"""
Returns:
a list of np.ndarray. [img_batch_domain1, img_batch_domain2, ...]
"""
attr_dict = CelebaAttrDict(anno_dir)
attr_dict.split_by_domain(attribute_domains)
def process(idx, size):
filename = os.path.join(data_dir, idx + '.jpg')
img = load_and_resize_img(filename, size)
if random.random() > 0.5: img = cv2.flip(img, 1)
return img
while True:
img_indices = attr_dict.sample_by_domain(batch_size)
imgs = [np.array([process(i, size) for i in d]) for d in img_indices]
yield imgs
def celeba_img_gen(batch_size):
return sample_celeba_with_attributes(
os.path.join('data', 'img_align_celeba_png'), # download the celeba dataset and specify the data dir here
os.path.join('data', 'img_align_celeba_anno', 'list_attr_celeba.txt'), # download the celeba dataset and specify the annotation dir here
batch_size,
(192, 160), # resize the image into a target shape
[
lambda attr: attr['Male'], # lambda function to filter domain: male
lambda attr: not attr['Male'] # lambda function to filter domain: female
]
)
import matplotlib.pyplot as plt
def viz_batch(batch):
n_imgs = batch.shape[0]
plt.figure(figsize=(20, 40))
for i in range(n_imgs):
plt.subplot(1, n_imgs, i+1)
plt.imshow(batch[i])
"""
NOTE: you may want to try
1. the usual logistic losses
2. WGAN loss and WGAN-GP loss
3. non-saturating logistic losses
"""
def gen_loss(tensor):
# LSGAN Loss
return tf.reduce_mean(tf.losses.mse(tf.zeros(shape=(4, 1)), tensor))
def recon_loss(a, b):
# reconstruction loss
# pixel-wise l2 loss
return tf.reduce_mean(tf.losses.mse(a, b))
def dis_loss(r, f):
# LSGAN Loss
return tf.reduce_mean(
tf.losses.mse(tf.zeros(shape=(4, 1)), r) + tf.losses.mse(tf.ones(shape=(4, 1)), f))
|
[
"s4392957@eait.uq.edu.au"
] |
s4392957@eait.uq.edu.au
|
6074cbe705474f5e7bd46b7bf9442b9d8c0a6685
|
d072c940209bea63b5f1b4315d4ecfe7cd9b042b
|
/test/test_matrix.py
|
e9a111401df4eca154e828acdaf4e6d6f21b70c6
|
[] |
no_license
|
majegit/workflowtest
|
267174293070635e23fe959da7adfef3f4a1a360
|
99b5a13cf3677dfb81db1cffda350b44dd84db5f
|
refs/heads/main
| 2023-07-19T22:35:58.407508
| 2021-08-05T00:03:53
| 2021-08-05T00:03:53
| 392,796,004
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 166
|
py
|
import numpy as np
import matrix
def test_matrixMultiplication():
a = np.array([1,2])
b = np.array([1,2])
assert matrix.matrixMultiplication(a,b) == [5]
|
[
"68793210+majegit@users.noreply.github.com"
] |
68793210+majegit@users.noreply.github.com
|
c56cc8021807db2c955139751d6445c4ed332629
|
0517048969512fc73aa7401c7d2a63d3fb005971
|
/main.py
|
a548d57010c2e50d6e3117aa45053dfd06aeae37
|
[] |
no_license
|
krtvand/tornado_proxy
|
77357469cd9b6042d13ff07090a816c83454b6b7
|
28f407ddf80aa8e8648cdcefac4476ecc6eb166d
|
refs/heads/master
| 2020-05-25T20:46:43.671020
| 2017-03-15T12:03:31
| 2017-03-15T12:03:31
| 84,966,940
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,876
|
py
|
import json
import logging
import os
import sys
import socket
from urllib.parse import urlparse, urlunparse
import tornado.httpserver
import tornado.ioloop
import tornado.iostream
import tornado.web
import tornado.httpclient
import tornado.httputil
MAP = {
'127.0.0.1:77': {x for x in range(7700, 7799)},
'127.0.0.1:78': {x for x in range(7800, 7899)}
}
DEFAULT_DESTINATION = '127.0.0.1:77'
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter(u'%(levelname)-8s [%(asctime)s] %(message)s')
console_handler = logging.StreamHandler(sys.stdout)
console_handler.setFormatter(formatter)
logger.addHandler(console_handler)
__all__ = ['ProxyHandler', 'run_proxy']
class ProxyException(Exception):
pass
def fetch_request(client_request, callback, **kwargs):
client = tornado.httpclient.AsyncHTTPClient()
client.fetch(client_request, callback, raise_error=False)
class ProxyHandler(tornado.web.RequestHandler):
SUPPORTED_METHODS = ['GET', 'POST']
def compute_etag(self):
return None # disable tornado Etag
def get_field_from_json_body(self, body, field_name):
body_obj = json.loads(body.decode('utf-8'))
if field_name in body_obj:
return body_obj[field_name]
else:
raise ProxyException(
'{} field not found in requests json body'.format(field_name))
def find_dest_host_by_term_id(self, terminal_id):
for dest in MAP:
if terminal_id in MAP[dest]:
destination = dest
break
else:
destination = DEFAULT_DESTINATION
return destination
def get_dest_host(self, server_request):
if 'content-type' in server_request.headers:
if server_request.headers['content-type'] == 'application/json':
terminal_id = self.get_field_from_json_body(server_request.body, 'terminal_id')
return self.find_dest_host_by_term_id(terminal_id)
else:
raise ProxyException('Content-type should be application/json')
else:
raise ProxyException('Content-type header not found')
def make_client_request(self, server_request):
kwargs = {}
dest_host = self.get_dest_host(server_request)
url = urlunparse(
[server_request.protocol, dest_host,
server_request.path, server_request.query,
None, None]
)
kwargs['method'] = server_request.method
kwargs['headers'] = server_request.headers
if server_request.body:
kwargs['body'] = server_request.body
else:
kwargs['body'] = None
client_req = tornado.httpclient.HTTPRequest(url, **kwargs)
return client_req
@tornado.web.asynchronous
def get(self):
logger.debug('Handle %s request to %s', self.request.method,
self.request.uri)
def handle_response(response):
if (response.error and not
isinstance(response.error, tornado.httpclient.HTTPError)):
self.set_status(500)
self.write('Internal server error:\n' + str(response.error))
else:
self.set_status(response.code, response.reason)
self._headers = tornado.httputil.HTTPHeaders() # clear tornado default header
for header, v in response.headers.get_all():
if header not in ('Content-Length', 'Transfer-Encoding', 'Content-Encoding', 'Connection'):
self.add_header(header, v) # some header appear multiple times, eg 'Set-Cookie'
if response.body:
self.set_header('Content-Length', len(response.body))
self.write(response.body)
self.finish()
try:
client_request = self.make_client_request(self.request)
fetch_request(client_request, handle_response)
except (tornado.httpclient.HTTPError, ProxyException) as e:
if hasattr(e, 'response') and e.response:
handle_response(e.response)
else:
self.set_status(500)
self.write('Internal server error:\n' + str(e))
self.finish()
@tornado.web.asynchronous
def post(self):
return self.get()
def run_proxy(port, start_ioloop=True):
"""
Run proxy on the specified port. If start_ioloop is True (default),
the tornado IOLoop will be started immediately.
"""
app = tornado.web.Application([
(r'.*', ProxyHandler),
])
app.listen(port)
ioloop = tornado.ioloop.IOLoop.instance()
if start_ioloop:
ioloop.start()
if __name__ == '__main__':
port = 88
print("Starting HTTP proxy on port %d" % port)
run_proxy(port)
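# Example request against the running proxy (terminal_id is illustrative):
#
#   curl -X POST http://localhost:88/ -H 'Content-Type: application/json' \
#        -d '{"terminal_id": 7805}'
#
# 7805 is in MAP's range for 127.0.0.1:78, so the request is forwarded there;
# ids that fall in no range are routed to DEFAULT_DESTINATION.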
|
[
"krtvand@gmail.com"
] |
krtvand@gmail.com
|
843d2dad5d3ab35696c22610183d4689025cc5fb
|
8b85d71b17ac8c5f71565d95011d8a50e9609f5a
|
/cashier/migrations/0004_auto_20180315_1941.py
|
ae964420f0c1f013ea3d9128ed6fa6dd5f9ac7d6
|
[] |
no_license
|
irsyadf27/Point-of-Sale
|
8a6d8d16a7a3e91751829662b35febf33ee433d0
|
a077092f3debb089a05be26b7572db5819f058fc
|
refs/heads/master
| 2021-09-10T15:17:45.797065
| 2018-03-28T11:01:17
| 2018-03-28T11:01:17
| 111,640,735
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 962
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2018-03-15 12:41
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('discount', '0001_initial'),
('cashier', '0003_auto_20180315_1938'),
]
operations = [
migrations.AddField(
model_name='invoice',
name='discount',
field=models.OneToOneField(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='invoice', to='discount.Discount'),
),
migrations.AddField(
model_name='invoice',
name='user',
field=models.OneToOneField(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='invoice', to=settings.AUTH_USER_MODEL),
),
]
|
[
"irsyad.fauzan777@gmail.com"
] |
irsyad.fauzan777@gmail.com
|
0a9b48c28c74f9a89bad246e7ab0bf551cd00e0a
|
379634162f6bcf9f9eb43b43c985896462a158f9
|
/utils/dnn_app_utils.py
|
191aab664956a77b4ac456b7f95585ec04c6db28
|
[] |
no_license
|
jeffreyrnorton/Notebooks_MachineLearning
|
34b28718949566f55320b6810b186c61c7338c1d
|
2308a9b243ec4664f32c6154b5e8fb930acfa0ba
|
refs/heads/master
| 2020-03-09T02:14:37.693110
| 2018-11-15T07:03:46
| 2018-11-15T07:03:46
| 128,535,323
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 14,785
|
py
|
import numpy as np
import matplotlib.pyplot as plt
import h5py
def sigmoid(Z):
"""
Implements the sigmoid activation in numpy
Arguments:
Z -- numpy array of any shape
Returns:
A -- output of sigmoid(z), same shape as Z
cache -- returns Z as well, useful during backpropagation
"""
A = 1/(1+np.exp(-Z))
cache = Z
return A, cache
def relu(Z):
"""
Implement the RELU function.
Arguments:
Z -- Output of the linear layer, of any shape
Returns:
A -- Post-activation parameter, of the same shape as Z
cache -- a python dictionary containing "A" ; stored for computing the backward pass efficiently
"""
A = np.maximum(0,Z)
assert(A.shape == Z.shape)
cache = Z
return A, cache
def relu_backward(dA, cache):
"""
Implement the backward propagation for a single RELU unit.
Arguments:
dA -- post-activation gradient, of any shape
cache -- 'Z' where we store for computing backward propagation efficiently
Returns:
dZ -- Gradient of the cost with respect to Z
"""
Z = cache
dZ = np.array(dA, copy=True) # just converting dz to a correct object.
# When z <= 0, you should set dz to 0 as well.
dZ[Z <= 0] = 0
assert (dZ.shape == Z.shape)
return dZ
def sigmoid_backward(dA, cache):
"""
Implement the backward propagation for a single SIGMOID unit.
Arguments:
dA -- post-activation gradient, of any shape
cache -- 'Z' where we store for computing backward propagation efficiently
Returns:
dZ -- Gradient of the cost with respect to Z
"""
Z = cache
s = 1/(1+np.exp(-Z))
dZ = dA * s * (1-s)
assert (dZ.shape == Z.shape)
return dZ
def load_data():
train_dataset = h5py.File('datasets/train_catvnoncat.h5', "r")
train_set_x_orig = np.array(train_dataset["train_set_x"][:]) # your train set features
train_set_y_orig = np.array(train_dataset["train_set_y"][:]) # your train set labels
test_dataset = h5py.File('datasets/test_catvnoncat.h5', "r")
test_set_x_orig = np.array(test_dataset["test_set_x"][:]) # your test set features
test_set_y_orig = np.array(test_dataset["test_set_y"][:]) # your test set labels
classes = np.array(test_dataset["list_classes"][:]) # the list of classes
train_set_y_orig = train_set_y_orig.reshape((1, train_set_y_orig.shape[0]))
test_set_y_orig = test_set_y_orig.reshape((1, test_set_y_orig.shape[0]))
return train_set_x_orig, train_set_y_orig, test_set_x_orig, test_set_y_orig, classes
def initialize_parameters(n_x, n_h, n_y):
"""
Argument:
n_x -- size of the input layer
n_h -- size of the hidden layer
n_y -- size of the output layer
Returns:
parameters -- python dictionary containing your parameters:
W1 -- weight matrix of shape (n_h, n_x)
b1 -- bias vector of shape (n_h, 1)
W2 -- weight matrix of shape (n_y, n_h)
b2 -- bias vector of shape (n_y, 1)
"""
np.random.seed(1)
W1 = np.random.randn(n_h, n_x)*0.01
b1 = np.zeros((n_h, 1))
W2 = np.random.randn(n_y, n_h)*0.01
b2 = np.zeros((n_y, 1))
assert(W1.shape == (n_h, n_x))
assert(b1.shape == (n_h, 1))
assert(W2.shape == (n_y, n_h))
assert(b2.shape == (n_y, 1))
parameters = {"W1": W1,
"b1": b1,
"W2": W2,
"b2": b2}
return parameters
def initialize_parameters_deep(layer_dims):
"""
Arguments:
layer_dims -- python array (list) containing the dimensions of each layer in our network
Returns:
parameters -- python dictionary containing your parameters "W1", "b1", ..., "WL", "bL":
Wl -- weight matrix of shape (layer_dims[l], layer_dims[l-1])
bl -- bias vector of shape (layer_dims[l], 1)
"""
np.random.seed(1)
parameters = {}
L = len(layer_dims) # number of layers in the network
for l in range(1, L):
parameters['W' + str(l)] = np.random.randn(layer_dims[l], layer_dims[l-1]) / np.sqrt(layer_dims[l-1]) #*0.01
parameters['b' + str(l)] = np.zeros((layer_dims[l], 1))
assert(parameters['W' + str(l)].shape == (layer_dims[l], layer_dims[l-1]))
assert(parameters['b' + str(l)].shape == (layer_dims[l], 1))
return parameters
def linear_forward(A, W, b):
"""
Implement the linear part of a layer's forward propagation.
Arguments:
A -- activations from previous layer (or input data): (size of previous layer, number of examples)
W -- weights matrix: numpy array of shape (size of current layer, size of previous layer)
b -- bias vector, numpy array of shape (size of the current layer, 1)
Returns:
Z -- the input of the activation function, also called pre-activation parameter
cache -- a python dictionary containing "A", "W" and "b" ; stored for computing the backward pass efficiently
"""
Z = W.dot(A) + b
assert(Z.shape == (W.shape[0], A.shape[1]))
cache = (A, W, b)
return Z, cache
def linear_activation_forward(A_prev, W, b, activation):
"""
Implement the forward propagation for the LINEAR->ACTIVATION layer
Arguments:
A_prev -- activations from previous layer (or input data): (size of previous layer, number of examples)
W -- weights matrix: numpy array of shape (size of current layer, size of previous layer)
b -- bias vector, numpy array of shape (size of the current layer, 1)
activation -- the activation to be used in this layer, stored as a text string: "sigmoid" or "relu"
Returns:
A -- the output of the activation function, also called the post-activation value
cache -- a python dictionary containing "linear_cache" and "activation_cache";
stored for computing the backward pass efficiently
"""
if activation == "sigmoid":
# Inputs: "A_prev, W, b". Outputs: "A, activation_cache".
Z, linear_cache = linear_forward(A_prev, W, b)
A, activation_cache = sigmoid(Z)
elif activation == "relu":
# Inputs: "A_prev, W, b". Outputs: "A, activation_cache".
Z, linear_cache = linear_forward(A_prev, W, b)
A, activation_cache = relu(Z)
assert (A.shape == (W.shape[0], A_prev.shape[1]))
cache = (linear_cache, activation_cache)
return A, cache
def L_model_forward(X, parameters):
"""
Implement forward propagation for the [LINEAR->RELU]*(L-1)->LINEAR->SIGMOID computation
Arguments:
X -- data, numpy array of shape (input size, number of examples)
parameters -- output of initialize_parameters_deep()
Returns:
AL -- last post-activation value
caches -- list of caches containing:
every cache of linear_relu_forward() (there are L-1 of them, indexed from 0 to L-2)
the cache of linear_sigmoid_forward() (there is one, indexed L-1)
"""
caches = []
A = X
L = len(parameters) // 2 # number of layers in the neural network
# Implement [LINEAR -> RELU]*(L-1). Add "cache" to the "caches" list.
for l in range(1, L):
A_prev = A
A, cache = linear_activation_forward(A_prev, parameters['W' + str(l)], parameters['b' + str(l)], activation = "relu")
caches.append(cache)
# Implement LINEAR -> SIGMOID. Add "cache" to the "caches" list.
AL, cache = linear_activation_forward(A, parameters['W' + str(L)], parameters['b' + str(L)], activation = "sigmoid")
caches.append(cache)
assert(AL.shape == (1,X.shape[1]))
return AL, caches
def compute_cost(AL, Y):
"""
Implement the cost function defined by equation (7).
Arguments:
AL -- probability vector corresponding to your label predictions, shape (1, number of examples)
Y -- true "label" vector (for example: containing 0 if non-cat, 1 if cat), shape (1, number of examples)
Returns:
cost -- cross-entropy cost
"""
m = Y.shape[1]
# Compute loss from aL and y.
cost = (1./m) * (-np.dot(Y,np.log(AL).T) - np.dot(1-Y, np.log(1-AL).T))
cost = np.squeeze(cost) # To make sure your cost's shape is what we expect (e.g. this turns [[17]] into 17).
assert(cost.shape == ())
return cost
def linear_backward(dZ, cache):
"""
Implement the linear portion of backward propagation for a single layer (layer l)
Arguments:
dZ -- Gradient of the cost with respect to the linear output (of current layer l)
cache -- tuple of values (A_prev, W, b) coming from the forward propagation in the current layer
Returns:
dA_prev -- Gradient of the cost with respect to the activation (of the previous layer l-1), same shape as A_prev
dW -- Gradient of the cost with respect to W (current layer l), same shape as W
db -- Gradient of the cost with respect to b (current layer l), same shape as b
"""
A_prev, W, b = cache
m = A_prev.shape[1]
dW = 1./m * np.dot(dZ,A_prev.T)
db = 1./m * np.sum(dZ, axis = 1, keepdims = True)
dA_prev = np.dot(W.T,dZ)
assert (dA_prev.shape == A_prev.shape)
assert (dW.shape == W.shape)
assert (db.shape == b.shape)
return dA_prev, dW, db
def linear_activation_backward(dA, cache, activation):
"""
Implement the backward propagation for the LINEAR->ACTIVATION layer.
Arguments:
dA -- post-activation gradient for current layer l
cache -- tuple of values (linear_cache, activation_cache) we store for computing backward propagation efficiently
activation -- the activation to be used in this layer, stored as a text string: "sigmoid" or "relu"
Returns:
dA_prev -- Gradient of the cost with respect to the activation (of the previous layer l-1), same shape as A_prev
dW -- Gradient of the cost with respect to W (current layer l), same shape as W
db -- Gradient of the cost with respect to b (current layer l), same shape as b
"""
linear_cache, activation_cache = cache
if activation == "relu":
dZ = relu_backward(dA, activation_cache)
dA_prev, dW, db = linear_backward(dZ, linear_cache)
elif activation == "sigmoid":
dZ = sigmoid_backward(dA, activation_cache)
dA_prev, dW, db = linear_backward(dZ, linear_cache)
return dA_prev, dW, db
def L_model_backward(AL, Y, caches):
"""
Implement the backward propagation for the [LINEAR->RELU] * (L-1) -> LINEAR -> SIGMOID group
Arguments:
AL -- probability vector, output of the forward propagation (L_model_forward())
Y -- true "label" vector (containing 0 if non-cat, 1 if cat)
caches -- list of caches containing:
every cache of linear_activation_forward() with "relu" (there are (L-1) or them, indexes from 0 to L-2)
the cache of linear_activation_forward() with "sigmoid" (there is one, index L-1)
Returns:
grads -- A dictionary with the gradients
grads["dA" + str(l)] = ...
grads["dW" + str(l)] = ...
grads["db" + str(l)] = ...
"""
grads = {}
L = len(caches) # the number of layers
m = AL.shape[1]
Y = Y.reshape(AL.shape) # after this line, Y is the same shape as AL
# Initializing the backpropagation
dAL = - (np.divide(Y, AL) - np.divide(1 - Y, 1 - AL))
# Lth layer (SIGMOID -> LINEAR) gradients. Inputs: "AL, Y, caches". Outputs: "grads["dAL"], grads["dWL"], grads["dbL"]
current_cache = caches[L-1]
grads["dA" + str(L)], grads["dW" + str(L)], grads["db" + str(L)] = linear_activation_backward(dAL, current_cache, activation = "sigmoid")
for l in reversed(range(L-1)):
# lth layer: (RELU -> LINEAR) gradients.
current_cache = caches[l]
dA_prev_temp, dW_temp, db_temp = linear_activation_backward(grads["dA" + str(l + 2)], current_cache, activation = "relu")
grads["dA" + str(l + 1)] = dA_prev_temp
grads["dW" + str(l + 1)] = dW_temp
grads["db" + str(l + 1)] = db_temp
return grads
def update_parameters(parameters, grads, learning_rate):
"""
Update parameters using gradient descent
Arguments:
parameters -- python dictionary containing your parameters
grads -- python dictionary containing your gradients, output of L_model_backward
Returns:
parameters -- python dictionary containing your updated parameters
parameters["W" + str(l)] = ...
parameters["b" + str(l)] = ...
"""
L = len(parameters) // 2 # number of layers in the neural network
# Update rule for each parameter. Use a for loop.
for l in range(L):
parameters["W" + str(l+1)] = parameters["W" + str(l+1)] - learning_rate * grads["dW" + str(l+1)]
parameters["b" + str(l+1)] = parameters["b" + str(l+1)] - learning_rate * grads["db" + str(l+1)]
return parameters
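# Minimal training-loop sketch built from the helpers above (layer sizes,
# learning rate and iteration count are illustrative, not from the original
# notebook; X and Y are the usual (features, m) and (1, m) arrays):
# layers_dims = [12288, 20, 7, 5, 1]
# parameters = initialize_parameters_deep(layers_dims)
# for i in range(2500):
#     AL, caches = L_model_forward(X, parameters)
#     cost = compute_cost(AL, Y)
#     grads = L_model_backward(AL, Y, caches)
#     parameters = update_parameters(parameters, grads, learning_rate=0.0075)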
def predict(X, y, parameters):
"""
This function is used to predict the results of a L-layer neural network.
Arguments:
X -- data set of examples you would like to label
parameters -- parameters of the trained model
Returns:
p -- predictions for the given dataset X
"""
m = X.shape[1]
n = len(parameters) // 2 # number of layers in the neural network
p = np.zeros((1, m),dtype=int)
# Forward propagation
probas, caches = L_model_forward(X, parameters)
# convert probas to 0/1 predictions
for i in range(0, probas.shape[1]):
if probas[0,i] > 0.5:
p[0,i] = 1
else:
p[0,i] = 0
#print results
#print ("predictions: " + str(p))
#print ("true labels: " + str(y))
print("Accuracy: %s" % str(np.sum(p == y)/float(m)))
return p
def print_mislabeled_images(classes, X, y, p):
"""
Plots images where predictions and truth were different.
X -- dataset
y -- true labels
p -- predictions
"""
a = p + y
mislabeled_indices = np.asarray(np.where(a == 1))
plt.rcParams['figure.figsize'] = (40.0, 40.0) # set default size of plots
num_images = len(mislabeled_indices[0])
for i in range(num_images):
index = mislabeled_indices[1][i]
plt.subplot(2, num_images, i + 1)
plt.imshow(X[:,index].reshape(64,64,3), interpolation='nearest')
plt.axis('off')
plt.title("Prediction: " + classes[int(p[0,index])].decode("utf-8") + " \n Class: " + classes[y[0,index]].decode("utf-8"))
|
[
"jrnorton0@gmail.com"
] |
jrnorton0@gmail.com
|
9c3d015fca656b111401ededbdedbbbc63be072f
|
10de91a2ee64080d9e5b45b743d5d58b106139ac
|
/visualize.py
|
801b8f068052413186891de70648b8c18dd5d72e
|
[] |
no_license
|
Roi262/COVID-19
|
2950998c643928b176c1e628c0ed1030561f58e6
|
c1b8e72e1e0165e8a17333281112eede8181e656
|
refs/heads/master
| 2021-05-25T22:01:20.078936
| 2020-04-08T00:41:46
| 2020-04-08T00:41:46
| 253,933,609
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,365
|
py
|
from config import *
import pandas as pd
import numpy as np
import csv
import matplotlib.pyplot as plt
path = NIR_ETZION_PATH
def plot_counter(value):
with open(path, newline='') as f:
df = pd.read_csv(f)
values = df[value]
distincts = values.value_counts()
sizes = list(distincts)
labels = list(distincts.index)
# for i in range(len(sizes)):
# if sizes[i] < .05 * sum(sizes):
labels = [label[::-1] for label in labels]
labels.reverse()
print(distincts)
# colors = ['orange', 'blue']
# explode = (0.1, 0, 0, 0) # explode 1st slice
# Plot
plt.pie(sizes, labels=labels,
autopct='%1.1f%%', startangle=140)
plt.axis('equal')
plt.show()
def plot_age():
with open(path, newline='') as f:
df = pd.read_csv(f)
ages = list(df['גיל'].dropna())
sorted_ages = sorted(ages)
# thresholds = [0]
thresholds = [10*i for i in range(10)]
i = 0
ages_counters = []
labels = []
for thresh in thresholds:
counter = 0
while i < len(sorted_ages) and (sorted_ages[i] < thresh + 9):
counter += 1
i += 1
ages_counters.append(counter)
labels.append(str(thresh) + '-' + str(thresh + 9))
# Plot
x = np.arange(10)
plt.bar(x, height=ages_counters)
plt.xticks(x, labels)
# plt.legend(loc="upper left")
plt.ylabel('Number of Guests')
plt.xlabel('Age Groups')
plt.title("Guest Ages")
for i in range(len(ages_counters)):
print('Age group {}: Count {}'.format(labels[i], ages_counters[i]))
plt.show()
print(sum(ages_counters))
def plot_sex():
with open(path, newline='') as f:
df = pd.read_csv(f)
sex = df['מין']
male_count = list(sex).count('ז')
female_count = list(sex).count('נ')
labels = 'Male', 'Female'
sizes = [male_count, female_count]
# explode = (0.1, 0, 0, 0) # explode 1st slice
plt.pie(sizes, labels=labels,
autopct='%1.1f%%', startangle=140)
plt.axis('equal')
plt.show()
plot_sex()
# plot_age()
# plot_counter('עיר מגורים')
# plot_counter('קופת חולים')
|
[
"roi262@gmail.com"
] |
roi262@gmail.com
|
05316d88a35289d491a107f6328cede2a1c6eb9f
|
4eaa1b9b08914e0a2cc9276363e489ccef19d3a2
|
/ch9/electric_car.py
|
a3903742781c7a56beb7c524f66ba35a4bb8f545
|
[] |
no_license
|
melihcanyardi/Python-Crash-Course-2e-Part-I
|
69b3b5b3f63cdbd7be6fabd6d4f2ddfd9a3434a3
|
0c9b250f512985c04b2c0397f3afaa8bf3a57f17
|
refs/heads/main
| 2023-03-12T21:43:14.012537
| 2021-03-03T19:23:41
| 2021-03-03T19:23:41
| 344,236,741
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,048
|
py
|
from car import Car
class Battery():
"""A simple attempt to model a battery for an electric car."""
def __init__(self, battery_size=75):
"""Initialize the battery's attributes."""
self.battery_size = battery_size
def describe_battery(self):
"""Print a statement describing the battery size."""
print(f"This car has a {self.battery_size}-kWh battery.")
def get_range(self):
"""Print a statement about the range this battery provides."""
if self.battery_size == 75:
range = 260
elif self.battery_size == 100:
range = 315
print(f"This car can go about {range} miles on a full charge.")
class ElectricCar(Car):
"""Represent aspects of a car, specific to electric vehicles."""
def __init__(self, make, model, year):
"""
Initialize attributes of the parent class.
Then initialize attributes specific to an electric car.
"""
super().__init__(make, model, year)
self.battery = Battery()
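# Usage sketch (mirrors the book's driving code; assumes car.Car provides
# get_descriptive_name()):
# my_tesla = ElectricCar('tesla', 'model s', 2019)
# print(my_tesla.get_descriptive_name())
# my_tesla.battery.describe_battery()
# my_tesla.battery.get_range()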
|
[
"melihcanyardi@hotmail.com"
] |
melihcanyardi@hotmail.com
|
67dcb99c50642a8467d4d28635d4b33f9f713578
|
c49a80ed4c69ecbe56c26d0fda04bf0e81f62b98
|
/8_Puzzle_solver_branching_factor.py
|
169b9454007b89a3ccc079c83aad0c621b12983a
|
[] |
no_license
|
aayushsoni10/8_Puzzle_AI
|
1d7e2950233ec8572ccede62388560fa2cf8062d
|
363d11968c54f5ae64e0fbaaae42f9244a8e94d9
|
refs/heads/master
| 2020-08-03T00:24:18.298639
| 2019-10-18T20:52:02
| 2019-10-18T20:52:02
| 211,562,994
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,656
|
py
|
import sys
import heapq
from random import randint
# This class is used to Create the n X n puzzle
class EightPuzzle:
# Initiating the object
def __init__(self,size):
self.size=size
self.puzzle=[]
self.zero=(0,0)
self.moves=["U","D","L","R"]
count=1
for i in range(0,size):
self.puzzle.append([])
for j in range(0,size):
self.puzzle[i].append(count)
count+=1
self.puzzle[size-1][size-1]=0
self.zero=(size-1,size-1)
#Converting our puzzle in string
def readPuzzle(self,string):
a=string.split(" ")
count=0
for i in range(0,self.size):
for j in range(0,self.size):
if int(a[count])==0:
self.zero=(i,j)
self.puzzle[i][j]=int(a[count])
count+=1
#Checking if the puzzle is correct
def checkPuzzle(self):
count=1
for i in range(0,self.size):
for j in range(0,self.size):
if self.puzzle[i][j]!=(count%(self.size*self.size)):
return False
count+=1
return True
#Swapping 2 elemets in the matrix
def swap(self,(x1,y1),(x2,y2)):
temp=self.puzzle[x1][y1]
self.puzzle[x1][y1]=self.puzzle[x2][y2]
self.puzzle[x2][y2]=temp
#move functions
def up(self):
if (self.zero[0]!=0):
self.swap((self.zero[0]-1,self.zero[1]),self.zero)
self.zero=(self.zero[0]-1,self.zero[1])
def down(self):
if (self.zero[0]!=self.size-1):
self.swap((self.zero[0]+1,self.zero[1]),self.zero)
self.zero=(self.zero[0]+1,self.zero[1])
def left(self):
if (self.zero[1]!=0):
self.swap((self.zero[0],self.zero[1]-1),self.zero)
self.zero=(self.zero[0],self.zero[1]-1)
def right(self):
if (self.zero[1]!=self.size-1):
self.swap((self.zero[0],self.zero[1]+1),self.zero)
self.zero=(self.zero[0],self.zero[1]+1)
#Printing the puzzle in matrix form
def printPuzzle(self):
for i in range(0,self.size):
for j in range(0,self.size):
print self.puzzle[i][j],
print ""
#print
#Performing Move on the matrix
def doMove(self,move):
if move=="U":
self.up()
if move=="D":
self.down()
if move=="L":
self.left()
if move=="R":
self.right()
def permute(self,numPerm):
for i in range(0,numPerm):
self.doMove(self.moves[randint(0,3)])
def parseMoveSequence(self,string):
for m in string:
self.doMove(m)
#Taking inputs from command line for the structure of the puzzle
#In our case the size is 3 X 3 ~= 8 PUZZLE
t1=EightPuzzle(3)
t1.permute(3)
#Printing the initial form of Puzzle
print("Initial puzzle")
t1.printPuzzle()
# Using heap for level ordered bfs for the puzzle with initial puzzle layer = 0
leaves = []
heapq.heappush(leaves, (0, t1.puzzle))
l = 0
# Variable TBF to store the total branching factor
TBF = 0
# Variable NS to store the total number of states
NS = 0
# The branching factor is only computed up to level 13 here because of limited processing power.
# Increasing the depth limit should give a more accurate estimate (~2.66).
a = [0,0,0,0,0,0,0,0,0,0,0,0,0]
ind = 0
values = [0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0]
# Performing BFS
while leaves:
t = [[0,0,0],[0,0,0],[0,0,0]]
# Popping an element which has the least level
next_item = heapq.heappop(leaves)
# Variable r is storing the matrix
r = next_item[1]
# Variable l is storing the current puzzle/ matrix level number
l = next_item[0]
if(ind>11):
break
# Printing the TBF AND NS for different level constraints
if l>(ind+1) and a[ind]==0:
print " Results till level<=", ind+1
print " Total branching factor = ", TBF
print " Total number of states for the puzzle(n) = ", NS
print " Average branching factor(b) = ", ((TBF+0.0)/NS)
a[ind]=1
values[ind] = ((TBF+0.0)/NS)
tm = 1
for x in range(ind+2):
tm = tm*values[ind]
k = NS/tm
print " Applying the formula n = k(b)^(d+1), value of k = ", k
ind = ind +1
# Incrementing the number of states
NS=NS+1
# Storing the currently popped matrix (r) in variable t
t[0][0] = r[0][0]
t[0][1] = r[0][1]
t[0][2] = r[0][2]
t[1][0] = r[1][0]
t[1][1] = r[1][1]
t[1][2] = r[1][2]
t[2][0] = r[2][0]
t[2][1] = r[2][1]
t[2][2] = r[2][2]
'''IMPORTANT: TO print the current state we can print the matrix in variable t'''
# Performing moves depending on the place where 0(or empty cell) is present
# Then pushing the new matrix with l+1 i.e. current level + 1 in the heap
if(r[0][0]==0):
TBF = TBF+2
temp = t[0][1]
t[0][1] = 0
t[0][0] = temp
heapq.heappush(leaves, (l+1, t))
temp = t[0][0]
t[0][0] = 0
t[0][1] = temp
temp = t[1][0]
t[1][0]=0
t[0][0] = temp
heapq.heappush(leaves, (l+1, t))
elif(r[0][1]==0):
TBF = TBF+3
temp = t[1][1]
t[1][1]=0
t[0][1] = temp
heapq.heappush(leaves, (l+1, t))
temp = t[0][1]
t[0][1] = 0
t[1][1] = temp
temp = t[0][2]
t[0][2]=0
t[0][1] = temp
heapq.heappush(leaves, (l+1, t))
temp = t[0][1]
t[0][1] = 0
t[0][2] = temp
temp = t[0][0]
t[0][0]=0
t[0][1] = temp
heapq.heappush(leaves, (l+1, t))
elif(r[1][0]==0):
TBF = TBF+3
temp = t[1][1]
t[1][1]=0
t[1][0] = temp
heapq.heappush(leaves, (l+1, t))
temp = t[1][0]
t[1][0] = 0
t[1][1] = temp
temp = t[2][0]
t[2][0]=0
t[1][0] = temp
heapq.heappush(leaves, (l+1, t))
temp = t[1][0]
t[1][0] = 0
t[2][0] = temp
temp = t[0][0]
t[0][0]=0
t[1][0] = temp
heapq.heappush(leaves, (l+1, t))
elif(r[0][2]==0):
TBF = TBF+2
temp = t[0][1]
t[0][1]=0
t[0][2] = temp
heapq.heappush(leaves, (l+1, t))
temp = t[0][2]
t[0][2] = 0
t[0][1] = temp
temp = t[1][2]
t[1][2]=0
t[0][2] = temp
heapq.heappush(leaves, (l+1, t))
elif(r[1][2]==0):
TBF = TBF+3
temp = t[1][1]
t[1][1]=0
t[1][2] = temp
heapq.heappush(leaves, (l+1, t))
temp = t[1][2]
t[1][2] = 0
t[1][1] = temp
temp = t[0][2]
t[0][2]=0
t[1][2] = temp
heapq.heappush(leaves, (l+1, t))
temp = t[1][2]
t[1][2] = 0
t[0][2] = temp
temp = t[2][2]
t[2][2]=0
t[1][2] = temp
heapq.heappush(leaves, (l+1, t))
elif(r[1][1]==0):
TBF = TBF+4
temp = t[0][1]
t[0][1]=0
t[1][1] = temp
heapq.heappush(leaves, (l+1, t))
temp = t[1][1]
t[1][1] = 0
t[0][1] = temp
temp = t[1][2]
t[1][2]=0
t[1][1] = temp
heapq.heappush(leaves, (l+1, t))
temp = t[1][1]
t[1][1] = 0
t[1][2] = temp
temp = t[1][0]
t[1][0]=0
t[1][1] = temp
heapq.heappush(leaves, (l+1, t))
temp = t[1][1]
t[1][1] = 0
t[1][0] = temp
temp = t[2][1]
t[2][1]=0
t[1][1] = temp
heapq.heappush(leaves, (l+1, t))
elif(r[2][1]==0):
TBF = TBF+3
temp = t[1][1]
t[1][1]=0
t[2][1] = temp
heapq.heappush(leaves, (l+1, t))
temp = t[2][1]
t[2][1] = 0
t[1][1] = temp
temp = t[2][2]
t[2][2]=0
t[2][1] = temp
heapq.heappush(leaves, (l+1, t))
temp = t[2][1]
t[2][1] = 0
t[2][2] = temp
temp = t[2][0]
t[2][0]=0
t[2][1] = temp
heapq.heappush(leaves, (l+1, t))
elif(r[2][0]==0):
TBF = TBF+2
temp = t[1][0]
t[1][0]=0
t[2][0] = temp
heapq.heappush(leaves, (l+1, t))
temp = t[2][0]
t[2][0] = 0
t[1][0] = temp
temp = t[2][1]
t[2][1]=0
t[2][0] = temp
heapq.heappush(leaves, (l+1, t))
elif(r[2][2]==0):
TBF = TBF+2
temp = t[2][1]
t[2][1]=0
t[2][2] = temp
heapq.heappush(leaves, (l+1, t))
temp = t[2][2]
t[2][2] = 0
t[2][1] = temp
temp = t[1][2]
t[1][2]=0
t[2][2] = temp
heapq.heappush(leaves, (l+1, t))
ABF = 0.0
for x in range(13):
ABF = ABF + values[x]
ABF = (ABF/13)
print "Overall average branching factor = ", ABF
tm = 1
for x in range(ind+1):
tm = tm*ABF
k = NS/tm
print "Applying the formula n = k(b)^(d+1), value of k = ", k
if(k>=0 and k<2):
print "The value of k is in between 0 and 2, Hence Satisfied."
|
[
"soni.aayush98@gmail.com"
] |
soni.aayush98@gmail.com
|
54bf75dd8884b487a9504f14b27458b18c386ed7
|
6226e633c04ed8d79b5d6a444fee3cc5736e5eeb
|
/mri_analysis/apps/app.py
|
6e0b4289826d47a858aa33111f759a26b017fcd0
|
[] |
no_license
|
mszinte/PredictEye
|
97aab2c20813072645c3c4f4cfea8670d50f79ad
|
f17e2481134fdad223b62393e53db1b4c192e2ac
|
refs/heads/master
| 2023-06-12T06:58:09.722847
| 2021-06-16T12:30:27
| 2021-06-16T12:30:27
| 285,290,953
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 208
|
py
|
import dash
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets, suppress_callback_exceptions=True)
server = app.server
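# Hedged usage sketch (an assumption, not part of the original file): when this
# module is executed directly, the Dash development server can be started on the
# framework's default port 8050.
if __name__ == '__main__':
    app.run_server(debug=True)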
|
[
"mszinte@login02.cluster"
] |
mszinte@login02.cluster
|
9878aa70c6c4979e347708410e1b4bfdc95469e0
|
f4fbeb3b1f91043b82d1aacf7b40608644b4130e
|
/tensorflow/python/keras/_impl/keras/layers/normalization_test.py
|
84f0b2776c9980e0bdc00c173b275604ce16697a
|
[
"Apache-2.0"
] |
permissive
|
angelmorinigo/tensorflow
|
0609a99122315ef466bfb1f8e5334b45361b9d29
|
3176ba990070cdde62b7cdf81747d70107d2e032
|
refs/heads/master
| 2020-03-17T15:33:47.145977
| 2018-05-16T16:58:05
| 2018-05-16T16:58:05
| 133,715,400
| 1
| 0
|
Apache-2.0
| 2018-05-16T19:45:02
| 2018-05-16T19:45:02
| null |
UTF-8
|
Python
| false
| false
| 8,571
|
py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for normalization layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.keras._impl import keras
from tensorflow.python.keras._impl.keras import testing_utils
from tensorflow.python.platform import test
class NormalizationLayersTest(test.TestCase):
def test_basic_batchnorm(self):
with self.test_session():
testing_utils.layer_test(
keras.layers.BatchNormalization,
kwargs={
'momentum': 0.9,
'epsilon': 0.1,
'gamma_regularizer': keras.regularizers.l2(0.01),
'beta_regularizer': keras.regularizers.l2(0.01)
},
input_shape=(3, 4, 2))
testing_utils.layer_test(
keras.layers.BatchNormalization,
kwargs={
'gamma_initializer': 'ones',
'beta_initializer': 'ones',
'moving_mean_initializer': 'zeros',
'moving_variance_initializer': 'ones'
},
input_shape=(3, 4, 2))
testing_utils.layer_test(
keras.layers.BatchNormalization,
kwargs={'scale': False,
'center': False},
input_shape=(3, 3))
def test_batchnorm_weights(self):
with self.test_session():
layer = keras.layers.BatchNormalization(scale=False, center=False)
layer.build((None, 3, 4))
self.assertEqual(len(layer.trainable_weights), 0)
self.assertEqual(len(layer.weights), 2)
layer = keras.layers.BatchNormalization()
layer.build((None, 3, 4))
self.assertEqual(len(layer.trainable_weights), 2)
self.assertEqual(len(layer.weights), 4)
def test_batchnorm_regularization(self):
with self.test_session():
layer = keras.layers.BatchNormalization(
gamma_regularizer='l1', beta_regularizer='l1')
layer.build((None, 3, 4))
self.assertEqual(len(layer.losses), 2)
max_norm = keras.constraints.max_norm
layer = keras.layers.BatchNormalization(
gamma_constraint=max_norm, beta_constraint=max_norm)
layer.build((None, 3, 4))
self.assertEqual(layer.gamma.constraint, max_norm)
self.assertEqual(layer.beta.constraint, max_norm)
def test_batchnorm_correctness(self):
with self.test_session():
model = keras.models.Sequential()
norm = keras.layers.BatchNormalization(input_shape=(10,), momentum=0.8)
model.add(norm)
model.compile(loss='mse', optimizer='sgd')
# centered on 5.0, variance 10.0
x = np.random.normal(loc=5.0, scale=10.0, size=(1000, 10))
model.fit(x, x, epochs=4, verbose=0)
out = model.predict(x)
out -= keras.backend.eval(norm.beta)
out /= keras.backend.eval(norm.gamma)
np.testing.assert_allclose(out.mean(), 0.0, atol=1e-1)
np.testing.assert_allclose(out.std(), 1.0, atol=1e-1)
def test_batchnorm_convnet(self):
if test.is_gpu_available(cuda_only=True):
with self.test_session(use_gpu=True):
model = keras.models.Sequential()
norm = keras.layers.BatchNormalization(
axis=1, input_shape=(3, 4, 4), momentum=0.8)
model.add(norm)
model.compile(loss='mse', optimizer='sgd')
# centered on 5.0, variance 10.0
x = np.random.normal(loc=5.0, scale=10.0, size=(1000, 3, 4, 4))
model.fit(x, x, epochs=4, verbose=0)
out = model.predict(x)
out -= np.reshape(keras.backend.eval(norm.beta), (1, 3, 1, 1))
out /= np.reshape(keras.backend.eval(norm.gamma), (1, 3, 1, 1))
np.testing.assert_allclose(np.mean(out, axis=(0, 2, 3)), 0.0, atol=1e-1)
np.testing.assert_allclose(np.std(out, axis=(0, 2, 3)), 1.0, atol=1e-1)
def test_batchnorm_convnet_channel_last(self):
with self.test_session():
# keras.backend.set_learning_phase(True)
model = keras.models.Sequential()
norm = keras.layers.BatchNormalization(
axis=-1, input_shape=(4, 4, 3), momentum=0.8)
model.add(norm)
model.compile(loss='mse', optimizer='sgd')
# centered on 5.0, variance 10.0
x = np.random.normal(loc=5.0, scale=10.0, size=(1000, 4, 4, 3))
model.fit(x, x, epochs=4, verbose=0)
out = model.predict(x)
out -= np.reshape(keras.backend.eval(norm.beta), (1, 1, 1, 3))
out /= np.reshape(keras.backend.eval(norm.gamma), (1, 1, 1, 3))
np.testing.assert_allclose(np.mean(out, axis=(0, 1, 2)), 0.0, atol=1e-1)
np.testing.assert_allclose(np.std(out, axis=(0, 1, 2)), 1.0, atol=1e-1)
def test_shared_batchnorm(self):
"""Test that a BN layer can be shared across different data streams.
"""
with self.test_session():
# Test single layer reuse
bn = keras.layers.BatchNormalization()
x1 = keras.layers.Input(shape=(10,))
_ = bn(x1)
x2 = keras.layers.Input(shape=(10,))
y2 = bn(x2)
x = np.random.normal(loc=5.0, scale=10.0, size=(2, 10))
model = keras.models.Model(x2, y2)
model.compile('sgd', 'mse')
model.train_on_batch(x, x)
self.assertEqual(len(bn.updates), 4)
self.assertEqual(len(model.updates), 2)
self.assertEqual(len(model.get_updates_for(x1)), 0)
self.assertEqual(len(model.get_updates_for(x2)), 2)
# Test model-level reuse
x3 = keras.layers.Input(shape=(10,))
y3 = model(x3)
new_model = keras.models.Model(x3, y3, name='new_model')
self.assertEqual(len(new_model.updates), 2)
self.assertEqual(len(model.updates), 4)
self.assertEqual(len(new_model.get_updates_for(x3)), 2)
new_model.compile('sgd', 'mse')
new_model.train_on_batch(x, x)
def test_that_trainable_disables_updates(self):
with self.test_session():
val_a = np.random.random((10, 4))
val_out = np.random.random((10, 4))
a = keras.layers.Input(shape=(4,))
layer = keras.layers.BatchNormalization(input_shape=(4,))
b = layer(a)
model = keras.models.Model(a, b)
model.trainable = False
assert not model.updates
model.compile('sgd', 'mse')
assert not model.updates
x1 = model.predict(val_a)
model.train_on_batch(val_a, val_out)
x2 = model.predict(val_a)
self.assertAllClose(x1, x2, atol=1e-7)
model.trainable = True
model.compile('sgd', 'mse')
assert model.updates
model.train_on_batch(val_a, val_out)
x2 = model.predict(val_a)
assert np.abs(np.sum(x1 - x2)) > 1e-5
layer.trainable = False
model.compile('sgd', 'mse')
assert not model.updates
x1 = model.predict(val_a)
model.train_on_batch(val_a, val_out)
x2 = model.predict(val_a)
self.assertAllClose(x1, x2, atol=1e-7)
def test_batchnorm_trainable(self):
"""Tests that batchnorm layer is trainable when learning phase is enabled.
Computes mean and std for current inputs then
applies batch normalization using them.
"""
with self.test_session():
bn_mean = 0.5
bn_std = 10.
val_a = np.expand_dims(np.arange(10.), axis=1)
def get_model(bn_mean, bn_std):
inp = keras.layers.Input(shape=(1,))
x = keras.layers.BatchNormalization()(inp)
model1 = keras.models.Model(inp, x)
model1.set_weights([
np.array([1.]),
np.array([0.]),
np.array([bn_mean]),
np.array([bn_std**2])
])
return model1
# Simulates training-mode with trainable layer.
# Should use mini-batch statistics.
keras.backend.set_learning_phase(1)
model = get_model(bn_mean, bn_std)
model.compile(loss='mse', optimizer='rmsprop')
out = model.predict(val_a)
self.assertAllClose(
(val_a - np.mean(val_a)) / np.std(val_a), out, atol=1e-3)
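def _batchnorm_reference(x, gamma=1.0, beta=0.0, epsilon=1e-3):
  """Reference sketch (illustrative assumption, not used by the tests above).

  In training mode a BatchNormalization layer with weights gamma and beta maps a
  batch to gamma * (x - batch_mean) / sqrt(batch_var + epsilon) + beta, which is
  the relation test_batchnorm_trainable verifies with gamma=1 and beta=0.
  """
  mean = np.mean(x, axis=0)
  var = np.var(x, axis=0)
  return gamma * (x - mean) / np.sqrt(var + epsilon) + beta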
if __name__ == '__main__':
test.main()
|
[
"gardener@tensorflow.org"
] |
gardener@tensorflow.org
|
c34f28b064723496b9f76b20880853d1b861b23c
|
bad62c2b0dfad33197db55b44efeec0bab405634
|
/sdk/loganalytics/azure-mgmt-loganalytics/azure/mgmt/loganalytics/aio/operations/_tables_operations.py
|
1cfeb4326088fb395f5d11a07bd548a359826a3a
|
[
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] |
permissive
|
test-repo-billy/azure-sdk-for-python
|
20c5a2486456e02456de17515704cb064ff19833
|
cece86a8548cb5f575e5419864d631673be0a244
|
refs/heads/master
| 2022-10-25T02:28:39.022559
| 2022-10-18T06:05:46
| 2022-10-18T06:05:46
| 182,325,031
| 0
| 0
|
MIT
| 2019-07-25T22:28:52
| 2019-04-19T20:59:15
|
Python
|
UTF-8
|
Python
| false
| false
| 32,472
|
py
|
# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Optional, TypeVar, Union, cast
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._tables_operations import build_create_or_update_request_initial, build_delete_request_initial, build_get_request, build_list_by_workspace_request, build_migrate_request, build_update_request_initial
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class TablesOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.loganalytics.aio.LogAnalyticsManagementClient`'s
:attr:`tables` attribute.
"""
models = _models
def __init__(self, *args, **kwargs) -> None:
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
def list_by_workspace(
self,
resource_group_name: str,
workspace_name: str,
**kwargs: Any
) -> AsyncIterable[_models.TablesListResult]:
"""Gets all the tables for the specified Log Analytics workspace.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:keyword api_version: Api Version. Default value is "2021-12-01-preview". Note that overriding
this default value may result in unsupported behavior.
:paramtype api_version: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either TablesListResult or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.loganalytics.models.TablesListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2021-12-01-preview")) # type: str
cls = kwargs.pop('cls', None) # type: ClsType[_models.TablesListResult]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_workspace_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
api_version=api_version,
template_url=self.list_by_workspace.metadata['url'],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
else:
request = build_list_by_workspace_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
api_version=api_version,
template_url=next_link,
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("TablesListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_workspace.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/tables"} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
workspace_name: str,
table_name: str,
parameters: _models.Table,
**kwargs: Any
) -> Optional[_models.Table]:
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2021-12-01-preview")) # type: str
content_type = kwargs.pop('content_type', _headers.pop('Content-Type', "application/json")) # type: Optional[str]
cls = kwargs.pop('cls', None) # type: ClsType[Optional[_models.Table]]
_json = self._serialize.body(parameters, 'Table')
request = build_create_or_update_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
table_name=table_name,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self._create_or_update_initial.metadata['url'],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('Table', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/tables/{tableName}"} # type: ignore
@distributed_trace_async
async def begin_create_or_update(
self,
resource_group_name: str,
workspace_name: str,
table_name: str,
parameters: _models.Table,
**kwargs: Any
) -> AsyncLROPoller[_models.Table]:
"""Update or Create a Log Analytics workspace table.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param table_name: The name of the table.
:type table_name: str
:param parameters: The parameters required to update table properties.
:type parameters: ~azure.mgmt.loganalytics.models.Table
:keyword api_version: Api Version. Default value is "2021-12-01-preview". Note that overriding
this default value may result in unsupported behavior.
:paramtype api_version: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either Table or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.loganalytics.models.Table]
:raises: ~azure.core.exceptions.HttpResponseError
"""
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2021-12-01-preview")) # type: str
content_type = kwargs.pop('content_type', _headers.pop('Content-Type', "application/json")) # type: Optional[str]
cls = kwargs.pop('cls', None) # type: ClsType[_models.Table]
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial( # type: ignore
resource_group_name=resource_group_name,
workspace_name=workspace_name,
table_name=table_name,
parameters=parameters,
api_version=api_version,
content_type=content_type,
cls=lambda x,y,z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('Table', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True:
polling_method = cast(AsyncPollingMethod, AsyncARMPolling(
lro_delay,
lro_options={'final-state-via': 'azure-async-operation'},
**kwargs
)) # type: AsyncPollingMethod
elif polling is False: polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/tables/{tableName}"} # type: ignore
async def _update_initial(
self,
resource_group_name: str,
workspace_name: str,
table_name: str,
parameters: _models.Table,
**kwargs: Any
) -> Optional[_models.Table]:
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2021-12-01-preview")) # type: str
content_type = kwargs.pop('content_type', _headers.pop('Content-Type', "application/json")) # type: Optional[str]
cls = kwargs.pop('cls', None) # type: ClsType[Optional[_models.Table]]
_json = self._serialize.body(parameters, 'Table')
request = build_update_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
table_name=table_name,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self._update_initial.metadata['url'],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('Table', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/tables/{tableName}"} # type: ignore
@distributed_trace_async
async def begin_update(
self,
resource_group_name: str,
workspace_name: str,
table_name: str,
parameters: _models.Table,
**kwargs: Any
) -> AsyncLROPoller[_models.Table]:
"""Update a Log Analytics workspace table.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param table_name: The name of the table.
:type table_name: str
:param parameters: The parameters required to update table properties.
:type parameters: ~azure.mgmt.loganalytics.models.Table
:keyword api_version: Api Version. Default value is "2021-12-01-preview". Note that overriding
this default value may result in unsupported behavior.
:paramtype api_version: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either Table or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.loganalytics.models.Table]
:raises: ~azure.core.exceptions.HttpResponseError
"""
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2021-12-01-preview")) # type: str
content_type = kwargs.pop('content_type', _headers.pop('Content-Type', "application/json")) # type: Optional[str]
cls = kwargs.pop('cls', None) # type: ClsType[_models.Table]
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._update_initial( # type: ignore
resource_group_name=resource_group_name,
workspace_name=workspace_name,
table_name=table_name,
parameters=parameters,
api_version=api_version,
content_type=content_type,
cls=lambda x,y,z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('Table', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True:
polling_method = cast(AsyncPollingMethod, AsyncARMPolling(
lro_delay,
lro_options={'final-state-via': 'azure-async-operation'},
**kwargs
)) # type: AsyncPollingMethod
elif polling is False: polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/tables/{tableName}"} # type: ignore
@distributed_trace_async
async def get(
self,
resource_group_name: str,
workspace_name: str,
table_name: str,
**kwargs: Any
) -> _models.Table:
"""Gets a Log Analytics workspace table.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param table_name: The name of the table.
:type table_name: str
:keyword api_version: Api Version. Default value is "2021-12-01-preview". Note that overriding
this default value may result in unsupported behavior.
:paramtype api_version: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Table, or the result of cls(response)
:rtype: ~azure.mgmt.loganalytics.models.Table
:raises: ~azure.core.exceptions.HttpResponseError
"""
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2021-12-01-preview")) # type: str
cls = kwargs.pop('cls', None) # type: ClsType[_models.Table]
request = build_get_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
table_name=table_name,
api_version=api_version,
template_url=self.get.metadata['url'],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('Table', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/tables/{tableName}"} # type: ignore
async def _delete_initial( # pylint: disable=inconsistent-return-statements
self,
resource_group_name: str,
workspace_name: str,
table_name: str,
**kwargs: Any
) -> None:
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2021-12-01-preview")) # type: str
cls = kwargs.pop('cls', None) # type: ClsType[None]
request = build_delete_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
table_name=table_name,
api_version=api_version,
template_url=self._delete_initial.metadata['url'],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/tables/{tableName}"} # type: ignore
@distributed_trace_async
async def begin_delete( # pylint: disable=inconsistent-return-statements
self,
resource_group_name: str,
workspace_name: str,
table_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Delete a Log Analytics workspace table.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param table_name: The name of the table.
:type table_name: str
:keyword api_version: Api Version. Default value is "2021-12-01-preview". Note that overriding
this default value may result in unsupported behavior.
:paramtype api_version: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2021-12-01-preview")) # type: str
cls = kwargs.pop('cls', None) # type: ClsType[None]
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial( # type: ignore
resource_group_name=resource_group_name,
workspace_name=workspace_name,
table_name=table_name,
api_version=api_version,
cls=lambda x,y,z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True:
polling_method = cast(AsyncPollingMethod, AsyncARMPolling(
lro_delay,
lro_options={'final-state-via': 'azure-async-operation'},
**kwargs
)) # type: AsyncPollingMethod
elif polling is False: polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/tables/{tableName}"} # type: ignore
@distributed_trace_async
async def migrate( # pylint: disable=inconsistent-return-statements
self,
resource_group_name: str,
workspace_name: str,
table_name: str,
**kwargs: Any
) -> None:
"""Migrate a Log Analytics table from support of the Data Collector API and Custom Fields features
to support of Data Collection Rule-based Custom Logs.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param table_name: The name of the table.
:type table_name: str
:keyword api_version: Api Version. Default value is "2021-12-01-preview". Note that overriding
this default value may result in unsupported behavior.
:paramtype api_version: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2021-12-01-preview")) # type: str
cls = kwargs.pop('cls', None) # type: ClsType[None]
request = build_migrate_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
table_name=table_name,
api_version=api_version,
template_url=self.migrate.metadata['url'],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
migrate.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/tables/{tableName}/migrate"} # type: ignore
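# Hedged usage sketch (an illustrative assumption, not part of the generated
# client): the async operations above are normally reached through the
# management client's `tables` attribute. The subscription, resource group and
# workspace names below are placeholders.
if __name__ == "__main__":
    import asyncio

    from azure.identity.aio import DefaultAzureCredential
    from azure.mgmt.loganalytics.aio import LogAnalyticsManagementClient

    async def _example():
        async with DefaultAzureCredential() as credential:
            async with LogAnalyticsManagementClient(credential, "<subscription-id>") as client:
                # Iterate the paged result returned by TablesOperations.list_by_workspace.
                async for table in client.tables.list_by_workspace("my-resource-group", "my-workspace"):
                    print(table.name)

    asyncio.run(_example())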
|
[
"noreply@github.com"
] |
noreply@github.com
|
71e14762493667ad2bae0d74ce90bd364e9333a7
|
780183a7842ad548703f3a62be0d1413fc901254
|
/frappe/frappe/sessions.py
|
f2c5efe8e3fb2c2bf63699f3729c398fb244d934
|
[
"MIT"
] |
permissive
|
Shreyasnaik01/Library-Management
|
9ab49281fd331d73c85c0d6f15797be97ecdbfc4
|
8bda4131309897c22e2fcbc54b402aded35a5523
|
refs/heads/master
| 2023-08-29T02:36:15.972349
| 2021-10-19T13:43:43
| 2021-10-19T13:43:43
| 418,891,924
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,260
|
py
|
# Copyright (c) 2021, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
"""
Boot session from cache or build
Session bootstraps info needed by common client side activities including
permission, homepage, default variables, system defaults etc
"""
import frappe, json
from frappe import _
import frappe.utils
from frappe.utils import cint, cstr
import frappe.model.meta
import frappe.defaults
import frappe.translate
import redis
from six.moves.urllib.parse import unquote
from six import text_type
from frappe.cache_manager import clear_user_cache
@frappe.whitelist(allow_guest=True)
def clear(user=None):
frappe.local.session_obj.update(force=True)
frappe.local.db.commit()
clear_user_cache(frappe.session.user)
frappe.response['message'] = _("Cache Cleared")
def clear_sessions(user=None, keep_current=False, device=None, force=False):
'''Clear other sessions of the current user. Called at login / logout
:param user: user name (default: current user)
:param keep_current: keep current session (default: false)
:param device: delete sessions of this device (default: desktop, mobile)
:param force: triggered by the user (default false)
'''
reason = "Logged In From Another Session"
if force:
reason = "Force Logged out by the user"
for sid in get_sessions_to_clear(user, keep_current, device):
delete_session(sid, reason=reason)
def get_sessions_to_clear(user=None, keep_current=False, device=None):
'''Returns sessions of the current user. Called at login / logout
:param user: user name (default: current user)
:param keep_current: keep current session (default: false)
:param device: delete sessions of this device (default: desktop, mobile)
'''
if not user:
user = frappe.session.user
if not device:
device = ("desktop", "mobile")
if not isinstance(device, (tuple, list)):
device = (device,)
offset = 0
if user == frappe.session.user:
simultaneous_sessions = frappe.db.get_value('User', user, 'simultaneous_sessions') or 1
offset = simultaneous_sessions - 1
condition = ''
if keep_current:
condition = ' AND sid != {0}'.format(frappe.db.escape(frappe.session.sid))
return frappe.db.sql_list("""
SELECT `sid` FROM `tabSessions`
WHERE `tabSessions`.user=%(user)s
AND device in %(device)s
{condition}
ORDER BY `lastupdate` DESC
LIMIT 100 OFFSET {offset}""".format(condition=condition, offset=offset),
{"user": user, "device": device})
def delete_session(sid=None, user=None, reason="Session Expired"):
from frappe.core.doctype.activity_log.feed import logout_feed
frappe.cache().hdel("session", sid)
frappe.cache().hdel("last_db_session_update", sid)
if sid and not user:
user_details = frappe.db.sql("""select user from tabSessions where sid=%s""", sid, as_dict=True)
if user_details: user = user_details[0].get("user")
logout_feed(user, reason)
frappe.db.sql("""delete from tabSessions where sid=%s""", sid)
frappe.db.commit()
def clear_all_sessions(reason=None):
"""This effectively logs out all users"""
frappe.only_for("Administrator")
if not reason: reason = "Deleted All Active Session"
for sid in frappe.db.sql_list("select sid from `tabSessions`"):
delete_session(sid, reason=reason)
def get_expired_sessions():
'''Returns list of expired sessions'''
expired = []
for device in ("desktop", "mobile"):
expired += frappe.db.sql_list("""SELECT `sid`
FROM `tabSessions`
WHERE (NOW() - `lastupdate`) > %s
AND device = %s""", (get_expiry_period_for_query(device), device))
return expired
def clear_expired_sessions():
"""This function is meant to be called from scheduler"""
for sid in get_expired_sessions():
delete_session(sid, reason="Session Expired")
def get():
"""get session boot info"""
from frappe.boot import get_bootinfo, get_unseen_notes
from frappe.utils.change_log import get_change_log
bootinfo = None
if not getattr(frappe.conf,'disable_session_cache', None):
# check if cache exists
bootinfo = frappe.cache().hget("bootinfo", frappe.session.user)
if bootinfo:
bootinfo['from_cache'] = 1
bootinfo["user"]["recent"] = json.dumps(\
frappe.cache().hget("user_recent", frappe.session.user))
if not bootinfo:
# if not create it
bootinfo = get_bootinfo()
frappe.cache().hset("bootinfo", frappe.session.user, bootinfo)
try:
frappe.cache().ping()
except redis.exceptions.ConnectionError:
message = _("Redis cache server not running. Please contact Administrator / Tech support")
if 'messages' in bootinfo:
bootinfo['messages'].append(message)
else:
bootinfo['messages'] = [message]
# check only when clear cache is done, and don't cache this
if frappe.local.request:
bootinfo["change_log"] = get_change_log()
bootinfo["metadata_version"] = frappe.cache().get_value("metadata_version")
if not bootinfo["metadata_version"]:
bootinfo["metadata_version"] = frappe.reset_metadata_version()
bootinfo.notes = get_unseen_notes()
for hook in frappe.get_hooks("extend_bootinfo"):
frappe.get_attr(hook)(bootinfo=bootinfo)
bootinfo["lang"] = frappe.translate.get_user_lang()
bootinfo["disable_async"] = frappe.conf.disable_async
bootinfo["setup_complete"] = cint(frappe.db.get_single_value('System Settings', 'setup_complete'))
bootinfo["is_first_startup"] = cint(frappe.db.get_single_value('System Settings', 'is_first_startup'))
return bootinfo
def get_csrf_token():
if not frappe.local.session.data.csrf_token:
generate_csrf_token()
return frappe.local.session.data.csrf_token
def generate_csrf_token():
frappe.local.session.data.csrf_token = frappe.generate_hash()
frappe.local.session_obj.update(force=True)
class Session:
def __init__(self, user, resume=False, full_name=None, user_type=None):
self.sid = cstr(frappe.form_dict.get('sid') or
unquote(frappe.request.cookies.get('sid', 'Guest')))
self.user = user
self.device = frappe.form_dict.get("device") or "desktop"
self.user_type = user_type
self.full_name = full_name
self.data = frappe._dict({'data': frappe._dict({})})
self.time_diff = None
# set local session
frappe.local.session = self.data
if resume:
self.resume()
else:
if self.user:
self.start()
def start(self):
"""start a new session"""
# generate sid
if self.user=='Guest':
sid = 'Guest'
else:
sid = frappe.generate_hash()
self.data.user = self.user
self.data.sid = sid
self.data.data.user = self.user
self.data.data.session_ip = frappe.local.request_ip
if self.user != "Guest":
self.data.data.update({
"last_updated": frappe.utils.now(),
"session_expiry": get_expiry_period(self.device),
"full_name": self.full_name,
"user_type": self.user_type,
"device": self.device,
"session_country": get_geo_ip_country(frappe.local.request_ip) if frappe.local.request_ip else None,
})
# insert session
if self.user!="Guest":
self.insert_session_record()
# update user
user = frappe.get_doc("User", self.data['user'])
frappe.db.sql("""UPDATE `tabUser`
SET
last_login = %(now)s,
last_ip = %(ip)s,
last_active = %(now)s
WHERE name=%(name)s""", {
'now': frappe.utils.now(),
'ip': frappe.local.request_ip,
'name': self.data['user']
})
user.run_notifications("before_change")
user.run_notifications("on_update")
frappe.db.commit()
def insert_session_record(self):
frappe.db.sql("""insert into `tabSessions`
(`sessiondata`, `user`, `lastupdate`, `sid`, `status`, `device`)
values (%s , %s, NOW(), %s, 'Active', %s)""",
(str(self.data['data']), self.data['user'], self.data['sid'], self.device))
# also add to memcache
frappe.cache().hset("session", self.data.sid, self.data)
def resume(self):
"""non-login request: load a session"""
import frappe
from frappe.auth import validate_ip_address
data = self.get_session_record()
if data:
self.data.update({'data': data, 'user':data.user, 'sid': self.sid})
self.user = data.user
validate_ip_address(self.user)
self.device = data.device
else:
self.start_as_guest()
if self.sid != "Guest":
frappe.local.user_lang = frappe.translate.get_user_lang(self.data.user)
frappe.local.lang = frappe.local.user_lang
def get_session_record(self):
"""get session record, or return the standard Guest Record"""
from frappe.auth import clear_cookies
r = self.get_session_data()
if not r:
frappe.response["session_expired"] = 1
clear_cookies()
self.sid = "Guest"
r = self.get_session_data()
return r
def get_session_data(self):
if self.sid=="Guest":
return frappe._dict({"user":"Guest"})
data = self.get_session_data_from_cache()
if not data:
data = self.get_session_data_from_db()
return data
def get_session_data_from_cache(self):
data = frappe.cache().hget("session", self.sid)
if data:
data = frappe._dict(data)
session_data = data.get("data", {})
# set user for correct timezone
self.time_diff = frappe.utils.time_diff_in_seconds(frappe.utils.now(),
session_data.get("last_updated"))
expiry = get_expiry_in_seconds(session_data.get("session_expiry"))
if self.time_diff > expiry:
self._delete_session()
data = None
return data and data.data
def get_session_data_from_db(self):
self.device = frappe.db.sql('SELECT `device` FROM `tabSessions` WHERE `sid`=%s', self.sid)
self.device = self.device and self.device[0][0] or 'desktop'
rec = frappe.db.sql("""
SELECT `user`, `sessiondata`
FROM `tabSessions` WHERE `sid`=%s AND
(NOW() - lastupdate) < %s
""", (self.sid, get_expiry_period_for_query(self.device)))
if rec:
data = frappe._dict(eval(rec and rec[0][1] or '{}'))
data.user = rec[0][0]
else:
self._delete_session()
data = None
return data
def _delete_session(self):
delete_session(self.sid, reason="Session Expired")
def start_as_guest(self):
"""all guests share the same 'Guest' session"""
self.user = "Guest"
self.start()
def update(self, force=False):
"""extend session expiry"""
if (frappe.session['user'] == "Guest" or frappe.form_dict.cmd=="logout"):
return
now = frappe.utils.now()
self.data['data']['last_updated'] = now
self.data['data']['lang'] = str(frappe.lang)
# update session in db
last_updated = frappe.cache().hget("last_db_session_update", self.sid)
time_diff = frappe.utils.time_diff_in_seconds(now, last_updated) if last_updated else None
# database persistence is secondary, don't update it too often
updated_in_db = False
if force or (time_diff==None) or (time_diff > 600):
# update sessions table
frappe.db.sql("""update `tabSessions` set sessiondata=%s,
lastupdate=NOW() where sid=%s""" , (str(self.data['data']),
self.data['sid']))
# update last active in user table
frappe.db.sql("""update `tabUser` set last_active=%(now)s where name=%(name)s""", {
"now": now,
"name": frappe.session.user
})
frappe.db.commit()
frappe.cache().hset("last_db_session_update", self.sid, now)
updated_in_db = True
# set in memcache
frappe.cache().hset("session", self.sid, self.data)
return updated_in_db
def get_expiry_period_for_query(device=None):
if frappe.db.db_type == 'postgres':
return get_expiry_period(device)
else:
return get_expiry_in_seconds(device=device)
def get_expiry_in_seconds(expiry=None, device=None):
if not expiry:
expiry = get_expiry_period(device)
parts = expiry.split(":")
return (cint(parts[0]) * 3600) + (cint(parts[1]) * 60) + cint(parts[2])
def get_expiry_period(device="desktop"):
if device=="mobile":
key = "session_expiry_mobile"
default = "720:00:00"
else:
key = "session_expiry"
default = "06:00:00"
exp_sec = frappe.defaults.get_global_default(key) or default
	# in case seconds are missing
if len(exp_sec.split(':')) == 2:
exp_sec = exp_sec + ':00'
return exp_sec
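def _expiry_example():
	"""Worked example (illustrative assumption, not called anywhere in this module).

	The default desktop expiry "06:00:00" is parsed by get_expiry_in_seconds as
	hours:minutes:seconds, i.e. 6 * 3600 + 0 * 60 + 0 = 21600 seconds.
	"""
	return get_expiry_in_seconds("06:00:00") == 21600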
def get_geo_from_ip(ip_addr):
try:
from geolite2 import geolite2
with geolite2 as f:
reader = f.reader()
data = reader.get(ip_addr)
return frappe._dict(data)
except ImportError:
return
except ValueError:
return
except TypeError:
return
def get_geo_ip_country(ip_addr):
match = get_geo_from_ip(ip_addr)
if match:
return match.country
|
[
"you@example.com"
] |
you@example.com
|
5e07f54214c2e42f5d3fcea511e087cabd630ece
|
1989d4bc58c7f886c6afdc44d83af9b626ea5aa9
|
/node_modules/mongoose/node_modules/mongodb/node_modules/mongodb-core/node_modules/kerberos/build/config.gypi
|
2f1bdbe92e174996b2a40b303b43adfe72d1c4e2
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
navin1976/MDT-API
|
eebb4fefb20aef7234ceb8a6e9f40231043b7d79
|
fbf7ad256e488daebd471a13822786257f929070
|
refs/heads/master
| 2021-01-12T12:00:05.645995
| 2016-09-21T15:21:20
| 2016-09-21T15:21:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,758
|
gypi
|
# Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"clang": 1,
"host_arch": "x64",
"icu_data_file": "icudt54l.dat",
"icu_data_in": "../../deps/icu/source/data/in/icudt54l.dat",
"icu_endianness": "l",
"icu_gyp_path": "tools/icu/icu-generic.gyp",
"icu_locales": "en,root",
"icu_path": "./deps/icu",
"icu_small": "true",
"icu_ver_major": "54",
"node_install_npm": "true",
"node_prefix": "",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_openssl": "false",
"node_shared_v8": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_use_dtrace": "true",
"node_use_etw": "false",
"node_use_mdb": "false",
"node_use_openssl": "true",
"node_use_perfctr": "false",
"openssl_no_asm": 0,
"python": "/usr/bin/python",
"target_arch": "x64",
"uv_library": "static_library",
"uv_parent_path": "/deps/uv/",
"uv_use_dtrace": "true",
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 1,
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 0,
"v8_random_seed": 0,
"v8_use_snapshot": "false",
"want_separate_host_toolset": 0,
"nodedir": "/Users/ThatTobMate/.node-gyp/0.12.4",
"copy_dev_lib": "true",
"standalone_static_library": 1,
"save_dev": "",
"browser": "",
"viewer": "man",
"rollback": "true",
"usage": "",
"globalignorefile": "/usr/local/etc/npmignore",
"init_author_url": "",
"shell": "/usr/local/bin/zsh",
"parseable": "",
"shrinkwrap": "true",
"init_license": "ISC",
"if_present": "",
"cache_max": "Infinity",
"init_author_email": "",
"sign_git_tag": "",
"cert": "",
"git_tag_version": "true",
"local_address": "",
"long": "",
"fetch_retries": "2",
"npat": "",
"registry": "https://registry.npmjs.org/",
"key": "",
"message": "%s",
"versions": "",
"globalconfig": "/usr/local/etc/npmrc",
"always_auth": "",
"spin": "true",
"cache_lock_retries": "10",
"cafile": "",
"heading": "npm",
"fetch_retry_mintimeout": "10000",
"proprietary_attribs": "true",
"access": "",
"json": "",
"description": "true",
"engine_strict": "",
"https_proxy": "",
"init_module": "/Users/ThatTobMate/.npm-init.js",
"userconfig": "/Users/ThatTobMate/.npmrc",
"node_version": "0.12.4",
"user": "501",
"save": "true",
"editor": "subl -w -n",
"tag": "latest",
"global": "",
"optional": "true",
"bin_links": "true",
"force": "",
"searchopts": "",
"depth": "Infinity",
"rebuild_bundle": "true",
"searchsort": "name",
"unicode": "true",
"fetch_retry_maxtimeout": "60000",
"ca": "",
"save_prefix": "^",
"strict_ssl": "true",
"tag_version_prefix": "v",
"dev": "",
"fetch_retry_factor": "10",
"group": "20",
"save_exact": "",
"cache_lock_stale": "60000",
"version": "",
"cache_min": "10",
"cache": "/Users/ThatTobMate/.npm",
"searchexclude": "",
"color": "true",
"save_optional": "",
"user_agent": "npm/2.10.1 node/v0.12.4 darwin x64",
"ignore_scripts": "",
"cache_lock_wait": "10000",
"production": "",
"save_bundle": "",
"init_version": "1.0.0",
"umask": "0022",
"git": "git",
"init_author_name": "",
"scope": "",
"onload_script": "",
"tmp": "/var/folders/58/2p07cjv57xnb5hys269tbwy40000gn/T",
"unsafe_perm": "true",
"link": "",
"prefix": "/usr/local"
}
}
|
[
"hale.j.0810@gmail.com"
] |
hale.j.0810@gmail.com
|
fe9398d41deafc9f535b1e5e54c14691d00280a9
|
0d990de062665b41e0e77da0756a21635915d659
|
/TP3/mediciones placa ada/continua/continua.py
|
f182a292e2f609451c950e14595c3c4f02a0222d
|
[] |
no_license
|
newtonis/22.05-Signals-and-Systems-Analysis-Projects
|
954b6b5f3bfdbf10a6124fbf49128118bdc44fae
|
1d759aa22d70ae19fd6fc66b335296b8242339e5
|
refs/heads/master
| 2022-03-23T08:58:55.544863
| 2019-12-02T03:07:55
| 2019-12-02T03:07:55
| 174,744,007
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,033
|
py
|
import matplotlib.pyplot as plt
import csv
import matplotlib.patches as mpatches
import numpy as np
filename = "adc.csv"
vin = []
vout = []
bin = []
with open(filename, 'r') as csvfile:
reader = csv.reader(csvfile)
next(reader, None)
for row in reader:
vin.append(float(row[0]))
bin.append(float(row[1]))
vout.append(float(row[2]))
ideal = np.arange(0, 5000, 0.01)
plt.plot(vin, vout, color="green", marker="o")
plt.plot(vin, bin, "orange", marker='o')
plt.plot(ideal, ideal, "blue")
plt.xlabel("Tensión de entrada (mV)")
plt.ylabel("Tensión de salida (mV)")
# add a grid
plt.minorticks_on()
plt.grid(which='major', linestyle='-', linewidth=0.3, color='black')
plt.grid(which='minor', linestyle=':', linewidth=0.1, color='black')
# add the legend patches
patches = [
mpatches.Patch(color="green", label="Analógica"),
mpatches.Patch(color="orange", label="Digital"),
mpatches.Patch(color="blue", label="Ideal")
]
# add the legend
plt.legend(handles=patches)
# show the prepared figure
plt.show()
|
[
"rparra@itba.edu.ar"
] |
rparra@itba.edu.ar
|
8bf53a909789453f987ea914f19cfea3897e44ae
|
bcf4316366954dd25a61ab9c2f02965613883184
|
/Argument Mining/run_final.py
|
7c5eb50dcc740b1efba519f2c533ba1cee72d276
|
[] |
no_license
|
sshivam95/Snowden2.0
|
ad1da2195d058d809a2392018a709057bd0e9ec6
|
298d683d748b52dee9d56d93a96d4352b66f36c7
|
refs/heads/master
| 2023-06-12T20:33:24.121193
| 2021-07-10T21:51:01
| 2021-07-10T21:51:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,099
|
py
|
import numpy as np
import pandas as pd
import spacy
from sklearn import svm
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import f1_score
from sklearn.preprocessing import normalize
nlp = spacy.load('en_core_web_sm')
train_file = 'train-data-prepared.json'
# Change to test filename location
test_file = 'val-data-prepared.json'
def write_file(predictions, id_df):
"""Write the predictions to JSON file"""
result = pd.concat([id_df, pd.DataFrame(predictions)], axis=1)
result.columns = ['id', 'label']
result.set_index('id')['label'].to_json(r'output.json')
def get_pos_vector(text):
"""Get the Parts-of-Speech feature vectors"""
pos_feature_dict = {'ADJ': 0, 'SPACE': 0, 'ADV': 0, 'INTJ': 0, 'SYM': 0, 'VERB': 0, 'SCONJ': 0, 'PART': 0, 'X': 0,
'PUNCT': 0, 'AUX': 0, 'ADP': 0, 'NUM': 0, 'PRON': 0, 'NOUN': 0, 'DET': 0, 'CCONJ': 0,
'PROPN': 0}
doc = nlp(text)
for token in doc:
pos = token.pos_
if pos in pos_feature_dict:
pos_feature_dict[pos] += 1
else:
pos_feature_dict[pos] = 1
values_list = []
for k in list(pos_feature_dict.keys()):
values_list.append(pos_feature_dict[k])
return values_list
def get_tag_vector(text):
"""Get a feature vector of parsed tree nodes."""
pos_feature_dict = {'VBP': 0, 'RBS': 0, 'VBZ': 0, 'WRB': 0, 'VB': 0, 'NNS': 0, 'WDT': 0, 'UH': 0, '-RRB-': 0,
'AFX': 0, 'CC': 0, 'WP': 0, 'VBN': 0, 'IN': 0, 'PRP$': 0, 'XX': 0, 'WP$': 0, 'RBR': 0, 'PDT': 0,
'HYPH': 0, 'POS': 0, '$': 0, 'NNPS': 0, 'MD': 0, '.': 0, 'VBD': 0, 'JJR': 0, 'NFP': 0, ',': 0,
'JJS': 0, 'DT': 0, '_SP': 0, 'VBG': 0, 'FW': 0, 'RP': 0, 'SYM': 0, 'LS': 0, 'CD': 0, 'RB': 0,
'EX': 0, '``': 0, 'PRP': 0, "''": 0, ':': 0, 'TO': 0, 'JJ': 0, 'ADD': 0, '-LRB-': 0, 'NN': 0,
'NNP': 0}
doc = nlp(text)
for token in doc:
pos = token.tag_
if pos in pos_feature_dict:
pos_feature_dict[pos] += 1
else:
pos_feature_dict[pos] = 1
values_list = []
for k in list(pos_feature_dict.keys()):
values_list.append(pos_feature_dict[k])
return values_list
def get_pos_tag_vector(text):
"""Get a combined feature vector of Parts-of-Speech and Tag"""
pos_vector = get_pos_vector(text)
tag_vector = get_tag_vector(text)
pos_vector.extend(tag_vector)
return pos_vector
def create_tf_idf_feature_vector(df, vectorizer, type):
"""Get tf/idf features."""
x_train = df['text'].to_list()
if type == 'train':
X_train_vec = vectorizer.fit_transform(x_train)
elif type == 'test':
X_train_vec = vectorizer.transform(x_train)
return X_train_vec.toarray()
def create_feature_vector(df, type, vectorizer):
text_list = df['text'].to_list()
vectors_list = [get_pos_tag_vector(text) for text in text_list]
# Get tf/idf feature vector
X_vec = create_tf_idf_feature_vector(df, vectorizer, type)
for i in range(len(vectors_list)):
vectors_list[i].extend(X_vec[i])
df['feature_vector'] = vectors_list
y = df['label']
X = np.stack(vectors_list, axis=0)
# Normalize the feature vector
X = normalize(X, axis=1, norm='l1')
return X, y, df
df_train = pd.read_json(train_file)
# Specify vectorizer
vectorizer = TfidfVectorizer()
# Create feature vector for training set
X_train, y_train, df_train = create_feature_vector(df_train, 'train', vectorizer)
df_test = pd.read_json(test_file)
# Create feature vector for test set
X_test, y_test, df_test = create_feature_vector(df_test, 'test', vectorizer)
id_test = df_test['id'].to_list()
id_test_df = pd.DataFrame(id_test)
print("SVM RBF")
clf_rbf = svm.SVC(kernel='rbf', C=10 ** 8)
clf_rbf.fit(X_train, y_train)
y_prediction_svm = clf_rbf.predict(X_test)
print(f1_score(y_true=y_test, y_pred=y_prediction_svm))
# Write the predictions to JSON file
write_file(y_prediction_svm, id_test_df)
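A rough sketch of the record layout the script above assumes for train-data-prepared.json and val-data-prepared.json, inferred from the columns it reads ('id', 'text', 'label'); the example texts and the meaning of the labels are illustrative assumptions only.

# Hypothetical input records; pd.read_json() over a list like this yields the
# 'id', 'text' and 'label' columns used by create_feature_vector() and write_file().
example_records = [
    {"id": "a1", "text": "School uniforms should be banned because they limit self-expression.", "label": 1},
    {"id": "a2", "text": "I went to the market yesterday afternoon.", "label": 0},
]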
|
[
"noreply@github.com"
] |
noreply@github.com
|
1d0bc33f6c20182daefaba5af9e8841db4fc115c
|
144d462ba4a87fbb89b37d28f421721eef4f04c8
|
/tic-tac-toe-socket.py
|
ef4dcda57c12bc47a2850749a548dea79d299a6f
|
[] |
no_license
|
arjunoggu/coding
|
c0083c5c025225be9fba14b5dbbff0d47dea865d
|
3ac21506af8e0f3825c42b8157cbcc5cdf7730a7
|
refs/heads/master
| 2022-12-03T06:20:13.283573
| 2020-08-14T10:57:19
| 2020-08-14T10:57:19
| 286,955,441
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,063
|
py
|
import socket
class ServerClient:
global board
play = True
board = ['', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ']
def __init__(self):
self.serversock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
def bind(self):
port = 1235
self.serversock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.serversock.bind(('', port))
self.serversock.listen(5)
def accept(self):
(clientsocket, address) = self.serversock.accept()
print('Got connection from', address)
return clientsocket
def connect(self, host, port):
self.serversock.connect((host, port))
def send(self, msg):
self.serversock.send(bytes(msg))
def receive(self):
return self.serversock.recv(1024).decode()
def stopApp(self):
print("Game Ended. Thanks for playing!")
self.serversock.close()
def bboard(self):
print('| ' ' | | ' ' | | ' ' |')
print("")
print('| ' ' | | ' ' | | ' ' |')
print("")
print('| ' ' | | ' ' | | ' ' |')
print("")
def display_board(self):
print('| ' + board[1] + ' | | ' + board[2] + ' | | ' + board[3] + ' |')
print("")
print('| ' + board[4] + ' | | ' + board[5] + ' | | ' + board[6] + ' |')
print("")
print('| ' + board[7] + ' | | ' + board[8] + ' | | ' + board[9] + ' |')
print("")
def human_input(self, mark):
while True:
inp = raw_input('Enter your move [1-9]:')
if inp.isdigit():
if int(inp) < 10 and int(inp) > 0:
inp = int(inp)
if board[inp] == " ":
return inp
else:
print("Please enter a valid move")
else:
print("Please enter a valid move")
else:
print("Only integers [1-9] are allowed\nPlease enter a valid move")
def winning(self, mark, board):
winning_place = [[1, 2, 3], [4, 5, 6], [7, 8, 9], [1, 4, 7], [2, 5, 8], [3, 6, 9], [1, 5, 9], [3, 5, 7]]
for win_place in winning_place:
if board[win_place[0]] == board[win_place[1]] == board[win_place[2]] == mark:
return True
def win_move(self, i, board, mark):
temp_board = list(board)
temp_board[i] = mark
if self.winning(mark, temp_board):
return True
else:
return False
def win_check(self, human, opponent):
winning_place = [[1, 2, 3], [4, 5, 6], [7, 8, 9], [1, 4, 7], [2, 5, 8], [3, 6, 9], [1, 5, 9], [3, 5, 7]]
for win_place in winning_place:
if board[win_place[0]] == board[win_place[1]] == board[win_place[2]] == human:
print('You won!!')
return False
elif board[win_place[0]] == board[win_place[1]] == board[win_place[2]] == opponent:
print('Opponent won')
return False
if ' ' not in board:
print('MATCH DRAW!!')
return False
return True
def end(self):
print("Game Ended. Thanks for playing!")
def main():
global board
play = True
app = ServerClient()
server = raw_input("Are u the server (y/n) :")
if server == 'y':
h1 = 'X'
h2 = 'O'
        print('Your moves are Xs')
app.bboard()
print('Waiting for other player to connect...')
app.bind()
client = app.accept()
print("Welcome to Tic Tac Toe Game!!")
reply = 'init'
while play:
play = app.win_check(h1, h2)
if play:
x = app.human_input(h1)
board[x] = h1
app.display_board()
                play = app.win_check(h1, h2)
                if play:
print("Waiting for opponent move")
x_str = str(x)
client.send(x_str.encode('utf-8'))
reply = int(client.recv(1024).decode())
board[reply] = h2
app.display_board()
else:
x_str = str(x)
client.send(x_str.encode('utf-8'))
stop = '0'
client.send(stop.encode('utf-8'))
app.stopApp()
else:
x_str = str(x)
client.send(x_str.encode('utf-8'))
stop = '0'
client.send(stop.encode('utf-8'))
app.stopApp()
#app.end()
elif server == 'n':
h1 = 'X'
h2 = 'O'
print('Your moves are Os')
client = raw_input("Enter opponent name to connect to:")
app.connect(client, 1235)
print("Welcome to Tic Tac Toe Game!!")
app.bboard()
print("Waiting for opponent move:")
reply = 'init'
while play:
            msg = int(app.receive())
            if msg == 0:
                # opponent signalled the end of the game; stop without touching the board
                play = False
                app.stopApp()
                break
            else:
                board[msg] = h1
                app.display_board()
o_check = app.win_check(h2, h1)
if o_check:
                if msg == 0:
reply = '0'
app.send(reply.encode())
break
o = app.human_input(h2)
board[o] = h2
o_str = str(o)
app.display_board()
if play:
app.send(o_str.encode('utf-8'))
print("Waiting for opponent move:")
else:
print('please enter valid answer')
if __name__ == '__main__':
main()
|
[
"arjunoggu11@gmail.com"
] |
arjunoggu11@gmail.com
|
466c50bd91fc4be61abb950479c4d47fb1041ed9
|
8ed80561e1b3c0bcdb6201cae8af845d5da23edc
|
/guppe/exercicios_secao_8/ex_10.py
|
62a551fc304de9116280ee458a9d1eaa9680822e
|
[] |
no_license
|
Fulvio7/curso-python-guppe
|
42d5a1ecd80c1f3b27dc3f5dad074a51c9b774eb
|
98966963f698eb33e65ed58a84f96e28f675848a
|
refs/heads/main
| 2023-08-28T13:31:12.916407
| 2021-10-09T19:03:17
| 2021-10-09T19:03:17
| 415,393,122
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 397
|
py
|
"""
10 - Write a function that receives two numbers and returns which of them
is the larger.
"""
def retorna_maior(n1, n2):
if n1 > n2:
return f'{n1} é maior.'
elif n2 > n1:
return f'{n2} é maior.'
return 'Os dois números são iguais! XP '
print('Descubra o maior número')
num1 = int(input('Num1 = '))
num2 = int(input('Num2 = '))
print(retorna_maior(num1, num2))
|
[
"fulvio.barichello@gmail.com"
] |
fulvio.barichello@gmail.com
|
6f5739437c1132e3e559ac5dced0ee9060cff126
|
0e3663763320586449c4206963e982abf81a0f9b
|
/sumogsuitealertscollector/main.py
|
01394d5e601350cf68c4ff000872eff52225f763
|
[
"Apache-2.0"
] |
permissive
|
blaise-sumo/sumologic-gsuitealertcenter
|
de3a38dc3e89fcf280e5632fded48134ff3d91b5
|
daba442c9eab39b6be9fba7c17d990f6a266dd92
|
refs/heads/master
| 2020-05-20T17:26:52.407000
| 2019-04-08T04:52:09
| 2019-04-08T04:52:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,290
|
py
|
# -*- coding: future_fstrings -*-
import traceback
import sys
import time
import os
import datetime
from concurrent import futures
from common.logger import get_logger
from omnistorage.factory import ProviderFactory
from sumoclient.factory import OutputHandlerFactory
from sumoclient.utils import get_current_timestamp, convert_epoch_to_utc_date, convert_utc_date_to_epoch
from common.config import Config
from oauth2client.service_account import ServiceAccountCredentials
from googleapiclient.discovery import build
class GSuiteAlertsCollector(object):
CONFIG_FILENAME = "gsuitealertcenter.yaml"
STOP_TIME_OFFSET_SECONDS = 10
DATE_FORMAT = '%Y-%m-%dT%H:%M:%S.%fZ'
MOVING_WINDOW_DELTA = 0.001
FUNCTION_TIMEOUT = 5*60
def __init__(self):
self.start_time = datetime.datetime.utcnow()
cfgpath = sys.argv[1] if len(sys.argv) > 1 else ''
self.root_dir = self.get_current_dir()
self.config = Config().get_config(self.CONFIG_FILENAME, self.root_dir, cfgpath)
self.log = get_logger(__name__, force_create=True, **self.config['Logging'])
self.collection_config = self.config['Collection']
self.api_config = self.config['GsuiteAlertCenter']
op_cli = ProviderFactory.get_provider(self.config['Collection']['ENVIRONMENT'])
self.kvstore = op_cli.get_storage("keyvalue", name=self.config['Collection']['DBNAME'])
self.DEFAULT_START_TIME_EPOCH = get_current_timestamp() - self.collection_config['BACKFILL_DAYS']*24*60*60
self.alertcli = self.get_alert_client()
def get_current_dir(self):
cur_dir = os.path.dirname(__file__)
return cur_dir
def get_credentials(self):
SCOPES = self.config['GsuiteAlertCenter']['SCOPES']
DELEGATED_EMAIL = self.config['GsuiteAlertCenter']['DELEGATED_EMAIL']
CREDENTIALS_FILEPATH = self.config['GsuiteAlertCenter']['CREDENTIALS_FILEPATH']
credentials = ServiceAccountCredentials.from_json_keyfile_name(CREDENTIALS_FILEPATH)
delegated_credentials = credentials.create_delegated(DELEGATED_EMAIL).create_scoped(SCOPES)
return delegated_credentials
def get_alert_client(self):
API_VERSION = self.config['GsuiteAlertCenter']['VERSION']
credentials = self.get_credentials()
alertcli = build('alertcenter', API_VERSION, credentials=credentials, cache_discovery=False)
return alertcli
def set_fetch_state(self, alert_type, start_time_epoch, end_time_epoch, pageToken=None):
if end_time_epoch: # end time epoch could be none in cases where no event is present
assert start_time_epoch <= end_time_epoch
obj = {
"pageToken": pageToken,
"alert_type": alert_type,
"start_time_epoch": start_time_epoch,
"end_time_epoch": end_time_epoch
}
self.kvstore.set(alert_type, obj)
return obj
def build_params(self, alert_type, start_time_epoch, end_time_epoch, pageToken, page_size):
params = {
'pageSize': page_size,
'pageToken': pageToken,
'filter': f'''create_time >= \"{convert_epoch_to_utc_date(start_time_epoch, self.DATE_FORMAT)}\" AND create_time <= \"{convert_epoch_to_utc_date(end_time_epoch, self.DATE_FORMAT)}\" AND type = \"{alert_type}\"''',
'orderBy': "create_time desc"
}
return params
def set_new_end_epoch_time(self, alert_type, start_time_epoch):
end_time_epoch = get_current_timestamp() - self.collection_config['END_TIME_EPOCH_OFFSET_SECONDS']
params = self.build_params(alert_type, start_time_epoch, end_time_epoch, None, 1)
response = self.alertcli.alerts().list(**params).execute()
start_date = convert_epoch_to_utc_date(start_time_epoch, self.DATE_FORMAT)
end_date = convert_epoch_to_utc_date(end_time_epoch, self.DATE_FORMAT)
if response.get("alerts") and len(response["alerts"]) > 0:
new_end_date = response["alerts"][0]["createTime"]
new_end_time_epoch = convert_utc_date_to_epoch(new_end_date)
obj = self.set_fetch_state(alert_type, start_time_epoch, new_end_time_epoch)
self.log.info(f'''Creating task for {alert_type} from {start_date} to {new_end_date}''')
return obj
else:
self.log.info(f'''No events are available for {alert_type} from {start_date} to {end_date}''')
return None
def transform_data(self, data):
# import random
# srcip = ["216.161.180.148", "54.203.63.36"]
for d in data:
d["createTime"] = convert_epoch_to_utc_date(int(time.time()), self.DATE_FORMAT)
return data
def is_time_remaining(self):
now = datetime.datetime.utcnow()
time_passed = (now - self.start_time).total_seconds()
self.log.info("checking time_passed: %s" % time_passed)
return time_passed + self.STOP_TIME_OFFSET_SECONDS < self.FUNCTION_TIMEOUT
def fetch(self, alert_type, start_time_epoch, end_time_epoch, pageToken):
params = self.build_params(alert_type, start_time_epoch, end_time_epoch, pageToken, self.api_config['PAGINATION_LIMIT'])
output_handler = OutputHandlerFactory.get_handler(self.config['Collection']['OUTPUT_HANDLER'], path="%s.json" % alert_type, config=self.config)
next_request = True
send_success = has_next_page = False
count = 0
alertcli = self.get_alert_client()
try:
while next_request:
count += 1
response = alertcli.alerts().list(**params).execute()
fetch_success = response.get("alerts")
if fetch_success:
data = response["alerts"]
data = self.transform_data(data)
send_success = output_handler.send(data)
# Todo save data and separate out fetching and sending pipelines
params['pageToken'] = response.get('next_page_token') if send_success else params['pageToken']
has_next_page = True if params['pageToken'] else False
self.log.info(f'''Finished Fetching Page: {count} Event Type: {alert_type} Datalen: {len(data)} starttime: {convert_epoch_to_utc_date(start_time_epoch, self.DATE_FORMAT)} endtime: {convert_epoch_to_utc_date(end_time_epoch, self.DATE_FORMAT)}''')
is_data_ingested = fetch_success and send_success
next_request = is_data_ingested and has_next_page and self.is_time_remaining()
if not (is_data_ingested or self.is_time_remaining()): # saving in case of failures or function timeout
self.set_fetch_state(alert_type, start_time_epoch, end_time_epoch, params["pageToken"])
elif not has_next_page:
self.log.info(f'''Moving starttime window for {alert_type} to {convert_epoch_to_utc_date(end_time_epoch + self.MOVING_WINDOW_DELTA, self.DATE_FORMAT)}''')
self.set_fetch_state(alert_type, end_time_epoch + self.MOVING_WINDOW_DELTA, None)
finally:
output_handler.close()
self.log.info(f''' Total Pages fetched {count} for Event Type: {alert_type}''')
def build_task_params(self):
tasks = []
for alert_type in self.api_config['ALERT_TYPES']:
if self.kvstore.has_key(alert_type):
obj = self.kvstore.get(alert_type)
if obj["end_time_epoch"] is None:
obj = self.set_new_end_epoch_time(alert_type, obj["start_time_epoch"])
else:
obj = self.set_new_end_epoch_time(alert_type, self.DEFAULT_START_TIME_EPOCH)
if obj is None: # no new events so continue
continue
tasks.append(obj)
self.log.info(f'''Building tasks {len(tasks)}''')
return tasks
def run(self):
self.log.info('Starting Gsuite AlertCenter Forwarder...')
task_params = self.build_task_params()
all_futures = {}
with futures.ThreadPoolExecutor(max_workers=self.config['Collection']['NUM_WORKERS']) as executor:
results = {executor.submit(self.fetch, **param): param for param in task_params}
all_futures.update(results)
for future in futures.as_completed(all_futures):
param = all_futures[future]
alert_type = param["alert_type"]
try:
future.result()
obj = self.kvstore.get(alert_type)
except Exception as exc:
self.log.error(f'''Alert Type: {alert_type} thread generated an exception: {exc}''', exc_info=True)
else:
self.log.info(f'''Alert Type: {alert_type} thread completed {obj}''')
def test(self):
params = {
"start_time_epoch": 1505228760,
"end_time_epoch": int(time.time()),
"alert_type": "User reported phishing",
"pageToken": None
}
self.fetch(**params)
def main(context=None):
try:
ns = GSuiteAlertsCollector()
ns.run()
# ns.test()
except BaseException as e:
traceback.print_exc()
if __name__ == '__main__':
main()
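A rough sketch of the configuration that gsuitealertcenter.yaml is expected to provide, collected from the keys the class above reads; every value shown here is a placeholder assumption, not the project's documented default.

# Hypothetical config structure (shown as a Python dict for illustration only).
EXAMPLE_CONFIG = {
    "Logging": {},  # whatever keyword arguments get_logger() accepts; not specified in this file
    "Collection": {
        "ENVIRONMENT": "onprem",
        "DBNAME": "gsuitealertcenter",
        "BACKFILL_DAYS": 7,
        "END_TIME_EPOCH_OFFSET_SECONDS": 60,
        "OUTPUT_HANDLER": "HTTP",
        "NUM_WORKERS": 2,
    },
    "GsuiteAlertCenter": {
        "VERSION": "v1beta1",                                        # assumed Alert Center API version
        "SCOPES": ["https://www.googleapis.com/auth/apps.alerts"],   # assumed OAuth scope
        "DELEGATED_EMAIL": "admin@example.com",
        "CREDENTIALS_FILEPATH": "/path/to/service_account.json",
        "PAGINATION_LIMIT": 100,
        "ALERT_TYPES": ["User reported phishing"],
    },
}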
|
[
"hp.iiita@gmail.com"
] |
hp.iiita@gmail.com
|
37281aad81bec02c5269f6e7836a07d07e82bdd5
|
cb52f8113203b19b80956ce4fb46b6291f5e0de6
|
/OOP_Problem_set/cash_desk.py
|
7db2b35aae0421c1850182715e004207e966a81f
|
[] |
no_license
|
eevlogieva/HackBulgaria_Programming101
|
0b8d2570b8b4932f8c1b758eb1a23b065e151dea
|
b2d8b5c7e73bb0063dd4cc9a1e35b4d611ed6b54
|
refs/heads/master
| 2020-05-26T09:49:54.468716
| 2015-04-21T13:40:21
| 2015-04-21T13:40:21
| 25,029,745
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 798
|
py
|
class CashDesk():
def __init__(self):
self.money = {100: 0, 50: 0, 20: 0, 10: 0, 5: 0, 2: 0, 1: 0}
def take_money(self, notes):
for note in notes:
self.money[note] += notes[note]
def total(self):
total = 0
for note in self.money:
total += self.money[note] * note
return total
def can_withdraw_money(self, amount_of_money):
money_lst = []
copy_money = (self.money).copy()
for note in copy_money:
while copy_money[note] > 0:
money_lst.append(note)
copy_money[note] -= 1
lst = sorted(money_lst)[::-1]
for item in lst:
if item <= amount_of_money:
amount_of_money -= item
return amount_of_money == 0
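A short usage sketch for the class above; the note counts are illustrative and not part of the original exercise.

# Illustrative use of CashDesk.
desk = CashDesk()
desk.take_money({100: 2, 20: 1, 5: 3})    # register 2x100, 1x20 and 3x5 notes
print(desk.total())                       # 235
print(desk.can_withdraw_money(135))       # True: 100 + 20 + 5 + 5 + 5
print(desk.can_withdraw_money(13))        # False: no combination of the held notes fits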
|
[
"eevlogieva@yahoo.com"
] |
eevlogieva@yahoo.com
|
96898f3e3eb0c9ddd687c732bcf3b7497158bfd5
|
a45cbdc8b6d8b358fbe5604997dc2d8423ea1b32
|
/project 6.py
|
2272b463a80d96bbcda1c8576c95226715bda9b7
|
[] |
no_license
|
GDM-git/database-project
|
e6508ab4073301e8faeb578428697546db9de9cf
|
31979d81c693a28a0325822a0005d418b254c64a
|
refs/heads/main
| 2023-05-07T21:38:15.054638
| 2021-05-28T04:51:26
| 2021-05-28T04:51:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,462
|
py
|
import psycopg2 as lib
def command_1():
database = lib.connect(dbname="postgres", user="postgres", password="123g321j", host="127.0.0.1", port="5433")
cursor = database.cursor()
cursor.execute('SELECT * FROM free_trucks_exists ();')
for row in cursor:
print(row)
database.commit()
cursor.close()
if database is not None:
database.close()
def command_2():
print("Введите id машины, которую хотите занять:")
message_in = str(input())
database = lib.connect(dbname="postgres", user="postgres", password="123g321j", host="127.0.0.1", port="5433")
cursor = database.cursor()
cursor.execute('SELECT * FROM truck_busy (%(id)s);', {"id": message_in})
for row in cursor:
print(row)
database.commit()
cursor.close()
if database is not None:
database.close()
def command_3():
print("Введите id машины, которую хотите освободить:")
message_in = str(input())
database = lib.connect(dbname="postgres", user="postgres", password="123g321j", host="127.0.0.1", port="5433")
cursor = database.cursor()
cursor.execute('SELECT * FROM truck_free (%(id)s);', {"id": message_in})
for row in cursor:
print(row)
database.commit()
cursor.close()
if database is not None:
database.close()
def command_4():
print("Введите id работника:")
message_in1 = str(input())
print("Введите дату")
message_in2 = str(input())
database = lib.connect(dbname="postgres", user="postgres", password="123g321j", host="127.0.0.1", port="5433")
cursor = database.cursor()
cursor.execute('SELECT * FROM work_date_exist (%(id)s, %(data)s);', {"id": message_in1, "data": message_in2})
for row in cursor:
print(row)
database.commit()
cursor.close()
if database is not None:
database.close()
def command_5():
print("Введите id работника:")
message_in = str(input())
database = lib.connect(dbname="postgres", user="postgres", password="123g321j", host="127.0.0.1", port="5433")
cursor = database.cursor()
cursor.execute('SELECT * FROM work_exist (%(id)s);', {"id": message_in})
for row in cursor:
print(row)
database.commit()
cursor.close()
if database is not None:
database.close()
def command_6():
print("Введите id объекта:")
message_in = str(input())
database = lib.connect(dbname="postgres", user="postgres", password="123g321j", host="127.0.0.1", port="5433")
cursor = database.cursor()
cursor.execute('SELECT * FROM view_object (%(id)s);', {"id": message_in})
for row in cursor:
print(row)
database.commit()
cursor.close()
if database is not None:
database.close()
def command_7():
print("Введите id объекта:")
message_in = str(input())
database = lib.connect(dbname="postgres", user="postgres", password="123g321j", host="127.0.0.1", port="5433")
cursor = database.cursor()
cursor.execute('SELECT * FROM view_object_link (%(id)s);', {"id": message_in})
for row in cursor:
print(row)
database.commit()
cursor.close()
if database is not None:
database.close()
def command_8():
database = lib.connect(dbname="postgres", user="postgres", password="123g321j", host="127.0.0.1", port="5433")
cursor = database.cursor()
cursor.execute('SELECT * FROM view_object_all ();')
for row in cursor:
print(row)
database.commit()
cursor.close()
if database is not None:
database.close()
if __name__ == "__main__":
print("Список команд:")
print("1 - free_trucks_exists () - посмотреть, есть ли свободные машины")
print("2 - truck_busy (id_in) - забрать грузовик с заданным id")
print("3 - truck_free (id_in) - освободить грузовик с заданным id")
print("4 - work_date_exist (id_in, date_in) - узнать, есть ли у данного работника в данный день какая-то работа")
print("5 - work_exist (id_in) - узнать, есть ли у данного работника какая-то работа")
print("6 - view_object (id_in) - посмотреть на объект с заданным id")
print("7 - view_object_link () - посмотреть на объект с заданным id из таблицы связи")
print("8 - view_object_all () - посмотреть на все объекты")
print("9 - выход")
while (True):
print("\nВведите нужную вам команду:")
message = str(input())
if message == '1':
command_1()
elif message == '2':
command_2()
elif message == '3':
command_3()
elif message == '4':
command_4()
elif message == '5':
command_5()
elif message == '6':
command_6()
elif message == '7':
command_7()
elif message == '8':
command_8()
elif message == '9':
break
|
[
"noreply@github.com"
] |
noreply@github.com
|
2d015e55784146c1b52cefe9e60a34d454a025c2
|
69ea17c792c5eee02dbf98f8cc90902e40bca08f
|
/modules/sentry.py
|
9e21eab652e13c02511371966db72059cc74699f
|
[] |
no_license
|
macherstube/git-auto-deploy
|
f662ac2d6ba172f65fec41d3bb20dc0a6b8096c8
|
3a0bc1f0cb70535c2206f5f5bc2a1964ba9eb6a2
|
refs/heads/main
| 2023-07-16T07:13:49.961448
| 2021-08-08T18:41:16
| 2021-08-08T18:41:16
| 388,553,704
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 634
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
##########################################################
# title: sentry.py
# author: Josias Bruderer
# date: 29.07.2021
# desc: simple implementation of sentry
##########################################################
import sentry_sdk
from sentry_sdk import capture_exception
def sentry_init(url):
sentry_sdk.init(
url,
# Set traces_sample_rate to 1.0 to capture 100%
# of transactions for performance monitoring.
# We recommend adjusting this value in production.
traces_sample_rate=1.0,
environment="development"
)
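A minimal usage sketch for the helper above; the DSN is a placeholder and the divide-by-zero exists only to produce an exception to report.

# Illustrative usage (not part of the module): initialise Sentry, then report a handled error.
if __name__ == "__main__":
    sentry_init("https://examplePublicKey@o0.ingest.sentry.io/0")  # placeholder DSN
    try:
        1 / 0
    except ZeroDivisionError as exc:
        capture_exception(exc)  # ships the handled exception to Sentry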
|
[
"josias@macherstube.ch"
] |
josias@macherstube.ch
|
3518fd2cc14d2ce5ab3297741d351dc4635fe976
|
5a04919d14d3f34815987a202d520609c17cc605
|
/problems/forritun/hefja_i_veldi/tests/gen.py
|
1397acd8903770e81c30e87255b3e79a5210b121
|
[] |
no_license
|
SuprDewd/forritunarverkefni
|
49e9864c6efaa192747b3f004f79a53a4519c10a
|
702819f9fa2d106fede4ff2284a00b5141662493
|
refs/heads/master
| 2021-01-15T11:14:44.316308
| 2014-08-09T15:09:09
| 2014-08-09T15:09:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 332
|
py
|
import math
tests = [ (10, 4), (10, -4), (15, -1), (1, 4), (0, 3), (1, -2), (-1, 2), (-1, 3), (-1, 0), (13, 2), (-13, 2), (13, 3), (-13, 3), (-5, -6) ]
for i, t in enumerate(tests):
with open('T%d.in' % i, 'w') as f: f.write('%d %d\n' % tuple(t))
with open('T%d.out' % i, 'w') as f: f.write('%f\n' % math.pow(t[0], t[1]))
|
[
"suprdewd@gmail.com"
] |
suprdewd@gmail.com
|
0aedf92297cf51ec02e89bc2bc311a1853fdf095
|
4725b14e7f5f808ada1a5e1776f25dcd37249c0d
|
/empg_erp/empg_erp/doctype/city/city.py
|
a5d82b5ec64c4f77ff11b1fd94f85c705fda7f0d
|
[
"MIT"
] |
permissive
|
shahzadzameen/empghr_erp
|
cce408247f2d9efe39bce4da2f832d8d3640bf98
|
ba24a833b7fea8f17b155435064ead761f2a134c
|
refs/heads/master
| 2020-09-01T14:01:48.922805
| 2019-11-01T11:58:43
| 2019-11-01T11:58:43
| 218,974,425
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 247
|
py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2019, zameen and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class City(Document):
pass
|
[
"shahzad.naser@zameen.com"
] |
shahzad.naser@zameen.com
|
a8612ac222aae209f9c985d771c92a8900557d7e
|
f76e8b03862264731be92bc16e4ced7b7e078b0a
|
/instagram/urls.py
|
64a8b30ddbdcb0ea06908f107e67a02fbccf2f17
|
[
"MIT"
] |
permissive
|
bryomajor/instalite
|
b8d400d6b1ecc337e5008ddd6738e8df4653df05
|
c3854b30235960fae89682c55c88637fb8fb05ad
|
refs/heads/master
| 2022-12-11T15:06:32.222163
| 2021-04-07T10:13:46
| 2021-04-07T10:13:46
| 221,914,550
| 0
| 1
|
MIT
| 2021-06-10T22:16:42
| 2019-11-15T11:54:53
|
Python
|
UTF-8
|
Python
| false
| false
| 1,134
|
py
|
from django.conf.urls import url
from django.conf import settings
from django.conf.urls.static import static
from . import views
urlpatterns = [
url(r'^home/', views.timeline, name = 'index'),
url(r'^$', views.home, name = 'home'),
url(r'^signup/$', views.signup, name = 'signup'),
url(r'^activate/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$', views.activate, name='activate'),
url(r'^search/', views.search_results, name = 'search_results'),
url(r'^user/(?P<username>\w+)', views.profile, name='profile'),
url(r'^accounts/edit/', views.edit_profile, name='edit_profile'),
url(r'^upload/$', views.upload_image, name='upload_image'),
url(r'^follow/(?P<user_id>\d+)', views.follow, name = 'follow'),
url(r'^unfollow/(?P<user_id>\d+)', views.unfollow, name='unfollow'),
url(r'^comment/(?P<image_id>\d+)', views.comment, name='comment'),
url(r'^like/(?P<image_id>\d+)', views.like, name='like'),
url(r'^is_liked/', views.is_liked, name = 'is_liked')
]
if settings.DEBUG:
urlpatterns+= static(settings.MEDIA_URL, document_root = settings.MEDIA_ROOT)
|
[
"bryomajor@gmail.com"
] |
bryomajor@gmail.com
|
53b6f743c52e16229449c9f99dc18438957c017f
|
4290daae480aabfc35c85374a468085a6fa1a1ac
|
/ctt-server/openapi_server/test/test_result_controller.py
|
652d6ffdfa52d2b04f99be41db1f222c6a23aec6
|
[
"Apache-2.0"
] |
permissive
|
pjakovits/radon-ctt
|
01c8bc760372f6887798722c291674971b10a86d
|
8c73e05a83ef66bd6e9dba6608d2bee089df7e86
|
refs/heads/master
| 2021-05-20T19:13:35.919410
| 2021-03-28T17:07:20
| 2021-03-28T17:07:20
| 252,386,523
| 0
| 0
|
Apache-2.0
| 2020-04-02T07:34:24
| 2020-04-02T07:34:23
| null |
UTF-8
|
Python
| false
| false
| 2,596
|
py
|
# coding: utf-8
from __future__ import absolute_import
import unittest
from flask import json
from six import BytesIO
from openapi_server.models.result import Result # noqa: E501
from openapi_server.test import BaseTestCase
class TestResultController(BaseTestCase):
"""ResultController integration test stubs"""
def test_create_result(self):
"""Test case for create_result
Creates new result
"""
        body = Result()
response = self.client.open(
'/result',
method='POST',
data=json.dumps(body),
content_type='application/json')
self.assert200(response,
'Response body is : ' + response.data.decode('utf-8'))
def test_delete_result_by_uuid(self):
"""Test case for delete_result_by_uuid
Delete a result
"""
response = self.client.open(
'/result/{result_uuid}'.format(result_uuid='result_uuid_example'),
method='DELETE')
self.assert200(response,
'Response body is : ' + response.data.decode('utf-8'))
def test_download_result_by_uuid(self):
"""Test case for download_result_by_uuid
Downloads the generated results
"""
headers = {
'Accept': 'application/json',
}
response = self.client.open(
'/RadonCTT/result/{result_uuid}/download'.format(result_uuid='result_uuid_example'),
method='GET',
headers=headers)
self.assert200(response,
'Response body is : ' + response.data.decode('utf-8'))
def test_get_result_by_uuid(self):
"""Test case for get_result_by_uuid
Retrieve a result
"""
headers = {
'Accept': 'application/json',
}
response = self.client.open(
'/RadonCTT/result/{result_uuid}'.format(result_uuid='result_uuid_example'),
method='GET',
headers=headers)
self.assert200(response,
'Response body is : ' + response.data.decode('utf-8'))
def test_get_results(self):
"""Test case for get_results
Get all results
"""
headers = {
'Accept': 'application/json',
}
response = self.client.open(
'/RadonCTT/result',
method='GET',
headers=headers)
self.assert200(response,
'Response body is : ' + response.data.decode('utf-8'))
if __name__ == '__main__':
unittest.main()
|
[
"duellmann@iste.uni-stuttgart.de"
] |
duellmann@iste.uni-stuttgart.de
|
97ddfdff0283a737ef6126c126f4452ed384a780
|
4f3e46570427bed0954de06ea53f1ae739f7db48
|
/node_modules/node-zopfli/build/config.gypi
|
63819a5203dc55a645175f3597fa89e89fd0d8af
|
[
"MIT"
] |
permissive
|
jonoforbes/ether-app
|
c6311d637a302f05da2ba04fc2c3bb462ff7d5fd
|
f8c98df0b9772f6cb4abe842976163c23a3db94f
|
refs/heads/master
| 2020-07-20T07:35:40.181413
| 2017-06-21T16:53:40
| 2017-06-21T16:53:40
| 94,337,999
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,641
|
gypi
|
# Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"asan": 0,
"debug_devtools": "node",
"force_dynamic_crt": 0,
"host_arch": "x64",
"icu_data_file": "icudt58l.dat",
"icu_data_in": "../../deps/icu-small/source/data/in/icudt58l.dat",
"icu_endianness": "l",
"icu_gyp_path": "tools/icu/icu-generic.gyp",
"icu_locales": "en,root",
"icu_path": "deps/icu-small",
"icu_small": "true",
"icu_ver_major": "58",
"llvm_version": 0,
"node_byteorder": "little",
"node_enable_d8": "false",
"node_enable_v8_vtunejit": "false",
"node_install_npm": "false",
"node_module_version": 51,
"node_no_browser_globals": "false",
"node_prefix": "/usr/local/Cellar/node/7.1.0",
"node_release_urlbase": "",
"node_shared": "false",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_openssl": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_use_bundled_v8": "true",
"node_use_dtrace": "true",
"node_use_etw": "false",
"node_use_lttng": "false",
"node_use_openssl": "true",
"node_use_perfctr": "false",
"node_use_v8_platform": "true",
"openssl_fips": "",
"openssl_no_asm": 0,
"shlib_suffix": "51.dylib",
"target_arch": "x64",
"uv_parent_path": "/deps/uv/",
"uv_use_dtrace": "true",
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 1,
"v8_inspector": "true",
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 0,
"v8_random_seed": 0,
"v8_use_snapshot": "true",
"want_separate_host_toolset": 0,
"want_separate_host_toolset_mkpeephole": 0,
"xcode_version": "8.0",
"nodedir": "/Users/jonoforbes/.node-gyp/7.1.0",
"copy_dev_lib": "true",
"standalone_static_library": 1,
"fallback_to_build": "true",
"module": "/Users/jonoforbes/Developer/Ether/winecellar/node_modules/node-zopfli/lib/binding/node-v51-darwin-x64/zopfli.node",
"module_name": "zopfli",
"module_path": "/Users/jonoforbes/Developer/Ether/winecellar/node_modules/node-zopfli/lib/binding/node-v51-darwin-x64",
"save_dev": "",
"legacy_bundling": "",
"dry_run": "",
"viewer": "man",
"only": "",
"browser": "",
"also": "",
"rollback": "true",
"usage": "",
"globalignorefile": "/usr/local/etc/npmignore",
"shell": "/bin/bash",
"maxsockets": "50",
"init_author_url": "",
"shrinkwrap": "true",
"parseable": "",
"init_license": "ISC",
"if_present": "",
"sign_git_tag": "",
"init_author_email": "",
"cache_max": "Infinity",
"long": "",
"local_address": "",
"git_tag_version": "true",
"cert": "",
"registry": "https://registry.npmjs.org/",
"npat": "",
"fetch_retries": "2",
"versions": "",
"message": "%s",
"key": "",
"globalconfig": "/usr/local/etc/npmrc",
"always_auth": "",
"global_style": "",
"cache_lock_retries": "10",
"heading": "npm",
"proprietary_attribs": "true",
"fetch_retry_mintimeout": "10000",
"json": "",
"access": "",
"https_proxy": "",
"engine_strict": "",
"description": "true",
"userconfig": "/Users/jonoforbes/.npmrc",
"init_module": "/Users/jonoforbes/.npm-init.js",
"user": "",
"node_version": "7.1.0",
"save": "",
"editor": "vi",
"tag": "latest",
"progress": "true",
"global": "",
"optional": "true",
"force": "",
"bin_links": "true",
"searchopts": "",
"depth": "Infinity",
"searchsort": "name",
"rebuild_bundle": "true",
"unicode": "true",
"fetch_retry_maxtimeout": "60000",
"tag_version_prefix": "v",
"strict_ssl": "true",
"save_prefix": "^",
"ca": "",
"save_exact": "",
"group": "20",
"fetch_retry_factor": "10",
"dev": "",
"version": "",
"cache_lock_stale": "60000",
"cache_min": "10",
"searchexclude": "",
"cache": "/Users/jonoforbes/.npm",
"color": "true",
"save_optional": "",
"ignore_scripts": "",
"user_agent": "npm/3.10.9 node/v7.1.0 darwin x64",
"cache_lock_wait": "10000",
"production": "",
"save_bundle": "",
"umask": "0022",
"init_version": "1.0.0",
"scope": "",
"init_author_name": "",
"git": "git",
"unsafe_perm": "true",
"tmp": "/var/folders/yk/bb2m1jwj6rs0y57gdcq8d2_m0000gn/T",
"onload_script": "",
"link": "",
"prefix": "/usr/local"
}
}
|
[
"jonoforbes@me.com"
] |
jonoforbes@me.com
|
6f080c6049634c09d57e902811bd291c2241e3a2
|
995b0681b0808cc8b3a4e8312bd504fa4d7835bb
|
/src/transformers/models/mbart/__init__.py
|
b98d226625034e2b357c61c0162b99c5cc2ffdd9
|
[
"Apache-2.0"
] |
permissive
|
NatLun137/transformers
|
3e4ae6adc145ddf7204ab127feb7f6d593873d58
|
5fc8cfddb7220cdfbea843ff4db890648fb7017e
|
refs/heads/master
| 2023-01-27T15:37:49.230126
| 2020-12-10T20:16:21
| 2020-12-10T20:16:21
| 320,227,406
| 1
| 0
|
Apache-2.0
| 2020-12-10T20:24:34
| 2020-12-10T09:49:28
|
Python
|
UTF-8
|
Python
| false
| false
| 1,293
|
py
|
# flake8: noqa
# There's no way to ignore "F401 '...' imported but unused" warnings in this
# module, but to preserve other warnings. So, don't check this module at all.
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ...file_utils import is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available
from .configuration_mbart import MBartConfig
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
if is_tokenizers_available():
from .tokenization_mbart_fast import MBartTokenizerFast
if is_torch_available():
from .modeling_mbart import MBartForConditionalGeneration
if is_tf_available():
from .modeling_tf_mbart import TFMBartForConditionalGeneration
|
[
"noreply@github.com"
] |
noreply@github.com
|
1a19af9a0fadedcb4bd7e45597b3e62571e51821
|
3c6b0521eb788dc5e54e46370373e37eab4a164b
|
/holistic_eval/roberta_mnli/examples/scripts/scripts/run_experiment.py
|
9d5f149859a049824f329d3eeca723f861738f66
|
[
"Apache-2.0"
] |
permissive
|
y12uc231/DialEvalMetrics
|
7402f883390b94854f5d5ae142f700a697d7a21c
|
f27d717cfb02b08ffd774e60faa6b319a766ae77
|
refs/heads/main
| 2023-09-02T21:56:07.232363
| 2021-11-08T21:25:24
| 2021-11-08T21:25:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,598
|
py
|
import os
import smtplib
from email.mime.text import MIMEText
mail_host = 'smtp.163.com'
mail_user = 'aigu3525'
mail_pass = 'WOaibaobao'
sender = 'aigu3525@163.com'
receivers = ['liuyixian0317@gmail.com']
def run_training(ex_title, type, paras_dict, node, GPU, logger=None , print_=False):
print('_'*100)
if type == 'MNLI': train_file = 'run_glue.py'
opt_dict = paras_dict
try:
os.mkdir('scripts/logs/' + type)
    except OSError:
        pass  # directory may already exist
message = MIMEText('Start training experiment {}'.format(str(ex_title)), 'plain', 'utf-8')
message['Subject'] = 'Experiment {}'.format(str(ex_title))
message['From'] = sender
message['To'] = receivers[0]
try:
smtpObj = smtplib.SMTP()
smtpObj.connect(mail_host, 25)
smtpObj.login(mail_user, mail_pass)
smtpObj.sendmail(
sender, receivers, message.as_string())
smtpObj.quit()
print('success')
except:
        print('error')  # print the error
if True:
print_file_train = 'scripts/logs/'+ type + '/' + ex_title+ '.print'
keys = list(opt_dict)
values = [opt_dict[key] for key in keys]
paras = ''
for i in range(len(keys)):
if values[i] == 'False':
continue
paras += ' --'
paras += keys[i]
if values[i] != 'True':
paras += '='
paras += str(values[i])
if True:
commend_list_train = []
# print(paras)
commend_list_train.append('ssh node'+node + ' \"')
commend_list_train.append('cd /root/liuyx/transformers/examples;')
commend_list_train.append('CUDA_VISIBLE_DEVICES=' + str(GPU) + ' /root/anaconda3/envs/transformer/bin/python ')
commend_list_train.append(train_file + paras +' 2>&1 | tee '+print_file_train + '')
commend_list_train.append('\"')
print(commend_list_train)
pred_return = os.system(''.join(commend_list_train))
message = MIMEText('Experiment {}, training end'.format(str(ex_title)), 'plain', 'utf-8')
message['Subject'] = 'Experiment {}'.format(str(ex_title))
message['From'] = sender
message['To'] = receivers[0]
try:
smtpObj = smtplib.SMTP()
smtpObj.connect(mail_host, 25)
smtpObj.login(mail_user, mail_pass)
smtpObj.sendmail(
sender, receivers, message.as_string())
smtpObj.quit()
print('success')
except:
        print('error')  # print the error
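A hypothetical invocation of run_training(), sketched from the parameters the function reads; the hyperparameter names follow the transformers run_glue.py CLI, but every value, as well as the node and GPU ids, is a placeholder.

# Illustrative call (not the original experiment's settings).
if __name__ == "__main__":
    example_params = {
        "model_name_or_path": "roberta-large",
        "task_name": "MNLI",
        "do_train": "True",          # flags set to 'True' are emitted without a value
        "do_eval": "True",
        "fp16": "False",             # flags set to 'False' are skipped entirely
        "output_dir": "/tmp/mnli_run",
    }
    run_training("demo_run", "MNLI", example_params, node="01", GPU=0)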
|
[
"yitingye@cs.cmu.edu"
] |
yitingye@cs.cmu.edu
|
f5a1eed04c1ee8d3a1ddc5ce81fc7b55a2556021
|
260324a1ddd44ce43d432182d9a24e85520ccd28
|
/manage.py
|
b0540dc2795b540d41c18263b2556e2055a7e405
|
[] |
no_license
|
bengHak/garden_backend
|
cb2de2df64e4308b7aac7a304b887531b5de46c6
|
c50682b388a82d1590bf39a793822c5b19954802
|
refs/heads/master
| 2020-11-30T04:42:32.983563
| 2020-02-22T07:30:51
| 2020-02-22T07:30:51
| 230,305,013
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 637
|
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'garden_api_server.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[
"41236155+bengHak@users.noreply.github.com"
] |
41236155+bengHak@users.noreply.github.com
|
68e23e9acc94c9c9804a24d2b04c9d7e1e9a30ca
|
e464c0243dd93286463f013a9134cefa4e3ea2dd
|
/apps/events/views.py
|
48897e3ff07b7fed3c026aaa67b626442261873a
|
[] |
no_license
|
bboud88/tendenci
|
533f5ec9eab28c2c18f9c83ff44380fd790c7d2c
|
ace9e21008b8f520dd94902cae6079cb5bb21f93
|
refs/heads/master
| 2020-03-21T21:41:18.239973
| 2018-06-28T23:39:09
| 2018-06-28T23:39:09
| 139,077,752
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 179,631
|
py
|
# NOTE: When updating the registration scheme be sure to check with the
# anonymous registration implementation of events in the registration
# module.
import re
import calendar
import itertools
import os
import subprocess
import time
import xlwt
from collections import OrderedDict
from datetime import datetime, date, timedelta
from decimal import Decimal
from copy import deepcopy
from django.db.models import Q
from django.utils.translation import ugettext_lazy as _
import simplejson as json
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.contrib.contenttypes.models import ContentType
from django.core.files.base import ContentFile
from django.core.files.storage import default_storage
from django.shortcuts import get_object_or_404, redirect
from django.template import RequestContext
from django.http import HttpResponseRedirect, Http404, HttpResponse
from django.http import QueryDict
from django.core.urlresolvers import reverse
from django.contrib import messages
from django.template.loader import render_to_string
from django.template.defaultfilters import date as date_filter
from django.forms.formsets import formset_factory
from django.forms.models import modelformset_factory
from django.views.decorators.csrf import csrf_exempt
from django.db import connection
from tendenci.libs.utils import python_executable
from tendenci.apps.base.decorators import password_required
from tendenci.apps.base.http import Http403
from tendenci.apps.site_settings.utils import get_setting
from tendenci.apps.perms.decorators import is_enabled, superuser_required
from tendenci.apps.perms.utils import (
has_perm,
get_notice_recipients,
get_query_filters,
update_perms_and_save,
has_view_perm,
assign_files_perms)
from tendenci.apps.event_logs.models import EventLog
from tendenci.apps.meta.models import Meta as MetaTags
from tendenci.apps.meta.forms import MetaForm
from tendenci.apps.files.models import File
from tendenci.apps.theme.shortcuts import themed_response as render_to_response
from tendenci.apps.imports.forms import ImportForm
from tendenci.apps.imports.models import Import
from tendenci.apps.base.utils import convert_absolute_urls, checklist_update
from tendenci.apps.imports.utils import (
render_excel)
from tendenci.apps.base.http import HttpCustomResponseRedirect
from tendenci.apps.discounts.models import Discount
from tendenci.apps.notifications import models as notification
from tendenci.apps.events.ics.utils import run_precreate_ics
from tendenci.apps.events.models import (
Event,
Registration,
Registrant,
Speaker,
Organizer,
Type,
RegConfPricing,
Addon,
AddonOption,
CustomRegForm,
CustomRegFormEntry,
CustomRegField,
CustomRegFieldEntry,
RegAddonOption,
RegistrationConfiguration,
EventPhoto,
Place,
RecurringEvent)
from tendenci.apps.events.forms import (
EventForm,
Reg8nEditForm,
PlaceForm,
SpeakerBaseFormSet,
SpeakerForm,
OrganizerForm,
TypeForm,
MessageAddForm,
RegistrationForm,
RegistrantForm,
FreePassCheckForm,
RegistrantBaseFormSet,
Reg8nConfPricingForm,
PendingEventForm,
AddonForm,
AddonOptionForm,
AddonOptionBaseModelFormSet,
FormForCustomRegForm,
RegConfPricingBaseModelFormSet,
GlobalRegistrantSearchForm,
EventICSForm,
EmailForm,
DisplayAttendeesForm,
ReassignTypeForm,
EventRegistrantSearchForm,
MemberRegistrationForm,
ApplyRecurringChangesForm,
EventSearchForm,
EventExportForm,
EventSimpleSearchForm,
EventReportFilterForm)
from tendenci.apps.events.utils import (
email_registrants,
render_event_email,
get_default_reminder_template,
add_registration,
get_registrants_prices,
registration_has_started,
registration_has_ended,
registration_earliest_time,
get_pricing,
clean_price,
get_event_spots_taken,
get_ievent,
get_vevents,
copy_event,
email_admins,
get_active_days,
get_ACRF_queryset,
get_custom_registrants_initials,
render_registrant_excel,
event_import_process,
create_member_registration,
get_recurrence_dates,
get_week_days,
get_next_month,
get_prev_month)
from tendenci.apps.events.addons.forms import RegAddonForm
from tendenci.apps.events.addons.formsets import RegAddonBaseFormSet
from tendenci.apps.events.addons.utils import get_available_addons
def custom_reg_form_preview(request, id, template_name="events/custom_reg_form_preview.html"):
"""
Preview a custom registration form.
"""
form = get_object_or_404(CustomRegForm, id=id)
form_for_form = FormForCustomRegForm(
request.POST or None,
request.FILES or None,
custom_reg_form=form,
user=request.user)
for field in form_for_form.fields:
try:
form_for_form.fields[field].initial = request.GET.get(field, '')
except:
pass
context = {"form": form, "form_for_form": form_for_form}
return render_to_response(template_name, context, RequestContext(request))
@login_required
def event_custom_reg_form_list(request, event_id, template_name="events/event_custom_reg_form_list.html"):
"""
List custom registration forms for this event.
"""
event = get_object_or_404(Event, pk=event_id)
if not has_perm(request.user, 'events.change_event', event):
raise Http403
reg_conf = event.registration_configuration
regconfpricings = reg_conf.regconfpricing_set.all()
if reg_conf.use_custom_reg_form:
if reg_conf.bind_reg_form_to_conf_only:
reg_conf.reg_form.form_for_form = FormForCustomRegForm(
custom_reg_form=reg_conf.reg_form)
else:
for price in regconfpricings:
price.reg_form.form_for_form = FormForCustomRegForm(custom_reg_form=price.reg_form)
context = {
'event': event,
'reg_conf': reg_conf,
'regconfpricings': regconfpricings
}
return render_to_response(template_name, context, RequestContext(request))
@is_enabled('events')
def details(request, id=None, private_slug=u'', template_name="events/view.html"):
if not id and not private_slug:
return HttpResponseRedirect(reverse('event.month'))
event = get_object_or_404(Event, pk=id)
days = []
if not event.on_weekend:
days = get_active_days(event)
if event.is_private(private_slug):
pass
else:
if not has_view_perm(request.user, 'events.view_event', event):
raise Http403
if event.registration_configuration:
event.limit = event.get_limit()
else:
reg_conf = RegistrationConfiguration()
reg_conf.save()
event.registration_configuration = reg_conf
event.save()
event.spots_taken, event.spots_available = event.get_spots_status()
EventLog.objects.log(instance=event)
speakers = event.speaker_set.order_by('pk')
organizers = event.organizer_set.all().order_by('pk') or None
organizer = None
if organizers:
organizer = organizers[0]
event_ct = event.content_type()
speaker_ct = ContentType.objects.get_for_model(Speaker)
org_ct = ContentType.objects.get_for_model(Organizer)
place_ct = ContentType.objects.get_for_model(Place)
event_files = File.objects.filter(content_type=event_ct, object_id=event.id)
speaker_files = File.objects.filter(content_type=speaker_ct, object_id__in=speakers)
if organizer:
organizer_files = File.objects.filter(content_type=org_ct, object_id=organizer.id)
else:
organizer_files = File.objects.none()
place_files = File.objects.filter(content_type=place_ct, object_id=event.place_id)
f_speakers = speakers.filter(featured=True)
speakers_length = speakers.count()
if f_speakers:
speakers = f_speakers
else:
speakers = speakers[:1]
registration = event.registration_configuration
pricing = registration.get_available_pricings(request.user, is_strict=False)
free_event = True
if pricing:
pricing = pricing.order_by('position', '-price')
free_event = not bool([p for p in pricing if p.price > 0])
return render_to_response(template_name, {
'days': days,
'event': event,
'speakers': speakers,
'speakers_length': speakers_length,
'organizer': organizer,
'now': datetime.now(),
'addons': event.addon_set.filter(status=True),
'event_files': event_files,
'speaker_files': speaker_files,
'organizer_files': organizer_files,
'place_files': place_files,
'free_event': free_event
}, context_instance=RequestContext(request))
@is_enabled('events')
def speaker_list(request, event_id, template_name='events/speakers.html'):
event = get_object_or_404(Event, pk=event_id)
speakers = event.speaker_set.order_by('pk')
return render_to_response(template_name, {
'event': event,
'speakers': speakers,
}, context_instance=RequestContext(request))
@is_enabled('events')
def view_attendees(request, event_id, template_name='events/attendees.html'):
event = get_object_or_404(Event, pk=event_id)
if not event.can_view_registrants(request.user):
raise Http403
limit = event.get_limit()
registration = event.registration_configuration
pricing = registration.get_available_pricings(request.user, is_strict=False)
pricing = pricing.order_by('position', '-price')
reg_started = registration_has_started(event, pricing=pricing)
reg_ended = registration_has_ended(event, pricing=pricing)
earliest_time = registration_earliest_time(event, pricing=pricing)
# spots taken
if limit > 0:
slots_taken, slots_available = event.get_spots_status()
else:
slots_taken, slots_available = (-1, -1)
is_registrant = False
# check if user has already registered
if hasattr(request.user, 'registrant_set'):
is_registrant = request.user.registrant_set.filter(registration__event=event).exists()
return render_to_response(template_name, {
'event': event,
'registration': registration,
'limit': limit,
'slots_taken': slots_taken,
'slots_available': slots_available,
'reg_started': reg_started,
'reg_ended': reg_ended,
'earliest_time': earliest_time,
'is_registrant': is_registrant,
}, context_instance=RequestContext(request))
def month_redirect(request):
return HttpResponseRedirect(reverse('event.month'))
@is_enabled('events')
def search(request, redirect=False, past=False, template_name="events/search.html"):
"""
This page lists out all the upcoming events starting
from today. If a search index is available, this page
also provides the option to search through events.
"""
if redirect:
return HttpResponseRedirect(reverse('events'))
query = request.GET.get('q', None)
# Handle legacy tag links
if query and "tag:" in query:
return HttpResponseRedirect("%s?q=%s&search_category=tags__icontains" %(reverse('event.search'), query.replace('tag:', '')))
filters = get_query_filters(request.user, 'events.view_event')
events = Event.objects.filter(filters).distinct()
events = events.filter(enable_private_slug=False)
if request.user.is_authenticated():
events = events.select_related()
start_dt = datetime.now()
event_type = ''
with_registration = None
form = EventSearchForm(request.GET or {'start_dt':start_dt.strftime('%Y-%m-%d')},
user=request.user)
if form.is_valid():
with_registration = form.cleaned_data.get('registration', None)
event_type = form.cleaned_data.get('event_type', None)
event_group = form.cleaned_data.get('event_group', None)
start_dt = form.cleaned_data.get('start_dt', None)
cat = form.cleaned_data.get('search_category', None)
try:
start_dt = datetime.strptime(start_dt, '%Y-%m-%d')
except:
start_dt = datetime.now()
if cat == 'priority':
events = events.filter(**{cat : True })
elif query and cat:
events = events.filter(**{cat : query})
if event_type:
events = events.filter(type__slug=event_type)
if event_group:
events = events.filter(groups__id=event_group)
if past:
filter_op = 'lt'
else:
filter_op = 'gte'
start_date_filter = {'start_dt__%s' %filter_op: start_dt}
end_date_filter = {'end_dt__%s' % filter_op: start_dt }
events = events.filter(Q(**start_date_filter) | Q(**end_date_filter))
if past:
events = events.order_by('-start_dt', '-priority')
else:
events = events.order_by('start_dt', '-priority')
if with_registration:
myevents = Event.objects.filter(registration__registrant__email=request.user.email,
registration__registrant__cancel_dt=None)
events = [event for event in events if event in myevents]
EventLog.objects.log()
return render_to_response(template_name, {
'events': events,
'form': form,
'now': datetime.now(),
'past': past,
'event_type': event_type,
'start_dt': start_dt,
'with_registration': with_registration,
}, context_instance=RequestContext(request))
def icalendar(request):
    p = re.compile(r'http(s)?://(www\.)?([^/]+)')
d = {}
file_name = ''
ics_str = ''
d['site_url'] = get_setting('site', 'global', 'siteurl')
match = p.search(d['site_url'])
if match:
d['domain_name'] = match.group(3)
else:
d['domain_name'] = ""
if request.user.is_authenticated():
file_name = 'ics-%s.ics' % (request.user.pk)
absolute_directory = os.path.join(settings.MEDIA_ROOT, 'files/ics')
if not os.path.exists(absolute_directory):
os.makedirs(absolute_directory)
if file_name:
file_path = os.path.join(absolute_directory, file_name)
# Check if ics file exists
if os.path.isfile(file_path):
ics = open(file_path, 'r+')
ics_str = ics.read()
ics.close()
if not ics_str:
ics_str = "BEGIN:VCALENDAR\n"
ics_str += "PRODID:-//Schipul Technologies//Schipul Codebase 5.0 MIMEDIR//EN\n"
ics_str += "VERSION:2.0\n"
ics_str += "METHOD:PUBLISH\n"
ics_str += get_vevents(request.user, d)
ics_str += "END:VCALENDAR\n"
response = HttpResponse(ics_str)
response['Content-Type'] = 'text/calendar'
if d['domain_name']:
file_name = '%s.ics' % (d['domain_name'])
else:
file_name = "event.ics"
response['Content-Disposition'] = 'attachment; filename="%s"' % (file_name)
return response
def icalendar_single(request, id):
    p = re.compile(r'http(s)?://(www\.)?([^/]+)')
d = {}
if not Event.objects.filter(pk=id).exists():
raise Http404
d['site_url'] = get_setting('site', 'global', 'siteurl')
match = p.search(d['site_url'])
if match:
d['domain_name'] = match.group(3)
else:
d['domain_name'] = ""
ics_str = "BEGIN:VCALENDAR\n"
ics_str += "PRODID:-//Schipul Technologies//Schipul Codebase 5.0 MIMEDIR//EN\n"
ics_str += "VERSION:2.0\n"
ics_str += "METHOD:PUBLISH\n"
ics_str += get_ievent(request.user, d, id)
ics_str += "END:VCALENDAR\n"
response = HttpResponse(ics_str)
response['Content-Type'] = 'text/calendar'
if d['domain_name']:
file_name = '%s.ics' % (d['domain_name'])
else:
file_name = "event.ics"
response['Content-Disposition'] = 'attachment; filename="%s"' % (file_name)
return response
@is_enabled('events')
def print_view(request, id, template_name="events/print-view.html"):
event = get_object_or_404(Event, pk=id)
if has_view_perm(request.user,'events.view_event',event):
EventLog.objects.log(instance=event)
return render_to_response(template_name, {'event': event},
context_instance=RequestContext(request))
else:
raise Http403
@is_enabled('events')
@login_required
def edit(request, id, form_class=EventForm, template_name="events/edit.html"):
event = get_object_or_404(Event, pk=id)
if not has_perm(request.user,'events.change_event', event):
raise Http403
if request.method == "POST":
eventform_params = {'edit_mode': True}
form_event = form_class(request.POST, request.FILES, instance=event,
user=request.user, **eventform_params)
form_attendees = DisplayAttendeesForm(request.POST)
post_data = request.POST
if 'apply_changes_to' not in post_data:
post_data = {'apply_changes_to':'self'}
form_apply_recurring = ApplyRecurringChangesForm(post_data)
forms = [form_event, form_apply_recurring, form_attendees]
if all([form.is_valid() for form in forms]):
apply_changes_to = form_apply_recurring.cleaned_data.get('apply_changes_to')
display_registrants = form_attendees.cleaned_data.get('display_event_registrants')
display_registrants_to = form_attendees.cleaned_data.get('display_registrants_to')
f = form_event.cleaned_data['photo_upload']
if f:
image = EventPhoto()
image.content_type = ContentType.objects.get_for_model(event.__class__)
image.creator = request.user
image.creator_username = request.user.username
image.owner = request.user
image.owner_username = request.user.username
filename = "%s" % (f.name)
f.file.seek(0)
image.file.save(filename, f)
event = form_event.save(commit=False)
event.display_event_registrants = display_registrants
event.display_registrants_to = display_registrants_to
if f:
event.image = image
# update all permissions and save the model
event = update_perms_and_save(request, form_event, event)
form_event.save_m2m()
EventLog.objects.log(instance=event)
if apply_changes_to == 'self':
msg_string = 'Successfully updated %s' % unicode(event)
messages.add_message(request, messages.SUCCESS, _(msg_string))
if "_save" in request.POST:
return HttpResponseRedirect(reverse('event', args=[event.pk]))
return HttpResponseRedirect(reverse('event.location_edit', args=[event.pk]))
else:
eventform_params2 = {'edit_mode': True, 'recurring_mode': True}
recurring_events = event.recurring_event.event_set.exclude(pk=event.pk)
if apply_changes_to == 'rest':
recurring_events = recurring_events.filter(start_dt__gte=event.start_dt)
for cur_event in recurring_events:
form_event2 = form_class(request.POST, request.FILES, instance=cur_event,
user=request.user, **eventform_params2)
if form_event2.is_valid():
cur_event = form_event2.save(commit=False)
cur_event.display_event_registrants = display_registrants
cur_event.display_registrants_to = display_registrants_to
if f:
cur_event.image = image
else:
cur_event.image = event.image
# update all permissions and save the model
cur_event = update_perms_and_save(request, form_event2, cur_event)
msg_string = 'Successfully updated the recurring events for %s' % unicode(event)
messages.add_message(request, messages.SUCCESS, _(msg_string))
if "_save" in request.POST:
return HttpResponseRedirect(reverse('event.recurring', args=[event.pk]))
return HttpResponseRedirect(reverse('event.location_edit', args=[event.pk]))
else:
eventform_params = {'edit_mode': True}
form_event = form_class(instance=event, user=request.user, **eventform_params)
form_attendees = DisplayAttendeesForm(
initial={
'display_event_registrants':event.display_event_registrants,
'display_registrants_to':event.display_registrants_to,
}
)
form_apply_recurring = ApplyRecurringChangesForm()
multi_event_forms = [form_event, form_attendees]
if event.is_recurring_event:
multi_event_forms = multi_event_forms + [form_apply_recurring]
# response
return render_to_response(template_name,
{'event': event, 'multi_event_forms': multi_event_forms, 'label': "overview"},
context_instance=RequestContext(request))
@is_enabled('events')
@login_required
def location_edit(request, id, form_class=PlaceForm, template_name="events/edit.html"):
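# Edit an event's location (Place); changes can optionally be applied across the recurring series.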
event = get_object_or_404(Event, pk=id)
if not has_perm(request.user,'events.change_event', event):
raise Http403
if request.method == "POST":
form_place = form_class(request.POST, instance=event.place, prefix='place')
post_data = request.POST
if 'apply_changes_to' not in post_data:
post_data = {'apply_changes_to':'self'}
form_apply_recurring = ApplyRecurringChangesForm(post_data)
forms = [form_place, form_apply_recurring]
if all([form.is_valid() for form in forms]):
apply_changes_to = form_apply_recurring.cleaned_data.get('apply_changes_to')
place = form_place.save(commit=False)
EventLog.objects.log(instance=event)
if apply_changes_to == 'self':
if place.event_set.count() > 1 and (place._original_name != place.name):
# Create a new place
place.pk = None
place.save()
event.place = place
event.save(log=False)
msg_string = 'Successfully updated %s' % unicode(event)
messages.add_message(request, messages.SUCCESS, _(msg_string))
if "_save" in request.POST:
return HttpResponseRedirect(reverse('event', args=[event.pk]))
return HttpResponseRedirect(reverse('event.organizer_edit', args=[event.pk]))
else:
recurring_events = event.recurring_event.event_set.all()
if apply_changes_to == 'rest':
recurring_events = recurring_events.filter(start_dt__gte=event.start_dt)
# Check if place is associated to a previous event
place_in_past_events = place.event_set.filter(start_dt__lt=event.start_dt,
recurring_event=event.recurring_event
).exists()
if (place._original_name != place.name) and place_in_past_events:
place.pk = None
place.save()
for cur_event in recurring_events:
cur_event.place = place
cur_event.save(log=False)
msg_string = 'Successfully updated the recurring events for %s' % unicode(event)
messages.add_message(request, messages.SUCCESS, _(msg_string))
if "_save" in request.POST:
return HttpResponseRedirect(reverse('event.recurring', args=[event.pk]))
return HttpResponseRedirect(reverse('event.organizer_edit', args=[event.pk]))
else:
form_place = form_class(instance=event.place, prefix='place')
form_apply_recurring = ApplyRecurringChangesForm()
multi_event_forms = [form_place]
if event.is_recurring_event:
multi_event_forms = multi_event_forms + [form_apply_recurring]
# response
return render_to_response(template_name,
{'event': event, 'multi_event_forms': multi_event_forms, 'label': "location"},
context_instance=RequestContext(request))
@is_enabled('events')
@login_required
def organizer_edit(request, id, form_class=OrganizerForm, template_name="events/edit.html"):
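# Edit an event's organizer; an Organizer record is created first if the event doesn't have one.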
event = get_object_or_404(Event, pk=id)
if not has_perm(request.user,'events.change_event', event):
raise Http403
# tried get_or_create(); but it raised a keyword argument error, so do it manually
try: # look for an organizer
organizer = event.organizer_set.all()[0]
except: # else: create an organizer
organizer = Organizer()
organizer.save()
organizer.event = [event]
organizer.save()
if request.method == "POST":
form_organizer = form_class(request.POST, instance=organizer, prefix='organizer')
post_data = request.POST
if 'apply_changes_to' not in post_data:
post_data = {'apply_changes_to':'self'}
form_apply_recurring = ApplyRecurringChangesForm(post_data)
forms = [form_organizer, form_apply_recurring]
if all([form.is_valid() for form in forms]):
apply_changes_to = form_apply_recurring.cleaned_data.get('apply_changes_to')
organizer = form_organizer.save(commit=False)
EventLog.objects.log(instance=event)
if apply_changes_to == 'self':
if organizer.event.count() > 1 and (organizer._original_name != organizer.name):
# Remove event from organizer
organizer.event.remove(event)
# Create a new organizer
organizer.pk = None
organizer.save()
# Re-add the event to the organizer
organizer.event.add(event)
msg_string = 'Successfully updated %s' % unicode(event)
messages.add_message(request, messages.SUCCESS, _(msg_string))
if "_save" in request.POST:
return HttpResponseRedirect(reverse('event', args=[event.pk]))
return HttpResponseRedirect(reverse('event.speaker_edit', args=[event.pk]))
else:
recurring_events = event.recurring_event.event_set.all()
if apply_changes_to == 'rest':
recurring_events = recurring_events.filter(start_dt__gte=event.start_dt)
# Check if organizer is associated to a previous event
organizer_in_past_events = organizer.event.filter(
start_dt__lt=event.start_dt,
recurring_event=event.recurring_event
).exists()
if (organizer._original_name != organizer.name) and organizer_in_past_events:
organizer.pk = None
organizer.save()
for cur_event in recurring_events:
# Remove previous organizer from event
for org in cur_event.organizer_set.all():
org.event.remove(cur_event)
# Add new organizer
organizer.event.add(cur_event)
msg_string = 'Successfully updated the recurring events for %s' % unicode(event)
messages.add_message(request, messages.SUCCESS, _(msg_string))
if "_save" in request.POST:
return HttpResponseRedirect(reverse('event.recurring', args=[event.pk]))
return HttpResponseRedirect(reverse('event.speaker_edit', args=[event.pk]))
else:
form_organizer = form_class(instance=organizer, prefix='organizer')
form_apply_recurring = ApplyRecurringChangesForm()
multi_event_forms = [form_organizer]
if event.is_recurring_event:
multi_event_forms = multi_event_forms + [form_apply_recurring]
# response
return render_to_response(template_name,
{'event': event, 'multi_event_forms': multi_event_forms, 'label': "organizer"},
context_instance=RequestContext(request))
@is_enabled('events')
@login_required
def speaker_edit(request, id, form_class=SpeakerForm, template_name="events/edit.html"):
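# Edit an event's speakers via a formset; speaker photos are matched to speakers by name.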
event = get_object_or_404(Event, pk=id)
if not has_perm(request.user,'events.change_event', event):
raise Http403
SpeakerFormSet = modelformset_factory(
Speaker,
formset=SpeakerBaseFormSet,
form=form_class,
extra=1,
can_delete=True
)
if request.method == "POST":
form_speaker = SpeakerFormSet(
request.POST, request.FILES,
queryset=event.speaker_set.all(), prefix='speaker'
)
post_data = request.POST
if 'apply_changes_to' not in post_data:
post_data = {'apply_changes_to':'self'}
form_apply_recurring = ApplyRecurringChangesForm(post_data)
forms = [form_speaker, form_apply_recurring]
if all([form.is_valid() for form in forms]):
apply_changes_to = form_apply_recurring.cleaned_data.get('apply_changes_to')
speakers = form_speaker.save(commit=False)
EventLog.objects.log(instance=event)
if apply_changes_to == 'self':
for speaker in speakers:
if (speaker.pk and (speaker.event.count() > 1) and
(speaker._original_name != speaker.name)):
# Remove the event from previous speaker
speaker.event.remove(event)
# Create new speaker
speaker.pk = None
speaker.save()
speaker.event.add(event)
# Only delete when speaker has no more events associated to it
for del_speaker in form_speaker.deleted_objects:
del_speaker.event.remove(event)
if not del_speaker.event.count():
del_speaker.delete()
msg_string = 'Successfully updated %s' % unicode(event)
messages.add_message(request, messages.SUCCESS, _(msg_string))
redirect_url = reverse('event', args=[event.pk])
else:
recurring_events = event.recurring_event.event_set.all()
if apply_changes_to == 'rest':
recurring_events = recurring_events.filter(start_dt__gte=event.start_dt)
for speaker in speakers:
if speaker.pk and (speaker._original_name != speaker.name):
# Check if speaker is associated to a previous event
if speaker.event.filter(
start_dt__lt=event.start_dt,
recurring_event=event.recurring_event
).exists():
# # Remove the events from previous speaker
# for recur_event in recurring_events:
# speaker.event.remove(recur_event)
speaker.pk = None
speaker.save()
speaker.event = recurring_events
speaker_ids = [speaker.pk for speaker in speakers]
for recur_event in recurring_events:
# remove other speakers that were previously associated with some of the occurrences
# but are not being added/edited here
speakers_to_remove = recur_event.speaker_set.exclude(id__in=speaker_ids)
if speakers_to_remove:
for speaker in speakers_to_remove:
speaker.event.remove(recur_event)
# Only delete when speaker has no more events associated to it
if form_speaker.deleted_objects:
for recur_event in recurring_events:
for del_speaker in form_speaker.deleted_objects:
try:
del_speaker.event.remove(recur_event)
except ValueError:
pass
if not del_speaker.event.count():
del_speaker.delete()
msg_string = 'Successfully updated the recurring events for %s' % unicode(event)
messages.add_message(request, messages.SUCCESS, _(msg_string))
redirect_url = reverse('event.recurring', args=[event.pk])
# make dict (i.e. speaker_bind); bind speaker with speaker image
pattern = re.compile('speaker-\d+-name')
speaker_keys = list(set(re.findall(pattern, ' '.join(request.POST))))
speaker_bind = {}
for speaker_key in speaker_keys: # loop through speaker form items
speaker_name = request.POST.get(speaker_key)
if speaker_name: # if speaker name found in request
speaker_file = request.FILES.get(speaker_key.replace('name','file'))
if speaker_file: # if speaker file found in request
# e.g. speaker_bind['eloy zuniga'] = <file>
speaker_bind[speaker_name] = speaker_file
for speaker in speakers:
assign_files_perms(speaker)
# match speaker w/ speaker image
binary_files = []
if speaker.name in speaker_bind:
binary_files = [speaker_bind[speaker.name]]
files = File.objects.save_files_for_instance(request, speaker, files=binary_files)
for f in files:
f.allow_anonymous_view = event.allow_anonymous_view
f.allow_user_view = event.allow_user_view
f.allow_member_view = event.allow_member_view
f.save()
if "_save" in request.POST:
return HttpResponseRedirect(redirect_url)
return HttpResponseRedirect(reverse('event.regconf_edit', args=[event.pk]))
else:
form_speaker = SpeakerFormSet(
queryset=event.speaker_set.all(),
prefix='speaker', auto_id='speaker_formset'
)
form_speaker.label = "Speaker(s)"
form_apply_recurring = ApplyRecurringChangesForm()
multi_event_forms = [form_speaker]
if event.is_recurring_event:
multi_event_forms = multi_event_forms + [form_apply_recurring]
# response
return render_to_response(template_name,
{'event': event, 'multi_event_forms': multi_event_forms, 'label': "speakers"},
context_instance=RequestContext(request))
@is_enabled('events')
@login_required
def regconf_edit(request, id, form_class=Reg8nEditForm, template_name="events/edit.html"):
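# Edit an event's registration configuration, including the optional custom registration form binding.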
event = get_object_or_404(Event, pk=id)
if not has_perm(request.user,'events.change_event', event):
raise Http403
reg_form_queryset = get_ACRF_queryset(event)
if request.method == "POST":
form_regconf = form_class(
request.POST, instance=event.registration_configuration,
reg_form_queryset=reg_form_queryset, prefix='regconf'
)
post_data = request.POST
if 'apply_changes_to' not in post_data:
post_data = {'apply_changes_to':'self'}
form_apply_recurring = ApplyRecurringChangesForm(post_data)
forms = [form_regconf, form_apply_recurring]
if all([form.is_valid() for form in forms]):
apply_changes_to = form_apply_recurring.cleaned_data.get('apply_changes_to')
regconf = form_regconf.save()
(use_custom_reg_form,
reg_form_id,
bind_reg_form_to_conf_only
) = form_regconf.cleaned_data.get('use_custom_reg').split(',')
if not (use_custom_reg_form == '1' and bind_reg_form_to_conf_only == '1'):
if regconf.reg_form:
regconf.reg_form = None
regconf.save()
EventLog.objects.log(instance=event)
if apply_changes_to == 'self':
msg_string = 'Successfully updated %s' % unicode(event)
messages.add_message(request, messages.SUCCESS, _(msg_string))
if "_save" in request.POST:
return HttpResponseRedirect(reverse('event', args=[event.pk]))
return HttpResponseRedirect(reverse('event.pricing_edit', args=[event.pk]))
else:
recurring_events = event.recurring_event.event_set.exclude(pk=event.pk)
if apply_changes_to == 'rest':
recurring_events = recurring_events.filter(start_dt__gte=event.start_dt)
for cur_event in recurring_events:
form_regconf2 = form_class(
request.POST, instance=cur_event.registration_configuration,
reg_form_queryset=reg_form_queryset, recurring_edit=True, prefix='regconf'
)
if form_regconf2.is_valid():
regconf = form_regconf2.save()
msg_string = 'Successfully updated the recurring events for %s' % unicode(event)
messages.add_message(request, messages.SUCCESS, _(msg_string))
if "_save" in request.POST:
return HttpResponseRedirect(reverse('event.recurring', args=[event.pk]))
return HttpResponseRedirect(reverse('event.pricing_edit', args=[event.pk]))
else:
form_regconf = Reg8nEditForm(
instance=event.registration_configuration,
reg_form_queryset=reg_form_queryset,
prefix='regconf'
)
form_apply_recurring = ApplyRecurringChangesForm()
multi_event_forms = [form_regconf]
if event.is_recurring_event:
multi_event_forms = multi_event_forms + [form_apply_recurring]
# response
return render_to_response(template_name,
{'event': event, 'multi_event_forms': multi_event_forms, 'label': "regconf"},
context_instance=RequestContext(request))
@is_enabled('events')
@login_required
def pricing_edit(request, id, form_class=Reg8nConfPricingForm, template_name="events/edit.html"):
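# Edit the registration pricing options (RegConfPricing) for an event; changes can be applied to the recurring series.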
event = get_object_or_404(Event, pk=id)
reg_conf = event.registration_configuration
if not has_perm(request.user,'events.change_event', event):
raise Http403
reg_form_queryset = get_ACRF_queryset(event)
pricing_reg_form_required = (reg_conf.use_custom_reg_form and
not reg_conf.bind_reg_form_to_conf_only)
regconfpricing_params = {'user': request.user,
'reg_form_queryset': reg_form_queryset,
'reg_form_required': pricing_reg_form_required}
if reg_conf and reg_conf.regconfpricing_set.all():
extra = 0
else:
extra = 1
RegConfPricingSet = modelformset_factory(
RegConfPricing,
formset=RegConfPricingBaseModelFormSet,
form=form_class,
extra=extra,
can_delete=True
)
if request.method == "POST":
form_regconfpricing = RegConfPricingSet(
request.POST,
queryset=RegConfPricing.objects.filter(
reg_conf=reg_conf,
status=True,
), prefix='regconfpricing', **regconfpricing_params
)
post_data = request.POST
if 'apply_changes_to' not in post_data:
post_data = {'apply_changes_to':'self'}
form_apply_recurring = ApplyRecurringChangesForm(post_data)
if form_regconfpricing.is_valid() and form_apply_recurring.is_valid():
apply_changes_to = form_apply_recurring.cleaned_data.get('apply_changes_to')
regconf_pricing = form_regconfpricing.save()
EventLog.objects.log(instance=event)
if apply_changes_to == 'self':
for regconf_price in regconf_pricing:
regconf_price.reg_conf = reg_conf
if not pricing_reg_form_required:
regconf_price.reg_form = None
regconf_price.save()
msg_string = 'Successfully updated %s' % unicode(event)
redirect_url = reverse('event', args=[event.pk])
else:
recurring_events = event.recurring_event.event_set.all()
if apply_changes_to == 'rest':
recurring_events = recurring_events.filter(start_dt__gte=event.start_dt)
for e in recurring_events:
e_reg_conf = e.registration_configuration
if e.id != event.id:
# delete old pricings associated to this event e
RegConfPricing.objects.filter(reg_conf=e_reg_conf).delete()
for regconf_price in regconf_pricing:
e_regconf_price = deepcopy(regconf_price)
e_regconf_price.reg_conf = e_reg_conf
if e.id != event.id:
e_regconf_price.pk = None
# calculate the start_dt and end_dt for this pricing
time_diff = e.start_dt.date() - event.start_dt.date()
e_regconf_price.start_dt += time_diff
e_regconf_price.end_dt += time_diff
e_regconf_price.save()
msg_string = 'Successfully updated the recurring events for %s' % unicode(event)
redirect_url = reverse('event.recurring', args=[event.pk])
messages.add_message(request, messages.SUCCESS, _(msg_string))
return HttpResponseRedirect(redirect_url)
else:
form_regconfpricing = RegConfPricingSet(
queryset=RegConfPricing.objects.filter(
reg_conf=event.registration_configuration,
status=True,
), prefix='regconfpricing', auto_id='regconfpricing_formset',
**regconfpricing_params
)
form_regconfpricing.label = _("Pricing(s)")
form_apply_recurring = ApplyRecurringChangesForm()
multi_event_forms = [form_regconfpricing]
if event.is_recurring_event:
multi_event_forms = multi_event_forms + [form_apply_recurring]
# response
return render_to_response(template_name,
{'event': event, 'multi_event_forms': multi_event_forms, 'label': "pricing"},
context_instance=RequestContext(request))
@is_enabled('events')
@login_required
def edit_meta(request, id, form_class=MetaForm, template_name="events/edit-meta.html"):
# check permission
event = get_object_or_404(Event, pk=id)
if not has_perm(request.user,'events.change_event',event):
raise Http403
defaults = {
'title': event.get_title(),
'description': event.get_description(),
'keywords': event.get_keywords(),
'canonical_url': event.get_canonical_url(),
}
event.meta = MetaTags(**defaults)
if request.method == "POST":
form = form_class(request.POST, instance=event.meta)
if form.is_valid():
event.meta = form.save() # save meta
event.save() # save relationship
msg_string = 'Successfully updated meta for %s' % unicode(event)
messages.add_message(request, messages.SUCCESS, _(msg_string))
return HttpResponseRedirect(reverse('event', args=[event.pk]))
else:
form = form_class(instance=event.meta)
return render_to_response(template_name, {'event': event, 'form':form},
context_instance=RequestContext(request))
@csrf_exempt
def get_place(request):
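# AJAX endpoint: return the details of a Place as JSON for the posted place id.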
if request.method == 'POST':
place_id = request.POST.get('id', None)
if place_id:
try:
place = Place.objects.get(pk=place_id)
return HttpResponse(json.dumps(
{
"error": False,
"message": "Get place success.",
"name": place.name,
"description": place.description,
"address": place.address,
"city": place.city,
"state": place.state,
"zip": place.zip,
"country": place.country,
"url": place.url,
}), content_type="text/plain")
except Place.DoesNotExist:
return HttpResponse(json.dumps({
"error": True,
"message": "Place does not exist.",
}), content_type="text/plain")
return HttpResponse(json.dumps(
{
"error": True,
"message": "No id provided.",
}), content_type="text/plain")
return HttpResponse('Requires POST method.')
@is_enabled('events')
@login_required
def add(request, year=None, month=None, day=None,
form_class=EventForm, template_name="events/add.html"):
"""
Add event page. The start date of the event can be preset
by passing year, month, and day in the URL.
"""
# custom reg_form queryset
reg_form_queryset = get_ACRF_queryset()
regconfpricing_params = {'user': request.user,
'reg_form_queryset': reg_form_queryset}
SpeakerFormSet = modelformset_factory(
Speaker,
formset=SpeakerBaseFormSet,
form=SpeakerForm,
extra=1
)
RegConfPricingSet = modelformset_factory(
RegConfPricing,
formset=RegConfPricingBaseModelFormSet,
form=Reg8nConfPricingForm,
extra=1
)
if has_perm(request.user,'events.add_event'):
if request.method == "POST":
# single forms
form_event = form_class(request.POST, request.FILES, user=request.user)
form_place = PlaceForm(request.POST, prefix='place')
form_organizer = OrganizerForm(request.POST, prefix='organizer')
form_regconf = Reg8nEditForm(request.POST, prefix='regconf',
reg_form_queryset=reg_form_queryset,)
form_attendees = DisplayAttendeesForm(request.POST)
# form sets
form_speaker = SpeakerFormSet(
request.POST,
request.FILES,
queryset=Speaker.objects.none(),
prefix='speaker'
)
conf_reg_form_required = False # if reg_form is required on regconf
pricing_reg_form_required = False # if reg_form is required on regconfpricing
if form_regconf.is_valid():
(use_custom_reg_form,
reg_form_id,
bind_reg_form_to_conf_only
) = form_regconf.cleaned_data['use_custom_reg'].split(',')
if use_custom_reg_form == '1':
if bind_reg_form_to_conf_only == '1':
conf_reg_form_required = True
else:
pricing_reg_form_required = True
regconfpricing_params.update({'reg_form_required': pricing_reg_form_required})
form_regconfpricing = RegConfPricingSet(
request.POST,
queryset=RegConfPricing.objects.none(),
prefix='regconfpricing',
**regconfpricing_params
)
# label the form sets
form_speaker.label = _("Speaker(s)")
form_regconfpricing.label = _("Pricing(s)")
forms = [
form_event,
form_place,
form_speaker,
form_organizer,
form_regconf,
form_attendees,
form_regconfpricing
]
if all([form.is_valid() for form in forms]):
# pks have to exist before making relationships
place = form_place.save()
regconf = form_regconf.save()
speakers = form_speaker.save()
organizer = form_organizer.save()
regconf_pricing = form_regconfpricing.save()
event = form_event.save(commit=False)
event.display_event_registrants = form_attendees.cleaned_data['display_event_registrants']
event.display_registrants_to = form_attendees.cleaned_data['display_registrants_to']
# update all permissions and save the model
event = update_perms_and_save(request, form_event, event)
groups = form_event.cleaned_data['groups']
event.groups = groups
event.save(log=False)
assign_files_perms(place)
assign_files_perms(organizer)
# handle image
f = form_event.cleaned_data['photo_upload']
if f:
image = EventPhoto()
image.object_id = event.id
image.content_type = ContentType.objects.get_for_model(event.__class__)
image.creator = request.user
image.creator_username = request.user.username
image.owner = request.user
image.owner_username = request.user.username
filename = "%s-%s" % (event.id, f.name)
f.file.seek(0)
image.file.save(filename, f)
event.image = image
# make dict (i.e. speaker_bind); bind speaker with speaker image
pattern = re.compile('speaker-\d+-name')
speaker_keys = list(set(re.findall(pattern, ' '.join(request.POST))))
speaker_bind = {}
for speaker_key in speaker_keys: # loop through speaker form items
speaker_name = request.POST.get(speaker_key)
if speaker_name: # if speaker name found in request
speaker_file = request.FILES.get(speaker_key.replace('name','file'))
if speaker_file: # if speaker file found in request
# e.g. speaker_bind['eloy zuniga'] = <file>
speaker_bind[speaker_name] = speaker_file
for speaker in speakers:
speaker.event = [event]
speaker.save()
assign_files_perms(speaker)
# match speaker w/ speaker image
binary_files = []
if speaker.name in speaker_bind:
binary_files = [speaker_bind[speaker.name]]
files = File.objects.save_files_for_instance(request, speaker, files=binary_files)
for f in files:
f.allow_anonymous_view = event.allow_anonymous_view
f.allow_user_view = event.allow_user_view
f.allow_member_view = event.allow_member_view
f.save()
if not conf_reg_form_required and regconf.reg_form:
regconf.reg_form = None
regconf.save()
for regconf_price in regconf_pricing:
regconf_price.reg_conf = regconf
if not pricing_reg_form_required:
regconf_price.reg_form = None
regconf_price.save()
organizer.event = [event]
organizer.save() # save again
# update event
event.place = place
event.registration_configuration = regconf
event.save(log=False)
if form_event.cleaned_data['is_recurring_event']:
init_date = event.start_dt
init_end = event.end_dt
event_length = init_end - init_date
freq = int(form_event.cleaned_data['frequency'])
r_type = int(form_event.cleaned_data['repeat_type'])
end_recurring = form_event.cleaned_data['end_recurring']
recur_every = form_event.cleaned_data['recurs_on']
recur_event = RecurringEvent(repeat_type=r_type,
frequency=freq,
starts_on=init_date,
ends_on=end_recurring)
recur_event.save()
event.is_recurring_event = True
event.recurring_event = recur_event
event.save()
event_list = get_recurrence_dates(r_type, init_date, end_recurring, freq, recur_every)[:20]
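# copy the event for each additional recurrence date (the series is capped at 20 occurrences)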
for counter in range(1, len(event_list)):
new_event = copy_event(event, request.user, reuse_rel=True)
new_event.start_dt = event_list[counter]
new_event.end_dt = event_list[counter] + event_length
new_event.is_recurring_event = True
new_event.recurring_event = recur_event
new_event.allow_anonymous_view = event.allow_anonymous_view
new_event.save()
reg_conf = new_event.registration_configuration
# Do not copy custom reg form on registration configuration
reg_conf.use_custom_reg_form = False
reg_conf.reg_form = None
reg_conf.bind_reg_form_to_conf_only = False
reg_conf.save()
for pricing in reg_conf.regconfpricing_set.all():
start_dt_diff = pricing.start_dt - event.start_dt
end_dt_diff = pricing.end_dt - event.end_dt
# Do not copy custom reg form on pricings
pricing.reg_form = None
# Adjust pricing dates
pricing.start_dt = new_event.start_dt + start_dt_diff
pricing.end_dt = new_event.end_dt + end_dt_diff
pricing.save()
msg_string = 'Successfully added the recurring event %s' % unicode(event)
messages.add_message(request, messages.SUCCESS, _(msg_string))
redirect_url = reverse('event.recurring', args=[event.pk])
else:
msg_string = 'Successfully added %s' % unicode(event)
messages.add_message(request, messages.SUCCESS, _(msg_string))
redirect_url = reverse('event', args=[event.pk])
# notification to administrator(s) and module recipient(s)
recipients = get_notice_recipients('site', 'global', 'allnoticerecipients')
if recipients and notification:
notification.send_emails(recipients, 'event_added', {
'event':event,
'user':request.user,
'registrants_paid':event.registrants(with_balance=False),
'registrants_pending':event.registrants(with_balance=True),
'SITE_GLOBAL_SITEDISPLAYNAME': get_setting('site', 'global', 'sitedisplayname'),
'SITE_GLOBAL_SITEURL': get_setting('site', 'global', 'siteurl'),
})
checklist_update('add-event')
return HttpResponseRedirect(redirect_url)
else: # if not post request
event_init = {}
# default to 30 days from now
mydate = datetime.now()+timedelta(days=30)
offset = timedelta(hours=2)
if all((year, month, day)):
date_str = '-'.join([year,month,day])
time_str = '10:00 AM'
dt_str = "%s %s" % (date_str, time_str)
dt_fmt = '%Y-%m-%d %H:%M %p'
start_dt = datetime.strptime(dt_str, dt_fmt)
end_dt = datetime.strptime(dt_str, dt_fmt) + offset
event_init['start_dt'] = start_dt
event_init['end_dt'] = end_dt
else:
start_dt = mydate
end_dt = start_dt + offset
event_init['start_dt'] = start_dt
event_init['end_dt'] = end_dt
reg_init = {
'start_dt':start_dt,
'end_dt':end_dt,
}
# single forms
form_event = form_class(user=request.user, initial=event_init)
form_place = PlaceForm(prefix='place')
form_organizer = OrganizerForm(prefix='organizer')
form_regconf = Reg8nEditForm(initial=reg_init, prefix='regconf',
reg_form_queryset=reg_form_queryset,)
form_attendees = DisplayAttendeesForm()
# form sets
form_speaker = SpeakerFormSet(
queryset=Speaker.objects.none(),
prefix='speaker',
auto_id='speaker_formset'
)
form_regconfpricing = RegConfPricingSet(
queryset=RegConfPricing.objects.none(),
prefix='regconfpricing',
auto_id='regconfpricing_formset',
**regconfpricing_params
)
# label the form sets
form_speaker.label = _("Speaker(s)")
form_regconfpricing.label = _("Pricing(s)")
# response
return render_to_response(template_name, {
'multi_event_forms':[
form_event,
form_place,
form_organizer,
form_speaker,
form_regconf,
form_attendees,
form_regconfpricing
],
},
context_instance=RequestContext(request))
else:
raise Http403
@is_enabled('events')
@login_required
def delete(request, id, template_name="events/delete.html"):
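# Delete a single event; admins are notified and the associated image is removed.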
event = get_object_or_404(Event, pk=id)
if has_perm(request.user, 'events.delete_event'):
if request.method == "POST":
eventlog = EventLog.objects.log(instance=event)
# send email to admins
recipients = get_notice_recipients('site', 'global', 'allnoticerecipients')
if recipients and notification:
notification.send_emails(recipients, 'event_deleted', {
'event': event,
'request': request,
'user': request.user,
'registrants_paid': event.registrants(with_balance=False),
'registrants_pending': event.registrants(with_balance=True),
'eventlog_url': reverse('event_log', args=[eventlog.pk]),
'SITE_GLOBAL_SITEDISPLAYNAME': get_setting('site', 'global', 'sitedisplayname'),
'SITE_GLOBAL_SITEURL': get_setting('site', 'global', 'siteurl'),
})
if event.image:
event.image.delete()
event.delete(log=False)
msg_string = 'Successfully deleted %s' % unicode(event)
messages.add_message(request, messages.SUCCESS, _(msg_string))
return HttpResponseRedirect(reverse('event.search'))
return render_to_response(template_name, {'event': event},
context_instance=RequestContext(request))
else:
raise Http403
@is_enabled('events')
@login_required
def delete_recurring(request, id, template_name="events/delete_recurring.html"):
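# Delete an event together with all other occurrences in its recurring series.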
event = get_object_or_404(Event, pk=id)
if not has_perm(request.user,'events.delete_event'):
raise Http403
if not event.is_recurring_event:
raise Http404
event_list = event.recurring_event.event_set.all()
if request.method == "POST":
recurring_manager = event.recurring_event
for event in event_list:
eventlog = EventLog.objects.log(instance=event)
# send email to admins
recipients = get_notice_recipients('site', 'global', 'allnoticerecipients')
if recipients and notification:
notification.send_emails(recipients,'event_deleted', {
'event':event,
'request':request,
'user':request.user,
'registrants_paid':event.registrants(with_balance=False),
'registrants_pending':event.registrants(with_balance=True),
'eventlog_url': reverse('event_log', args=[eventlog.pk]),
'SITE_GLOBAL_SITEDISPLAYNAME': get_setting('site', 'global', 'sitedisplayname'),
'SITE_GLOBAL_SITEURL': get_setting('site', 'global', 'siteurl'),
})
reg_conf = event.registration_configuration
if event.image:
event.image.delete()
event.delete()
# The one-to-one relationship is defined on Event, so deleting
# the event does not delete the registration_configuration record.
# The registration_configuration must be deleted explicitly
# for both records to be removed.
try:
reg_conf.delete()
except:
# roll back the transaction to fix the error for postgresql
#"current transaction is aborted, commands ignored until
# end of transaction block"
connection._rollback()
recurring_manager.delete()
msg_string = 'Successfully deleted the recurring event for "%s"' % unicode(event)
messages.add_message(request, messages.SUCCESS, _(msg_string))
return HttpResponseRedirect(reverse('event.search'))
return render_to_response(template_name, {'event': event, 'events': event_list},
context_instance=RequestContext(request))
@is_enabled('events')
@login_required
def recurring_details(request, id, template_name="events/recurring_view.html"):
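# Display all occurrences of a recurring event, ordered by start date.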
event = get_object_or_404(Event, pk=id)
if not event.is_recurring_event:
raise Http404
recurring_detail = event.recurring_event
event_list = recurring_detail.event_set.order_by('start_dt')
return render_to_response(template_name,
{'event':event, 'recurring_event':recurring_detail, 'events':event_list},
context_instance=RequestContext(request))
@is_enabled('events')
def register_pre(request, event_id, template_name="events/reg8n/register_pre2.html"):
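# Intermediate registration page: lets the user choose between individual and table pricing options.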
event = get_object_or_404(Event, pk=event_id)
reg_conf=event.registration_configuration
#anony_reg8n = get_setting('module', 'events', 'anonymousregistration')
# check spots available
limit = event.get_limit()
spots_taken, spots_available = event.get_spots_status()
if limit > 0 and spots_available == 0:
if not request.user.profile.is_superuser:
# no more spots are available, redirect to the event view.
return multi_register_redirect(request, event, _('Registration is full.'))
event.limit, event.spots_taken, event.spots_available = limit, spots_taken, spots_available
pricings = reg_conf.get_available_pricings(request.user,
is_strict=False,
spots_available=spots_available
)
individual_pricings = pricings.filter(quantity=1).order_by('position', '-price')
table_pricings = pricings.filter(quantity__gt=1).order_by('position', '-price')
if not (individual_pricings or table_pricings):
raise Http404
return render_to_response(template_name, {
'event':event,
'individual_pricings': individual_pricings,
'table_pricings': table_pricings,
'quantity_options': range(31)
}, context_instance=RequestContext(request))
def multi_register_redirect(request, event, msg):
messages.add_message(request, messages.INFO, msg)
return HttpResponseRedirect(reverse('event', args=(event.pk,),))
@is_enabled('events')
@superuser_required
def member_register(request, event_id,
template_name="events/reg8n/member-register.html"):
event = get_object_or_404(Event, pk=event_id)
# check if event allows registration
if not (event.registration_configuration and
event.registration_configuration.enabled):
msg_string = 'Registration is disabled for event %s' % unicode(event)
messages.add_message(request, messages.INFO, _(msg_string))
return HttpResponseRedirect(reverse('event', args=[event_id]))
spots_taken, spots_available = event.get_spots_status()
reg_conf=event.registration_configuration
pricings = reg_conf.get_available_pricings(request.user,
is_strict=False,
spots_available=spots_available)
pricings = pricings.filter(quantity=1)
form = MemberRegistrationForm(event, pricings, request.POST or None)
if request.method == "POST":
if form.is_valid():
create_member_registration(request.user, event, form)
msg_string = 'Successfully registered members for event %s' % unicode(event)
messages.add_message(request, messages.SUCCESS, _(msg_string))
return HttpResponseRedirect(reverse('event', args=[event_id]))
return render_to_response(template_name, {
'event':event,
'form': form
}, context_instance=RequestContext(request))
@is_enabled('events')
def register(request, event_id=0,
individual=False,
is_table=False,
pricing_id=None,
template_name="events/reg8n/register.html"):
"""
Handles both table and non-table registrations.
Table registration requires is_table=True and a valid pricing_id.
"""
event = get_object_or_404(Event, pk=event_id)
# open,validated or strict
anony_setting = get_setting('module', 'events', 'anonymousregistration')
event.anony_setting = anony_setting
is_strict = anony_setting == 'strict'
flat_registrants = []
discount_applied = False
if is_strict:
# strict requires logged in
if not request.user.is_authenticated():
messages.add_message(request, messages.INFO,
_('Please log in or sign up for a user account to register for an event.'))
return HttpResponseRedirect('%s?next=%s' % (reverse('auth_login'),
reverse('event.register', args=[event.id])))
# check if event allows registration
if not (event.registration_configuration and
event.registration_configuration.enabled):
raise Http404
# check spots available
limit = event.get_limit()
spots_taken, spots_available = event.get_spots_status()
if limit > 0 and spots_available == 0:
if not request.user.profile.is_superuser:
# no more spots are available, redirect to the event view.
return multi_register_redirect(request, event, _('Registration is full.'))
event.limit, event.spots_taken, event.spots_available = limit, spots_taken, spots_available
reg_conf=event.registration_configuration
if not any((individual, is_table)):
# Check if the event has both individual and table registrations.
# If so, redirect them to the intermediate page to choose individual
# or table.
pricings = reg_conf.get_available_pricings(request.user,
is_strict=False,
spots_available=spots_available)
if not pricings:
raise Http404
if len(pricings) > 1:
return HttpResponseRedirect(reverse('event.register_pre', args=(event.pk,),))
pricing = pricings[0]
if pricing.quantity == 1:
individual = True
event.default_pricing = pricing
else:
is_table = True
pricing_id = pricing.id
else:
pricings = None
event.is_table = is_table
event.require_guests_info = reg_conf.require_guests_info
if is_table and pricing_id:
pricing = get_object_or_404(RegConfPricing, pk=pricing_id)
event.free_event = pricing.price <=0
else:
# get all available pricing for the Price Options to select
if not pricings:
pricings = reg_conf.get_available_pricings(request.user,
is_strict=False,
spots_available=spots_available)
pricings = pricings.filter(quantity=1)
event.has_member_price = pricings.filter(allow_member=True
).exclude(
Q(allow_user=True) | Q(allow_anonymous=True)
).exists()
pricings = pricings.order_by('position', '-price')
# registration might be closed, redirect to detail page
if not pricings.exists():
return HttpResponseRedirect(reverse('event', args=(event.pk,),))
try:
pricing_id = int(pricing_id)
except:
pass
if pricing_id:
[event.default_pricing] = RegConfPricing.objects.filter(id=pricing_id) or [None]
event.free_event = not bool([p for p in pricings if p.price > 0])
pricing = None
# check if using a custom reg form
custom_reg_form = None
if reg_conf.use_custom_reg_form:
if reg_conf.bind_reg_form_to_conf_only:
custom_reg_form = reg_conf.reg_form
if custom_reg_form:
RF = FormForCustomRegForm
else:
RF = RegistrantForm
#RF = RegistrantForm
total_regt_forms = pricing and pricing.quantity or 1
can_delete = (not is_table)
# start the form set factory
RegistrantFormSet = formset_factory(
RF,
formset=RegistrantBaseFormSet,
can_delete=can_delete,
max_num=total_regt_forms,
extra=pricing and (pricing.quantity - 1) or 0
)
# get available addons
addons = get_available_addons(event, request.user)
# start addon formset factory
RegAddonFormSet = formset_factory(
RegAddonForm,
formset=RegAddonBaseFormSet,
extra=0,
)
# REGISTRANT formset
post_data = request.POST or None
params = {'prefix': 'registrant',
'event': event,
'user': request.user}
if not is_table:
# pass the pricings to display the price options
params.update({'pricings': pricings})
if custom_reg_form:
params.update({"custom_reg_form": custom_reg_form})
addon_extra_params = {'addons':addons}
# Setting the initial or post data
if request.method != 'POST':
# set the initial data if logged in
initial = {}
if request.user.is_authenticated():
profile = request.user.profile
initial = {'first_name':request.user.first_name,
'last_name':request.user.last_name,
'email':request.user.email,}
if profile:
initial.update({'company_name': profile.company,
'phone':profile.phone,
'address': profile.address,
'city': profile.city,
'state': profile.state,
'zip': profile.zipcode,
'country': profile.country,
'position_title': profile.position_title})
params.update({"initial": [initial]})
post_data = None
else:
if post_data and 'add_registrant' in request.POST:
post_data = request.POST.copy()
post_data['registrant-TOTAL_FORMS'] = int(post_data['registrant-TOTAL_FORMS'])+ 1
addon_extra_params.update({'valid_addons':addons})
# check if we have any valid discount code for the event.
# if not, we don't have to display the discount code box.
if reg_conf.discount_eligible:
reg_conf.discount_eligible = Discount.has_valid_discount(model=reg_conf._meta.model_name)
# Setting up the formset
registrant = RegistrantFormSet(post_data or None, **params)
addon_formset = RegAddonFormSet(request.POST or None,
prefix='addon',
event=event,
extra_params=addon_extra_params)
# REGISTRATION form
reg_form = RegistrationForm(
event,
request.POST or None,
user=request.user,
count=len(registrant.forms),
)
# remove captcha for logged in user
if request.user.is_authenticated():
del reg_form.fields['captcha']
elif request.method == 'POST' and 'addmore' in request.POST:
# captcha not required when add another was clicked
# fixes the captcha error shown when "Submit" was clicked
reg_form.fields['captcha'].required = False
elif request.method == 'POST' and 'commit' in request.POST and \
'confirmed' in request.POST: #remove the captcha for confirmation
del reg_form.fields['captcha']
# total registrant forms
if post_data:
total_regt_forms = post_data['registrant-TOTAL_FORMS']
within_available_spots = True
do_confirmation = False
add_more_registrants = False
flat_ignore_fields = ["DELETE", "override"]
if request.method == 'POST':
if 'commit' in request.POST:
#if not request.user.profile.is_superuser:
within_available_spots = event.limit==0 or event.spots_available >= int(total_regt_forms)
if all([within_available_spots,
reg_form.is_valid(),
registrant.is_valid(),
addon_formset.is_valid()]):
args = [request, event, reg_form, registrant, addon_formset,
pricing, pricing and pricing.price or 0]
if 'confirmed' in request.POST:
kwargs = {'admin_notes': '',
'custom_reg_form': custom_reg_form}
# add registration
reg8n, reg8n_created = add_registration(*args, **kwargs)
site_label = get_setting('site', 'global', 'sitedisplayname')
site_url = get_setting('site', 'global', 'siteurl')
self_reg8n = get_setting('module', 'users', 'selfregistration')
is_credit_card_payment = reg8n.payment_method and \
(reg8n.payment_method.machine_name).lower() == 'credit-card' \
and reg8n.invoice.balance > 0
if reg8n_created:
registrants = reg8n.registrant_set.all().order_by('id')
for registrant in registrants:
#registrant.assign_mapped_fields()
if registrant.custom_reg_form_entry:
registrant.name = registrant.custom_reg_form_entry.__unicode__()
else:
registrant.name = ' '.join([registrant.first_name, registrant.last_name])
if is_credit_card_payment:
# online payment
# get invoice; redirect to online pay
# email the admins as well
email_admins(event, reg8n.invoice.total, self_reg8n, reg8n, registrants)
return HttpResponseRedirect(reverse(
'payment.pay_online',
args=[reg8n.invoice.id, reg8n.invoice.guid]
))
else:
# offline payment:
# send email; add message; redirect to confirmation
primary_registrant = reg8n.registrant
if primary_registrant and primary_registrant.email:
notification.send_emails(
[primary_registrant.email],
'event_registration_confirmation',
{
'SITE_GLOBAL_SITEDISPLAYNAME': site_label,
'SITE_GLOBAL_SITEURL': site_url,
'self_reg8n': self_reg8n,
'reg8n': reg8n,
'registrants': registrants,
'event': event,
'total_amount': reg8n.invoice.total,
'is_paid': reg8n.invoice.balance == 0
},
True, # save notice in db
)
#email the admins as well
# fix the price
email_admins(event, reg8n.invoice.total, self_reg8n, reg8n, registrants)
# log an event
EventLog.objects.log(instance=event)
else:
msg_string = 'You were already registered on %s' % date_filter(reg8n.create_dt)
messages.add_message(request, messages.INFO, _(msg_string))
return HttpResponseRedirect(reverse(
'event.registration_confirmation',
args=(event_id, reg8n.registrant.hash)
))
else:
do_confirmation = True
amount_list, discount_amount, discount_list = get_registrants_prices(*args)
discount_applied = (discount_amount > 0)
for i, form in enumerate(registrant.forms):
if not is_table:
form.discount = discount_list[i]
form.final_price = amount_list[i]
flat_registrants.append(form)
elif 'addmore' in request.POST:
add_more_registrants = True
# if not free event, store price in the list for each registrant
price_list = []
count = 0
total_price = Decimal('0')
event_price = pricing and pricing.price or 0
individual_price = event_price
if is_table:
# individual_price_first, individual_price = split_table_price(
# event_price, pricing.quantity)
individual_price_first, individual_price = event_price, Decimal('0')
# total price calculation when invalid
for i, form in enumerate(registrant.forms):
deleted = False
if form.data.get('registrant-%d-DELETE' % count, False):
deleted = True
if is_table and i == 0:
if i == 0:
price_list.append({'price': individual_price_first , 'deleted':deleted})
total_price += individual_price_first
else:
price_list.append({'price': individual_price , 'deleted':deleted})
if not deleted:
total_price += individual_price
count += 1
addons_price = addon_formset.get_total_price()
total_price += addons_price
# check if we have any error on registrant formset
has_registrant_form_errors = False
for form in registrant.forms:
for field in form:
if field.errors:
has_registrant_form_errors = True
break
if has_registrant_form_errors:
break
return render_to_response(template_name, {
'event':event,
'event_price': event_price,
'free_event': event.free_event,
'price_list':price_list,
'total_price':total_price,
'pricing': pricing,
'reg_form':reg_form,
'custom_reg_form': custom_reg_form,
'registrant': registrant,
'addons':addons,
'addon_formset': addon_formset,
'total_regt_forms': total_regt_forms,
'has_registrant_form_errors': has_registrant_form_errors,
'within_available_spots': within_available_spots,
'flat_registrants': flat_registrants,
'discount_applied': discount_applied,
'do_confirmation': do_confirmation,
'add_more_registrants' : add_more_registrants,
'flat_ignore_fields' : flat_ignore_fields,
'currency_symbol' : get_setting("site", "global", "currencysymbol") or '$'
}, context_instance=RequestContext(request))
@is_enabled('events')
@csrf_exempt
def check_free_pass_eligibility(request, form_class=FreePassCheckForm):
"""
Check if any free pass is available for the corporate individual
member with the email or member_number provided.
"""
form = form_class(request.POST or None)
ret_dict = {'is_corp_member': False}
if form.is_valid():
from tendenci.apps.corporate_memberships.utils import get_user_corp_membership
member_number = form.cleaned_data['member_number'].strip()
email = form.cleaned_data['email'].strip()
corp_membership = get_user_corp_membership(
member_number=member_number,
email=email)
if corp_membership:
ret_dict['is_corp_member'] = True
ret_dict['pass_total'] = corp_membership.free_pass_total
ret_dict['pass_used'] = corp_membership.free_pass_used
ret_dict['pass_avail'] = corp_membership.free_pass_avail
ret_dict['corp_name'] = corp_membership.corp_profile.name
ret_dict['corp_id'] = corp_membership.id
return HttpResponse(json.dumps(ret_dict))
@is_enabled('events')
def multi_register(request, event_id=0, template_name="events/reg8n/multi_register.html"):
"""
This view has two POST states rather than a GET and a POST.
Attempting to access this view via GET will redirect to the event
page. Both POST states require 'pricing' in request.POST.
The first POST state comes from the event page where the price
selection takes place; it is identified by the presence of
'from_price_form' in request.POST.
The second POST state comes from the page rendered by this view;
it is identified by the presence of 'submit' in request.POST and
the absence of 'from_price_form'.
"""
event = get_object_or_404(Event, pk=event_id)
# check if event allows registration
if not (event.registration_configuration and
event.registration_configuration.enabled):
raise Http404
# set up pricing
try:
pricing, pricing_pk, amount = clean_price(request.POST['price'], request.user)
except:
return multi_register_redirect(request, event, _('Please choose a price.'))
# set the event price that will be used throughout the view
event_price = amount
# get all pricing
pricings = RegConfPricing.objects.filter(
reg_conf=event.registration_configuration,
status=True,
)
# check if this person is qualified to see this pricing and event_price
qualified_pricing = get_pricing(request.user, event, pricing=pricings)
qualifies = False
# custom registration form
# use the custom registration form if pricing is associated with a custom reg form
reg_conf=event.registration_configuration
for q_price in qualified_pricing:
if pricing.pk == q_price['price'].pk:
qualifies = True
if not qualifies:
return multi_register_redirect(request, event, _('Please choose a price.'))
# check if use a custom reg form
custom_reg_form = None
if reg_conf.use_custom_reg_form:
if reg_conf.bind_reg_form_to_conf_only:
custom_reg_form = reg_conf.reg_form
else:
custom_reg_form = pricing.reg_form
# check if this post came from the pricing form and, if so,
# fake a GET request here so the client can't actually see
# the pricing variable
if 'from_price_form' in request.POST:
request.method = 'GET'
request.POST = QueryDict({})
# check if it is still open based on dates
reg_started = registration_has_started(event, pricing=pricings)
if not reg_started:
return multi_register_redirect(request, event, _('Registration has been closed.'))
# update the spots left
limit = event.get_limit()
spots_taken = 0
if limit > 0:
spots_taken = get_event_spots_taken(event)
if spots_taken > limit:
return multi_register_redirect(request, event, _('Registration is full.'))
if custom_reg_form:
RF = FormForCustomRegForm
else:
RF = RegistrantForm
#RF = RegistrantForm
# start the form set factory
RegistrantFormSet = formset_factory(
RF,
formset=RegistrantBaseFormSet,
can_delete=True,
max_num=pricing.quantity,
extra=(pricing.quantity - 1)
)
# get available addons
addons = get_available_addons(event, request.user)
# start addon formset factory
RegAddonFormSet = formset_factory(
RegAddonForm,
formset=RegAddonBaseFormSet,
extra=0,
)
# update the number of forms based on the pricing quantity
total_regt_forms = pricing.quantity
# REGISTRANT formset
post_data = request.POST or None
if request.method != 'POST':
# set the initial data if logged in
initial = {}
if request.user.is_authenticated():
try:
profile = request.user.profile
except:
profile = None
initial = {'first_name':request.user.first_name,
'last_name':request.user.last_name,
'email':request.user.email,}
if profile:
initial.update({'company_name': profile.company,
'phone':profile.phone,})
params = {'prefix': 'registrant',
'initial': [initial],
'event': event}
if custom_reg_form:
params.update({"custom_reg_form": custom_reg_form})
registrant = RegistrantFormSet(**params)
addon_formset = RegAddonFormSet(
prefix='addon',
event=event,
extra_params={
'addons':addons,
})
else:
if post_data and 'add_registrant' in request.POST:
post_data = request.POST.copy()
post_data['registrant-TOTAL_FORMS'] = int(post_data['registrant-TOTAL_FORMS'])+ 1
params = {'prefix': 'registrant',
'event': event}
if custom_reg_form:
params.update({"custom_reg_form": custom_reg_form})
registrant = RegistrantFormSet(post_data, **params)
addon_formset = RegAddonFormSet(request.POST,
prefix='addon',
event=event,
extra_params={
'addons':addons,
'valid_addons':addons,
})
# REGISTRATION form
if request.method == 'POST' and 'submit' in request.POST:
reg_form = RegistrationForm(
event,
pricing,
event_price,
request.POST,
user=request.user,
count=len(registrant.forms),
)
else:
reg_form = RegistrationForm(
event,
pricing,
event_price,
user=request.user
)
if request.user.is_authenticated():
del reg_form.fields['captcha']
# total registrant forms
if post_data:
total_regt_forms = post_data['registrant-TOTAL_FORMS']
if request.method == 'POST':
if 'submit' in request.POST:
if False not in (reg_form.is_valid(), registrant.is_valid(), addon_formset.is_valid()):
# override event_price to price specified by admin
admin_notes = ''
if request.user.profile.is_superuser and event_price > 0:
if event_price != reg_form.cleaned_data['amount_for_admin']:
admin_notes = _("Price has been overridden for this registration. ")
event_price = reg_form.cleaned_data['amount_for_admin']
reg8n, reg8n_created = add_registration(
request,
event,
reg_form,
registrant,
addon_formset,
pricing,
event_price,
admin_notes=admin_notes,
custom_reg_form=custom_reg_form,
)
site_label = get_setting('site', 'global', 'sitedisplayname')
site_url = get_setting('site', 'global', 'siteurl')
self_reg8n = get_setting('module', 'users', 'selfregistration')
is_credit_card_payment = reg8n.payment_method and \
(reg8n.payment_method.machine_name).lower() == 'credit-card' \
and event_price > 0
if reg8n_created:
registrants = reg8n.registrant_set.all().order_by('id')
for registrant in registrants:
#registrant.assign_mapped_fields()
if registrant.custom_reg_form_entry:
registrant.name = registrant.custom_reg_form_entry.__unicode__()
else:
registrant.name = ' '.join([registrant.first_name, registrant.last_name])
if is_credit_card_payment:
# online payment
# get invoice; redirect to online pay
# email the admins as well
email_admins(event, event_price, self_reg8n, reg8n, registrants)
return HttpResponseRedirect(reverse(
'payment.pay_online',
args=[reg8n.invoice.id, reg8n.invoice.guid]
))
else:
# offline payment:
# send email; add message; redirect to confirmation
primary_registrant = reg8n.registrant
if primary_registrant and primary_registrant.email:
notification.send_emails(
[primary_registrant.email],
'event_registration_confirmation',
{
'SITE_GLOBAL_SITEDISPLAYNAME': site_label,
'SITE_GLOBAL_SITEURL': site_url,
'self_reg8n': self_reg8n,
'reg8n': reg8n,
'registrants': registrants,
'event': event,
'price': event_price,
'is_paid': reg8n.invoice.balance == 0
},
True, # save notice in db
)
#email the admins as well
email_admins(event, event_price, self_reg8n, reg8n, registrants)
EventLog.objects.log(instance=event)
else:
msg_string = 'You were already registered on %s' % date_filter(reg8n.create_dt)
messages.add_message(request, messages.INFO, _(msg_string))
return HttpResponseRedirect(reverse(
'event.registration_confirmation',
args=(event_id, reg8n.registrant.hash)
))
# if not free event, store price in the list for each registrant
price_list = []
count = 0
total_price = Decimal(str(0.00))
free_event = event_price <= 0
# total price calculation when invalid
for form in registrant.forms:
deleted = False
if form.data.get('registrant-%d-DELETE' % count, False):
deleted = True
if count % pricing.quantity == 0:
price_list.append({'price': event_price, 'deleted':deleted})
else:
price_list.append({'price': 0.00 , 'deleted':deleted})
if not deleted:
if pricing.quantity > 1:
total_price = event_price
else:
total_price += event_price
count += 1
addons_price = addon_formset.get_total_price()
total_price += addons_price
# check if we have any error on registrant formset
has_registrant_form_errors = False
for form in registrant.forms:
for field in form:
if field.errors:
has_registrant_form_errors = True
break
if has_registrant_form_errors:
break
return render_to_response(template_name, {
'event':event,
'event_price': event_price,
'free_event': free_event,
'price_list':price_list,
'total_price':total_price,
'price': pricing,
'reg_form':reg_form,
'custom_reg_form': custom_reg_form,
'registrant': registrant,
'addons':addons,
'addon_formset': addon_formset,
'total_regt_forms': total_regt_forms,
'has_registrant_form_errors': has_registrant_form_errors,
}, context_instance=RequestContext(request))
@is_enabled('events')
def registration_edit(request, reg8n_id=0, hash='', template_name="events/reg8n/reg8n_edit.html"):
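# Edit registrant information on an existing registration, using the custom reg form if one is configured.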
reg8n = get_object_or_404(Registration, pk=reg8n_id)
perms = (
has_perm(request.user, 'events.change_registration', reg8n), # has perm
request.user == reg8n.registrant.user, # main registrant
reg8n.registrant.hash == hash, # has secret hash
)
if not any(perms):
raise Http403
custom_reg_form = None
reg_conf = reg8n.event.registration_configuration
if reg_conf.use_custom_reg_form:
if reg_conf.bind_reg_form_to_conf_only:
custom_reg_form = reg_conf.reg_form
else:
custom_reg_form = reg8n.reg_conf_price.reg_form
if custom_reg_form:
# use formset_factory for custom registration form
RegistrantFormSet = formset_factory(
FormForCustomRegForm,
formset=RegistrantBaseFormSet,
max_num=reg8n.registrant_set.filter(registration=reg8n).count(),
extra=0
)
# check and populate for any missing entry
for registrant in reg8n.registrant_set.filter(cancel_dt__isnull=True):
if not registrant.custom_reg_form_entry:
registrant.populate_custom_form_entry()
entry_ids = reg8n.registrant_set.filter(cancel_dt__isnull=True
).values_list('custom_reg_form_entry',
flat=True).order_by('id')
entries = CustomRegFormEntry.objects.filter(pk__in=entry_ids)
params = {'prefix': 'registrant',
'custom_reg_form': custom_reg_form,
'entries': entries,
'event': reg8n.event}
if request.method != 'POST':
# build initial
params.update({'initial': get_custom_registrants_initials(entries),})
formset = RegistrantFormSet(request.POST or None, **params)
else:
fields=('salutation', 'first_name', 'last_name', 'mail_name', 'email',
'position_title', 'company_name', 'phone', 'address', 'city',
'state', 'zip', 'country', 'meal_option', 'comments')
fields = [field_name for field_name in fields if get_setting(
'module', 'events', 'regform_%s_visible' % field_name)]
# use modelformset_factory for regular registration form
RegistrantFormSet = modelformset_factory(
Registrant, extra=0,
fields=fields)
formset = RegistrantFormSet(request.POST or None,
queryset=Registrant.objects.filter(registration=reg8n,
cancel_dt__isnull=True).order_by('id'))
# required fields only stay on the first form
for i, form in enumerate(formset.forms):
for key in form.fields.keys():
if i > 0:
form.fields[key].required = False
else:
if key in ['phone', 'company_name']:
form.fields[key].required = False
if request.method == 'POST':
if formset.is_valid():
updated = False
if custom_reg_form:
for form in formset.forms:
entry = form.save(reg8n.event)
for reg in entry.registrants.all():
reg.initialize_fields()
updated = True
else:
instances = formset.save()
if instances: updated = True
reg8n_conf_url = reverse(
'event.registration_confirmation',
args=(reg8n.event.id, reg8n.registrant.hash)
)
if updated:
EventLog.objects.log(instance=reg8n)
msg = _('Registrant(s) info updated')
else:
msg = _('No changes made to the registrant(s)')
messages.add_message(request, messages.INFO, msg)
return HttpResponseRedirect(reg8n_conf_url)
total_regt_forms = Registrant.objects.filter(registration=reg8n).count()
# check formset error
formset_errors = False
for form in formset.forms:
for field in form:
if field.errors:
formset_errors = True
break
if formset_errors:
break
return render_to_response(template_name, {'formset': formset,
'formset_errors':formset_errors,
'total_regt_forms':total_regt_forms,
'reg8n': reg8n,
},
context_instance=RequestContext(request))
@is_enabled('events')
def cancel_registration(request, event_id, registration_id, hash='', template_name="events/reg8n/cancel_registration.html"):
event = get_object_or_404(Event, pk=event_id)
try:
registration = Registration.objects.get(
event=event,
pk=registration_id,
)
except Registration.DoesNotExist as e:
raise Http404
perms = (
has_perm(request.user, 'events.change_registration', registration), # has perm
request.user == registration.registrant.user, # main registrant
registration.registrant.hash == hash, # has secret hash
)
if not any(perms):
raise Http403
registrants = registration.registrant_set.filter(cancel_dt__isnull=True)
cancelled_registrants = registration.registrant_set.filter(cancel_dt__isnull=False)
if request.method == "POST":
# check if already canceled. if so, do nothing
if not registration.canceled:
for registrant in registrants:
user_is_registrant = False
if not request.user.is_anonymous() and registrant.user:
if request.user.id == registrant.user.id:
user_is_registrant = True
registrant.cancel_dt = datetime.now()
registrant.save()
# update the amount_paid in registration
if registrant.amount:
if registrant.registration.amount_paid:
registrant.registration.amount_paid -= registrant.amount
registrant.registration.save()
# update the invoice if invoice is not tendered
invoice = registrant.registration.invoice
if invoice and not invoice.is_tendered:
invoice.total -= registrant.amount
invoice.subtotal -= registrant.amount
invoice.balance -= registrant.amount
invoice.save(request.user)
recipients = get_notice_recipients('site', 'global', 'allnoticerecipients')
if recipients and notification:
notification.send_emails(recipients, 'event_registration_cancelled', {
'event':event,
'user':request.user,
'registrants_paid':event.registrants(with_balance=False),
'registrants_pending':event.registrants(with_balance=True),
'SITE_GLOBAL_SITEDISPLAYNAME': get_setting('site', 'global', 'sitedisplayname'),
'SITE_GLOBAL_SITEURL': get_setting('site', 'global', 'siteurl'),
'registrant':registrant,
'user_is_registrant': user_is_registrant,
})
# Log an event for each registrant in the loop
EventLog.objects.log(instance=registrant)
registration.canceled = True
registration.save()
return HttpResponseRedirect(
reverse('event.registration_confirmation',
args=[event.pk, registration.registrant.hash])
)
for regt in registrants:
if regt.custom_reg_form_entry:
regt.assign_mapped_fields()
if not regt.name:
regt.last_name = regt.name = regt.custom_reg_form_entry.__unicode__()
for c_regt in cancelled_registrants:
if c_regt.custom_reg_form_entry:
c_regt.assign_mapped_fields()
if not c_regt.name:
c_regt.last_name = c_regt.name = c_regt.custom_reg_form_entry.__unicode__()
return render_to_response(template_name, {
'event': event,
'registration': registration,
'registrants': registrants,
'cancelled_registrants': cancelled_registrants,
'hash': hash,
},
context_instance=RequestContext(request))
@is_enabled('events')
def cancel_registrant(request, event_id=0, registrant_id=0, hash='', template_name="events/reg8n/cancel_registrant.html"):
event = get_object_or_404(Event, pk=event_id)
if registrant_id:
try:
registrant = Registrant.objects.get(
registration__event=event,
pk =registrant_id,
)
# check permission
if not has_perm(request.user, 'events.view_registrant', registrant):
raise Http403
except:
raise Http404
elif hash:
sqs = Registrant.objects.filter(registration__event=event)
sqs = sqs.order_by("-update_dt")
        # if this loop becomes a bottleneck, add a hash field to the Registrant table
registrant = None
for reg in sqs:
if reg.hash == hash:
registrant = reg
break
if not registrant:
raise Http404
if registrant.cancel_dt:
raise Http404
if request.method == "POST":
# check if already canceled. if so, do nothing
if not registrant.cancel_dt:
user_is_registrant = False
if request.user.is_authenticated() and registrant.user:
if request.user == registrant.user:
user_is_registrant = True
registrant.cancel_dt = datetime.now()
registrant.save()
# update the amount_paid in registration
if registrant.amount:
if registrant.registration.amount_paid:
registrant.registration.amount_paid -= registrant.amount
registrant.registration.save()
# update the invoice if invoice is not tendered
invoice = registrant.registration.invoice
if not invoice.is_tendered:
invoice.total -= registrant.amount
invoice.subtotal -= registrant.amount
invoice.balance -= registrant.amount
invoice.save(request.user)
# check if all registrants in this registration are canceled.
# if so, update the canceled field.
reg8n = registrant.registration
exist_not_canceled = Registrant.objects.filter(
registration=reg8n,
cancel_dt__isnull=True
).exists()
if not exist_not_canceled:
reg8n.canceled = True
reg8n.save()
EventLog.objects.log(instance=registrant)
recipients = get_notice_recipients('site', 'global', 'allnoticerecipients')
if recipients and notification:
notification.send_emails(recipients, 'event_registration_cancelled', {
'event':event,
'user':request.user,
'registrants_paid':event.registrants(with_balance=False),
'registrants_pending':event.registrants(with_balance=True),
'SITE_GLOBAL_SITEDISPLAYNAME': get_setting('site', 'global', 'sitedisplayname'),
'SITE_GLOBAL_SITEURL': get_setting('site', 'global', 'siteurl'),
'registrant':registrant,
'user_is_registrant': user_is_registrant,
})
# back to invoice
return HttpResponseRedirect(
reverse('event.registration_confirmation', args=[event.pk, registrant.hash]))
if registrant.custom_reg_form_entry:
registrant.assign_mapped_fields()
if not registrant.name:
registrant.last_name = registrant.name = registrant.custom_reg_form_entry.__unicode__()
return render_to_response(template_name, {
'event': event,
'registrant':registrant,
'hash': hash,
},
context_instance=RequestContext(request))
@is_enabled('events')
def month_view(request, year=None, month=None, type=None, template_name='events/month-view.html'):
if type: # redirect to /events/month/ if type does not exist
if not Type.objects.filter(slug=type).exists():
# use HttpCustomResponseRedirect to check if event
# exists in redirects module
return HttpCustomResponseRedirect(reverse('event.month'))
# default/convert month and year
if month and year:
month, year = int(month), int(year)
else:
month, year = date.today().month, date.today().year
if year <= 1900 or year >= 9999:
raise Http404
calendar.setfirstweekday(calendar.SUNDAY)
Calendar = calendar.Calendar
next_month, next_year = get_next_month(month, year)
prev_month, prev_year = get_prev_month(month, year)
if type and "latest" in request.GET:
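        # "?latest" in the query string means: if this month has no events of
        # this type, jump ahead to the month of the next such event (or show a
        # notice if none remain)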
current_type = Type.objects.filter(slug=type)
current_date = datetime(month=month, day=1, year=year)
next_date = datetime(month=next_month, day=1, year=next_year)
if not Event.objects.filter(start_dt__gte=current_date, start_dt__lte=next_date, type=current_type[0]).exists():
latest_event = Event.objects.filter(start_dt__gte=current_date, type=current_type[0]).order_by('start_dt').first()
if latest_event is None:
msg_string = u'No more %s Events were found.' % (unicode(current_type[0]))
messages.add_message(request, messages.INFO, _(msg_string))
else:
latest_month = latest_event.start_dt.month
latest_year = latest_event.start_dt.year
current_date = current_date.strftime('%b %Y')
latest_date = latest_event.start_dt.strftime('%b %Y')
msg_string = u'No %s Events were found for %s. The next %s event is on %s, shown below.' % (unicode(current_type[0]), current_date, unicode(current_type[0]), latest_date)
messages.add_message(request, messages.INFO, _(msg_string))
return HttpResponseRedirect(reverse('event.month', args=[latest_year, latest_month, current_type[0].slug]))
# remove any params that aren't set (e.g. type)
next_month_params = [i for i in (next_year, next_month, type) if i]
prev_month_params = [i for i in (prev_year, prev_month, type) if i]
next_month_url = reverse('event.month', args=next_month_params)
prev_month_url = reverse('event.month', args=prev_month_params)
month_names = calendar.month_name[month-1:month+2]
weekdays = calendar.weekheader(10).split()
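    # monthdatescalendar() gives the month as a list of weeks, each week being
    # seven datetime.date objects (Sunday first, padded with neighboring-month days)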
cal = Calendar(calendar.SUNDAY).monthdatescalendar(year, month)
# Check for empty pages for far-reaching years
if abs(year - date.today().year) > 6:
filters = get_query_filters(request.user, 'events.view_event')
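        # match any event that starts in, ends in, or completely spans the displayed month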
is_events = Event.objects.filter(filters).filter(
(Q(start_dt__gte=cal[0][0]) & Q(start_dt__lte=cal[-1:][0][6])) | (Q(end_dt__gte=cal[0][0]) & Q(end_dt__lte=cal[-1:][0][6])) | (Q(end_dt__gte=cal[-1:][0][6]) & Q(start_dt__lte=cal[0][0]))).distinct()
if not is_events:
# Try to redirect old dates to the earliest event
if year < date.today().year:
latest_event = Event.objects.filter(start_dt__gte=datetime(month=month, day=1, year=year)).order_by('start_dt').first()
if latest_event is not None:
latest_month = latest_event.start_dt.month
latest_year = latest_event.start_dt.year
current_date = datetime(month=month, day=1, year=year).strftime('%b %Y')
latest_date = latest_event.start_dt.strftime('%b %Y')
msg_string = 'No Events were found for %s. The next event is on %s, shown below.' % (current_date, latest_date)
messages.add_message(request, messages.INFO, _(msg_string))
return HttpResponseRedirect(reverse('event.month', args=[latest_year, latest_month]))
# Try to redirect far future dates to the latest event
else:
latest_event = Event.objects.filter(end_dt__lte=datetime(month=next_month, day=1, year=next_year)).order_by('-end_dt').first()
if latest_event is not None:
latest_month = latest_event.end_dt.month
latest_year = latest_event.end_dt.year
current_date = datetime(month=month, day=1, year=year).strftime('%b %Y')
latest_date = latest_event.end_dt.strftime('%b %Y')
                    msg_string = 'No Events were found for %s. The most recent event was on %s, shown below.' % (current_date, latest_date)
messages.add_message(request, messages.INFO, _(msg_string))
return HttpResponseRedirect(reverse('event.month', args=[latest_year, latest_month]))
types = Type.objects.all().order_by('name')
EventLog.objects.log()
return render_to_response(template_name, {
'cal':cal,
'month':month,
'prev_month_url':prev_month_url,
'next_month_url':next_month_url,
'month_names':month_names,
'year':year,
'weekdays':weekdays,
'today':date.today(),
'types':types,
'type':type,
},
context_instance=RequestContext(request))
@is_enabled('events')
def week_view(request, year=None, month=None, day=None, type=None, template_name='events/week-view.html'):
if type: # redirect to /events/week/ if type does not exist
if not Type.objects.filter(slug=type).exists():
# use HttpCustomResponseRedirect to check if event
# exists in redirects module
return HttpCustomResponseRedirect(reverse('event.week'))
# default/convert month and year
if month and year and day:
month, year, day = int(month), int(year), int(day)
else:
month, year, day = date.today().month, date.today().year, date.today().day
if year <= 1900 or year >= 9999:
raise Http404
calendar.setfirstweekday(calendar.SUNDAY)
Calendar = calendar.Calendar(calendar.SUNDAY)
weekdays = calendar.weekheader(10).split()
tgtdate = date(year, month, day)
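    # the seven dates of the week containing tgtdate (Sunday first)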
week_dates = get_week_days(tgtdate, Calendar)
next_date = week_dates[6] + timedelta(days=1)
prev_date = week_dates[0] + timedelta(days=-1)
if type and "latest" in request.GET:
current_type = Type.objects.filter(slug=type)
current_date = datetime(month=week_dates[0].month, day=week_dates[0].day, year=week_dates[0].year)
next_date = datetime(month=next_date.month, day=next_date.day, year=next_date.year)
if not Event.objects.filter(start_dt__gte=current_date, start_dt__lte=next_date, type=current_type[0]).exists():
latest_event = Event.objects.filter(start_dt__gte=current_date, type=current_type[0]).order_by('start_dt').first()
if latest_event is None:
msg_string = u'No more %s Events were found.' % (unicode(current_type[0]))
messages.add_message(request, messages.INFO, _(msg_string))
else:
latest_day = latest_event.start_dt.day
latest_month = latest_event.start_dt.month
latest_year = latest_event.start_dt.year
current_date = current_date.strftime('%x')
latest_date = latest_event.start_dt.strftime('%x')
msg_string = u'No %s Events were found for %s. The next %s event is on %s, shown below.' % (unicode(current_type[0]), current_date, unicode(current_type[0]), latest_date)
messages.add_message(request, messages.INFO, _(msg_string))
return HttpResponseRedirect(reverse('event.week', args=[latest_year, latest_month, latest_day, current_type[0].slug]))
# remove any params that aren't set (e.g. type)
next_week_params = [i for i in (next_date.year, next_date.month, next_date.day) if i]
prev_week_params = [i for i in (prev_date.year, prev_date.month, prev_date.day) if i]
next_week_url = reverse('event.week', args=next_week_params)
prev_week_url = reverse('event.week', args=prev_week_params)
# Check for empty pages for far-reaching years
if abs(year - date.today().year) > 6:
filters = get_query_filters(request.user, 'events.view_event')
is_events = Event.objects.filter(filters).filter(
(Q(start_dt__gte=week_dates[0]) & Q(start_dt__lte=week_dates[6])) | (Q(end_dt__gte=week_dates[0]) & Q(end_dt__lte=week_dates[6]))).distinct()
if not is_events:
# Try to redirect old dates to the earliest event
if year < date.today().year:
latest_event = Event.objects.filter(start_dt__gte=tgtdate).order_by('start_dt').first()
if latest_event is not None:
latest_date = latest_event.start_dt
msg_string = 'No Events were found for %s. The next event is on %s, shown below.' % (tgtdate.strftime('%x'), latest_date.strftime('%x'))
messages.add_message(request, messages.INFO, _(msg_string))
return HttpResponseRedirect(reverse('event.week', args=[latest_date.year, latest_date.month, latest_date.day]))
# Try to redirect far future dates to the latest event
else:
latest_event = Event.objects.filter(end_dt__lte=tgtdate).order_by('-end_dt').first()
if latest_event is not None:
latest_date = latest_event.end_dt
                    msg_string = 'No Events were found for %s. The most recent event was on %s, shown below.' % (tgtdate.strftime('%x'), latest_date.strftime('%x'))
messages.add_message(request, messages.INFO, _(msg_string))
return HttpResponseRedirect(reverse('event.week', args=[latest_date.year, latest_date.month, latest_date.day]))
types = Type.objects.all().order_by('name')
EventLog.objects.log()
return render_to_response(template_name, {
'week':week_dates,
'weekdays':weekdays,
'next_week_url':next_week_url,
'prev_week_url':prev_week_url,
'cur_date':tgtdate,
'today':date.today(),
'types':types,
'type':type,
},
context_instance=RequestContext(request))
@is_enabled('events')
def day_view(request, year=None, month=None, day=None, template_name='events/day-view.html'):
year = int(year)
if year <= 1900:
raise Http404
query = request.GET.get('q', None)
form = EventSimpleSearchForm(request.GET)
if form.is_valid():
cat = form.cleaned_data.get('search_category', None)
query = form.cleaned_data.get('q', None)
else:
cat = None
query = ''
day_date = datetime(year=int(year), month=int(month), day=int(day))
yesterday = day_date - timedelta(days=1)
yesterday_url = reverse('event.day', args=(
int(yesterday.year),
int(yesterday.month),
int(yesterday.day)
))
tomorrow = day_date + timedelta(days=1)
tomorrow_url = reverse('event.day', args=(
int(tomorrow.year),
int(tomorrow.month),
int(tomorrow.day)
))
# Check for empty pages for far-reaching years
if abs(year - date.today().year) > 6:
filters = get_query_filters(request.user, 'events.view_event')
is_events = Event.objects.filter(filters).filter(end_dt__gte=day_date, start_dt__lte=tomorrow)
if cat == 'priority':
is_events = is_events.filter(**{cat : True })
elif query and cat:
is_events = is_events.filter(**{cat : query})
if not is_events:
# Try to redirect old dates to the earliest event
if year < date.today().year:
latest_event = Event.objects.filter(start_dt__gte=day_date).order_by('start_dt').first()
if latest_event is not None:
latest_day = latest_event.start_dt.day
latest_month = latest_event.start_dt.month
latest_year = latest_event.start_dt.year
msg_string = 'No Events were found for %s. The next event is on %s, shown below.' % (day_date.strftime('%x'), latest_event.start_dt.strftime('%x'))
messages.add_message(request, messages.INFO, _(msg_string))
return HttpResponseRedirect(reverse('event.day', args=[latest_year, latest_month, latest_day]))
# Try to redirect far future dates to the latest event
else:
latest_event = Event.objects.filter(end_dt__lte=day_date).order_by('-end_dt').first()
if latest_event is not None:
latest_month = latest_event.end_dt.month
latest_year = latest_event.end_dt.year
latest_day = latest_event.end_dt.day
                    msg_string = 'No Events were found for %s. The most recent event was on %s, shown below.' % (day_date.strftime('%x'), latest_event.end_dt.strftime('%x'))
messages.add_message(request, messages.INFO, _(msg_string))
return HttpResponseRedirect(reverse('event.day', args=[latest_year, latest_month, latest_day]))
EventLog.objects.log()
return render_to_response(template_name, {
'date': day_date,
'now': datetime.now(),
'type': None,
'yesterday': yesterday,
'tomorrow': tomorrow,
'yesterday_url': yesterday_url,
'tomorrow_url': tomorrow_url,
'form': form,
}, context_instance=RequestContext(request))
@is_enabled('events')
def today_redirect(request):
today_date = request.GET.get('today_date', None)
try:
today_date = datetime.strptime(today_date, '%Y-%m-%d')
except:
today_date = datetime.now()
day, month, year = today_date.day, today_date.month, today_date.year
return HttpResponseRedirect(reverse('event.day', args=(int(year), int(month), int(day))))
@login_required
def types(request, template_name='events/types/index.html'):
from django.forms.models import modelformset_factory
TypeFormSet = modelformset_factory(Type, form=TypeForm, extra=2, can_delete=True)
if request.method == 'POST':
formset = TypeFormSet(request.POST)
if formset.is_valid():
formset.save()
# log "added" event_types
for event_type in formset.new_objects:
EventLog.objects.log(event_type="add", instance=event_type)
# log "changed" event_types
for event_type, changed_data in formset.changed_objects:
EventLog.objects.log(event_type="edit", instance=event_type)
# log "deleted" event_types
for event_type in formset.deleted_objects:
EventLog.objects.log(event_type="delete", instance=event_type)
formset = TypeFormSet()
return render_to_response(template_name, {'formset': formset},
context_instance=RequestContext(request))
@login_required
def reassign_type(request, type_id, form_class=ReassignTypeForm, template_name='events/types/reassign.html'):
type = get_object_or_404(Type, pk=type_id)
form = form_class(request.POST or None, type_id=type.id)
if request.method == 'POST':
if form.is_valid():
type.event_set.update(type=form.cleaned_data['type'])
msg_string = 'Successfully reassigned events from type "%s" to type "%s".' % (type, form.cleaned_data['type'])
messages.add_message(request, messages.SUCCESS, _(msg_string))
return redirect('event.search')
return render_to_response(template_name, {'type': type, 'form': form},
context_instance=RequestContext(request))
@is_enabled('events')
def global_registrant_search(request, template_name='events/registrants/global-search.html'):
if not has_perm(request.user, 'events.view_registrant'):
raise Http403
form = GlobalRegistrantSearchForm(request.GET)
if form.is_valid():
event = form.cleaned_data.get('event')
start_dt = form.cleaned_data.get('start_dt')
end_dt = form.cleaned_data.get('end_dt')
first_name = form.cleaned_data.get('first_name')
last_name = form.cleaned_data.get('last_name')
email = form.cleaned_data.get('email')
user_id = form.cleaned_data.get('user_id')
registrants = Registrant.objects.filter(registration__invoice__isnull=False).order_by("-update_dt")
if event:
registrants = registrants.filter(registration__event=event)
if start_dt:
registrants = registrants.filter(registration__event__start_dt__gte=start_dt)
if end_dt:
registrants = registrants.filter(registration__event__end_dt__lte=end_dt)
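        # user_id is free text from the search form; skip the filter if it is blank or non-numeric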
try:
registrants = registrants.filter(user=user_id)
except ValueError:
pass
registrants = (registrants.filter(first_name__icontains=first_name)
.filter(last_name__icontains=last_name)
.filter(email__icontains=email))
return render_to_response(template_name, {
'registrants': registrants,
'form': form,
}, context_instance=RequestContext(request))
@is_enabled('events')
@login_required
def registrant_search(request, event_id=0, template_name='events/registrants/search.html'):
search_criteria = None
search_text = None
search_method = None
status = request.GET.get('status', None)
event = get_object_or_404(Event, pk=event_id)
if not (has_perm(request.user,'events.view_registrant') or has_perm(request.user,'events.change_event', event)):
raise Http403
form = EventRegistrantSearchForm(request.GET)
if form.is_valid():
search_criteria = form.cleaned_data.get('search_criteria')
search_text = form.cleaned_data.get('search_text')
search_method = form.cleaned_data.get('search_method')
registrants = Registrant.objects.filter(registration__event=event).order_by("-update_dt")
active_registrants = registrants.filter(cancel_dt=None).count()
canceled_registrants = registrants.exclude(cancel_dt=None).count()
if search_criteria and search_text:
search_type = '__iexact'
if search_method == 'starts_with':
search_type = '__istartswith'
elif search_method == 'contains':
search_type = '__icontains'
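        # builds a dynamic lookup, e.g. {'first_name__icontains': search_text},
        # where the field name comes from the selected search criteria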
search_filter = {'%s%s' % (search_criteria,
search_type): search_text}
registrants = registrants.filter(**search_filter)
if status == 'active':
registrants = registrants.filter(cancel_dt=None)
elif status == 'canceled':
registrants = registrants.exclude(cancel_dt=None)
for reg in registrants:
if hasattr(reg, 'object'): reg = reg.object
if reg.custom_reg_form_entry:
reg.assign_mapped_fields()
reg.non_mapped_field_entries = reg.custom_reg_form_entry.get_non_mapped_field_entry_list()
if not reg.name:
reg.name = reg.custom_reg_form_entry.__unicode__()
EventLog.objects.log(instance=event)
return render_to_response(template_name, {
'event':event,
'registrants':registrants,
'active_registrants':active_registrants,
'canceled_registrants':canceled_registrants,
'form':form,
}, context_instance=RequestContext(request))
@is_enabled('events')
@login_required
def registrant_roster(request, event_id=0, roster_view='', template_name='events/registrants/roster.html'):
# roster_view in ['total', 'paid', 'non-paid']
from django.db.models import Sum
event = get_object_or_404(Event, pk=event_id)
has_addons = event.has_addons
discount_available = event.registration_configuration.discount_eligible
if not (has_perm(request.user, 'events.view_registrant') or has_perm(request.user, 'events.change_event', event)):
raise Http403
sort_order = request.GET.get('sort_order', 'last_name')
sort_type = request.GET.get('sort_type', 'asc')
if sort_order not in ('first_name', 'last_name', 'company_name'):
sort_order = 'last_name'
if sort_type not in ('asc', 'desc'):
sort_type = 'asc'
sort_field = sort_order
if sort_type == 'desc':
sort_field = '-%s' % sort_field
if not roster_view: # default to total page
roster_view = 'total'
# paid or non-paid or total
registrations = Registration.objects.filter(event=event, canceled=False)
if roster_view == 'paid':
registrations = registrations.filter(invoice__balance__lte=0)
elif roster_view == 'non-paid':
registrations = registrations.filter(invoice__balance__gt=0)
# Collect the info for custom reg form fields
# and store the values in roster_fields_dict.
# The key of roster_fields_dict is the entry.id.
# The map of entry.id and registrant.id is in the
# dictionary reg_form_entries_dict.
# This is to reduce the # of database queries.
roster_fields_dict = {}
# [(110, 11), (111, 10),...]
reg_form_entries = Registrant.objects.filter(
registration__event=event,
cancel_dt=None).values_list('id', 'custom_reg_form_entry')
# a dictionary of registrant.id as key and entry as value
reg_form_entries_dict = dict(reg_form_entries)
if reg_form_entries:
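        # only fields flagged display_on_roster are pulled; fields mapped to the
        # standard registrant columns are excluded since the roster shows them already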
reg_form_field_entries = CustomRegFieldEntry.objects.filter(
entry__in=[entry[1] for entry in reg_form_entries if entry[1] is not None],
field__display_on_roster=1
).exclude(field__map_to_field__in=[
'first_name',
'last_name',
'email',
'phone',
'position_title',
'company_name'
]).select_related().values_list(
'entry__id',
'field__label',
'value'
).order_by('field__position')
if reg_form_field_entries:
for field_entry in reg_form_field_entries:
key = str(field_entry[0])
if key not in roster_fields_dict:
roster_fields_dict[key] = []
roster_fields_dict[key].append({'label': field_entry[1], 'value': field_entry[2]})
registrants = Registrant.objects.filter(
registration__event=event, cancel_dt=None)
if roster_view in ('paid', 'non-paid'):
registrants = registrants.filter(registration__in=registrations)
# get the total checked in
total_checked_in = registrants.filter(checked_in=True).count()
# Pricing title - store with the registrant to improve the performance.
pricing_titles = RegConfPricing.objects.filter(
reg_conf=event.registration_configuration).values_list('id', 'title')
pricing_titles_dict = dict(pricing_titles)
# Store the price and invoice info with registrants to reduce the # of queries.
# need 4 mappings:
# 1) registrant_ids to pricing_ids
# 2) registration_ids to pricings_ids
# 3) registrant_ids to registration_ids
# 4) registration_ids to invoices
reg7n_pricing_reg8n = registrants.values_list('id', 'pricing__id', 'registration__id')
reg7n_to_pricing_dict = dict([(item[0], item[1]) for item in reg7n_pricing_reg8n])
reg8n_to_pricing_dict = dict(registrations.values_list('id', 'reg_conf_price__id'))
reg7n_to_reg8n_dict = dict([(item[0], item[2]) for item in reg7n_pricing_reg8n])
reg8n_to_invoice_objs = registrations.values_list(
'id',
'invoice__id',
'invoice__total',
'invoice__balance',
'invoice__admin_notes',
'invoice__tender_date')
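    # map registration id -> dict of invoice fields; registrations without an
    # invoice get zero/empty placeholder values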
reg8n_to_invoice_dict = {}
invoice_fields = ('id', 'total', 'balance', 'admin_notes', 'tender_date')
for item in reg8n_to_invoice_objs:
if item[1] is None:
reg8n_to_invoice_dict[item[0]] = dict(zip(invoice_fields, (0, 0, 0, '', '')))
else:
reg8n_to_invoice_dict[item[0]] = dict(zip(invoice_fields, item[1:]))
# registration to list of registrants mapping
reg8n_to_reg7n_dict = {}
for k, v in reg7n_to_reg8n_dict.iteritems():
reg8n_to_reg7n_dict.setdefault(v, []).append(k)
if sort_field in ('first_name', 'last_name'):
        # let registrants without names sink down to the bottom
        registrants_noname = registrants.filter(
            last_name='', first_name='').select_related('user').order_by('id')
        registrants_withname = registrants.exclude(
            last_name='', first_name='').select_related('user').order_by(sort_field)
        registrants = list(itertools.chain(registrants_withname, registrants_noname))
else:
registrants = registrants.order_by(sort_field).select_related('user')
if roster_fields_dict:
for registrant in registrants:
# assign custom form roster_field_list (if any) to registrants
key = str(reg_form_entries_dict[registrant.id])
if key in roster_fields_dict:
registrant.roster_field_list = roster_fields_dict[key]
num_registrants_who_paid = 0
num_registrants_who_owe = 0
for registrant in registrants:
# assign pricing title to the registrants
key = reg7n_to_pricing_dict[registrant.id]
if key not in pricing_titles_dict:
if reg7n_to_reg8n_dict[registrant.id] in reg8n_to_pricing_dict:
key = reg8n_to_pricing_dict[reg7n_to_reg8n_dict[registrant.id]]
if key in pricing_titles_dict:
registrant.price_title = pricing_titles_dict[key]
else:
registrant.price_title = 'Untitled'
# assign invoice dict
key = reg7n_to_reg8n_dict[registrant.id]
if key in reg8n_to_invoice_dict:
registrant.invoice_dict = reg8n_to_invoice_dict[key]
if registrant.invoice_dict['balance'] <= 0:
num_registrants_who_paid += 1
else:
num_registrants_who_owe += 1
for registrant in registrants:
# assign additional registrants
registrant.additionals = []
key = reg7n_to_reg8n_dict[registrant.id]
if reg8n_to_reg7n_dict[key]:
additional_ids = [id for id in reg8n_to_reg7n_dict[key]]
additional_ids.remove(registrant.id)
if additional_ids:
for r in registrants:
if r.id in additional_ids:
registrant.additionals.append(r)
# assign addons
addon_total_sum = Decimal('0')
if has_addons:
reg8n_to_addons_list = RegAddonOption.objects.filter(
regaddon__registration__in=registrations).values_list(
'regaddon__registration__id',
'regaddon__addon__title',
'option__title',
'regaddon__amount')
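        # each item: (registration id, addon title, option title, addon amount)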
if reg8n_to_addons_list:
addon_total_sum = sum([item[3] for item in reg8n_to_addons_list])
for registrant in registrants:
if registrant.is_primary:
registrant.addons = ''
registrant.addons_amount = Decimal('0')
for addon_item in reg8n_to_addons_list:
if addon_item[0] == registrant.registration_id:
registrant.addons += '%s(%s) ' % (addon_item[1], addon_item[2])
registrant.addons_amount += addon_item[3]
total_sum = float(0)
balance_sum = float(0)
# Get the total_sum and balance_sum.
totals_d = registrations.aggregate(
total_sum=Sum('invoice__total'), balance_sum=Sum('invoice__balance'))
total_sum = totals_d['total_sum']
balance_sum = totals_d['balance_sum']
EventLog.objects.log(instance=event)
return render_to_response(template_name, {
'event': event,
'registrants': registrants,
'balance_sum': balance_sum,
'total_sum': total_sum,
'num_registrants_who_paid': num_registrants_who_paid,
'num_registrants_who_owe': num_registrants_who_owe,
'roster_view': roster_view,
'sort_order': sort_order,
'sort_type': sort_type,
'has_addons': has_addons,
'discount_available': discount_available,
'addon_total_sum': addon_total_sum,
'total_checked_in': total_checked_in}, context_instance=RequestContext(request))
@csrf_exempt
@login_required
def registrant_check_in(request):
"""
    Check in a registrant, or undo a check-in.
"""
response_d = {'error': True}
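    # Expected POST parameters:
    #   id     - pk of the Registrant to update
    #   action - 'checked_in' or 'not_checked_in'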
if request.method == 'POST':
registrant_id = request.POST.get('id', None)
action = request.POST.get('action', None)
if registrant_id and action:
[registrant] = Registrant.objects.filter(id=registrant_id)[:1] or [None]
if registrant:
if action == 'checked_in':
if not registrant.checked_in:
registrant.checked_in = True
registrant.checked_in_dt = datetime.now()
registrant.save()
response_d['checked_in_dt'] = registrant.checked_in_dt
if isinstance(response_d['checked_in_dt'], datetime):
response_d['checked_in_dt'] = response_d['checked_in_dt'].strftime('%m/%d %I:%M%p')
elif action == 'not_checked_in':
if registrant.checked_in:
registrant.checked_in = False
registrant.save()
response_d['checked_in_dt'] = ''
response_d['error'] = False
return HttpResponse(json.dumps(response_d), content_type="text/plain")
@is_enabled('events')
@login_required
def registrant_details(request, id=0, hash='', template_name='events/registrants/details.html'):
registrant = get_object_or_404(Registrant, pk=id)
if has_perm(request.user,'registrants.view_registrant',registrant):
EventLog.objects.log(instance=registrant)
return render_to_response(template_name, {'registrant': registrant},
context_instance=RequestContext(request))
else:
raise Http403
@is_enabled('events')
def registration_confirmation(request, id=0, reg8n_id=0, hash='',
template_name='events/reg8n/register-confirm.html'):
"""
Registration information.
Any registrant (belonging to this registration)
or administrator can see the entire registration.
"""
event = get_object_or_404(Event, pk=id)
registrants_count = 1
registrant_hash = hash
if reg8n_id:
registration = get_object_or_404(Registration, event=event, pk=reg8n_id)
is_permitted = has_perm(request.user, 'events.view_registration', registration)
is_registrant = request.user in [r.user for r in registration.registrant_set.all()]
# permission denied; if not given explicit permission or not registrant
if not any((is_permitted, is_registrant)):
raise Http403
registrant = registration.registrant
elif registrant_hash:
# not real time index, pull directly from db
#sqs = SearchQuerySet()
#sqs = sqs.models(Registrant)
#sqs = sqs.filter(event_pk=event.pk)
#sqs = sqs.auto_query(sqs.query.clean(registrant_hash))
sqs = Registrant.objects.filter(registration__event=event)
sqs = sqs.order_by("-update_dt")
        # find the matching registrant - if this loop gets heavy, add a hash field to Registrant
registrant = None
for reg in sqs:
if reg.hash == registrant_hash:
registrant = reg
break
if not registrant:
raise Http404
try:
#registrant = sqs[0].object
registration = registrant.registration
except:
raise Http404
registrants = registration.registrant_set.all().order_by('id')
registrants_count = registration.registrant_set.count()
addons = registration.regaddon_set.all().order_by('id')
for registrant in registrants:
#registrant.assign_mapped_fields()
if registrant.custom_reg_form_entry:
registrant.name = registrant.custom_reg_form_entry.__unicode__()
else:
if registrant.first_name or registrant.last_name:
registrant.name = ' '.join([registrant.first_name, registrant.last_name])
EventLog.objects.log(instance=registration)
return render_to_response(template_name, {
'event':event,
'registrant':registrant,
'registration':registration,
'registrants': registrants,
'registrants_count': registrants_count,
'addons': addons,
'hash': registrant_hash,
},
context_instance=RequestContext(request))
@login_required
def message_add(request, event_id, form_class=MessageAddForm, template_name='events/message/add.html'):
from tendenci.apps.emails.models import Email
event = get_object_or_404(Event, pk=event_id)
if not has_perm(request.user,'events.change_event',event): raise Http403
if request.method == "POST":
email = Email()
form = form_class(event.id, request.POST, instance=email)
if form.is_valid():
email.sender = get_setting('site', 'global', 'siteemailnoreplyaddress')
email.sender_display = request.user.get_full_name()
email.reply_to = request.user.email
email.recipient = request.user.email
email.content_type = "html"
email.save(request.user)
subject = email.subject
registrant_kwargs = {}
registrant_kwargs['payment_status'] = form.cleaned_data['payment_status']
email_registrants(event, email, **registrant_kwargs)
            registrant_kwargs['summary'] = '<font face="Arial" color="#000000">'
            registrant_kwargs['summary'] += 'Emails sent as a result of Calendar Event Notification</font><br><br>'
            registrant_kwargs['summary'] += '<font face="Arial" color="#000000">'
registrant_kwargs['summary'] += '<br><br>Email Sent Appears Below in Raw Format'
registrant_kwargs['summary'] += '</font><br><br>'
registrant_kwargs['summary'] += email.body
# send summary
email.subject = 'SUMMARY: %s' % email.subject
email.body = registrant_kwargs['summary']
email.recipient = request.user.email
email.send()
# send another copy to the site webmaster
email.recipient = get_setting('site', 'global', 'sitewebmasteremail')
if email.recipient:
email.subject = 'WEBMASTER SUMMARY: %s' % email.subject
email.body = '<h2>Site Webmaster Notification of Calendar Event Send</h2>%s' % email.body
email.send()
EventLog.objects.log(instance=email)
msg_string = 'Successfully sent email "%s" to event registrants for event "%s".' % (subject, event.title)
messages.add_message(request, messages.SUCCESS, msg_string)
return HttpResponseRedirect(reverse('event', args=([event_id])))
else:
defaultsubject = render_to_string('events/message/subject-text.txt', {'event': event},
context_instance=RequestContext(request))
openingtext = render_to_string('events/message/opening-text.txt', {'event': event},
context_instance=RequestContext(request))
form = form_class(event.id, initial={'subject':defaultsubject, 'body': openingtext})
return render_to_response(template_name, {
'event':event,
'form': form
},context_instance=RequestContext(request))
@login_required
def edit_email(request, event_id, form_class=EmailForm, template_name='events/edit_email.html'):
event = get_object_or_404(Event, pk=event_id)
if not has_perm(request.user,'events.change_event',event): raise Http403
reg_conf = event.registration_configuration
email = reg_conf.email
if request.method == "POST":
form = form_class(request.POST, instance=email)
if form.is_valid():
email = form.save(commit=False)
if not email.id:
email.creator = request.user
email.creator_username = request.user.username
email.owner = request.user
email.owner_username = request.user.username
email.save()
if not reg_conf.email:
reg_conf.email = email
reg_conf.save()
msg_string = 'Successfully saved changes.'
messages.add_message(request, messages.SUCCESS, _(msg_string))
if request.POST.get('submit', '') == 'Save & Test':
render_event_email(event, email)
site_url = get_setting('site', 'global', 'siteurl')
email.recipient = request.user.email
email.subject = "Reminder: %s" % email.subject
email.body = convert_absolute_urls(email.body, site_url)
email.send()
messages.add_message(request, messages.SUCCESS, _('Successfully sent a test email.'))
else:
if not email:
openingtext = get_default_reminder_template(event)
[organizer] = Organizer.objects.filter(event=event)[:1] or [None]
form = form_class(initial={
'subject': '{{ event_title }}',
'body': openingtext,
'reply_to': organizer and organizer.user
and organizer.user.email or request.user.email,
'sender_display': organizer and organizer.name})
else:
form = form_class(instance=email)
return render_to_response(template_name, {
'event':event,
'form': form
},context_instance=RequestContext(request))
@is_enabled('events')
def registrant_export(request, event_id, roster_view=''):
"""
    Export all registrations for a specific event
"""
event = get_object_or_404(Event, pk=event_id)
# if they can edit it, they can export it
if not has_perm(request.user,'events.change_event',event):
raise Http403
import xlwt
from collections import OrderedDict
from decimal import Decimal
# create the excel book and sheet
book = xlwt.Workbook(encoding='utf8')
sheet = book.add_sheet('Registrants')
if roster_view == 'non-paid':
registrants = event.registrants(with_balance=True)
file_name = event.title.strip().replace(' ','-')
file_name = 'Event-%s-Non-Paid.xls' % re.sub(r'[^a-zA-Z0-9._]+', '', file_name)
elif roster_view == 'paid':
registrants = event.registrants(with_balance=False)
file_name = event.title.strip().replace(' ','-')
file_name = 'Event-%s-Paid.xls' % re.sub(r'[^a-zA-Z0-9._]+', '', file_name)
else:
registrants = event.registrants()
file_name = event.title.strip().replace(' ','-')
file_name = 'Event-%s-Total.xls' % re.sub(r'[^a-zA-Z0-9._]+', '', file_name)
# the key is what the column will be in the
# excel sheet. the value is the database lookup
# Used OrderedDict to maintain the column order
registrant_mappings = OrderedDict([
('first_name', 'first_name'),
('last_name', 'last_name'),
('phone', 'phone'),
('email', 'email'),
('position_title', 'position_title'),
('registration_id', 'registration__pk'),
('price type', 'registration__reg_conf_price__title'),
('invoice_id', 'registration__invoice__pk'),
('registration price', 'registration__amount_paid'),
('payment method', 'registration__payment_method__machine_name'),
('balance', 'registration__invoice__balance'),
('company', 'company_name'),
('address', 'address'),
('city', 'city'),
('state', 'state'),
('zip', 'zip'),
('country', 'country'),
('date', 'create_dt'),
])
registrant_lookups = registrant_mappings.values()
# Append the heading to the list of values that will
# go into the excel sheet
values_list = []
values_list.insert(0, registrant_mappings.keys())
# excel date styles
balance_owed_style = xlwt.easyxf('font: color-index red, bold on')
default_style = xlwt.Style.default_style
datetime_style = xlwt.easyxf(num_format_str='mm/dd/yyyy hh:mm')
date_style = xlwt.easyxf(num_format_str='mm/dd/yyyy')
# if registrations:
# # bulk of the work happens here
# # loop through all the registrations and append the output
# # of values_list django method to the values_list list
# for registration in registrations:
# registrants = registration.registrant_set.all()
# registrants = registrants.exclude(cancel_dt__isnull=False)
# registrants = registrants.values_list(*registrant_lookups)
# for registrant in registrants:
# values_list.append(registrant)
for registrant in registrants.values_list(*registrant_lookups):
values_list.append(registrant)
# Write the data enumerated to the excel sheet
for row, row_data in enumerate(values_list):
for col, val in enumerate(row_data):
# styles the date/time fields
if isinstance(val, datetime):
style = datetime_style
elif isinstance(val, date):
style = date_style
else:
style = default_style
            # style the balance column; balances owed are shown in red
            if col == list(registrant_mappings.keys()).index('balance'):
                balance = val or 0
                if isinstance(balance, Decimal) and balance > 0:
                    style = balance_owed_style
sheet.write(row, col, val, style=style)
EventLog.objects.log(instance=event)
response = HttpResponse(content_type='application/vnd.ms-excel')
response['Content-Disposition'] = 'attachment; filename="%s"' % file_name
book.save(response)
return response
@is_enabled('events')
def registrant_export_with_custom(request, event_id, roster_view=''):
"""
    Export all registrations for a specific event, with or without custom registration forms
"""
event = get_object_or_404(Event, pk=event_id)
# if they can view registrants or edit the event, they can export it
if not (has_perm(request.user, 'events.view_registrant') or
has_perm(request.user, 'events.change_event', event)):
raise Http403
# create the excel book and sheet
book = xlwt.Workbook(encoding='utf8')
sheet = book.add_sheet('Registrants')
# excel date styles
styles = {
'balance_owed_style': xlwt.easyxf('font: color-index red, bold on'),
'default_style': xlwt.Style.default_style,
'datetime_style': xlwt.easyxf(num_format_str='mm/dd/yyyy hh:mm'),
'date_style': xlwt.easyxf(num_format_str='mm/dd/yyyy')
}
if roster_view == 'non-paid':
registrants = event.registrants(with_balance=True)
file_name = event.title.strip().replace(' ', '-')
file_name = 'Event-%s-Non-Paid.xls' % re.sub(r'[^a-zA-Z0-9._]+', '', file_name)
elif roster_view == 'paid':
registrants = event.registrants(with_balance=False)
file_name = event.title.strip().replace(' ', '-')
file_name = 'Event-%s-Paid.xls' % re.sub(r'[^a-zA-Z0-9._]+', '', file_name)
else:
registrants = event.registrants()
file_name = event.title.strip().replace(' ', '-')
file_name = 'Event-%s-Total.xls' % re.sub(r'[^a-zA-Z0-9._]+', '', file_name)
from collections import namedtuple
# the key is what the column will be in the
# excel sheet. the value is the database lookup
# Used OrderedDict to maintain the column order
registrant_mappings = OrderedDict([
('first_name', 'first_name'),
('last_name', 'last_name'),
('phone', 'phone'),
('email', 'email'),
('position_title', 'position_title'),
('company', 'company_name'),
('address', 'address'),
('city', 'city'),
('state', 'state'),
('zip', 'zip'),
('country', 'country'),
('meal_option', 'meal_option'),
('date', 'create_dt'),
('registration_id', 'registration__pk'),
('addons', 'registration__addons_added'),
('is_primary', 'is_primary'),
('amount', 'amount'),
('price type', 'pricing__title'),
('invoice_id', 'registration__invoice__pk'),
('registration price', 'registration__invoice__total'),
('payment method', 'registration__payment_method__machine_name'),
('balance', 'registration__invoice__balance'),
])
if not registrants.exclude(meal_option='').exists():
# remove meal_option if the field is empty for every registrant
del registrant_mappings['meal_option']
RegistrantTuple = namedtuple('Registrant', registrant_mappings.values())
registrant_lookups = registrant_mappings.values()
# Append the heading to the list of values that will
# go into the excel sheet
values_list = []
# registrants with regular reg form
non_custom_registrants = registrants.filter(custom_reg_form_entry=None)
non_custom_registrants = non_custom_registrants.values('pk', *registrant_lookups)
if non_custom_registrants:
values_list.insert(0, registrant_mappings.keys() + ['is_paid', 'primary_registrant'])
for registrant_dict in non_custom_registrants:
is_paid = False
primary_registrant = u'-- N/A ---'
# update registrant values
            if not registrant_dict['is_primary']:
                is_paid = (registrant_dict['registration__invoice__balance'] == 0)
                # look up the primary registrant on the same registration
                registrant = Registrant.objects.get(pk=registrant_dict['pk'])
                primary_registrant = registrant.registration.registrant
if primary_registrant:
primary_registrant = '%s %s' % (primary_registrant.first_name, primary_registrant.last_name)
registrant_dict['registration__invoice__total'] = 0
registrant_dict['registration__invoice__balance'] = 0
del registrant_dict['pk']
# keeps order of values
registrant_tuple = RegistrantTuple(**registrant_dict)
values_list.append(tuple(registrant_tuple) + (is_paid, primary_registrant))
values_list.append(['\n'])
# Write the data enumerated to the excel sheet
balance_index = 17
start_row = 0
render_registrant_excel(sheet, values_list, balance_index, styles, start=start_row)
start_row += len(values_list)
# ***now check for the custom registration forms***
custom_reg_exists = Registrant.objects.filter(
registration__event=event).exclude(custom_reg_form_entry=None).exists()
if custom_reg_exists:
# get a list of custom registration forms
sql = """
SELECT form_id
FROM events_customregformentry
WHERE id IN (
SELECT custom_reg_form_entry_id
FROM events_registrant
WHERE (custom_reg_form_entry_id is not NULL)
AND registration_id IN (
SELECT id FROM events_registration
WHERE event_id=%d))
ORDER BY id
""" % event.id
cursor = connection.cursor()
cursor.execute(sql)
rows = cursor.fetchall()
# list of form ids
form_ids = list(set([row[0] for row in rows]))
# remove some fields from registrant_mappings because they are
# stored in the field entries
fields_to_remove = ['first_name', 'last_name', 'phone',
'email', 'company', 'address', 'city',
'state', 'zip', 'country']
for field in fields_to_remove:
del registrant_mappings[field]
registrant_lookups = registrant_mappings.values()
registrant_lookups.append('custom_reg_form_entry')
CustomRegistrantTuple = namedtuple('CustomRegistrant', registrant_mappings.values())
# loop through all custom registration forms
for form_id in form_ids:
rows_list = []
custom_reg_form = CustomRegForm.objects.get(id=form_id)
# get a list of fields in the type (id, label) and store in
# an ordered dict
fields = CustomRegField.objects.filter(
form=custom_reg_form).order_by('position').values_list('id', 'label')
fields_dict = OrderedDict(fields)
field_ids = fields_dict.keys()
# field header row - all the field labels in the form + registrant_mappings.keys
labels = fields_dict.values()
labels.extend(registrant_mappings.keys())
rows_list.append([custom_reg_form.name])
rows_list.append(labels)
# get the registrants for this form
custom_registrants = registrants.filter(custom_reg_form_entry__form=custom_reg_form)
custom_registrants = custom_registrants.values(*registrant_lookups)
for registrant in custom_registrants:
entry_id = registrant.pop('custom_reg_form_entry')
if not registrant['is_primary']:
registrant['registration__invoice__total'] = 0
registrant['registration__invoice__balance'] = 0
# keep the order of the values in the registrant dict
registrant_tuple = CustomRegistrantTuple(**registrant)
sql = """
SELECT field_id, value
FROM events_customregfieldentry
WHERE field_id IN (%s)
AND entry_id=%d
""" % (','.join([str(id) for id in field_ids]), entry_id)
cursor.execute(sql)
entry_rows = cursor.fetchall()
values_dict = dict(entry_rows)
custom_values_list = []
for field_id in field_ids:
custom_values_list.append(values_dict.get(field_id, ''))
custom_values_list.extend(registrant_tuple)
rows_list.append(custom_values_list)
rows_list.append(['\n'])
balance_index = len(field_ids) + len(registrant_lookups) - 1
# write to spread sheet
render_registrant_excel(sheet, rows_list, balance_index, styles, start=start_row)
start_row += len(rows_list)
EventLog.objects.log(instance=event)
response = HttpResponse(content_type='application/vnd.ms-excel')
response['Content-Disposition'] = 'attachment; filename="%s"' % file_name
book.save(response)
return response
@is_enabled('events')
@login_required
def delete_speaker(request, id):
"""
This delete is designed based on the add and edit view where
a speaker is considered to only be a speaker for a single event.
"""
if not has_perm(request.user,'events.delete_speaker'):
raise Http403
speaker = get_object_or_404(Speaker, id = id)
event = speaker.event.all()[0]
msg_string = 'Successfully deleted %s' % unicode(speaker)
messages.add_message(request, messages.SUCCESS, _(msg_string))
speaker.delete()
return redirect('event', id=event.id)
#@is_enabled('events')
#@login_required
#def delete_group_pricing(request, id):
# if not has_perm(request.user,'events.delete_registrationconfiguration'):
# raise Http403
#
# gp = get_object_or_404(GroupRegistrationConfiguration, id = id)
# event = Event.objects.get(registration_configuration=gp.config)
# msg_string = 'Successfully deleted Group Pricing for %s' % gp
# messages.add_message(request, messages.SUCCESS, _(msg_string))
#
# gp.delete()
#
# return redirect('event', id=event.id)
#@is_enabled('events')
#@login_required
#def delete_special_pricing(request, id):
# if not has_perm(request.user,'events.delete_registrationconfiguration'):
# raise Http403
#
# s = get_object_or_404(SpecialPricing, id = id)
# event = Event.objects.get(registration_configuration=s.config)
# msg_string = 'Successfully deleted Special Pricing for %s' % s
# messages.add_message(request, messages.SUCCESS, _(msg_string))
#
# s.delete()
#
# return redirect('event', id=event.id)
@is_enabled('events')
@login_required
def copy(request, id):
if not has_perm(request.user, 'events.add_event'):
raise Http403
event = get_object_or_404(Event, id=id)
new_event = copy_event(event, request.user)
EventLog.objects.log(instance=new_event)
    msg_string = 'Successfully copied Event: %s.<br />Edit the new event (set to <strong>private</strong>) below.' % unicode(new_event)
messages.add_message(request, messages.SUCCESS, _(msg_string))
return redirect('event.edit', id=new_event.id)
@is_enabled('events')
@login_required
def minimal_add(request, form_class=PendingEventForm, template_name="events/minimal_add.html"):
"""
Minimal add for events. Events created here require admin approval.
This does not require users to have the add_event permission.
The minimaladdform setting must be enabled for this form to be active.
"""
# check if this form is enabled for use.
active = get_setting('module', 'events', 'minimaladdform')
# raise 404 if form not active
if not active:
raise Http404
if request.method == "POST":
form = form_class(request.POST, request.FILES, user=request.user, prefix="event")
form_place = PlaceForm(request.POST, prefix="place")
if form.is_valid() and form_place.is_valid():
event = form.save(commit=False)
# update all permissions and save the model
event = update_perms_and_save(request, form, event)
form.save_m2m()
# handle image
photo = form.cleaned_data['photo_upload']
if photo:
image = EventPhoto()
image.object_id = event.id
image.content_type = ContentType.objects.get_for_model(event.__class__)
image.creator = request.user
image.creator_username = request.user.username
image.owner = request.user
image.owner_username = request.user.username
filename = "%s-%s" % (event.id, photo.name)
photo.file.seek(0)
image.file.save(filename, photo)
event.image = image
# save place
place = form_place.save()
event.place = place
# place event into pending queue
event.status = True
event.status_detail = 'pending'
event.save(log=False)
if request.user.is_superuser:
                msg_string = 'Successfully added %s.\n ' % unicode(event)
                msg_string += 'Note that this event is pending. '
                msg_string += 'You can activate or edit it if needed.'
else:
msg_string = 'Your event submission has been received. '
msg_string += 'It is now subject to approval.'
messages.add_message(request, messages.SUCCESS,
_(msg_string))
recipients = get_notice_recipients('site', 'global', 'allnoticerecipients')
admin_emails = get_setting('module', 'events', 'admin_emails').replace(" ", "").split(",")
recipients = recipients + admin_emails
if recipients and notification:
notification.send_emails(recipients, 'event_added', {
'event':event,
'user':request.user,
'registrants_paid':event.registrants(with_balance=False),
'registrants_pending':event.registrants(with_balance=True),
'SITE_GLOBAL_SITEDISPLAYNAME': get_setting('site', 'global', 'sitedisplayname'),
'SITE_GLOBAL_SITEURL': get_setting('site', 'global', 'siteurl'),
})
if request.user.is_superuser:
return redirect(reverse('event', args=[event.pk]))
return redirect('events')
else:
form = form_class(user=request.user, prefix="event")
form_place = PlaceForm(prefix="place")
return render_to_response(template_name, {
'form': form,
'form_place': form_place,
}, context_instance=RequestContext(request))
@is_enabled('events')
@login_required
def pending(request, template_name="events/pending.html"):
"""
Show a list of pending events to be approved.
"""
if not request.user.profile.is_superuser:
raise Http403
events = Event.objects.filter(status=True, status_detail='pending').order_by('start_dt')
EventLog.objects.log()
return render_to_response(template_name, {
'events': events,
}, context_instance=RequestContext(request))
@login_required
def approve(request, event_id, template_name="events/approve.html"):
"""
Approve a selected event
"""
if not request.user.profile.is_superuser:
raise Http403
event = get_object_or_404(Event, pk=event_id)
if request.method == "POST":
event.status = True
event.status_detail = 'active'
event.save()
msg_string = 'Successfully approved %s' % unicode(event)
messages.add_message(request, messages.SUCCESS, _(msg_string))
return redirect('event', id=event_id)
return render_to_response(template_name, {
'event': event,
}, context_instance=RequestContext(request))
@login_required
def list_addons(request, event_id, template_name="events/addons/list.html"):
"""List addons of an event"""
event = get_object_or_404(Event, pk=event_id)
if not has_view_perm(request.user,'events.view_event', event):
raise Http404
return render_to_response(template_name, {
'event':event,
'addons':event.addon_set.all(),
}, context_instance=RequestContext(request))
@login_required
def add_addon(request, event_id, template_name="events/addons/add.html"):
"""Add an addon for an event"""
event = get_object_or_404(Event, pk=event_id)
if not has_perm(request.user,'events.change_event', event):
raise Http404
OptionFormSet = modelformset_factory(
AddonOption,
formset=AddonOptionBaseModelFormSet,
form=AddonOptionForm,
extra=1)
if request.method == "POST":
form = AddonForm(request.POST)
formset = OptionFormSet(request.POST, queryset=AddonOption.objects.none(), prefix="options", auto_id='options_formset')
if False not in (form.is_valid(), formset.is_valid()):
addon = form.save(commit=False)
addon.event = event
addon.save()
options = formset.save(commit=False)
for option in options:
option.addon = addon
option.save()
EventLog.objects.log(instance=addon)
msg_string = 'Successfully added %s' % unicode(addon)
messages.add_message(request, messages.SUCCESS, _(msg_string))
return redirect('event', event.pk)
else:
form = AddonForm()
formset = OptionFormSet(queryset=Addon.objects.none(), prefix="options", auto_id='options_formset')
multi_event_forms = [formset]
return render_to_response(template_name, {
'form': form,
'event':event,
'formset': multi_event_forms,
}, context_instance=RequestContext(request))
@login_required
def edit_addon(request, event_id, addon_id, template_name="events/addons/edit.html"):
"""Edit addon for an event"""
event = get_object_or_404(Event, pk=event_id)
if not has_perm(request.user,'events.change_event', event):
raise Http404
addon = get_object_or_404(Addon, pk=addon_id)
options_set = AddonOption.objects.filter(addon=addon)
extra_form = 0
if not options_set.exists():
extra_form = 1
OptionFormSet = modelformset_factory(
AddonOption,
formset=AddonOptionBaseModelFormSet,
form=AddonOptionForm,
extra=extra_form,
can_delete=True)
if request.method == "POST":
form = AddonForm(request.POST, instance=addon)
formset = OptionFormSet(request.POST, queryset=options_set, prefix="options", auto_id='options_formset')
if False not in (form.is_valid(), formset.is_valid()):
addon = form.save()
options = formset.save(commit=False)
for option in options:
option.addon = addon
option.save()
EventLog.objects.log(instance=addon)
msg_string = 'Successfully updated %s' % unicode(addon)
messages.add_message(request, messages.SUCCESS, _(msg_string))
return redirect('event', event.pk)
else:
form = AddonForm(instance=addon)
formset = OptionFormSet(queryset=options_set, prefix="options", auto_id='options_formset')
multi_event_forms = [formset]
return render_to_response(template_name, {
'formset':multi_event_forms,
'form':form,
'event':event,
}, context_instance=RequestContext(request))
@login_required
def disable_addon(request, event_id, addon_id):
"""disable addon for an event"""
event = get_object_or_404(Event, pk=event_id)
if not has_perm(request.user,'events.change_event', event):
raise Http404
addon = get_object_or_404(Addon, pk=addon_id)
EventLog.objects.log(instance=addon)
    addon.delete()  # soft delete: the addon is only marked inactive so existing regaddons are preserved
msg_string = "Successfully disabled the %s" % addon.title
messages.add_message(request, messages.SUCCESS, _(msg_string))
return redirect('event.list_addons', event.id)
@login_required
def enable_addon(request, event_id, addon_id):
"""enable addon for an event"""
event = get_object_or_404(Event, pk=event_id)
if not has_perm(request.user,'events.change_event', event):
raise Http404
addon = get_object_or_404(Addon, pk=addon_id)
addon.status = True
addon.save()
EventLog.objects.log(instance=addon)
msg_string = "Successfully enabled the %s" % addon.title
messages.add_message(request, messages.SUCCESS, _(msg_string))
return redirect('event.list_addons', event.id)
@login_required
def delete_addon(request, event_id, addon_id):
"""delete an addon for an event"""
event = get_object_or_404(Event, pk=event_id)
if not has_perm(request.user,'events.change_event', event):
raise Http404
addon = get_object_or_404(Addon, pk=addon_id)
EventLog.objects.log(instance=addon)
    addon.delete(from_db=True)  # from_db=True really removes the addon row, unlike disable_addon which only marks it inactive
msg_string = "Successfully deleted the %s" % addon.title
messages.add_message(request, messages.SUCCESS, _(msg_string))
return redirect('event.list_addons', event.id)
@login_required
def create_ics(request, template_name="events/ics.html"):
"""Create ICS"""
if not request.user.is_superuser:
raise Http403
if request.method == 'POST':
form = EventICSForm(request.POST)
if form.is_valid():
ics_id = run_precreate_ics('events', 'event', form.cleaned_data['user'])
return redirect('ics.status', ics_id)
else:
form = EventICSForm()
return render_to_response(template_name, {
'form': form,
}, context_instance=RequestContext(request))
@is_enabled('events')
@login_required
def myevents(request, template_name='events/myevents.html'):
""" Logged-in user's registered events"""
events = Event.objects.filter(registration__registrant__email=request.user.email,
registration__registrant__cancel_dt=None).distinct()
if 'all' not in request.GET:
events = events.exclude(end_dt__lt=datetime.now())
show = 'True'
else:
show = None
events = events.order_by('-start_dt')
#types = Type.objects.all().order_by('name')
EventLog.objects.log()
return render_to_response(
template_name,
{'events': events, 'show': show},
context_instance=RequestContext(request))
@login_required
def download_template_csv(request, file_ext='.csv'):
if not request.user.profile.is_superuser:
raise Http403
if file_ext == '.csv':
filename = "import-events.csv"
else:
filename = "import-events.xls"
import_field_list = [
"type",
"title",
"description",
"all_day",
"start_dt",
"end_dt",
"timezone",
"place__name",
"place__description",
"place__address",
"place__city",
"place__state",
"place__zip",
"place__country",
"place__url",
"on_weekend",
"external_url",
]
data_row_list = []
return render_excel(filename, import_field_list, data_row_list, file_ext)
@login_required
def import_add(request, form_class=ImportForm,
template_name="events/imports/events_add.html"):
"""Event Import Step 1: Validates and saves import file"""
if not request.user.profile.is_superuser:
raise Http403
if request.method == 'POST':
form = form_class(request.POST, request.FILES)
if form.is_valid():
import_i = form.save(commit=False)
import_i.app_label = 'events'
import_i.model_name = 'event'
import_i.save()
EventLog.objects.log()
# reset the password_promt session
del request.session['password_promt']
return HttpResponseRedirect(
reverse('event.import_preview', args=[import_i.id]))
else:
form = form_class()
return render_to_response(template_name, {'form': form},
context_instance=RequestContext(request))
@login_required
def import_preview(request, import_id,
template_name="events/imports/events_preview.html"):
"""Import Step 2: Preview import result"""
if not request.user.profile.is_superuser:
raise Http403
import_i = get_object_or_404(Import, id=import_id)
event_list, invalid_list = event_import_process(import_i,
preview=True)
return render_to_response(template_name, {
'total': import_i.total_created + import_i.total_invalid,
'event_list': event_list,
'import_i': import_i,
}, context_instance=RequestContext(request))
@login_required
def import_process(request, import_id,
template_name="events/imports/events_process.html"):
"""Import Step 3: Import into database"""
if not request.user.profile.is_superuser:
raise Http403 # admin only page
import_i = get_object_or_404(Import, id=import_id)
subprocess.Popen([python_executable(), 'manage.py', 'import_events', str(import_id)])
return render_to_response(template_name, {
'total': import_i.total_created + import_i.total_invalid,
"import_i": import_i,
}, context_instance=RequestContext(request))
@is_enabled('events')
@login_required
@password_required
def export(request, template_name="events/export.html"):
"""Export Directories"""
if not request.user.profile.is_superuser:
raise Http403
form = EventExportForm(request.POST or None)
if request.method == 'POST' and form.is_valid():
start_dt = form.cleaned_data['start_dt']
end_dt = form.cleaned_data['end_dt']
by_type = form.cleaned_data['by_type']
identifier = int(time.time())
temp_file_path = 'export/events/%s_temp.csv' % identifier
default_storage.save(temp_file_path, ContentFile(''))
process_options = [python_executable(), "manage.py", "event_export_process",
"--identifier=%s" % identifier,
"--user=%s" % request.user.id]
if by_type:
process_options.append("--type=%s" % by_type.pk)
if start_dt:
process_options.append("--start_dt=%s" % start_dt.strftime('%m/%d/%Y'))
if end_dt:
process_options.append("--end_dt=%s" % end_dt.strftime('%m/%d/%Y'))
# start the process
subprocess.Popen(process_options)
EventLog.objects.log()
return HttpResponseRedirect(reverse('event.export_status', args=[identifier]))
context = {'form': form}
return render_to_response(template_name, context, RequestContext(request))
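# Illustrative note: the Popen call above ends up running something like
#   python manage.py event_export_process --identifier=1700000000 --user=1 \
#       --type=3 --start_dt=01/01/2024 --end_dt=12/31/2024
# where the last three flags are only appended when the corresponding form fields are
# set (the identifier value here is just an example timestamp).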
@is_enabled('events')
@login_required
@password_required
def export_status(request, identifier, template_name="events/export_status.html"):
"""Display export status"""
if not request.user.profile.is_superuser:
raise Http403
export_path = 'export/events/%s.csv' % identifier
download_ready = False
if default_storage.exists(export_path):
download_ready = True
else:
temp_export_path = 'export/events/%s_temp.csv' % identifier
if not default_storage.exists(temp_export_path) and \
not default_storage.exists(export_path):
raise Http404
context = {'identifier': identifier,
'download_ready': download_ready}
return render_to_response(template_name, context, RequestContext(request))
@is_enabled('events')
@login_required
@password_required
def export_download(request, identifier):
"""Download the directories export."""
if not request.user.profile.is_superuser:
raise Http403
file_name = '%s.csv' % identifier
file_path = 'export/events/%s' % file_name
if not default_storage.exists(file_path):
raise Http404
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename="events_export_%s"' % file_name
response.content = default_storage.open(file_path).read()
return response
@login_required
def reports_financial(request, template_name="events/financial_reports.html"):
if not request.user.profile.is_superuser:
raise Http403
events = Event.objects.all().order_by('start_dt')
form = EventReportFilterForm(request.GET or None)
if form.is_valid():
events = form.filter(queryset=events)
context = {'events' : events,
'form' : form}
return render_to_response(template_name, context, RequestContext(request))
|
[
"blake.boudreau@gmail.com"
] |
blake.boudreau@gmail.com
|
1dfaa8cf11a2d14dd19b5bf31b58f44bf15e34a0
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03379/s477030145.py
|
93534519416891d22f6c4c276609f50101689a1d
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 539
|
py
|
def main():
n = int(input())
x_lst = list(map(int, input().split()))
x_sorted_lst = sorted(x_lst)
median1 = x_sorted_lst[n // 2 - 1]
median2 = x_sorted_lst[n // 2]
if median1 == median2:
lst = [median1] * n
else:
lst = []
for i in range(n):
x = x_lst[i]
if x <= median1:
lst.append(median2)
elif median2 <= x:
lst.append(median1)
for i in range(n):
print(lst[i])
if __name__ == '__main__':
main()
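# Worked example (assuming the task is: for each i, print the median of the list with
# x_i removed, n even). Removing a value at or below the lower middle element leaves
# the upper middle element as the new median, and vice versa, which is the branch above.
# For x = [2, 4, 4, 3]: sorted -> [2, 3, 4, 4], median1 = 3, median2 = 4,
# so the program prints 4, 3, 3, 4 (one value per line).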
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
831580494562f0b3d39fe4fe401918c9104bb1e3
|
dfcf0df4fa83746f03bba57c1c64450771250e8c
|
/strava/cli/activities/commands.py
|
253dda9257fdebff79ee94d700807f88b58f4cf4
|
[
"MIT"
] |
permissive
|
dparret/strava-cli
|
30d1eed3e8f0399966dc635f5df6950c9e319607
|
2426ea7f3fe4580aea352476b261cec31d3f0b11
|
refs/heads/master
| 2023-03-30T10:42:28.932015
| 2021-03-29T19:22:50
| 2021-03-29T19:22:50
| 325,947,785
| 0
| 0
|
MIT
| 2021-03-01T11:46:31
| 2021-01-01T09:07:17
|
Python
|
UTF-8
|
Python
| false
| false
| 297
|
py
|
import click
from strava.commands import get_all_activities, get_weekly_activities
@click.group(name='activities', help='[GROUP] Get a list of recent activities.')
def cli_activities():
pass
cli_activities.add_command(get_all_activities)
cli_activities.add_command(get_weekly_activities)
|
[
"damienparret@hotmail.com"
] |
damienparret@hotmail.com
|
176d1760118ea2e983185c8849cc45ed9634f172
|
cc337c93128d061af41fc2a6959c399d927fcdd3
|
/migrations/versions/5787a70a193d_.py
|
4ebf3c682e8d3cca3bb3f969f9d7b1c68299e8e9
|
[] |
no_license
|
ackfla/fyyur-app
|
b982f3b406c81ec3e35d2e61d7c89508e1b1a34a
|
68967d27f518ca14fbecc7964f3079ac0428304f
|
refs/heads/master
| 2023-06-15T21:49:56.988926
| 2021-07-12T20:32:44
| 2021-07-12T20:32:44
| 372,621,714
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,682
|
py
|
"""empty message
Revision ID: 5787a70a193d
Revises:
Create Date: 2021-06-03 12:21:32.078956
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '5787a70a193d'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('artist',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(), nullable=True),
sa.Column('city', sa.String(length=120), nullable=True),
sa.Column('state', sa.String(length=120), nullable=True),
sa.Column('phone', sa.String(length=120), nullable=True),
sa.Column('genres', sa.String(length=120), nullable=True),
sa.Column('image_link', sa.String(length=500), nullable=True),
sa.Column('facebook_link', sa.String(length=120), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table('venue',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(), nullable=True),
sa.Column('city', sa.String(length=120), nullable=True),
sa.Column('state', sa.String(length=120), nullable=True),
sa.Column('address', sa.String(length=120), nullable=True),
sa.Column('phone', sa.String(length=120), nullable=True),
sa.Column('image_link', sa.String(length=500), nullable=True),
sa.Column('facebook_link', sa.String(length=120), nullable=True),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('venue')
op.drop_table('artist')
# ### end Alembic commands ###
|
[
"lucyweath@msn.com"
] |
lucyweath@msn.com
|
736ef284dfd95e34847e321aa2fd21e8638dd397
|
4c6bbb104cde6efe6f416d215fd6783285a308df
|
/examples_py_har/examples_py/test_matplotlib_bars.py
|
a73a7623a51ceebc87b349cabc60571b849e6caf
|
[
"MIT"
] |
permissive
|
alejack9/Transport-Mode-Detection
|
c416b391f6d4bd9727732f127a2e0ab5770fa39d
|
6b0a9bcec6f437895cb88f7adcb45b37defb9be4
|
refs/heads/main
| 2023-04-19T11:07:36.715263
| 2021-05-03T22:47:28
| 2021-05-03T22:47:28
| 427,343,561
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 338
|
py
|
import matplotlib.pyplot as plt
import numpy as np
label = ["2000", "2002", "2004", "2006", "2008", "2010"]
valuesX = [0, 1, 2, 3, 4, 5]
valuesY = [10.3, 13.23, 15.12, 16.12, 17.13, 18.67]
plt.axis([-1, 6, 10, 19])
plt.bar(valuesX, valuesY, 0.5)
plt.xticks(np.arange(6), label)
plt.ylabel("Current values")
plt.xlabel("Years")
plt.show()
|
[
"paolapersico95@gmail.com"
] |
paolapersico95@gmail.com
|
777643ac64cb8e81d39cfa5c89c89ab64253e30c
|
2a54f6b90df3a8e424c9fbd23767d91ce73fcbd5
|
/psd.py
|
acab23d8c74c63aca1d741aabf4019c543b3f067
|
[] |
no_license
|
khannema/contrib-snufflings
|
438978b1a1dd9a2c23071b798cd7580c9ebc17bb
|
8fcfb605b98d39fb45b46dbb4e4e1fb4cd452fae
|
refs/heads/master
| 2020-12-25T03:00:18.835817
| 2015-09-08T08:59:21
| 2015-09-08T08:59:21
| 42,101,091
| 0
| 0
| null | 2015-09-08T08:54:23
| 2015-09-08T08:54:23
|
Python
|
UTF-8
|
Python
| false
| false
| 6,683
|
py
|
from pyrocko.snuffling import Snuffling, Param, Choice, Switch
import numpy as num
from pyrocko.plot import graph_colors as colors
def to01(c):
return c[0]/255., c[1]/255., c[2]/255.
class PlotPSD(Snuffling):
'''
<html>
<body>
<h1>Plot PSD (Power Spectral Density)</h1>
Visible or selected data is cut into windows of 2 x 'Window length', tapered
    with a Hanning taper, FFTed, squared, normalized and gathered in terms of
mean or median and percentiles.
</body>
</html>
'''
def setup(self):
'''Customization of the snuffling.'''
self.set_name('Plot PSD')
self.add_parameter(Param('Window length [s]:', 'tinc', 100, 0.1, 10000., high_is_none=True))
self.add_parameter(Switch('Save figure', 'save', False))
self.add_parameter(Switch('Join stations', 'join_stations', False))
self.add_parameter(Switch('Show mean', 'mean', False))
self.add_parameter(Switch('Show logmean', 'logmean', False))
self.add_parameter(Switch('Show median', 'median', True))
self.add_parameter(Switch('Show percentiles', 'percentiles', False))
self.add_parameter(Switch('Show min and max', 'minmax', False))
self.set_live_update(False)
def call(self):
'''Main work routine of the snuffling.'''
by_nslc = {}
if self.tinc is not None:
tpad = self.tinc/2
else:
tpad = 0.0
for traces in self.chopper_selected_traces(tinc=self.tinc, tpad=tpad, want_incomplete=False, fallback=True):
for tr in traces:
nslc = tr.nslc_id
if self.tinc is not None:
nwant = int(self.tinc * 2 / tr.deltat)
if nwant != tr.data_len():
if tr.data_len() == nwant + 1:
tr.set_ydata( tr.get_ydata()[:-1] )
else:
continue
tr.ydata = tr.ydata.astype(num.float)
tr.ydata -= tr.ydata.mean()
if self.tinc is not None:
win = num.hanning(tr.data_len())
else:
win = num.ones(tr.data_len())
tr.ydata *= win
f, a = tr.spectrum(pad_to_pow2=True)
a = num.abs(a)**2
a *= tr.deltat * 2. / num.sum(win**2)
a[0] /= 2.
a[a.size/2] /= 2.
if nslc not in by_nslc:
by_nslc[nslc] = []
by_nslc[nslc].append((f,a))
if not by_nslc:
            self.fail('No complete data windows could be extracted for given selection')
fframe = self.figure_frame()
fig = fframe.gcf()
if self.join_stations:
grouping = lambda k: (k[3],)
labeling = lambda k: ' '.join( x for x in k[:-1] if x )
else:
grouping = lambda k: k
labeling = lambda k: None
group_keys = sorted(set( grouping(k) for k in by_nslc.keys() ))
p = None
ncols = len(group_keys) / 5 + 1
nrows = (len(group_keys)-1) / ncols + 1
axes = []
for i, group_key in enumerate(group_keys):
p = fig.add_subplot(nrows,ncols,i+1, sharex=p, sharey=p)
axes.append(p)
legend = False
for j, k in enumerate(sorted(by_nslc.keys())):
color = to01(colors[j%len(colors)])
color_trans1 = color + (0.5,)
color_trans2 = color + (0.25,)
group = by_nslc[k]
if grouping(k) == group_key:
a_list = [ a for (f,a) in group ]
a = num.vstack(a_list)
if self.percentiles:
p10 = num.percentile(a, 10., axis=0)
p90 = num.percentile(a, 90., axis=0)
p.fill_between(f[1:], p10[1:], p90[1:], color=color_trans1)
if self.minmax:
p0 = num.percentile(a, 0., axis=0)
p100 = num.percentile(a, 100., axis=0)
p.fill_between(f[1:], p0[1:], p100[1:], color=color_trans2)
lab = labeling(k)
if self.mean:
mean = num.mean(a, axis=0)
p.plot(f[1:],mean[1:], label=lab, color=color)
if lab:
legend = True
lab = None
if self.logmean:
logmean = num.exp(num.mean(num.log(a), axis=0))
p.plot(f[1:], logmean[1:], label=lab, color=color)
if lab:
legend = True
lab = None
if self.median:
p50 = num.median(a, axis=0)
p.plot(f[1:], p50[1:], label=lab, color=color)
if lab:
legend = True
lab = None
fmin = min( f[1] for (f,a) in group )
fmax = max( f[-1] for (f,a) in group )
if self.tinc is not None:
fmin = max( fmin, 1.0/self.tinc )
p.set_title(' '.join(group_key), ha='right', va='top', x=0.99,y=0.9)
p.grid()
p.set_xscale('log')
p.set_yscale('log')
if i/ncols == (len(group_keys)-1)/ncols:
p.set_xlabel('Frequency [Hz]')
if i % ncols == 0:
p.set_ylabel('PSD')
p.set_xlim(fmin, fmax)
if legend:
p.legend(loc='lower left', prop=dict(size=9))
for i,p in enumerate(axes):
if i/ncols != (len(group_keys)-1)/ncols:
for t in p.get_xticklabels():
t.set_visible(False)
if i % ncols != 0:
for t in p.get_yticklabels():
t.set_visible(False)
else:
tls = p.get_yticklabels()
if len(tls) > 8:
for t in tls[1::2]:
t.set_visible(False)
try:
fig.tight_layout()
except AttributeError:
pass
if self.save:
fig.savefig(self.output_filename(dir='psd.pdf'))
fig.canvas.draw()
def __snufflings__():
'''Returns a list of snufflings to be exported by this module.'''
return [ PlotPSD() ]
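# Illustrative sketch (not from the original snuffling): the same one-sided PSD
# normalization used in call() above, written with plain numpy for a synthetic 5 Hz
# sine; fs and tinc are made-up example values.
if __name__ == '__main__':
    import numpy as np
    fs = 100.0                         # sampling rate [Hz], deltat = 1/fs
    tinc = 10.0                        # "window length" -> 2*tinc seconds per window
    n = int(2 * tinc * fs)
    t = np.arange(n) / fs
    x = np.sin(2 * np.pi * 5.0 * t)    # 5 Hz test signal
    win = np.hanning(n)
    spec = np.fft.rfft(x * win)
    freqs = np.fft.rfftfreq(n, d=1.0 / fs)
    psd = np.abs(spec) ** 2 * (1.0 / fs) * 2.0 / np.sum(win ** 2)
    psd[0] /= 2.0
    psd[-1] /= 2.0                     # halve DC and Nyquist, as in call()
    print(freqs[np.argmax(psd)])       # ~5.0, the test frequency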
|
[
"sebastian.heimann@gfz-potsdam.de"
] |
sebastian.heimann@gfz-potsdam.de
|
c86cb2828df06ddc266a9731fc96708cc80b6139
|
2e2a6afb6aeddd4f73cfb4d9162820ec3cb56175
|
/utils/exceptions.py
|
948212cbfb54236488fb9aab132d1321151f1002
|
[] |
no_license
|
bingfengxindong/hat
|
dca6ea251a57c0e747933c05cc61dca1f2b56556
|
458df46c89ee2f6945225ad82cb932014750d1ac
|
refs/heads/master
| 2022-12-12T19:28:07.822885
| 2019-06-06T07:34:46
| 2019-06-06T07:34:46
| 188,367,211
| 0
| 0
| null | 2022-12-08T02:32:17
| 2019-05-24T06:42:07
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 841
|
py
|
from rest_framework.views import exception_handler as drf_exception_handler
import logging
from django.db import DatabaseError
from rest_framework.response import Response
from rest_framework import status
logger = logging.getLogger('django')
def exception_handler(exc, context):
"""
自定义异常处理
:param exc: 异常
:param context: 抛出异常的上下文
:return: Response响应对象
"""
# 调用drf框架原生的异常处理方法
response = drf_exception_handler(exc, context)
if response is None:
view = context['view']
if isinstance(exc, DatabaseError):
# 数据库异常
logger.error('[%s] %s' % (view, exc))
response = Response({'message': '服务器内部错误'}, status=status.HTTP_507_INSUFFICIENT_STORAGE)
return response
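# Usage sketch: DRF only calls a custom handler if it is registered in the Django
# settings; assuming this module is importable as utils.exceptions, that would be:
#
#   REST_FRAMEWORK = {
#       'EXCEPTION_HANDLER': 'utils.exceptions.exception_handler',
#   }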
|
[
"15736950639@163.com"
] |
15736950639@163.com
|
ae45020c21d9815b4fa6c936bb2c2acf39771a1d
|
dd89440eadf6942eabe8a19d3c4757ffe8cd476c
|
/algorithms/gcd.py
|
220c58b564505aab36ccd877fe31f922289be214
|
[] |
no_license
|
raythurman2386/Notes
|
4bfcf8df599e4fe321c808d149a64e637e8a5e6f
|
b4fecb71866a3bcb362c136410bfc5c510637ce5
|
refs/heads/master
| 2023-03-15T20:38:59.968758
| 2022-03-03T16:02:35
| 2022-03-03T16:02:35
| 174,013,500
| 0
| 0
| null | 2023-03-06T00:40:39
| 2019-03-05T20:09:49
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 252
|
py
|
def greatest_common_denom(a, b):
while b != 0:
temp = a
a = b
b = temp % b
print(a)
return a
greatest_common_denom(20, 4)
greatest_common_denom(50, 15)
greatest_common_denom(200, 60)
greatest_common_denom(60, 96)
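# Verification sketch: the loop above is Euclid's algorithm, so its results can be
# cross-checked against the standard library.
import math
assert math.gcd(20, 4) == 4
assert math.gcd(50, 15) == 5
assert math.gcd(200, 60) == 20
assert math.gcd(60, 96) == 12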
|
[
"raymondthurman5@gmail.com"
] |
raymondthurman5@gmail.com
|
fb45888ce05b385b1eed66c4818952c494f6e702
|
ca93da6ec6bed83d1bf7bfc17c0ea8524e2f2032
|
/twitch.py
|
b5e88089af127b683a8c7bd7117e580c66c034d6
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
pa-pyrus/ircCommander
|
1fa570a46c6ff75f2e804977669d157dc1ac4b2f
|
4b1121957dd7feaf3e416de718f1ef0fdc0c52d6
|
refs/heads/master
| 2016-09-05T23:14:18.429977
| 2015-08-30T07:42:03
| 2015-08-30T07:42:03
| 30,403,614
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,561
|
py
|
# vim:fileencoding=utf-8:ts=8:et:sw=4:sts=4:tw=79
from binascii import crc32
from json import loads
from twisted.internet.defer import Deferred
from twisted.python import log
from twisted.web.client import getPage
TWITCH_URL = "https://api.twitch.tv/kraken/streams?game=Planetary+Annihilation"
class TwitchParser(object):
"""
Parser for the Twitch.tv web API.
Reads a Twitch.tv web API URL asynchronously and parses the JSON output.
Provides deferred functions that can be called from other Twisted
applications.
"""
def __init__(self):
"""Initialize Twitch parser members."""
log.msg("Initializing Twitch parser.")
# initialize our data members
self.streams = tuple()
self.crc32 = 0
def startUpdate(self):
"""
Initiate an update using Twisted.
The request is handled asynchronously. It will call onUpdate if it's
successful and onError otherwise.
"""
log.msg("Updating URL contents for: {0}".format(TWITCH_URL))
deferred = getPage(TWITCH_URL)
deferred.addCallbacks(self.onUpdate, self.onError)
return deferred
def onUpdate(self, value):
"""Value callback for retrieving Twitch API data."""
# compare checksum to avoid work
new_crc = crc32(value)
if self.crc32 == new_crc:
log.msg("CRC32 hasn't changed, not parsing data.")
return self.streams
self.crc32 = new_crc
data = loads(value, encoding="utf-8")
streams = tuple({"name": stream["channel"]["display_name"],
"desc": stream["channel"]["status"],
"url": stream["channel"]["url"],
"viewers": stream["viewers"]}
for stream in data["streams"])
self.streams = sorted(streams,
key=lambda x: x["viewers"],
reverse=True)
log.msg("Received and parsed new data: {0}".format(self.streams))
return self.streams
def onError(self, error):
"""Error callback for retrieving Twitch API data."""
log.err("Encountered an error: {0}".format(
error.getErrorMessage()))
return error
def live(self):
"""Start an update and return a deferred containing the results."""
updateDeferred = self.startUpdate()
newDeferred = Deferred()
updateDeferred.addCallbacks(newDeferred.callback, newDeferred.errback)
return newDeferred
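# Usage sketch (module and import paths are assumptions, not from the original file):
#
#   from twisted.internet import reactor
#   from twisted.python import log
#   from twitch import TwitchParser
#
#   parser = TwitchParser()
#   d = parser.live()
#   d.addCallback(lambda streams: log.msg("%d streams live" % len(streams)))
#   d.addBoth(lambda _: reactor.stop())
#   reactor.run()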
|
[
"pa-pyrus@users.noreply.github.com"
] |
pa-pyrus@users.noreply.github.com
|
eecb609da64a18c488569976c547696dcfe2242f
|
a294a151e49df7ef3962c1953f0bf26e315403f6
|
/Switch/NewHorizons/msbt_reader.py
|
e7b5f5d76d51de4b9dff35ba8bb88d3356e04139
|
[] |
no_license
|
NWPlayer123/Resources
|
6419f0c0a07bdbe5fc2f775911ad654685447b9b
|
8834c780abfb7c3bfe510291d3404689af5dd7a1
|
refs/heads/master
| 2021-11-24T20:36:29.904369
| 2021-11-06T21:02:29
| 2021-11-06T21:02:29
| 102,266,164
| 23
| 4
| null | 2019-10-04T19:51:17
| 2017-09-03T13:22:59
|
C
|
UTF-8
|
Python
| false
| false
| 4,137
|
py
|
#Very terrible msbt reader, only has the Animal sections, just for
#scraping data/string names
#comment out a line in TXT2 if you want the raw strings returned
#comment out a line in read_msbt if you want the raw label groups, not an array
from struct import pack, unpack
from io import BytesIO
from binascii import hexlify, unhexlify
def full(f, endian):
return unpack("%sI" % endian, f.read(4))[0]
def half(f, endian):
return unpack("%sH" % endian, f.read(2))[0]
class MSBT:
def __init__(self):
self.endian = b">"
#pass in file handle or BytesIO, strips/handles the dumb control codes
#doesn't use for i cuz reading multiple bytes for the control codes
def clean_str(self, f):
f.seek(0, 2)
full_size = f.tell()
f.seek(0, 0)
ret = b""; char = half(f, self.endian)
while f.tell() < full_size:
if char == 0x000E: #control code magic
command = half(f, self.endian)
command2 = half(f, self.endian)
size = half(f, self.endian)
if command2 == 0x16: #??? for Russian strings
f.read(2) #skip whatever this is
declension = half(f, self.endian)
if declension != 2: #???
ret += pack("%sH" % self.endian, declension)
f.read(size - 4)
else:
data = f.read(size)
else:
ret += pack("%sH" % self.endian, char)
char = half(f, self.endian)
return ret
def LBL1(self, f, start):
count = full(f, self.endian)
table = [unpack("%s2I" % self.endian, f.read(8)) for i in range(count)]
entries = []
for entry in table:
group = []
f.seek(entry[1] + start) #num_labels, section_offset
for i in range(entry[0]):
size = ord(f.read(1))
name = f.read(size)
value = full(f, self.endian)
group.append([name, value])
entries.append(group)
return entries
def TXT2(self, f, start, end):
count = full(f, self.endian)
table = [unpack("%sI" % self.endian, f.read(4))[0] for i in range(count)]
table.append(end) # plus one so we don't OOB
strings = []
for i in range(count):
f.seek(start + table[i]) #relative->absolute
string = f.read(table[i+1] - table[i])
string = self.clean_str(BytesIO(string)) #comment out for no filter
strings.append([string.decode("UTF-16-LE"), i])
return strings
def read_msbt(self, name):
with open(name, "rb") as f:
assert f.read(8) == b"MsgStdBn" #magic
if unpack(">H", f.read(2))[0] == 0xFFFE: self.endian = "<"
assert unpack(">I", f.read(4))[0] == 0x00000103
num_sections = half(f, self.endian)
assert half(f, self.endian) == 0
full_size = full(f, self.endian)
f.seek(0, 2)
assert f.tell() == full_size #make sure file not truncated
f.seek(0x16)
assert f.read(10) == b"\x00" * 10
for i in range(num_sections): #for each section, parse and then stitch after
header = unpack("%s4sI8x" % self.endian, f.read(16))
start = f.tell()
if header[0] == b"LBL1": #labels
labels = self.LBL1(f, start)
if header[0] == b"ATR1": #attributes
pass
if header[0] == b"TXT2": #text data
text = self.TXT2(f, start, header[1])
f.seek(start + header[1])
if f.tell() % 16: #align to next section
f.seek(16 - (f.tell() % 16), 1)
labels2 = [] #process raw labels, 3D groups -> 2D array
for entry in labels:
for entry2 in entry:
labels2.append(entry2)
labels2.sort(key=lambda x: x[1]) #sort by text index
return labels2, text #returns raw unsorted data, have to parse
|
[
"NWPlayer123@users.noreply.github.com"
] |
NWPlayer123@users.noreply.github.com
|
41e932859262ecd130fa82758da732679b561e81
|
b73482ee0df7b4e8514c21f2f98b99d98ff8013d
|
/3 workshop/try_service_sum.py
|
5cf274db2428c4af86bb9dfa415770020d58f05a
|
[] |
no_license
|
JjVera96/Client-Server
|
167e006b6d4b18c2753768ced3bccf6beabf2103
|
66fdd96b09aa676edba15e9d9a97fa20453f521f
|
refs/heads/master
| 2020-03-25T05:00:20.043584
| 2018-11-24T17:29:13
| 2018-11-24T17:29:13
| 143,424,458
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 738
|
py
|
import socket
HOST = '192.168.8.229'
PORT = 5500
s_recv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print 'Listening to ip {} in port {}'.format(HOST, PORT)
s_recv.bind((HOST, PORT))
s_recv.listen(1)
conn, addr = s_recv.accept()
print 'Connected by', addr
conn.send('Ok')
data = conn.recv(1024)
print(data)
while True:
op1 = raw_input('First Operator: ')
op2 = raw_input('Second Operator: ')
try:
op1 = int(op1)
op2 = int(op2)
except ValueError:
print "Warning: You must enter integers numbers."
op1 = None
op2 = None
if op1 is not None:
conn.send('+,{},{}'.format(op1, op2))
data = conn.recv(1024)
print('Result: {}'.format(data))
conn.close()
|
[
"utp@utp"
] |
utp@utp
|
ad0949a54642f8e8a5cb3dca5d419b9568432a6d
|
e6ac0a3c010fe343f4e3228a3fcb7202cdb19a1e
|
/enron_mail/tools/parse_out_email_text.py
|
a6632fcd63c4571e959ebcd3044c3f85ec1f5806
|
[] |
no_license
|
LinzzMichael/machine-Learning
|
c0b32775b0abd1f2d0b5229b2918f6133f908fe4
|
b88fb70318be9b3036492fabafe8749ca070a34f
|
refs/heads/master
| 2020-03-31T12:05:26.751012
| 2018-10-15T08:07:38
| 2018-10-15T08:07:38
| 152,202,717
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,513
|
py
|
#!/usr/bin/python
from nltk.stem.snowball import SnowballStemmer
import string
def parseOutText(f):
""" given an opened email file f, parse out all text below the
metadata block at the top
(in Part 2, you will also add stemming capabilities)
and return a string that contains all the words
in the email (space-separated)
example use case:
f = open("email_file_name.txt", "r")
text = parseOutText(f)
"""
f.seek(0) ### go back to beginning of file (annoying)
all_text = f.read()
### split off metadata
content = all_text.split("X-FileName:")
words = ""
if len(content) > 1:
### remove punctuation
text_string = content[1].translate(string.maketrans("", ""), string.punctuation)
### project part 2: comment out the line below
words = text_string
stemmer = SnowballStemmer("english")
temp = words.split()
words = ""
for word in temp:
word = stemmer.stem(word)
# print(word)
words = words + word + " "
### split the text string into individual words, stem each word,
### and append the stemmed word to words (make sure there's a single
### space between each stemmed word)
return words
def main():
ff = open("../text_learning/test_email.txt", "r")
text = parseOutText(ff)
print text
if __name__ == '__main__':
main()
|
[
"1059548222@qq.com"
] |
1059548222@qq.com
|
90a9aef2b70d49e4f51075425d4970640c8bbdee
|
c3aaa011108ed031e8369611ed6658c85fb596fe
|
/mainBot.py
|
209da3213dbfefeb511dae1a1c359b1e399b3f16
|
[] |
no_license
|
MBObaaqir/LanguagePoliceBot
|
bf4fcf3724e83ea0f8499429909a67f3daba16bd
|
8d8c6e8ef2cb2c7935aaa5bbaaf8b1817af35d74
|
refs/heads/main
| 2023-06-05T17:02:57.298460
| 2021-06-29T15:18:39
| 2021-06-29T15:18:39
| 381,026,168
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 507
|
py
|
from telegram.ext import Updater
import logging
import helpers
from telegram.ext import MessageHandler, Filters
import environment
polling_handler = MessageHandler(Filters.text & (~Filters.command), helpers.polling_threading)
# Enable logging
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)
updater = Updater(token=environment.bot_id, use_context=True)
updater.start_polling()
dispatcher = updater.dispatcher
dispatcher.add_handler(polling_handler)
|
[
"baaqir@hotmail.com"
] |
baaqir@hotmail.com
|
54dfda9b7aa9d7fa1d61f506b4266155a7266e1a
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/303/usersdata/294/80466/submittedfiles/testes.py
|
9d7f6042a7d4b71cbfecc32848c07dbb8ef2064f
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 123
|
py
|
# -*- coding: utf-8 -*-
# START HERE BELOW
nome = input('Qual o seu nome? ')
print('Olá ' + nome + ', seja bem vinda!')
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
5ffb982e62eb751b952318b60fb800f712713ca9
|
f3f732881b813dd5d6e1239618f5d4d6bb394db7
|
/160.intersectionLinkedList.py
|
6fc29a7e0f2285d1531677f1a175803bb8ec1e0d
|
[] |
no_license
|
umnstao/leetcodeOJ
|
45917528abb693fa23678356497267e4ce571a4f
|
f7cb7cfa6e1f04efd741c2456ad930db48101573
|
refs/heads/master
| 2021-01-21T14:57:22.257064
| 2017-11-22T22:57:48
| 2017-11-22T22:57:48
| 95,362,154
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,000
|
py
|
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
def getIntersectionNode(self, headA, headB):
"""
:type head1, head1: ListNode
:rtype: ListNode
"""
lenA = 0
lenB = 0
curA = headA
while curA:
curA = curA.next
lenA += 1
curB = headB
while curB:
curB = curB.next
lenB += 1
curA = headA
curB = headB
if lenA > lenB:
k = lenA - lenB
while k > 0:
curA = curA.next
k = k - 1
elif lenA < lenB:
k = lenB - lenA
while k > 0:
curB = curB.next
k = k - 1
while curA:
if curA == curB:
return curA
curA = curA.next
curB = curB.next
return None
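# Usage sketch (ListNode is only shown commented out above, so it is redefined here):
if __name__ == '__main__':
    class ListNode(object):
        def __init__(self, x):
            self.val = x
            self.next = None
    shared = ListNode(8)
    shared.next = ListNode(10)
    head_a = ListNode(3)
    head_a.next = shared                       # A: 3 -> 8 -> 10
    head_b = ListNode(5)
    head_b.next = ListNode(6)
    head_b.next.next = shared                  # B: 5 -> 6 -> 8 -> 10
    print(Solution().getIntersectionNode(head_a, head_b).val)  # prints 8, the shared node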
|
[
"umnstao@gmail.com"
] |
umnstao@gmail.com
|
2c5ad861fcce9b4203e5ff1c9b6fbdabf1e10047
|
7c0ac74b1215a5e53698924b69b89221fec0cfd6
|
/torch_geometric/utils/matmul.py
|
8e8f4bb6aaea1430f1a94da1c9239473e4b18be1
|
[
"MIT"
] |
permissive
|
ZackPashkin/pytorch_geometric
|
b30c1a220f3f5f593ec4ac12b696f2cac1ae4e0a
|
3663a96c8e649af46c29a32beb03f49cc97f5b86
|
refs/heads/master
| 2020-03-20T09:33:51.497347
| 2019-03-19T05:07:30
| 2019-03-19T05:07:30
| 137,341,025
| 0
| 0
| null | 2018-06-14T10:05:30
| 2018-06-14T10:05:29
| null |
UTF-8
|
Python
| false
| false
| 415
|
py
|
from torch_scatter import scatter_add
def matmul(index, value, tensor):
tensor = tensor if tensor.dim() > 1 else tensor.unsqueeze(-1)
assert (value is None or value.dim() == 1) and tensor.dim() == 2
row, col = index
out_col = tensor[col]
out_col = out_col if value is None else out_col * value.unsqueeze(-1)
out = scatter_add(out_col, row, dim=0, dim_size=tensor.size(0))
return out
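# Usage sketch (assumes a torch_scatter version compatible with the call above):
# index/value are the COO nonzeros of a sparse matrix, so matmul() computes
# sparse @ dense through scatter_add.
if __name__ == '__main__':
    import torch
    index = torch.tensor([[0, 0, 1],           # row indices of the nonzeros
                          [0, 2, 1]])          # column indices of the nonzeros
    value = torch.tensor([1.0, 2.0, 3.0])
    dense = torch.eye(3)
    out = matmul(index, value, dense)
    reference = torch.zeros(3, 3)
    reference[index[0], index[1]] = value      # same matrix, densified
    print(torch.allclose(out, reference @ dense))  # expected: True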
|
[
"matthias.fey@tu-dortmund.de"
] |
matthias.fey@tu-dortmund.de
|
fb05e86a7d9d3905fe1578338c2c578881d1bbd9
|
139654b5277164f10e1c67cd03ac2131e8101ffb
|
/eventex/settings.py
|
036383c079b56720f8bcf6b1b727b94e06f71d6a
|
[] |
no_license
|
WSDSilva/eventex
|
b2267875a7a507301515db643e761cc4715a99ca
|
97340e9ca5456bbfcb97837f4e851c341afdd7f6
|
refs/heads/master
| 2020-12-25T14:39:20.545607
| 2016-11-17T19:43:20
| 2016-11-17T19:43:20
| 66,491,101
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,753
|
py
|
"""
Django settings for eventex project.
Generated by 'django-admin startproject' using Django 1.10.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
from decouple import config, Csv
from dj_database_url import parse as dburl
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = config('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = config('DEBUG', default=False, cast=bool)
ALLOWED_HOSTS = config('ALLOWED_HOSTS', default=[], cast=Csv())
DEFAULT_FROM_EMAIL = 'contato@eventex.com'
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'test_without_migrations',
'django_extensions',
'eventex.core',
'eventex.subscriptions.apps.SubscriptionConfig',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'eventex.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'eventex.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
default_dburl = 'sqlite:///'+ os.path.join(BASE_DIR,'db.sqlite3')
DATABASES = {
'default': config('DATABASE_URL', default=default_dburl, cast=dburl),
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'pt-br'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR,'staticfiles')
#Email configuration
EMAIL_BACKEND = config('EMAIL_BACKEND')
EMAIL_HOST = config('EMAIL_HOST')
EMAIL_PORT = config('EMAIL_PORT', cast=int)
EMAIL_USE_TLS = config('EMAIL_USE_TLS', cast=bool)
EMAIL_HOST_USER = config('EMAIL_HOST_USER')
EMAIL_HOST_PASSWORD = config('EMAIL_HOST_PASSWORD')
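# Illustrative sketch: every config() call above reads from the environment or a .env
# file via python-decouple, so a minimal .env for local development could look like
# this (all values are made-up examples):
#
#   SECRET_KEY=change-me
#   DEBUG=True
#   ALLOWED_HOSTS=127.0.0.1,localhost
#   EMAIL_BACKEND=django.core.mail.backends.console.EmailBackend
#   EMAIL_HOST=localhost
#   EMAIL_PORT=25
#   EMAIL_USE_TLS=False
#   EMAIL_HOST_USER=
#   EMAIL_HOST_PASSWORD=
#   DATABASE_URL=sqlite:///db.sqlite3   (optional; defaults to the sqlite path above)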
|
[
"wandersonduarte.br@gmail.com"
] |
wandersonduarte.br@gmail.com
|
b8ff4a45267c078994fe4c3210e79026135f1622
|
4dc5fcc1277e2f4d4cc58a88a26873a2f41711d4
|
/Test/re_complie/re_test.py
|
2759f5427b025195ced8055bdf75b54e9585834f
|
[] |
no_license
|
pqGC/Python_Note
|
9db573feacb7eddc8fbd9cf5182e60e775429f18
|
59d2dd5550e07a707ba4582edebb891f1c262485
|
refs/heads/master
| 2020-04-02T12:57:13.592077
| 2018-10-24T08:05:19
| 2018-10-24T08:05:19
| 154,459,818
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,051
|
py
|
import re
# """(?:\"?|\')(?:https?|ftp|file)://[-A-Za-z0-9+&@#/%?=~_|!:,.;]+[-A-Za-z0-9+&@#/%=~_|](?:\"?|\')"""
# pattern = "document\.write\(.<iframe.*?src=\s*(?:\"?|\')(?:https?|ftp|file)://[-A-Za-z0-9+&@#/%?=~_|!:,.;]+[-A-Za-z0-9+&@#/%=~_|](?:\"?|\').*?>"
# content = """document.write("<iframe width="100" height="100" src="http://114.55.36.222/LittleHann/LittleHann.html"></iframe>")"""
# pattern2 = "<div\s*?id=.?\w{1,20}?.?>(\s*.*?)</div>\s*?<script>document\.getElementById\(.*?\)\.style\.display=.?none.?[;]?</script>"
# content2 = """<div id="aa" style="height:500"><a href="www.test.com"></a></div>/<script>document.getElementById("aa").style.display="none";</script>"""
# search_result = re.compile(pattern2, re.I | re.M | re.S)
# result = search_result.findall(content2)
# print(result)
#
# pattern_url = "(?:https?|ftp|file)://[-A-Za-z0-9+&@#/%?=~_|!:,.;]+[-A-Za-z0-9+&@#/%=~_|]"
# search_url = re.compile(pattern_url, re.I | re.M | re.S)
# result2 = search_url.findall(content2)
# print(result2)
# """<script.{0,60}>\s*window.location.href=.*?;</script>"""
# pattern_item = "<div[\S\s]*style=.{0,1}position\s*:\s*absolute.*(?:top|left|right)(?:.|):(?:.|)-[6-9][\d]{2,3}px\s*.*?>.*?</div>"
# search_item = re.compile(pattern_item, re.I | re.M | re.S)
# result3 = search_item.findall(content2)
# print(result3)
# pattern_item = "(127\.0\.0\.1)|(localhost)|(10\.\d{1,3}\.\d{1,3}\.\d{1,3})|(172\.((1[6-9])|(2\d)|(3[01]))\.\d{1,3}\.\d{1,3})|(192\.168\.\d{1,3}\.\d{1,3})"
pattern_item = "<marquee.{1,20}height=[0-9].{1,20}width=[0-9][^>]*?>[\S\s]{0,150}</marquee>"
content3 = """
<marquee height=1 width=5 scrollamount=3000 scrolldelay=20000><a href=http://thief.one >暗链</a></marquee>
"""
# content = """window.setTimeout("window.location='"+wz+"'",1000); """
search_item = re.compile(pattern_item, re.I | re.M | re.S)
result4 = search_item.findall(content3)
# strIP = ''
# for single_tuple in result4:
# for ip in single_tuple:
# strIP = strIP + ip
# if len(strIP) == 0:
# print('空')
# print(strIP)
print(result4)
|
[
"pq@anfou.cn"
] |
pq@anfou.cn
|
36be545091b5a5b71812ae79c709ef9bda995fc8
|
6865f26fbbd6d100454712b27deac1c1338fcdb9
|
/ec_sri/objects/product.py
|
9db9c8409b6167b3b22d8037fff143d55b5496e5
|
[] |
no_license
|
syscod/addons_account
|
9978408815d6a51130758b5a90d763e1a2d333f2
|
073378c7ff62b3c348c25dbea179c2936ee62c53
|
refs/heads/master
| 2021-01-10T19:11:27.186835
| 2013-07-24T18:45:11
| 2013-07-24T18:45:11
| 10,797,382
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,126
|
py
|
# -*- coding: UTF-8 -*- #
#########################################################################
# Copyright (C) 2011 Christopher Ormaza, Ecuadorenlinea.net #
# #
#This program is free software: you can redistribute it and/or modify #
#it under the terms of the GNU General Public License as published by #
#the Free Software Foundation, either version 3 of the License, or #
#(at your option) any later version. #
# #
#This program is distributed in the hope that it will be useful, #
#but WITHOUT ANY WARRANTY; without even the implied warranty of #
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
#GNU General Public License for more details. #
# #
#You should have received a copy of the GNU General Public License #
#along with this program. If not, see <http://www.gnu.org/licenses/>. #
#########################################################################
import netsvc
from osv import osv
from osv import fields
from tools.translate import _
from lxml import etree
import time
import psycopg2
import re
from lxml import etree
import addons.decimal_precision as dp
class account_ice_type(osv.osv):
_name = 'account.ice.type'
_columns = {
'code':fields.char('Code', size=5, required=True, ),
'name':fields.char('Name', size=255, required=True, ),
'rate': fields.float('Rate', digits=(16, 2), required=True,),
}
account_ice_type()
class product_product(osv.osv):
_inherit = "product.product"
_columns = {
'sri_code':fields.char('SRI CODE', size=255),
'ice_product':fields.boolean('ICE Product?', ),
'ice_type_id':fields.many2one('account.ice.type', 'ICE Type', required=False),
}
product_product()
|
[
"edtegomez@gmail.com"
] |
edtegomez@gmail.com
|
e3927f2bbe1447e57c5f9862e6bdbbed472c3f4d
|
ef72a7df3c39c215dd90ac5e72b164eb9d7da892
|
/rpg/monsters/imp.py
|
b23f023017e8acdf3b1f1eebfbf51bb93c44f41b
|
[] |
no_license
|
thebmo/messing_around
|
d49a87fc1ff722428ea67bc710ca99ad287098bd
|
4cb12e0b224cf7d1f93cb4ae6ff7603619fb7aa9
|
refs/heads/master
| 2021-01-13T02:18:50.799898
| 2015-04-08T01:12:41
| 2015-04-08T01:12:41
| 28,570,375
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 378
|
py
|
"""
Imp monster sub-class of parents Monster and NPC.
"""
from monster import Monster
from npc import NPC
class Imp(Monster, NPC):
NAME = 'Imp'
STATS = {
'STR': 5,
'AGI': 3,
'INT': 1,
'CHA': 0,
'LCK': 0,
'max_hp': 6,
'max_ap': 1,
'level': 2,
'exp': 6,
'gold': 8,
}
|
[
"bmosier@gmail.com"
] |
bmosier@gmail.com
|
35df46f2b718dfffc5c21fdb1f400f09aca891c5
|
144941dcc3e89f1f400054803b65c2ec5f267f77
|
/my_project_web_v7/data/users.py
|
43aaa96cd01743b00ac14d06cfa4b19fd043c55b
|
[] |
no_license
|
SZel393/cows_and_bulls
|
518c214ebfc0637a7927add57ab7eec1c3943964
|
201d236a9ff744575154f88e8e434d13b0d426f4
|
refs/heads/main
| 2023-04-07T11:16:14.493398
| 2021-04-21T10:49:42
| 2021-04-21T10:49:42
| 359,844,887
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,372
|
py
|
import datetime
import sqlalchemy
from .db_session import SqlAlchemyBase
from sqlalchemy import orm
from flask_login import UserMixin
from werkzeug.security import check_password_hash, generate_password_hash
class User(SqlAlchemyBase, UserMixin):
__tablename__ = 'users'
id = sqlalchemy.Column(sqlalchemy.Integer,
primary_key=True, autoincrement=True)
surname = sqlalchemy.Column(sqlalchemy.String, nullable=True)
name = sqlalchemy.Column(sqlalchemy.String, nullable=True)
level = sqlalchemy.Column(sqlalchemy.Integer, nullable=True)
status = sqlalchemy.Column(sqlalchemy.String, nullable=True)
email = sqlalchemy.Column(sqlalchemy.String,
index=True, unique=True, nullable=True)
hashed_password = sqlalchemy.Column(sqlalchemy.String, nullable=True)
reg_date = sqlalchemy.Column(sqlalchemy.DateTime,
default=datetime.datetime.now)
record = orm.relation("Record", back_populates='user')
def __repr__(self):
return '<Игрок> {} {} {}'.format(self.id, self.surname, self.name)
def set_password(self, password):
self.hashed_password = generate_password_hash(password)
def check_password(self, password):
return check_password_hash(self.hashed_password, password)
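# Usage sketch: only the hash is ever stored, so authentication goes through the two
# helpers above (values are made-up examples).
#   u = User(surname='Petrov', name='Ivan', email='ivan@example.com')
#   u.set_password('s3cret')
#   u.check_password('s3cret')   # True
#   u.check_password('wrong')    # False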
|
[
"noreply@github.com"
] |
noreply@github.com
|
7684611bf1669d9ca27f8b1ac9469806c3fcf51f
|
0638fb87d24591f57f9a0c08da3e7c3a825a5443
|
/env/bin/easy_install-3.4
|
b8f5a91e2d5558719778711ae0845bf321796b09
|
[] |
no_license
|
lscalpati/hackathon-houston
|
2e168635b7355b00fd11aed1073b1b016776436f
|
b5e971be8ecbde9bf39cf8819ebaef2996a77839
|
refs/heads/master
| 2020-06-12T19:27:37.415309
| 2016-12-06T21:11:21
| 2016-12-06T21:11:21
| 75,765,951
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 306
|
4
|
#!/home/louscalpati/devel/appengine-sample/getting-started-python/6-pubsub/env/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"louscalpati@google.com"
] |
louscalpati@google.com
|
58387329bb15b94260f2528c77fccfb21cdb8190
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/SqbyWYwqChQroXfhu_23.py
|
85771451090fc8484ee47a515d24c515837f537c
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,362
|
py
|
"""
This challenge concerns _square matrices_ (same number of rows and columns) as
the below example illustrates:
[
[1, 2, 3],
[4, 5, 6],
[7, 8, 9]
]
The entries in the diagonal line from the top left to the bottom right form
the _main diagonal_ of the matrix. In this case, 1,5,9 form the main diagonal.
Write a function that returns the matrix obtained by replacing the entries
_above_ the main diagonal with 0s.
For example, for the matrix above you should return:
[
[1, 0, 0],
[4, 5, 0],
[7, 8, 9]
]
### Examples
lower_triang([
[1, 2, 3],
[4, 5, 6],
[7, 8, 9]
]) ➞ [
[1, 0, 0],
[4, 5, 0],
[7, 8, 9]
]
lower_triang([
[5, 7],
[7, 9]
]) ➞ [
[5, 0],
[7, 9]
]
lower_triang([
[1, 8, 8, 1],
[2, 7, 7, 2],
[3, 6, 6, 3],
[4, 5, 5, 4]
]) ➞ [
[1, 0, 0, 0],
[2, 7, 0, 0],
[3, 6, 6, 0],
[4, 5, 5, 4]
]
### Notes
* As in the examples, the size of the matrices will vary (but they will always be square).
* In Linear Algebra, matrices with 0s above the diagonal are called _lower triangular matrices_.
"""
def lower_triang(arr):
for i in range(len(arr)):
for j in range(len(arr[i])):
if j < i:
arr[j][i] = 0
return arr
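# Quick check: note the index order, i walks columns and j walks rows when writing
# arr[j][i], so exactly the entries with column > row (above the main diagonal) are
# zeroed. First docstring example:
#   lower_triang([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
#   -> [[1, 0, 0], [4, 5, 0], [7, 8, 9]]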
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
303cb5ff1bf3c1ad69ba4c41f7e30fbfd19991b6
|
d775ed27e3205eb028c13ceb45289a64239ea69a
|
/tests/auth_test.py
|
181d026214be1bd921864491ca1010f37613cae5
|
[
"Apache-2.0"
] |
permissive
|
LibertyGlobal/sequoia-python-client-sdk
|
8cd50db9bdc8ed3645a25fa7e6b298bce0a181fd
|
0468ca4fa20a61bb840be39c6c68a59a83f686cd
|
refs/heads/master
| 2021-03-04T05:19:34.772668
| 2020-02-10T14:01:58
| 2020-02-10T14:01:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,727
|
py
|
import unittest
from hamcrest import assert_that, is_
from sequoia.auth import TokenCache, ClientGrantAuth
class TestClientGrantAuth(unittest.TestCase):
def setUp(self):
TokenCache._token_storage = {}
def test_given_token_is_not_provided_and_it_is_not_in_cache_then_token_is_none(self):
auth = ClientGrantAuth('user', 'pass', 'http://identity')
assert_that(auth.token, is_(None))
def test_given_token_is_provided_then_that_token_is_used_and_added_to_cache(self):
auth = ClientGrantAuth('user', 'pass', 'http://identity', '1234')
assert_that(auth.token, is_({'token_type': 'bearer', 'access_token': '1234'}))
assert_that(TokenCache._token_storage,
is_({'user': {'http://identity': {'token_type': 'bearer', 'access_token': '1234'}}}))
def test_given_token_is_not_provided_and_there_is_a_token_in_cache_then_that_token_is_used(self):
TokenCache().add_token('user', 'http://identity', {'token_type': 'bearer', 'access_token': '567'})
auth = ClientGrantAuth('user', 'pass', 'http://identity')
assert_that(auth.token, is_({'token_type': 'bearer', 'access_token': '567'}))
def test_given_token_is_not_provided_and_it_is_not_in_cache_then_token_is_fetched_and_added_to_cache(self):
class MockSession:
def __init__(self):
self.token = {'token_type': 'bearer', 'access_token': '789'}
def fetch_token(self, *args, **kwargs):
pass
auth = ClientGrantAuth('user', 'pass', 'http://identity')
auth.session = MockSession()
auth.init_session()
assert_that(TokenCache._token_storage,
is_({'user': {'http://identity': {'token_type': 'bearer', 'access_token': '789'}}}))
class TestTokenCache(unittest.TestCase):
def setUp(self):
TokenCache._token_storage = {}
def test_given_a_token_it_is_added_to_cache(self):
assert_that(TokenCache._token_storage, is_({}))
TokenCache().add_token('user-1', 'url1', '123')
TokenCache().add_token('user-1', 'url2', '456')
TokenCache().add_token('user-2', 'url1', '789')
assert_that(TokenCache._token_storage,
is_({'user-1': {'url1': '123', 'url2': '456'}, 'user-2': {'url1': '789'}}))
assert_that(TokenCache().get_token('user-1', 'url1'), is_('123'))
assert_that(TokenCache().get_token('user-1', 'url2'), is_('456'))
assert_that(TokenCache().get_token('user-1', 'url3'), is_(None))
assert_that(TokenCache().get_token('user-2', 'url1'), is_('789'))
assert_that(TokenCache().get_token('user-3', 'url1'), is_(None))
TokenCache._token_storage = {}
|
[
"marco.romero.andujar@gmail.com"
] |
marco.romero.andujar@gmail.com
|
b38db9e200c0c60b469d324e5e0dc94473edecc6
|
4b1035e4b55d6833efd07d74958a28adfe00536a
|
/NewsCrawler/RealRules.py
|
61f9e730e61834f2444c195032e78f2d029f65e9
|
[] |
no_license
|
bigheiniu/MM-COVID
|
803f6e34a7f5156febf0a5ddb03e8fde19f0ec2f
|
316511a3c0e4e484cc94e48bf45bc7ae3ac42adb
|
refs/heads/main
| 2023-03-23T15:41:00.000218
| 2021-03-20T16:19:02
| 2021-03-20T16:19:02
| 311,139,384
| 30
| 6
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 46,516
|
py
|
import os
from util.Util import chunkify, multiprocess_function, get_selenium_driver
import logging
import configparser
import time
from bs4 import BeautifulSoup
import traceback
import requests
key_words_list = ['sars-cov-2', 'covid-19', "coronavirus", "covid"]
def ReliableCrawler(name, page_count, db):
if name == "cdc":
main_url = "https://www.cdc.gov/media/archives.html?Sort=Article%20Date%3A%3Adesc&Page={}"
crawl_func = page_in_cdc
elif name == "who":
main_url = "https://www.who.int/news-room/releases/{}"
crawl_func = page_in_who
elif name == "nih":
main_url = "https://search.nih.gov/search/docs?affiliate=nih&dc=565&page={}&query=covid-19&utf8=%E2%9C%93"
crawl_func = page_in_nih
elif name == "webmd":
main_url = "https://www.webmd.com/search/search_results/default.aspx?query=covid19&page={}"
crawl_func = page_in_webMD
elif name == "smithsonianmag":
main_url = "https://www.smithsonianmag.com/search/?q=covid-19&page={}"
crawl_func = page_in_smithsonianmag
elif name == "science_daily":
main_url = "https://www.sciencedaily.com/search/?keyword=covid19#gsc.tab=0&gsc.q=covid%2019%20site%3Awww.sciencedaily.com&gsc.sort=&gsc.page={}"
crawl_func = page_in_science_daily
elif name == "healthline":
main_url = "https://www.healthline.com/health-news?ref=global"
crawl_func = page_in_healthline
elif name == "ecdc":
main_url = "./crawled_data/ecdc.html"
crawl_func = page_in_ecdc
elif name == "mnt":
main_url = "https://www.medicalnewstoday.com/coronavirus"
crawl_func = page_in_MNT
elif name == "mayo_clinic":
main_url = "https://www.mayoclinic.org/diseases-conditions/coronavirus/symptoms-causes/syc-20479963"
crawl_func = page_in_mayo_clinic
elif name == "celeveland":
main_url = "https://newsroom.clevelandclinic.org/category/news-releases/page/{}/"
crawl_func = page_in_cleveland_clink
elif name == "snopes":
main_url = "https://www.snopes.com/news/page/{}"
crawl_func = page_in_snopes
elif name == "politico":
main_url = "https://www.politico.com/search/{}?q=covid19"
crawl_func = page_in_politico
elif name == "dn":
main_url = "{}"
crawl_func = page_in_dn
elif name == "publico":
main_url = "{}"
crawl_func = page_in_publico
elif name == "afp":
main_url = "https://www.afp.com/fr/search/results/covid-19?page={}&f[0]=im_field_tags%3A74"
crawl_func = page_in_afp
elif name == "elpais":
main_url = "https://elpais.com/noticias/covid-19/{}/"
crawl_func = page_in_elpais
elif name == "abces":
main_url = "https://www.abc.es/hemeroteca/resultados-busqueda-avanzada/todo/pagina-{}?tod=covid&nin=19"
crawl_func = page_in_abces
elif name == "animalpolitico":
main_url = "{}"
crawl_func = page_in_animalpolitico
elif name == "lemonde":
main_url = "https://www.lemonde.fr/recherche/?search_keywords=covid-19&start_at=03/01/2020&end_at=26/07/2020&search_sort=relevance_desc&page={}"
crawl_func = page_in_lemonde
elif name == "jn":
main_url = "{}"
crawl_func = page_in_jn
elif name == "publico":
main_url = ""
crawl_func = page_in_publico
elif name == "milenio":
main_url = "https://www.efe.com/efe/espana/busqueda/50000538?q=covid-19&p={}&s=0"
crawl_func = page_in_milenio
else:
raise NotImplementedError
# TODO: Automatically extract the page number
if page_count > 0:
all_pages = list(range(1, page_count + 1, 1))
else:
all_pages = [-1]
num_process = os.cpu_count() - 3
all_pages_chunkify = chunkify(all_pages, num_process)
multiprocess_function(num_process, function_ref=fetch_save_collection,
args=(all_pages_chunkify, main_url,
db, crawl_func))
def fetch_save_collection(idx, all_pages_chunkify, main_url,
db, crawl_func):
idx_pages = all_pages_chunkify[idx]
return_list = []
news_collection = db[Constants.NEWS_COLLECTION]
driver = get_selenium_driver()
for no_page in idx_pages:
if no_page == -1:
url = main_url
else:
url = main_url.format(no_page)
try:
return_element = crawl_func(driver, url)
except:
break
print("Length for Page {} is {}".format(no_page, len(return_element['info_list'])))
if len(list(return_element.values())[0]) == 0:
continue
else:
if return_element is None:
continue
            # note: dict.update() returns None, so a one-liner of the form
            # "i if 'url' in i else i.update(...)" would leave None entries; set the url explicitly
            for info, fact_url in zip(return_element['info_list'], return_element['fact_url_list']):
                info.setdefault('url', fact_url)
            return_element_list = return_element['info_list']
for i in return_element_list:
try:
i['ref_source'] = crawl_link_article(i['url'])
i['ref_source']['ref_source_url'] = i['url']
news_collection.find_one_and_update({'id':i['id']},{"$set":i}, upsert=True)
except:
continue
driver.close()
news_collection = db[Constants.NEWS_COLLECTION]
for i in return_list:
if db.find_one({"id": i['id']}, {"id": 1}) is None:
news_collection.update({'id': i['id']}, {'$set': i}, upsert=True)
article_detail = crawl_link_article(i['url'])
news_collection.find_one_and_update({'id': i['id']}, {'$set': article_detail.update({"agency": i['agency']})}, upsert=True)
logging.info(f"Success finish {i['id']}.")
def page_in_who(driver, url):
info_list = []
fact_url_list = []
try:
driver.get(url)
documents = driver.find_elements_by_xpath("//div[@class='list-view vertical-list vertical-list--image']"
"//div[@class='list-view--item vertical-list-item']")
for doc in documents:
try:
url = doc.find_element_by_xpath(".//a[@class='link-container table']").get_attribute("href")
if "https://www.who.int" not in url:
url = "https://www.who.int" + url
date = doc.find_element_by_xpath(".//div[@class='date']/span").text
news_title = doc.find_element_by_xpath(".//p[@class='heading text-underline']").text
if filter(news_title):
info_list.append({"agency": "who",
"time_and_loc": date,
"label": 'True',
"claim": news_title,
'lang': "en"
})
fact_url_list.append(url)
except:
continue
except:
pass
return {
"info_list": info_list,
"fact_url_list": fact_url_list
}
def page_in_cdc(driver, url):
# https://www.cdc.gov/media/archives.html?Sort=Article%20Date%3A%3Adesc&Page=2
info_list = []
fact_url_list = []
try:
driver.get(url)
urls = driver.find_elements_by_xpath("//div[@class='col-md-9']")
for url in urls:
url_link = url.find_element_by_xpath(
".//div[@class='card-title h4 text-left mb-1 mt-3 mt-md-0']//a").get_attribute("href")
if "https://www.cdc.gov" not in url_link:
url_link = "https://www.cdc.gov" + url_link
title = url.find_element_by_xpath(
".//div[@class='card-title h4 text-left mb-1 mt-3 mt-md-0']//a/span/span").text
try:
date = url.find_element_by_xpath(".//span[text()[contains(., 'Article Date')]]//following::span").text
except:
date = "NONE"
if filter(title):
info_list.append({
"agency": "cdc",
"time_and_loc": date,
"label": 'True',
"title": title,
'lang': "en"
})
fact_url_list.append(url_link)
except Exception as e:
traceback.print_tb(e.__traceback__)
pass
return {
"info_list": info_list,
"fact_url_list": fact_url_list
}
def page_in_webMD(driver, url):
info_list = []
fact_url_list = []
# https://www.webmd.com/search/search_results/default.aspx?query=covid19&page=2
# potential_url_list = []
try:
driver.get(url)
# documents = driver.find_elements_by_xpath("//div[@class='results-container']//p[@class='search-results-doc-title']")
th = driver.find_elements_by_xpath("//p[@class='search-results-doc-title']")
for doc in th:
try:
html = doc.get_attribute("innerHTML")
html = BeautifulSoup(html)
url = html.find('a').get("href")
news_title = html.find("a").get_text()
# url = doc.find_element_by_xpath("//a").get_attribute("href")
# news_title = doc.find_element_by_xpath("//a").text
if "?" in news_title or "video" in news_title.lower():
continue
if filter(news_title):
info_list.append({"agency": "webMD",
"time_and_loc": "NONE",
"label": 'True',
"claim": news_title,
'lang': "en"
})
fact_url_list.append(url)
except Exception as e:
print(str(e))
continue
except Exception as e:
print(str(e))
pass
return {
"info_list": info_list,
"fact_url_list": fact_url_list
}
def page_in_nih(driver, url):
info_list = []
fact_url_list = []
try:
driver.get(url)
documents = driver.find_elements_by_xpath("//div[@id='results']//div[@class='content-block-item result']")
# documents = driver.find_elements_by_xpath("//div[@id='results']//div[@class='content-block-item result']//h4[@class='title']")
# //h4[@class='title']
for doc in documents:
try:
# thi = doc.find_element_by_xpath("//h4[@class='title']").text
url = doc.find_element_by_xpath(".//h4[@class='title']//a").get_attribute("href")
news_title = doc.find_element_by_xpath(".//h4[@class='title']//a").text
body = doc.find_element_by_xpath(".//span[@class='description']").text
if filter(news_title) or filter(body):
# if filter(news_title):
info_list.append({"agency": "nih",
"time_and_loc": "NONE",
"label": 'True',
"title": news_title,
'lang': "en"
})
fact_url_list.append(url)
except Exception as e:
print(str(e))
continue
except Exception as e:
print(str(e))
pass
return {
"info_list": info_list,
"fact_url_list": fact_url_list
}
def page_in_science_daily(driver, url):
info_list = []
fact_url_list = []
# https://www.sciencedaily.com/search/?keyword=covid19#gsc.tab=0&gsc.q=covid%2019%20site%3Awww.sciencedaily.com&gsc.sort=&gsc.ref=more%3Areference_terms&gsc.page=5
try:
driver.get(url)
documents = driver.find_elements_by_xpath("//a[@class='gs-title']")
for doc in documents:
try:
url = doc.get_attribute("href")
news_title = doc.text
if filter(news_title):
info_list.append({"agency": "science_daily",
"time_and_loc": "NONE",
"label": 'True',
"claim": news_title,
'lang': "en"
})
fact_url_list.append(url)
except:
continue
except:
pass
return {
"info_list": info_list,
"fact_url_list": fact_url_list
}
def page_in_healthline(driver, url):
SCROLL_PAUSE_TIME = 0.5
info_list = []
fact_url_list = []
try:
driver.get(url)
last_height = driver.execute_script("return document.body.scrollHeight")
while True:
# Scroll down to bottom
driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
# Wait to load page
time.sleep(SCROLL_PAUSE_TIME)
# Calculate new scroll height and compare with last scroll height
new_height = driver.execute_script("return document.body.scrollHeight")
if new_height == last_height:
break
last_height = new_height
documents = driver.find_elements_by_xpath("//li[@class='css-18vzruc']")
for doc in documents:
try:
url = doc.find_element_by_xpath(".//div[@class='css-ps3vwz']"
"/a[@class='css-1818u65']")
news_title = url.text
url = url.get_attribute("href")
if "https://www.healthline.com/" not in url:
url = "https://www.healthline.com" + url
if filter(news_title):
info_list.append({"agency": "healthline",
"time_and_loc": "NONE",
"label": 'True',
"claim": news_title,
'lang': "en"
})
fact_url_list.append(url)
except:
continue
except:
pass
return {
"info_list": info_list,
"fact_url_list": fact_url_list
}
def page_in_smithsonianmag(driver, url):
# url = "https://www.smithsonianmag.com/search/?q=covid-19&page={}".format(page)
info_list = []
fact_url_list = []
try:
driver.get(url)
documents = driver.find_element_by_xpath("//div[@id='resultsList']")
documents = documents.find_elements_by_xpath(".//h3[@class='headline']/a")
for doc in documents:
# url = doc.find_element_by_xpath("//h3[@class='headline']/a").get_attribute("href")
# title = doc.find_element_by_xpath("//h3[@class='headline']/a").text
title = doc.text
url = doc.get_attribute("href")
if "https://www.smithsonianmag.com" not in url:
url = "https://www.smithsonianmag.com" + url
if filter(title) is False:
continue
info_list.append({
"agency": "smithsonianmag",
"title": title,
"label": "True",
"time_and_loc": "NONE",
'lang': "en"
})
fact_url_list.append(url)
except Exception as e:
logging.error(e)
print(str(e))
pass
return {
"info_list": info_list,
"fact_url_list": fact_url_list}
def page_in_undark(driver, url):
# https://undark.org/tag/covid19/page/2/
info_list = []
fact_url_list = []
try:
driver.get(url)
documents = driver.find_elements_by_xpath("//div[@class='loop-post-content']")
for doc in documents:
url = doc.find_element_by_xpath("//h5/a")
title = url.text
url = url.get_attribute("href")
if filter(title) is False:
continue
info_list.append({
"agency": "undark",
"title": title,
"label": "True",
"time_and_loc": "NONE",
'lang': "en"
})
fact_url_list.append(url)
except:
pass
return {
"info_list": info_list,
"fact_url_list": fact_url_list}
def page_in_ecdc(driver, url="./crawled_data/ecdc.html"):
# directly read url
html = open(url, 'r').readlines()
html = "\n".join(html)
soup = BeautifulSoup(html, "html.parser")
info_list = []
fact_url_list = []
articles = soup.findAll("article")
for article in articles:
try:
url = article.find('a', href=True)['href']
title = article.find("h3").getText()
if filter(title) is False:
continue
if len(title) < 1 or "Video on" in title:
continue
if url in fact_url_list:
continue
info_list.append({
"agency": "ecdc",
"title": title,
"label": "True",
"time_and_loc": "NONE",
'lang': "en"
})
fact_url_list.append(url)
except:
continue
return {
"info_list": info_list,
"fact_url_list": fact_url_list}
def page_in_MNT(driver, url):
# https: // www.medicalnewstoday.com / coronavirus
info_list = []
fact_url_list = []
try:
driver.get(url)
urls = driver.find_elements_by_xpath("//a[@class='css-ni2lnp']")
for url in urls:
href = url.get_attribute("href")
title = url.text
if href in fact_url_list:
continue
info_list.append(
{
"agency": "mnt",
"title": title,
"label": "True",
"time_and_loc": "NONE",
'lang': "en"
}
)
fact_url_list.append(href)
except:
pass
return {
"info_list": info_list,
"fact_url_list": fact_url_list}
def page_in_mayo_clinic(driver, url):
# https: // www.mayoclinic.org / diseases - conditions / coronavirus / symptoms - causes / syc - 20479963
info_list = []
fact_url_list = []
try:
driver.get(url)
urls = driver.find_elements_by_xpath("//ul[@id='relatedLinks_e4e21640-9045-439c-9b44-4f676624df46']/li")
# urls = urls.find_elements_by_xpath(".//li")
for url in urls:
url_link = url.find_element_by_xpath(".//a").get_attribute("href")
title = url.find_element_by_xpath(".//a").text
if len(title) == 0:
title = "NONE"
try:
date = url.find_element_by_xpath(".//span[@class='rc-date']").text
except:
date = "NONE"
info_list.append({
"agency": "mayo_clinic",
"title": title,
"label": "True",
"time_and_loc": date,
'lang': "en"
# "time_and_loc": "NONE"
})
fact_url_list.append(url_link)
except Exception as e:
# print(str(e.__traceback__))
traceback.print_tb(e.__traceback__)
return {
"info_list": info_list,
"fact_url_list": fact_url_list}
def page_in_cleveland_clink(driver, url):
# https://newsroom.clevelandclinic.org/category/news-releases/page/10/
info_list = []
fact_url_list = []
try:
driver.get(url)
# urls = driver.find_elements_by_xpath("//h2[@class='entry-title']")
urls = driver.find_elements_by_xpath("//a[@class='entry-title-link']")
for url in urls:
# date = url.find_element_by_xpath("//span[@class='posted-on']/span[@class='entry-date']").text
# print(url.get_attribute("innerHTML"))
title = url.text
# print(title)
url = url.get_attribute("href")
if filter(title):
info_list.append({
"agency": "cleveland_clink",
"title": title,
"label": "True",
"time_and_loc": "NONE",
'lang': "en"
})
fact_url_list.append(url)
else:
continue
except Exception as e:
print(str(e))
pass
return {
"info_list": info_list,
"fact_url_list": fact_url_list}
def page_in_snopes(driver, url):
# https://www.snopes.com/news/page/2/
info_list = []
fact_url_list = []
try:
driver.get(url)
# urls = driver.find_elements_by_xpath("//h5[@class='title']")
urls = driver.find_elements_by_xpath("//article[@class='media-wrapper']")
for url in urls:
title = url.find_element_by_xpath(".//h5[@class='title']").text
url = url.find_element_by_xpath(".//a").get_attribute("href")
if filter(title):
if url in fact_url_list:
continue
info_list.append({
"agency": "snopes",
"title": title,
"label": "True",
"time_and_loc": "NONE",
'lang': "en"
})
fact_url_list.append(url)
else:
continue
except Exception as e:
print(str(e))
pass
return {
"info_list": info_list,
"fact_url_list": fact_url_list}
def page_in_politico(driver, url):
# https://www.politico.com/search/2?q=covid19
info_list = []
fact_url_list = []
try:
driver.get(url)
urls = driver.find_elements_by_xpath("//div[@class='summary']")
# urls = driver.find_elements_by_xpath("//h3/a[@target='_top']")
for url in urls:
title = url.find_element_by_xpath(".//h3/a").text
# title = url.text
date = url.find_element_by_xpath(".//p[@class='timestamp']/time").text
url = url(".//h3/a").get_attribute("href")
if filter(title):
info_list.append({
"agency": "politico",
"title": title,
"label": "True",
"time_and_loc": date,
'lang': "en"
# "time_and_loc": "NONE"
})
fact_url_list.append(url)
else:
continue
except Exception as e:
print(str(e))
pass
return {
"info_list": info_list,
"fact_url_list": fact_url_list}
def page_in_mit(driver, url="./crawled_data/mit.html"):
info_list = []
fact_url_list = []
html = open(url, 'r').readlines()
html = "\n".join(html)
soup = BeautifulSoup(html)
urls = soup.findAll("li")
for url in urls:
try:
title = url.find("h3").find("a").getText()
except:
continue
print(title)
url_link = url.find("h3").find("a", href=True)['href']
if filter(title):
info_list.append({
"title": title,
'agency': "mit",
'label': "True",
'date_and_loc': "NONE",
'lang': "en"
})
fact_url_list.append(url_link)
return {
"info_list": info_list,
"fact_url_list": fact_url_list}
def page_in_animalpolitico(driver, url):
    if str(url) == "0":
url = "https://www.animalpolitico.com/archivo/?busqueda=coronavirus"
else:
url = "https://www.animalpolitico.com/archivo/?busqueda=covid-19"
info_list = []
fact_url_list = []
driver.get(url)
urls_html = driver.find_elements_by_xpath("//a[@class='ap_note_link']")
for url in urls_html:
title = url.get_attribute("title")
author = url.get_attribute("figure-author")
url_link = url.get_attribute("href")
info_list.append({
"title": title,
'agency': "animalpolitico",
'label': "True",
'date_and_loc': "NONE",
'author': author,
"lang": "es",
'url': url_link
})
fact_url_list.append(url_link)
return {
"info_list": info_list,
"fact_url_list": fact_url_list}
def page_in_milenio(driver, url):
    # success
# https://www.efe.com/efe/espana/busqueda/50000538?q=covid-19&p={}&s=0
info_list = []
fact_url_list = []
driver.get(url)
urls_html = driver.find_elements_by_xpath("//article/a[@id='link']")
for url in urls_html:
url_link = url.get_attribute("href")
span = url.find_element_by_xpath(".//h3/span").text
title = url.find_element_by_xpath(".//h3").text
title = title.replace(span, "")
try:
time = url.find_element_by_xpath(".//span[@id='fecha']").text
location = url.find_element_by_xpath(".//span[@id='origen']").text
except:
time = "NONE"
location = "NONE`"
print(1)
info_list.append({
"title": title,
'agency': "milenio",
'label': "True",
'date_and_loc': time + "-" + location,
"lang": "es",
'url': url_link
})
fact_url_list.append(url_link)
return {
"info_list": info_list,
"fact_url_list": fact_url_list}
def page_in_elpais(driver, url):
# Success
# https://elpais.com/noticias/covid-19/{}/
info_list = []
fact_url_list = []
driver.get(url)
urls_html = driver.find_elements_by_xpath("//article")
for url in urls_html:
url_link = url.find_element_by_xpath(".//h2//a").get_attribute("href")
title = url.find_element_by_xpath(".//h2//a").text
try:
author = url.find_element_by_xpath(".//div/span[@class=' false']/a").text
except:
author = "NONE"
print("1")
time = url.find_element_by_xpath(".//time").get_attribute("datetime")
info_list.append({
"title": title,
'agency': "elpais",
'label': "True",
'date_and_loc': time + "-" + "NONE",
'author': author,
"lang": "es",
'url': url_link,
})
fact_url_list.append(url_link)
return {
"info_list": info_list,
"fact_url_list": fact_url_list}
def page_in_abces(driver, url):
# success
# https://www.abc.es/hemeroteca/resultados-busqueda-avanzada/todo/pagina-{}?tod=covid&nin=19
info_list = []
fact_url_list = []
driver.get(url)
urls_html = driver.find_elements_by_xpath("//li/h2")
for url in urls_html:
url_link = url.find_element_by_xpath("./a").get_attribute("href")
title = url.find_element_by_xpath("./a").get_attribute("title")
# author = url.find_element_by_xpath(".//div/span[@class=' false']/a").text
time = url.find_element_by_xpath("./following::span[@class='date']").text
author = url.find_element_by_xpath("./following::p/span[@class='author']").text
info_list.append({
"title": title,
'agency': "abces",
'label': "True",
'date_and_loc': time + "-" + "NONE",
'author': author,
"lang": "es",
'url': url_link
})
fact_url_list.append(url_link)
return {
"info_list": info_list,
"fact_url_list": fact_url_list}
def page_in_lemonde(driver, url):
# success
# https://www.lemonde.fr/recherche/?search_keywords=covid-19&start_at=03/01/2020&end_at=26/07/2020&search_sort=relevance_desc&page={}
info_list = []
fact_url_list = []
driver.get(url)
urls_html = driver.find_elements_by_xpath("//section[@class='teaser teaser--inline-picture ']")
for url in urls_html:
url_link = url.find_element_by_xpath(".//a").get_attribute("href")
title = url.find_element_by_xpath(".//h3").text
try:
author = url.find_element_by_xpath(".//span[contains(@class,'author')]").text
time = url.find_element_by_xpath(".//span[@class='meta__date']").text.split("-")[0]
except:
author = "NONE"
time = "NONE"
info_list.append({
"title": title,
'agency': "lemonde",
'label': "True",
'date_and_loc': time + "-" + "NONE",
'author': author,
"lang": "fr",
'url': url_link
})
fact_url_list.append(url_link)
return {
"info_list": info_list,
"fact_url_list": fact_url_list}
def page_in_afp(driver, url):
# success
# https://www.afp.com/fr/search/results/covid-19?page={}&f[0]=im_field_tags%3A74
info_list = []
fact_url_list = []
driver.get(url)
urls_html = driver.find_elements_by_xpath("//h4//a")
for url in urls_html:
url_link = url.get_attribute("href")
title = url.text
time = url.find_element_by_xpath(".//preceding::div").text
info_list.append({
"title": title,
'agency': "afp",
'label': "True",
'date_and_loc': time + "-" + "NONE",
"lang": "fr",
'url': url_link
})
fact_url_list.append(url_link)
return {
"info_list": info_list,
"fact_url_list": fact_url_list}
import time as sleep_time
def page_in_jn(driver, url):
# success
# https://www.afp.com/fr/search/results/covid-19?page={}&f[0]=im_field_tags%3A74
idx = url
url = "https://www.jn.pt/pesquisa.html?q=covid-19"
info_list = []
fact_url_list = []
driver.get(url)
sleep_time.sleep(5)
# click specific page
button = driver.find_elements_by_xpath("//div[@class='gsc-cursor']/div")[int(idx)]
button.click()
sleep_time.sleep(5)
urls_html = driver.find_elements_by_xpath("//a[@class='gs-title']")
for url in urls_html:
url_link = url.get_attribute("href")
title = url.text
time = "NONE"
info_list.append({
"title": title,
'agency': "jn",
'label': "True",
'date_and_loc': time,
"lang": "pt",
'url': url_link
})
fact_url_list.append(url_link)
return {
"info_list": info_list,
"fact_url_list": fact_url_list}
def page_in_dn(driver, url):
# https://www.afp.com/fr/search/results/covid-19?page={}&f[0]=im_field_tags%3A74
# https://www.publico.pt/coronavirus
# url = "https://www.jn.pt/pesquisa.html?q=covid-19"
url = "https://www.dn.pt/tag/coronavirus.html"
info_list = []
fact_url_list = []
driver.get(url)
i = 0
while i < 10:
try:
driver.find_element_by_xpath("//a/span[text()[contains(.,'Ver mais')]]/preceding::a").click()
sleep_time.sleep(5)
except:
print("ERROR")
th = 1
i += 1
urls = driver.find_elements_by_xpath("//article[@class='t-s11-am1']")
for url in urls:
url_link = url.find_element_by_xpath(".//a[@class='t-am-text']").get_attribute("href")
title = url.find_element_by_xpath(".//h2[@class='t-am-title']").text
time = "NONE"
info_list.append({
"title": title,
'agency': "dn",
'label': "True",
'date_and_loc': time,
"lang": "pt",
'url': url_link
})
fact_url_list.append(url_link)
return {
"info_list": info_list,
"fact_url_list": fact_url_list}
def page_in_publico(driver, url):
# https://www.afp.com/fr/search/results/covid-19?page={}&f[0]=im_field_tags%3A74
# https://www.publico.pt/coronavirus
# url = "https://www.jn.pt/pesquisa.html?q=covid-19"
url = "https://www.publico.pt/coronavirus"
info_list = []
fact_url_list = []
driver.get(url)
i = 0
while i < 5:
try:
driver.find_element_by_xpath("//a[text()[contains(.,'Mais artigos')]]").click()
sleep_time.sleep(5)
except:
th = 1
i += 1
urls = driver.find_elements_by_xpath("//ul[@id='ul-listing']//div[@class='media-object-section']")
for url in urls:
url_link = url.find_element_by_xpath(".//a").get_attribute("href")
title = url.find_element_by_xpath(".//a/h4[@class='headline']")
title_text = title.text
print(title_text)
try:
author = url.find_element_by_xpath(".//a[@rel='author']")
author_text = author.text
except:
author_text = "NONE"
time = "NONE"
info_list.append({
"title": title_text,
'agency': "publico",
'label': "True",
'date_and_loc': time,
'author': author_text,
"lang": "pt",
'url': url_link
})
fact_url_list.append(url_link)
return {
"info_list": info_list,
"fact_url_list": fact_url_list}
import json
def page_in_archive(driver, url=None):
# url = "https://archive.md/Q1kAy"
if url is None:
url = "https://archive.md/pyjiz"
#
# url = "https://archive.fo/aJeCO"
# url = "https://archive.fo/I1jjJ"
# url = "http://archive.is/Cx0Cx"
# url = "https://archive.fo/Lw3yV"
# url = "http://archive.md/5Sfpt"
driver.get(url)
origin_url = driver.find_element_by_xpath(
"/html/body/center/div[1]/table/tbody/tr[1]/td[3]/form/table/tbody/tr/td[1]/input[1]").get_attribute("value")
html = driver.find_element_by_xpath("//div[@class='body']").get_attribute("innerHTML")
article = crawl_link_article(url=url, inner_html=html)
content = article['text']
if "facebook" in origin_url:
try:
content1 = driver.find_element_by_xpath(".//div[@id='js_3']").text
except:
content1 = content
try:
content2 = [i.text for i in driver.find_elements_by_xpath(
"//span[@dir='auto' and @old-class='oi732d6d ik7dh3pa d2edcug0 qv66sw1b c1et5uql a8c37x1j muag1w35 enqfppq2 jq4qci2q a3bd9o3v knj5qynh oo9gr5id']")][
0]
except:
content2 = content
if len(content1.split()) > len(content.split()):
content = content1
if len(content2.split()) > len(content.split()):
content = content2
logging.info("Content {}".format(content))
print("Content {}".format(content))
comments = driver.find_elements_by_xpath("//div[contains(@aria-label, 'Comment')]")
scree_name_list = []
comment_list = []
for c in comments:
try:
reply_user = c.find_element_by_xpath(".//a").get_attribute("href")
screen_name = reply_user.split("https")[-1].split("?")[0].split("/")[-1]
comment_text = c.text
scree_name_list.append(screen_name)
comment_list.append(comment_text)
except:
continue
# content = driver.find_element_by_xpath("//div[@old-class='_1dwg _1w_m _q7o']")
# content = driver.find_element_by_xpath("//div[@old-class='_1dwg _1w_m _q7o']")
return_dic = {"screen_name": scree_name_list, "comment": comment_list, "text": content, "type": "facebook",
"url": origin_url}
elif "twitter" in origin_url:
# screen_name = origin_url.split("com/")[1].split("/")[0]
# user_name = driver.find_element_by_xpath("/html/body/center/div[4]/div/div[1]/div/div/div/div/div/div[2]/main/div/div/div/div[1]/div/div[2]/div/section/div/div/div[1]/div/div/div/div/article/div/div[2]/div[2]/div/div/div/div[1]/a/div/div[1]/div[1]/span/span").text
screen_name = None
text = None
# if len(text) > content:
# content = text
try:
time = driver.find_element_by_xpath(
"/html/body/center/div[4]/div/div[1]/div/div/div/div/div/div[2]/main/div/div/div/div[1]/div/div[2]/div/section/div/div/div[1]/div/div/div/div/article/div/div[3]/div[4]/div/div[1]/span[1]/span").text
except:
time = None
return_dic = {"screen_name": screen_name, "type": "twitter", 'url': origin_url, 'text': text, 'time': time}
elif "youtube" in origin_url:
screen_name = driver.find_elements_by_xpath("//a[@id='author-text']")
comments = driver.find_elements_by_xpath("//div[@id='main']")
screen_name = [i.text for i in screen_name]
comments = [i.text for i in comments]
return_dic = {"screen_name": screen_name, "comment": comments, "type": "youtube", 'url': origin_url}
elif "instgram" not in origin_url and "reddit" not in origin_url:
# consider this as traditional URL
return_dic = {"type": "news", 'origin_url': origin_url}
return_dic.update(article)
else:
return_dic = {"origin_url": origin_url, 'type': "unknown"}
# Only keep the top 10 words for the title
return_dic.update({"text": content, 'url': origin_url, 'title': " ".join(content.split(" ")[:10])})
return return_dic
# def page_in_webarchive(driver, url):
from lxml import html
def page_in_perma(driver=None, url=None):
# url = "https://perma.cc/4D68-XLWS"
driver.get(url)
response = requests.get(url)
tree = html.fromstring(response.content)
th = tree.xpath("//div[@figure-testid='post_message']//text()")
text_content = []
text_set = set()
for i in th:
if i not in text_set:
text_content.append(i)
text_set.add(i)
th = " ".join(text_content)
content = " ".join([i.get_attribute("text") for i in content])
print(content)
key = url.strip("/").split("/")[-1]
url = "https://api.perma.cc/v1/public/archives/{}".format(key)
response = requests.get(url)
if response.status_code == 200:
return_json = json.loads(response.content)
print(return_json)
origin_url = return_json['url']
content = return_json['description']
title = return_json['title']
time = return_json['creation_timestamp']
type = origin_url.split("//")[1].split(".")[1]
return_dic = {"text": content, "type": type, "url": origin_url, 'time':time,"title": " ".join(content.split(" ")[:10])}
return return_dic
else:
return None
from code.util import crawl_link_article, get_text_hash
import pandas as pd
from twitter_user_info_crawler.FetchUserProfileInfo import fetch_user_tweets
from code.util import Constants
def page_in_saglik(driver, url, db):
driver.get(url)
kws = ["koronavirüs", "kovid"]
collection_c = db['news_collection']
for page in range(2, 12, 1):
urls_sele = driver.find_elements_by_xpath("//div[@id='bbAlt']//li/a")
urls = [i.get_attribute("href") for i in urls_sele]
title = [i.text for i in urls_sele]
title_url = [(i, j) for i, j in zip(urls, title) if any([kw in j for kw in kws])]
title_url = pd.DataFrame(title_url, columns=['url', 'title'])
title_url['label'] = True
title_url['loc'] = "Turkish"
title_url['lang'] = "tr"
title_url['agency'] = "saglik"
print(len(title_url))
        # iterrows() yields (index, row) pairs, and an insert document must not contain
        # a "$set" operator, so upsert the fields directly as done elsewhere in this file.
        for _, i in title_url.iterrows():
            id = i['agency'] + "-" + get_text_hash(i['url'])
            collection_c.find_one_and_update({"news_id": id}, {
                "$set": {
                    "news_id": id,
                    "ref_source_url": i['url'],
                    "statement": i['title'],
                    "lang": i['lang'],
                    "label": i['label']
                }
            }, upsert=True)
try:
article = crawl_link_article(i['url'])
collection_c.find_one_and_update({"news_id": id}, {
"$set": {
"ref_source_content": article['publish_date']
}
})
except:
print("error in get text content")
continue
try:
driver.find_element_by_xpath("//td/a[text()={}]".format(page)).click()
except:
print("ERROR in Page: {}".format(page))
continue
def twitter_users(user, kws, db, lang):
news_collection = db[Constants.NEWS_COLLECTION]
tweet_collection = db[Constants.TWEET_COLLECTION]
news_relate_collection = db[Constants.NEWS_TWEET_RELATION]
users_collection = db[Constants.USER_PROFILE_RELATION]
users_tweets = fetch_user_tweets(user, None, kws=kws, limit=1000)
for user_tweet in users_tweets:
print(user_tweet)
news_id = "twitter" + "-" + str(user_tweet['id'])
news_collection.find_one_and_update({"news_id": news_id}, {"$set": {"news_id": news_id,
"label": "real",
"statement": user_tweet['tweet'],
"ref_source_url": "https://twitter.com/i/web/status/{}".format(
user_tweet['id']),
"lang": lang,
"ref_source": {"text": user_tweet['tweet']}
}}, upsert=True)
news_relate_collection.find_one_and_update({"news_id": news_id}, {"$set": {
"news_id": news_id,
"tweet_list": [user_tweet['id']]
}}, upsert=True)
tweet_collection.find_one_and_update({"id": user_tweet['id']}, {"$set": user_tweet}, upsert=True)
posted_tweet_ids = [i['id'] for i in users_tweets]
users_collection.find_one_and_update({"screen_name": user},
{"$set": {"screen_name": user, "recent_post": posted_tweet_ids}}, upsert=True)
# MoHFW_INDIA
def get_real_twitter(db):
lang = {"MoHFW_INDIA": "hi", "CovidIndiaSeva": "hi", "WHO": "en", "NIH": 'en',
"trvrb": "en", "MayoClinic": "en", "SSalud_mx": "es", "MinisteroSalute": 'it', "govpt": "pt",
"santeprevention": "fr", "EU_OSHA": "en", "EU_Commission": "en", "sanidadgob": "es"}
twitter_ids = []
for i in db.news_tweet_relation.find({"news_id": {"$regex": "twitter"}}, {"news_id": 1}):
news_id = i['news_id']
tweet_id = int(news_id.split("-")[1])
content = db.tweet_collection.find_one({"id": {"$in": [tweet_id, str(tweet_id)]}}, {"_id": 0})
if content is None:
twitter_ids.append(tweet_id)
continue
try:
user = content['user']['screen_name']
text = content['full_text']
except:
user = content['username']
text = content['tweet']
try:
# print(lang[user])
db.news_collection.find_one_and_update({"news_id": news_id}, {"$set": {"news_id": news_id,
"label": "real",
"statement": text,
"ref_source_url": "https://twitter.com/i/web/status/{}".format(
tweet_id),
"lang": lang[user],
"ref_source": {"text": text}
}}, upsert=True)
except:
print(user)
with open("dyhdrate_tweet.txt", 'w') as f1:
for i in twitter_ids:
f1.write(str(i) + "\n")
from selenium.webdriver.support.ui import WebDriverWait
def fix_europa(url, driver):
driver.get(url)
WebDriverWait(driver, 10)
# ecl-page-header__title
titles = driver.find_elements_by_xpath("//h1")
titles = [i.text for i in titles]
title = titles[0]
if "Daily News" in title:
title_tags = driver.find_element_by_xpath("//div[@class='ecl-paragraph']/p")
title = " ".join([i.text for i in title_tags.find_elements_by_xpath("./strong")])
content = driver.find_element_by_xpath("/html/body/app-root/app-detail/main/div/div/div[2]/section[1]/div/p[2]").text
else:
content = driver.find_element_by_xpath("//div[@class='ecl-col-md-9']").text
inner_html = driver.page_source
return {"title":title, "text":content, "html":inner_html}
def fix_mscbs(url, driver):
driver.get(url)
title = driver.find_element_by_xpath("//h2").text
content = "\n".join([i.text for i in driver.find_elements_by_xpath("//section[@role='main']/div")])
inner_html = driver.page_source
return {'title':title, "text":content, "html":inner_html}
def fix_portugal(url, driver):
driver.get(url)
title = driver.find_element_by_xpath("//h1[@class='title']").text
content = driver.find_elements_by_xpath("//div[@id='regText']")
content = "\n".join([i.text for i in content])
inner_html = driver.page_source
return {'title':title, "text":content, "html":inner_html}
def filter(claim_text):
flag = False
for key in key_words_list:
if key in claim_text.lower():
flag = True
break
return flag
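# Illustrative behaviour of filter(), assuming key_words_list (defined elsewhere in
# this module) contains terms such as "covid" or "coronavirus":
#   filter("New COVID-19 guidance issued")   -> True
#   filter("Local sports roundup results")   -> False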
reliable = """
cdc
who
nih
webmd
smithsonianmag
science_daily
healthline
ecdc
mnt
mayo_clinic
celeveland
snopes
politico
mit
dn
publico
afp
elpais
abces
animalpolitico
lemonde
jn
publico
milenio"""
def real_news_all_in_one(db):
for name in reliable.split("\n"):
name = name.strip()
ReliableCrawler(name, page_count=30, db=db)
get_real_twitter(db)
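# Minimal usage sketch (assumptions: ReliableCrawler is defined elsewhere in this
# project and the MongoDB database name below is hypothetical):
#   from pymongo import MongoClient
#   db = MongoClient("mongodb://localhost:27017")["covid_news"]
#   real_news_all_in_one(db)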
|
[
"yichuan1@asu.edu"
] |
yichuan1@asu.edu
|
6e942ba9777842cc7011fd0a18a65bbc177ac8d5
|
ab9f1f20b6b98537f0e80bf77d8fd558fbc0d6c7
|
/pcap/day10/__main__.py
|
ecfe6d7721cf85f422c42df11312cd478d04c76c
|
[] |
no_license
|
Starchery/cit-assoc-exam
|
67c0ec308a4439cfbb943299c516f549a42c1ae7
|
d93d3d20dc3f02758a14333de617408c3aed6bf5
|
refs/heads/main
| 2023-01-22T10:45:56.429272
| 2020-11-24T23:50:02
| 2020-11-24T23:50:02
| 309,832,085
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 239
|
py
|
"""Docstring for __main__.py"""
from pcap.day10.uploader import Uploader
# If this file is being run directly
# instead of being imported
if __name__ == "__main__":
uploader = Uploader()
uploader.upload()
print(uploader.url)
|
[
"pivotsallit@gmail.com"
] |
pivotsallit@gmail.com
|
0d0662c43a5d9a7703fd6b09578c2a3ee47ea32e
|
73ef727bf3a505981197c09c445d7856a4c67a83
|
/lib/shz_models/rel_model_depth_union.py
|
73f205bbe718b8657dc52cc76d927118d2c5b853
|
[
"MIT"
] |
permissive
|
sharifza/Depth-VRD
|
ce44c49ae289de344910b6cb4e46b2b635d7532d
|
39fb0d493f44ac2daf4bbc8569a1c74e8828da5f
|
refs/heads/master
| 2021-01-01T01:47:41.798736
| 2020-02-02T12:54:33
| 2020-02-02T12:54:33
| 239,128,239
| 1
| 0
|
MIT
| 2020-02-08T13:45:56
| 2020-02-08T12:22:53
| null |
UTF-8
|
Python
| false
| false
| 6,300
|
py
|
"""
Depth-Union relation detection model
"""
import torch.nn as nn
from torch.autograd import Variable
from torch.nn import functional as F
from lib.depth_backbone.depth_cnn import DepthCNN, DEPTH_DIMS, DEPTH_CHANNELS, DEPTH_MODELS
from lib.shz_models.rel_model_base import RelModelBase
from lib.get_union_boxes import UnionBoxesAndFeats
from lib.pytorch_misc import to_onehot, arange, xavier_init
from lib.surgery import filter_dets
MODES = ('sgdet', 'sgcls', 'predcls')
class RelModel(RelModelBase):
"""
Depth-Union relation detection model
"""
# -- Depth FC layer size
FC_SIZE_DEPTH = 4096
def __init__(self, classes, rel_classes, mode='sgdet', num_gpus=1, require_overlap_det=True,
depth_model=None, pretrained_depth=False, **kwargs):
"""
:param classes: object classes
        :param rel_classes: relationship classes. None if we're not using rel mode
        :param mode: (sgcls, predcls, or sgdet)
        :param num_gpus: how many GPUs to use
:param require_overlap_det: Whether two objects must intersect
:param depth_model: provided architecture for depth feature extraction
:param pretrained_depth: Whether the depth feature extractor should be initialized with ImageNet weights
"""
RelModelBase.__init__(self, classes, rel_classes, mode, num_gpus, require_overlap_det)
# -- Store depth related parameters
assert depth_model in DEPTH_MODELS
self.depth_model = depth_model
self.pretrained_depth = pretrained_depth
self.depth_pooling_dim = DEPTH_DIMS[self.depth_model]
self.depth_channels = DEPTH_CHANNELS[self.depth_model]
self.pooling_size = 7
self.detector = nn.Module()
# -- Initialize depth backbone
self.depth_backbone = DepthCNN(depth_model=self.depth_model,
pretrained=self.pretrained_depth)
# -- Union of Bounding boxes feature extractor
self.depth_union_boxes = UnionBoxesAndFeats(pooling_size=self.pooling_size, stride=16,
dim=self.depth_channels)
# -- Create a relation head which is used to carry on the feature extraction
# from union features of depth features
self.depth_rel_head_union = self.depth_backbone.get_classifier()
# -- Final FC layer which predicts the relations
self.depth_rel_out = xavier_init(nn.Linear(self.depth_pooling_dim, self.num_rels, bias=True))
# -- Freeze the backbone (Pre-trained mode)
if self.pretrained_depth:
self.freeze_module(self.depth_backbone)
def get_union_features_depth(self, features, rois, pair_inds):
"""
Gets features of Union of Bounding Boxes
:param features: [batch_size, dim, IM_SIZE/4, IM_SIZE/4] (features at level p2)
:param rois: [num_rois, 5] array of [img_num, x0, y0, x1, y1].
:return: [num_rois, #dim] array
"""
assert pair_inds.size(1) == 2
uboxes = self.depth_union_boxes(features, rois, pair_inds)
if self.depth_model not in ('resnet18', 'resnet50', 'sqznet'):
uboxes = uboxes.view(pair_inds.size(0), -1)
return self.depth_rel_head_union(uboxes)
def forward(self, x, im_sizes, image_offset,
gt_boxes=None, gt_classes=None, gt_rels=None, proposals=None,
train_anchor_inds=None, return_fmap=False, depth_imgs=None):
"""
Forward pass for relation detection
:param x: Images@[batch_size, 3, IM_SIZE, IM_SIZE]
:param im_sizes: a numpy array of (h, w, scale) for each image.
:param image_offset: offset onto what image we're on for MGPU training (if single GPU this is 0)
:param gt_boxes: [num_gt, 4] GT boxes over the batch.
:param gt_classes: [num_gt, 2] gt boxes where each one is (img_id, class)
:param gt_rels: [] gt relations
:param proposals: region proposals retrieved from file
:param train_anchor_inds: a [num_train, 2] array of indices for the anchors that will
be used to compute the training loss. Each (img_ind, fpn_idx)
:param return_fmap: If the object detector must return the extracted feature maps
:param depth_imgs: depth images [batch_size, 1, IM_SIZE, IM_SIZE]
"""
# -- Get prior `result` object (instead of calling faster-rcnn-detector)
result = self.get_prior_results(image_offset, gt_boxes, gt_classes, gt_rels)
# -- Get RoI and relations
rois, rel_inds = self.get_rois_and_rels(result, image_offset, gt_boxes, gt_classes, gt_rels)
# -- Extract features from depth backbone
depth_features = self.depth_backbone(depth_imgs)
# -- Prevent the gradients from flowing back to depth backbone (Pre-trained mode)
if self.pretrained_depth:
depth_features = depth_features.detach()
# -- Extract UoBB features --
union_features = self.get_union_features_depth(depth_features, rois, rel_inds[:, 1:])
# -- Get the final rel distances
result.rel_dists = self.depth_rel_out(union_features)
# --- *** END OF ARCHITECTURE *** ---#
# -- Prepare object predictions vector (PredCLS)
        # Assuming it's predcls
obj_labels = result.rm_obj_labels if self.training or self.mode == 'predcls' else None
# One hot vector of objects
result.rm_obj_dists = Variable(to_onehot(obj_labels.data, self.num_classes))
# Indexed vector
result.obj_preds = obj_labels if obj_labels is not None else result.rm_obj_dists[:, 1:].max(1)[1] + 1
if self.training:
return result
twod_inds = arange(result.obj_preds.data) * self.num_classes + result.obj_preds.data
result.obj_scores = F.softmax(result.rm_obj_dists, dim=1).view(-1)[twod_inds]
# Boxes will get fixed by filter_dets function.
bboxes = result.rm_box_priors
rel_rep = F.softmax(result.rel_dists, dim=1)
# Filtering: Subject_Score * Pred_score * Obj_score, sorted and ranked
return filter_dets(bboxes, result.obj_scores,
result.obj_preds, rel_inds[:, 1:], rel_rep)
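# Construction sketch (argument values are illustrative; `classes` and `rel_classes`
# normally come from the dataset loader elsewhere in this repository):
#   model = RelModel(classes, rel_classes, mode='predcls', num_gpus=1,
#                    depth_model='resnet18', pretrained_depth=True)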
|
[
"sina.baharlou@gmail.com"
] |
sina.baharlou@gmail.com
|
247c239100ef619a331be5d46ae4dabbf1f51393
|
bf69394cc6015f2c8ac28ae927be2a83b96facf3
|
/lib/utils/training_stats.py
|
6aff48aa5ddbb6c269cd19eb13e3b1621d6a791a
|
[
"MIT"
] |
permissive
|
fangyuan-ksgk/Detectron.pytorch
|
bf1133b73763ec682b4f219a857e81515d86ebf5
|
e8dfb86fbc68d30b9f443bc6aec722c5e4ce301e
|
refs/heads/master
| 2023-03-16T04:48:22.648717
| 2018-04-30T14:54:28
| 2018-04-30T14:54:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,618
|
py
|
#!/usr/bin/env python2
# Copyright (c) 2017-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
"""Utilities for training."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from collections import defaultdict, OrderedDict
import datetime
import numpy as np
from core.config import cfg
from utils.logging import log_stats
from utils.logging import SmoothedValue
from utils.timer import Timer
import utils.net as nu
class TrainingStats(object):
"""Track vital training statistics."""
def __init__(self, misc_args, log_period=20, tensorboard_logger=None):
# Output logging period in SGD iterations
self.misc_args = misc_args
self.LOG_PERIOD = log_period
self.tblogger = tensorboard_logger
self.tb_ignored_keys = ['iter', 'eta']
self.iter_timer = Timer()
# Window size for smoothing tracked values (with median filtering)
self.WIN_SZ = 20
def create_smoothed_value():
return SmoothedValue(self.WIN_SZ)
self.smoothed_losses = defaultdict(create_smoothed_value)
self.smoothed_metrics = defaultdict(create_smoothed_value)
self.smoothed_total_loss = SmoothedValue(self.WIN_SZ)
def IterTic(self):
self.iter_timer.tic()
def IterToc(self):
return self.iter_timer.toc(average=False)
def ResetIterTimer(self):
self.iter_timer.reset()
def UpdateIterStats(self, model_out):
"""Update tracked iteration statistics."""
total_loss = 0
if cfg.FPN.FPN_ON:
loss_rpn_cls_value = 0
loss_rpn_bbox_value = 0
for k, loss in model_out['losses'].items():
assert loss.shape[0] == cfg.NUM_GPUS
loss = loss.mean(dim=0)
total_loss += loss
loss_data = loss.data[0]
self.smoothed_losses[k].AddValue(loss_data)
model_out['losses'][k] = loss
if k.startswith('loss_rpn_cls'):
loss_rpn_cls_value += loss_data
elif k.startswith('loss_rpn_bbox'):
loss_rpn_bbox_value += loss_data
self.smoothed_total_loss.AddValue(total_loss.data[0])
model_out['total_loss'] = total_loss
if cfg.FPN.FPN_ON:
self.smoothed_losses['loss_rpn_cls'].AddValue(loss_rpn_cls_value)
self.smoothed_losses['loss_rpn_bbox'].AddValue(loss_rpn_bbox_value)
for k, metric in model_out['metrics'].items():
metric = metric.mean(dim=0)
self.smoothed_metrics[k].AddValue(metric.data[0])
model_out['metrics'][k] = metric
def LogIterStats(self, cur_iter, lr):
"""Log the tracked statistics."""
if (cur_iter % self.LOG_PERIOD == 0 or
cur_iter == cfg.SOLVER.MAX_ITER - 1):
stats = self.GetStats(cur_iter, lr)
log_stats(stats, self.misc_args)
if self.tblogger:
self.tb_log_stats(stats, cur_iter)
def tb_log_stats(self, stats, cur_iter):
"""Log the tracked statistics to tensorboard"""
for k in stats:
if k not in self.tb_ignored_keys:
v = stats[k]
if isinstance(v, dict):
self.tb_log_stats(v, cur_iter)
else:
self.tblogger.add_scalar(k, v, cur_iter)
def GetStats(self, cur_iter, lr):
eta_seconds = self.iter_timer.average_time * (
cfg.SOLVER.MAX_ITER - cur_iter
)
eta = str(datetime.timedelta(seconds=int(eta_seconds)))
stats = OrderedDict(
iter=cur_iter + 1, # 1-indexed
time=self.iter_timer.average_time,
eta=eta,
loss=self.smoothed_total_loss.GetMedianValue(),
lr=lr,
)
stats['metrics'] = OrderedDict()
for k in sorted(self.smoothed_metrics):
stats['metrics'][k] = self.smoothed_metrics[k].GetMedianValue()
head_losses = []
rpn_losses = []
rpn_fpn_cls_losses = []
rpn_fpn_bbox_losses = []
for k, v in self.smoothed_losses.items():
toks = k.split('_')
if len(toks) == 2:
head_losses.append((k, v.GetMedianValue()))
elif len(toks) == 3:
rpn_losses.append((k, v.GetMedianValue()))
elif len(toks) == 4 and toks[2] == 'cls':
rpn_fpn_cls_losses.append((k, v.GetMedianValue()))
elif len(toks) == 4 and toks[2] == 'bbox':
rpn_fpn_bbox_losses.append((k, v.GetMedianValue()))
else:
raise ValueError("Unexpected loss key: %s" % k)
stats['head_losses'] = OrderedDict(head_losses)
stats['rpn_losses'] = OrderedDict(rpn_losses)
stats['rpn_fpn_cls_losses'] = OrderedDict(rpn_fpn_cls_losses)
stats['rpn_fpn_bbox_losses'] = OrderedDict(rpn_fpn_bbox_losses)
return stats
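# Typical training-loop usage sketch (assumes the model returns a dict with per-GPU
# 'losses' and 'metrics', matching UpdateIterStats above; variable names are illustrative):
#   training_stats = TrainingStats(misc_args=args, log_period=20)
#   for cur_iter in range(cfg.SOLVER.MAX_ITER):
#       training_stats.IterTic()
#       net_outputs = model(**inputs)
#       training_stats.UpdateIterStats(net_outputs)
#       training_stats.IterToc()
#       training_stats.LogIterStats(cur_iter, lr)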
|
[
"roytseng.tw@gmail.com"
] |
roytseng.tw@gmail.com
|
c697d125b0367a7834e07b1127c2335e79570e79
|
2dfbb018568209864544375de59a157c8752689a
|
/skimreads/comments/migrations/0002_auto__del_field_comment_reading__add_field_comment_note.py
|
7b339a69401d147362ef5085086bdfaa985c3fb8
|
[] |
no_license
|
tommydangerous/skimreads
|
7df4bde603c6122f20242d4591357802a4484f9f
|
6e73341ab034b52bb48cde4f076948946944d2a9
|
refs/heads/master
| 2020-05-17T23:20:15.020065
| 2014-09-27T06:28:34
| 2014-09-27T06:28:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,250
|
py
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'Comment.reading'
db.delete_column('comments_comment', 'reading_id')
# Adding field 'Comment.note'
db.add_column('comments_comment', 'note',
self.gf('django.db.models.fields.related.ForeignKey')(default=1, to=orm['readings.Note']),
keep_default=False)
def backwards(self, orm):
# Adding field 'Comment.reading'
db.add_column('comments_comment', 'reading',
self.gf('django.db.models.fields.related.ForeignKey')(default='', to=orm['readings.Reading']),
keep_default=False)
# Deleting field 'Comment.note'
db.delete_column('comments_comment', 'note_id')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'comments.comment': {
'Meta': {'object_name': 'Comment'},
'content': ('django.db.models.fields.TextField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'note': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['readings.Note']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'readings.note': {
'Meta': {'ordering': "['created']", 'object_name': 'Note'},
'content': ('django.db.models.fields.TextField', [], {'max_length': '100'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'reading': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['readings.Reading']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'readings.reading': {
'Meta': {'object_name': 'Reading'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'link': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'views': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'})
}
}
complete_apps = ['comments']
|
[
"quantumventuress@gmail.com"
] |
quantumventuress@gmail.com
|
761276023e111f4ca39b6d67b8224aa0d968fd79
|
ab07c40a2f2a3149ceef0aa2965a7a5979cdb7ac
|
/constancias/wsgi.py
|
e884794ab41774dd21531c84957f586cdde837d0
|
[] |
no_license
|
softwareguru/constancias
|
b92f3a02d61201a92d4270c4e80c1929157b8a27
|
87101589a60d6a15d9222bcfcb1fe819a4e9f32f
|
refs/heads/main
| 2022-11-30T17:59:12.288811
| 2022-10-31T19:34:36
| 2022-10-31T19:34:36
| 2,902,746
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 399
|
py
|
"""
WSGI config for constancias project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'constancias.settings')
application = get_wsgi_application()
|
[
"pedro@sg.com.mx"
] |
pedro@sg.com.mx
|
334a9993a10f1558bd6a909096986c6cb5f989ec
|
14c732e368947ed5e347bd8554e23f7cc4e0e865
|
/ASAMs/ARCHIVE/GAUSSIAN/GaussianSAM_snn.py
|
54da4c0faa9b150f656044d3cf75aab380dd102e
|
[] |
no_license
|
rosenfeld88/FuzzyComposer
|
cc8af614aca9304c2a332d1de3ef237567020fcd
|
b149dc9001871edad93dff7f11ee6ce593a8cec4
|
refs/heads/master
| 2020-03-14T20:25:05.978269
| 2018-06-29T21:53:16
| 2018-06-29T21:53:16
| 131,776,328
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,146
|
py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from six.moves.urllib.request import urlopen
import numpy as np
import tensorflow as tf
import UTIL.ChordSymbolsLib as chords_lib
import scipy.io as spio
##FUZZY MUSIC COMPOSITION##
#GAUSSIAN STANDARD ADDITIVE MODEL (SAM)
#AUTHOR: TAYLOR ROSENFELD
#PARTNER: YAN ZHU
#START DATE: 11/12/17
class GaussianSAM:
#num_rules: number of rules for SAM (user-defined)
def __init__(self, num_rules, num_feats, memory):
self.num_rules = num_rules
num_labels = 128
self.num_features = num_feats
self.memory_size = memory
#SET UP MODEL PARAMETERS
self.mem_wgts = tf.Variable(tf.truncated_normal([memory, num_rules], mean = 1, stddev = 2))
self.m = tf.Variable(tf.truncated_normal([self.num_rules]))
self.d = tf.Variable(tf.truncated_normal([self.num_rules]))
self.w = tf.Variable(tf.truncated_normal([self.num_rules]))
self.v = tf.Variable(tf.truncated_normal([self.num_rules]))
self.c = tf.Variable(tf.truncated_normal([self.num_rules]))
self.cw = tf.Variable(tf.truncated_normal([num_labels], mean = 1, stddev = 2))
#WRITES DATA TO OUTPUT FILE
# def __write_data(self, error, filename):
# f = open(filename + '_error.mat', 'w')
# f.write('Epoch,Error\n')
# for i in range(len(error)):
# f.write(str(x_data[i]) + ',' + str(y_data[i]) + ',' + str(error[i]) + '\n')
def __get_octave(self, pitch):
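        # Worked example: int(60 / 12) == 5, so MIDI pitch 60 (middle C) maps to octave index 5.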
return int(pitch/12)
def train(self, melodies, adapt_iters, lr, epoch_size, model_save, filename):
#PREPARE TRAINING DATA
conditioner = tf.placeholder(tf.float32, shape = None)
actual_note = tf.placeholder(tf.int32, shape = None)
#FUZZY APPROX
x = tf.tensordot(conditioner, self.mem_wgts, 1)
ax = tf.exp(tf.multiply(-0.5, tf.square(tf.divide(tf.subtract(x, self.m), self.d))))
num = tf.reduce_sum(tf.multiply(tf.multiply(self.w, self.c), tf.multiply(self.v, ax)))
den = tf.reduce_sum(tf.multiply(tf.multiply(self.w, self.v), ax))
fuzzy_approx = tf.divide(num, den)
learn_note = tf.nn.softmax(tf.multiply(self.cw, tf.clip_by_value(fuzzy_approx, 0, 1)))
#tf.cast(learn_note, tf.int32)
#DEFINE LOSS, TRAIN, AND SAVE OPS
#loss = learn_interval - feature
#loss = tf.reduce_mean(tf.square(learn_interval - feature))
cross_entropy = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels = actual_note, logits = learn_note))
#train_op = tf.train.GradientDescentOptimizer(lr).minimize(loss)
train_op = tf.train.AdamOptimizer(lr, 0.9, 0.999).minimize(cross_entropy)
#train_op = tf.train.AdagradOptimizer(lr).minimize(loss)
saver = tf.train.Saver({'m': self.m, 'd': self.d, 'w': self.w, 'v': self.v, 'c': self.c, 'cw': self.cw, 'mem_wgts': self.mem_wgts})
#CREATE SESSION AND INITIALIZE
sess = tf.Session()
sess.run(tf.global_variables_initializer())
#TRAINING LOOP
#error = 0
epoch_ent = np.zeros(adapt_iters)
for epoch in range(adapt_iters):
cross_ent = 0
for m, melody in enumerate(melodies): #For each song
song_ent = 0
                # NOTE: `chords` is never defined in this method and the chord-based features
                # below are commented out, so skip the chord lookup to keep training runnable.
                # chord_prog = chords[m]
start = self.memory_size
for n in range(start, len(melody)): #For each note in the song
note = melody[n]
features = []
previous = melody[n-1]
for i in range(self.memory_size):
                        # chord = chord_prog[i]  # unused: the chord-based features are commented out below
prev = melody[n-i-1]
#print(prev)
prev_oct = self.__get_octave(prev)
features.append(prev)
#for j in range(self.num_features - 1):
# features.append(np.array(chord[j]) + (prev_oct * 12))
#assert features not np.any(np.isnan(features))
if note + 2 > 129 or note + 2 < 0:
                        print('WARNING: note value {} is outside the expected range'.format(note))
_, run_ent = sess.run([train_op, cross_entropy],
feed_dict = {conditioner: features, actual_note: note})
#print(sess.run([learn_note], feed_dict = {conditioner: features, actual_note: int(melody[n] + 2)}))
#print(run_ent)
song_ent += run_ent
cross_ent += song_ent/(len(melody) - self.memory_size)
epoch_ent[epoch] = cross_ent/len(melodies)
print(epoch_ent[epoch])
if epoch % epoch_size == 0.0:
print("Training Step: " + str(epoch))
spio.savemat(filename + '_snn_error.mat', mdict = {'error': epoch_ent})
saver.save(sess, model_save)
sess.close()
return epoch_ent[adapt_iters - 1]
def generate(self, chord_prog, primer_notes, model_save):
num_notes = len(chord_prog)
#PREPARE INPUT AND OUTPUT
conditioner = tf.placeholder(tf.float32, shape = None)
x = tf.tensordot(conditioner, self.mem_wgts, 1)
#SYSTEM
ax = tf.exp(tf.multiply(-0.5, tf.square(tf.divide((x - self.m), self.d))))
num = tf.reduce_sum(tf.multiply(tf.multiply(self.w, self.c), tf.multiply(self.v, ax)))
den = tf.reduce_sum(tf.multiply(tf.multiply(self.w, self.v), ax))
fuzzy_approx = tf.divide(num, den)
fuzzy_approx = tf.clip_by_value(fuzzy_approx, 0.001, 0.999)
learn_note = tf.nn.softmax(tf.multiply(self.cw, fuzzy_approx))
learn_note = tf.argmax(tf.cast(learn_note, tf.int32))
#SAVER
saver = tf.train.Saver({'m': self.m,
'd': self.d,
'w': self.w,
'v': self.v,
'c': self.c,
'cw': self.cw,
'mem_wgts': self.mem_wgts})
#CREATE AND RUN SESSION
melody = []
with tf.Session() as sess:
saver.restore(sess, model_save)
for n, note in enumerate(primer_notes):
melody.append(note)
start = self.memory_size
#print(melody)
for m in range(start, num_notes):
previous = []
for i in range(self.memory_size):
prev = melody[m-i-1]
previous.append(prev)
melody.append(sess.run(learn_note, feed_dict = {conditioner: previous}))
return melody
def rhythm_given_pitch(self, pitches, npm, num_measures, num_repeats, model_save):
rhythm = {}
#PREPARE INPUT AND OUTPUT
pitch = tf.placeholder(tf.float32, shape = None)
#SYSTEM
ax = tf.exp(tf.multiply(-0.5,tf.square(tf.divide((pitch-self.m), self.d))))
num = tf.reduce_sum(tf.multiply(tf.multiply(self.w,self.c),tf.multiply(self.v,ax)))
den = tf.reduce_sum(tf.multiply(tf.multiply(self.w,self.v),ax))
sample = tf.divide(num,den)
#SAVER
saver = tf.train.Saver()
#CREATE AND RUN SESSION
with tf.Session() as sess:
saver.restore(sess, model_save)
meas = 1
for r in range(num_repeats):
for m in range(num_measures):
measure = np.zeros(npm)
for n in range(npm):
measure[n] = sess.run(sample, feed_dict = {pitch: pitches[meas][n]})
rhythm[meas] = measure
meas += 1
return rhythm
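# Usage sketch (argument values are illustrative and assume the shapes implied by
# the training loop above):
#   sam = GaussianSAM(num_rules=50, num_feats=1, memory=5)
#   sam.train(melodies, adapt_iters=100, lr=0.01, epoch_size=10,
#             model_save='./sam.ckpt', filename='run1')
#   melody = sam.generate(chord_prog, primer_notes, model_save='./sam.ckpt')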
|
[
"tmrosenfeld88@gmail.com"
] |
tmrosenfeld88@gmail.com
|
10d1bf00d434f01ca2cb8508777a5a075caff03a
|
fb51a82f51ba4e5f5160358822a6154cc5c9466b
|
/mrc_utils/json2db/squad_mysql_nia.py
|
3179dddc87fae5ee65ad135fee5860c4450a6d07
|
[] |
no_license
|
yeongsunpark/good
|
cc26cda2106117d66ddc3bd89099dcd9d3c952eb
|
3e5d510548d2d5e63174490344aa14539d6e8785
|
refs/heads/master
| 2020-05-02T03:30:38.354551
| 2019-12-04T01:18:33
| 2019-12-04T01:18:33
| 177,730,317
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 30,996
|
py
|
import json
import logging
import os, sys
import random
import math
import time
import re
import concurrent.futures
import string
from multiprocessing import Pool
import pymysql
sys.path.append(os.path.abspath('..'))
import custom_logger
from morp_analyze_my import NLPAnalyzer
def return_myself(token):
return token
logger = logging.getLogger('root')
logger.setLevel("INFO")
logger.addHandler(custom_logger.MyHandler())
logger.info("Finish setting logger")
class SquadDb():
def __init__(self):
self.maximum = None # for squad2db, db2squad
self.is_divide = False # for squad2db, db2squad
self.is_dp = False
self.db_cnf_dict = {}
# self.context_table = "(%s, %s, %s, %s, %s, %s)"
self.context_table = "(%s, %s, %s, %s, %s)"
self.qna_table = "(%s, %s, %s, %s, %s, %s, %s, %s, %s)"
self.con = None
self.cur = None
self.dp_end = "_dp" if self.is_dp else ""
self.context_ori = ""
self.nlp_analyze = NLPAnalyzer()
self.processed_ctx_list = list()
def easy_mysql(self, cfg_dict, encoding='utf8', autocommit=False):
self.con = pymysql.connect(host=cfg_dict['host'], user=cfg_dict['usr'],
passwd=cfg_dict['pwd'], db=cfg_dict['db'], charset=encoding)
self.cur = self.con.cursor()
if autocommit is True:
self.con.autocommit(True)
def connect_db(self, table_name):
try: # try to connect to project db
cfg_dict = dict(host='localhost', usr= 'root', pwd='data~secret!', db=table_name)
#self.easy_mysql(cfg_dict, encoding=self.db_cnf_dict['encoding'], autocommit=True) # turn-on autocummit, be careful!
self.easy_mysql(cfg_dict, encoding='utf8', autocommit=True)
self.cur.execute("SET NAMES utf8")
except Exception as e:
logger.critical(e)
logger.info("Finish connecting to database...")
def insert_mysql(self, sql, varTuple):
try:
self.cur.execute(sql, varTuple)
logger.debug("Data inserted")
except pymysql.Error as e:
logger.critical(e)
logger.critical(sql%varTuple)
exit()
def insert_data(self, table, value_part, var_tuple, morph_end):
if "context" in table:
#sql = "INSERT INTO {}(id, season, data_type, title, context{}, c_datetime) VALUES {}".\
sql = "INSERT INTO {}(id, season, data_type, title, context{}) VALUES {}".\
format(table, morph_end, value_part)
else:
sql = "INSERT INTO {}(c_id, q_id, question{}, answer_start{}, answer_end{}, answer{}, cate1, cate2, cate3) VALUES {}".\
format(table, morph_end, morph_end, morph_end, morph_end, value_part)
self.insert_mysql(sql, var_tuple)
def fetch_text(self):
#sql = "SELECT c.id, c.title, c.context, q.question, q.answer, c.c_datetime " \
sql = "SELECT c.id, c.title, c.context, q.question, q.answer " \
"FROM all_context c, all_qna q WHERE q.c_id=c.id AND q.q_id = '{}';"
final_list = list()
with open(os.path.join(self.data_root_dir, self.correction_data), "r") as f:
for line in f:
item = line.strip().split("\t")
self.cur.execute(sql.format(item[0]))
row = self.cur.fetchone()
new_list = [str(x) for x in item + list(row)]
final_list.append("\t".join(new_list))
with open(os.path.join(self.data_root_dir,
"{}_original.tsv".format(self.correction_data.split(".")[0])),
"w") as f2:
f2.write("\n".join(final_list))
def squad2db(self, json_location, start_id, season, data_type, table_name):
self.connect_db(table_name)
with open(json_location) as f:
data = json.load(f)
data = data['data']
if start_id is None:
start_id = 1
for d in data:
try:
logger.info(d['title'])
title = d['title']
# c_datetime = d['c_datetime']
except KeyError:
continue
for para in d['paragraphs']:
if self.is_divide:
if random.random() >= self.test_ratio:
data_type = "train"
else:
data_type = "dev"
q_context = str(para['context'])
try:
self.context_ori = str(para['context_ori'])
except KeyError:
if self.context_ori == "":
exit("There's no context_ori")
# var_tuple_ctx = (start_id, season, data_type, title.strip(), q_context.strip(), c_datetime)
var_tuple_ctx = (start_id, season, data_type, title.strip(), q_context.strip())
#var_tuple_ctx_ori = (start_id, season, data_type, title.strip(), self.context_ori.strip(),c_datetime)
var_tuple_ctx_ori = (start_id, season, data_type, title.strip(), self.context_ori.strip())
self.insert_data(table="all_context", value_part=self.context_table, var_tuple=var_tuple_ctx, morph_end="")
self.insert_data(table="all_context_ori", value_part=self.context_table, var_tuple=var_tuple_ctx_ori, morph_end="")
if self.is_divide:
self.insert_data(table="{}_context".format(data_type), value_part=self.context_table, var_tuple=var_tuple_ctx, morph_end="")
self.insert_data(table="{}_context_ori".format(data_type), value_part=self.context_table, var_tuple=var_tuple_ctx_ori, morph_end="")
for qa in para['qas']:
q = str(qa['question'])
q_id = qa['id']
# cates = qa['category'].split("-")
for a in qa['answers']:
a_start = a['answer_start'] # int
try:
a_end = a['answer_end'] # int
except KeyError:
a_end = -1
text = a['text'] # answer text
var_tuple_qa = (start_id, q_id, q.strip().strip("?").strip(), a_start, a_end, text.strip(),
'', '', '')
self.insert_data(table="all_qna", value_part=self.qna_table, var_tuple=var_tuple_qa, morph_end="")
start_id += 1
logger.debug("num of para: %i" % len(d['paragraphs']))
def update_devset(self):
dev_id_list = list()
header = True
with open(self.test_id_file) as f:
for line in f:
if header:
header = False
continue
# lv.1 lv.2 category q_id question answer
item = line.strip().split("\t")
dev_id_list.append(item[3])
logger.info("Len of dev_id_list: {}".format(len(dev_id_list)))
fetch_sql_q = "SELECT c_id, q_id FROM all_qna WHERE q_id IN %s;"
logger.debug(tuple(dev_id_list))
self.cur.execute(fetch_sql_q, (tuple(dev_id_list),))
test_rows = self.cur.fetchall()
logger.info("Len of test_rows: {}".format(len(test_rows)))
dev_ctx_id_list = list()
dev_update_q = "UPDATE all_qna SET is_fixed = 1 WHERE q_id = %s;"
for test_row in test_rows:
logger.debug("test_row[1]: {}".format(test_row[1]))
self.cur.execute(dev_update_q, (test_row[1],)) # update dev questions
dev_ctx_id_list.append(test_row[0])
insert_dev_ctx = "INSERT INTO dev_context_fix SELECT * FROM all_context_all WHERE id IN %s;"
self.cur.execute(insert_dev_ctx, (tuple(dev_ctx_id_list),))
insert_dev_q = "INSERT INTO dev_qna_fix SELECT * FROM all_qna " \
"WHERE q_id IN (SELECT q_id FROM all_qna WHERE c_id IN %s);"
self.cur.execute(insert_dev_q, (tuple(dev_ctx_id_list),))
def check_data(self, season):
workers = 30
r = 300
fetch_sql = "SELECT c_id, q_id, question_morph, answer_morph, answer_start_morph, answer_end_morph, c.context_morph " \
"FROM all_context_all c, all_qna q WHERE c.id=q.c_id AND q.q_id LIKE '{}_%-1' " \
"ORDER BY cast(c_id as unsigned), q_id;".format(season)
self.cur.execute(fetch_sql)
qas = self.cur.fetchall()
logger.info("Fetch all qns data finished")
#check_index(qas, 0, r)
with concurrent.futures.ProcessPoolExecutor(max_workers=workers) as exe:
fs = {exe.submit(check_index, qas, n, r) for n in range(0, len(qas), r)}
def make_plain_list(self, input_list):
return_list = list()
depth = 0
for x in input_list: # x: sentence
for y in x: # y: token
if type(y) != list:
return_list.append(y)
depth = 2
else:
for z in y: # z: morph
return_list.append(z)
depth = 3
return return_list, depth
def extract_passage(self, ctx, answer, location, c_id, q_id):
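# Approach (summary): the answer span is wrapped with five '|' marker characters on each
# side inside the raw context, the marked context is run through the morphological
# analyzer, and the positions of the '|/sw' marker tokens are then used to recover the
# answer's start/end indices in the analyzed token stream before the markers are removed.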
sentence_list = list()
is_skip = False
logger.info(answer)
processed_ans = self.nlp_analyze.get_tree_result(answer) # list
logger.debug(processed_ans)
processed_ans_plain, depth = self.make_plain_list(processed_ans)
processed_ans_plain = ['|/sw'] * 5 + processed_ans_plain + ['|/sw'] * 5 # plain list
if depth == 2:
processed_ans = [processed_ans]
logger.debug("processed_ans: {}".format(processed_ans))
logger.debug("processed_ans_plain: {}".format(processed_ans_plain))
ctx = "{}{}{}{}{}".format(ctx[:location], "|"*5, ctx[location:location+len(answer)], "|"*5, ctx[location+len(answer):])
processed_txt, sentence_list = self.nlp_analyze.get_tree_result(ctx, sentence_list=True)
logger.debug(processed_txt)
processed_txt_plain, depth = self.make_plain_list(processed_txt) # plain list
if depth == 2:
processed_txt = [processed_txt]
logger.debug("processed_ans: {}".format(processed_ans))
logger.debug("processed_ans_plain: {}".format(processed_ans_plain))
logger.debug("processed_txt: {}".format(processed_txt))
logger.debug("processed_txt_plain: {}".format(processed_txt_plain))
marker_idxes = [(j, j + 5) for j in range(len(processed_txt_plain))
if processed_txt_plain[j:j + 5] == ['|/sw'] * 5]
logger.debug(marker_idxes)
if len(marker_idxes) % 2 == 0:
if len(marker_idxes) == 2:
start_idx = marker_idxes[0][0]
end_idx = marker_idxes[1][1] - 10
else:
logger.critical("Not 2 markers...({}) skip: {}".format(len(marker_idxes), q_id))
is_skip = True
return 0, 0, 0, 0, is_skip, []  # keep the 6-tuple shape the callers unpack
else:
logger.critical("Not 2 markers...({}) skip: {}".format(len(marker_idxes), q_id))
is_skip = True
return 0, 0, 0, 0, is_skip, []  # keep the 6-tuple shape the callers unpack
logger.debug("start_idx: {}".format(start_idx))
logger.debug("end_idx: {}".format(end_idx))
for k in range(len(processed_txt)): # sentence
for l in range(len(processed_txt[k])): # token
logger.debug(processed_txt[k][l])
tmp_idxes = [(j, j + 5) for j in range(len(processed_txt[k][l]))
if processed_txt[k][l][j:j + 5] == ['|/sw'] * 5]
if len(tmp_idxes) != 0:
logger.debug(tmp_idxes)
new_processed_txt = self.remove_list_sequence(processed_txt[k][l], tmp_idxes)
logger.debug(new_processed_txt)
processed_txt[k][l] = new_processed_txt
#processed_txt[k][l] = list(filter('|/sw'.__ne__, processed_txt[k][l]))
logger.debug(processed_txt[k][l])
logger.debug(processed_txt)
final_answer = list()
cnt = 0
for k in range(len(processed_txt)):
tmp = list()
for l in range(len(processed_txt[k])):
tmp2 = list()
for m in range(len(processed_txt[k][l])): # morph
if cnt >= start_idx and cnt < end_idx:
logger.debug(processed_txt[k][l][m])
tmp2.append(processed_txt[k][l][m])
cnt += 1
if len(tmp2) > 0:
tmp.append(tmp2)
if len(tmp) > 0:
final_answer.append(tmp)
processed_txt_plain = self.remove_list_sequence(processed_txt_plain, marker_idxes)
#processed_txt_plain = list(filter('|/sw'.__ne__, processed_txt_plain))
final_answer_plain, depth = self.make_plain_list(final_answer)
try:
assert (processed_txt_plain[start_idx:end_idx] == final_answer_plain)
except AssertionError:
logger.error("{} != {}".format(processed_txt_plain[start_idx:end_idx],
final_answer_plain))
is_skip = True
return 0, 0, 0, 0, is_skip, []  # keep the 6-tuple shape the callers unpack
logger.debug("answer_processed: {}".format(processed_txt_plain[start_idx:end_idx]))
logger.debug("answer_processed_return: {}".format(final_answer))
logger.debug(str(processed_txt))
return start_idx, end_idx, str(processed_txt), str(final_answer), is_skip, sentence_list
def remove_list_sequence(self, input_list, marker_idxes):
logger.debug(input_list)
logger.debug(marker_idxes)
new_ptp = list()
if len(marker_idxes) > 1:
for i in range(len(marker_idxes)):
if i == 0:
new_ptp += input_list[:marker_idxes[i][0]]
new_ptp += input_list[marker_idxes[i][1]:marker_idxes[i+1][0]]
logger.debug(input_list[:marker_idxes[i][0]])
else:
new_ptp += input_list[marker_idxes[i][1]:]
logger.debug(input_list[marker_idxes[i][1]:])
else:
new_ptp += input_list[:marker_idxes[0][0]]
new_ptp += input_list[marker_idxes[0][1]:]
logger.debug(new_ptp)
return new_ptp
def process_qa(self, type, season, table_name):
self.db_cnf_dict = {"host": 'localhost', "usr": "root", "pwd": "data~secret!", "db": table_name, "encoding": "utf8"}  # stored on self so the multiprocessing helpers below can reuse it
self.connect_db(table_name)
if type == "q_only":
fetch_sql = "SELECT c_id, q_id, question FROM all_qna q " \
"WHERE q_id LIKE '%-2' AND question_morph IS NULL AND q_id LIKE '{}_%'" \
"ORDER BY cast(c_id as unsigned), q_id;".format(season)
elif type == "check_dp_length":
fetch_sql = "SELECT id, context, context_morph, context_dp " \
"FROM all_context_all c ORDER BY id;"
elif type == "dp":
fetch_sql = "SELECT id, context FROM all_context " \
"WHERE context_dp IS NULL AND cast(id AS unsigned) >= {} ORDER BY id;".format(self.start_id)
elif type == "dp_q":
fetch_sql = "SELECT c_id, q_id, question FROM all_qna " \
"WHERE question_dp IS NULL ORDER BY c_id, q_id;"
elif type == "patch":
#fetch_sql = "select c_id, q_id, answer, answer_start, context, context_morph from all_qna q, all_context c " \
# "where q.q_id LIKE '%-1' AND q.c_id = c.id ORDER BY c_id, q_id;"
fetch_sql = "SELECT c_id, q_id, answer, answer_start, context " \
"FROM (SELECT * FROM all_context_all WHERE context LIKE '%|%') t, all_qna q " \
"WHERE t.id = q.c_id " \
"ORDER BY c_id, q_id;"
elif type == "re_patch":
with open("check/re_patch_all.txt") as f: # 바꿔줌
qas = f.readlines()
elif type == "context":
# process only data created at certain season
fetch_sql = "select c_id, q_id, question, answer, answer_start, context from all_qna q, all_context c " \
"where q.q_id LIKE '%-1' AND q.q_id LIKE '{}_%' AND q.c_id = c.id AND question_morph is NULL and c.id> 0 " \
"ORDER BY CAST(c_id AS UNSIGNED), q_id;".format(season)
else:
logger.error("You select the wrong type({}). Please re-check your command".format(type))
exit()
if type != "re_patch":
self.cur.execute(fetch_sql)
qas = self.cur.fetchall()
logger.info("len of qas: {}".format(len(qas)))
workers = 20  # changed to 20
r = 300  # reduced from 300 to 200 after errors
if type == "q_only":
#update_set_q(table_name, qas, 0, r)
with concurrent.futures.ProcessPoolExecutor(max_workers=workers) as exe:
fs = {exe.submit(update_set_q, table_name, qas, n, r) for n in range(0, len(qas), r)}
elif type == "dp":
#get_dp_multi("context", self.db_cnf_dict, qas, 0, len(qas))
with concurrent.futures.ProcessPoolExecutor(max_workers=workers) as exe:
fs = {exe.submit(get_dp_multi, "context", self.db_cnf_dict, qas, n, r) for n in range(0, len(qas), r)}
elif type == "dp_q":
#get_dp_multi("question", self.db_cnf_dict, qas, 0, len(qas))
with concurrent.futures.ProcessPoolExecutor(max_workers=workers) as exe:
fs = {exe.submit(get_dp_multi, "question", self.db_cnf_dict, qas, n, r) for n in range(0, len(qas), r)}
elif type == "patch":
#patch(table_name, qas, 0, r)
with concurrent.futures.ProcessPoolExecutor(max_workers=workers) as exe:
fs = {exe.submit(patch, table_name, qas, n, r) for n in range(0, len(qas), r)}
elif type == "re_patch":
# re_patch(table_name, qas, 0, r)  # increase r above to match the number of errors produced by re_patch.
with concurrent.futures.ProcessPoolExecutor(max_workers=workers) as exe:
fs = {exe.submit(re_patch, table_name, qas, n, r) for n in range(0, len(qas), r)}
elif type == "check_dp_length":
#check_dp_length(self.db_cnf_dict, qas, 0, r)
with concurrent.futures.ProcessPoolExecutor(max_workers=workers) as exe:
fs = {exe.submit(check_dp_length, self.db_cnf_dict, qas, n, r) for n in range(0, len(qas), r)}
elif type == "context":
morph_core(table_name, qas, 0, r)
# with concurrent.futures.ProcessPoolExecutor(max_workers=workers) as exe:
# fs = {exe.submit(morph_core, table_name, qas, n, r) for n in range(0, len(qas), r)}
def get_dp(self, q):
c_id = q[0]
ctx = q[1]
dp = self.nlp_analyze.get_dependency_parser_result(ctx)
sql = "UPDATE all_context SET context_dp = %s WHERE id = %s;"
self.cur.execute(sql, (str(dp), c_id))
def longtext(self):
sql = "SELECT id, context FROM all_context WHERE id = %s;"
self.cur.execute(sql, (2,))
row = self.cur.fetchone()
dp_content = self.nlp_analyze.get_dependency_parser_result(row[1])
update_sql = "UPDATE all_context SET context_dp = %s WHERE id = %s"
self.cur.execute(update_sql, (str(dp_content), row[0]))
logger.info("finished")
def create_dev_kang(self):
qid_list = list()
with open("dev_qids.txt") as f:
for line in f:
qid_list.append(line.strip())
qid_part = ', '.join(list(map(lambda x: '%s', qid_list)))
sql = "INSERT INTO dev_qna_kang SELECT * FROM all_qna " \
"WHERE q_id IN ({});".format(qid_part)
self.cur.execute(sql, qid_list)
cid_list = list()
for qid in qid_list:
sql = "SELECT c_id FROM all_qna WHERE q_id = %s;"
self.cur.execute(sql, (qid,))
c_id = self.cur.fetchone()[0]
logger.info(c_id)
cid_list.append(c_id)
cid_list = list(set(cid_list))
cid_part = ', '.join(list(map(lambda x: '%s', cid_list)))
sql = "INSERT INTO dev_context_kang SELECT * FROM all_context " \
"WHERE id IN ({});".format(cid_part)
self.cur.execute(sql, cid_list)
def check_dp_length(db_cnf, qas, n, r):  # called with db_cnf_dict as the first argument in process_qa
exec("j{} = SquadDb()".format(n))
processed_ctx_list = list()
logger.info("processing: {} ..< {}".format(n, n + r))
for q in qas[n:n+r]:
# id, context, context_morph, context_dp
c_id = q[0]; ctx = q[1]; ctx_morph = q[2]; ctx_dp = q[3]
new_ctx = eval("j{}".format(n)).nlp_analyze.get_tree_result(ctx)
try:
assert(len([x for x in eval(ctx_dp) if x['id'] == 0]) == len(eval(ctx_morph)))
except AssertionError:
logger.critical("Different sentence length: {}".format(c_id))
with open("check/sentence_length.txt", "a") as f:
f.write("{}\n".format(c_id))
try:
assert(len([x for x in eval(ctx_dp) if x['id'] == 0]) == len(new_ctx))
except AssertionError:
logger.error("len of new_dp != len of ctx_morph: {}".format(c_id))
exit()
'''if "-" in c_id:
c_id = c_id.split("-")[0]
if c_id not in processed_ctx_list:
update_sql = "UPDATE all_context SET context_morph = %s WHERE id = %s;"
eval("j{}".format(n)).cur.execute(update_sql, (str(new_ctx), c_id))
logger.info("ctx_morph update")'''
def re_patch(table_name, qas, n, r):
exec("j{} = SquadDb()".format(n))
print("j{} = SquadDb()".format(n))
eval("j{}.connect_db('{}')".format(n, table_name))
logger.info("Finish connecting to database...: {}".format(n))
logger.info("processing: {} ..< {}".format(n, n + r))
for q_line in qas[n:n+r]:
item = q_line.strip().split("\t")
fetch_sql = "select q.c_id, q.q_id, q.answer, q.answer_start, c.context from all_qna q, all_context c " \
"where q.c_id = %s AND q.q_id = %s AND q.c_id = c.id;"
eval("j{}".format(n)).cur.execute(fetch_sql, (item[0], item[1]))
q = eval("j{}".format(n)).cur.fetchone()
# q: c_id, q_id, answer, answer_start, context
c_id = q[0]; q_id = q[1]; answer = q[2]; answer_s = q[3]; ctx = q[4]
new_s, new_e, new_ctx, new_ans, is_skip, sentence_list = \
eval("j{}".format(n)).extract_passage(ctx, answer, answer_s, c_id, q_id)
sql = "INSERT INTO all_context_diff VALUE(%s, %s, %s);"
sql_diff = "SELECT u_id FROM all_context_diff WHERE depend_id = %s ORDER BY u_id DESC LIMIT 1;"
eval("j{}".format(n)).cur.execute(sql_diff, c_id)
uu = eval("j{}".format(n)).cur.fetchone()
if uu is None:
u_id = 1
else:
u_id = uu[0] + 1
eval("j{}".format(n)).cur.execute(sql, (c_id, u_id, new_ctx))
logger.info("Insert new c: {}-{}".format(c_id, u_id))
sql = "UPDATE all_qna SET c_id = %s WHERE c_id = %s AND q_id = %s;"
eval("j{}".format(n)).cur.execute(sql, ("{}-{}".format(c_id, u_id), c_id, q_id))
eval("j{}".format(n)).cur.execute(sql, ("{}-{}".format(c_id, u_id), c_id, "{}2".format(q_id[:-1])))
logger.info("Update q: {}".format(q_id))
logger.debug(new_ans)
def patch(table_name, qas, n, r):
exec("j{} = SquadDb()".format(n))
print("j{} = SquadDb()".format(n))
eval("j{}.connect_db('{}')".format(n, table_name))
logger.info("Finish connecting to database...: {}".format(n))
logger.info("processing: {} ..< {}".format(n, n + r))
processed_ctx = list()
for q in qas[n:n+r]:
# q: c_id, q_id, answer, answer_start, context
c_id = q[0]; q_id = q[1]; answer = q[2]; answer_s = q[3]; ctx = q[4]
new_s, new_e, new_ctx, new_ans, is_skip, sentence_list = \
eval("j{}".format(n)).extract_passage(ctx, answer, answer_s, c_id, q_id)
if is_skip:
logger.error(q)
exit()
logger.info(new_ans)
if c_id not in processed_ctx:
if "-" in c_id:
depend_id = c_id.split("-")[0]; u_id = c_id.split("-")[1]
sql = "UPDATE all_context_diff SET context_morph = %s WHERE depend_id = %s AND u_id = %s;"
eval("j{}".format(n)).cur.execute(sql, (str(new_ctx), depend_id, u_id))
else:
sql = "UPDATE all_context SET context_morph = %s WHERE id = %s;"
eval("j{}".format(n)).cur.execute(sql, (str(new_ctx), c_id))
logger.info("Update c: {}".format(c_id))
sql = "UPDATE all_qna SET answer_start_morph = %s, answer_end_morph = %s, answer_morph = %s " \
"WHERE c_id = %s AND q_id = %s;"
eval("j{}".format(n)).cur.execute(sql, (new_s, new_e, str(new_ans), c_id, q_id))
logger.info("Update q: {}".format(q_id))
re_quotation = re.compile(r"\[+[\"\'](\[\[.+\]\])[\"\']\]+")
def check_index(qas, n, r):
exec("j{} = SquadDb()".format(n))
return_list = list()
logger.info("processing: {} ..< {}".format(n, n + r))
for q in qas[n:n+r]:
ctx_plain, depth = eval("j{}".format(n)).make_plain_list(eval(q[6]))
answer_plain, depth = eval("j{}".format(n)).make_plain_list(eval(q[3]))
try:
assert (ctx_plain[q[4]:q[5]] == answer_plain)
except AssertionError:
return_list.append("{}\t{}\t{}\t{}".format(q[0], q[1], ctx_plain[q[4]:q[5]], answer_plain))
if len(return_list) != 0:
with open("check/re_patch_{}.txt".format(n), "a") as f:
f.write("\n".join(return_list))
f.write("\n")
def get_dp_multi(type, db_cnf, qas, n, r):
exec("j{} = SquadDb()".format(n))
print("j{} = SquadDb()".format(n))
eval("j{}.connect_db()".format(n))
logger.info("Finish connecting to database...: {}".format(n))
logger.info("processing: {} ..< {}".format(n, n + r))
for q in qas[n:n+r]:
c_id = q[0]
if type == "context":
txt = q[1]
elif type == "question":
q_id = q[1]
txt = q[2]
else:
logger.error("get_dp_multi - type is wrong. stop process..")
exit()
dp = eval("j{}".format(n)).nlp_analyze.get_dependency_parser_result(txt)
logger.debug(dp)
if type == "context":
sql = "UPDATE all_context SET context_dp = %s WHERE id = %s;"
eval("j{}".format(n)).cur.execute(sql, (str(dp), c_id))
elif type == "question":
sql = "UPDATE all_qna SET question_dp = %s WHERE c_id = %s AND q_id = %s;"
eval("j{}".format(n)).cur.execute(sql, (str(dp), c_id, q_id))
def update_set_q(table_name, qas, n, r):
exec("j{} = SquadDb()".format(n))
print("j{} = SquadDb()".format(n))
eval("j{}.connect_db('{}')".format(n, table_name))
logger.info("Finish connecting to database...: {}".format(n))
logger.info("processing: {} ..< {}".format(n, n + r))
for q in qas[n:n+r]:
question = q[2]
question_list = eval("j{}".format(n)).nlp_analyze.get_tree_result(question)
logger.debug(question_list)
fetch_sql = "SELECT answer_morph, answer_start_morph, answer_end_morph FROM all_qna " \
"WHERE c_id = %s AND q_id = %s;"
eval("j{}".format(n)).cur.execute(fetch_sql, [q[0], "{}1".format(q[1][:-1])]) # fetch '-1' info
original = eval("j{}".format(n)).cur.fetchone()
logger.debug(original)
update_sql = "UPDATE all_qna SET question_morph = %s, answer_morph = %s, " \
"answer_start_morph = %s, answer_end_morph = %s " \
"WHERE c_id = %s AND q_id = %s;"
val_tuple = (str(question_list), original[0], original[1], original[2], q[0], q[1])
logger.debug(val_tuple)
eval("j{}".format(n)).cur.execute(update_sql, val_tuple)
def morph_core(table_name, qas, n, r):
exec("j{} = SquadDb()".format(n))
eval("j{}.connect_db('{}')".format(n, table_name))
logger.info("Finish connecting to database...: {}".format(n))
# c_id, q_id, question, answer, answer_start, context
logger.info("processing: {} ..< {}".format(n, n + r))
for q in qas[n:n+r]:
question = q[2]
answer = q[3]
answer_start = q[4]
context = q[5]
try:
assert (context[answer_start:answer_start+len(answer)] == answer)
except AssertionError:
logger.info(q[1])
logger.critical("real answer: {}".format(answer))
logger.critical("extracted answer: {}".format(context[answer_start:answer_start+len(answer)]))
exit()
new_s, new_e, new_ctx, new_answer, isSkip, sentence_list = \
eval("j{}".format(n)).extract_passage(context, answer, answer_start, q[0], q[1])
logger.info("isskip: {}".format(isSkip))
if not isSkip:
# question
question_list = eval("j{}".format(n)).nlp_analyze.get_tree_result(question)
if q[0] not in eval("j{}".format(n)).processed_ctx_list:
sql = "UPDATE all_context SET context_morph = %s, context_sent = %s WHERE id = %s"
eval("j{}".format(n)).cur.execute(sql, (str(new_ctx), str(sentence_list), q[0]))
eval("j{}".format(n)).processed_ctx_list.append(q[0])
sql = "UPDATE all_qna SET question_morph = %s, answer_morph = %s, " \
"answer_start_morph = %s, answer_end_morph = %s " \
"WHERE c_id = %s AND q_id = %s;"
val_tuple = (str(question_list), str(new_answer), new_s, new_e, q[0], q[1])
logger.debug(val_tuple)
eval("j{}".format(n)).cur.execute(sql, val_tuple)
time.sleep(0.2)
if __name__ == "__main__":
try:
mode = sys.argv[1]
season = sys.argv[2]
db_table = sys.argv[3]
json_input = sys.argv[4]
start_id = sys.argv[5]
data_type = sys.argv[6]
except IndexError:
exit("usage: {} <mode> <season> <db_table> <json_input> <start_id> <data_type>".format(sys.argv[0]))
j = SquadDb()
j.connect_db(db_table)
if mode == "squad2db":
j.squad2db(json_input, int(start_id), season, data_type, db_table)
elif mode == "context":
j.process_qa('context', season, db_table)
elif mode == "q_only":
j.process_qa('q_only', season, db_table)
elif mode == "check_data":
j.check_data(season)
elif mode == "re_patch":
j.process_qa('re_patch', season, db_table)
logger.info("All finished")
|
[
"ylunar@naver.com"
] |
ylunar@naver.com
|
ff54c9c913e9a0d876744c59e5036fa4992f106c
|
cbab8b9218b4c7965e6d1dacfb2104e4096c18d1
|
/backend/helo_tesrt_dev_2404/urls.py
|
fbe6d1e20a84147bb29ba5acf7482a6bb158ce79
|
[] |
no_license
|
crowdbotics-apps/helo-tesrt-dev-2404
|
480408a7b426413ebaa8803e8e754efae08ea091
|
44599c5058dc7b284bdd3eaba3e38b2b5d5c33db
|
refs/heads/master
| 2023-02-06T01:56:10.488868
| 2020-04-07T15:51:58
| 2020-04-07T15:51:58
| 253,755,570
| 0
| 0
| null | 2023-01-24T01:57:30
| 2020-04-07T10:04:28
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,929
|
py
|
"""helo_tesrt_dev_2404 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from allauth.account.views import confirm_email
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
urlpatterns = [
path("", include("home.urls")),
path("accounts/", include("allauth.urls")),
path("api/v1/", include("home.api.v1.urls")),
path("admin/", admin.site.urls),
path("users/", include("users.urls", namespace="users")),
path("rest-auth/", include("rest_auth.urls")),
# Override email confirm to use allauth's HTML view instead of rest_auth's API view
path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email),
path("rest-auth/registration/", include("rest_auth.registration.urls")),
]
admin.site.site_header = "helo tesrt"
admin.site.site_title = "helo tesrt Admin Portal"
admin.site.index_title = "helo tesrt Admin"
# swagger
schema_view = get_schema_view(
openapi.Info(
title="helo tesrt API",
default_version="v1",
description="API documentation for helo tesrt App",
),
public=True,
permission_classes=(permissions.IsAuthenticated,),
)
urlpatterns += [
path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs")
]
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
5905d6b90141004034939f0dd662452a6aaefdaa
|
334c92f848ae51e10b713e305d91f40b22e13371
|
/파이썬/13908_비밀번호.py
|
86a0cf6b892469a1acf6b65cb20dae3f814c89ed
|
[] |
no_license
|
brrgi/-Algo
|
798e5d705cad43dab16f5daacb8ef53f9027316e
|
2d04794c5d3213d3e30772bdf1e9fd25aae12e14
|
refs/heads/master
| 2023-08-13T04:19:16.264407
| 2021-10-08T14:53:02
| 2021-10-08T14:53:02
| 207,121,055
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 283
|
py
|
result=0
start=10
n,m=map(int, input().split())
k=list(map(int, input().split()))
def fact(a):
temp=1
for i in range(a):
temp*=i+1
return temp
for i in range(m+1):
result+=(int)(fact(m)/(fact(m-i)*fact(i)))*pow(start,n)*((-1)**i)
start-=1
print(result)
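# Inclusion-exclusion: the loop above computes sum_{i=0}^{m} (-1)^i * C(m, i) * (10 - i)^n,
# i.e. the number of length-n digit strings that use all m required digits (the list k of
# required digits only matters through its length m).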
|
[
"48500985+brrgi@users.noreply.github.com"
] |
48500985+brrgi@users.noreply.github.com
|
44e13c1fded16f49030901f88a5514fd6330ae64
|
8a69e01a697b1583814de2fa23f3c6f90425192a
|
/device_classify1.2/main.py
|
72e374160c6ddb6c1be25d685fe39a639f47729b
|
[] |
no_license
|
forever0136789/device_classify
|
d4ead55e1566cac0d5d6b3d240e2a6e1ea381e6c
|
e69b03473ac5f78b62be610f065b93ea6b417680
|
refs/heads/master
| 2020-05-20T11:15:53.898356
| 2019-06-21T03:08:11
| 2019-06-21T03:08:11
| 185,545,222
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,243
|
py
|
import preprocess
import config
import os
import time
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.naive_bayes import GaussianNB
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import SVC
from sklearn.ensemble import AdaBoostClassifier, GradientBoostingClassifier, RandomForestClassifier
from mlxtend.classifier import StackingClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import accuracy_score, roc_auc_score
def train_model(X_train, y_train, X_test, y_test, model_name, model, param_range):
"""
Train the model with the given parameter grid and return
1. the best model
2. the average training time
3. the test accuracy
"""
print('Training {}...'.format(model_name))
clf = GridSearchCV(estimator=model,
param_grid=param_range,
cv=5,
scoring='accuracy',
refit=True)  # refit=True: after 5-fold cross-validation, refit once on the whole training set
start = time.time()
clf.fit(X_train, y_train)
# timing
end = time.time()
duration = end - start
print('Elapsed: {:.4f}s'.format(duration))
# evaluate the model
print('Training accuracy: {:.3f}'.format(clf.score(X_train, y_train)))
score = clf.score(X_test, y_test)
print('Test accuracy: {:.3f}'.format(score))
print('Model training time: {:.4f}s'.format(duration))
print()
return clf, score, duration
def main():
"""
主函数
"""
# 准备数据集
train_data, test_data = preprocess.prepare_data()
# 查看数据集
preprocess.inspect_dataset(train_data, test_data)
# 特征工程处理
# 构建训练测试数据
X_train, X_test = preprocess.do_feature_engineering(train_data, test_data)
print('共有{}维特征。'.format(X_train.shape[1]))
# 标签处理
y_train = train_data['label'].values
y_test = test_data['label'].values
# 数据建模及验证
print('\n===================== 数据建模及验证 =====================')
sclf = StackingClassifier(classifiers=[KNeighborsClassifier(),
SVC(kernel='linear'),
DecisionTreeClassifier()],
meta_classifier=LogisticRegression())
# parameter grids for each classifier
model_name_param_dict = {'kNN': (KNeighborsClassifier(),
{'n_neighbors': [5, 15, 25]}),
'LR': (LogisticRegression(),
{'C': [0.01, 1, 100]}),
'SVM': (SVC(kernel='linear'),
{'C': [0.01, 1, 100]}),
'DT': (DecisionTreeClassifier(),
{'max_depth': [50, 100, 150]}),
'Stacking': (sclf,
{'kneighborsclassifier__n_neighbors': [5, 15, 25],
'svc__C': [0.01, 1, 100],
'decisiontreeclassifier__max_depth': [50, 100, 150],
'meta-logisticregression__C': [0.01, 1, 100]}),
'AdaBoost': (AdaBoostClassifier(),
{'n_estimators': [50, 100, 150, 200]}),
'GBDT': (GradientBoostingClassifier(),
{'learning_rate': [0.01, 0.1, 1, 10, 100]}),
'RF': (RandomForestClassifier(),
{'n_estimators': [100, 150, 200, 250]}),
'NB': (GaussianNB(), {'priors': [None]})}
# model_name_param_dict = {'NB': (GaussianNB(), {'priors': [None]})}
# DataFrame for collecting comparison results
results_df = pd.DataFrame(columns=['Accuracy (%)', 'Time (s)'],
index=list(model_name_param_dict.keys()))
results_df.index.name = 'Model'
for model_name, (model, param_range) in model_name_param_dict.items():
best_clf, best_acc, mean_duration = train_model(X_train, y_train, X_test, y_test,
model_name, model, param_range)
results_df.loc[model_name, 'Accuracy (%)'] = best_acc * 100
results_df.loc[model_name, 'Time (s)'] = mean_duration
results_df.to_csv(os.path.join(config.output_path, 'model_comparison.csv'))
# compare models and results
print('\n===================== Model comparison =====================')
plt.figure(figsize=(10, 4))
ax1 = plt.subplot(1, 2, 1)
results_df.plot(y=['Accuracy (%)'], kind='bar', ylim=[40, 100], ax=ax1, title='Accuracy(%)', legend=False)
ax2 = plt.subplot(1, 2, 2)
results_df.plot(y=['Time (s)'], kind='bar', ax=ax2, title='Time (s)', legend=False)
plt.tight_layout()
plt.savefig(os.path.join(config.output_path, './pred_results.png'))
plt.show()
if __name__ == '__main__':
main()
|
[
"18795880359@163.com"
] |
18795880359@163.com
|
a75ea16fc96deba8ed6592b54837c4622da43d54
|
b9a2097b1ff526f0f980cb44f321ecdecc071baf
|
/backend/manage.py
|
4772a9003f0358346446794a2b58b87dce416f4e
|
[] |
no_license
|
crowdbotics-apps/nwh-elkhart-metrics-26614
|
ce08c984d6c939b7f7cd5158b5c39fe37be94dcc
|
e86088482281f83fe789ce0b492e76981df1c08c
|
refs/heads/master
| 2023-05-01T08:17:44.464562
| 2021-05-12T18:42:43
| 2021-05-12T18:42:43
| 366,794,511
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 659
|
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault(
"DJANGO_SETTINGS_MODULE", "nwh_elkhart_metrics_26614.settings"
)
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == "__main__":
main()
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
bc488533414e97ad8722f8961c39cf8f985447c0
|
d4a62bea6e8393d47576d66de3b0a187df8dfd02
|
/erlotinib/apps/_simulation.py
|
f50a26c223dfb055ae92d509ea670cf6ca7cebaa
|
[
"BSD-3-Clause"
] |
permissive
|
pkpdapp-team/erlotinib
|
13ece87728473a428fed9d77c094c29ed0e45b4b
|
fbdfffe0d0e3feab6b45c9a3f983f8dd3fcfdad3
|
refs/heads/main
| 2023-02-28T09:43:53.439689
| 2021-02-10T15:22:25
| 2021-02-10T15:22:25
| 337,726,135
| 0
| 0
|
BSD-3-Clause
| 2021-02-10T13:03:58
| 2021-02-10T13:03:57
| null |
UTF-8
|
Python
| false
| false
| 13,900
|
py
|
#
# This file is part of the erlotinib repository
# (https://github.com/DavAug/erlotinib/) which is released under the
# BSD 3-clause license. See accompanying LICENSE.md for copyright notice and
# full license details.
#
import warnings
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
import numpy as np
import pandas as pd
import erlotinib as erlo
import erlotinib.apps as apps
class PDSimulationController(apps.BaseApp):
"""
Creates an app which simulates a :class:`erlotinib.PharmacodynamicModel`.
Parameter sliders can be used to adjust parameter values during
the simulation.
Extends :class:`BaseApp`.
Example
-------
::
# Set up app with data and model
app = PDSimulationController()
app.add_model(model)
app.add_data(data, biomarker='<Biomarker name>')
# Define a simulation callback that updates the simulation according
# to the sliders
sliders = app.slider_ids()
@app.app.callback(
Output('fig', 'figure'),
[Input(s, 'value') for s in sliders])
def update_simulation(*args):
parameters = args
fig = app.update_simulation(parameters)
return fig
# Start the app
app.start_application()
"""
def __init__(self):
super(PDSimulationController, self).__init__(
name='PDSimulationController')
# Instantiate figure and sliders
self._fig = erlo.plots.PDTimeSeriesPlot(updatemenu=False)
self._sliders = _SlidersComponent()
# Create default layout
self._set_layout()
# Create defaults
self._model = None
self._times = np.linspace(start=0, stop=30)
def _add_simulation(self):
"""
Adds trace of simulation results to the figure.
"""
# Make sure that parameters and sliders are ordered the same
if self._model.parameters() != list(self._sliders.sliders().keys()):
raise Warning('Model parameters do not align with slider.')
# Get parameter values
parameters = []
for slider in self._sliders.sliders().values():
value = slider.value
parameters.append(value)
# Add simulation to figure
result = self._simulate(parameters)
self._fig.add_simulation(result)
# Remember index of model trace for update callback
n_traces = len(self._fig._fig.data)
self._model_trace = n_traces - 1
def _create_figure_component(self):
"""
Returns a figure component.
"""
figure = dbc.Col(
children=[dcc.Graph(
figure=self._fig._fig,
id='fig',
style={'height': '67vh'})],
md=9
)
return figure
def _create_sliders(self):
"""
Creates one slider for each parameter, and groups the slider by
1. Pharmacokinetic input
2. Initial values (of states)
3. Parameters
"""
parameters = self._model.parameters()
# Add one slider for each parameter
for parameter in parameters:
self._sliders.add_slider(slider_id=parameter)
# Split parameters into initial values, and parameters
n_states = self._model._n_states
states = parameters[:n_states]
parameters = parameters[n_states:]
# Group parameters:
# Create PK input slider group
pk_input = self._model.pk_input()
if pk_input is not None:
self._sliders.group_sliders(
slider_ids=[pk_input], group_id='Pharmacokinetic input')
# Make sure that pk input is not assigned to two sliders
parameters.remove(pk_input)
# Create initial values slider group
self._sliders.group_sliders(
slider_ids=states, group_id='Initial values')
# Create parameters slider group
self._sliders.group_sliders(
slider_ids=parameters, group_id='Parameters')
def _create_sliders_component(self):
"""
Returns a slider component.
"""
sliders = dbc.Col(
children=self._sliders(),
md=3,
style={'marginTop': '5em'}
)
return sliders
def _set_layout(self):
"""
Sets the layout of the app.
- Plot of simulation/data on the left.
- Parameter sliders on the right.
"""
self.app.layout = dbc.Container(
children=[dbc.Row([
self._create_figure_component(),
self._create_sliders_component()])],
style={'marginTop': '5em'})
def _simulate(self, parameters):
"""
Returns simulation of pharmacodynamic model in standard format, i.e.
pandas.DataFrame with 'Time' and 'Biomarker' column.
"""
# Solve the model
result = self._model.simulate(parameters, self._times)
# Rearrange results into a pandas.DataFrame
result = pd.DataFrame({'Time': self._times, 'Biomarker': result[0, :]})
return result
def add_data(
self, data, biomarker, id_key='ID', time_key='Time',
biom_key='Biomarker', meas_key='Measurement'):
"""
Adds pharmacodynamic time series data of (multiple) individuals to
the figure.
Expects a :class:`pandas.DataFrame` with an ID, a time and a PD
biomarker column, and adds a scatter plot of the biomarker time series
to the figure. Each individual receives a unique colour.
Parameters
----------
data
A :class:`pandas.DataFrame` with the time series PD data in form of
an ID, time, and biomarker column.
biomarker
Selector for the displayed biomarker. The provided value has to be
an element of the biomarker column.
id_key
Key label of the :class:`DataFrame` which specifies the ID column.
The ID refers to the identity of an individual. Defaults to
``'ID'``.
time_key
Key label of the :class:`DataFrame` which specifies the time
column. Defaults to ``'Time'``.
biom_key
Key label of the :class:`DataFrame` which specifies the PD
biomarker column. Defaults to ``'Biomarker'``.
meas_key
Key label of the :class:`DataFrame` which specifies the column of
the measured PD biomarker. Defaults to ``'Measurement'``.
"""
# Add data to figure
self._fig.add_data(
data, biomarker, id_key, time_key, biom_key, meas_key)
# Set axes labels to time_key and biom_key
self._fig.set_axis_labels(xlabel=time_key, ylabel=biom_key)
def add_model(self, model):
"""
Adds a :class:`erlotinib.PharmacodynamicModel` to the application.
One parameter slider is generated for each model parameter, and
the solution for a default set of parameters is added to the figure.
"""
if self._model is not None:
# This is a temporary fix! In a future issue we will handle the
# simulation of multiple models
warnings.warn(
'A model has been set previously. The passed model was '
'therefore ignored.')
return None
if not isinstance(model, erlo.PharmacodynamicModel):
raise TypeError(
'Model has to be an instance of '
'erlotinib.PharmacodynamicModel.')
self._model = model
# Add one slider for each parameter to the app
self._create_sliders()
# Add simulation of model to the figure
self._add_simulation()
# Update layout
self._set_layout()
def slider_ids(self):
"""
Returns a list of the slider ids.
"""
return list(self._sliders.sliders().keys())
def update_simulation(self, parameters):
"""
Simulates the model for the provided parameters and replaces the
current simulation plot by the new one.
"""
# Solve model
result = self._model.simulate(parameters, self._times).flatten()
# Replace simulation values in plotly.Figure
self._fig._fig.data[self._model_trace].y = result
return self._fig._fig
class _SlidersComponent(object):
"""
A helper class that helps to organise the sliders of the
:class:`SimulationController`.
The sliders are arranged horizontally. Sliders may be grouped by meaning.
"""
def __init__(self):
# Set defaults
self._sliders = {}
self._slider_groups = {}
def __call__(self):
# Returns the contents in form of a list of dash components.
# If no sliders have been added, print a default message.
if not self._sliders:
default = [dbc.Alert(
"No model has been chosen.", color="primary")]
return default
# If sliders have not been grouped, print a default message.
if not self._slider_groups:
default = [dbc.Alert(
"Sliders have not been grouped.", color="primary")]
return default
# Group and label sliders
contents = self._compose_contents()
return contents
def _compose_contents(self):
"""
Returns the grouped sliders with labels as a list of dash components.
"""
contents = []
for group_id in self._slider_groups.keys():
# Create label for group
group_label = html.Label(group_id)
# Group sliders
group = self._slider_groups[group_id]
container = []
for slider_id in group:
# Create label for slider
label = html.Label(slider_id, style={'fontSize': '0.8rem'})
slider = self._sliders[slider_id]
# Add label and slider to group container
container += [
dbc.Col(children=[label], width=12),
dbc.Col(children=[slider], width=12)]
# Convert slider group to dash component
group = dbc.Row(
children=container, style={'marginBottom': '1em'})
# Add label and group to contents
contents += [group_label, group]
return contents
def add_slider(
self, slider_id, value=0.5, min_value=0, max_value=2,
step_size=0.01):
"""
Adds a slider.
Parameters
----------
slider_id
ID of the slider.
value
Default value of the slider.
min_value
Minimal value of slider.
max_value
Maximal value of slider.
step_size
Elementary step size of slider.
"""
# Replace "."s by a spaces in slider_ids if present
# (plotly doesn't allow "." for slider_ids in callbacks)
if '.' in slider_id:
warnings.warn(
'Dots (.) have been removed in parameter names when creating '
'the sliders.')
slider_id = slider_id.replace('.', ' ')
self._sliders[slider_id] = dcc.Slider(
id=slider_id,
value=value,
min=min_value,
max=max_value,
step=step_size,
marks={
str(min_value): str(min_value),
str(max_value): str(max_value)},
updatemode='drag')
def group_sliders(self, slider_ids, group_id):
"""
Visually groups sliders. Group ID will be used as label.
Each slider can only be in one group.
"""
# Check that incoming sliders do not belong to any group already
for index, existing_group in enumerate(self._slider_groups.values()):
for slider in slider_ids:
if slider in existing_group:
raise ValueError(
'Slider <' + str(slider) + '> exists already in group '
'<' + str(list(self._slider_groups.keys())[index]) + '>.')
self._slider_groups[group_id] = slider_ids
def sliders(self):
"""
Returns a dictionary of slider objects with the slider ID as key and
the slider object as value.
"""
return self._sliders
# For simple debugging the app can be launched by executing the python file.
if __name__ == "__main__":
from dash.dependencies import Input, Output
# Get data and model
data = erlo.DataLibrary().lung_cancer_control_group()
path = erlo.ModelLibrary().tumour_growth_inhibition_model_koch()
model = erlo.PharmacodynamicModel(path)
model.set_parameter_names(names={
'myokit.drug_concentration': 'Drug concentration in mg/L',
'myokit.tumour_volume': 'Tumour volume in cm^3',
'myokit.kappa': 'Potency in L/mg/day',
'myokit.lambda_0': 'Exponential growth rate in 1/day',
'myokit.lambda_1': 'Linear growth rate in cm^3/day'})
# Set up demo app
app = PDSimulationController()
app.add_model(model)
app.add_data(data, biomarker='Tumour volume')
# Define a simulation callback
sliders = app.slider_ids()
@app.app.callback(
Output('fig', 'figure'),
[Input(s, 'value') for s in sliders])
def update_simulation(*args):
"""
Simulates the model for the current slider values and updates the
model plot in the figure.
"""
parameters = args
fig = app.update_simulation(parameters)
return fig
app.start_application(debug=True)
|
[
"david.augustin@gmx.net"
] |
david.augustin@gmx.net
|
ea051c5911af350e0ba353248c8ec40515baaea9
|
132f19c0424b4e0943567cb0bfb27380087fe3cc
|
/pynanacolight/core.py
|
247334466f6d98b0cb4c8ef44205ae68eb8b4f87
|
[
"MIT"
] |
permissive
|
mursts/PyNanacoLight
|
bb9a9d3c572ef8ee7eff6103892beed171a36304
|
5a9be2a571c2ecbc20a9a43d333ae1e94232d000
|
refs/heads/master
| 2020-03-31T05:33:19.586432
| 2018-09-22T13:22:34
| 2018-09-22T13:22:34
| 151,950,219
| 0
| 0
|
MIT
| 2018-10-07T14:30:31
| 2018-10-07T14:30:30
| null |
UTF-8
|
Python
| false
| false
| 5,520
|
py
|
# -*- coding: utf-8 -*-
from pynanacolight.page import LoginPage, MenuPage
from pynanacolight.page_creditcharge import CreditChargeMenuPage, CreditChargeHistoryPage, CreditChargePasswordAuthPage, \
CreditChargeInputPage, CreditChargeConfirmPage, CreditChargeCancelPage, CreditChargeCancelConfirmPage
# CreditChargeRegisterGuidePage, CreditChargeRegisterAgreePage, CreditChargeRegisterInputPage1, \
# CreditChargeRegisterInputPage2, CreditChargeRegisterConfirmPage
from pynanacolight.page_gift import RegisterGiftPage, RegisterGiftCodeInputPage, RegisterGiftCodeConfirmPage
from requests import Session
class PyNanacoLight:
def __init__(self, session: Session):
self._session = session
self._html = None
self.balance_card = None
self.balance_center = None
self.can_credit_charge = None
self.credit_charge_password = ''
self.registered_creditcard = ''
self.charge_count = None
self.charge_amount = None
def login(self, nanaco_number:str, card_number:str=None, password:str=None):
page = LoginPage(self._session)
page.input_nanaco_number(nanaco_number)
if card_number:
page.input_card_number(card_number)
self._html = page.click_login_by_card_number()
elif password:
page.input_password(password)
self._html = page.click_login_by_password()
else:
return
page = MenuPage(self._session, self._html)
self.balance_card = page.text_balance_card
self.balance_center = page.text_balance_center
def login_credit_charge(self, password:str):
self.credit_charge_password = password
page = MenuPage(self._session, self._html)
self._html = page.click_login_credit_charge()
self.can_credit_charge = page.can_credit_charge
if self.can_credit_charge:
page = CreditChargePasswordAuthPage(self._session, self._html)
page.input_credit_charge_password(password)
self._html = page.click_next()
page = CreditChargeMenuPage(self._session, self._html)
html = page.click_history()
page = CreditChargeHistoryPage(self._session, html)
self.registered_creditcard = page.text_registered_credit_card
self.charge_count = page.text_charge_count
self.charge_amount = page.text_charge_amount
# def register(self,
# number: str,
# expire_month: str, expire_year: str, code: str, phone: str,
# name: str, birth_year: str, birth_month: str, birth_day: str, password: str, mail: str, send_info: str,
# security_code: str
# ):
#
# page = MenuPage(self._session, self._html)
# self._html = page.click_login_credit_charge()
#
# page = CreditChargeRegisterGuidePage(self._session, self._html)
# self._html = page.click_next()
#
# page = CreditChargeRegisterAgreePage(self._session, self._html)
# self._html = page.click_agree()
#
# page = CreditChargeRegisterInputPage1(self._session, self._html)
# page.input_creditcard_number_1(number[:4])
# page.input_creditcard_number_2(number[4:8])
# page.input_creditcard_number_3(number[8:12])
# page.input_creditcard_number_4(number[12:16])
#
# page.input_creditcard_expire_month(expire_month)
# page.input_creditcard_expire_year(expire_year)
#
# page.input_security_code(code)
# page.input_phone_number(phone)
# self._html = page.click_next()
#
# page = CreditChargeRegisterInputPage2(self._session, self._html)
# page.input_kana_name(name)
# page.input_birth_year(birth_year)
# page.input_birth_month(birth_month)
# page.input_birth_day(birth_day)
# page.input_creditcharge_password(password)
# page.input_email(mail)
# page.select_send_information(send_info)
# self._html = page.click_next()
#
# page = CreditChargeRegisterConfirmPage(self._session, self._html)
# self._html = page.click_confirm()
def charge(self, value: int):
page = CreditChargeMenuPage(self._session, self._html)
self._html = page.click_charge()
page = CreditChargeInputPage(self._session, self._html)
page.input_charge_amount(value)
self._html = page.click_next()
page = CreditChargeConfirmPage(self._session, self._html)
self._html = page.click_confirm()
def cancel(self, password):
page = CreditChargeMenuPage(self._session, self._html)
self._html = page.click_cancel()
page = CreditChargeCancelPage(self._session, self._html)
page.input_credit_charge_password(password)
self._html = page.click_next()
page = CreditChargeCancelConfirmPage(self._session, self._html)
self._html = page.click_confirm()
def register_giftcode(self, code):
page = MenuPage(self._session, self._html)
self._html = page.click_register_gift()
page = RegisterGiftPage(self._session, self._html)
self._html = page.click_accept()
page = RegisterGiftCodeInputPage(self._session, self._html)
page.input_code(code)
self._html = page.click_submit()
page = RegisterGiftCodeConfirmPage(self._session, self._html)
self._html = page.click_confirm()
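# Minimal usage sketch (hypothetical card/credential values; assumes the page classes
# above drive the real nanaco web flow):
#
#     from requests import Session
#     client = PyNanacoLight(Session())
#     client.login(nanaco_number='1234567890123456', card_number='1234567')
#     print(client.balance_card, client.balance_center)
#     client.login_credit_charge(password='********')
#     client.charge(10000)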
|
[
"riskreturn5@gmail.com"
] |
riskreturn5@gmail.com
|
a43bb394d9813203066b09dc5296d553f9d21924
|
e9b1be3f47a135aa08c2dfd9630f364dda7a0f8f
|
/Problemsolving/4933 뉴턴의 사과.py
|
f3feed3acfa38a7f347c5550c80480cf49520331
|
[] |
no_license
|
JudyH0pps/Algorithms-and-Problem-Solving
|
0acdf9a5799b2d7f256cf3fd7517faef0d5bf8f9
|
203dc39978549a20e29fa0a2c1ad7bb780e61853
|
refs/heads/master
| 2021-11-23T03:16:48.270092
| 2021-11-04T05:14:59
| 2021-11-04T05:14:59
| 235,740,611
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 479
|
py
|
import sys
sys.stdin = open('input.txt')
def recur(nodes1, nodes2):
print(nodes1, nodes2)
if nodes1[-1] != nodes2[-1]:
return False
if len(nodes1) == 1:
return True
l = (len(nodes1) - 1) // 2
return recur(nodes1[:l + 1], nodes2[:l + 1]) and recur(nodes1[l + 1:-1], nodes2[l + 1:-1])
T = int(input())
for tc in range(1, T+1):
nodes1 = list(input().split())[:-1]
nodes2 = list(input().split())[:-1]
print(recur(nodes1, nodes2))
|
[
"ssafy_coach_43@ssafy.com"
] |
ssafy_coach_43@ssafy.com
|
788398854e79143d77bd7bcbbc79202a74d49414
|
3e1beedf80c60153482192b086347d0530701c37
|
/problem solving/cinema.py
|
cfee1d1abc3a0f3637d3211ba9876bb01e88668e
|
[] |
no_license
|
rishi772001/Competetive-programming
|
ac130bde426844e09a3e5162e279d61278c7c502
|
3493991cac55f225eeee67dd49f1caed8211465c
|
refs/heads/master
| 2023-04-12T14:59:59.447354
| 2021-04-30T05:05:13
| 2021-04-30T05:05:13
| 267,785,820
| 8
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,133
|
py
|
# https://leetcode.com/problems/cinema-seat-allocation/
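# Each row has 10 seats; a group of four people needs 4 adjacent empty seats within
# columns 2-5, 4-7 or 6-9 (1-indexed). The hand-rolled loop below keeps a running sum
# over a width-4 window of each row's occupancy and counts windows of empty seats.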
n = 2
booked = [[1,6],[1,8],[1,3],[2,3],[1,10],[1,2],[1,5],[2,2],[2,4],[2,10],[1,7],[2,5]]
theatre = [[0]*10 for i in range(n)]
for i in range(len(booked)):
theatre[booked[i][0] - 1][booked[i][1] - 1] += 1
print(theatre)
count = 0
for i in range(len(theatre)):
sum = theatre[i][1] + theatre[i][2] + theatre[i][3] + theatre[i][4]
j = 5
flag = False
if sum == 0:
count += 1
sum = theatre[i][j] + theatre[i][j + 1] + theatre[i][j + 2] + theatre[i][j + 3]
j = j + 3
while j < 10:
if j - 4 == 1 or j - 4 == 3 or j - 4 == 5:
sum += theatre[i][j]
sum -= theatre[i][j - 4]
j += 1
continue
if (sum == 0):
count += 1
if(j + 4 < 10):
sum = theatre[i][j] + theatre[i][j + 1] + theatre[i][j + 2] + theatre[i][j + 3]
j += 3
else:
break
sum += theatre[i][j]
sum -= theatre[i][j - 4]
j += 1
print(count)
|
[
"noreply@github.com"
] |
noreply@github.com
|
fa6df867465274ac8444a135a311aa00afd86d2c
|
48d08e7c20628479ea69b4a1a51f99a3db26c79d
|
/MathPy/04_sympy_intro.py
|
9a8b4683b4bf945cb6a3376f182c1efe1b83b73d
|
[] |
no_license
|
PyRPy/stats_py
|
59ae0975c5b549fb47f7630b1f232caf715fe2ff
|
0c87ebf7f84eb7a21bcedb3234170ef220ca2f14
|
refs/heads/master
| 2022-09-27T21:01:53.316765
| 2022-09-17T01:52:09
| 2022-09-17T01:52:09
| 167,268,454
| 4
| 6
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 379
|
py
|
from sympy import Symbol
# ------------------Defining Symbols and Symbolic Operations -------------------
x = Symbol('x')
print(x + x + 1)
a = Symbol('x')
print(a + a + 1)
# find the original symbol object
print(a.name)
# define multiple symbols
from sympy import symbols
x, y, z = symbols('x, y, z')
s = x*(x + y) + x*(y + z)
print(s)
print(x*x*(1 + x))
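# A possible follow-up (not in the original file): expanding and factoring expressions.
from sympy import expand, factor
print(expand(s))  # x**2 + 2*x*y + x*z
print(factor(x**2 + 2*x*y + x*z))  # x*(x + 2*y + z)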
|
[
"noreply@github.com"
] |
noreply@github.com
|
8b3c2658c664a3df8bd6335c9965bd6652a6eaea
|
34507bb3b99e262844fd6fb452278a22b1a39f63
|
/bq_elt/scratch.py
|
99fe6400f05462f4e027847fc3939f94933b66d5
|
[
"MIT"
] |
permissive
|
dkapitan/bq-elt
|
418ad8c7afa59896aa695b3e9e6d843b2963c3f9
|
f0297f4a7f6511894121672bd2f202d9ce3e84a6
|
refs/heads/master
| 2023-01-13T08:06:09.321786
| 2019-07-28T21:56:18
| 2019-07-28T21:56:18
| 199,340,183
| 0
| 0
|
MIT
| 2022-12-26T20:47:45
| 2019-07-28T21:45:08
|
Python
|
UTF-8
|
Python
| false
| false
| 1,165
|
py
|
import asyncio
import time
from google.cloud.bigquery import Client
async def await_jobs(job_ids, polling_delay=1):
"""
Await set of job_ids
"""
def callback(future):
awaiting_jobs.discard(future.id)
while job_ids:
print('waiting for jobs to finish ... sleeping for 1s')
await asyncio.sleep(polling_delay)
bq = Client('mediquest-sandbox')
query_1 = """
SELECT
language.name,
AVG(language.bytes)
FROM `bigquery-public-data.github_repos.languages`
, UNNEST(language) AS language
GROUP BY language.name
"""
query_2 = 'SELECT 2'
queries = [query_1, query_2]
def main2():
jobs = set()
for query in queries:
job = bq.query(query)
jobs.add(job.job_id)
job.add_done_callback(await_jobs.callback)
print('all jobs done, do your stuff')
async def say_after(delay, what):
await asyncio.sleep(delay)
print(what)
async def main():
print(f"started at {time.strftime('%X')}")
await say_after(1, 'hello')
await say_after(2, 'world')
print(f"finished at {time.strftime('%X')}")
if __name__ == '__main__':
# asyncio.run(main())
main2()
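# Note: the callback wiring in main2()/await_jobs is only a rough sketch and does not run
# as written (the local `callback` is not reachable as `await_jobs.callback`, and
# `awaiting_jobs` is never defined). A simpler, known-good alternative is to block on each
# job synchronously:
#
#     for query in queries:
#         bq.query(query).result()  # QueryJob.result() waits for the job to finish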
|
[
"dkapitan@mediquest.cloud"
] |
dkapitan@mediquest.cloud
|
9875f1144c46fe404d66afb507d923964e8c92d5
|
e273b9562d73e4d4467efca14f735464eeebf624
|
/face.py
|
44b1f218ec90a64841d009c472b9b9dfb9e058c1
|
[] |
no_license
|
kaifcoder/face-detection
|
b608317c1c7a7175a8bfccb2017c803fee765e14
|
048f897518fdb2ec7a8f7b5c2560f8297d71150d
|
refs/heads/main
| 2023-08-04T09:47:42.937897
| 2021-09-30T14:32:56
| 2021-09-30T14:32:56
| 412,099,027
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 820
|
py
|
import cv2
# Load the cascade
face_cascade = cv2.CascadeClassifier(r'D:\python automation\haarcascade_frontalface_default.xml')  # raw string so the backslashes are not treated as escapes
# To capture video from webcam.
cap = cv2.VideoCapture(0)
# To use a video file as input
# cap = cv2.VideoCapture('filename.mp4')
while True:
# Read the frame
_, img = cap.read()
# Convert to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Detect the faces
faces = face_cascade.detectMultiScale(gray, 1.1, 4)
# Draw the rectangle around each face
for (x, y, w, h) in faces:
cv2.rectangle(img, (x, y), (x+w, y+h), (255, 0, 0), 2)
# Display
cv2.imshow('img', img)
# Stop if escape key is pressed
k = cv2.waitKey(30) & 0xff
if k==27:
break
# Release the VideoCapture object and close the window
cap.release()
cv2.destroyAllWindows()
|
[
"noreply@github.com"
] |
noreply@github.com
|
beaf4d2b007e4eeedabf1b919b8e3bc69653fcd8
|
9387c48878a2315ccf5a3e18d2001615e5cda4dc
|
/utils/config.py
|
186efffdc7da23f6ad3b04f73e360e690a8c6c0b
|
[
"Apache-2.0"
] |
permissive
|
sungsulim/RLControl
|
8db77a57b564eb278b532fd0e1341424ce303d21
|
1af29e446958dc8c99ab101ddf5df969888d1e2e
|
refs/heads/master
| 2022-12-11T20:05:43.745490
| 2020-08-11T20:41:56
| 2020-08-11T20:41:56
| 134,628,706
| 10
| 3
|
Apache-2.0
| 2022-12-08T05:02:52
| 2018-05-23T21:44:52
|
Python
|
UTF-8
|
Python
| false
| false
| 617
|
py
|
class Config:
# default setting
def __init__(self):
# self.parser = argparse.ArgumentParser()
self.norm = None
self.exploration_policy = None
self.warmup_steps = 0
self.batch_size = 32
self.buffer_size = 1e6
self.tau = 0.01
self.gamma = 0.99
# if using OU noise for exploration
self.ou_theta = 0.15
self.ou_mu = 0.0
self.ou_sigma = 0.2
# add custom setting
def merge_config(self, custom_config):
for key in custom_config.keys():
setattr(self, key, custom_config[key])
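# Example (hypothetical overrides): start from the defaults and merge a custom dict.
#
#     cfg = Config()
#     cfg.merge_config({'batch_size': 64, 'warmup_steps': 1000})
#     print(cfg.batch_size, cfg.tau)  # 64 0.01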
|
[
"ss.sungsulim@gmail.com"
] |
ss.sungsulim@gmail.com
|
82ac55bdbfc5d0abb3e09f82ee978dcb53c47267
|
967f244bf52f03546c77d395bdb3781ded865958
|
/oct_task.py
|
1654eeb8c74e9b2ba4da123691833aa105ebd741
|
[
"MIT"
] |
permissive
|
user5111/ESS-SDK-for-Human
|
bc8f6c71444437051cc3440eb57c9e7a979254b3
|
682ba361798e39b32270569b9fe07b9a6a6e9e0c
|
refs/heads/master
| 2021-01-01T17:50:26.865355
| 2017-10-19T16:55:38
| 2017-10-19T16:55:38
| 98,172,759
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,113
|
py
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Author: Wang Wentao
# Created on 2017-10-18 13:42:00
from datetime import datetime ,timedelta
import requests
from io import BytesIO
from PIL import Image
from bs4 import BeautifulSoup
from openpyxl import Workbook
import login
import custinfo
import departinfo
USERNAME = 'zhangqian77'
DEPARTID = '11a0271'
PASSWORD = 'et3YzxYGJfhigS2oi+dh/5J/3WU='
def handle(tradeid):
for data in tradeid.find_all('data'):
if data['tradeTypeCode'] == '10' or data['tradeTypeCode'] == '12':
tradeId = data['tradeId']
return tradeId
session = requests.Session()
urls = login.LoginEssSystem(USERNAME,DEPARTID,PASSWORD,session)
login.LoginService(urls, '用户资料综合查询','custserv', session)
custinfos = custinfo.GetCustinfoByNum('01013820852', session)
login.LoginService(urls, '用户照片查询','custserv', session)
tradeid = handle(custinfo.GetTradeId('01013820852','2017-05-06', session))
print(tradeid)
live_photo_status = custinfo.GetCustLivePhotoByTradeId(tradeid, session, fileaddr='C:/Users/tao/Desktop')
print(custinfos)
|
[
"wojiaowwt@gmail.com"
] |
wojiaowwt@gmail.com
|